Full Code of soulfx/gmusic-playlist for AI

master 46bb21df2089 cached
11 files
32.8 KB
8.0k tokens
35 symbols
1 requests
Download .txt
Repository: soulfx/gmusic-playlist
Branch: master
Commit: 46bb21df2089
Files: 11
Total size: 32.8 KB

Directory structure:
gitextract_7p8_x751/

├── .gitignore
├── ExamplePlaylist.csv
├── ExportLists.py
├── ImportList.py
├── LICENSE
├── README.md
├── common.py
├── preferences.py
└── test/
    ├── atestframe.py
    ├── test-common.py
    └── z-README.txt

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
*.csv
*.log
*.pyc

!ExamplePlaylist.csv


================================================
FILE: ExamplePlaylist.csv
================================================
,test comment (and blank track)

,test fuzzy artist title search
stray cats stray cat strut

,test fuzzy title artist search
just what i needed the cars

,test fuzzy search for song that should return a low match
instant karma! we all shine on john lennon

,"test detailed library search (this song isn't in aa, but it's in my library)"
classical gas,vanessa-mae

,test detailed all access search (the top fuzzy result is incorrect)
am/fm,!!!,strange weather

,this should return a low result
back in black,ac/dc,back in black

,test detailed search which should return song from library if you have it
orion,metallica,master of puppets

,test album distinction and slight artist mismatch
Moments in Love,The Art of Noise,And What Have You Done with My Body God?
Moments in Love,The Art of Noise,Daft

,test slight title mismatch
Making Love Out of Nothing at All,Air Supply,Ultimate Air Supply

,"test low score, mismatched title, and mismatched artist, and comma in entry"
Blame It on the Rain,Milli Vanilli,Greatest Hits

,"test low score, mismatched title, mismatched artist, mismatched song, and entry comma"
1o1,Chris Duarte Groop,Ronp

,test useless info in brackets and duplicate checks
1o1 (Live!) [In Concert] {World Tour},Chris Duarte Groop,Ronp

,test title only search
Be Thou My Vision,Dallan Forgaill,

,test initial unmatched fuzzy with info in brackets
stray cats (asdfDoNotMatchMe1234) stray cat strut

,
,expected results
,13/15 tracks imported
,2 duplicate tracks
,



================================================
FILE: ExportLists.py
================================================
# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>

from common import *

# require the output directory as the first command line argument
if len(sys.argv) < 2:
    log('ERROR output directory is required')
    # pause so the message can be read before a console window closes
    time.sleep(3)
    exit()

# setup the output directory, create it if needed
output_dir = sys.argv[1]
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# log in and load personal library
api = open_api()
library = load_personal_library()

# exports a single playlist to <output_dir>/<playlist_name>.csv and logs
# progress to <playlist_name>.log; playlist_tracks is the list of track
# dicts returned by the mobile client api
def playlist_handler(playlist_name, playlist_description, playlist_tracks):
    # skip empty and no-name playlists
    if not playlist_name: return
    if len(playlist_tracks) == 0: return

    # setup output files ('/' is stripped since it is a path separator)
    playlist_name = playlist_name.replace('/', '')
    open_log(os.path.join(output_dir,playlist_name+u'.log'))
    outfile = codecs.open(os.path.join(output_dir,playlist_name+u'.csv'),
        encoding='utf-8',mode='w')

    # keep track of stats
    stats = create_stats()
    export_skipped = 0
    # keep track of songids incase we need to skip duplicates
    song_ids = []

    log('')
    log('============================================================')
    log(u'Exporting '+ unicode(len(playlist_tracks)) +u' tracks from '
        +playlist_name)
    log('============================================================')

    # add the playlist description as a "comment"
    # (a leading separator marks a comment line in this csv format)
    if playlist_description:
        outfile.write(tsep)
        outfile.write(playlist_description)
        outfile.write(os.linesep)

    for tnum, pl_track in enumerate(playlist_tracks):
        track = pl_track.get('track')

        # we need to look up these track in the library
        if not track:
            library_track = [
                item for item in library if item.get('id')
                in pl_track.get('trackId')]
            if len(library_track) == 0:
                # no library info available either; log and skip the track
                log(u'!! '+str(tnum+1)+repr(pl_track))
                export_skipped += 1
                continue
            track = library_track[0]

        result_details = create_result_details(track)

        # skip duplicate songs unless the preferences allow them
        if not allow_duplicates and result_details['songid'] in song_ids:
            log('{D} '+str(tnum+1)+'. '+create_details_string(result_details,True))
            export_skipped += 1
            continue

        # update the stats
        update_stats(track,stats)

        # export the track
        song_ids.append(result_details['songid'])
        outfile.write(create_details_string(result_details))
        outfile.write(os.linesep)

    # calculate the stats
    stats_results = calculate_stats_results(stats,len(playlist_tracks))

    # output the stats to the log
    log('')
    log_stats(stats_results)
    log(u'export skipped: '+unicode(export_skipped))

    # close the files
    close_log()
    outfile.close()

# the personal library is used so we can lookup tracks that fail to return
# info from the ...playlist_contents() call

playlist_contents = api.get_all_user_playlist_contents()

# export each of the user's playlists
for playlist in playlist_contents:
    playlist_name = playlist.get('name')
    playlist_description = playlist.get('description')
    playlist_tracks = playlist.get('tracks')

    playlist_handler(playlist_name, playlist_description, playlist_tracks)

if export_thumbs_up:
    # get thumbs up playlist
    # (a rating greater than 1 marks a thumbs-up track in this api)
    thumbs_up_tracks = []
    for track in library:
        if track.get('rating') is not None and int(track.get('rating')) > 1:
            thumbs_up_tracks.append(track)


    # modify format of each dictionary to match the data type
    # of the other playlists
    thumbs_up_tracks_formatted = []
    for t in thumbs_up_tracks:
        thumbs_up_tracks_formatted.append({'track': t})

    playlist_handler('Thumbs up', 'Thumbs up tracks', thumbs_up_tracks_formatted)

if export_all:
    # export the entire personal library as a single playlist
    all_tracks_formatted = []
    for t in library:
        all_tracks_formatted.append({'track': t})

    playlist_handler('All', 'All tracks', all_tracks_formatted)

# log out of the api session
close_api()
    


================================================
FILE: ImportList.py
================================================
# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>

import re
import datetime
import math
import time
from common import *

# the csv file that google's info on each imported song is written to
csvfile = None

# releases any open resources (csv file, log file, api session)
def cleanup():
    if csvfile is not None:
        csvfile.close()
    close_log()
    close_api()

# pattern matching any run of characters that are not letters or digits
# (underscore included); compiled once instead of twice per call, and
# written as a raw string so the escape survives future python versions
_NON_ALNUM = re.compile(r'[\W_]+', re.UNICODE)

# compares two strings based only on their alphanumeric characters;
# returns True when either normalized string contains the other
def s_in_s(string1,string2):
    if not string1 or not string2:
        return False
    s1 = _NON_ALNUM.sub(u'',string1.lower())
    s2 = _NON_ALNUM.sub(u'',string2.lower())

    return s1 in s2 or s2 in s1

# logs the message, waits briefly so it can be read, then releases open
# resources and exits the program
def delayed_exit(message):
    log(message)
    # pause so console users can read the message before the window closes
    time.sleep(5)
    cleanup()
    exit()

# adds the matched song to the import results: logs low-quality (and
# optionally high-quality) matches, skips duplicates when configured,
# and writes the match details to the output csv
def add_song(details,score):
    # score is the (result_score, score_reason) tuple from score_track()
    (result_score,score_reason) = score

    # NOTE: 'track' here is the module-level current input line being
    # processed by the main loop, not a field of 'details'
    if ('+' in result_score and log_high_matches) or '-' in result_score:
        log(result_score+track+score_reason+u' #'+str(len(song_ids)))
        log (u'   ' + create_details_string(details, True))

    if not allow_duplicates and details['songid'] in song_ids:
        return

    song_ids.append(details['songid'])
    csvfile.write(create_details_string(details))
    csvfile.write(os.linesep)

# records a track that could not be matched: bumps the no-match counter,
# logs it with the '!!' marker, and copies the raw line to the output csv
# so the line can be edited and retried on a later import
def log_unmatched(track):
    global no_matches
    no_matches += 1
    log(u'!! ' + track)
    csvfile.write(track)
    csvfile.write(os.linesep)

# searches for the song with the given details; returns the top search
# result item (a dict with 'track' and 'score' keys) or None when no
# results were found
def search_for_track(details):
    search_results = []
    dlog('search details: '+str(details))

    # search the personal library for the track
    lib_album_match = False
    if details['artist'] and details['title'] and search_personal_library:
        lib_results = [item for item in library if
            s_in_s(details['artist'],item.get('artist'))
            and s_in_s(details['title'],item.get('title'))]
        dlog('lib search results: '+str(len(lib_results)))
        for result in lib_results:
            if s_in_s(result['album'],details['album']):
                lib_album_match = True
            item = {}
            item[u'track'] = result
            # library hits are given a fixed top score
            item[u'score'] = 200
            search_results.append(item)

    # search all access for the track, unless the library already
    # produced an album-level match
    if not lib_album_match:
        query = u''
        if details['artist']:
            query = details['artist']
        if details['title']:
            query += u' ' + details['title']
        if not len(query):
            # fuzzy line: fall back to the raw input line (module global)
            query = track
        dlog('aa search query:'+query)
        aa_results = aa_search(query,7)
        dlog('aa search results: '+str(len(aa_results)))
        search_results.extend(aa_results)

    if not len(search_results):
        return None

    top_result = search_results[0]
    # if we have detailed info, perform a detailed search by narrowing
    # the results to title/artist (and optionally album) matches
    if details['artist'] and details['title']:
        search_results = [item for item in search_results if
            s_in_s(details['title'],item['track']['title'])
            and s_in_s(details['artist'],item['track']['artist'])]
        if details['album']:
            search_results = [item for item in search_results if
                    s_in_s(details['album'],item['track']['album'])]
        dlog('detail search results: '+str(len(search_results)))
        # keep the original fuzzy top result when narrowing removed
        # everything
        if len(search_results) != 0:
            top_result = search_results[0]

    return top_result

# match score stats (module-level counters updated during the import)
no_matches = 0    # tracks that could not be matched at all
low_scores = 0    # tracks flagged with at least one low-quality marker
low_titles = 0    # low matches caused by a song title mismatch
low_artists = 0   # low matches caused by an artist mismatch
track_count = 0   # total number of valid track lines processed
duplicates = 0    # duplicate tracks found and skipped

# scores the match against the query; returns a (result_score,
# score_reason) tuple where result_score is u' + ' for a good match or
# u' - ' for a low one, and score_reason accumulates marker codes:
# {s} low search score, {T} title mismatch, {a} album mismatch,
# {A} artist mismatch, {D} duplicate song
def score_track(details,result_details,top_score = 200):
    global low_scores
    global low_titles
    global low_artists
    global duplicates

    # check for low quality matches
    result_score = u' + '
    score_reason = u' '
    is_low_result = False
    if top_score < 120:
        score_reason += u'{s}'
        #low scores alone don't seem to me a good indication of an issue
        #is_low_result = True
    # wrong song (fuzzy lines have no parsed title, so the raw input
    # line - module global 'track' - is compared instead)
    if ((details['title']
        and not s_in_s(details['title'],result_details['title']))
        or (not details['title']
        and not s_in_s(track,result_details['title']))):
        score_reason += u'{T}'
        low_titles += 1
        is_low_result = True
    # wrong album
    if (details['album'] and not ignore_album_mismatch
        and not s_in_s(details['album'],result_details['album'])):
        score_reason += u'{a}'
        is_low_result = True
    # wrong artist
    if (details['artist']
        and not s_in_s(details['artist'],result_details['artist'])):
        score_reason += u'{A}'
        low_artists += 1
        is_low_result = True
    # duplicate song
    if not allow_duplicates and result_details['songid'] in song_ids:
        score_reason += u'{D}'
        duplicates += 1
        is_low_result = True

    if is_low_result:
        result_score = u' - '
        low_scores += 1

    return (result_score,score_reason)

# check to make sure a filename was given
if len(sys.argv) < 2:
    delayed_exit(u'ERROR input filename is required')


# setup the input and output filenames and derive the playlist name
input_filename = sys.argv[1].decode('utf-8')
output_filename = os.path.splitext(input_filename)[0]
# strip a previous run's timestamp suffix so re-imports don't stack them
output_filename = re.compile('_\d{14}$').sub(u'',output_filename)
playlist_name = os.path.basename(output_filename)

# append a fresh timestamp so each run writes unique output files
output_filename += u'_' + unicode(datetime.datetime.now().strftime(
    '%Y%m%d%H%M%S'))
log_filename = output_filename + u'.log'
csv_filename = output_filename + u'.csv'

#open the log and output csv files (line buffered so progress is visible)
csvfile = codecs.open(csv_filename, encoding='utf-8', mode='w', buffering=1)
open_log(log_filename)

# read the playlist file into the tracks variable, one line per entry
tracks = []
plog('Reading playlist... ')
with codecs.open(input_filename, encoding='utf-8', mode='r', errors='ignore') as f:
    tracks = f.read().splitlines()
log('done. '+str(len(tracks))+' lines loaded.')

# log in and load personal library
api = open_api()
library = load_personal_library()

# begin searching for the tracks
log('===============================================================')
log(u'Searching for songs from: '+playlist_name)
log('===============================================================')


# gather up the song_ids and submit as a batch
song_ids = []

# collect some stats on the songs
stats = create_stats()

# time how long it takes
start_time = time.time()

# loop over the tracks that were read from the input file
for track in tracks:

    # skip empty lines
    if not track:
        continue

    # parse the track info if the line is in detail format
    details_list = get_csv_fields(track)
    details = create_details(details_list)

    # skip comment lines (an empty first field followed by the comment
    # text), copying them through to the output csv
    if len(details_list) == 2 and not details_list[0]:
        log(details_list[1])
        csvfile.write(tsep)
        csvfile.write(details_list[1])
        csvfile.write(os.linesep)
        continue

    # skip empty details records
    if (len(details_list) >= 3 and not details['artist']
        and not details['album'] and not details['title']):
        continue

    # at this point we should have a valid track
    track_count += 1

    # don't search if we already have a track id
    if details['songid']:
        add_song(details,score_track(details,details))
        continue

    # search for the song
    search_result = search_for_track(details)

    # a details dictionary we can use for 'smart' searching
    smart_details = {}
    smart_details['title'] = details['title']
    smart_details['artist'] = details['artist']
    smart_details['album'] = details['album']

    # fuzzy lines have no parsed title; search with the whole line
    if not details['title']:
        smart_details['title'] = track

    # if we didn't find anything strip out any (),{},[],<> from title
    match_string = '\[.*?\]|{.*?}|\(.*?\)|<.*?>'
    if not search_result and re.search(match_string,smart_details['title']):
        dlog('No results found, attempting search again with modified title.')
        smart_details['title'] = re.sub(match_string,'',smart_details['title'])
        search_result = search_for_track(smart_details)

    # if there isn't a result, try searching for the title only
    if not search_result and search_title_only:
        dlog('Attempting to search for title only')
        smart_details['artist'] = None
        smart_details['album'] = None
        smart_details['title_only_search'] = True
        search_result = search_for_track(smart_details)

    # check for a result
    if not search_result:
        log_unmatched(track)
        continue

    # gather up info about result
    result = search_result.get('track')
    result_details = create_result_details(result)
    result_score = score_track(details,result_details,
        search_result.get('score'))

    # if the song title doesn't match after a title only search, skip it
    # (a title-only hit with the wrong title is almost certainly wrong)
    (score,reason) = result_score
    if '{T}' in reason and 'title_only_search' in smart_details:
        log_unmatched(track)
        continue

    update_stats(result,stats)

    # add the song to the id list
    add_song(result_details,result_score)

total_time = time.time() - start_time

log('===============================================================')
log(u'Adding '+unicode(len(song_ids))+' found songs to: '+playlist_name)
log('===============================================================')

# add the songs to the playlist(s); google limits playlist size, so the
# found songs are split across multiple numbered "Part N" playlists
# when necessary
max_playlist_size = 1000
current_playlist = 1
total_playlists_needed = int(math.ceil(len(song_ids)/float(max_playlist_size)))
while current_playlist <= total_playlists_needed:
    # build the playlist name, add part number if needed
    current_playlist_name = playlist_name
    if total_playlists_needed > 1:
        current_playlist_name += u' Part ' + unicode(current_playlist)

    # create the playlist and add the songs
    playlist_id = api.create_playlist(current_playlist_name)
    current_playlist_index = ( current_playlist - 1 ) * max_playlist_size
    current_songs = song_ids[current_playlist_index :
                             current_playlist_index + max_playlist_size]

    added_songs = api.add_songs_to_playlist(playlist_id,current_songs)

    log(u' + '+current_playlist_name+u' - '+unicode(len(added_songs))+
        u'/'+unicode(len(current_songs))+' songs')

    # go to the next playlist section
    current_playlist += 1

# log a final status (the conditionals guard against division by zero
# when no tracks were processed or no low scores occurred)
no_match_ratio = float(no_matches) / track_count if track_count else 0
low_score_ratio = float(low_scores) / track_count if track_count else 0
low_artists_ratio = float(low_artists) / low_scores if low_scores else 0
low_titles_ratio = float(low_titles) / low_scores if low_scores else 0
found_ratio = 1 - no_match_ratio - low_score_ratio

log('===============================================================')
log('   ' + str(len(song_ids)) + '/' + str(track_count) + ' tracks imported')
log(' ! ' + str(no_match_ratio*100) + '% of tracks could not be matched')
log(' - ' + str(low_score_ratio*100) + '% of tracks had low match scores')
log('  {T} ' + str(low_titles)
    + ' low matches were due to a song title mismatch')
log('  {A} ' + str(low_artists)
    + ' low matches were due to song artist mismatch')
if not allow_duplicates:
    log ('  {D} ' + str(duplicates)
        + ' duplicates were found and skipped')
log(' + ' + str(found_ratio*100) + '% of tracks had high match scores')
log('')
stats_results = calculate_stats_results(stats,len(song_ids))
log_stats(stats_results)

log('\nsearch time: '+str(total_time))

# release the csv file, the log file, and the api session
cleanup()



================================================
FILE: LICENSE
================================================
The MIT License (MIT)

Copyright (c) 2014 John Elkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

================================================
FILE: README.md
================================================
gmusic-playlist
===============

playlist scripts for gmusic

## Prerequisites

- python 2.7 - https://www.python.org
- gmusicapi - https://github.com/simon-weber/Unofficial-Google-Music-API

Before using the scripts, open up the preferences.py file and change the username.

When the scripts are run they will prompt for your password.  If you use two factor authentication you will need to create and use an application password.

## ExportLists.py

This script will export all playlists to a given directory as csv files.  For the purpose of these scripts CSV stands for character separated value.  The default separator character is ','.  The separator character is configurable in the preferences file.  Versions of the code previous to Aug 16 2015 used a '\' separator character as the default.  Most spreadsheet apps can open csv files.

The order in which the artist, album, and title information appears as well as the separating character between each piece of information is configured in the preferences.py file.  The default order and separator character will output song info as: "title","artist","album","songid"

The csv files can be re-imported using the ImportList.py script.

Command Line Usage: python ExportLists.py OutputDir

OutputDir is a directory you would like the playlists to be output to.

The export progress will be output to the console and to a log file.  At the completion of the export a status of the overall makeup of the playlist will be output.

## ImportList.py

This script will import a given csv file into google music as a playlist. The title of the playlist will be the name of the text file and each track will be matched to each line in the text file.

Command Line Usage: python ImportList.py ExamplePlaylist.csv

The progress of the playlist creation will be output to the console and to a log file.  Tracks that could not be found are prefixed with !! and tracks that were found but may not be a good match are prefixed with -.  One or more of the following will appear after a track with a low match: {A}{a}{T}{s}  These markings indicate why the match was low,  {A} means the artist didn't match, {T} means the title didn't match, {a} means the album didn't match, and {s} means it had a low result score.  In addition to a log file, a csv file is created which contains all tracks found and their associated google music song id.

The csv file output from the ImportList.py script can be used to fix any song that didn't import correctly.  Open the csv file, look for the songs without any song id and see if there is something that you can change in the track info to get google to find the song.  Save the file and then re-run it through the ImportList.py script.  Since the csv file will contain the song id's for songs it already found it won't need to look those up again and will just focus on finding the songs that don't have id's yet.

You can also look up the song you want via google music's web interface and get the song id by clicking share > get link.  The song id is given in the link.

## Playlist files

The format of each track in a playlist file can either be fuzzy or detailed info.  Comments are also supported.

A fuzzy track is a track that has no separating characters and simply lists a song title, song title and artist, or song artist and title.  See the ExamplePlaylist.csv file for a few examples of fuzzy tracks.  Fuzzy tracks will only be matched to all access tracks.  If you have a song in a playlist that isn't in all access, but is in your personal library you will need to use a detailed track.

A detailed track lists title,artist,and album information separated by the separator character and in the order defined in the preferences.py file.  The songId is optional, and will be added by the scripts when outputting a csv file.  See the ExamplePlaylist.csv file for a few examples of detailed track lists.  The album can be left out if not required.

A comment in a playlist file follows the form of Ccomment where C is the separator character and comment is the comment.  See the ExamplePlaylist.csv file.

## see also 

[a javascript version](https://github.com/soulfx/gmusic-playlist.js) for doing import / export directly within google music.


================================================
FILE: common.py
================================================
# Author: John Elkins <john.elkins@yahoo.com>
# License: MIT <LICENSE>

__version__ = '0.160530'

__required_gmusicapi_version__ = '10.0.0'

from collections import Counter
from gmusicapi import __version__ as gmusicapi_version
from gmusicapi import Mobileclient
from gmusicapi.exceptions import CallFailure
from preferences import *
import re
import time
import getpass
import sys
import os
import codecs

# the api to use for accessing google music (set by open_api())
api = None

# the logfile for keeping track of things (set by open_log())
logfile = None

# provide a shortcut for track_info_separator
tsep = track_info_separator

# flag indicating if account is all access capable
# (flipped to False by aa_search() the first time a search fails)
allaccess = True

# check for debug set via cmd line
# (-dDEBUG on the command line overrides the preferences setting)
if '-dDEBUG' in sys.argv:
    debug = True

# check that the installed gmusicapi meets the minimum required version;
# logs an error and exits the program when it does not
def assert_prerequisites():

    required = __required_gmusicapi_version__
    actual = gmusicapi_version

    def version(ver):
        # compare release components numerically and positionally so
        # that e.g. 9.12.0 is correctly treated as older than 10.0.0
        # (the previous digit-concatenation approach compared
        # 9120 > 1000 and wrongly accepted it)
        return tuple(int(part) for part in re.findall(r'\d+', ver))

    if ( version(actual) < version(required) ):
        log("ERROR gmusicapi version of at least "+required+" is required. ")
        exit()

# loads and returns all songs from the user's personal library
def load_personal_library():
    plog('Loading personal library... ')
    personal_tracks = api.get_all_songs()
    log('done. ' + str(len(personal_tracks)) + ' personal tracks loaded.')
    return personal_tracks

# opens the log file for writing (line-buffered utf-8) and keeps a
# module-level handle so log() can mirror console output into the file
def open_log(filename):
    global logfile
    logfile = codecs.open(filename, mode='w', encoding='utf-8', buffering=1)
    return logfile

# closes the log file when one has been opened
def close_log():
    if logfile is not None:
        logfile.close()

# logs to both the console and log file if it exists
# message: the (unicode) text to write; nl: append a platform newline
def log(message, nl = True):
    if nl:
        message += os.linesep
    # encode for the console encoding; unencodable characters are
    # replaced so logging never raises on exotic track names
    sys.stdout.write(message.encode(sys.stdout.encoding, errors='replace'))
    if logfile:
        logfile.write(message)

# logs a message only when the debug preference is enabled
def dlog(message):
    if not debug:
        return
    log(message)

# logs a progress message (same text, but without a trailing newline)
def plog(message):
    log(message, nl=False)

# searches all access for the search string, returning up to max_results
# song hits; returns an empty list (and permanently disables further all
# access searches) when the account has no all access subscription
def aa_search(search_string,max_results):
    global allaccess
    results = []
    if allaccess:
        try:
            results = api.search(search_string,
                    max_results=max_results).get('song_hits')
        except CallFailure:
            # CallFailure here means the account can't use all access
            allaccess = False
            log('WARNING no all access subscription detected. '+
                ' all access search disabled.')
    return results

# gets the track detail keys available for google tracks by inspecting a
# sample search result; falls back to a minimal default when all access
# search is unavailable
def get_google_track_details(sample_song = 'one u2'):
    results = aa_search(sample_song,1)
    if len(results):
        # list() keeps the return type consistent across python versions
        return list(results[0].get('track').keys())
    # previously this returned the *string* "['title','artist','album']";
    # return an actual list so the return type matches the search path
    return ['title','artist','album']

# creates result details from the given track: copies all track fields
# and derives a 'songid' (the all access store id when present, falling
# back to the personal library id)
def create_result_details(track):
    # dict() copies the mapping in one call and works on both python 2
    # and 3 (the previous hand-rolled loop used the py2-only iteritems())
    result_details = dict(track)
    result_details['songid'] = (track.get('storeId')
        if track.get('storeId') else track.get('id'))
    return result_details

# builds a details dictionary from the parsed csv fields, mapping each
# field position to the key configured in track_info_order
def create_details(details_list):
    details = {
        'artist': None,
        'album': None,
        'title': None,
        'songid': None,
    }
    # a single field means the line is a fuzzy track, not detailed info
    if len(details_list) < 2:
        return details
    for pos, nfo in enumerate(details_list):
        # ignore any extra fields beyond the configured order
        if pos >= len(track_info_order):
            continue
        details[track_info_order[pos]] = nfo.strip()
    return details

# splits a csv line into its separate fields, honoring double quotes so
# that a quoted field may contain the separator character
def get_csv_fields(csvString,sepChar=tsep):
    fields = []
    current = u''
    in_quotes = False
    for ch in csvString:
        if ch == sepChar and not in_quotes:
            # field boundary: unquote what we have and start a new field
            fields.append(handle_quote_input(current))
            current = u''
        else:
            if ch == '"':
                in_quotes = not in_quotes
            current += ch
    fields.append(handle_quote_input(current))
    return fields

# quotes a csv field when it contains the separator or a quote character
def handle_quote_output(aString):
  """ See: https://en.wikipedia.org/wiki/Comma-separated_values#Basic_rules_and_examples """
  needs_quoting = '"' in aString or tsep in aString
  if needs_quoting:
    return '"%s"' % aString.replace('"', '""')
  return aString

# strips one layer of surrounding quotes from a csv field and collapses
# doubled quotes; fields without surrounding quotes pass through as-is
def handle_quote_input(aString):
    is_quoted = aString[:1] == '"' and aString[-1:] == '"'
    if not is_quoted:
        return aString
    return aString[1:-1].replace('""', '"')

# creates a details string based off the given details dictionary; the
# fields are emitted in track_info_order joined by track_info_separator,
# and skip_id omits the songid field
def create_details_string(details_dict, skip_id = False):
    out_string = u''
    for nfo in track_info_order:
        if skip_id and nfo == 'songid':
            continue
        # NOTE: the separator is appended before the key lookup, so a
        # missing key mid-list leaves consecutive separators in the output
        if len(out_string) != 0:
            out_string += track_info_separator
        try:
            out_string += handle_quote_output(unicode(details_dict[nfo]))
        except KeyError:
            # some songs don't have info like year, genre, etc
            pass
    return out_string

# logs into the google music api, prompting for the account password;
# exits the program when login fails; returns the api instance
def open_api():
    global api
    log('Logging into google music...')
    # get the password each time so that it isn't stored in plain text
    password = getpass.getpass(username + '\'s password: ')
    
    api = Mobileclient()
    if not api.login(username, password, Mobileclient.FROM_MAC_ADDRESS):
        log('ERROR unable to login')
        # pause so the error can be read before a console window closes
        time.sleep(3)
        exit()
        
    # drop the reference to the password as soon as login is done
    password = None
    log('Login Successful.')
    dlog(u'Available track details: '+str(get_google_track_details()))
    return api

# logs out of the google music api when a session was opened
def close_api():
    if api is not None:
        api.logout()

# creates a fresh stats dictionary for accumulating playlist statistics
def create_stats():
    return {
        'genres': [],
        'artists': [],
        'years': [],
        'total_playcount': 0,
    }

# updates the stats dictionary in place with info from the given track;
# absent or falsy genre/year/playCount values are ignored
def update_stats(track,stats):
    stats['artists'].append(track.get('artist'))
    genre = track.get('genre')
    if genre:
        stats['genres'].append(genre)
    year = track.get('year')
    if year:
        stats['years'].append(year)
    playcount = track.get('playCount')
    if playcount:
        stats['total_playcount'] += playcount

# calculates aggregate results (frequency counts and the average
# playcount per track) from the collected stats
def calculate_stats_results(stats,total_tracks):
    return {
        'genres': Counter(stats['genres']),
        'artists': Counter(stats['artists']),
        'years': Counter(stats['years']),
        'playback_ratio': stats['total_playcount'] / float(total_tracks),
    }

# logs a summary of the stats results
def log_stats(results):
    for label in ('genres', 'artists', 'years'):
        log(u'top 3 ' + label + u': ' + repr(results[label].most_common(3)))
    log(u'playlist playback ratio: ' + unicode(results['playback_ratio']))

# display version and check prerequisites (runs at import time so every
# script gets the version banner and the gmusicapi version check)
log("gmusic-playlist: "+__version__)
log("gmusicapi: "+gmusicapi_version)
assert_prerequisites();


================================================
FILE: preferences.py
================================================

# the google account username to use (the scripts prompt for the
# password at run time)
username = 'john.elkins@gmail.com'

# the separator to use for detailed track information
track_info_separator = u','
#track_info_separator = u'\\'
#track_info_separator = u'|'

# the order of the track details within each csv line
track_info_order = ['title','artist','album','songid']
#track_info_order = ['title','artist','album','genre','year','durationMillis','playCount','rating','songid']

# output debug information to the log (can also be enabled by passing
# -dDEBUG on the command line)
debug = False

# don't import or export the same song twice
allow_duplicates = False

# == ImportList.py preferences ==============================================

# ignore mismatched albums.  An album mismatch often doesn't mean the song is
# wrong.  This is set to true so that mismatched albums don't skew the results
# and flag too many songs with low scores
ignore_album_mismatch = True

# search for tracks in the personal library, tracks found there will work
# for you, but if you share your playlist others may not be able to play
# some tracks.  Set to false if you want to make sure that your playlist doesn't
# contain any tracks that are not shareable.
search_personal_library = True

# when unable to locate a track using full details (title,artist,album); perform
# a search using only the song title.  this will hopefully find something to
# at least put into the track spot.  this is handy for playlists that list the
# composer or songwriter for a song instead of a singer.
search_title_only = True

# log high matches in addition to the songs that couldn't be found or had
# low matches.
log_high_matches = False

# == ExportLists.py preferences =============================================

# export "Thumbs Up" playlist
export_thumbs_up = True

# export "ALL" playlist
export_all = True


================================================
FILE: test/atestframe.py
================================================
# Put the parent directory onto the import path so the tests can import
# the project modules (common.py, preferences.py) without installation.
# NOTE: the original used `from os import sys, path`, which pulls `sys`
# out of the undocumented `os.sys` attribute; import it directly instead.
import sys
import unittest
from os import path

sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

def run_test():
    """Discover and run the calling module's unittest cases verbosely."""
    verbosity_level = 2
    unittest.main(verbosity=verbosity_level)


================================================
FILE: test/test-common.py
================================================
from atestframe import *
from common import *

class TestCommon(unittest.TestCase):
    """Unit tests for the csv field parsing and quoting helpers in common.py."""

    def test_get_csv_fields(self):
        """ test that quoted and unquoted fields are being recognized """
        fields = get_csv_fields(
            u'something,"good",to "eat","like a ""hot""",dog', u',')
        expected = [u'something', u'good', u'to "eat"', u'like a "hot"', u'dog']
        for position, value in enumerate(expected):
            self.assertEqual(fields[position], value)
        # a leading separator yields an empty first field
        fields = get_csv_fields(u',hello', u',')
        self.assertEqual(fields[0], u'')
        self.assertEqual(fields[1], u'hello')
        # separators inside a quoted field must not split it
        fields = get_csv_fields(u'test,"commas, in, the, field"', u',')
        self.assertEqual(len(fields), 2)
        self.assertEqual(fields[0], u'test')
        self.assertEqual(fields[1], u'commas, in, the, field')

    def test_handle_quote_input(self):
        """ test that quotes are being removed as expected """
        expectations = [
            (u'', u''),
            (u'a', u'a'),
            (u'""', u''),
            (u'""asdf""', u'"asdf"'),
            (u'"asdf"', u'asdf'),
        ]
        for raw, stripped in expectations:
            self.assertEqual(handle_quote_input(raw), stripped)

    def test_handle_quote_output(self):
        """ test that quotes are applied only when needed """
        # no special characters -> value passes through untouched
        self.assertEqual(handle_quote_output("nothing to quote"),
                         "nothing to quote")
        # embedded quotes are doubled and the whole value wrapped
        self.assertEqual(handle_quote_output('this "needs" quoting'),
                         '"this ""needs"" quoting"')
        # a separator in the value also forces wrapping
        self.assertEqual(handle_quote_output('tsep, in field'),
                         '"tsep, in field"')

    def test_quote_unquote(self):
        """ test for verifying the quoting and unquoting that occurs in track values """
        round_trips = [
            ('', ''),
            ('bog', 'bog'),
            ('"bog', '"""bog"'),
            ('"bog"', '"""bog"""'),
            ('b"o"g', '"b""o""g"'),
            ('"', '""""'),
        ]
        for original, quoted in round_trips:
            encoded = handle_quote_output(original)
            self.assertEqual(encoded, quoted)
            # feeding the quoted form back in must restore the original
            self.assertEqual(handle_quote_input(encoded), original)

# execute all TestCommon cases when this file is run directly
run_test()


================================================
FILE: test/z-README.txt
================================================
Run the tests as regular Python scripts, like so:
python test*

If you have the coverage.py script installed,
the tests can be run with coverage info like so:
python -m coverage run --branch test*
python -m coverage html
Download .txt
gitextract_7p8_x751/

├── .gitignore
├── ExamplePlaylist.csv
├── ExportLists.py
├── ImportList.py
├── LICENSE
├── README.md
├── common.py
├── preferences.py
└── test/
    ├── atestframe.py
    ├── test-common.py
    └── z-README.txt
Download .txt
SYMBOL INDEX (35 symbols across 5 files)

FILE: ExportLists.py
  function playlist_handler (line 20) | def playlist_handler(playlist_name, playlist_description, playlist_tracks):

FILE: ImportList.py
  function cleanup (line 14) | def cleanup():
  function s_in_s (line 21) | def s_in_s(string1,string2):
  function delayed_exit (line 30) | def delayed_exit(message):
  function add_song (line 37) | def add_song(details,score):
  function log_unmatched (line 52) | def log_unmatched(track):
  function search_for_track (line 60) | def search_for_track(details):
  function score_track (line 120) | def score_track(details,result_details,top_score = 200):

FILE: common.py
  function assert_prerequisites (line 37) | def assert_prerequisites():
  function load_personal_library (line 50) | def load_personal_library():
  function open_log (line 57) | def open_log(filename):
  function close_log (line 63) | def close_log():
  function log (line 68) | def log(message, nl = True):
  function dlog (line 76) | def dlog(message):
  function plog (line 81) | def plog(message):
  function aa_search (line 85) | def aa_search(search_string,max_results):
  function get_google_track_details (line 99) | def get_google_track_details(sample_song = 'one u2'):
  function create_result_details (line 106) | def create_result_details(track):
  function create_details (line 115) | def create_details(details_list):
  function get_csv_fields (line 130) | def get_csv_fields(csvString,sepChar=tsep):
  function handle_quote_output (line 146) | def handle_quote_output(aString):
  function handle_quote_input (line 154) | def handle_quote_input(aString):
  function create_details_string (line 161) | def create_details_string(details_dict, skip_id = False):
  function open_api (line 176) | def open_api():
  function close_api (line 194) | def close_api():
  function create_stats (line 199) | def create_stats():
  function update_stats (line 208) | def update_stats(track,stats):
  function calculate_stats_results (line 216) | def calculate_stats_results(stats,total_tracks):
  function log_stats (line 225) | def log_stats(results):

FILE: test/atestframe.py
  function run_test (line 6) | def run_test():

FILE: test/test-common.py
  class TestCommon (line 4) | class TestCommon(unittest.TestCase):
    method test_get_csv_fields (line 6) | def test_get_csv_fields(self):
    method test_handle_quote_input (line 22) | def test_handle_quote_input(self):
    method test_handle_quote_output (line 30) | def test_handle_quote_output(self):
    method test_quote_unquote (line 36) | def test_quote_unquote(self):
Condensed preview — 11 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (35K chars).
[
  {
    "path": ".gitignore",
    "chars": 40,
    "preview": "*.csv\n*.log\n*.pyc\n\n!ExamplePlaylist.csv\n"
  },
  {
    "path": "ExamplePlaylist.csv",
    "chars": 1488,
    "preview": ",test comment (and blank track)\n\n,test fuzzy artist title search\nstray cats stray cat strut\n\n,test fuzzy title artist se"
  },
  {
    "path": "ExportLists.py",
    "chars": 3890,
    "preview": "# Author: John Elkins <john.elkins@yahoo.com>\n# License: MIT <LICENSE>\n\nfrom common import *\n\nif len(sys.argv) < 2:\n    "
  },
  {
    "path": "ImportList.py",
    "chars": 11486,
    "preview": "# Author: John Elkins <john.elkins@yahoo.com>\n# License: MIT <LICENSE>\n\nimport re\nimport datetime\nimport math\nimport tim"
  },
  {
    "path": "LICENSE",
    "chars": 1077,
    "preview": "The MIT License (MIT)\n\nCopyright (c) 2014 John Elkins\n\nPermission is hereby granted, free of charge, to any person obtai"
  },
  {
    "path": "README.md",
    "chars": 4236,
    "preview": "gmusic-playlist\n===============\n\nplaylist scripts for gmusic\n\n## Prerequisites\n\n- python 2.7 - https://www.python.org\n- "
  },
  {
    "path": "common.py",
    "chars": 7009,
    "preview": "# Author: John Elkins <john.elkins@yahoo.com>\n# License: MIT <LICENSE>\n\n__version__ = '0.160530'\n\n__required_gmusicapi_v"
  },
  {
    "path": "preferences.py",
    "chars": 1668,
    "preview": "\n# the username to use\nusername = 'john.elkins@gmail.com'\n\n# the separator to use for detailed track information\ntrack_i"
  },
  {
    "path": "test/atestframe.py",
    "chars": 198,
    "preview": "# put the parent directory onto the path\nfrom os import sys, path\nsys.path.append(path.dirname(path.dirname(path.abspath"
  },
  {
    "path": "test/test-common.py",
    "chars": 2307,
    "preview": "from atestframe import *\nfrom common import *\n\nclass TestCommon(unittest.TestCase):\n\n    def test_get_csv_fields(self):\n"
  },
  {
    "path": "test/z-README.txt",
    "chars": 218,
    "preview": "run tests as regular python executables like so:\npython test*\n\nif you have the coverage.py script installed\nthe tests ca"
  }
]

About this extraction

This page contains the full source code of the soulfx/gmusic-playlist GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 11 files (32.8 KB), approximately 8.0k tokens, and a symbol index with 35 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!