From 6a18cb686600e173c91ea54f44a764f8475866d2 Mon Sep 17 00:00:00 2001
From: phowell
Date: Thu, 23 Mar 2023 12:05:27 -0700
Subject: [PATCH] mods for running w/out cache

---
 depricated.py    | 164 ++++++++++++++++++++++++-----------------------
 localcache.py    |  11 ++--
 requirements.txt |   1 -
 3 files changed, 90 insertions(+), 86 deletions(-)

diff --git a/depricated.py b/depricated.py
index bdedc76..d0e0934 100644
--- a/depricated.py
+++ b/depricated.py
@@ -908,112 +908,114 @@ rez = [[m[j][i] for j in range(len(m))] for i in range(len(m[0]))]
-    """
-    ilearn_by_id = {}
-    ilearn_by_name = {}
-    for x in ilearn_list:
-        ilearn_by_id[x[3]] = x
-        ilearn_by_name[x[0]] = x
+"""
+ilearn_by_id = {}
+ilearn_by_name = {}
+for x in ilearn_list:
+    ilearn_by_id[x[3]] = x
+    ilearn_by_name[x[0]] = x
 
-    for ml in open('cache/teacher_manual_name_lookup.csv','r').readlines():
-        parts = ml.strip().split(',')
-        try:
-            manual_list[parts[0]] = ilearn_by_id[parts[1]]
-        except Exception as e:
-            print "Teacher missing: " + parts[0]
+for ml in open('cache/teacher_manual_name_lookup.csv','r').readlines():
+    parts = ml.strip().split(',')
+    try:
+        manual_list[parts[0]] = ilearn_by_id[parts[1]]
+    except Exception as e:
+        print "Teacher missing: " + parts[0]
 
-    il_names = [ x[0] for x in ilearn_list ]
-    il_byname = {}
-    for x in ilearn_list: il_byname[x[0]] = x
-    sched_list_missed = [x for x in sched_list]
+il_names = [ x[0] for x in ilearn_list ]
+il_byname = {}
+for x in ilearn_list: il_byname[x[0]] = x
+sched_list_missed = [x for x in sched_list]
 
-    #
-    # key is long name (with middle name) from schedule, value is tuple with everything
-    name_lookup = manual_list
-    matches = []
+#
+# key is long name (with middle name) from schedule, value is tuple with everything
+name_lookup = manual_list
+matches = []
 
-    #print ilearn_list
+#print ilearn_list
 
-    num_in_sched = len(sched_list)
-    num_in_ilearn = len(ilearn_list)
+num_in_sched = len(sched_list)
+num_in_ilearn = len(ilearn_list)
 
-    #for i in range(min(num_in_sched,num_in_ilearn)):
-    #    print "|"+sched_list[i] + "|\t\t|" + ilearn_list[i][0] + "|"
+#for i in range(min(num_in_sched,num_in_ilearn)):
+#    print "|"+sched_list[i] + "|\t\t|" + ilearn_list[i][0] + "|"
 
-    print("Sched names: %i, iLearn names: %i" % (num_in_sched,num_in_ilearn))
+print("Sched names: %i, iLearn names: %i" % (num_in_sched,num_in_ilearn))
 
-    for s in sched_list:
-        for t in il_names:
-            if first_last(s) == t:
-                #print ' MATCHED ' + s + ' to ' + t
-                sched_list_missed.remove(s)
-                try:
-                    name_lookup[s] = ilearn_by_name[ first_last(s) ]
-                except Exception as e:
-                    print "Teacher missing (2): " + s
-                il_names.remove(first_last(s))
-                matches.append(s)
+for s in sched_list:
+    for t in il_names:
+        if first_last(s) == t:
+            #print ' MATCHED ' + s + ' to ' + t
+            sched_list_missed.remove(s)
+            try:
+                name_lookup[s] = ilearn_by_name[ first_last(s) ]
+            except Exception as e:
+                print "Teacher missing (2): " + s
+            il_names.remove(first_last(s))
+            matches.append(s)
 
-    print "Matched: " + str(matches)
+print "Matched: " + str(matches)
 
-    print "\nDidn't match: " + str(len(sched_list_missed)) + " schedule names."
+print "\nDidn't match: " + str(len(sched_list_missed)) + " schedule names."
 
-    print "\nFinal results: "
-    print name_lookup
+print "\nFinal results: "
+print name_lookup
 
-    nlf = codecs.open('cache/sched_to_ilearn_names.json','w','utf-8')
-    nlf.write(json.dumps(name_lookup,indent=2))
-    # STRING DISTANCE
-        #sim = find_most_similar(s,i_names)
-        #print ' CLOSEST MATCHES to ' + s + ' are: ' + str(sim)
-        #mm.write(s+',\n')
-    """
+nlf = codecs.open('cache/sched_to_ilearn_names.json','w','utf-8')
+nlf.write(json.dumps(name_lookup,indent=2))
+# STRING DISTANCE
+    #sim = find_most_similar(s,i_names)
+    #print ' CLOSEST MATCHES to ' + s + ' are: ' + str(sim)
+    #mm.write(s+',\n')
+"""
 
-    #ilearn_list = sorted(list(set(map(
-    #    lambda x: #(tfi[x]['name'],tfi[x]['email'],tfi[x]['dept'],str(tfi[x]['id']),tfi[x]['goo']),
-    #    tfi.keys()))))
-    #i_names = [ x[0] for x in ilearn_list ]
+    #ilearn_list = sorted(list(set(map(
+#    lambda x: #(tfi[x]['name'],tfi[x]['email'],tfi[x]['dept'],str(tfi[x]['id']),tfi[x]['goo']),
+#    tfi.keys()))))
+#i_names = [ x[0] for x in ilearn_list ]
 
-    #print json.dumps(i_names,indent=2)
-    #return
+#print json.dumps(i_names,indent=2)
+#return
 
-    # how to filter a dict based on values
-    # filtered = {k: v for k, v in course_combos.items() if v['dept'] == 'LIB' or v['dept'] == 'CSIS' }
+# how to filter a dict based on values
+# filtered = {k: v for k, v in course_combos.items() if v['dept'] == 'LIB' or v['dept'] == 'CSIS' }
 
-    # more pandas
-    # gapminder['continent'].unique()
+# more pandas
+# gapminder['continent'].unique()
 
-    #for name,group in bycode:
-    #    #print name
-    #    print name, " ", group['type']
+#for name,group in bycode:
+#    #print name
+#    print name, " ", group['type']
 
-    #onl = gg.agg( lambda x: has_online(x) )
-    #ttl = gg.agg( lambda x: len(x) )
-    #ttl = ttl.rename(columns={'type':'total_sections'})
+#onl = gg.agg( lambda x: has_online(x) )
+#ttl = gg.agg( lambda x: len(x) )
+#ttl = ttl.rename(columns={'type':'total_sections'})
 
-    #onl.join(gg.agg( lambda x: has_hybrid(x) ),how='outer')
-    #onl.join(gg.agg( lambda x: has_lecture(x) ), how='outer')
+#onl.join(gg.agg( lambda x: has_hybrid(x) ),how='outer')
+#onl.join(gg.agg( lambda x: has_lecture(x) ), how='outer')
 
-    #onl['num_sections'] = 0
-    #onl['num_lec'] = 0
-    #onl['num_online'] = 0
+#onl['num_sections'] = 0
+#onl['num_lec'] = 0
+#onl['num_online'] = 0
 
-    #all = pd.merge([onl,hyb,lec])
-    #print onl
-    #total=len, f2f=lambda x: ) set(x)
-    #{ 'num_sections': "count",
-    #  'num_lec': lambda x: 5,
-    #  'num_online': lambda x: 5 } )
-    #print gg
+#all = pd.merge([onl,hyb,lec])
+#print onl
+#total=len, f2f=lambda x: ) set(x)
+#{ 'num_sections': "count",
+#  'num_lec': lambda x: 5,
+#  'num_online': lambda x: 5 } )
+#print gg
 """
+
+
 def has_online(series):
     # if any items of the series have the string 'online', return 1
     for i in series:
@@ -1429,12 +1431,12 @@ def categorize():
         #print this_section[0][0] + "\t", course_location(this_section)
         this_section = [ parts, ]
     return sections
+
+
+
+
-
-
-
-
-
+
diff --git a/localcache.py b/localcache.py
index 978a9a2..ee843c3 100644
--- a/localcache.py
+++ b/localcache.py
@@ -20,10 +20,13 @@ sqlite_file = local_data_folder + 'data.db' #'data_su20_4hr_blocks.db'
 mylog = codecs.open(local_data_folder + 'canvas_data_log.txt','w')
 
 thefiles_dat = {}
-for L in open('cache/canvas_data_index.txt','r').readlines():
-    L = L.strip()
-    (fname,start,finish) = L.split(',')
-    thefiles_dat[fname] = start
+try:
+    for L in open('cache/canvas_data_index.txt','r').readlines():
+        L = L.strip()
+        (fname,start,finish) = L.split(',')
+        thefiles_dat[fname] = start
+except Exception as e:
+    print("cache/canvas_data_index.txt was not found")
 
 thefiles = open('cache/canvas_data_index_temp.txt','a') # rename me if nothing crashes :)
diff --git a/requirements.txt b/requirements.txt
index 739f36e..eff981f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-alabaster==0.7.10
 anaconda-client==1.6.5
 anaconda-navigator==1.6.9
 anaconda-project==0.8.0