mods for running w/out cache
This commit is contained in:
parent 25028b6df2
commit 6a18cb6866

depricated.py (+112, -112)

@@ -908,41 +908,41 @@ rez = [[m[j][i] for j in range(len(m))] for i in range(len(m[0]))]
"""
|
||||
ilearn_by_id = {}
|
||||
ilearn_by_name = {}
|
||||
for x in ilearn_list:
|
||||
"""
|
||||
ilearn_by_id = {}
|
||||
ilearn_by_name = {}
|
||||
for x in ilearn_list:
|
||||
ilearn_by_id[x[3]] = x
|
||||
ilearn_by_name[x[0]] = x
|
||||
|
||||
for ml in open('cache/teacher_manual_name_lookup.csv','r').readlines():
|
||||
for ml in open('cache/teacher_manual_name_lookup.csv','r').readlines():
|
||||
parts = ml.strip().split(',')
|
||||
try:
|
||||
manual_list[parts[0]] = ilearn_by_id[parts[1]]
|
||||
except Exception as e:
|
||||
print "Teacher missing: " + parts[0]
|
||||
|
||||
il_names = [ x[0] for x in ilearn_list ]
|
||||
il_byname = {}
|
||||
for x in ilearn_list: il_byname[x[0]] = x
|
||||
sched_list_missed = [x for x in sched_list]
|
||||
il_names = [ x[0] for x in ilearn_list ]
|
||||
il_byname = {}
|
||||
for x in ilearn_list: il_byname[x[0]] = x
|
||||
sched_list_missed = [x for x in sched_list]
|
||||
|
||||
#
|
||||
# key is long name (with middle name) from schedule, value is tuple with everything
|
||||
name_lookup = manual_list
|
||||
matches = []
|
||||
#
|
||||
# key is long name (with middle name) from schedule, value is tuple with everything
|
||||
name_lookup = manual_list
|
||||
matches = []
|
||||
|
||||
#print ilearn_list
|
||||
#print ilearn_list
|
||||
|
||||
num_in_sched = len(sched_list)
|
||||
num_in_ilearn = len(ilearn_list)
|
||||
num_in_sched = len(sched_list)
|
||||
num_in_ilearn = len(ilearn_list)
|
||||
|
||||
#for i in range(min(num_in_sched,num_in_ilearn)):
|
||||
# print "|"+sched_list[i] + "|\t\t|" + ilearn_list[i][0] + "|"
|
||||
#for i in range(min(num_in_sched,num_in_ilearn)):
|
||||
# print "|"+sched_list[i] + "|\t\t|" + ilearn_list[i][0] + "|"
|
||||
|
||||
print("Sched names: %i, iLearn names: %i" % (num_in_sched,num_in_ilearn))
|
||||
print("Sched names: %i, iLearn names: %i" % (num_in_sched,num_in_ilearn))
|
||||
|
||||
for s in sched_list:
|
||||
for s in sched_list:
|
||||
for t in il_names:
|
||||
if first_last(s) == t:
|
||||
#print ' MATCHED ' + s + ' to ' + t
|
||||
|
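
The matching loop compares first_last(s) against each iLearn name, but first_last() is defined outside this hunk. A minimal sketch of what such a helper could look like, assuming schedule names are "First Middle Last" strings (a hypothetical reconstruction, not the file's actual definition):

# Hypothetical helper: drop any middle tokens from a full name so that
# a schedule name like "Ada Byron Lovelace" compares equal to the
# iLearn form "Ada Lovelace".
def first_last(name):
    parts = name.split()
    if len(parts) <= 2:
        return name.strip()
    return parts[0] + ' ' + parts[-1]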

@@ -955,65 +955,67 @@ rez = [[m[j][i] for j in range(len(m))] for i in range(len(m[0]))]
            matches.append(s)

print "Matched: " + str(matches)

print "\nDidn't match: " + str(len(sched_list_missed)) + " schedule names."

print "\nFinal results: "
print name_lookup

nlf = codecs.open('cache/sched_to_ilearn_names.json','w','utf-8')
nlf.write(json.dumps(name_lookup,indent=2))

# STRING DISTANCE
#sim = find_most_similar(s,i_names)
#print ' CLOSEST MATCHES to ' + s + ' are: ' + str(sim)
#mm.write(s+',\n')
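
The commented-out fallback above calls find_most_similar(s, i_names), which isn't defined in this hunk. A plausible stand-in, assuming it returns the closest iLearn names by string distance (an assumption, using only the standard library):

# Hypothetical stand-in for find_most_similar(); difflib ranks candidates
# by Ratcliff/Obershelp similarity and returns the top n matches.
import difflib

def find_most_similar(name, candidates, n=3):
    return difflib.get_close_matches(name, candidates, n=n, cutoff=0.6)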
"""
|
||||
"""
|
||||
|
||||
|
||||
#ilearn_list = sorted(list(set(map(
|
||||
# lambda x: #(tfi[x]['name'],tfi[x]['email'],tfi[x]['dept'],str(tfi[x]['id']),tfi[x]['goo']),
|
||||
# tfi.keys()))))
|
||||
#i_names = [ x[0] for x in ilearn_list ]
|
||||
# lambda x: #(tfi[x]['name'],tfi[x]['email'],tfi[x]['dept'],str(tfi[x]['id']),tfi[x]['goo']),
|
||||
# tfi.keys()))))
|
||||
#i_names = [ x[0] for x in ilearn_list ]
|
||||
|
||||
#print json.dumps(i_names,indent=2)
|
||||
#return
|
||||
#print json.dumps(i_names,indent=2)
|
||||
#return
|
||||
|
||||
|
||||
|
||||
# how to filter a dict based on values
|
||||
# filtered = {k: v for k, v in course_combos.items() if v['dept'] == 'LIB' or v['dept'] == 'CSIS' }
|
||||
# how to filter a dict based on values
|
||||
# filtered = {k: v for k, v in course_combos.items() if v['dept'] == 'LIB' or v['dept'] == 'CSIS' }
|
||||
|
||||
# more pandas
|
||||
# gapminder['continent'].unique()
|
||||
# more pandas
|
||||
# gapminder['continent'].unique()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#for name,group in bycode:
|
||||
# #print name
|
||||
# print name, " ", group['type']
|
||||
#for name,group in bycode:
|
||||
# #print name
|
||||
# print name, " ", group['type']
|
||||
|
||||
#onl = gg.agg( lambda x: has_online(x) )
|
||||
#ttl = gg.agg( lambda x: len(x) )
|
||||
#ttl = ttl.rename(columns={'type':'total_sections'})
|
||||
#onl = gg.agg( lambda x: has_online(x) )
|
||||
#ttl = gg.agg( lambda x: len(x) )
|
||||
#ttl = ttl.rename(columns={'type':'total_sections'})
|
||||
|
||||
#onl.join(gg.agg( lambda x: has_hybrid(x) ),how='outer')
|
||||
#onl.join(gg.agg( lambda x: has_lecture(x) ), how='outer')
|
||||
#onl.join(gg.agg( lambda x: has_hybrid(x) ),how='outer')
|
||||
#onl.join(gg.agg( lambda x: has_lecture(x) ), how='outer')
|
||||
|
||||
#onl['num_sections'] = 0
|
||||
#onl['num_lec'] = 0
|
||||
#onl['num_online'] = 0
|
||||
#onl['num_sections'] = 0
|
||||
#onl['num_lec'] = 0
|
||||
#onl['num_online'] = 0
|
||||
|
||||
#all = pd.merge([onl,hyb,lec])
|
||||
#print onl
|
||||
#total=len, f2f=lambda x: ) set(x)
|
||||
#{ 'num_sections': "count",
|
||||
# 'num_lec': lambda x: 5,
|
||||
# 'num_online': lambda x: 5 } )
|
||||
#print gg
|
||||
#all = pd.merge([onl,hyb,lec])
|
||||
#print onl
|
||||
#total=len, f2f=lambda x: ) set(x)
|
||||
#{ 'num_sections': "count",
|
||||
# 'num_lec': lambda x: 5,
|
||||
# 'num_online': lambda x: 5 } )
|
||||
#print gg
"""


def has_online(series):
    # if any items of the series have the string 'online', return 1
    for i in series:
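
The hunk ends mid-function, so has_online()'s body is truncated above. A plausible completion based on its comment (an assumption; the rest of the body is outside this diff):

# Hypothetical completion: return 1 as soon as any entry mentions 'online'.
def has_online(series):
    # if any items of the series have the string 'online', return 1
    for i in series:
        if 'online' in str(i).lower():
            return 1
    return 0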

@@ -20,10 +20,13 @@ sqlite_file = local_data_folder + 'data.db' #'data_su20_4hr_blocks.db'
mylog = codecs.open(local_data_folder + 'canvas_data_log.txt','w')

thefiles_dat = {}
try:
    for L in open('cache/canvas_data_index.txt','r').readlines():
        L = L.strip()
        (fname,start,finish) = L.split(',')
        thefiles_dat[fname] = start
except Exception as e:
    print("cache/canvas_data_index.txt was not found")

thefiles = open('cache/canvas_data_index_temp.txt','a') # rename me if nothing crashes :)
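
This is the core "running w/out cache" change: the index read is now wrapped in try/except so a missing cache/ directory no longer crashes the script. An equivalent standalone guard (a sketch, not the repo's code) checks for the file explicitly instead:

# Sketch of the same defensive pattern using an existence check rather than
# catching the exception; the file name is taken from the diff above.
import os

thefiles_dat = {}
index_path = 'cache/canvas_data_index.txt'
if os.path.exists(index_path):
    for line in open(index_path, 'r').readlines():
        line = line.strip()
        if not line:
            continue  # skip blank lines in the index
        (fname, start, finish) = line.split(',')
        thefiles_dat[fname] = start
else:
    print("cache/canvas_data_index.txt was not found")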

@@ -1,4 +1,3 @@
alabaster==0.7.10
anaconda-client==1.6.5
anaconda-navigator==1.6.9
anaconda-project==0.8.0