Compare commits
No commits in common. "f16288322e900d7adfbe6d7956d1637eb0594950" and "16077622663486a471dba0a3167a8f1eb769bca5" have entirely different histories.
f16288322e...1607762266

courses.py (463 lines changed)
@@ -27,7 +27,7 @@ def get_gott1_passers():
     min_passing = 85
     passers_filename = 'cache/teacherdata/bootcamp_passed.csv'
     still_active_filename = 'cache/teacherdata/bootcamp_active.csv'
-    #get_course_passers(course, min_passing, passers_filename, still_active_filename)
+    get_course_passers(course, min_passing, passers_filename, still_active_filename)

 # Plagiarism Module - report on who completed it.
 def get_plague_passers():
@@ -35,7 +35,6 @@ def get_plague_passers():
     min_passing = 85
     passers_filename = 'cache/teacherdata/plagiarism_passed.csv'
     still_active_filename = 'cache/teacherdata/plagiarism_active.csv'
-    """
     (passed, didnt) = get_course_passers(course, min_passing, passers_filename, still_active_filename)
     passed = set( [z[2] for z in passed] )
     didnt = set( [z[2] for z in didnt] )
@@ -55,36 +54,40 @@ def get_plague_passers():
     outputfile = open('cache/plagcheck.txt','w').write( json.dumps( [ [z[2] for z in passed],[z[2] for z in didnt],enrol],indent=2))
     return 1

-    passed = {}
-    didnt = {}
+    passed_d = {}
+    didnt_d = {}

     output_by_course = {}
     course_s = {}

-    for p in passed: passed[str(p[2])] = p
-    for p in didnt: didnt[str(p[2])] = p
+    for p in passed: passed_d[str(p[2])] = p
+    for p in didnt: didnt_d[str(p[2])] = p

-    passed_s = [ str(k) for k in passed.keys() ]
-    didnt_s = [ str(k) for k in didnt.keys() ]
+    passed_s = [ str(k) for k in passed_d.keys() ]
+    didnt_s = [ str(k) for k in didnt_d.keys() ]


     crossref = ['11677','11698',]

     outputfile = open('cache/plagcheck.txt','w')
-    oo = { 'passed': passed, 'didnt': didnt }
+    oo = { 'passed': passed_d, 'didnt': didnt_d }

     for cr in crossref:
         student_int = course_enrollment(cr)
         student_d = { str(k): v for k,v in student_int.items() }
         oo[cr] = student_d

         output_by_course[cr] = { 'passed':{}, 'didnt':{}, 'missing':{} }

         course_s[cr] = set( [ str(k) for k in student_d.keys() ])

         for k,v in student_d.items():
             key_s = str(k)

-            if key_s in passed: output_by_course[cr]['passed'][key_s] = passed[key_s]
-            elif key_s in didnt: output_by_course[cr]['didnt'][key_s] = didnt[key_s]
+            if key_s in passed_d:
+                output_by_course[cr]['passed'][key_s] = passed_d[key_s]
+            elif key_s in didnt_d:
+                output_by_course[cr]['didnt'][key_s] = didnt_d[key_s]
             else:
                 output_by_course[cr]['missing'][key_s] = v['user']

@@ -140,7 +143,7 @@ def get_course_passers(course, min_passing, passers_filename, still_active_filen
     print("Saved output to \n - passed: %s\n - not passed: %s\n" % (passers_filename, still_active_filename))
     return (passed,didnt)

-"""
+
 # Gott 1A
 """course = '2908'
 quiz = '15250'
@@ -228,9 +231,9 @@ def users_in_semester():

 #
 # All students in STEM (or any list of depts.. match the course_code). Return SET of canvas ids.
-def users_in_by_depts_live(depts=[], termid='171'):
+def users_in_depts_live(depts=[], termid='171'):
     courses_by_dept = {}
     students_by_dept = {}

     all_c = getCoursesInTerm(termid,0,0)
     codecs.open('cache/courses_in_term_%s.json' % termid,'w','utf-8').write( json.dumps(all_c,indent=2) )
@@ -241,19 +244,19 @@ def users_in_by_depts_live(depts=[], termid='171'):
         match = re.search('^(%s)' % d, c['course_code'])
         if match:
             print("Getting enrollments for %s" % c['course_code'])
             if d in courses_by_dept: courses_by_dept[d].append(c)
             else: courses_by_dept[d] = [ c, ]
             for u in course_enrollment(c['id']).values():
                 if u['type'] != "StudentEnrollment": continue
                 if not (d in students_by_dept):
                     students_by_dept[d] = set()
                 students_by_dept[d].add(u['user_id'])
             continue
     print(students_by_dept)
     codecs.open('cache/students_by_dept_in_term_%s.json' % termid,'w','utf-8').write( str(students_by_dept) )
     all_students = set()
     for dd in students_by_dept.values(): all_students.update(dd)
-    codecs.open('cache/all_students_in_by_depts_in_term_%s.json' % termid,'w','utf-8').write( str(all_students) )
+    codecs.open('cache/all_students_in_depts_in_term_%s.json' % termid,'w','utf-8').write( str(all_students) )
     return all_students


@@ -292,7 +295,7 @@ def askForTerms():
     print("Terms: ")
     for u in s:
         print(str(u['id']) + "\t" + u['name'])
-    #print json.dumps(results_by_dept,indent=2)
+    #print json.dumps(results_dict,indent=2)
     term = input("The term id? ")
     """

@@ -349,10 +352,10 @@ def update_course_conclude(courseid="13590",enddate='2021-12-23T01:00Z'):
         print('****%s' % str(e))

 # Relevant stuff trying to see if its even being used or not
-def course_term_summary_local(term="180",term_label="FA23"):
+def course_term_summary_local(term="176",term_label="FA22"):
     O = "\t<li>Course: <a href='%s' target='_blank' class='%s'>%s</a><br />Status: <b>%s</b><br />Teacher: %s<br />Number students: %s</li>\n"
     courses = get_courses_in_term_local(term)
-    oo = codecs.open(f'cache/semester_summary_{term_label}.html','w','utf-8')
+    oo = codecs.open('cache/semester_summary.html','w','utf-8')
     oo.write('<style>.a{background-color:yellow;}.b{background-color:pink;}</style><ul>\n')

     for C in sorted(courses):
@@ -373,7 +376,7 @@ def course_term_summary_local(term="180",term_label="FA23"):
     oo.write('\n</ul>\n')

 # Relevant stuff trying to see if its even being used or not
-def course_term_summary(term="180",term_label="FA23"):
+def course_term_summary(term="176",term_label="FA22"):
     print("Summary of %s" % term_label)
     courses = getCoursesInTerm(term,0,0)

@@ -544,12 +547,13 @@ def all_equal2(iterator):
 177 2023 Winter
 """
 def semester_cross_lister():
-    sem = "sp24"
-    term = 181
+    sem = "fa23"
+    term = 180
     xlist_filename = f"cache/{sem}_crosslist.csv"
     checkfile = codecs.open('cache/xlist_check.html','w','utf-8')
     checkfile.write('<html><body><table>\n')

+    current_term = 179
     xlistfile = codecs.open(xlist_filename,'r','utf-8').readlines()[1:]
     by_section = {}
     by_group = defaultdict( list )
@@ -604,17 +608,14 @@ def semester_cross_lister():
         nums_list = list(set([ z[1].split(' ')[1] for z in by_group[y] ]))
         if all_equal2(depts_list):
             depts = depts_list[0]
-            nums_list.sort()
             nums = '/'.join(nums_list)
         else:
             depts = list(set(depts_list))
-            depts.sort()
-            depts = '/'.join(depts )
             nums = by_group[y][0][1].split(' ')[1]

-        new_name = f"{depts}{nums} {' '.join(by_group[y][0][4].split(' ')[1:-1])} {new_sec}"
+        new_name = depts + nums + " " + ' '.join(by_group[y][0][4].split(' ')[1:-1]) + " " + new_sec
         #new_name = by_group[y][0][4][0:-5] + new_sec
-        new_code = f"{depts}{nums} {sem.upper()} {new_sec}"
+        new_code = depts + nums + " " + new_sec
         #new_code = by_group[y][0][5][0:-5] + new_sec
         print(y)
         print("\t", sects)
@@ -626,15 +627,13 @@ def semester_cross_lister():

         for target_section in sections:
             xlist_ii(target_section[3],host_id,new_name,new_code)
-            #pass
+

-# Perform an actual cross-list, given 2 id numbers, new name and code
 def xlist_ii(parasite_id,host_id,new_name,new_code):
     print("Parasite id: ",parasite_id," Host id: ", host_id)
     print("New name: ", new_name)
     print("New code: ", new_code)
-    xyz = 'y'
-    #xyz = input("Perform cross list? Enter for yes, n for no: ")
+    xyz = input("Perform cross list? Enter for yes, n for no: ")
     if xyz != 'n':
         uu = url + '/api/v1/courses/%s/sections' % parasite_id
         c_sect = fetch(uu)
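Note on the hunk above: xlist_ii prints its plan, confirms, then fetches the parasite course's sections before re-homing them. The cross-list call itself falls outside this hunk; for orientation, a minimal sketch of the documented Canvas Sections endpoint, assuming the repo's module-level `url` and `header` globals (the helper name is illustrative, not the author's):

    import requests

    def crosslist_section(section_id, host_course_id):
        # POST /api/v1/sections/:id/crosslist/:new_course_id re-parents the
        # section (the "parasite") into the host course.
        u = url + '/api/v1/sections/%s/crosslist/%s' % (section_id, host_course_id)
        r = requests.post(u, headers=header)
        r.raise_for_status()
        return r.json()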
@@ -851,10 +850,10 @@ def enroll_id_list_to_shell(id_list, shell_id, v=0):


 def enroll_stem_students_live():
-    the_term = '180' # su23 fa23 = 180
-    do_removes = 0
+    the_term = '179' # su23 fa23 = 180
+    do_removes = 1
     depts = "MATH BIO CHEM CSIS PHYS PSCI GEOG ASTR ECOL ENVS ENGR".split(" ")
-    users_to_enroll = users_in_by_depts_live(depts, the_term) # term id
+    users_to_enroll = users_in_depts_live(depts, the_term) # term id

     stem_enrollments = course_enrollment(stem_course_id) # by user_id

@@ -925,7 +924,7 @@ def enroll_stem_students_live():
 ###########################

 def enroll_bulk_students_bydept(course_id, depts, the_term="172", cautious=1): # a string, a list of strings
-    users_to_enroll = users_in_by_depts_live(depts, the_term) # term id
+    users_to_enroll = users_in_depts_live(depts, the_term) # term id

     targeted_enrollments = course_enrollment(course_id) # by user_id.. (live, uses api)

@@ -1003,13 +1002,13 @@ def enroll_gott_workshops_su23():
     #print(by_email.keys())

     workshop_ids = {
-        #'GOTT 2: Intro to Async Online Teaching and Learning2023-07-09 17:00:00': 17992,
-        #'GOTT 4: Assessment in Digital Learning2023-07-09 17:00:00': 17995,
-        #'Restricted to STEM faculty. Humanizing (STEM) Online Learning 2023-06-18 17:00:00': 17996,
-        #'GOTT 6: Online Live Teaching and Learning2023-06-11 17:00:00': 17986,
-        #'GOTT 5: Essentials of Blended Learning2023-06-25 17:00:00': 17987,
-        #'GOTT 5: Essentials of Blended Learning (HyFlex)2023-06-25 17:00:00': 17987,
-        #'GOTT 1: Intro to Teaching Online with Canvas2023-05-29 17:00:00': 17985,
+        'GOTT 2: Intro to Async Online Teaching and Learning2023-07-09 17:00:00': 17992,
+        'GOTT 4: Assessment in Digital Learning2023-07-09 17:00:00': 17995,
+        'Restricted to STEM faculty. Humanizing (STEM) Online Learning 2023-06-18 17:00:00': 17996,
+        'GOTT 6: Online Live Teaching and Learning2023-06-11 17:00:00': 17986,
+        'GOTT 5: Essentials of Blended Learning2023-06-25 17:00:00': 17987,
+        'GOTT 5: Essentials of Blended Learning (HyFlex)2023-06-25 17:00:00': 17987,
+        'GOTT 1: Intro to Teaching Online with Canvas2023-05-29 17:00:00': 17985,
         'GOTT 1: Intro to Teaching Online with Canvas2023-08-20 17:00:00': 17994
     }
     #print(json.dumps(signups,indent=4))
@@ -1017,8 +1016,6 @@ def enroll_gott_workshops_su23():

     subs = {'csalvin@gavilan.edu':'christinasalvin@gmail.com',
         'karenjeansutton@gmail.com': 'ksutton@gavilan.edu',
-        'elisepeeren@gmail.com': 'epeeren@gavilan.edu',
-        'kjoyenderle@gmail.com': 'kenderle@gavilan.edu',
         'flozana@gmail.com': 'flozano@gavilan.edu',
         'fyarahmadi2191@gmail.com': 'fyarahmadi@gavilan.edu',
         'jacquelinejeancollins@yahoo.com': 'jcollins@gavilan.edu',
@@ -1030,7 +1027,6 @@ def enroll_gott_workshops_su23():

     for wkshp,su_list in signups.items():
         if wkshp not in workshop_ids:
-            print(f"skipping {wkshp}")
             continue
         to_enroll = []

@@ -1121,7 +1117,7 @@ def make_ztc_list(sem='sp20'):
     result = open('cache/ztc_crossref.csv','w')
     result.write('Course,Section,Name,Teacher,ZTC teacher\n')

-    ztc_by_dept = {}
+    ztc_dict = {}
     for R in responses:
         R = re.sub(',Yes','',R)
         R = re.sub('\s\s+',',',R)
@@ -1133,18 +1129,18 @@ def make_ztc_list(sem='sp20'):
         for C in parts[1:] :
             C = C.strip()
             #print(C)
-            if C in ztc_by_dept:
-                ztc_by_dept[C] += ', ' + parts[0]
+            if C in ztc_dict:
+                ztc_dict[C] += ', ' + parts[0]
             else:
-                ztc_by_dept[C] = parts[0]
-    print(ztc_by_dept)
+                ztc_dict[C] = parts[0]
+    print(ztc_dict)
     for CO in sched:
         #if re.match(r'CWE',CO['code']):
         #print(CO)

-        if CO['code'] in ztc_by_dept:
-            print(('Possible match, ' + CO['code'] + ' ' + ztc_by_dept[CO['code']] + ' is ztc, this section taught by: ' + CO['teacher'] ))
-            result.write( ','.join( [CO['code'] ,CO['crn'] , CO['name'] , CO['teacher'] , ztc_by_dept[CO['code']] ]) + "\n" )
+        if CO['code'] in ztc_dict:
+            print(('Possible match, ' + CO['code'] + ' ' + ztc_dict[CO['code']] + ' is ztc, this section taught by: ' + CO['teacher'] ))
+            result.write( ','.join( [CO['code'] ,CO['crn'] , CO['name'] , CO['teacher'] , ztc_dict[CO['code']] ]) + "\n" )

 def course_search_by_sis():
     term = 65
@@ -1162,7 +1158,7 @@ def course_search_by_sis():



-def course_by_depts_terms(section=0):
+def course_dates_terms(section=0):
     """s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
     s = list(funcy.flatten(s))
     s.sort()
@@ -1173,24 +1169,13 @@ def course_by_depts_terms(section=0):
     #c = getCoursesInTerm(174,0,1) # sp22
     #c = getCoursesInTerm(176,0,1) # fa22

-    get_fresh = 1
-    SP_TERM = 181
-    WI_TERM = 182
-    SEM = "sp24"
-
-    make_changes = 1
-    make_changes_LS = 1
-
-    winter_start_day = 2
-    aviation_start_day = 11
-    nursing_start_day = 15
-    spring_start_day = 29
+    get_fresh = 0

     if get_fresh:
-        c = getCoursesInTerm(SP_TERM,0,0)
-        codecs.open(f'cache/courses_in_term_{SP_TERM}.json','w','utf-8').write(json.dumps(c,indent=2))
+        c = getCoursesInTerm(178,0,0) # sp23
+        codecs.open('cache/courses_in_term_178.json','w','utf-8').write(json.dumps(c,indent=2))
     else:
-        c = json.loads( codecs.open(f'cache/courses_in_term_{SP_TERM}.json','r','utf-8').read() )
+        c = json.loads( codecs.open('cache/courses_in_term_178.json','r','utf-8').read() )

     crn_to_canvasid = {}
     for C in c:
@@ -1201,45 +1186,38 @@ def course_by_depts_terms(section=0):
     #print(crn_to_canvasid)
     #return

-    #s = json.loads( codecs.open(f'cache/{SEM}_sched_expanded.json','r','utf-8').read() )
-    s = requests.get(f"http://gavilan.cc/schedule/{SEM}_sched_expanded.json").json()
+    s = json.loads( codecs.open('cache/sp23_sched_expanded.json','r','utf-8').read() )
     for S in s:
-        start = re.sub( r'\-','/', S['start']) + '/20' + SEM[2:4]
+        start = re.sub( r'\-','/', S['start']) + '/2023'
         d_start = datetime.strptime(start,"%m/%d/%Y")

         if d_start.month > 5:
             print("Ignoring ", d_start, " starting too late...")
             continue

-        if d_start.month == 1 and d_start.day == aviation_start_day:
+        if d_start.month == 1 and d_start.day == 12:
             print("- Aviation ", start, d_start, " - ", S['code'], " ", S['crn'] )
             continue

-        if d_start.month == 1 and d_start.day == nursing_start_day:
-            print("- Nursing ", start, d_start, " - ", S['code'], " ", S['crn'] )
-            continue
-
-        if d_start.month == 1 and d_start.day == winter_start_day:
+        if d_start.month == 1 and d_start.day ==3:
             print("+ winter session: ", d_start, " - ", S['code'])
-            data = {'course[term_id]':WI_TERM}
+            winter_term = '177'
+            data = {'course[term_id]':winter_term}
             u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
-            if make_changes:
             r3 = requests.put(u2, headers=header, params=data)
-            print(" updated.. OK")
+            print(u2, " OK")
             #print(r3.text)
             continue

-        if d_start.month == 1 and d_start.day == spring_start_day:
+        if d_start.month == 1 and d_start.day == 30:
             # normal class
             continue

         print("- Late start? ", start, d_start, " - ", S['code'], " ", S['crn'] )
-        if make_changes_LS:
-            data = {'course[start_at]':d_start.isoformat(), 'course[restrict_student_future_view]': True,
-                    'course[restrict_enrollments_to_course_dates]':True }
+        data = {'course[start_at]':d_start.isoformat(), 'course[restrict_enrollments_to_course_dates]': True}
         u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
         r3 = requests.put(u2, headers=header, params=data)
-        print(" updated.. OK")
+        print(u2, " OK")

     return

@@ -1253,9 +1231,9 @@ def xlist_cwe():
     # cwe192 get put into another shell


-    this_sem_190_id = 18424 # they get 190s and 290s
-    this_sem_192_id = 18519 # they get 192s
-    this_sem_term = 181
+    this_sem_190_id = 17549 # they get 190s and 290s
+    this_sem_192_id = 17154 # they get 192s
+    this_sem_term = 180 # fa23

     get_fresh = 0
     sem_courses = getCoursesInTerm(this_sem_term, get_fresh, 0)
@@ -1345,21 +1323,6 @@ def modify_courses():
         print('****%s' % str(e))


-def teacher_to_many_shells():
-    for id in range(18089,18110):
-        #print(id)
-        #continue
-
-        # Add teacher
-        u3 = url + f"/api/v1/courses/{id}/enrollments"
-        #usrid = input("id of %s? " % N)
-        usrid = '78'
-        data2 = { "enrollment[type]":"TeacherEnrollment", "enrollment[user_id]":usrid,
-                  "enrollment[enrollment_state]":"active" }
-        r4 = requests.post(u3, headers=header, params=data2)
-        print(f"enrolled user id: {usrid} as teacher in course {id}.")
-
-
 def create_sandboxes():
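The deleted teacher_to_many_shells above is the repo's standard enrollment call in a loop. A self-contained sketch of that same pattern, kept here since several surviving functions reuse it (`url` and `header` are the module-level API base and auth header; the ids and helper name are placeholders):

    import requests

    def enroll_teacher(course_id, user_id):
        # POST /api/v1/courses/:course_id/enrollments creates the enrollment;
        # enrollment[enrollment_state]=active skips the email invitation step.
        u = f"{url}/api/v1/courses/{course_id}/enrollments"
        data = { "enrollment[type]": "TeacherEnrollment",
                 "enrollment[user_id]": str(user_id),
                 "enrollment[enrollment_state]": "active" }
        r = requests.post(u, headers=header, params=data)
        return r.json()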
@@ -1367,23 +1330,11 @@ def create_sandboxes():
     sandboxes = [ ('JH','45324'), ('PK','38183'), ('GM','5167'), ('BS','19231'),
                   ('ST','303'), ('KW','5145')]

-    sandboxes = [ ('CD','51701'), ('LC','45193'), ('JC','70'), ('DG','133'), ('JH','2816'),('SM','18812'), ('GM','211'),
-                  ('RM','45341'), ('DP','251'), ('BT','58059'), ('TT','36834') ]
-
-    sandboxes = [ ('MA','8'), ('WA','15'), ('BA','18'), ('CC','51701'), ('LC','45193'), ('PC','4100'), ('ED','82'), ('KE','101'),
-                  ('OF','41897'), ('SG','115'), ('JG','37654'), ('DG','133'), ('DK','168'), ('JM','204'), ('GM', '211'),
-                  ('RM','45341'), ('CR','5655'), ('CS','272'), ('BS','19231'), ('SS', '274') ]
-
-    sandboxes = [ ('SM','191')]
-
-    sandboxes = [ ('KD', '2509'), ('KE', '2904'), ('SH', '144'), ('SN','60996'), ('EP', '16726'), ('PS','60938'), ('JW', '43052') ]
-
-    sandboxes = [('HA','61620'), ('AS','61451'), ('MP', '11565'), ('AA','51276') ]
-    sandboxes = [('JR','61062')]
-    report = []
+    sandboxes = [ ('PH', '2'), ]
     for (N,usrid) in sandboxes:
-        coursename = f"{N} Sandbox SU23 (GOTT1)"
-        coursecode = f"{N} SU23 Sandbox (GOTT1)"
+        #names = input("what are the initials of people? Separate with spaces ").split()
+        coursename = f"{N} Sandbox SU23 Humanizing STEM"
+        coursecode = f"{N} SU23 Sandbox STEM"
         print(f"Creating course: {coursename} for {N}, id: {usrid}")
         u2 = url + "/api/v1/accounts/1/courses"
         data = {
@@ -1394,12 +1345,10 @@ def create_sandboxes():

         # Create a course
         r3 = requests.post(u2, headers=header, params=data)
         course_data = json.loads(r3.text)
         id = course_data['id']
         print(f"created course id {id}")

-        report.append( f"{coursename} https://ilearn.gavilan.edu/courses/{id}" )
-
         # Add teacher
         u3 = url + f"/api/v1/courses/{id}/enrollments"
         #usrid = input("id of %s? " % N)
@@ -1425,35 +1374,9 @@ def create_sandboxes():
         #print(json.dumps(json.loads(r4.text),indent=2))
         #print()
         #x = input("enter to continue")
-    print("\n\n")
-    print("\n".join(report))
-    print("\n")




 def course_term_summary_2():
-    lines = codecs.open('cache/term_summary.txt','r','utf-8').readlines()
-    output = codecs.open('cache/term_summary.html','w','utf-8')
-    for L in lines:
-        try:
-            L = L.strip()
-            print(L)
-            if re.search('unpublished',L):
-                m = re.search(r"'id': (\d+),",L)
-                m2 = re.search(r"'course_code': '(.+?)',",L)
-                if m:
-                    ss = "<br />Course: <a href='%s' target='_blank'>%s</a><br />" % ("https://ilearn.gavilan.edu/courses/"+str(m.group(1)), m2.group(1))
-                    output.write( ss )
-                    print(ss+"\n")
-        except Exception as e:
-            print(e)
-
-
-
-
-def course_term_summary_3():
-    # doesn't work cause of single, not double quotes?!?!
     lines = codecs.open('cache/term_summary.txt','r','utf-8').readlines()
     output = codecs.open('cache/term_summary.html','w','utf-8')
     for L in lines:
@@ -1480,13 +1403,12 @@ def course_term_summary_3():
 ## ##

 def do_gav_connect():
-    term = 180
-    sem = "202370"
-    get_fresh = 1
+    term = 178
+    sem = "202330"
+    get_fresh = 0
     crns = [sem + "-" + x.strip() for x in open('cache/starfish.txt','r').readlines()]
     target = len(crns)
     print(crns)
-    print("Press enter to begin.")
     a = input()

     c = getCoursesInTerm(term, get_fresh, 0)
@@ -1496,13 +1418,9 @@ def do_gav_connect():
         if course['sis_course_id'] in crns:
             print("Adding gav connect to", course['name'])
             print()
-            result = add_gav_connect(course['id'])
-            if result:
-                i += 1
-            else:
-                print("Something went wrong with", course['name'])
+            add_gav_connect(course['id'])
+            i += 1
     print(f"Added {i} redirects out of {target}.")

 def add_gav_connect(course_id):
     params = { "name": "GavConnect",
                "privacy_level": "anonymous",
@@ -1570,44 +1488,55 @@ def instructor_list_to_activate_evals():

 def add_evals(section=0):
     # show or hide?
-    TERM = 180
-    SEM = "fa23"
-
-    hidden = True
-    s = [ x.strip() for x in codecs.open(f'cache/{SEM}_eval_sections.csv','r').readlines()]
+    hidden = False
+    #s = [ x.strip() for x in codecs.open('cache/sp21_eval_sections.txt','r').readlines()]
+    #s = [ x.split(',')[4].split('::') for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
+    #s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
+    s = [ x.strip() for x in codecs.open('cache/sp23_eval_sections.csv','r').readlines()]
     s = list(funcy.flatten(s))
     s.sort()
     print(s)
-    print()
     xyz = input('hit return to continue')

-    c = getCoursesInTerm(TERM,0,1)
+    #c = getCoursesInTerm(168,0,1)
+    #c = getCoursesInTerm(174,0,1) # sp22
+    #c = getCoursesInTerm(176,0,1) # fa22
+    c = getCoursesInTerm(178,0,1) # sp23
+    print(c)
     ids = []
     courses = {}
     for C in c:
         if C and 'sis_course_id' in C and C['sis_course_id']:
             parts = C['sis_course_id'].split('-')
             if parts[1] in s:
-                #print(C['name'])
+                print(C['name'])
                 courses[str(C['id'])] = C
                 ids.append(str(C['id']))

     ask = 1
     data = {'position':2, 'hidden':hidden}
-    ids.sort()

     for i in ids:
         if ask:
-            a = input(f"Hit q to quit, a to do all, or enter to activate eval for: {courses[i]['id']} / {courses[i]['name']} : ")
+            a = input("Hit q to quit, a to do all, or enter to activate eval for: " + str(courses[i]))
             if a == 'a': ask = 0
             if a == 'q': return
-        else:
-            print(f"{courses[i]['id']} / {courses[i]['name']}")
         u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s/tabs/context_external_tool_1953" % i
         r3 = requests.put(u2, headers=header, params=data)
-        #print(r3.text)
-        #time.sleep(0.400)
+        print(r3.text)
+        time.sleep(0.400)


+    return 1

+    u2 = "https://gavilan.instructure.com:443/api/v1/courses/12001/tabs"
+    r = fetch(u2)
+    print(json.dumps(r,indent=2))



+    # PUT /api/v1/courses/:course_id/tabs/:tab_id

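The tail of this hunk names the call being made: PUT /api/v1/courses/:course_id/tabs/:tab_id. Pulled out of the loop, the eval-activation step is just a tab update; a sketch under the same `url`/`header` assumptions (the function name is illustrative):

    import requests

    def set_course_tab(course_id, tab_id, hidden, position=2):
        # Repositions a course navigation tab; hidden=True removes it from
        # the student-facing course nav, hidden=False exposes it.
        u = f"{url}/api/v1/courses/{course_id}/tabs/{tab_id}"
        r = requests.put(u, headers=header, params={'position': position, 'hidden': hidden})
        return r.json()

Here tab_id is a string like 'context_external_tool_1953', as in the hunk above.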
@@ -1693,116 +1622,6 @@ def remove_n_analytics(section=0):




-def fetch_rubric_scores(course_id=16528, assignment_id=1):
-    api_url = f'{url}/api/v1/courses/{course_id}'
-    course_info = fetch(api_url)
-
-    out = codecs.open('cache/rubric_scores.txt','w','utf-8')
-
-    #print(course_info)
-
-    # Extract course details
-    course_name = course_info['name']
-    course_short_name = course_info['course_code']
-    course_semester = course_info['enrollment_term_id']
-
-    # Print course information
-    out.write(f"Course Name: {course_name}\n")
-    out.write(f"Short Name: {course_short_name}\n")
-    out.write(f"Semester: {course_semester}\n")
-
-    api_url = f'{url}/api/v1/courses/{course_id}/assignments'
-    assignments_list = fetch(api_url)
-
-    #print(assignments_list)
-
-    assignments_dict = {}
-    ratings_dict = {}
-
-    # Iterate through the list of assignments and populate the dictionary
-    for assignment in assignments_list:
-        assignment_id = assignment['id']
-        assignment_name = assignment['name']
-        rubric = assignment.get('rubric', []) # Get the rubric field (default to an empty list if not present)
-
-        has_rubric = 'no'
-        if rubric: has_rubric = 'yes'
-
-        out.write(f" Asmt Name: {assignment_name} ID: {assignment_id} Rubric: {has_rubric}\n")
-
-        # Save assignment details including rubric
-        assignments_dict[assignment_id] = {
-            'name': assignment_name,
-            'rubric': rubric
-            # Add more assignment details if needed
-        }
-
-        if rubric:
-            print("RUBRIC:")
-            print(json.dumps(rubric,indent=2))
-            for r in rubric:
-                for rat in r.get('ratings',[]):
-                    ratings_dict[rat['id']] = { 'description': r['description'], 'long_description': rat['description'], 'points': rat['points']}
-
-    # Print the assignments dictionary
-    out.write(json.dumps(assignments_dict,indent=2)+'\n\n\n')
-    out.write(json.dumps(ratings_dict,indent=2)+'\n\n\n')
-
-    # Loop thru assignments with rubrics and report on grades
-    for assignment in assignments_list:
-
-        if not assignment.get('rubric', []):
-            continue
-
-        assignment_id = assignment['id']
-        out.write(f" Asmt Name: {assignment_name} ID: {assignment_id}\n")
-
-        api_url = f'{url}/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions?include[]=rubric_assessment'
-
-        # Include the 'include[]=rubric_assessment' parameter to request rubric assessments
-        # params = {'include[]': 'rubric_assessment'}
-
-        # Make the API request with the parameters
-        #response = requests.get(api_url, params=params)
-
-        # Check if the request was successful (status code 200)
-        #if response.status_code != 200:
-        #    print(f"Request failed with status code {response.status_code}")
-        #    continue
-
-        submissions_dict = fetch(api_url)
-
-        # Iterate through the list of submissions and retrieve rubric scores and comments
-        for submission in submissions_dict:
-            user_id = submission['user_id']
-            rubric = submission.get('rubric_assessment', []) # Get the rubric assessment (empty list if not present)
-            comments = submission.get('submission_comments', '') # Get submission comments (empty string if not present)
-            score = submission.get('score', -1)
-
-            # Process and use rubric scores and comments as needed
-            # Example: Print user information, rubric scores, and comments
-            if rubric:
-                print(json.dumps(submission,indent=2))
-
-                out.write(f"\nSubmission User ID/Assignment ID: {user_id}/{assignment_id}\n")
-                out.write(f"Score: {score}\n")
-                out.write(f"Submission Comments: {comments}\n")
-                out.write(f"Rubric:\n")
-                for k,v in rubric.items():
-                    rub_desc = '?'
-                    rat_desc = '?'
-                    if v['rating_id'] in ratings_dict:
-                        rub_rating = ratings_dict[v['rating_id']]
-                        rub_desc = rub_rating['description']
-                        rat_desc = rub_rating['long_description']
-                    out.write(f" {rub_desc} - {rat_desc} ({v['rating_id']}): {v['points']}/{rub_rating['points']} points: {v['comments']}\n")
-                out.write("---") # Separator between submissions
-                out.flush()




@@ -1822,7 +1641,7 @@ def create_calendar_event():
     local = pytz.timezone("America/Los_Angeles")
     naive = datetime.strptime(date, "%Y-%m-%d")
     local_dt = local.localize(naive, is_dst=None)
-    utc_dt = local_dt.timezone(pytz.utc).isoformat()
+    utc_dt = local_dt.astimezone(pytz.utc).isoformat()


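The one-line change above is the difference between a typo and a conversion: datetime objects have no .timezone() method, while .astimezone() converts an already-localized value. A minimal standalone illustration of the localize-then-convert pattern used here:

    import pytz
    from datetime import datetime

    local = pytz.timezone("America/Los_Angeles")
    naive = datetime.strptime("2023-08-20", "%Y-%m-%d")  # no tzinfo yet
    local_dt = local.localize(naive, is_dst=None)        # attach the zone
    utc_dt = local_dt.astimezone(pytz.utc)               # convert, don't re-localize
    print(utc_dt.isoformat())                            # 2023-08-20T07:00:00+00:00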
@@ -1831,7 +1650,7 @@ def create_calendar_event():
     "calendar_event[title]": title,
     "calendar_event[description]": desc,
     "calendar_event[start_at]": utc_dt, # DateTime
     "calendar_event[all_day]": "true",

     }

@@ -1853,7 +1672,7 @@ def utc_to_local(utc_str):

     # Convert the UTC datetime to the Pacific Time Zone
     pacific_tz = pytz.timezone('US/Pacific')
-    pacific_dt = pytz.timezone(pacific_tz)
+    pacific_dt = utc_dt.astimezone(pacific_tz)

     return pacific_dt.strftime('%a %b %d, %Y %#I:%M%p')

@@ -1869,41 +1688,6 @@ def list_all_assignments():
         print(f"{a['name']}\t{p}\t{date}")


-def bulk_unenroll():
-    course_id = input("course id> ")
-    enrollments = fetch(f"{url}/api/v1/courses/{course_id}/enrollments")
-
-    for enrollment in enrollments:
-        enrollment_id = enrollment['id']
-        skiplist = ['51237','58362','237']
-        if enrollment_id in skiplist:
-            continue
-
-        # Set the headers and parameters for the DELETE API call
-        api_url = f"{url}/api/v1/courses/{course_id}/enrollments/{enrollment_id}"
-
-        # Make the DELETE request
-        response = requests.delete(api_url, headers=header)
-
-        # Check the response
-        if response.status_code == 200:
-            print(f"Successfully unenrolled student with id {enrollment_id} from course {course_id}.")
-        else:
-            print(f"Failed to unenroll student with id {enrollment_id} from course {course_id}. Error: {response.text}")
-
-
-def fetch_announcements():
-    course_id = 18268
-    announcements_url = f"{url}/api/v1/announcements?context_codes[]=course_{course_id}"
-    announcements = fetch(announcements_url)
-
-    print(json.dumps(announcements,indent=2))
-
-    filename = f"cache/announcements{course_id}.json"
-    with open(filename, "w") as file:
-        json.dump(announcements, file)
-
-    print("Announcements saved to ", filename)


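The deleted bulk_unenroll issues a bare DELETE against each enrollment. For reference, the Enrollments API also accepts a task parameter that selects conclude, delete, or deactivate behavior; a hedged sketch with that made explicit, under the same `url`/`header` assumptions:

    import requests

    def unenroll(course_id, enrollment_id, task="delete"):
        # DELETE /api/v1/courses/:course_id/enrollments/:id
        # `task` picks the flavor; the deleted helper relied on the server default.
        u = f"{url}/api/v1/courses/{course_id}/enrollments/{enrollment_id}"
        r = requests.delete(u, headers=header, params={"task": task})
        return r.status_code == 200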
@@ -1917,12 +1701,10 @@ if __name__ == "__main__":
         8: ['Save enrollments in a course', course_enrollment],
         9: ['Simple list of course data, search by sis_id', course_search_by_sis],
         10: ['Overview of a term', course_term_summary],
-
         11: ['Enroll ORIENTATION and STEM student shells after catching up database.', enroll_o_s_students],
         12: ['Enroll stem students', enroll_stem_students_live],
         13: ['Enroll orientation students (refresh local db)', enroll_orientation_students],
         14: ['Enroll ART students', enroll_art_students_live],
-
         15: ['List users who passed GOTT 1 / Bootcamp', get_gott1_passers],
         16: ['List users who passed Plagiarism Module', get_plague_passers],
         18: ['Create some sandbox courses', create_sandboxes],
@@ -1930,15 +1712,13 @@ if __name__ == "__main__":
         20: ['process the semester overview output (10)', course_term_summary_2],
         22: ['Get a course info by id',getCourses],
         23: ['Reset course conclude date',update_course_conclude],
-
         25: ['ext tools',get_ext_tools],
         26: ['set ext tools',set_ext_tools],
         32: ['Get course ext tools', get_course_ext_tools],
         33: ['Add GavConnect to a course', do_gav_connect],
         17: ['Remove "new analytics" from all courses navs in a semester', remove_n_analytics],
         21: ['Add course evals', add_evals],
-        27: ['Fine tune term dates and winter session', course_by_depts_terms],
+        27: ['Fine tune term dates and winter session', course_dates_terms],
         3: ['Cross-list classes', xlist ],
         6: ['Cross list helper', eslCrosslister],
         28: ['Cross list a semester from file', semester_cross_lister],
@@ -1949,15 +1729,10 @@ if __name__ == "__main__":
         35: ['list all assignments', list_all_assignments],

         40: ['Enroll GOTT Workshops', enroll_gott_workshops_su23],
-        42: ['Add teacher to many shells', teacher_to_many_shells],
-        43: ['Bulk unenroll from course', bulk_unenroll],
         # 24: ['Add course evals to whole semester',instructor_list_to_activate_evals],
         # 21: ['Add announcements to homepage', change_course_ann_homepage],
         # TODO wanted: group shell for each GP (guided pathway) as a basic student services gateway....
         #

-        45: ['Fetch rubric scores and comments', fetch_rubric_scores],
-        46: ['Fetch announcements in a course', fetch_announcements],
         }
     print ('')

@@ -14,11 +14,8 @@ displaynames = []

 from canvas_secrets import cq_user, cq_pasw

-from outcomes import quick_add_course_outcomes
-
 CQ_URL = "https://secure.curricunet.com/scripts/webservices/generic_meta/clients/versions/v4/gavilan.cfc"
-CQ_URL = "https://mws.services.curriqunet.com/scripts/webservices/generic_meta/clients/versions/v4/gavilan.cfc"
 PARAM = "?returnFormat=json&method=getCourses"

 user = cq_user
@@ -672,7 +669,6 @@ def another_request(url,startat):
     newparam = "&skip=" + str(startat)
     print((url+newparam))
     r = requests.get(url+newparam, auth=(user,pasw))
-    #print(r.text)
     try:
         mydata = json.loads(r.text, strict=False)
     except Exception as e:
@@ -855,11 +851,11 @@ if __name__ == "__main__":
     options = { 1: ['fetch all courses', fetch_all_classes],
         2: ['process all classes', path_style_test],
         3: ['courses - path style to html catalog', course_path_style_2_html],
-        4: ['show course outcomes', all_outcomes],
-        5: ['courses - rank by all versions', course_rank],
-        10: ['fetch all programs', fetch_all_programs],
-        11: ['process all programs', path_style_prog],
-        12: ['programs - path style to html catalog', path_style_2_html],
+        4: ['courses - rank by all versions', course_rank],
+        5: ['fetch all programs', fetch_all_programs],
+        6: ['process all programs', path_style_prog],
+        9: ['show course outcomes', all_outcomes],
+        10: ['programs - path style to html catalog', path_style_2_html],
         }
     print ('')

docs.py (118 lines, file deleted)
@@ -1,118 +0,0 @@
-import codecs, os,regex, subprocess
-
-
-def html_to_markdown(infile,out):
-    cmd = f"pandoc -o \"./{out}\" -f html -t markdown \"./{infile}\""
-    print(cmd)
-    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    if result.returncode != 0:
-        print(f"Error occurred: {result.stderr.decode('utf-8')}")
-    else:
-        print(f"Successfully converted '{infile}' to '{out}'")
-
-
-def pdf_to_html(infile,out):
-    import PyPDF2
-
-    pdf_file = open(infile, 'rb')
-    pdf_reader = PyPDF2.PdfReader(pdf_file)
-
-    text = ''
-    for page_num in range(len(pdf_reader.pages)):
-        page = pdf_reader.pages[page_num]
-        text += page.extract_text()
-
-    pdf_file.close()
-    ofile = codecs.open(out,'w','utf-8')
-    ofile.write(text)
-    ofile.close()
-
-
-def pdf_to_html2(infile,out):
-    from pdfminer.high_level import extract_pages
-    from pdfminer.layout import LTTextContainer
-    import html
-
-    ofile = codecs.open(out,'w','utf-8')
-
-    print(infile)
-    for page_layout in extract_pages(infile):
-        for element in page_layout:
-            if isinstance(element, LTTextContainer):
-                text = html.escape(element.get_text()) # sanitize the text for HTML
-                ofile.write(f"<p>{text}</p>") # wraps in HTML paragraph tags
-
-
-def convert(filename=""):
-    target_dir = 'cache/docs'
-    ff = os.listdir(target_dir)
-
-    if filename:
-        parts = filename.split('.')
-        OUTFILE = f"{parts[0]}.html"
-        pdf_to_html(target_dir + "/" + filename, target_dir + "/" + OUTFILE)
-        html_to_markdown( target_dir + "/" + OUTFILE, target_dir + "/" + parts[0] + ".md" )
-
-    else:
-        for INFILE in ff:
-            if INFILE.endswith(".pdf"):
-                parts = INFILE.split('.')
-                OUTFILE = f"{parts[0]}.html"
-
-                pdf_to_html(target_dir + "/" + INFILE, target_dir + "/" + OUTFILE)
-                html_to_markdown( target_dir + "/" + OUTFILE, target_dir + "/" + parts[0] + ".md" )
-
-
-def clean(fn):
-    # Open file and read contents
-    with open(fn, 'r', encoding='utf-8') as myfile:
-        data = myfile.read()
-
-    # Replace unicode non-breaking space with a regular space
-    data = data.replace('\u00A0', ' ')
-    data = data.replace('\u00AD', '')
-    data = data.replace('\u200B', '')
-
-    # Write cleaned data back to file
-    with open(fn, 'w', encoding='utf-8') as myfile:
-        myfile.write(data)
-
-
-def fix_line_breaks(fn):
-    with codecs.open(fn, 'r', 'utf-8') as file:
-        lines = file.readlines()
-
-    new_lines = []
-    paragraph = ''
-
-    for line in lines:
-        if line.strip() == '':
-            # If the line is blank, it's the end of a paragraph
-            new_lines.append(paragraph.strip())
-            paragraph = ''
-        else:
-            # If the line is not blank, add it to the paragraph (extra space included for word separation)
-            paragraph += line.strip() + ' '
-
-    # Handle the last paragraph
-    if paragraph != '':
-        new_lines.append(paragraph.strip())
-
-    fout = codecs.open(fn, 'w','utf-8')
-    fout.write('\n'.join(new_lines))
-
-
-fix_file = 'hyflex.md'
-convert('hyflex.pdf')
-
-clean(f'cache/docs/{fix_file}')
-
-fix_line_breaks(f'cache/docs/{fix_file}')
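The deleted docs.py implemented a PDF-to-markdown pipeline: dump the PDF text wrapped in <p> tags, then hand the HTML to pandoc. Spelled out for one file, reusing the deleted helpers (paths are illustrative):

    import subprocess

    src = 'cache/docs/hyflex.pdf'
    html_out = 'cache/docs/hyflex.html'
    md_out = 'cache/docs/hyflex.md'

    pdf_to_html(src, html_out)  # PDF text -> minimal HTML
    subprocess.run(['pandoc', '-o', md_out, '-f', 'html', '-t', 'markdown', html_out],
                   check=True)  # HTML -> markdown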
localcache.py (296 lines changed)
@@ -40,10 +40,9 @@ requests_format = "id timestamp year month day userid courseid rootid course_acc
 users_format = "id canvasid rootactid name tz created vis school position gender locale public bd cc state sortablename globalid".split(" ")
 cc_format = "id canvasid userid address type position state created updated".split(" ")
 term_format = "id canvasid rootid name start end sis".split(" ")
-course_format = "id canvasid rootactid acctid termid name code type created start conclude visible sis state wikiid schedule".split(" ")
+course_format = "id canvasid rootactid acctid termid name code type created start conclude visible sis state wikiid".split(" ")
 role_format = "id canvas_id root_account_id account_id name base_role_type workflow_state created_at updated_at deleted_at".split(" ")
 course_score_format = "s_id c_id a_id course_id enrol_id current final muted_current muted_final".split(" ")
-course_section_dim_format = "id canvas_id name course_id enrollment_term_id default_section accepting_enrollments can_manually_enroll start_at end_at created_at workflow_state restrict_enrollments_to_section_dates nonxlist_course_id sis_source_id".split(" ")
 enrollment_dim_format = "id cid root course_section role type workflow created updated start end complete self sis course_id user_id last_activity".split(" ")
 communication_channel_dim_format = "id canvas_id user_id address type position workflow_state created_at updated_at".split(" ")
 pseudonym_dim_format = "id canvas_id user_id account_id workflow_state last_request_at last_login_at current_login_at last_login_ip current_login_ip position created_at updated_at password_auto_generated deleted_at sis_user_id unique_name integration_id authentication_provider_id".split(" ")
@@ -93,12 +92,12 @@ DB_CUR = 0
 ######### LOCAL DB
 #########

-def db(file=sqlite_file):
+def db():
     global DB_CON, DB_CUR
     if DB_CON:
         return (DB_CON,DB_CUR)
     print('grabbing db connection')
-    DB_CON = sqlite3.connect(file)
+    DB_CON = sqlite3.connect(sqlite_file)
     DB_CUR = DB_CON.cursor()

     return (DB_CON, DB_CUR)
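The db() change above keeps the module-level connection cache but drops the configurable path. The pattern itself, as a minimal self-contained sketch (names and default path are illustrative):

    import sqlite3

    _CON = None  # module-level cache, mirroring DB_CON/DB_CUR above

    def get_db(path='cache/local.sqlite'):
        # Connect once and reuse; repeated sqlite3.connect() calls would each
        # pay setup cost and can contend for the database file lock.
        global _CON
        if _CON is None:
            _CON = sqlite3.connect(path)
        return _CON, _CON.cursor()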
@@ -198,17 +197,6 @@ def setup_table(table='requests'):
         q += "\t%s %s" % (col,type)
     q += "\n);"

-    if table=='course_sections':
-        first = 1
-        q = "CREATE TABLE IF NOT EXISTS course_sections (\n"
-        for L in course_section_dim_format:
-            (col,type) = (L,'text')
-            if not first:
-                q += ",\n"
-            first = 0
-            q += "\t%s %s" % (col,type)
-        q += "\n);"
-
     if table=='enrollment':
         first = 1
         q = "CREATE TABLE IF NOT EXISTS enrollment (\n"

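Each `CREATE TABLE` branch in `setup_table` rebuilds the column list with a first-iteration flag; a `join()` over the format list is the more direct sketch (assuming, as the originals do, that every column is `text`):

```python
def create_table_sql(table, columns):
    # One "name text" entry per column, comma-joined -- no first-row flag needed.
    cols = ",\n".join("\t%s text" % col for col in columns)
    return "CREATE TABLE IF NOT EXISTS %s (\n%s\n);" % (table, cols)

# e.g. create_table_sql('enrollment', enrollment_dim_format)
```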
@@ -606,7 +594,22 @@ def is_requestfile_interesting(fname):
     global thefiles, thefiles_dat
     #begin_month = ['2020-01','2020-02','2020-03','2020-04','2020-05','2020-06','2020-07']
     #begin_month = ['2020-09','2020-10','2020-08']
-    begin_month = ['2023-08', '2023-09', '2023-10', '2023-11', '2023-12', '2024-01', '2024-02', '2024-03', '2024-04', '2024-05', '2024-06', '2024-07']
+    begin_month = ['2021-02','2021-03']

+    #AE 600 (80040; 80045; 80047) 10945
+    #AE 602 (80048; 80049; 80050) 10746
+    #AE 636 (80332; 80381) 10783
+    #CSIS 571A (80428) 10956
+    #GUID 558A (80429) 10957
+
+    # The AEC sections of interest.
+    sections = '10945 10746 1783 10956 10957'.split(' ')
+    # Just once, to get the people
+    #[ course_enrollment(x) for x in sections ]
+
     first = {}
     lines = False

@@ -742,8 +745,6 @@ def requests_file(fname_list):
     samples = codecs.open('cache/request_samples.txt', 'a', 'utf-8')
     conn,cur = db()

-    RESUME = 610
-
     folderi = 0
     filei = 0
     last_time = time.process_time()

@@ -754,11 +755,8 @@ def requests_file(fname_list):
         #if folderi > 2: return
         print("\n%i\t%s \t" % (folderi, fname), end='', flush=True)
         folderi += 1
-        if folderi < RESUME:
-            continue
         filei = 0

-        try:
         lines = is_requestfile_interesting(fname)
         if lines:
             vals_cache = []

@@ -766,18 +764,24 @@ def requests_file(fname_list):
                 thisline = requests_line(L,filei) #TODO select if timeblock exists
                 if not thisline:
                     continue
-                if random.random() > 0.99999:
+                if random.random() > 0.9999:
                     #L = str(L)
                     if type(L) == type(b'abc'): L = L.decode('utf-8')
                     parts = L.split('\t')
                     if len(parts)>17:
                         samples.write( "\t".join( [parts[13] , parts[14], parts[15], parts[16], parts[18], parts[19]]))

+                #q,v = dict_to_insert(thisline,'requests')
                 if not 'courseid' in thisline: continue
                 if not 'userid' in thisline: continue

+                # Limit this database to certain courses?
+                # if thisline['courseid'] not in mycourses: continue
+
                 v = ( thisline['userid'], thisline['courseid'], thisline['time_block'], 1 )
                 vals_cache.append( [ str(x) for x in v ] )
+                try:
+                    #cur.execute(q)
                 if filei % 5000 == 0:
                     conn.executemany(q, vals_cache)
                     conn.commit()

@@ -787,6 +791,11 @@ def requests_file(fname_list):
                     print("\nLoop %i - committed to db in %0.1fs. " % (filei,delta), end='', flush=True)
                     samples.flush()
                 filei += 1
+                except Exception as e:
+                    print(thisline)
+                    print(e)
+                    print(q)
+                    print(v)
             # do the commit on the entire file...
             conn.executemany(q, vals_cache)
             conn.commit()

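Both sides of `requests_file` flush `vals_cache` with one `executemany()` per 5000 parsed lines rather than one `execute()` per row. A self-contained sketch of that batching pattern; the table and column names here are illustrative, not the repo's actual schema:

```python
import sqlite3

def insert_batched(conn, rows, batch_size=5000):
    # Buffer rows and write each batch with a single executemany()/commit;
    # committing per-row is the slow path this avoids.
    q = "INSERT INTO requests (userid, courseid, time_block, views) VALUES (?,?,?,?)"
    batch = []
    for row in rows:
        batch.append(row)
        if len(batch) >= batch_size:
            conn.executemany(q, batch)
            conn.commit()
            batch = []
    if batch:  # flush the remainder
        conn.executemany(q, batch)
        conn.commit()
```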
@@ -794,9 +803,7 @@ def requests_file(fname_list):
             delta = t - last_time
             last_time = t
             print("\nLoop %i - committed to db in %0.1fs. " % (filei,delta), end='', flush=True)
-        except Exception as e:
-            print(e)
-            print("Failed on: %s" % fname)

 # Insert or update a request line.
 def upsert_request(line, vals):

@@ -831,16 +838,17 @@ def dict_to_insert(thisline,table): # a dict
 # This and the following merge functions do direct inserts without further tallying.
 # This now does tallying by timeblock.
 def merge_requests():
-    req = [x for x in os.listdir(local_data_folder) if 'requests' in x]
-    print(f"Checking {len(req)} request log files.")
-    requests_file(req)
-    #i = 0
-    #max = 20000
+    req = []
+    i = 0
+    max = 2000

-    #for f in os.listdir(local_data_folder):
-    #    if re.search(r'requests',f) and i < max:
-    #        req.append(f)
-    #        i += 1
+    for f in os.listdir(local_data_folder):
+        if re.search(r'requests',f) and i < max:
+            req.append(f)
+            i += 1
+    #req = ['requests-00000-afc834d1.gz',]
+    print("Checking %i request log files." % len(req))
+    requests_file(req)

 def merge_comm_channel():
     setup_table('comm_channel')

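The two `merge_requests` variants differ only in whether the scan of the data folder is capped at `max` files; `itertools.islice` expresses both in one sketch (`request_files` is an illustrative name, not in the repo):

```python
import os, re
from itertools import islice

def request_files(folder, cap=None):
    # Every file whose name contains 'requests', optionally capped;
    # cap=None scans the whole folder.
    names = (f for f in os.listdir(folder) if re.search(r'requests', f))
    return list(islice(names, cap))
```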
@@ -912,26 +920,6 @@ def merge_courses():
             print(q)
     conn.commit()

-def merge_course_sections():
-    setup_table('course_sections')
-    (conn,cur) = db()
-
-    c_file = most_recent_file_of('course_section_dim')
-    c_sections = parse_file_with( c_file, course_section_dim_format)
-    count = 0
-    for U in c_sections:
-        q,v = dict_to_insert(U,'course_sections')
-        count += 1
-        #if count % 1000 == 0:
-        #    print( "%i - " % count + q + " " + str(v) )
-        try:
-            cur.execute(q,v)
-        except Exception as e:
-            print(e)
-            print(q)
-    conn.commit()
-    print("Processed %i course sections" % count)
-
 def merge_enrollment():
     setup_table('enrollment')
     (conn,cur) = db()

@@ -1145,7 +1133,6 @@ def full_reload():
     merge_enrollment()
     merge_term()
     merge_roles()
-    merge_course_sections()

     #merge_requests()

@@ -1866,201 +1853,7 @@ WHERE


-def add_sessions():
-    j_in = json.loads( codecs.open('cache/2023sessions.json','r','utf-8').read())
-
-    # Function to format the starttime
-    def format_starttime(day, time):
-        day_parts = day.split()
-        start_time = time.split('-')[0].strip()
-        return f"2023-08-{day_parts[1][:2]} {start_time}:00"
-
-    def esc(input_string):
-        input_string = input_string.replace('\xa0', ' ')
-        return input_string.replace("'", "''")
-
-    def ugh(s):
-        return f"`{s}`"
-
-    # Sample values for track and location
-    track = 1
-    id = 1341
-
-    f = "id, title, desc, type, length, starttime, track, location, location_irl, mode, gets_survey, is_flex_approved, parent"
-    fields = ",".join([ ugh(x) for x in f.split(', ') ])
-
-    for session_data in j_in:
-        #print(json.dumps(session_data,indent=2))
-
-        location = ""
-        link = ""
-        if "link" in session_data: link = session_data['link']
-
-        mode = ""
-        if "mode" in session_data: mode = session_data['mode']
-        if mode == "Zoom": mode = "online"
-        if mode == "Hybrid": mode = "hybrid"
-        if mode == "Face-to-Face": mode = "inperson"
-
-        # Generate INSERT statement
-        insert_statement = f"""INSERT INTO conf_sessions
-        ({fields})
-        VALUES
-        ({id}, '{esc(session_data["title"])}', '{esc(session_data["description"])}', 101, 1, '{format_starttime(session_data["day"], session_data["time"])}', {track}, '{location}', '{link}', '{mode}', 1, 1, 1320);
-        """
-
-        print(insert_statement)
-        id += 1
-
-
-def test_long_running():
-    from time import sleep
-    print("Starting long process...")
-    for i in range(20):
-        print("sleeping %s" % i, flush=True)
-        sleep(1)
-
-
-def courses_to_sched():
-    # Correlate rows in courses table with an id to rows in schedule table.
-    conn,cur = db('cache/canvas_data/data20231012.db')
-    q = "SELECT canvasid, code, sis, schedule FROM courses ORDER BY sis DESC"
-    conn.row_factory = dict_factory
-
-    seasons = {'10':'wi','30':'sp','50':'su','70':'fa'}
-
-    cur.execute(q)
-    courses = cur.fetchall()
-    sem = ''
-    for c in courses:
-        try:
-            if re.search(r'^\d\d\d\d\d\d\-\d\d\d\d\d$', c[2]):
-                semparts = c[2].split('-')
-                yr = semparts[0][2:4]
-                if yr in ['16','17']: continue
-                print(c, end=' ')
-                season = seasons[ str(semparts[0][4:6]) ]
-                sem = f"{season}{yr}"
-                crn = semparts[1]
-                print(sem, end=' ')
-                q2 = f"SELECT * FROM schedule WHERE crn='{crn}' AND sem='{sem}'"
-                cur.execute(q2)
-                sched = cur.fetchall()
-                if sched:
-                    sched = sched[0]
-                    id = sched[0]
-                    q3 = f"UPDATE courses SET schedule='{id}' WHERE canvasid='{c[0]}'"
-                    cur.execute(q3)
-
-                    print(sched)
-                    #print(q3)
-                else:
-                    print()
-        except Exception as e:
-            print(e)
-    conn.commit()
-
-def query_multiple(q, database=sqlite_file):
-    conn,cur = db(database) # 'cache/canvas_data/data20231012.db'
-    conn.row_factory = dict_factory
-    cur = conn.cursor()
-    cur.execute(q)
-    return cur.fetchall()
-
-def query_execute(q, database=sqlite_file):
-    conn,cur = db(database)
-    cur.execute(q)
-    conn.commit()
-
-def sched_to_db():
-    d = 'DROP TABLE IF EXISTS `schedule`;'
-    table = '''CREATE TABLE `schedule` (
-    `id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
-    `crn` varchar(10) NOT NULL,
-    `code` varchar(30) NOT NULL,
-    `units` varchar(20) NOT NULL,
-    `teacher` tinytext NOT NULL,
-    `start` varchar(30) NOT NULL,
-    `end` varchar(30) NOT NULL,
-    `type` varchar(20) NOT NULL,
-    `loc` varchar(80) NOT NULL,
-    `site` varchar(50) NOT NULL,
-    `partofday` varchar(40) NOT NULL,
-    `cap` INTEGER,
-    `act` INTEGER,
-    `sem` varchar(10) NOT NULL
-    ) ;
-    '''
-
-    conn,cur = db('cache/canvas_data/data20231012.db')
-    print(table)
-    cur.execute(d)
-    cur.execute(table)
-    conn.commit()
-
-    vals_cache = []
-    last_time = time.process_time()
-    i = 0
-
-    output = codecs.open('cache/schedule.sql','w','utf-8')
-    for year in ['16','17','18','19','20','21','22','23']:
-        for sem in ['sp','su','fa']:
-            term = f"{sem}{year}"
-            print(term)
-            try:
-                sched = requests.get(f"http://gavilan.cc/schedule/{term}_sched_expanded.json").json()
-                show_summary = 1
-
-                query = "INSERT INTO schedule (crn, code, units, teacher, start, end, type, loc, site, partofday, cap, act, sem) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?);"
-
-                for c in sched:
-                    pod = ''
-                    if 'partofday' in c: pod = c['partofday']
-                    q = [c['crn'], c['code'], c['cred'], c['teacher'], c['start'], c['end'], c['type'], c['loc'], c['site'], pod, c['cap'], c['act'], term]
-                    vals_cache.append( q ) # [ str(x) for x in q ] )
-                    #print(f"{i}: {q}")
-                    i += 1
-                    if i % 500 == 0:
-                        conn.executemany(query, vals_cache)
-                        conn.commit()
-                        vals_cache = []
-                        t = time.process_time()
-                        delta = t - last_time
-                        last_time = t
-                        print(f"Loop {i} - committed to db in %0.3fs. " % delta, flush=True)
-
-            except Exception as e:
-                print(e)
-    conn.executemany(query, vals_cache)
-    conn.commit()
-
-def students_current_semester(sem='202370'):
-    q = f"""SELECT u.canvasid FROM enrollment AS e
-    JOIN users AS u ON e.user_id=u.id
-    JOIN courses AS c ON e.course_id=c.id
-    WHERE c.sis LIKE "{sem}-%"
-    AND e.workflow="active"
-    AND e."type"="StudentEnrollment"
-    GROUP BY u.canvasid;"""
-    result = query_multiple(q)
-    #for r in result:
-    #    print(json.dumps(result,indent=2))
-    return result
-
-def users_with_history():
-    q = '''SELECT u.name, u.sortablename, u.canvasid, c.code, s.partofday, s.type, s.site, s.units, t.sis, s.sem FROM users u
-    JOIN enrollment e ON u.id = e.user_id
-    JOIN courses c ON c.id = e.course_id
-    JOIN terms t ON c.termid = t.id
-    JOIN schedule s ON (s.crn=SUBSTR(c.sis,INSTR(c.sis, '-')+1,5) AND s.semsis=t.sis)
-    WHERE e.type='StudentEnrollment' AND e.workflow='active'
-    ORDER BY u.sortablename, t.sis, c.code ;'''
-    result = query_multiple(q)
-    #for r in result:
-    #    print(json.dumps(result,indent=2))
-    return result
-
 if __name__ == "__main__":

@@ -2088,11 +1881,6 @@ if __name__ == "__main__":
     20: ['Process enrollment data', process_enrollment_data],
     21: ['Encode data', do_encoding],
     22: ['all students course history', all_students_history],
-    23: ['test long running', test_long_running],
-    24: ['add conference sessions', add_sessions],
-    25: ['gavilan.cc extended schedule to sql insert format', sched_to_db],
-    26: ['correlate courses to schedule id', courses_to_sched],
-    27: ['report all users', users_with_history],
     #19: ['add evals for a whole semester', instructor_list_to_activate_evals],
     #16: ['Upload new employees to flex app', employees_refresh_flex],
     }

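`courses_to_sched` and `query_multiple` in the removed block both rely on a `dict_factory` row factory; the usual sqlite3 recipe is sketched below. Note that assigning `row_factory` only affects cursors created afterwards, which is why `query_multiple` re-creates its cursor while `courses_to_sched` keeps getting tuples (it indexes rows as `c[0]`, `c[2]`):

```python
import sqlite3

def dict_factory(cursor, row):
    # Map each row to {column_name: value} instead of a positional tuple.
    return {desc[0]: value for desc, value in zip(cursor.description, row)}

conn = sqlite3.connect('cache/canvas_data/data20231012.db')
conn.row_factory = dict_factory   # set before creating the cursor
cur = conn.cursor()
```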
77
myweb.py

@@ -1,77 +0,0 @@
-from flask import Flask, render_template
-from flask_socketio import SocketIO, emit
-from flask_sse import sse
-
-from threading import Thread
-import subprocess
-
-
-from flask import Flask, render_template
-from flask_sse import sse
-from threading import Thread
-import subprocess
-
-app = Flask(__name__)
-app.config['REDIS_URL'] = 'redis://localhost'
-app.register_blueprint(sse, url_prefix='/stream')
-
-# Background thread to run the long-running task
-def run_long_running_task():
-    process = subprocess.Popen(['python', 'localcache.py', '23'],
-                               stdout=subprocess.PIPE,
-                               stderr=subprocess.STDOUT,
-                               universal_newlines=True)
-    for line in process.stdout:
-        # Emit the output line as a server-sent event
-        sse.publish({'data': line.strip()}, type='output')
-    process.wait()
-
-@app.route('/')
-def index():
-    return render_template('myweb.html')
-
-@app.route('/start')
-def start_task():
-    # Start the long-running task in a background thread
-    thread = Thread(target=run_long_running_task)
-    thread.start()
-    return 'Task started'
-
-if __name__ == '__main__':
-    app.run()
-
-
-'''
-app = Flask(__name__)
-app.config['SECRET_KEY'] = 'secret_key'
-socketio = SocketIO(app)
-
-# Background process to run the long-running task
-def run_long_running_task():
-    process = subprocess.Popen(['python', 'localcache.py', '23'],
-                               stdout=subprocess.PIPE,
-                               stderr=subprocess.STDOUT,
-                               universal_newlines=True)
-    for line in process.stdout:
-        # Emit the output line to the client
-        socketio.emit('output', {'data': line.strip()})
-        print(f"Sent: {line.strip()}")
-    print("Process is done")
-    process.wait()
-
-@app.route('/')
-def index():
-    return render_template('myweb.html')
-
-@socketio.on('connect')
-def on_connect():
-    # Start the long-running task when a client connects
-    thread = Thread(target=run_long_running_task)
-    thread.start()
-
-if __name__ == '__main__':
-    socketio.run(app)
-'''

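The deleted `myweb.py` publishes subprocess output from a bare thread; `flask_sse.sse.publish` reads `REDIS_URL` from the current app's config, so the worker thread needs an application context (and a running Redis). A minimal sketch of the same streaming pattern with that fix:

```python
import subprocess
from threading import Thread
from flask import Flask
from flask_sse import sse

app = Flask(__name__)
app.config['REDIS_URL'] = 'redis://localhost'
app.register_blueprint(sse, url_prefix='/stream')

def stream_command(cmd):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, universal_newlines=True)
    with app.app_context():              # sse.publish needs the app config
        for line in proc.stdout:
            # Each output line becomes one 'output' event on /stream.
            sse.publish({'data': line.strip()}, type='output')
    proc.wait()

@app.route('/start')
def start_task():
    Thread(target=stream_command,
           args=(['python', 'localcache.py', '23'],)).start()
    return 'Task started'
```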
13
outcomes.py

@@ -13,8 +13,6 @@ f = codecs.open('cache/slo/log.txt','w','utf-8')

 VERBOSE = 1

-TERM = '180'
-
 SLO_CURRENT_SOURCE = 'cache/slo/2018_slo.csv' # term 21
 #SLO_CURRENT_SOURCE = 'cache/slo/2020_slo.csv'

@@ -296,7 +294,7 @@ def create_dept_group(short):
     r = requests.post(t,data=new_group, headers=header)
     print(r.text)

-def outcomes_attached_to_courses(term=TERM,limitdept=''):
+def outcomes_attached_to_courses(term=65,limitdept=''):
     # For each class in a term, check to see if it has outcomes and/or
     # an outcome group attached to it.
     courses = getCoursesInTerm(term,show=0,active=0)

@@ -839,10 +837,9 @@ def all_outcome_results_in_term_sub(termid=''):

 def all_linked_outcomes_in_term(termid=''):
-    #terms = [172,174,176,178]
-    #for t in terms:
-    #    all_linked_outcomes_in_term_sub(str(t))
-    all_linked_outcomes_in_term_sub(TERM)
+    terms = [172,174,176,178]
+    for t in terms:
+        all_linked_outcomes_in_term_sub(str(t))

 def all_linked_outcomes_in_term_sub(termid=''):

@@ -1279,7 +1276,7 @@ def parse_ilearn_course_names_ALLSEMESTERS():

-def parse_ilearn_course_names(term=TERM,fresh=0,log=0):
+def parse_ilearn_course_names(term='178',fresh=0,log=0):
     non_matches = []

     courses = getCoursesInTerm(term,get_fresh=fresh)

428
outcomes2022.py

@@ -13,8 +13,7 @@
 # + Whether they are present in the relevant classes in iLearn
 # + Insert SLO into course if not present
 # + Mark as inactive (change name) if necessary
-#
-# - Update shell with correct outcomes

 # - Issue:
 # + Course naming / sections joined...

@@ -22,16 +21,14 @@
 import concurrent.futures
 import pandas as pd
 from pipelines import fetch, url, header
-from outcomes import quick_add_course_outcomes, code_from_ilearn_name, all_linked_outcomes_in_term
-from courses import getCoursesInTerm, getCourses
-import codecs, json, sys, re, csv, requests, textwrap
+from courses import getCoursesInTerm
+import codecs, json
 from path_dict import PathDict

-outputfile = ''
-csvwriter = ''
-
-TERM = 181
-
+NUM_THREADS = 20
+get_fresh = 0
+sem_courses = getCoursesInTerm(178,get_fresh)

 def escape_commas(s):
     if ',' in s:

@@ -39,34 +36,15 @@ def escape_commas(s):
     else:
         return s

+# shorter list for test?
+#sem_courses = sem_courses[:50]

-def add_outcome_to_course(shell_id=''):
-    if shell_id == '':
-        shell_id = input("Enter shell id > ")
-    course = getCourses(str(shell_id))
-    dept, code, crn = code_from_ilearn_name(course['name'])
-    print(f"{dept} {code} {crn} for course named: {course['name']}")
-    #xyz = input(f"Using: {code} for {course['name']}. Enter a different code, q to skip or press enter to continue > ")
-    #if xyz == 'q':
-    #    return
-    #if xyz != '':
-    #    code = xyz
-    cq_course_id = find_recent_cqcourseid(code)
-    oc = codecs.open('cache/courses/alloutcomes.csv','r','utf-8')
-    reader = csv.reader(oc)
-    cols = next(reader) # skip header
-
-    # Filter rows matching the code
-    rows = [row for row in reader if row[1] == cq_course_id]
-    rows_dicts = [ {cols[i]: r[i] for i in range(len(cols))} for r in rows ]
-    #abc = input(f"Using outcomes:\n{rows_dicts}\n\nPress enter to continue > ")
-
-    quick_add_course_outcomes(shell_id, rows_dicts)
+print("Got %i courses in current semester." % len(sem_courses))

+outputfile = codecs.open('cache/slo/outcomes2022.output.txt','w','utf-8')
+outputfile.write( "coursename,assessed,courseid,outcome_id,points,title,displayname,description,guid\n")

 def course_slo_getter(q):
-    global outputfile, csvwriter
     (name,id) = q
     info = {'ilearnname':name,'ilearnid':id}
     print(" + Thread getting %s %s" % (str(name),str(id)))

@@ -90,8 +68,6 @@ def course_slo_getter(q):
             og['full_outcomes'][this_outcome['id']] = this_outcome
             saveme = [name, this_outcome['assessed'], id, this_outcome['id'], this_outcome['points_possible'], this_outcome['title'], this_outcome['display_name'], this_outcome['description'], this_outcome['vendor_guid'] ]
             saveme2 = [escape_commas(str(x)) for x in saveme]
-
-            csvwriter.writerow([id, name, og['id'], this_outcome['id'], this_outcome['vendor_guid'], this_outcome['points_possible'], this_outcome['mastery_points'], this_outcome['assessed'], this_outcome['description']])
             outputfile.write(",".join(saveme2) + "\n")
             outputfile.flush()
     if type(og_for_course) == list:

@@ -101,43 +77,25 @@ def course_slo_getter(q):
     print(" - Thread %s DONE" % str(id))
     return og_for_course

+raw_log = codecs.open('cache/outcome_raw_log.txt','w','utf-8')
+#raw_log.write( json.dumps(output,indent=2) )

-# I duplicate???
-def outcomes_in_shell(course_id):
-    print(f"Getting root outcome group for course id {course_id}")
-    root_og = fetch(f"{url}/api/v1/courses/{course_id}/root_outcome_group")
-    print(f"Getting outcomes")
-    u1 =f"{url}/api/v1/courses/{course_id}/outcome_groups/{root_og['id']}/outcomes"
-    outcomes_list = fetch( u1 )
-    the_outcomes = []
+output = []
+with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
+    futures = []
+    for C in sem_courses:
+        print("Adding ", C['name'], C['id'], " to queue")
+        futures.append( pool.submit(course_slo_getter, [C['name'], C['id']] ) )
+    for future in concurrent.futures.as_completed(futures):
+        output.append(future.result())
+        print(future.result())
+        raw_log.write( json.dumps(future.result(),indent=2) + "\n" )
+        #print("-- Done")
+        #print("results array has %i items" % len(results))

-    if 'errors' in outcomes_list:
-        print(f"Error: {outcomes_list}")
+#for r in concurrent.futures.as_completed(results):
+#    output.append(r.result())

-    if len(outcomes_list):
-        for oo in outcomes_list:
-            print(f"Getting outcome id {oo['outcome']['id']}")
-            outcome = fetch( url + '/api/v1/outcomes/%s' % str(oo['outcome']['id']) )
-            outcome['parent_group'] = root_og['id']
-            the_outcomes.append(outcome)
-    u2 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{root_og['id']}/subgroups"
-    g2 = fetch( u2 )
-
-    for subgroup in g2:
-        print("doing subgroup id %s" % str(subgroup['id']))
-        u3 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{subgroup['id']}/outcomes"
-        outcomes_list = fetch( u3 )
-
-        if 'errors' in outcomes_list:
-            print(f"Error: {outcomes_list}")
-            continue
-
-        if len(outcomes_list):
-            for oo in outcomes_list:
-                outcome = fetch( f"{url}/api/v1/outcomes/{oo['outcome']['id']}" )
-                outcome['parent_group'] = subgroup['id']
-                the_outcomes.append(outcome)
-    return root_og, the_outcomes, g2

 def ilearn_shell_slo_to_csv(shell_slos):

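The right-hand side fans `course_slo_getter` out over a thread pool at import time. The core pattern, separated from the module-level globals, is sketched below (`fan_out` is an illustrative name, not in the repo):

```python
import concurrent.futures

def fan_out(items, worker, num_threads=20):
    # One submit per item; collect results in completion order.
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as pool:
        futures = [pool.submit(worker, item) for item in items]
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())  # re-raises worker exceptions here
    return results
```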
@@ -188,339 +146,9 @@ def ilearn_shell_slo_to_csv(shell_slos):
     print(df)


-def get_outcomes_term_index():
-    global outputfile, csvwriter
-    NUM_THREADS = 20
-    get_fresh = 0
-
-    sem_courses = getCoursesInTerm(TERM,get_fresh)
-
-    # shorter list for test?
-    #sem_courses = sem_courses[:50]
+#ilearn_shell_slo_to_csv(output)
-
-    print("Got %i courses in current semester." % len(sem_courses))
-
-    outputfile = codecs.open(f'cache/slo/outcomes_bycourse_{TERM}.output.txt','w','utf-8')
-    outputfile.write( "coursename,assessed,courseid,outcome_id,points,title,displayname,description,guid\n")
-
-    csvfile = codecs.open(f'cache/slo/linked_slos_term_{TERM}_compact.csv','w','utf-8')
-    csvwriter = csv.writer(csvfile)
-    csvwriter.writerow('courseid coursename ogid oid vendorguid points mastery assessed desc'.split(' '))
-
-    raw_log = codecs.open('cache/outcome_raw_log.txt','w','utf-8')
-    #raw_log.write( json.dumps(output,indent=2) )
-
-    output = []
-    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
-        futures = []
-        for C in sem_courses:
-            print("Adding ", C['name'], C['id'], " to queue")
-            futures.append( pool.submit(course_slo_getter, [C['name'], C['id']] ) )
-        for future in concurrent.futures.as_completed(futures):
-            output.append(future.result())
-            print(future.result())
-            raw_log.write( json.dumps(future.result(),indent=2) + "\n" )
-    csvfile.close()
-    ilearn_shell_slo_to_csv(output)
-
-
-def classify_shell(lines):
-    # given a list of lines like this, determine status of shell
-    # (from linked_slos_term_180_compact.csv) outcomes.py all_linked_outcomes_in_term()
-
-    #
-    # courseid,coursename,ogid,oid,vendorguid,points,mastery,assessed,desc
-    # 16909,AH11 FA23 10003/10014/12251,10860,819,,5,3,False,Use scientific facts and principles to critically analyze nutrition information and use the information to assess personal diet and the diets of other cultures.
-    # 16909,AH11 FA23 10003/10014/12251,10860,820,,5,3,False,Evaluate nutrition information for accuracy and reliability.
-    # 16909,AH11 FA23 10003/10014/12251,10860,821,,5,3,False,Analyze and identify the relationship between nutrition and health.
-    # 16909,AH11 FA23 10003/10014/12251,10860,822,,5,3,False,Differentiate among food habits and practices related to traditional foods and preparation techniques in selected cultures or religions.
-    # 16909,AH11 FA23 10003/10014/12251,10860,823,,5,3,False,Analyze nutritional problems of selected cultures and create a nutritionally balanced menu.
-    #
-    # 1. number of outcomes
-    # 2. points are correct (max=3,mastery=2) or incorrect (max=5,mastery=3)
-    # 3. assessed or not
-
-    course_status = {'outcome_count':0, 'id':0, 'name':'', 'assessed_count':0, 'points_ok':1}
-
-    for L in lines:
-        #print(L)
-        #L = L.split(',')
-        course_status['outcome_count'] += 1
-        course_status['id'] = L[0]
-        course_status['name'] = L[1]
-        outcome_status = {'courseid':L[0],'coursename':L[1],'ogid':L[2],'oid':L[3],'vendorguid':L[4],'points':L[5],'mastery':L[6],'assessed':L[7],'desc':L[8], 'pointscorrect':0}
-        if L[5] == '5' and L[6] == '3':
-            outcome_status['pointscorrect'] = 0
-            course_status['points_ok'] = 0
-        elif (L[5] == '3.0' or L[5] == '3') and L[6] == '2':
-            outcome_status['pointscorrect'] = 1
-        else:
-            outcome_status['pointscorrect'] = -1
-        if L[7] == 'True':
-            course_status['assessed_count'] += 1
-    return course_status
-
-
-def find_recent_cqcourseid(code):
-    # code example: CSIS42
-    with open('cache/courses/all_courses_ranked.csv', 'r') as f:
-        reader = csv.reader(f)
-        next(reader) # skip header
-
-        # Filter rows matching the code
-        rows = [row for row in reader if row[0] == code]
-
-        print(f"All entries for {code}:\n{rows}")
-
-        if not rows:
-            raise ValueError(f"No rows found for code {code}")
-
-        # Sort by 'termineffect', in descending order
-        rows.sort(key=lambda row: row[3], reverse=True)
-
-        # Return cqcourseid of the first row
-        myrow = rows[0][1]
-        print(f"Using: {myrow}")
-        return myrow
-
-
-def remove_old_outcomes(course_id):
-    root_og, current_outcomes, subgroups = outcomes_in_shell(course_id)
-    print(f"Got {len(current_outcomes)} outcomes for course id {course_id}")
-    print(f"Current outcomes:\n{json.dumps(current_outcomes,indent=2)}")
-
-    # Try deleting them
-    if 1:
-        for deleted_outcome in current_outcomes:
-            print(f"Deleting outcome id {deleted_outcome['id']}")
-            u9 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{deleted_outcome['parent_group']}/outcomes/{deleted_outcome['id']}"
-            print(u9)
-
-            # make the DELETE request (update with your actual access token)
-            response = requests.delete(u9, headers=header)
-
-            # check the status of the request
-            if response.status_code == 200:
-                print(' Delete operation was successful')
-            else:
-                print(' Failed to delete, response code:', response.status_code)
-                print(' Response message:', response.text)
-
-
-def repair_outcome_points(course_id):
-    # Compare to what Outcomes SHOULD be
-    course = getCourses(course_id)
-    dept, code, crn = code_from_ilearn_name(course['name'])
-    xyz = input(f"Using: {code} for {course['name']}. Enter a different code or press enter to continue > ")
-    if xyz != '':
-        code = xyz
-    cq_course_id = find_recent_cqcourseid(code)
-    oc = codecs.open('cache/courses/alloutcomes.csv','r','utf-8')
-    reader = csv.reader(oc)
-    cols = next(reader) # skip header
-
-    # Filter rows matching the code
-    rows = [row for row in reader if row[1] == cq_course_id]
-    rows_dicts = [ {cols[i]: r[i] for i in range(len(cols))} for r in rows ]
-    abc = input(f"Using outcomes:\n{json.dumps(rows_dicts,indent=2)}\n\nPress enter to continue > ")
-
-    return
-
-    outcome_id = 0
-
-    data = {
-        'mastery_points': '2',
-        'calculation_method': 'decaying_average',
-        'calculation_int': '65',
-        'ratings[0][description]': 'Exceeds Expectations',
-        'ratings[0][points]': '3',
-        'ratings[1][description]': 'Meets Expectations',
-        'ratings[1][points]': '2',
-        'ratings[2][description]': 'Does Not Meet Expectations',
-        'ratings[2][points]': '0'
-    }
-
-    response = requests.put(f'{url}/api/v1/outcomes/{outcome_id}.json', headers=header, data=data)
-
-    if response.status_code == 200:
-        print(f"Successfully updated outcome with id {outcome_id}.")
-    else:
-        print(f"Failed to update outcome with id {outcome_id}. Error: {response.text}")
-
-def add_o_dept_dry_run():
-    add_o_dept(1)
-
-def add_o_whole_term():
-    course_groups = full_term_overview(0)
-
-    dept_shells_to_add = [ a for a in course_groups['no outcomes'] ]
-    sorted_dept_shells_to_add = sorted(dept_shells_to_add, key=lambda x: f"{x['dept']}{x['code']}")
-
-    print(f"Adding to {len(sorted_dept_shells_to_add)} shells.")
-
-    for shell in sorted_dept_shells_to_add:
-        print(f"Adding outcomes to {shell['name']}")
-        try:
-            add_outcome_to_course(shell['id'])
-        except Exception as e:
-            print(f"Failed on {shell['id']}: {e}")
-
-
-def add_o_dept(dry_run=0):
-    d = input("Enter dept or deps separated with a space > ")
-    d_list = d.split(' ')
-    course_groups = full_term_overview(0)
-
-    dept_shells_to_add = [ a for a in course_groups['no outcomes'] if a['dept'] in d_list ]
-    sorted_dept_shells_to_add = sorted(dept_shells_to_add, key=lambda x: f"{x['dept']}{x['code']}")
-
-    print(f"Adding to {len(sorted_dept_shells_to_add)} shells.")
-
-    for shell in sorted_dept_shells_to_add:
-        print(f"Adding outcomes to {shell['name']}")
-        if not dry_run:
-            try:
-                add_outcome_to_course(shell['id'])
-            except Exception as e:
-                print(f"Failed on {shell['id']}: {e}")
-        else:
-            print(" Dry run, not adding")
-
-def remove_all_bad_points():
-    course_groups = full_term_overview(0)
-
-    dept_shells_to_zap = [ a for a in course_groups['fix_points'] ]
-    for shell in dept_shells_to_zap:
-        print(f"Removing outcomes from {shell['name']}")
-        remove_old_outcomes(shell['id'])
-
-
-def full_term_overview(verbose=1):
-    out2 = codecs.open(f'cache/slo_status_{TERM}.json','w','utf-8')
-    out3 = codecs.open(f'cache/slo_status_{TERM}.txt','w','utf-8')
-    fn1 = f"cache/courses_in_term_{TERM}.json"
-    all_courses = json.loads(codecs.open(fn1,'r','utf-8').read())
-    all_courses_status = {}
-
-    # default values for all courses
-    for C in all_courses:
-        dept,code,crn = code_from_ilearn_name(C['name'])
-        all_courses_status[str(C['id'])] = {'outcome_count':0, 'id':C['id'], 'name':C['name'], 'dept':dept, 'code':code, 'crn':crn, 'assessed_count':0, 'points_ok':1}
-
-    # read the existing ilearn outcomes and group by shell
-    filename = f"cache/slo/linked_slos_term_{TERM}_compact.csv"
-    with open(filename, 'r') as csvfile:
-        reader = csv.reader(csvfile)
-        next(reader) # skip header
-
-        # Read the rows into a list
-        rows = list(reader)
-
-    # Sort the rows based on a specific column (e.g., column 0)
-    sorted_rows = sorted(rows, key=lambda x: x[0])
-
-    groups = []
-    current_group = []
-    last_courseid = None
-    for row in sorted_rows:
-        courseid = row[0]
-        if last_courseid != courseid and current_group:
-            # courseid changed from last row to current row
-            groups.append(current_group)
-            current_group = []
-        current_group.append(row)
-        last_courseid = courseid
-
-    # append the last group if any
-    if current_group:
-        groups.append(current_group)
-
-    for g in groups:
-        classified = classify_shell(g)
-        dept,code,crn = code_from_ilearn_name(g[0][1])
-        classified['dept'] = dept
-        classified['code'] = code
-        classified['crn'] = crn
-        all_courses_status[str(classified['id'])] = classified
-
-    #for C in all_courses_status:
-    #    print(all_courses_status[C])
-
-    course_groups = {
-        'no outcomes': [],
-        'ok': [],
-        'fix_points_and_scores': [],
-        'fix_points': []
-    }
-
-    for d in all_courses_status.values():
-        outcome_count = d['outcome_count']
-        points_ok = d['points_ok']
-        assessed_count = d['assessed_count']
-
-        if outcome_count == 0:
-            course_groups['no outcomes'].append(d)
-        elif points_ok == 1 and assessed_count > 0:
-            course_groups['fix_points_and_scores'].append(d)
-        elif points_ok == 0 and assessed_count > 0:
-            course_groups['fix_points_and_scores'].append(d)
-        elif points_ok == 1:
-            course_groups['ok'].append(d)
-        elif points_ok == 0:
-            course_groups['fix_points'].append(d)
-
-    # Print out the groups
-    out2.write(json.dumps(course_groups,indent=2))
-    if verbose:
-        for group, dicts in course_groups.items():
-            sorted_dicts = sorted(dicts, key=lambda x: f"{x['dept']}{x['code']}")
-            print(f"{group} - {len(sorted_dicts)} item(s)")
-            out3.write(f"{group} - {len(sorted_dicts)} item(s)\n")
-            for d in sorted_dicts:
-                print(d)
-                out3.write(str(d) + "\n")
-            print("\n")
-            out3.write("\n")
-
-    return course_groups
-
-def fetch_term_outcomes_and_report():
-    get_outcomes_term_index()
-    full_term_overview()
-
-if __name__ == "__main__":
-    options = { 1: ['Refresh term outcome list & report', fetch_term_outcomes_and_report],
-        2: ['Add outcomes to unset courses in whole term', add_o_whole_term],
-        3: ['Add outcomes to course id', add_outcome_to_course],
-        4: ['Fix outcome points', remove_old_outcomes],
-        5: ['Add outcomes to dept, dry run', add_o_dept_dry_run],
-        6: ['Add outcomes to dept', add_o_dept],
-        7: ['Remove all outcomes with wrong points', remove_all_bad_points],
-    }
-    print ('')
-
-    if len(sys.argv) > 1 and re.search(r'^\d+',sys.argv[1]):
-        resp = int(sys.argv[1])
-        print("\n\nPerforming: %s\n\n" % options[resp][0])
-
-    else:
-        print ('')
-        for key in options:
-            print(str(key) + '.\t' + options[key][0])
-
-        print('')
-        resp = input('Choose: ')
-
-    # Call the function in the options dict
-    options[ int(resp)][1]()

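Both `outcomes.py` and `outcomes2022.py` end with the same numbered-menu dispatcher: an `options` dict of `[label, function]` pairs, chosen either from `sys.argv[1]` or interactively. A stripped-down sketch of that dispatch (the entries here are illustrative):

```python
import sys, re

options = {
    1: ['Say hello', lambda: print('hello')],  # illustrative entries
    2: ['Say bye',   lambda: print('bye')],
}

if __name__ == "__main__":
    if len(sys.argv) > 1 and re.search(r'^\d+', sys.argv[1]):
        resp = int(sys.argv[1])                # choice given on the command line
    else:
        for key in options:
            print("%s.\t%s" % (key, options[key][0]))
        resp = int(input('Choose: '))
    options[resp][1]()                         # call the selected function
```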
17
pipelines.py

@@ -2084,25 +2084,22 @@ def expand_old_semesters():
     input('press return to continue.')

 # Input: xxxx_sched.json. Output: xxxx_latestarts.txt
-def list_latestarts(term="fa23"):
+def list_latestarts(term="su23"):

     show_summary = 1

     the_year = '20' + term[2:4]
     print("year: ", the_year, " semester: ", term)

-    #term_in = "cache/%s_sched.json" % term
+    term_in = "cache/%s_sched.json" % term
     term_out = "cache/%s_latestarts.txt" % term
     expanded_out = "%s_sched_expanded.json" % term
     print("Writing output to " + term_out)
-    #infile = codecs.open(term_in, "r", "utf-8")
+    infile = codecs.open(term_in, "r", "utf-8")
     outfile = codecs.open(term_out, "w", "utf-8")
     exoutfile = codecs.open('cache/' + expanded_out, "w", "utf-8")
     expanded = []
-    #sched = json.loads(infile.read())
+    sched = json.loads(infile.read())

-    sched = requests.get(f"http://gavilan.cc/schedule/{term}_sched.json").json()
     #print sched
     by_date = {}

@@ -2181,14 +2178,8 @@ def list_latestarts(term="fa23"):
             #print(Y)
             #outfile.write("\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher'] + "\t" + Y['type'] +"\n")
             outfile.write("\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher'] + "\t" + Y['type'] + "\t" + "\n")
-    outfile.close()
     put_file('/home/public/schedule/', 'cache/', "%s_latestarts.txt" % term, 0)
     return expanded

 if __name__ == "__main__":

     print ('')

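The `list_latestarts` change swaps a web fetch of the schedule from gavilan.cc for the local cache copy. A sketch that prefers the published JSON and falls back to the cached file (`load_schedule` is an illustrative helper, not in the repo):

```python
import codecs, json, requests

def load_schedule(term):
    # Try the published schedule first; fall back to the local cache copy.
    try:
        r = requests.get(f"http://gavilan.cc/schedule/{term}_sched.json", timeout=30)
        r.raise_for_status()
        return r.json()
    except requests.RequestException:
        with codecs.open(f"cache/{term}_sched.json", "r", "utf-8") as f:
            return json.load(f)
```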
409
stats.py

@@ -53,16 +53,13 @@

 -
 """
-import codecs, os, warnings, itertools
+import codecs, os
 import json, csv, requests, sys, re
-import numpy as np
-import pandas as pd
 from multiprocessing import Semaphore
 from statistics import mean, median, stdev
 from pipelines import fetch, url
 from courses import getCoursesInTerm, course_enrollment
 from localcache import get_course_enrollments
-from localcache import query_multiple
 from collections import defaultdict

 all_grades_file = f"cache/grades_all.csv"

@@ -883,403 +880,6 @@ def cluster_student_histories():

     df = pd.read_csv(infile)

-def dept(s):
-    parts = s.split(' ')
-    return parts[0]
-
-def try_make_sched():
-    term = "fa23"
-    sched = requests.get(f"http://gavilan.cc/schedule/{term}_sched.json").json()
-    #print(json.dumps(sched,indent=2))
-
-    d = "CSIS"
-    courses = [ [x['code'], x['crn']] for x in sched if dept(x['code'])==d ]
-    teachers = { x['teacher'] for x in sched if dept(x['code'])==d }
-
-    print(courses)
-    print(teachers)
-
-
-def sched_lookup_tables():
-
-    # Renumber the semesters
-    # sp16 su16 fa16 wi17 sp17 su17 fa17 wi18
-    #semesters = "sp18 su18 fa18 wi19 sp19 su19 fa19 wi20 sp20 su20 fa20 wi21 sp21 su21 fa21 wi22 sp22 su22 fa22 wi23 sp23 su23 fa23 wi24 sp24 su24 fa24 wi25 sp25 su25 fa25 wi26".split(" ")
-
-    sem_fourcode = "sp18 su18 fa18 sp19 su19 fa19 sp20 su20 fa20 sp21 su21 fa21 sp22 su22 fa22 sp23 su23 fa23 sp24 su24 fa24 sp25 su25 fa25".split(" ")
-    int_numbers = [x for x in range(1,len(sem_fourcode)+1)]
-    fourcode_2_int = {semester: number for semester, number in zip(sem_fourcode, int_numbers)}
-    int_2_fourcode = {v: k for k, v in fourcode_2_int.items()}
-
-    sis_2_fourcode = {}
-    fourcode_2_sis = {}
-    yr = 2018
-    sems = ['30','50','70']
-    i = 0
-    semcodes = []
-    while yr < 2026:
-        for s in sems:
-            semcodes.append(f"{yr}{s}")
-            sis_2_fourcode[f"{yr}{s}"] = sem_fourcode[i]
-            fourcode_2_sis[sis_2_fourcode[f"{yr}{s}"]] = f"{yr}{s}"
-            #print(f"UPDATE schedule SET semsis={yr}{s} WHERE sem='{semesters[i]}';")
-            i += 1
-        yr += 1
-    return fourcode_2_int, int_2_fourcode, sis_2_fourcode, fourcode_2_sis, semcodes

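A short usage example for the lookup tables returned by the removed `sched_lookup_tables()` above (the expected values follow from the lists in the function body):

```python
fourcode_2_int, int_2_fourcode, sis_2_fourcode, fourcode_2_sis, semcodes = sched_lookup_tables()

print(fourcode_2_int['sp18'])    # 1 -- first entry in sem_fourcode
print(sis_2_fourcode['201830'])  # 'sp18'
print(fourcode_2_sis['fa23'])    # '202370'
```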
def section_stats_bymode():
|
|
||||||
data = query_multiple("SELECT code, semsis, COUNT(id) AS sections, sum(act) filter (WHERE type='in-person') AS inperson, sum(act) filter (WHERE type='online') AS online, sum(act) filter (WHERE type='hybrid') AS hybrid, sum(act) filter (WHERE type='online live') AS onlinelive FROM schedule GROUP BY code, semsis ORDER BY code, semsis;", 'cache/canvas_data/data20231012.db')
|
|
||||||
import pandas as pd
|
|
||||||
df = pd.DataFrame(data)
|
|
||||||
df.fillna(0,inplace=True)
|
|
||||||
for L in 'sections,inperson,online,hybrid,onlinelive'.split(','):
|
|
||||||
df[L] = df[L].astype(int)
|
|
||||||
print(df)
|
|
||||||
df.to_csv('cache/section_stats_bymode.csv')
|
|
||||||
return df
|
|
||||||
|
|
||||||
def section_stats():
|
|
||||||
# for each course, (ENG1A) how many are enrolled in each all sections?
|
|
||||||
# (and break down by mode,time,location,etc)
|
|
||||||
#
|
|
||||||
# for each course, how many are first semester gav students?
|
|
||||||
#
|
|
||||||
data = query_multiple("SELECT * FROM schedule ORDER BY code,id", 'cache/canvas_data/data20231012.db')
|
|
||||||
|
|
||||||
fourcode_2_int, int_2_fourcode, sis_2_fourcode, fourcode_2_sis, semcodes = sched_lookup_tables()
|
|
||||||
|
|
||||||
# Assuming your data is in a list of dictionaries called data
|
|
||||||
df = pd.DataFrame(data)
|
|
||||||
|
|
||||||
# Drop the specified columns
|
|
||||||
df = df.drop(columns=['id', 'crn', 'units', 'teacher', 'start', 'end', 'loc', 'cap'])
|
|
||||||
|
|
||||||
codecs.open('cache/sem_mapping.json','w','utf-8').write(json.dumps(fourcode_2_int,indent=2))
|
|
||||||
|
|
||||||
df['sem'] = df['sem'].map(fourcode_2_int)
|
|
||||||
df.set_index('sem', inplace=True)
|
|
||||||
return df
|
|
||||||
|
|
||||||
def simple_exp_smoothing_section_model():
|
|
||||||
sout = codecs.open('cache/section_predictions.txt','w','utf-8')
|
|
||||||
from statsmodels.tsa.api import SimpleExpSmoothing
|
|
||||||
warnings.filterwarnings("ignore")
|
|
||||||
periods = 3
|
|
||||||
start = 19
|
|
||||||
|
|
||||||
df = section_stats()
|
|
||||||
print(df)
|
|
||||||
df = df.sort_index()
|
|
||||||
|
|
||||||
predictions = {}
|
|
||||||
for course_code in df['code'].unique():
|
|
||||||
try:
|
|
||||||
print(course_code)
|
|
||||||
sout.write(course_code + "\n")
|
|
||||||
this_set = df[df['code'] == course_code]['act']
|
|
||||||
this_set = this_set.groupby('sem').sum()
|
|
||||||
#this_set.fillna(method='ffill', inplace=True)
|
|
||||||
#this_set.fillna(0, inplace=True)
|
|
||||||
|
|
||||||
# Create a new index with all required semesters
|
|
||||||
new_index = np.arange(this_set.index.min(), this_set.index.max()+1)
|
|
||||||
|
|
||||||
# Reindex the DataFrame and fill missing values with 0
|
|
||||||
this_set = this_set.reindex(new_index, fill_value=0)
|
|
||||||
|
|
||||||
print(this_set.to_string())
|
|
||||||
|
|
||||||
sout.write(this_set.to_string() + "\n")
|
|
||||||
model = SimpleExpSmoothing(this_set)
|
|
||||||
fit = model.fit(smoothing_level=0.2) # initiate with a smoothing level of 0.2
|
|
||||||
# Later modify above line based on if your data has high or low variability
|
|
||||||
|
|
||||||
#prediction = fit.forecast(start=32,end=34) # predict attendance for the next 3 semesters
|
|
||||||
prediction = fit.predict(start=start,end=start+4)
|
|
||||||
print(prediction)
|
|
||||||
sout.write(str(prediction) + "\n")
|
|
||||||
sout.flush()
|
|
||||||
predictions[course_code] = prediction
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Model creation failed for {course_code} due to {str(e)}")
|
|
||||||
sout.write(f"Model creation failed for {course_code} due to {str(e)}\n")
|
|
||||||
"""
|
|
||||||
model = ARIMA(this_set, order=(1,1,1)) #ARIMA params (p, d, q)
|
|
||||||
model_fit = model.fit()
|
|
||||||
forecast_result = model_fit.forecast(steps=periods)
|
|
||||||
if forecast_result:
|
|
||||||
predictions[course_code] = forecast_result[0]
|
|
||||||
else:
|
|
||||||
print(f"No prediction for {course_code}. Skipping...")"""
|
|
||||||
|
|
||||||
|
|
||||||
# statistics - use a smooth exponential model to predict the next 3 semesters of enrollment
|
|
||||||
# Doesn't really seem to get the patterns.
|
|
||||||
def exp_smoothing_section_model():
|
|
||||||
sout = codecs.open('cache/section_predictions.txt','w','utf-8')
|
|
||||||
from statsmodels.tsa.api import ExponentialSmoothing
|
|
||||||
warnings.filterwarnings("ignore")
|
|
||||||
periods = 3
|
|
||||||
start = 19
|
|
||||||
|
|
||||||
fourcode_2_int, int_2_fourcode, sis_2_fourcode, fourcode_2_sis, semcodes = sched_lookup_tables()
|
|
||||||
|
|
||||||
df = section_stats()
|
|
||||||
print(df)
|
|
||||||
df = df.sort_index()
|
|
||||||
|
|
||||||
predictions = {}
|
|
||||||
for course_code in df['code'].unique():
|
|
||||||
try:
|
|
||||||
print(course_code)
|
|
||||||
#sout.write(course_code + "\n")
|
|
||||||
this_set = df[df['code'] == course_code]['act']
|
|
||||||
this_set = this_set.groupby('sem').sum()
|
|
||||||
#this_set.fillna(method='ffill', inplace=True)
|
|
||||||
#this_set.fillna(0, inplace=True)
|
|
||||||
|
|
||||||
# Create a new index with all required semesters
|
|
||||||
new_index = np.arange(this_set.index.min(), this_set.index.max()+1)
|
|
||||||
|
|
||||||
# Reindex the DataFrame and fill missing values with 0
|
|
||||||
this_set = this_set.reindex(new_index, fill_value=0)
|
|
||||||
|
|
||||||
print(this_set.to_string())
|
|
||||||
|
|
||||||
for i,v in this_set.items():
|
|
||||||
sout.write(f"{course_code},{int_2_fourcode[i]},{v}\n")
|
|
||||||
|
|
||||||
model = ExponentialSmoothing(this_set, seasonal_periods=4, trend='add', seasonal='add')
|
|
||||||
fit = model.fit()
|
|
||||||
|
|
||||||
prediction = fit.predict(start=start,end=start+4)
|
|
||||||
print(prediction)
|
|
||||||
for i,v in prediction.items():
|
|
||||||
v = int(v)
|
|
||||||
if v<0: v=0
|
|
||||||
sout.write(f"{course_code},{int_2_fourcode[i]}, {v}\n")
|
|
||||||
sout.flush()
|
|
||||||
predictions[course_code] = prediction
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Model creation failed for {course_code} due to {str(e)}")
|
|
||||||
#sout.write(f"Model creation failed for {course_code} due to {str(e)}\n")
|
|
||||||
|
|
||||||
def student_by_semester():
|
|
||||||
|
|
||||||
query = """
|
|
||||||
SELECT u.name, u.canvasid, s.code, s.semsis FROM users u
|
|
||||||
JOIN enrollment e ON u.id = e.user_id
|
|
||||||
JOIN courses c ON c.id = e.course_id
|
|
||||||
JOIN terms t ON c.termid = t.id
|
|
||||||
JOIN schedule s ON c.schedule = s.id
|
|
||||||
WHERE e.type='StudentEnrollment' AND e.workflow='active'
|
|
||||||
ORDER BY u.sortablename, s.semsis;
|
|
||||||
"""
|
|
||||||
|
|
||||||
df = pd.DataFrame(query_multiple(query, 'cache/canvas_data/data20231012.db'))
|
|
||||||
|
|
||||||
# Apply groupby and aggregate the courses in each semester in a comma-separated string
|
|
||||||
df['courses'] = df.groupby(['name','canvasid','semsis'])['code'].transform(lambda x : ' / '.join(x))
|
|
||||||
|
|
||||||
# Removing duplicates
|
|
||||||
df = df[['name','canvasid','semsis','courses']].drop_duplicates()
|
|
||||||
|
|
||||||
# Create pivot table
|
|
||||||
df_pivot = df.pivot_table(values='courses', index=['name','canvasid'], columns='semsis', aggfunc='first').reset_index()
|
|
||||||
|
|
||||||
# Adding prefix to new columns names to recognize them
|
|
||||||
df_pivot.columns = [str(col) + '_sem' if isinstance(col, int) else col for col in df_pivot.columns]
|
|
||||||
|
|
||||||
df_pivot.to_csv('cache/student_by_semester.csv')
def sections_grouped_by_year_mode():
    df = section_stats_bymode()

    # list of unique courses
    df_all_courses = df['code'].unique()

    # list of unique semesters
    df_all_semesters = df['semsis'].unique()
    df_all_semesters.sort()

    raw_data = {}
    # iterate rows, not columns: plain `for line in df` yields column names
    for _, line in df.iterrows():
        print(line['semsis'])
        sis = str(line['semsis'])
        year = sis[0:4]
        raw_data[f"{line['code']}{year}"] = [line['inperson'], line['online'], line['hybrid'], line['onlinelive']]
    print(raw_data)
    return   # work in progress: the grouping below is unfinished

    for course in df_all_courses:
        c = str(course)
        template = {'code':[c,c,c], 'semsis':[], 'inperson':[], 'online':[], 'hybrid':[], 'onlinelive':[]}

        # group semesters into groups of 3 by year
        for i in df_all_semesters:
            j = str(i)
            year = j[0:4]
            print(f"{i} ({year})")

        # for each course, for each group of 3 semesters, fill in values, using 0 if necessary

        # ...
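
# Sketch of the intended yearly regrouping using pandas instead of the manual
# loops above (assumes the section_stats_bymode() frame has the columns used
# here; the helper name is illustrative):
def group_sections_by_year(df):
    df = df.copy()
    df['year'] = df['semsis'].astype(str).str[0:4]    # e.g. '202370' -> '2023'
    # sum each year's (up to three) semesters per course; missing combos become 0
    return (df.groupby(['code', 'year'])[['inperson', 'online', 'hybrid', 'onlinelive']]
              .sum()
              .unstack(fill_value=0))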
def lstm_model_sections():
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LSTM
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.model_selection import train_test_split

    # Preprocessing

    # Normalize inputs for better performance
    df = section_stats_bymode()
    print(df)
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset_scaled = scaler.fit_transform(df.drop(['code', 'semsis'], axis=1))
    print("scaled:")
    print(dataset_scaled)

    # Split features and targets (assuming we want to predict 'online' enrollments,
    # which is column 0 after dropping 'code' and 'semsis')
    X = dataset_scaled[:, 1:]
    Y = dataset_scaled[:, 0:1]

    # Train / test split
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

    # Reshape input to [samples, time steps, features], which is required for LSTM
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    print("x_train shape:", x_train.shape)
    print(x_train)

    print("\n\nTraining...\n\n")

    # LSTM architecture
    model = Sequential()
    model.add(LSTM(50, input_shape=(X.shape[1], 1)))  # 50 LSTM units
    model.add(Dense(1))  # predicting a single output ('online' enrollments)
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(x_train, y_train, epochs=5, batch_size=1)  # Training the model

    # Prediction
    scaler_predict = MinMaxScaler()
    scaler_predict.fit_transform(df[['online']])
    trainPredict = model.predict(x_train)
    testPredict = model.predict(x_test)

    # Invert predictions (undo the normalization)
    trainPredict = scaler_predict.inverse_transform(trainPredict)
    testPredict = scaler_predict.inverse_transform(testPredict)

    # Now the future prediction is in testPredict.
    print("Predictions:")
    print(testPredict)
    np.savetxt('cache/section_predictions_lstm.txt', testPredict, fmt='%f')

    # I'm lost here...
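
# One reason to be "lost here": train_test_split treats each row as an
# independent sample, but an LSTM wants ordered windows of a course's history.
# Minimal windowing sketch (assumes `series` is one course's enrollment counts
# in semester order, e.g. a numpy array; the helper name is illustrative):
def make_lstm_windows(series, lookback=4):
    X, y = [], []
    for t in range(len(series) - lookback):
        X.append(series[t:t + lookback])   # lookback semesters of history...
        y.append(series[t + lookback])     # ...predict the following semester
    X = np.array(X).reshape(-1, lookback, 1)   # [samples, time steps, features]
    return X, np.array(y)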
def visualize_course_modes_multi_semester():
    import plotly.express as px
    from plotly.subplots import make_subplots

    seasons = {'sp':'30','su':'50','fa':'70'}
    semcodes = "sp18 su18 fa18 sp19 su19 fa19 sp20 su20 fa20 sp21 su21 fa21 sp22 su22 fa22 sp23 su23 fa23 sp24".split(" ")
    # sems = {'sp23':'202330','su23':'202350','fa23':'202370'}
    sems = { x: '20' + x[2:] + seasons[x[:2]] for x in semcodes }   # e.g. 'fa23' -> '202370'

    sem_dfs = []
    sem_dfs_depts = []
    for s in sems.keys():
        sched = requests.get(f"http://gavilan.cc/schedule/{s}_sched_expanded.json").json()
        for crs in sched:
            if 'extra' in crs: del crs['extra']
            crs['dept'] = crs['code'].split(' ')[0]
        df = pd.DataFrame(sched)
        df_depts = df.copy()
        drop_cols = ['crn','sec','code','cmp','name','days','time','rem','wl_cap','wl_act','wl_rem','teacher','date','loc','ztc','time_start','time_end','start','end','doy']
        df_depts = df_depts.drop(columns=drop_cols)
        df = df.drop(columns=drop_cols)

        grouped_by_dept = df_depts.groupby(['dept','type']).size().reset_index(name='count')
        grouped_by_mode = df['type'].value_counts().reset_index()
        grouped_by_dept["semester"] = sems[s]
        grouped_by_mode["semester"] = sems[s]
        #print(grouped_by_dept)
        sem_dfs.append(grouped_by_mode)
        sem_dfs_depts.append(grouped_by_dept)

        #grouped_json = grouped_by_dept.to_json(orient='records')
        #j = json.loads(grouped_json)
        #print(json.dumps(j,indent=2))

        #grouped_by_dept.columns = ['Department', 'Count'] # rename the column names appropriately
        #fig = px.bar(grouped_by_dept, x='Department', y='Count', title='Section Counts by Department')
        #fig.write_html(f"cache/output_{s}.html")

    # One stacked bar per semester, colored by delivery mode
    combined_data = pd.concat(sem_dfs, axis=0)
    combined_data = combined_data.rename(columns={'type':'count','index':'type'})
    combined_data.reset_index(drop=True, inplace=True)
    pivoted_data = combined_data.pivot(index='semester', columns='type', values='count')
    pivoted_data.reset_index(inplace=True)

    fig = px.bar(pivoted_data, x='semester', y=['hybrid', 'in-person', 'online', 'online live'], barmode='stack',
                 title='Course Delivery by Semester',
                 color_discrete_sequence=["#000066","#660000","#333366","#9400D3"])
    fig.write_html("cache/sections_by_deliverymode.html")

    combined_data_depts = pd.concat(sem_dfs_depts, axis=0)
    combined_data_depts.reset_index(drop=True, inplace=True)
    #print(combined_data_depts)
    combined_data_depts.to_csv('cache/section_delivery_by_dept.csv')

    '''pivoted_data_depts = combined_data_depts.pivot(index='semester', columns='type', values='count')
    pivoted_data_depts.reset_index(inplace=True)

    fig = px.bar(pivoted_data_depts, x='semester', y=['hybrid', 'in-person', 'online', 'online live'], barmode='stack',
                 title='Course Delivery by Semester',
                 color_discrete_sequence=["#000066","#660000","#333366","#9400D3"])
    fig.write_html("cache/sections_depts_by_deliverymode.html")'''

    # One stacked-bar subplot per department
    unique_depts = combined_data_depts['dept'].unique()
    fig = make_subplots(rows=len(unique_depts), cols=1,
                        subplot_titles=list(unique_depts),
                        )
    for i, dept in enumerate(unique_depts, start=1):
        #if i>1: break
        # Filter the dataframe for the current department
        dept_data = combined_data_depts[combined_data_depts['dept'] == dept]

        # Pivot the data frame
        pivoted_dept_data = dept_data.pivot(index='semester', columns='type', values='count').reset_index()
        pivoted_dept_data.fillna(0, inplace=True)
        print(pivoted_dept_data)

        # Plot the data; small departments may be missing some delivery modes entirely
        columns_to_plot = ['hybrid', 'in-person', 'online', 'online live']
        valid_columns = [col for col in columns_to_plot if col in pivoted_dept_data.columns]
        fig_sub = px.bar(pivoted_dept_data, x='semester', y=valid_columns, barmode='stack',
                         #title=f'Course Delivery by Semester for {dept}',
                         color_discrete_sequence=["#000066","#660000","#333366","#9400D3"])
        fig.add_traces(fig_sub['data'], rows=[i]*len(fig_sub['data']), cols=[1]*len(fig_sub['data']))

    fig.update_layout(height=70*len(fig['data']), width=1100, showlegend=False)
    fig.write_html("cache/sections_depts_by_deliverymode.html")
@ -1296,13 +896,6 @@ if __name__ == "__main__":
        9: ['semester startdates list', semester_dates],
        10: ['normalize course histories', normalize_course_histories],
        11: ['cluster student histories', cluster_student_histories],
        12: ['try to make a schedule', try_make_sched],
        13: ['ES model section predict attendance', exp_smoothing_section_model],
        14: ['section stats by mode', section_stats_bymode],
        15: ['student courses by semester', student_by_semester],
        16: ['LSTM model sections', lstm_model_sections],
        17: ['rearrange section data to yearly form', sections_grouped_by_year_mode],
        30: ['visualize course modes multi semester', visualize_course_modes_multi_semester],
    }
    print ('')
132
tasks.py
@ -15,7 +15,7 @@
import pysftp, os, datetime, requests, re, json, sqlite3, codecs, csv, sys
import funcy, os.path, shutil, urllib
from datetime import datetime, strptime
from datetime import datetime
from collections import defaultdict
#from datetime import strptime
from time import mktime
@ -1317,134 +1317,6 @@ def update_auth():

    #print(json.dumps(r,indent=2))
def print_a_calendar():
    import datetime
    cur_week = datetime.date.today().isocalendar()[1]
    print(f"Current week number: {cur_week}")

    import time
    from time import gmtime, strftime
    #d = time.strptime("3 Jul 2023", "%d %b %Y")
    #print(strftime(d, '%U'))

    import calendar

    # Specify the year
    year = 2023

    if 0:
        # Create a calendar for the entire year
        cal = calendar.Calendar()

        # Iterate over each month of the year
        for month in range(1, 13):
            # Print the month name
            month_name = calendar.month_name[month]
            print(f"\n{month_name} ({year})")

            # Print the weekday abbreviations; the 'w' column holds the ISO week number
            weekdays = ['w ', 'M ', 'T ', 'W ', 'Th', 'F ', 'Sa', 'S ']
            print(' '.join([f"{day:<3}" for day in weekdays]))

            # Get the month's calendar
            month_calendar = cal.monthdatescalendar(year, month)

            # Iterate over each week in the month
            for week in month_calendar:
                # Extract the week number and days of the week
                week_number = week[0].isocalendar()[1]
                week_days = [day.day if day.month == month else '' for day in week]

                # Print the week number and days
                print(f"{week_number:<4}", end=' ')
                print(' '.join([f"{day:<2}" for day in week_days]))


    ### ### ### ###


    def generate_custom_calendar(year, semesters):
        # Create a calendar for the entire year
        cal = calendar.Calendar()

        # Iterate over each month of the year
        for month in range(1, 13):
            # Print the month name
            month_name = calendar.month_name[month]
            print(f"\n{month_name} {year}")

            # Print the weekday abbreviations, plus a 'sem' column for the week-of-semester
            weekdays = ['w ', 'M ', 'T ', 'W ', 'Th', 'F ', 'Sa', 'S ', 'sem']
            print(' '.join([f"{day:<3}" for day in weekdays]))

            # Get the month's calendar
            month_calendar = cal.monthdatescalendar(year, month)

            # Iterate over each week in the month
            for week in month_calendar:
                # Extract the week number and days of the week
                week_number = week[0].isocalendar()[1]
                week_days = [day.day if day.month == month else '' for day in week]

                #print("week: ", week)

                # Determine the value for the 'sem' column
                sem_value = ' '
                for (label, start_week, num_weeks) in semesters:
                    if week_number >= start_week and week_number < start_week + num_weeks:
                        sem_value = (week_number - start_week) + 1

                # Print the week number, days, and the week-of-semester column
                print(f"{week_number:<4}", end=' ')
                print(' '.join([f"{day:<2}" for day in week_days]) + f" {sem_value:<2}")

    # Example usage: each semester is "label,start date,number of weeks"
    semesters = [ "su23,06/12,6", "fa23,08/28,16" ]
    l_semesters = []
    for sem in semesters:
        column_label, start_date, num_weeks = sem.split(',')
        start_dt = datetime.datetime.strptime(start_date + "/" + str(year), "%m/%d/%Y")
        start_wk = start_dt.isocalendar()[1]
        l_semesters.append( (column_label, start_wk, int(num_weeks)) )
    generate_custom_calendar(year, l_semesters)
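
# Quick sanity check of the week math above (a sketch; values are for the 2023
# calendar): fa23 starts 08/28/2023, which is ISO week 35, so the 16-week
# semester runs through ISO week 50.
def _week_number_demo():
    import datetime
    d = datetime.datetime.strptime("08/28/2023", "%m/%d/%Y")
    print(d.isocalendar()[1])   # -> 35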
def word_calendar():
    from docx import Document
    from docx.shared import Inches
    import datetime

    # Define the start date
    start_date = datetime.date(2023, 8, 28)

    # Prepare a list of 18 weeks beginning from the start date
    dates = [start_date + datetime.timedelta(weeks=x) for x in range(18)]

    # Initialize a Word document with a three-column table
    doc = Document()
    table = doc.add_table(rows=1, cols=3)

    # Set the headers
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = 'Week'
    hdr_cells[1].text = 'Date'
    hdr_cells[2].text = 'Events/Notes'

    # Add one row per week
    for i, date in enumerate(dates):
        cells = table.add_row().cells
        cells[0].text = str(i+1)
        cells[1].text = date.strftime("%B %d")
        cells[2].text = ''

    # Save the document
    doc.save('cache/tasks_schedule.docx')
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|
||||||
|
|
@ -1459,8 +1331,6 @@ if __name__ == "__main__":
|
||||||
10: ['dumb rename images mistake',file_renamer] ,
|
10: ['dumb rename images mistake',file_renamer] ,
|
||||||
11: ['list auth', list_auth],
|
11: ['list auth', list_auth],
|
||||||
12: ['update auth', update_auth],
|
12: ['update auth', update_auth],
|
||||||
13: ['print a calendar', print_a_calendar],
|
|
||||||
14: ['create a week calendar in word', word_calendar],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(sys.argv) > 1 and re.search(r'^\d+',sys.argv[1]):
|
if len(sys.argv) > 1 and re.search(r'^\d+',sys.argv[1]):
|
||||||
|
|
|
||||||
|
|
@ -1,47 +0,0 @@
<!DOCTYPE html>
<html>
<head>
  <title>Interactive Text Program</title>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.1.2/socket.io.js"></script>
  <script>
    var socket = io();

    socket.on('output', function(data) {
      // Append the received text output to the page
      var outputDiv = document.getElementById('output');
      outputDiv.innerHTML += data.data + '<br>';
      outputDiv.scrollTop = outputDiv.scrollHeight;
    });

    function sendInput() {
      var inputField = document.getElementById('input');
      var userInput = inputField.value;

      // Send the user input to the server
      socket.emit('input', {data: userInput});

      // Clear the input field
      inputField.value = '';
    }

    const eventSource = new EventSource('/stream');

    eventSource.addEventListener('output', function (event) {
      const data = JSON.parse(event.data);
      // Update the web page with the received output data
      // (e.g., append it to a <div> element)
    });

    eventSource.onerror = function () {
      // Handle errors
    };

  </script>
</head>
<body>
  <div id="output"></div>
  <input type="text" id="input" onkeydown="if (event.key === 'Enter') sendInput()" autofocus>
  <button onclick="sendInput()">Send</button>
</body>
</html>
115
users.py
@ -26,7 +26,7 @@ from threading import Thread
from os import path

# for NLP
#import spacy
import spacy
from gensim import corpora, models, similarities, downloader, utils
from nltk import stem
@ -1994,7 +1994,7 @@ def nlp_sample():
    for document_number, score in sorted(enumerate(sims), key=lambda x: x[1], reverse=True):
        print(document_number, score)

'''
def nlp_sample2():
    # load english language model
    nlp = spacy.load('en_core_web_sm', disable=['ner','textcat'])

@ -2006,7 +2006,8 @@ def nlp_sample2():

    for token in doc:
        print(token.text, '->', token.pos_)
'''
@ -2034,26 +2035,9 @@ def find_new_teachers():
    for J in jj:
        print( J['teacher'])


def user_db_sync():
    # users currently in the conference db
    conusr = fetch("http://192.168.1.6:8080/dir_api.php?users=1")
    conusr_emails = set([x.lower() for x in funcy.pluck('email', conusr)])

    # fetch all staff from iLearn (ILRN); unique emails
    ilrn = json.loads(codecs.open("cache/ilearn_staff.json", "r", "utf-8").read())
    ilrn_emails = set([x.lower() for x in funcy.pluck('email', ilrn)])

    for e in ilrn_emails:
        if not (e in conusr_emails) and e.endswith('@gavilan.edu'):
            E = funcy.first(funcy.where(ilrn, email=e))
            goo = E['login_id'][3:]   # drop the 3-character prefix of the login id
            #print("not in conf_user: %s \t %s \t %s" % (e, E['short_name'], E['login_id']))
            print("INSERT INTO conf_users (goo,email,name) VALUES ('%s', '%s', '%s');" % (goo, e, E['short_name']))
def user_db_sync2():
    # fetch all personnel dir entries from dir_api.php (PERSL); unique emails
    persl = fetch("http://hhh.gavilan.edu/phowell/map/dir_api.php?personnel=1")
    persl_emails = set([x.lower() for x in funcy.pluck('email', persl)])
@ -2228,92 +2212,6 @@ def compare_db_tables():
    for e in common_emails:
        out.write(f"update `conf_users` set `p2id`='{by_email_conf[e]['id']}' where lower(`email`)='{e}';\n")
# given a list of classes, report back about the student on one row of info
def student_history_analysis(sh):
    from functools import reduce
    semesters_set = set()
    num_sems = 0
    num_course = len(sh)
    num_units = 0
    units_online = 0
    units_inperson = 0
    units_hybrid = 0
    units_ol = 0
    fa_23_units = 0
    fa_23_online_units = 0
    fa23_courses = 0
    fa23_onlinecourses = 0

    #un_list = [ float(x['units'].split('-')[0].split('/')[0]) for x in sh ]
    #num_units = reduce(lambda x,y: x+y, un_list)
    for section in sh:
        semesters_set.add(section['sis'])
        # unit strings can look like '3', '1-4', or '2/3'; take the first number
        units = float(section['units'].split('-')[0].split('/')[0])
        num_units += units
        if section['type'] == 'in-person': units_inperson += units
        if section['type'] == 'online': units_online += units
        if section['type'] == 'hybrid': units_hybrid += units
        if section['type'] == 'online live': units_ol += units

        if section['sis'] == '202370':   # fall 2023
            fa_23_units += units
            fa23_courses += 1
            if not section['type'] == 'in-person':
                fa_23_online_units += units
                fa23_onlinecourses += 1

    num_sems = len(semesters_set)
    if num_units == 0:
        pct_online = 0
    else:
        pct_online = round(100 * (units_online+units_hybrid+units_ol) / num_units, 1)

    if fa_23_units == 0:
        fa_23_pct_online = 0
    else:
        fa_23_pct_online = round(100 * (fa_23_online_units) / fa_23_units, 1)

    if fa23_courses == 0:
        fa23_pct_course_online = 0
    else:
        fa23_pct_course_online = round(100 * (fa23_onlinecourses) / fa23_courses, 1)

    # lead with the student's total units (not the last section's), so callers can sort on it
    summary = [num_units, num_course, f"\"{sh[0]['sortablename']}\",{sh[0]['canvasid']},{num_sems},{num_course},{num_units},{units_online},{units_inperson},{units_hybrid},{units_ol},{pct_online},{fa_23_units},{fa_23_online_units},{fa_23_pct_online},{fa23_courses},{fa23_onlinecourses},{fa23_pct_course_online}"]
    return summary
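
# Tiny usage sketch with made-up section records (field names match the ones
# read above; not part of the module proper):
def _student_history_analysis_demo():
    sample = [
        {'sis': '202370', 'type': 'online',    'units': '3',
         'sortablename': 'Doe, Jane', 'canvasid': 101},
        {'sis': '202370', 'type': 'in-person', 'units': '4-5',
         'sortablename': 'Doe, Jane', 'canvasid': 101},
    ]
    total_units, num_courses, row = student_history_analysis(sample)
    print(row)   # -> "Doe, Jane",101,1,2,7.0,3.0,4.0,0,0,42.9,7.0,3.0,42.9,2,1,50.0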
def report_student_stats():
    from localcache import users_with_history, students_current_semester
    from itertools import groupby
    u = users_with_history()
    this_sem = [x['canvasid'] for x in students_current_semester()]

    df = pd.DataFrame(u)
    filtered_df = df[df['canvasid'].isin(this_sem)]
    filtered_df.to_csv('cache/student_history_current_students.csv', index=False)

    oo = codecs.open('cache/student_units.txt','w','utf-8')
    oo.write("name,id,num_sems,num_course,num_units,units_online,units_inperson,units_hybrid,units_ol,percent_online,fa23_units,fa23_onlineunits,fa23_pct_online,fa23_num_courses,fa23_num_onlinecourses,fa23_percent_online_course\n")

    # Group the history records by student id (groupby needs the input sorted by the same key)
    def kk(x): return x['canvasid']
    grouped_dict = {key: list(group) for key, group in groupby(u, kk)}

    shorter = []

    for k, g in grouped_dict.items():
        if k in this_sem:
            h = student_history_analysis(g)
            #oo.write(json.dumps(h[2],indent=2)+ "\n")
            oo.write(h[2] + "\n")
            shorter.append(h)
        else:
            print(f"Skipping {k}")
    #print(this_sem)
    #oo.write('units,courses\n')
    #shorter.sort(key=lambda x: x[0], reverse=True)
    #for s in shorter:
    #    print(s[2])
    #    #oo.write(f"{s[0]},{s[1]}\n")
    #    #print('\n\n')
if __name__ == "__main__":
    print ("")

@ -2340,7 +2238,6 @@ if __name__ == "__main__":
        22: ['Sync personnel and conference user databases', user_db_sync],
        23: ['Find non-gnumbers', find_no_goo],
        24: ['compare user tables', compare_db_tables],
        25: ['Report on student stats', report_student_stats],
        #3: ['Main index, 1 year, teachers and their classes', getAllTeachersInTerm],
        #5: ['Match names in schedule & ilearn', match_usernames],
        #6: ['Create Dept\'s ZTC list', create_ztc_list],