diff --git a/courses.py b/courses.py
index ae24856..1c6e7c9 100644
--- a/courses.py
+++ b/courses.py
@@ -27,7 +27,7 @@ def get_gott1_passers():
min_passing = 85
passers_filename = 'cache/teacherdata/bootcamp_passed.csv'
still_active_filename = 'cache/teacherdata/bootcamp_active.csv'
- get_course_passers(course, min_passing, passers_filename, still_active_filename)
+ #get_course_passers(course, min_passing, passers_filename, still_active_filename)
# Plagiarism Module - report on who completed it.
def get_plague_passers():
@@ -35,6 +35,7 @@ def get_plague_passers():
min_passing = 85
passers_filename = 'cache/teacherdata/plagiarism_passed.csv'
still_active_filename = 'cache/teacherdata/plagiarism_active.csv'
+ """
(passed, didnt) = get_course_passers(course, min_passing, passers_filename, still_active_filename)
passed = set( [z[2] for z in passed] )
didnt = set( [z[2] for z in didnt] )
@@ -54,40 +55,36 @@ def get_plague_passers():
outputfile = open('cache/plagcheck.txt','w').write( json.dumps( [ [z[2] for z in passed],[z[2] for z in didnt],enrol],indent=2))
return 1
 passed_d = {}
 didnt_d = {}
output_by_course = {}
course_s = {}
 for p in passed: passed_d[str(p[2])] = p
 for p in didnt: didnt_d[str(p[2])] = p
 passed_s = [ str(k) for k in passed_d.keys() ]
 didnt_s = [ str(k) for k in didnt_d.keys() ]
crossref = ['11677','11698',]
outputfile = open('cache/plagcheck.txt','w')
 oo = { 'passed': passed_d, 'didnt': didnt_d }
-
for cr in crossref:
student_int = course_enrollment(cr)
 student_d = { str(k): v for k,v in student_int.items() }
 oo[cr] = student_d
-
output_by_course[cr] = { 'passed':{}, 'didnt':{}, 'missing':{} }
 course_s[cr] = set( [ str(k) for k in student_d.keys() ])
 for k,v in student_d.items():
key_s = str(k)
 if key_s in passed_d:
     output_by_course[cr]['passed'][key_s] = passed_d[key_s]
 elif key_s in didnt_d:
     output_by_course[cr]['didnt'][key_s] = didnt_d[key_s]
else:
output_by_course[cr]['missing'][key_s] = v['user']
@@ -143,7 +140,7 @@ def get_course_passers(course, min_passing, passers_filename, still_active_filen
print("Saved output to \n - passed: %s\n - not passed: %s\n" % (passers_filename, still_active_filename))
return (passed,didnt)
-
+ """
# Gott 1A
"""course = '2908'
quiz = '15250'
@@ -231,9 +228,9 @@ def users_in_semester():
#
# All students in STEM (or any list of depts.. match the course_code). Return SET of canvas ids.
def users_in_depts_live(depts=[], termid='171'):
 courses_by_dept = {}
 students_by_dept = {}
all_c = getCoursesInTerm(termid,0,0)
codecs.open('cache/courses_in_term_%s.json' % termid,'w','utf-8').write( json.dumps(all_c,indent=2) )
@@ -244,19 +241,19 @@ def users_in_depts_live(depts=[], termid='171'):
match = re.search('^(%s)' % d, c['course_code'])
if match:
print("Getting enrollments for %s" % c['course_code'])
 if d in courses_by_dept: courses_by_dept[d].append(c)
 else: courses_by_dept[d] = [ c, ]
for u in course_enrollment(c['id']).values():
if u['type'] != "StudentEnrollment": continue
 if not (d in students_by_dept):
     students_by_dept[d] = set()
 students_by_dept[d].add(u['user_id'])
continue
 print(students_by_dept)
 codecs.open('cache/students_by_dept_in_term_%s.json' % termid,'w','utf-8').write( str(students_by_dept) )
all_students = set()
 for dd in students_by_dept.values(): all_students.update(dd)
 codecs.open('cache/all_students_in_depts_in_term_%s.json' % termid,'w','utf-8').write( str(all_students) )
return all_students
@@ -295,7 +292,7 @@ def askForTerms():
print("Terms: ")
for u in s:
print(str(u['id']) + "\t" + u['name'])
 #print json.dumps(results_dict,indent=2)
term = input("The term id? ")
"""
@@ -547,13 +544,12 @@ def all_equal2(iterator):
177 2023 Winter
"""
def semester_cross_lister():
- sem = "fa23"
- term = 180
+ sem = "sp24"
+ term = 181
xlist_filename = f"cache/{sem}_crosslist.csv"
checkfile = codecs.open('cache/xlist_check.html','w','utf-8')
 checkfile.write('\n')
- current_term = 179
xlistfile = codecs.open(xlist_filename,'r','utf-8').readlines()[1:]
by_section = {}
by_group = defaultdict( list )
@@ -608,14 +604,17 @@ def semester_cross_lister():
nums_list = list(set([ z[1].split(' ')[1] for z in by_group[y] ]))
if all_equal2(depts_list):
depts = depts_list[0]
+ nums_list.sort()
nums = '/'.join(nums_list)
else:
 depts = list(set(depts_list))
+ depts.sort()
+ depts = '/'.join(depts )
nums = by_group[y][0][1].split(' ')[1]
- new_name = depts + nums + " " + ' '.join(by_group[y][0][4].split(' ')[1:-1]) + " " + new_sec
+ new_name = f"{depts}{nums} {' '.join(by_group[y][0][4].split(' ')[1:-1])} {new_sec}"
#new_name = by_group[y][0][4][0:-5] + new_sec
- new_code = depts + nums + " " + new_sec
+ new_code = f"{depts}{nums} {sem.upper()} {new_sec}"
#new_code = by_group[y][0][5][0:-5] + new_sec
print(y)
print("\t", sects)
@@ -627,13 +626,15 @@ def semester_cross_lister():
for target_section in sections:
xlist_ii(target_section[3],host_id,new_name,new_code)
+ #pass
-
+# Perform an actual cross-list, given the parasite and host course ids plus the new name and code
def xlist_ii(parasite_id,host_id,new_name,new_code):
print("Parasite id: ",parasite_id," Host id: ", host_id)
print("New name: ", new_name)
print("New code: ", new_code)
- xyz = input("Perform cross list? Enter for yes, n for no: ")
+ xyz = 'y'
+ #xyz = input("Perform cross list? Enter for yes, n for no: ")
if xyz != 'n':
uu = url + '/api/v1/courses/%s/sections' % parasite_id
c_sect = fetch(uu)
@@ -853,7 +854,7 @@ def enroll_stem_students_live():
the_term = '180' # su23 fa23 = 180
do_removes = 0
depts = "MATH BIO CHEM CSIS PHYS PSCI GEOG ASTR ECOL ENVS ENGR".split(" ")
 users_to_enroll = users_in_depts_live(depts, the_term) # term id
stem_enrollments = course_enrollment(stem_course_id) # by user_id
@@ -924,7 +925,7 @@ def enroll_stem_students_live():
###########################
def enroll_bulk_students_bydept(course_id, depts, the_term="172", cautious=1): # a string, a list of strings
 users_to_enroll = users_in_depts_live(depts, the_term) # term id
targeted_enrollments = course_enrollment(course_id) # by user_id.. (live, uses api)
@@ -1120,7 +1121,7 @@ def make_ztc_list(sem='sp20'):
result = open('cache/ztc_crossref.csv','w')
result.write('Course,Section,Name,Teacher,ZTC teacher\n')
 ztc_dict = {}
for R in responses:
R = re.sub(',Yes','',R)
R = re.sub('\s\s+',',',R)
@@ -1132,18 +1133,18 @@ def make_ztc_list(sem='sp20'):
for C in parts[1:] :
C = C.strip()
#print(C)
 if C in ztc_dict:
     ztc_dict[C] += ', ' + parts[0]
else:
     ztc_dict[C] = parts[0]
 print(ztc_dict)
for CO in sched:
#if re.match(r'CWE',CO['code']):
#print(CO)
 if CO['code'] in ztc_dict:
     print(('Possible match, ' + CO['code'] + ' ' + ztc_dict[CO['code']] + ' is ztc, this section taught by: ' + CO['teacher'] ))
     result.write( ','.join( [CO['code'] ,CO['crn'] , CO['name'] , CO['teacher'] , ztc_dict[CO['code']] ]) + "\n" )
def course_search_by_sis():
term = 65
@@ -1161,7 +1162,7 @@ def course_search_by_sis():
def course_dates_terms(section=0):
"""s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
s = list(funcy.flatten(s))
s.sort()
@@ -1172,13 +1173,24 @@ def course_dates_terms(section=0):
#c = getCoursesInTerm(174,0,1) # sp22
#c = getCoursesInTerm(176,0,1) # fa22
- get_fresh = 0
+ get_fresh = 1
+ SP_TERM = 181
+ WI_TERM = 182
+ SEM = "sp24"
+
+ make_changes = 1
+ make_changes_LS = 1
+
+ winter_start_day = 2
+ aviation_start_day = 11
+ nursing_start_day = 15
+ spring_start_day = 29
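+ # (day-of-month in January for this semester's sessions; compared against each section's start date below)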
if get_fresh:
- c = getCoursesInTerm(178,0,0) # sp23
- codecs.open('cache/courses_in_term_178.json','w','utf-8').write(json.dumps(c,indent=2))
+ c = getCoursesInTerm(SP_TERM,0,0)
+ codecs.open(f'cache/courses_in_term_{SP_TERM}.json','w','utf-8').write(json.dumps(c,indent=2))
else:
- c = json.loads( codecs.open('cache/courses_in_term_178.json','r','utf-8').read() )
+ c = json.loads( codecs.open(f'cache/courses_in_term_{SP_TERM}.json','r','utf-8').read() )
crn_to_canvasid = {}
for C in c:
@@ -1189,38 +1201,45 @@ def course_dates_terms(section=0):
#print(crn_to_canvasid)
#return
- s = json.loads( codecs.open('cache/sp23_sched_expanded.json','r','utf-8').read() )
+ #s = json.loads( codecs.open(f'cache/{SEM}_sched_expanded.json','r','utf-8').read() )
+ s = requests.get(f"http://gavilan.cc/schedule/{SEM}_sched_expanded.json").json()
for S in s:
- start = re.sub( r'\-','/', S['start']) + '/2023'
+ start = re.sub( r'\-','/', S['start']) + '/20' + SEM[2:4]
d_start = datetime.strptime(start,"%m/%d/%Y")
if d_start.month > 5:
print("Ignoring ", d_start, " starting too late...")
continue
- if d_start.month == 1 and d_start.day == 12:
+ if d_start.month == 1 and d_start.day == aviation_start_day:
print("- Aviation ", start, d_start, " - ", S['code'], " ", S['crn'] )
continue
- if d_start.month == 1 and d_start.day ==3:
- print("+ winter session: ", d_start, " - ", S['code'])
- winter_term = '177'
- data = {'course[term_id]':winter_term}
- u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
- r3 = requests.put(u2, headers=header, params=data)
- print(u2, " OK")
- #print(r3.text)
+ if d_start.month == 1 and d_start.day == nursing_start_day:
+ print("- Nursing ", start, d_start, " - ", S['code'], " ", S['crn'] )
continue
- if d_start.month == 1 and d_start.day == 30:
+ if d_start.month == 1 and d_start.day == winter_start_day:
+ print("+ winter session: ", d_start, " - ", S['code'])
+ data = {'course[term_id]':WI_TERM}
+ u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
+ if make_changes:
+ r3 = requests.put(u2, headers=header, params=data)
+ print(" updated.. OK")
+ #print(r3.text)
+ continue
+
+ if d_start.month == 1 and d_start.day == spring_start_day:
# normal class
continue
print("- Late start? ", start, d_start, " - ", S['code'], " ", S['crn'] )
- data = {'course[start_at]':d_start.isoformat(), 'course[restrict_enrollments_to_course_dates]': True}
- u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
- r3 = requests.put(u2, headers=header, params=data)
- print(u2, " OK")
+ if make_changes_LS:
+ data = {'course[start_at]':d_start.isoformat(), 'course[restrict_student_future_view]': True,
+ 'course[restrict_enrollments_to_course_dates]':True }
+ u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
+ r3 = requests.put(u2, headers=header, params=data)
+ print(" updated.. OK")
return
@@ -1234,9 +1253,9 @@ def xlist_cwe():
# cwe192 get put into another shell
- this_sem_190_id = 17549 # they get 190s and 290s
- this_sem_192_id = 17154 # they get 192s
- this_sem_term = 180 # fa23
+ this_sem_190_id = 18424 # they get 190s and 290s
+ this_sem_192_id = 18519 # they get 192s
+ this_sem_term = 181
get_fresh = 0
sem_courses = getCoursesInTerm(this_sem_term, get_fresh, 0)
@@ -1375,8 +1394,8 @@ def create_sandboxes():
# Create a course
r3 = requests.post(u2, headers=header, params=data)
 course_data = json.loads(r3.text)
 id = course_data['id']
print(f"created course id {id}")
report.append( f"{coursename} https://ilearn.gavilan.edu/courses/{id}" )
@@ -1551,55 +1570,44 @@ def instructor_list_to_activate_evals():
def add_evals(section=0):
# show or hide?
- hidden = False
- #s = [ x.strip() for x in codecs.open('cache/sp21_eval_sections.txt','r').readlines()]
- #s = [ x.split(',')[4].split('::') for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
- #s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
- s = [ x.strip() for x in codecs.open('cache/sp23_eval_sections.csv','r').readlines()]
+
+ TERM = 180
+ SEM = "fa23"
+
+ hidden = True
+ s = [ x.strip() for x in codecs.open(f'cache/{SEM}_eval_sections.csv','r').readlines()]
s = list(funcy.flatten(s))
s.sort()
print(s)
+ print()
xyz = input('hit return to continue')
- #c = getCoursesInTerm(168,0,1)
- #c = getCoursesInTerm(174,0,1) # sp22
- #c = getCoursesInTerm(176,0,1) # fa22
- c = getCoursesInTerm(178,0,1) # sp23
- print(c)
+ c = getCoursesInTerm(TERM,0,1)
ids = []
courses = {}
for C in c:
if C and 'sis_course_id' in C and C['sis_course_id']:
parts = C['sis_course_id'].split('-')
if parts[1] in s:
- print(C['name'])
+ #print(C['name'])
courses[str(C['id'])] = C
ids.append(str(C['id']))
ask = 1
data = {'position':2, 'hidden':hidden}
-
+ ids.sort()
+
for i in ids:
if ask:
- a = input("Hit q to quit, a to do all, or enter to activate eval for: " + str(courses[i]))
+ a = input(f"Hit q to quit, a to do all, or enter to activate eval for: {courses[i]['id']} / {courses[i]['name']} : ")
if a == 'a': ask = 0
if a == 'q': return
+ else:
+ print(f"{courses[i]['id']} / {courses[i]['name']}")
u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s/tabs/context_external_tool_1953" % i
r3 = requests.put(u2, headers=header, params=data)
- print(r3.text)
- time.sleep(0.400)
-
-
- return 1
-
- u2 = "https://gavilan.instructure.com:443/api/v1/courses/12001/tabs"
- r = fetch(u2)
- print(json.dumps(r,indent=2))
-
-
-
- # PUT /api/v1/courses/:course_id/tabs/:tab_id
-
+ #print(r3.text)
+ #time.sleep(0.400)
@@ -1708,8 +1716,8 @@ def fetch_rubric_scores(course_id=16528, assignment_id=1):
#print(assignments_list)
 assignments_dict = {}
 ratings_dict = {}
# Iterate through the list of assignments and populate the dictionary
for assignment in assignments_list:
@@ -1723,7 +1731,7 @@ def fetch_rubric_scores(course_id=16528, assignment_id=1):
out.write(f" Asmt Name: {assignment_name} ID: {assignment_id} Rubric: {has_rubric}\n")
# Save assignment details including rubric
 assignments_dict[assignment_id] = {
'name': assignment_name,
'rubric': rubric
# Add more assignment details if needed
@@ -1734,12 +1742,12 @@ def fetch_rubric_scores(course_id=16528, assignment_id=1):
print(json.dumps(rubric,indent=2))
for r in rubric:
for rat in r.get('ratings',[]):
 ratings_dict[rat['id']] = { 'rub_description': r['description'], 'rat_description': rat['description'], 'points': rat['points']}
# Print the assignments dictionary
 out.write(json.dumps(assignments_dict,indent=2)+'\n\n\n')
 out.write(json.dumps(ratings_dict,indent=2)+'\n\n\n')
# Loop thru assignments with rubrics and report on grades
for assignment in assignments_list:
@@ -1763,11 +1771,11 @@ def fetch_rubric_scores(course_id=16528, assignment_id=1):
# print(f"Request failed with status code {response.status_code}")
# continue
 submissions_data = fetch(api_url)
# Iterate through the list of submissions and retrieve rubric scores and comments
 for submission in submissions_data:
user_id = submission['user_id']
rubric = submission.get('rubric_assessment', []) # Get the rubric assessment (empty list if not present)
comments = submission.get('submission_comments', '') # Get submission comments (empty string if not present)
@@ -1784,13 +1792,13 @@ def fetch_rubric_scores(course_id=16528, assignment_id=1):
out.write(f"Submission Comments: {comments}\n")
out.write(f"Rubric:\n")
for k,v in rubric.items():
 rub_desc = '?'
 rat_desc = '?'
 if v['rating_id'] in ratings_dict:
     rub_rating = ratings_dict[v['rating_id']]
     rub_desc = rub_rating['rub_description']
     rat_desc = rub_rating['rat_description']
 out.write(f" {rub_desc} - {rat_desc} ({v['rating_id']}): {v['points']}/{rub_rating['points']} points: {v['comments']}\n")
out.write("---") # Separator between submissions
out.flush()
@@ -1814,7 +1822,7 @@ def create_calendar_event():
local = pytz.timezone("America/Los_Angeles")
naive = datetime.strptime(date, "%Y-%m-%d")
local_dt = local.localize(naive, is_dst=None)
 utc_dt = local_dt.astimezone(pytz.utc).isoformat()
@@ -1822,8 +1830,8 @@ def create_calendar_event():
"calendar_event[context_code]": "course_15924", # 2023 student orientation
"calendar_event[title]": title,
"calendar_event[description]": desc,
- "calendar_event[start_at]": utc_dt, # DateTime
- "calendar_event[all_day]": "true",
+ "calendar_event[start_at]": utc_dt, # DateTime
+ "calendar_event[all_by_dept": "true",
}
@@ -1845,7 +1853,7 @@ def utc_to_local(utc_str):
# Convert the UTC datetime to the Pacific Time Zone
pacific_tz = pytz.timezone('US/Pacific')
 pacific_dt = utc_dt.astimezone(pacific_tz)
return pacific_dt.strftime('%a %b %d, %Y %#I:%M%p')
@@ -1897,6 +1905,9 @@ def fetch_announcements():
print("Announcements saved to ", filename)
+
+
+
if __name__ == "__main__":
options = { 1: ['Cross check schedule with ztc responses',make_ztc_list] ,
2: ['Add announcements to homepage', change_course_ann_homepage],
@@ -1927,7 +1938,7 @@ if __name__ == "__main__":
17: ['Remove "new analytics" from all courses navs in a semester', remove_n_analytics],
21: ['Add course evals', add_evals],
 27: ['Fine tune term dates and winter session', course_dates_terms],
3: ['Cross-list classes', xlist ],
6: ['Cross list helper', eslCrosslister],
28: ['Cross list a semester from file', semester_cross_lister],
diff --git a/localcache.py b/localcache.py
index 463d3bb..8d72084 100644
--- a/localcache.py
+++ b/localcache.py
@@ -43,6 +43,7 @@ term_format = "id canvasid rootid name start end sis".split(" ")
course_format = "id canvasid rootactid acctid termid name code type created start conclude visible sis state wikiid schedule".split(" ")
role_format = "id canvas_id root_account_id account_id name base_role_type workflow_state created_at updated_at deleted_at".split(" ")
course_score_format = "s_id c_id a_id course_id enrol_id current final muted_current muted_final".split(" ")
+course_section_dim_format = "id canvas_id name course_id enrollment_term_id default_section accepting_enrollments can_manually_enroll start_at end_at created_at workflow_state restrict_enrollments_to_section_dates nonxlist_course_id sis_source_id".split(" ")
enrollment_dim_format = "id cid root course_section role type workflow created updated start end complete self sis course_id user_id last_activity".split(" ")
communication_channel_dim_format = "id canvas_id user_id address type position workflow_state created_at updated_at".split(" ")
pseudonym_dim_format = "id canvas_id user_id account_id workflow_state last_request_at last_login_at current_login_at last_login_ip current_login_ip position created_at updated_at password_auto_generated deleted_at sis_user_id unique_name integration_id authentication_provider_id".split(" ")
@@ -197,6 +198,17 @@ def setup_table(table='requests'):
q += "\t%s %s" % (col,type)
q += "\n);"
+ if table=='course_sections':
+ first = 1
+ q = "CREATE TABLE IF NOT EXISTS course_sections (\n"
+ for L in course_section_dim_format:
+ (col,type) = (L,'text')
+ if not first:
+ q += ",\n"
+ first = 0
+ q += "\t%s %s" % (col,type)
+ q += "\n);"
+
if table=='enrollment':
first = 1
q = "CREATE TABLE IF NOT EXISTS enrollment (\n"
@@ -900,6 +912,26 @@ def merge_courses():
print(q)
conn.commit()
+def merge_course_sections():
+ setup_table('course_sections')
+ (conn,cur) = db()
+
+ c_file = most_recent_file_of('course_section_dim')
+ c_sections = parse_file_with( c_file, course_section_dim_format)
+ count = 0
+ for U in c_sections:
+ q,v = dict_to_insert(U,'course_sections')
+ count += 1
+ #if count % 1000 == 0:
+ # print( "%i - " % count + q + " " + str(v) )
+ try:
+ cur.execute(q,v)
+ except Exception as e:
+ print(e)
+ print(q)
+ conn.commit()
+ print("Processed %i course sections" % count)
+
def merge_enrollment():
setup_table('enrollment')
(conn,cur) = db()
@@ -1113,6 +1145,7 @@ def full_reload():
merge_enrollment()
merge_term()
merge_roles()
+ merge_course_sections()
#merge_requests()
@@ -2003,7 +2036,31 @@ def sched_to_db():
conn.executemany(query, vals_cache)
conn.commit()
+def students_current_semester(sem='202370'):
+ q = f"""SELECT u.canvasid FROM enrollment AS e
+JOIN users AS u ON e.user_id=u.id
+JOIN courses AS c ON e.course_id=c.id
+WHERE c.sis LIKE "{sem}-%"
+AND e.workflow="active"
+AND e."type"="StudentEnrollment"
+GROUP BY u.canvasid;"""
+ result = query_multiple(q)
+ #for r in result:
+ # print(json.dumps(result,indent=2))
+ return result
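+
+# Illustrative usage (assumes query_multiple returns one dict per row):
+#   current_ids = { r['canvasid'] for r in students_current_semester('202370') }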
+def users_with_history():
+ q = '''SELECT u.name, u.sortablename, u.canvasid, c.code, s.partofday, s.type, s.site, s.units, t.sis, s.sem FROM users u
+JOIN enrollment e ON u.id = e.user_id
+JOIN courses c ON c.id = e.course_id
+JOIN terms t ON c.termid = t.id
+JOIN schedule s ON (s.crn=SUBSTR(c.sis,INSTR(c.sis, '-')+1,5) AND s.semsis=t.sis)
+WHERE e.type='StudentEnrollment' AND e.workflow='active'
+ORDER BY u.sortablename, t.sis, c.code ;'''
+ result = query_multiple(q)
+ #for r in result:
+ # print(json.dumps(result,indent=2))
+ return result
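+
+# Illustrative note: rows come back ordered by student (sortablename), so a consumer can
+# group them per student, e.g. itertools.groupby(rows, key=lambda r: r['canvasid']),
+# which is what report_student_stats() in users.py does with this data.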
if __name__ == "__main__":
@@ -2035,6 +2092,7 @@ if __name__ == "__main__":
24: ['add conference sessions', add_sessions],
25: ['gavilan.cc extended schedule to sql insert format', sched_to_db],
26: ['correlate courses to schedule id', courses_to_sched],
+ 27: ['report all users', users_with_history],
#19: ['add evals for a whole semester', instructor_list_to_activate_evals],
#16: ['Upload new employees to flex app', employees_refresh_flex],
}
diff --git a/outcomes2022.py b/outcomes2022.py
index 3fda09d..6eb430c 100644
--- a/outcomes2022.py
+++ b/outcomes2022.py
@@ -30,7 +30,7 @@ from path_dict import PathDict
outputfile = ''
csvwriter = ''
-TERM = 180
+TERM = 181
def escape_commas(s):
@@ -358,6 +358,21 @@ def repair_outcome_points(course_id):
def add_o_dept_dry_run():
add_o_dept(1)
+def add_o_whole_term():
+ course_groups = full_term_overview(0)
+
+ dept_shells_to_add = [ a for a in course_groups['no outcomes'] ]
+ sorted_dept_shells_to_add = sorted(dept_shells_to_add, key=lambda x: f"{x['dept']}{x['code']}")
+
+ print(f"Adding to {len(sorted_dept_shells_to_add)} shells.")
+
+ for shell in sorted_dept_shells_to_add:
+ print(f"Adding outcomes to {shell['name']}")
+ try:
+ add_outcome_to_course(shell['id'])
+ except Exception as e:
+ print(f"Failed on {shell['id']}: {e}")
+
def add_o_dept(dry_run=0):
d = input("Enter dept or deps separated with a space > ")
@@ -483,6 +498,7 @@ def fetch_term_outcomes_and_report():
if __name__ == "__main__":
options = { 1: ['Refresh term outcome list & report', fetch_term_outcomes_and_report],
+ 2: ['Add outcomes to unset courses in whole term', add_o_whole_term],
3: ['Add outcomes to course id', add_outcome_to_course],
4: ['Fix outcome points', remove_old_outcomes],
5: ['Add outcomes to dept, dry run', add_o_dept_dry_run],
diff --git a/tasks.py b/tasks.py
index b7bd455..0beba8b 100644
--- a/tasks.py
+++ b/tasks.py
@@ -15,7 +15,7 @@
import pysftp, os, datetime, requests, re, json, sqlite3, codecs, csv, sys
import funcy, os.path, shutil, urllib
 from datetime import datetime
from collections import defaultdict
#from datetime import strptime
from time import mktime
diff --git a/users.py b/users.py
index e03c31a..9a48fc5 100644
--- a/users.py
+++ b/users.py
@@ -26,7 +26,7 @@ from threading import Thread
from os import path
# for NLP
-import spacy
+#import spacy
from gensim import corpora, models, similarities, downloader, utils
from nltk import stem
@@ -1994,7 +1994,7 @@ def nlp_sample():
for document_number, score in sorted(enumerate(sims), key=lambda x: x[1], reverse=True):
print(document_number, score)
-
+'''
def nlp_sample2():
# load english language model
nlp = spacy.load('en_core_web_sm',disable=['ner','textcat'])
@@ -2006,8 +2006,7 @@ def nlp_sample2():
for token in doc:
print(token.text,'->',token.pos_)
-
-
+'''
@@ -2229,6 +2228,92 @@ def compare_db_tables():
for e in common_emails:
out.write(f"update `conf_users` set `p2id`='{by_email_conf[e]['id']}' where lower(`email`)='{e}';\n")
+# given a list of classes, report back about the student on one row of info
+def student_history_analysis(sh):
+ from functools import reduce
+ semesters_set = set()
+ num_sems = 0
+ num_course = len(sh)
+ num_units = 0
+ units_online = 0
+ units_inperson = 0
+ units_hybrid = 0
+ units_ol = 0
+ fa_23_units = 0
+ fa_23_online_units = 0
+ fa23_courses = 0
+ fa23_onlinecourses = 0
+
+ #un_list = [ float(x['units'].split('-')[0].split('/')[0]) for x in sh ]
+ #num_units = reduce(lambda x,y: x+y, un_list)
+ for section in sh:
+ semesters_set.add(section['sis'])
+ units = float(section['units'].split('-')[0].split('/')[0])
+ num_units += units
+ if section['type'] == 'in-person': units_inperson += units
+ if section['type'] == 'online': units_online += units
+ if section['type'] == 'hybrid': units_hybrid += units
+ if section['type'] == 'online live': units_ol += units
+
+ if section['sis'] == '202370':
+ fa_23_units += units
+ fa23_courses += 1
+ if not section['type'] == 'in-person':
+ fa_23_online_units += units
+ fa23_onlinecourses += 1
+
+ num_sems = len(semesters_set)
+ if num_units == 0:
+ pct_online = 0
+ else:
+ pct_online = round(100 * (units_online+units_hybrid+units_ol) / num_units, 1)
+
+ if fa_23_units == 0:
+ fa_23_pct_online = 0
+ else:
+ fa_23_pct_online = round(100 * (fa_23_online_units) / fa_23_units, 1)
+
+ if fa23_courses == 0:
+ fa23_pct_course_online = 0
+ else:
+ fa23_pct_course_online = round(100 * (fa23_onlinecourses) / fa23_courses, 1)
+    summary = [num_units, num_course, f"\"{sh[0]['sortablename']}\",{sh[0]['canvasid']},{num_sems},{num_course},{num_units},{units_online},{units_inperson},{units_hybrid},{units_ol},{pct_online},{fa_23_units},{fa_23_online_units},{fa_23_pct_online},{fa23_courses},{fa23_onlinecourses},{fa23_pct_course_online}"]
+ return summary
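+# (summary = [total_units, course_count, csv_row]: the two numbers are handy sort keys, and
+#  csv_row lines up with the header written by report_student_stats below)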
+
+def report_student_stats():
+ from localcache import users_with_history, students_current_semester
+ from itertools import groupby
+ u = users_with_history()
+ this_sem = [x['canvasid'] for x in students_current_semester()]
+
+ df = pd.DataFrame(u)
+ filtered_df = df[df['canvasid'].isin(this_sem)]
+ filtered_df.to_csv('cache/student_history_current_students.csv',index=False)
+
+ oo = codecs.open('cache/student_units.txt','w','utf-8')
+ oo.write("name,id,num_sems,num_course,num_units,units_online,units_inperson,units_hybrid,units_ol,percent_online,fa23_units,fa23_onlineunits,fa23_pct_online,fa23_num_courses,fa23_num_onlinecourses,fa23_percent_online_course\n")
+ # Now group by that key
+ def kk(x): return x['canvasid']
+ grouped_dict = {key:list(group) for key, group in groupby(u, kk)}
+
+ shorter = []
+
+ for k,g in grouped_dict.items():
+ if k in this_sem:
+ h = student_history_analysis(g)
+ #oo.write(json.dumps(h[2],indent=2)+ "\n")
+ oo.write( str(h[2]) + "\n")
+ shorter.append(h)
+ else:
+ print(f"Skipping {k}")
+ #print(this_sem)
+ #oo.write('units,courses\n')
+ #shorter.sort(key=lambda x: x[0], reverse=True)
+ #for s in shorter:
+ # print(s[2])
+ # #oo.write(f"{s[0]},{s[1]}\n")
+ # #print('\n\n')
+
if __name__ == "__main__":
print ("")
@@ -2255,6 +2340,7 @@ if __name__ == "__main__":
22: ['Sync personnel and conference user databases', user_db_sync],
23: ['Find non-gnumbers', find_no_goo ],
24: ['compare user tables', compare_db_tables],
+ 25: ['Report on student stats', report_student_stats],
#3: ['Main index, 1 year, teachers and their classes', getAllTeachersInTerm],
#5: ['Match names in schedule & ilearn', match_usernames],
#6: ['Create Dept\'s ZTC list', create_ztc_list],