From 79e8dfed5312e1515c1325ce6200d68e77a855c8 Mon Sep 17 00:00:00 2001
From: Peter Howell
Date: Wed, 22 Mar 2023 11:44:01 -0700
Subject: [PATCH 1/5] surface version courses.py

---
 courses.py | 111 +++++++++++++++++++++--------------------
 1 file changed, 44 insertions(+), 67 deletions(-)

diff --git a/courses.py b/courses.py
index 691757b..547d699 100644
--- a/courses.py
+++ b/courses.py
@@ -1,7 +1,7 @@
 
-import json, re, requests, codecs, sys, time, funcy, os
+import json, re, requests, codecs, sys, time, funcy
 import pandas as pd
-#from tabulate import tabulate
+from tabulate import tabulate
 from dateutil import parser
 from datetime import datetime
 from util import print_table
@@ -249,7 +249,9 @@ def scrape_bookstore():
 # Input: xxxx_sched.json. Output: xxxx_latestarts.txt
 def list_latestarts():
     #term = input("Name of current semester file? (ex: sp18) ")
-    term = "sp23" # sems[0]
+    term = "sp23"  # sems[0]
+
+    dept_ignore = "JFT JLE CWE".split(" ")
 
     term_in = "cache/" + term + "_sched.json"
     term_out = "cache/" + term + "_latestarts.txt"
@@ -260,12 +262,13 @@ def list_latestarts():
     #print sched
     by_date = {}
     for C in sched:
+        dept = C['code'].split(" ")
+        dept = dept[0]
+        if dept in dept_ignore:
+            continue
+        print(dept)
         parts = C['date'].split("-")
         start = parts[0]
-        codes = C['code'].split(' ')
-        dept = codes[0]
-        if dept in ['JLE','JFT','CWE']:
-            continue
         if re.search('TBA',start): continue
         try:
             startd = parser.parse(start)
@@ -279,11 +282,11 @@ def list_latestarts():
         #print "Start: " + str(X)
         if len(by_date[X]) < 200:
             prettydate = X.strftime("%A, %B %d")
-            print(prettydate + ": " + str(len(by_date[X])) + " courses")
+            #print(prettydate + ": " + str(len(by_date[X])) + " courses")
             outfile.write(prettydate + ": " + str(len(by_date[X])) + " courses" + "\n")
             for Y in by_date[X]:
                 #print "\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher']
-                print(Y)
+                #print(Y)
                 #outfile.write("\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher'] + "\t" + Y['type'] +"\n")
                 outfile.write("\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher'] + "\t" + Y['type'] + "\t" + "\n")
 
@@ -332,20 +335,14 @@ def users_in_depts_live(depts=[], termid='171'):
 
 
 def course_enrollment(id=''):
-    print("Getting enrollments for course id %s" % str(id))
     if not id: id = input('Course id? ')
     t = url + '/api/v1/courses/%s/enrollments?role[]=StudentEnrollment' % str(id)
-    print(t)
     emts = fetch(t,0)
-    print(emts)
+    #print(emts)
     emt_by_id = {}
     for E in emts:
-        print(E)
-        try:
-            emt_by_id[E['user_id']] = E
-        except Exception as exp:
-            print("Skipped that class with this exception: %s" % str(exp))
+        emt_by_id[E['user_id']] = E
     ff = codecs.open('cache/courses/%s.json' % str(id), 'w', 'utf-8')
     ff.write(json.dumps(emt_by_id, indent=2))
     print( " %i results" % len(emts) )
@@ -510,18 +507,11 @@ GROUP BY c.code ORDER BY c.state, c.code""" % (S['id'],S['id'])
     outp2.write("\n\n---------\nNOT PUBLISHED\n\n" + json.dumps(notpub, indent=2))
 
 # Fetch all courses in a given term
-def getCoursesInTerm(term=0,get_fresh=1,show=0,active=0): # a list
-    if not term:
-        term = getTerms(1,1)
-    ff = 'cache/courses_in_term_%s.json' % str(term)
-    if not get_fresh:
-        if os.path.isfile(ff):
-            return json.loads( codecs.open(ff,'r','utf-8').read() )
-        else:
-            print(" -> couldn't find cached classes at: %s" % ff)
-
+def getCoursesInTerm(term=0,show=1,active=0): # a list
     # https://gavilan.instructure.com:443/api/v1/accounts/1/courses?published=true&enrollment_term_id=11
     names = []
+    if not term:
+        term = getTerms(1,1)
     if active:
         active = "published=true&"
     else:
@@ -541,7 +531,6 @@ def getCoursesInTerm(term=0,show=1,active=0): # a list
         names.append(a['name'])
         info.append( [a['id'], a['name'], a['workflow_state'] ] )
     if show: print_table(info)
-    codecs.open(ff, 'w', 'utf-8').write(json.dumps(results,indent=2))
     return results
 
 
@@ -713,32 +702,18 @@ def all_semester_course_sanity_check():
     codecs.open('cache/courses_in_term_178.json','w','utf-8').write(json.dumps(c,indent=2))
     output = codecs.open('cache/courses_w_sections.csv','w','utf-8')
     output.write( ",".join(['what','id','parent_course_id','sis_course_id','name']) + "\n" )
-    output2 = codecs.open('cache/courses_checker.csv','w','utf-8')
-    output2.write( ",".join(['id','sis_course_id','name','state','students']) + "\n" )
     i = 0
     for course in c:
-        u2 = url + '/api/v1/courses/%s?include[]=total_students' % str(course['id'])
-        course['info'] = fetch(u2)
-        #print(json.dumps(course['info'],indent=2))
-        ts = '?'
-        try:
-            ts = course['info']['total_students']
-        except Exception as e:
-            pass
-        info = [ 'course', course['id'], '', course['sis_course_id'], course['name'], course['workflow_state'], ts ]
+        info = [ 'course', course['id'], '', course['sis_course_id'], course['name'], ]
         info = list(map(str,info))
-        info2 = [ course['id'], course['sis_course_id'], course['name'], course['workflow_state'], ts ]
-        info2 = list(map(str,info2))
-        output2.write( ",".join(info2) + "\n" )
-        output2.flush()
-        print(info2)
+        print(info)
         output.write( ",".join(info) + "\n" )
-        #uu = url + '/api/v1/courses/%s/sections' % str(course['id'])
-        #course['sections'] = fetch(uu)
-        #s_info = [ [ 'section', y['id'], y['course_id'], y['sis_course_id'], y['name'], y['total_students'] ] for y in course['sections'] ]
-        #for row in s_info:
-        #    print(row)
-        #    output.write( ",".join( map(str,row) ) + "\n" )
+        uu = url + '/api/v1/courses/%s/sections' % str(course['id'])
+        course['sections'] = fetch(uu)
+        s_info = [ [ 'section', y['id'], y['course_id'], y['sis_course_id'], y['name']] for y in course['sections'] ]
+        for row in s_info:
+            print(row)
+            output.write( ",".join( map(str,row) ) + "\n" )
         output.flush()
         i += 1
         if i % 5 == 0:
@@ -864,9 +839,9 @@ def unenroll_student(courseid,enrolid):
 
 
 def enroll_stem_students_live():
-    the_term = '178'
+    the_term = '176'
     do_removes = 0
-    depts = "MATH BIO CHEM CSIS PHYS PSCI GEOG ASTR ECOL ENVS ENGR".split(" ")
+    depts = "MATH BIO CHEM CSIS PHYS GEOG ASTR ECOL ENVS ENGR".split(" ")
     users_to_enroll = users_in_depts_live(depts, the_term) # term id
     stem_enrollments = course_enrollment(stem_course_id) # by user_id
 
@@ -1039,21 +1014,21 @@ def enroll_orientation_students():
                 s = cursor.fetchall()
                 if s: s = s[0]
-                print(" + Enrolling: %s" % s[0])
+                print("Enrolling: %s" % s[0])
                 t = url + '/api/v1/courses/%s/enrollments' % ori_shell_id
                 data = { 'enrollment[user_id]': j, 'enrollment[type]':'StudentEnrollment', 'enrollment[enrollment_state]': 'active' }
-                #print(data)
+                print(data)
                 r3 = requests.post(t, headers=header, params=data)
                 eee += 1
-                #print(r3.text)
-                time.sleep(0.600)
+                print(r3.text)
+                time.sleep(0.200)
             except Exception as e:
-                print(" - Something went wrong with id %s, %s, %s" % (j, str(s), str(e)))
+                print("Something went wrong with id %s, %s, %s" % (j, str(s), str(e)))
     return (eee,uuu)
 
 
 def enroll_o_s_students():
-    #full_reload()
+    full_reload()
 
     (es,us) = enroll_stem_students_live()
     (eo, uo) = enroll_orientation_students()
 
@@ -1170,18 +1145,20 @@ def instructor_list_to_activate_evals():
 
 
 def add_evals(section=0):
     # show or hide?
-    hidden = True
+    hidden = False
     #s = [ x.strip() for x in codecs.open('cache/sp21_eval_sections.txt','r').readlines()]
     #s = [ x.split(',')[4].split('::') for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
-    s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
-    print(s)
+    #s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
+    s = [ x.strip() for x in codecs.open('cache/sp23_eval_sections.csv','r').readlines()]
     s = list(funcy.flatten(s))
     s.sort()
+    print(s)
     xyz = input('hit return to continue')
     #c = getCoursesInTerm(168,0,1)
     #c = getCoursesInTerm(174,0,1) # sp22
-    c = getCoursesInTerm(176,0,1) # fa22
+    #c = getCoursesInTerm(176,0,1) # fa22
+    c = getCoursesInTerm(178,0,1) # sp23
     print(c)
     ids = []
     courses = {}
@@ -1193,7 +1170,7 @@ def add_evals(section=0):
         courses[str(C['id'])] = C
         ids.append(str(C['id']))
 
-    ask = 0
+    ask = 1
     data = {'position':2, 'hidden':hidden}
 
     for i in ids:
@@ -1203,7 +1180,7 @@ def add_evals(section=0):
         if a == 'q': return
         u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s/tabs/context_external_tool_1953" % i
         r3 = requests.put(u2, headers=header, params=data)
-        print(r3)
+        print(r3.text)
         time.sleep(0.600)
 
 
@@ -1396,7 +1373,6 @@ def set_ext_tools():
 
 if __name__ == "__main__":
     options = { 1: ['Cross check schedule with ztc responses',make_ztc_list] ,
-      30: ['List latestart classes', list_latestarts ],
       2: ['Add announcements to homepage', change_course_ann_homepage],
       3: ['Cross-list classes', xlist ],
       4: ['List students who passed quiz X', get_quiz_passers],
@@ -1419,12 +1395,13 @@ if __name__ == "__main__":
       21: ['Add announcements to homepage', change_course_ann_homepage],
       22: ['Get a course info by id',getCourses],
       23: ['Reset course conclude date',update_course_conclude],
-      #24: ['Add course evalse to whole semester',instructor_list_to_activate_evals],
+      #24: ['Add course evals to whole semester',instructor_list_to_activate_evals],
      25: ['ext tools',get_ext_tools],
      26: ['set ext tools',set_ext_tools],
      27: ['Fine tune term dates and winter session', course_dates_terms],
      28: ['Cross list a semester from file', semester_cross_lister],
      29: ['Check all courses & their sections in semester', all_semester_course_sanity_check],
+      30: ['List latestart classes', list_latestarts ],
      # TODO wanted: group shell for each GP (guided pathway) as a basic student services gateway....
      #
     }
@@ -1443,4 +1420,4 @@ if __name__ == "__main__":
     resp = input('Choose: ')
 
     # Call the function in the options dict
-    options[ int(resp)][1]()
+    options[ int(resp)][1]()
\ No newline at end of file

From e85f2cb29d8cb1f3983f660a95dc1ddee9205de5 Mon Sep 17 00:00:00 2001
From: Peter Howell
Date: Wed, 22 Mar 2023 12:52:05 -0700
Subject: [PATCH 2/5] test commit surface

---
 courses.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/courses.py b/courses.py
index 547d699..13bfbb0 100644
--- a/courses.py
+++ b/courses.py
@@ -1,4 +1,4 @@
-
+# test
 import json, re, requests, codecs, sys, time, funcy
 import pandas as pd
 from tabulate import tabulate

From 3c9a9ef66544c43a04f68b85e45f32ea370b95e9 Mon Sep 17 00:00:00 2001
From: Coding with Peter
Date: Wed, 22 Mar 2023 14:02:06 -0700
Subject: [PATCH 3/5] change name of secrets file

---
 .gitignore     |  2 +-
 cq_demo.py     |  2 +-
 curric2022.py  |  2 +-
 curriculum.py  |  2 +-
 depricated.py  | 30 ++++++++++++++++++++++++++++++
 gpt.py         |  2 +-
 interactive.py |  2 +-
 pipelines.py   | 30 ++----------------------------
 tasks.py       |  2 +-
 tempget.py     |  2 +-
 10 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/.gitignore b/.gitignore
index 55dc7c6..54b9e55 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-secrets.py
+canvas_secrets.py
 *.bak
 .ipynb_checkpoints
 104ab42f11
diff --git a/cq_demo.py b/cq_demo.py
index 4fb769f..cf3f8d3 100644
--- a/cq_demo.py
+++ b/cq_demo.py
@@ -1,5 +1,5 @@
 import codecs, json, requests
-from secrets import cq_token, ph_token
+from canvas_secrets import cq_token, ph_token
 token = cq_token
 url = 'https://ilearn.gavilan.edu'
 header = {'Authorization': 'Bearer ' + token}
diff --git a/curric2022.py b/curric2022.py
index 01918f0..2d29770 100644
--- a/curric2022.py
+++ b/curric2022.py
@@ -13,7 +13,7 @@ from bs4 import BeautifulSoup as bs
 leafcount = 0
 displaynames = []
 
-from secrets import cq_user, cq_pasw
+from canvas_secrets import cq_user, cq_pasw
 
 
 CQ_URL = "https://secure.curricunet.com/scripts/webservices/generic_meta/clients/versions/v4/gavilan.cfc"
diff --git a/curriculum.py b/curriculum.py
index db778bb..1ce780d 100644
--- a/curriculum.py
+++ b/curriculum.py
@@ -13,7 +13,7 @@ import pandas as pd
 import sys, locale, re
 
 from pipelines import getSemesterSchedule
-from secrets import cq_url, cq_user, cq_pasw
+from canvas_secrets import cq_url, cq_user, cq_pasw
 
 
 #sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
diff --git a/depricated.py b/depricated.py
index 8590302..c8f19c0 100644
--- a/depricated.py
+++ b/depricated.py
@@ -4,6 +4,36 @@
 # from pipelines - canvas data
 
+
+
+online_courses = {}
+def prep_online_courses_df():
+    global online_courses
+    schedule = current_schedule() # from banner
+    online_courses = schedule[lambda x: x.type=='online']
+
+def course_is_online(crn):
+    global online_courses
+    #print "looking up: " + str(crn)
+    #print online_courses
+    course = online_courses[lambda x: x.crn==int(crn)]
+    return len(course)
+
+def get_crn_from_name(name):
+    #print "name is: "
+    #print(name)
+    m = re.search( r'(\d\d\d\d\d)', name)
+    if m: return int(m.groups(1)[0])
+    else: return 0
+
+def get_enrlmts_for_user(user,enrollments):
+    #active enrollments
+    u_en = enrollments[ lambda x: (x['user_id'] == user) & (x['workflow']=='active') ]
+    return u_en[['type','course_id']]
+
+
+
+
 """
 timestamp = nowAsStr()
diff --git a/gpt.py b/gpt.py
index 97ebf1e..42dfe0d 100644
--- a/gpt.py
+++ b/gpt.py
@@ -1,7 +1,7 @@
 import os, json, sys
 import openai
 
-from secrets import openai_org, openai_api_key
+from canvas_secrets import openai_org, openai_api_key
 
 
 openai.organization = "org-66WLoZQEtBrO42Z9S8rfd10M"
diff --git a/interactive.py b/interactive.py
index 1bb445f..33d3413 100644
--- a/interactive.py
+++ b/interactive.py
@@ -13,7 +13,7 @@ from importlib import reload
 import server
 import localcache
 from server import *
-from secrets import flask_secretkey
+from canvas_secrets import flask_secretkey
 
 
 q = Queue()
diff --git a/pipelines.py b/pipelines.py
index fb05f00..0aa8d88 100644
--- a/pipelines.py
+++ b/pipelines.py
@@ -11,8 +11,8 @@ import sys, shutil, hmac, hashlib, base64, schedule, time, pathlib, datetime
 import pdb
 from collections import defaultdict
 from deepdiff import DeepDiff
-from secrets import apiKey, apiSecret, FTP_SITE, FTP_USER, FTP_PW, GOO, GOO_PIN, token, url, domain, account_id, header, g_id, g_secret
-from secrets import instructure_url, instructure_username, instructure_private_key
+from canvas_secrets import apiKey, apiSecret, FTP_SITE, FTP_USER, FTP_PW, GOO, GOO_PIN, token, url, domain, account_id, header, g_id, g_secret
+from canvas_secrets import instructure_url, instructure_username, instructure_private_key
 
 
 
@@ -429,32 +429,6 @@ def getSemesterSchedule(short='sp21'): # I used to be current_sch
 
 
 
 
-online_courses = {}
-def prep_online_courses_df():
-    global online_courses
-    schedule = current_schedule() # from banner
-    online_courses = schedule[lambda x: x.type=='online']
-
-def course_is_online(crn):
-    global online_courses
-    #print "looking up: " + str(crn)
-    #print online_courses
-    course = online_courses[lambda x: x.crn==int(crn)]
-    return len(course)
-
-def get_crn_from_name(name):
-    #print "name is: "
-    #print(name)
-    m = re.search( r'(\d\d\d\d\d)', name)
-    if m: return int(m.groups(1)[0])
-    else: return 0
-
-def get_enrlmts_for_user(user,enrollments):
-    #active enrollments
-    u_en = enrollments[ lambda x: (x['user_id'] == user) & (x['workflow']=='active') ]
-    return u_en[['type','course_id']]
-
-
 ################
 ################  CANVAS DATA
diff --git a/tasks.py b/tasks.py
index 611d215..5f8e28b 100644
--- a/tasks.py
+++ b/tasks.py
@@ -17,7 +17,7 @@ import pysftp, os, datetime, requests, re, json, sqlite3, codecs, csv, sys
 import funcy, os.path, shutil, urllib
 from datetime import datetime
 
-from secrets import badgr_target, badgr_hd
+from canvas_secrets import badgr_target, badgr_hd
 
 
 if os.name != 'posix':
diff --git a/tempget.py b/tempget.py
index 79193af..87dbe72 100644
--- a/tempget.py
+++ b/tempget.py
@@ -11,7 +11,7 @@ import re
 import time
 
 
-from secrets import banner_url1, banner_url2, GOO, GOO_PIN, otter_login, otter_pw
+from canvas_secrets import banner_url1, banner_url2, GOO, GOO_PIN, otter_login, otter_pw
 
 
 

From 78be1c4186d8a5446f7dfffe82b0212ec6ba3cf5 Mon Sep 17 00:00:00 2001
From: Coding with Peter
Date: Wed, 22 Mar 2023 14:14:44 -0700
Subject: [PATCH 4/5] fixing vscode warnings

---
 courses.py    |  30 +----
 depricated.py | 274 ++++++++++++++++++++++++++++++++++++++++++++++++++
 localcache.py | 241 --------------------------------------------
 3 files changed, 279 insertions(+), 266 deletions(-)

diff --git a/courses.py b/courses.py
index 691757b..99ddaa0 100644
--- a/courses.py
+++ b/courses.py
@@ -966,8 +966,12 @@ def enroll_bulk_students_bydept(course_id, depts, the_term="172", cautious=1):
 
                 if s: s = s[0]
                 print("Removing: %s" % s[0])
-                    r1 = unenroll_student(str(course_id), stem_enrollments[j]['id'])
+
+                    ## TODO not done here
+                    # r1 = unenroll_student(str(course_id), stem_enrollments[j]['id'])
                     #print(r1)
+
+
                     time.sleep(0.600)
             except Exception as e:
                 print("Something went wrong with id %s, %s, %s" % (j, str(s), str(e)))
@@ -1061,30 +1065,6 @@ def enroll_o_s_students():
 
 
     print("Enrolled %i and unenrolled %i students in STEM shell" % (es,us))
     print("Enrolled %i students in Orientation shell" % eo)
-
-##########
-##########   CALCULATING SEMESTER STUFF
-##########
-
-
-def summarize_proportion_online_classes(u):
-    # u is a "group" from the groupby fxn
-    #print u
-    if NUM_ONLY:
-        if ((1.0 * u.sum()) / u.size) > 0.85: return '2'
-        if ((1.0 * u.sum()) / u.size) < 0.15: return '0'
-        return '1'
-    else:
-        if ((1.0 * u.sum()) / u.size) > 0.85: return 'online-only'
-        if ((1.0 * u.sum()) / u.size) < 0.15: return 'f2f-only'
-        return 'mixed'
-
-def summarize_num_term_classes(u):
-    # u is a "group" from the groupby fxn
-    # term is sp18 now
-    #print u
-    return u.size
-
 
 
diff --git a/depricated.py b/depricated.py
index c8f19c0..91a2a21 100644
--- a/depricated.py
+++ b/depricated.py
@@ -254,6 +254,34 @@ def serve():
 """
 
+### courses.py
+
+
+##########
+##########   CALCULATING SEMESTER STUFF
+##########
+
+
+def summarize_proportion_online_classes(u):
+    # u is a "group" from the groupby fxn
+    #print u
+    if NUM_ONLY:
+        if ((1.0 * u.sum()) / u.size) > 0.85: return '2'
+        if ((1.0 * u.sum()) / u.size) < 0.15: return '0'
+        return '1'
+    else:
+        if ((1.0 * u.sum()) / u.size) > 0.85: return 'online-only'
+        if ((1.0 * u.sum()) / u.size) < 0.15: return 'f2f-only'
+        return 'mixed'
+
+def summarize_num_term_classes(u):
+    # u is a "group" from the groupby fxn
+    # term is sp18 now
+    #print u
+    return u.size
+
+
+
 
 
 
@@ -379,6 +407,252 @@ def matchstyle():
 
 ##### from localcache
 
+
+def user_role_and_online():
+    # cross list users, classes enrolled, and their roles
+    global role_table, term_courses
+
+    role_table = enrollment_file()
+    user_table = users_file()
+    user_table = user_table[ user_table['name']!="Test Student" ]
+    term_table = term_file()
+    current = term_table[lambda d: d.course_section=='2020 Spring'] # current semester from canvas
+    term_id = current['id'].values[0]
+    course_table = courses_file() # from canvas
+    schedule = current_schedule() # from banner...
+
+    term_courses = course_table[lambda d: d.termid==term_id] # courses this semester ... now add a crn column
+    term_courses['crn'] = term_courses['code'].map( lambda x: get_crn_from_name(x) )
+    # add is_online flag (for courses listed in schedule as online-only)
+    term_courses['is_online'] = term_courses['crn'].map( lambda x: course_is_online( x ) ) # kinda redundant
+    ban_can = term_courses.merge(schedule,on='crn',how='left') #join the schedule from banner to the courses from canvas
+
+    role_table = role_table.where(lambda x: x.workflow=='active')
+
+    # this join limits to current semester if 'inner', or all semesters if 'left'
+    courses_and_enrol = role_table.merge(ban_can,left_on='course_id',right_on='id', how='left')
+
+    user_table = user_table.drop(columns="rootactid tz created vis school position gender locale public bd cc state".split(" "))
+    c_e_user = courses_and_enrol.merge(user_table,left_on='user_id',right_on='id',how='left')
+
+
+    prop_online = pd.DataFrame(c_e_user.groupby(['user_id'])['is_online'].aggregate(summarize_proportion_online_classes).rename('proportion_online'))
+    num_trm_crs = pd.DataFrame(c_e_user.groupby(['user_id'])['is_online'].aggregate(summarize_num_term_classes).rename('num_term_crs'))
+    stu_tch_rol = pd.DataFrame(c_e_user.groupby(['user_id'])['type'].aggregate(summarize_student_teacher_role).rename('main_role'))
+    user_table = user_table.merge(prop_online,left_on='id',right_index=True)
+    user_table = user_table.merge(num_trm_crs,left_on='id',right_index=True)
+    user_table = user_table.merge(stu_tch_rol,left_on='id',right_index=True)
+
+    # remove name-less entries
+    user_table = user_table.where(lambda x: (x.canvasid!='') ) # math.isnan(x.canvasid))
+
+    return user_table
+
+#print user_table.query('proportion_online=="online-only"')
+    #print user_table.query('main_role=="teacher"')
+    #user_table.to_csv('canvas_data/users_online.csv')
+
+
+    """e_qry = "CREATE TABLE IF NOT EXISTS enrollments (
+        id integer PRIMARY KEY,
+        name text NOT NULL,
+        begin_date text,
+        end_date text
+    );"""
+
+"""
+
+['CREATE INDEX "idx_req_userid" ON "requests" ("id","courseid","userid" );',
+ 'CREATE INDEX "idx_users_id" ON "users" ("id","canvasid", );',
+ 'CREATE INDEX "idx_term_id" ON "terms" ("id","canvasid" );',
+ 'CREATE INDEX "idx_enrollment" ON "enrollment" ("cid","course_id","user_id" );',
+ 'CREATE INDEX "idx_courses" ON "courses" ("id","canvasid","termid","code","name" );' ]
+
+
+took 6 seconds
+
+
+select * from users where name = "Peter Howell"
+
+select * from users join requests on users.id = requests.userid where name = "Peter Howell"
+20k rows in 1.014 seconds!!  with index above
+
+without: killed it after 120 seconds
+
+select timestamp, url, useragent, httpmethod, remoteip, controller from users join requests on users.id = requests.userid where name = "Peter Howell" order by requests.timestamp
+
+
+
+select courses.name, courses.code, terms.name, requests.url from courses
+join terms on courses.termid = terms.id
+join requests on courses.id = requests.courseid
+where terms.name='2020 Spring ' and courses.code='ACCT20 SP20 40039'
+order by courses.code
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+
+
+def more_unused_xreferencing():
+    """continue
+
+    for line in lines:
+        r = requests_line(line.decode('utf-8'),filei)
+        if filei < 5:
+            print(r)
+        else:
+            break
+        filei += 1
+
+
+    by_date_course = defaultdict( lambda: defaultdict(int) )
+    by_date_user = defaultdict( lambda: defaultdict(int) )
+    df_list = []
+    df_list_crs = []
+    users = defaultdict( lambda: defaultdict(int) )
+    #by_user = {}
+    #by_course = {}
+    i = 0
+
+    limit = 300
+
+        #print(r)
+        date = dt.strptime( r['timestamp'], "%Y-%m-%d %H:%M:%S.%f" )
+        if r['userid'] in users:
+            users[r['userid']]['freq'] += 1
+            if users[r['userid']]['lastseen'] < date:
+                users[r['userid']]['lastseen'] = date
+        else:
+            users[r['userid']] = {"id":r['userid'], "lastseen":date, "freq":1}
+        by_date_course[ r['day'] ][ r['courseid'] ] += 1
+        by_date_user[ r['day'] ][ r['userid'] ] += 1
+        #if r['userid'] in by_user: by_user[r['userid']] += 1
+        #else: by_user[r['userid']] = 1
+        #if r['courseid'] in by_course: by_course[r['courseid']] += 1
+        #else: by_course[r['courseid']] = 1
+        #mylog.write("by_user = " + str(by_user))
+        df_list.append(pd.DataFrame(data=by_date_user))
+        df_list_crs.append(pd.DataFrame(data=by_date_course))
+        i += 1
+        if i > limit: break
+    #mylog.write("by_date_course = ")
+    result = pd.concat(df_list, axis=1,join='outer')
+    result_crs = pd.concat(df_list_crs, axis=1,join='outer')
+    #print result_crs
+    mylog.write(result.to_csv())
+    # get users
+    usersf = user_role_and_online()
+    merged = pd.merge(result,usersf,left_index=True,right_on='id', how='left')
+    #dropkeys = "rootactid tz created vis school position gender locale public bd cc state".split(" ")
+    #merged.drop(dropkeys, inplace=True, axis=1)
+    mglog = open(local_data_folder+'userlogs.csv','w')
+    mglog.write(merged.to_csv())
+
+    # get courses
+    courses = courses_file()
+    merged2 = pd.merge(result_crs,courses,left_index=True,right_on='id', how='left')
+    dropkeys = "rootactid wikiid".split(" ")
+    merged2.drop(dropkeys, inplace=True, axis=1)
+    mglogc = open(local_data_folder + 'courselogs.csv','w')
+    mglogc.write(merged2.to_csv())
+
+    # a users / freq / lastseen file
+    ufl = open(local_data_folder + "user_freq.json","w")
+    today = datetime.datetime.today()
+    for U in list(users.keys()):
+        date = users[U]['lastseen']
+        users[U]['lastseen'] = date.strftime("%Y-%m-%d")
+        diff = today - date
+        users[U]['daysago'] = str(diff.days)
+        users[U]['hoursago'] = str(int(diff.total_seconds()/3600))
+    us_frame = pd.DataFrame.from_dict(users,orient='index')
+    us_with_names = pd.merge(us_frame,usersf,left_index=True,right_on='id', how='left')
+    #dropkeys = "id id_x id_y globalid rootactid tz created vis school position gender locale public bd cc state".split(" ")
+    #us_with_names.drop(dropkeys, inplace=True, axis=1)
+    print(us_with_names)
+    ufl.write( json.dumps(users, indent=4) )
+    ufl.close()
+    mglogd = open('canvas_data/user_freq.csv','w')
+    mglogd.write(us_with_names.to_csv())
+    """
+
+    """ -- projects table
+    CREATE TABLE IF NOT EXISTS projects (
+        id integer PRIMARY KEY,
+        name text NOT NULL,
+        begin_date text,
+        end_date text
+    );
+    """
+    pass
+
+
+def users_p_file():
+    uf = users_file()
+    pf = pseudonym_file()
+    #print pf
+    upf = uf.merge(pf,left_on='id',right_on='user_id',how='left')
+    return upf
+
+    """
+    def com_channel_dim():
+        all = os.listdir(local_data_folder)
+        all.sort(key=lambda x: os.stat(os.path.join(local_data_folder,x)).st_mtime)
+        all.reverse()
+        #print "sorted file list:"
+        #print all
+        for F in all:
+            if re.search('communication_channel_dim',F):
+                cc_file = F
+                break
+        print("most recent communication channel file is " + cc_file)
+        cc_users = []
+        for line in gzip.open(local_data_folder + cc_file,'r'):
+            line_dict = dict(list(zip(cc_format, line.split("\t"))))
+            #line_dict['globalid'] = line_dict['globalid'].rstrip()
+            cc_users.append(line_dict)
+        df = pd.DataFrame(cc_users)
+        return df
+    """
+
+
+    """grp_sum_qry = ""SELECT u.sortablename, r.timeblock, SUM(r.viewcount), u.canvasid AS user, c.canvasid AS course
+    FROM requests_sum1 AS r
+    JOIN courses AS c ON e.course_id=c.id
+    JOIN enrollment as e ON r.courseid=c.id
+    JOIN users AS u ON u.id=e.user_id
+    WHERE c.canvasid=%s AND e."type"="StudentEnrollment"
+    GROUP BY u.id,c.id,r.timeblock
+    ORDER BY u.sortablename DESC, r.timeblock"" % course_id
+
+    q = ""SELECT u.sortablename, r.timeblock, r.viewcount, u.canvasid AS user, c.canvasid AS course
+    FROM requests_sum1 AS r
+    JOIN courses AS c ON e.course_id=c.id
+    JOIN enrollment as e ON r.courseid=c.id
+    JOIN users AS u ON u.id=e.user_id
+    WHERE c.canvasid=%s AND e."type"="StudentEnrollment" AND u.canvasid=810
+    ORDER BY u.sortablename DESC, r.timeblock"" % course_id
+
+
+    q = ""SELECT u.sortablename, r.timeblock, r.viewcount, u.canvasid AS user, c.canvasid AS course FROM enrollment as e JOIN courses AS c ON e.course_id=c.id
+JOIN requests_sum1 AS r ON r.courseid=c.id
+JOIN users AS u ON u.id=e.user_id
+WHERE c.canvasid=%s AND e."type"="StudentEnrollment"
+ORDER BY u.sortablename, r.timeblock"" % course_id"""
+
+
+
+
 stem_course_id = '11015' # TODO # NO LONGER USED - SEE COURSES
 
diff --git a/localcache.py b/localcache.py
index 3da9fb0..978a9a2 100644
--- a/localcache.py
+++ b/localcache.py
@@ -1564,195 +1564,6 @@ def semester_enrollments(verbose=0):
 
 
 # Overview of student hits in a course. Return a (pandas??) table student/timeblock/hits 6 * 7 * 7 items per student.
 
-
-
-    """e_qry = "CREATE TABLE IF NOT EXISTS enrollments (
-        id integer PRIMARY KEY,
-        name text NOT NULL,
-        begin_date text,
-        end_date text
-    );"""
-
-"""
-
-['CREATE INDEX "idx_req_userid" ON "requests" ("id","courseid","userid" );',
- 'CREATE INDEX "idx_users_id" ON "users" ("id","canvasid", );',
- 'CREATE INDEX "idx_term_id" ON "terms" ("id","canvasid" );',
- 'CREATE INDEX "idx_enrollment" ON "enrollment" ("cid","course_id","user_id" );',
- 'CREATE INDEX "idx_courses" ON "courses" ("id","canvasid","termid","code","name" );' ]
-
-
-took 6 seconds
-
-
-select * from users where name = "Peter Howell"
-
-select * from users join requests on users.id = requests.userid where name = "Peter Howell"
-20k rows in 1.014 seconds!!  with index above
-
-without: killed it after 120 seconds
-
-select timestamp, url, useragent, httpmethod, remoteip, controller from users join requests on users.id = requests.userid where name = "Peter Howell" order by requests.timestamp
-
-
-
-select courses.name, courses.code, terms.name, requests.url from courses
-join terms on courses.termid = terms.id
-join requests on courses.id = requests.courseid
-where terms.name='2020 Spring ' and courses.code='ACCT20 SP20 40039'
-order by courses.code
-
-
-
-
-
-
-
-
-
-
-
-
-"""
-
-
-def more_unused_xreferencing():
-    """continue
-
-    for line in lines:
-        r = requests_line(line.decode('utf-8'),filei)
-        if filei < 5:
-            print(r)
-        else:
-            break
-        filei += 1
-
-
-    by_date_course = defaultdict( lambda: defaultdict(int) )
-    by_date_user = defaultdict( lambda: defaultdict(int) )
-    df_list = []
-    df_list_crs = []
-    users = defaultdict( lambda: defaultdict(int) )
-    #by_user = {}
-    #by_course = {}
-    i = 0
-
-    limit = 300
-
-        #print(r)
-        date = dt.strptime( r['timestamp'], "%Y-%m-%d %H:%M:%S.%f" )
-        if r['userid'] in users:
-            users[r['userid']]['freq'] += 1
-            if users[r['userid']]['lastseen'] < date:
-                users[r['userid']]['lastseen'] = date
-        else:
-            users[r['userid']] = {"id":r['userid'], "lastseen":date, "freq":1}
-        by_date_course[ r['day'] ][ r['courseid'] ] += 1
-        by_date_user[ r['day'] ][ r['userid'] ] += 1
-        #if r['userid'] in by_user: by_user[r['userid']] += 1
-        #else: by_user[r['userid']] = 1
-        #if r['courseid'] in by_course: by_course[r['courseid']] += 1
-        #else: by_course[r['courseid']] = 1
-        #mylog.write("by_user = " + str(by_user))
-        df_list.append(pd.DataFrame(data=by_date_user))
-        df_list_crs.append(pd.DataFrame(data=by_date_course))
-        i += 1
-        if i > limit: break
-    #mylog.write("by_date_course = ")
-    result = pd.concat(df_list, axis=1,join='outer')
-    result_crs = pd.concat(df_list_crs, axis=1,join='outer')
-    #print result_crs
-    mylog.write(result.to_csv())
-    # get users
-    usersf = user_role_and_online()
-    merged = pd.merge(result,usersf,left_index=True,right_on='id', how='left')
-    #dropkeys = "rootactid tz created vis school position gender locale public bd cc state".split(" ")
-    #merged.drop(dropkeys, inplace=True, axis=1)
-    mglog = open(local_data_folder+'userlogs.csv','w')
-    mglog.write(merged.to_csv())
-
-    # get courses
-    courses = courses_file()
-    merged2 = pd.merge(result_crs,courses,left_index=True,right_on='id', how='left')
-    dropkeys = "rootactid wikiid".split(" ")
-    merged2.drop(dropkeys, inplace=True, axis=1)
-    mglogc = open(local_data_folder + 'courselogs.csv','w')
-    mglogc.write(merged2.to_csv())
-
-    # a users / freq / lastseen file
-    ufl = open(local_data_folder + "user_freq.json","w")
-    today = datetime.datetime.today()
-    for U in list(users.keys()):
-        date = users[U]['lastseen']
-        users[U]['lastseen'] = date.strftime("%Y-%m-%d")
-        diff = today - date
-        users[U]['daysago'] = str(diff.days)
-        users[U]['hoursago'] = str(int(diff.total_seconds()/3600))
-    us_frame = pd.DataFrame.from_dict(users,orient='index')
-    us_with_names = pd.merge(us_frame,usersf,left_index=True,right_on='id', how='left')
-    #dropkeys = "id id_x id_y globalid rootactid tz created vis school position gender locale public bd cc state".split(" ")
-    #us_with_names.drop(dropkeys, inplace=True, axis=1)
-    print(us_with_names)
-    ufl.write( json.dumps(users, indent=4) )
-    ufl.close()
-    mglogd = open('canvas_data/user_freq.csv','w')
-    mglogd.write(us_with_names.to_csv())
-    """
-
-    """ -- projects table
-    CREATE TABLE IF NOT EXISTS projects (
-        id integer PRIMARY KEY,
-        name text NOT NULL,
-        begin_date text,
-        end_date text
-    );
-    """
-    pass
-
-def user_role_and_online():
-    # cross list users, classes enrolled, and their roles
-    global role_table, term_courses
-
-    role_table = enrollment_file()
-    user_table = users_file()
-    user_table = user_table[ user_table['name']!="Test Student" ]
-    term_table = term_file()
-    current = term_table[lambda d: d.course_section=='2020 Spring'] # current semester from canvas
-    term_id = current['id'].values[0]
-    course_table = courses_file() # from canvas
-    schedule = current_schedule() # from banner...
-
-    term_courses = course_table[lambda d: d.termid==term_id] # courses this semester ... now add a crn column
-    term_courses['crn'] = term_courses['code'].map( lambda x: get_crn_from_name(x) )
-    # add is_online flag (for courses listed in schedule as online-only)
-    term_courses['is_online'] = term_courses['crn'].map( lambda x: course_is_online( x ) ) # kinda redundant
-    ban_can = term_courses.merge(schedule,on='crn',how='left') #join the schedule from banner to the courses from canvas
-
-    role_table = role_table.where(lambda x: x.workflow=='active')
-
-    # this join limits to current semester if 'inner', or all semesters if 'left'
-    courses_and_enrol = role_table.merge(ban_can,left_on='course_id',right_on='id', how='left')
-
-    user_table = user_table.drop(columns="rootactid tz created vis school position gender locale public bd cc state".split(" "))
-    c_e_user = courses_and_enrol.merge(user_table,left_on='user_id',right_on='id',how='left')
-
-
-    prop_online = pd.DataFrame(c_e_user.groupby(['user_id'])['is_online'].aggregate(summarize_proportion_online_classes).rename('proportion_online'))
-    num_trm_crs = pd.DataFrame(c_e_user.groupby(['user_id'])['is_online'].aggregate(summarize_num_term_classes).rename('num_term_crs'))
-    stu_tch_rol = pd.DataFrame(c_e_user.groupby(['user_id'])['type'].aggregate(summarize_student_teacher_role).rename('main_role'))
-    user_table = user_table.merge(prop_online,left_on='id',right_index=True)
-    user_table = user_table.merge(num_trm_crs,left_on='id',right_index=True)
-    user_table = user_table.merge(stu_tch_rol,left_on='id',right_index=True)
-
-    # remove name-less entries
-    user_table = user_table.where(lambda x: (x.canvasid!='') ) # math.isnan(x.canvasid))
-
-    return user_table
-
-#print user_table.query('proportion_online=="online-only"')
-    #print user_table.query('main_role=="teacher"')
-    #user_table.to_csv('canvas_data/users_online.csv')
-
-
-
 
 def comm_channel_file():
@@ -1797,58 +1608,6 @@ def pseudonym_file():
     df = pd.DataFrame(all_users)
     return df
 
-def users_p_file():
-    uf = users_file()
-    pf = pseudonym_file()
-    #print pf
-    upf = uf.merge(pf,left_on='id',right_on='user_id',how='left')
-    return upf
-
-    """
-    def com_channel_dim():
-        all = os.listdir(local_data_folder)
-        all.sort(key=lambda x: os.stat(os.path.join(local_data_folder,x)).st_mtime)
-        all.reverse()
-        #print "sorted file list:"
-        #print all
-        for F in all:
-            if re.search('communication_channel_dim',F):
-                cc_file = F
-                break
-        print("most recent communication channel file is " + cc_file)
-        cc_users = []
-        for line in gzip.open(local_data_folder + cc_file,'r'):
-            line_dict = dict(list(zip(cc_format, line.split("\t"))))
-            #line_dict['globalid'] = line_dict['globalid'].rstrip()
-            cc_users.append(line_dict)
-        df = pd.DataFrame(cc_users)
-        return df
-    """
-
-
-    """grp_sum_qry = ""SELECT u.sortablename, r.timeblock, SUM(r.viewcount), u.canvasid AS user, c.canvasid AS course
-    FROM requests_sum1 AS r
-    JOIN courses AS c ON e.course_id=c.id
-    JOIN enrollment as e ON r.courseid=c.id
-    JOIN users AS u ON u.id=e.user_id
-    WHERE c.canvasid=%s AND e."type"="StudentEnrollment"
-    GROUP BY u.id,c.id,r.timeblock
-    ORDER BY u.sortablename DESC, r.timeblock"" % course_id
-
-    q = ""SELECT u.sortablename, r.timeblock, r.viewcount, u.canvasid AS user, c.canvasid AS course
-    FROM requests_sum1 AS r
-    JOIN courses AS c ON e.course_id=c.id
-    JOIN enrollment as e ON r.courseid=c.id
-    JOIN users AS u ON u.id=e.user_id
-    WHERE c.canvasid=%s AND e."type"="StudentEnrollment" AND u.canvasid=810
-    ORDER BY u.sortablename DESC, r.timeblock"" % course_id
-
-
-    q = ""SELECT u.sortablename, r.timeblock, r.viewcount, u.canvasid AS user, c.canvasid AS course FROM enrollment as e JOIN courses AS c ON e.course_id=c.id
-JOIN requests_sum1 AS r ON r.courseid=c.id
-JOIN users AS u ON u.id=e.user_id
-WHERE c.canvasid=%s AND e."type"="StudentEnrollment"
-ORDER BY u.sortablename, r.timeblock"" % course_id"""
 
 
 def abcd():
     setup_table('index')

From a919e4d2eaf2cdd24085c10bf93a5c8cc1f2822f Mon Sep 17 00:00:00 2001
From: peter
Date: Wed, 22 Mar 2023 21:26:10 +0000
Subject: [PATCH 5/5] Revert "Merge branch 'surface' into 'master'"

This reverts merge request !1
---
 courses.py | 113 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 68 insertions(+), 45 deletions(-)

diff --git a/courses.py b/courses.py
index 57c18b6..99ddaa0 100644
--- a/courses.py
+++ b/courses.py
@@ -1,7 +1,7 @@
-# test
-import json, re, requests, codecs, sys, time, funcy
+
+import json, re, requests, codecs, sys, time, funcy, os
 import pandas as pd
-from tabulate import tabulate
+#from tabulate import tabulate
 from dateutil import parser
 from datetime import datetime
 from util import print_table
@@ -249,9 +249,7 @@ def scrape_bookstore():
 # Input: xxxx_sched.json. Output: xxxx_latestarts.txt
 def list_latestarts():
     #term = input("Name of current semester file? (ex: sp18) ")
-    term = "sp23"  # sems[0]
-
-    dept_ignore = "JFT JLE CWE".split(" ")
+    term = "sp23" # sems[0]
 
     term_in = "cache/" + term + "_sched.json"
     term_out = "cache/" + term + "_latestarts.txt"
@@ -262,13 +260,12 @@ def list_latestarts():
     #print sched
     by_date = {}
     for C in sched:
-        dept = C['code'].split(" ")
-        dept = dept[0]
-        if dept in dept_ignore:
-            continue
-        print(dept)
         parts = C['date'].split("-")
         start = parts[0]
+        codes = C['code'].split(' ')
+        dept = codes[0]
+        if dept in ['JLE','JFT','CWE']:
+            continue
         if re.search('TBA',start): continue
         try:
             startd = parser.parse(start)
@@ -282,11 +279,11 @@ def list_latestarts():
         #print "Start: " + str(X)
         if len(by_date[X]) < 200:
             prettydate = X.strftime("%A, %B %d")
-            #print(prettydate + ": " + str(len(by_date[X])) + " courses")
+            print(prettydate + ": " + str(len(by_date[X])) + " courses")
             outfile.write(prettydate + ": " + str(len(by_date[X])) + " courses" + "\n")
             for Y in by_date[X]:
                 #print "\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher']
-                #print(Y)
+                print(Y)
                 #outfile.write("\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher'] + "\t" + Y['type'] +"\n")
                 outfile.write("\t" + Y['code'] + " " + Y['crn'] + "\t" + Y['teacher'] + "\t" + Y['type'] + "\t" + "\n")
 
@@ -335,14 +332,20 @@ def users_in_depts_live(depts=[], termid='171'):
 
 
 def course_enrollment(id=''):
+    print("Getting enrollments for course id %s" % str(id))
     if not id: id = input('Course id? ')
     t = url + '/api/v1/courses/%s/enrollments?role[]=StudentEnrollment' % str(id)
+    print(t)
     emts = fetch(t,0)
-    #print(emts)
+    print(emts)
     emt_by_id = {}
     for E in emts:
+        print(E)
+        try:
+            emt_by_id[E['user_id']] = E
+        except Exception as exp:
+            print("Skipped that class with this exception: %s" % str(exp))
-        emt_by_id[E['user_id']] = E
     ff = codecs.open('cache/courses/%s.json' % str(id), 'w', 'utf-8')
     ff.write(json.dumps(emt_by_id, indent=2))
     print( " %i results" % len(emts) )
@@ -507,11 +510,18 @@ GROUP BY c.code ORDER BY c.state, c.code""" % (S['id'],S['id'])
     outp2.write("\n\n---------\nNOT PUBLISHED\n\n" + json.dumps(notpub, indent=2))
 
 # Fetch all courses in a given term
-def getCoursesInTerm(term=0,show=1,active=0): # a list
-    # https://gavilan.instructure.com:443/api/v1/accounts/1/courses?published=true&enrollment_term_id=11
-    names = []
-    if not term:
-        term = getTerms(1,1)
+def getCoursesInTerm(term=0,get_fresh=1,show=0,active=0): # a list
+    if not term:
+        term = getTerms(1,1)
+    ff = 'cache/courses_in_term_%s.json' % str(term)
+    if not get_fresh:
+        if os.path.isfile(ff):
+            return json.loads( codecs.open(ff,'r','utf-8').read() )
+        else:
+            print(" -> couldn't find cached classes at: %s" % ff)
+
+    # https://gavilan.instructure.com:443/api/v1/accounts/1/courses?published=true&enrollment_term_id=11
+    names = []
     if active:
         active = "published=true&"
     else:
@@ -531,6 +541,7 @@ def getCoursesInTerm(term=0,get_fresh=1,show=0,active=0): # a list
         names.append(a['name'])
         info.append( [a['id'], a['name'], a['workflow_state'] ] )
     if show: print_table(info)
+    codecs.open(ff, 'w', 'utf-8').write(json.dumps(results,indent=2))
     return results
 
 
@@ -702,18 +713,32 @@ def all_semester_course_sanity_check():
     codecs.open('cache/courses_in_term_178.json','w','utf-8').write(json.dumps(c,indent=2))
     output = codecs.open('cache/courses_w_sections.csv','w','utf-8')
     output.write( ",".join(['what','id','parent_course_id','sis_course_id','name']) + "\n" )
+    output2 = codecs.open('cache/courses_checker.csv','w','utf-8')
+    output2.write( ",".join(['id','sis_course_id','name','state','students']) + "\n" )
     i = 0
     for course in c:
+        u2 = url + '/api/v1/courses/%s?include[]=total_students' % str(course['id'])
+        course['info'] = fetch(u2)
+        #print(json.dumps(course['info'],indent=2))
+        ts = '?'
+        try:
+            ts = course['info']['total_students']
+        except Exception as e:
+            pass
+        info = [ 'course', course['id'], '', course['sis_course_id'], course['name'], course['workflow_state'], ts ]
         info = list(map(str,info))
+        info2 = [ course['id'], course['sis_course_id'], course['name'], course['workflow_state'], ts ]
+        info2 = list(map(str,info2))
+        output2.write( ",".join(info2) + "\n" )
+        output2.flush()
+        print(info2)
-        info = [ 'course', course['id'], '', course['sis_course_id'], course['name'], ]
-        print(info)
         output.write( ",".join(info) + "\n" )
+        #uu = url + '/api/v1/courses/%s/sections' % str(course['id'])
+        #course['sections'] = fetch(uu)
+        #s_info = [ [ 'section', y['id'], y['course_id'], y['sis_course_id'], y['name'], y['total_students'] ] for y in course['sections'] ]
+        #for row in s_info:
+        #    print(row)
+        #    output.write( ",".join( map(str,row) ) + "\n" )
-        uu = url + '/api/v1/courses/%s/sections' % str(course['id'])
-        course['sections'] = fetch(uu)
-        s_info = [ [ 'section', y['id'], y['course_id'], y['sis_course_id'], y['name']] for y in course['sections'] ]
-        for row in s_info:
-            print(row)
-            output.write( ",".join( map(str,row) ) + "\n" )
         output.flush()
         i += 1
         if i % 5 == 0:
@@ -839,9 +864,9 @@ def unenroll_student(courseid,enrolid):
 
 
 def enroll_stem_students_live():
-    the_term = '176'
+    the_term = '178'
     do_removes = 0
-    depts = "MATH BIO CHEM CSIS PHYS GEOG ASTR ECOL ENVS ENGR".split(" ")
+    depts = "MATH BIO CHEM CSIS PHYS PSCI GEOG ASTR ECOL ENVS ENGR".split(" ")
     users_to_enroll = users_in_depts_live(depts, the_term) # term id
     stem_enrollments = course_enrollment(stem_course_id) # by user_id
 
@@ -1018,21 +1043,21 @@ def enroll_orientation_students():
                 s = cursor.fetchall()
                 if s: s = s[0]
-                print("Enrolling: %s" % s[0])
+                print(" + Enrolling: %s" % s[0])
                 t = url + '/api/v1/courses/%s/enrollments' % ori_shell_id
                 data = { 'enrollment[user_id]': j, 'enrollment[type]':'StudentEnrollment', 'enrollment[enrollment_state]': 'active' }
-                print(data)
+                #print(data)
                 r3 = requests.post(t, headers=header, params=data)
                 eee += 1
-                print(r3.text)
-                time.sleep(0.200)
+                #print(r3.text)
+                time.sleep(0.600)
             except Exception as e:
-                print("Something went wrong with id %s, %s, %s" % (j, str(s), str(e)))
+                print(" - Something went wrong with id %s, %s, %s" % (j, str(s), str(e)))
     return (eee,uuu)
 
 
 def enroll_o_s_students():
-    full_reload()
+    #full_reload()
 
     (es,us) = enroll_stem_students_live()
     (eo, uo) = enroll_orientation_students()
 
@@ -1125,20 +1150,18 @@ def instructor_list_to_activate_evals():
 
 
 def add_evals(section=0):
     # show or hide?
-    hidden = False
+    hidden = True
     #s = [ x.strip() for x in codecs.open('cache/sp21_eval_sections.txt','r').readlines()]
     #s = [ x.split(',')[4].split('::') for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
-    #s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
-    s = [ x.strip() for x in codecs.open('cache/sp23_eval_sections.csv','r').readlines()]
+    s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
+    print(s)
     s = list(funcy.flatten(s))
     s.sort()
-    print(s)
     xyz = input('hit return to continue')
     #c = getCoursesInTerm(168,0,1)
     #c = getCoursesInTerm(174,0,1) # sp22
-    #c = getCoursesInTerm(176,0,1) # fa22
-    c = getCoursesInTerm(178,0,1) # sp23
+    c = getCoursesInTerm(176,0,1) # fa22
     print(c)
     ids = []
     courses = {}
@@ -1150,7 +1173,7 @@ def add_evals(section=0):
         courses[str(C['id'])] = C
         ids.append(str(C['id']))
 
-    ask = 1
+    ask = 0
     data = {'position':2, 'hidden':hidden}
 
     for i in ids:
@@ -1160,7 +1183,7 @@ def add_evals(section=0):
         if a == 'q': return
         u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s/tabs/context_external_tool_1953" % i
         r3 = requests.put(u2, headers=header, params=data)
-        print(r3.text)
+        print(r3)
         time.sleep(0.600)
 
 
@@ -1353,6 +1376,7 @@ def set_ext_tools():
 
 if __name__ == "__main__":
     options = { 1: ['Cross check schedule with ztc responses',make_ztc_list] ,
+      30: ['List latestart classes', list_latestarts ],
       2: ['Add announcements to homepage', change_course_ann_homepage],
       3: ['Cross-list classes', xlist ],
       4: ['List students who passed quiz X', get_quiz_passers],
@@ -1375,13 +1399,12 @@ if __name__ == "__main__":
      21: ['Add announcements to homepage', change_course_ann_homepage],
      22: ['Get a course info by id',getCourses],
      23: ['Reset course conclude date',update_course_conclude],
-      #24: ['Add course evals to whole semester',instructor_list_to_activate_evals],
+      #24: ['Add course evalse to whole semester',instructor_list_to_activate_evals],
      25: ['ext tools',get_ext_tools],
      26: ['set ext tools',set_ext_tools],
      27: ['Fine tune term dates and winter session', course_dates_terms],
      28: ['Cross list a semester from file', semester_cross_lister],
      29: ['Check all courses & their sections in semester', all_semester_course_sanity_check],
-      30: ['List latestart classes', list_latestarts ],
      # TODO wanted: group shell for each GP (guided pathway) as a basic student services gateway....
      #
     }
@@ -1400,4 +1423,4 @@ if __name__ == "__main__":
     resp = input('Choose: ')
 
     # Call the function in the options dict
-    options[ int(resp)][1]()
\ No newline at end of file
+    options[ int(resp)][1]()