working but disorganized outcome import workflow

parent e53ee2b698
commit 6dfbb913e0

courses.py (22 lines changed)
@@ -1861,6 +1861,27 @@ def list_all_assignments():
         print(f"{a['name']}\t{p}\t{date}")
 
 
+def bulk_unenroll():
+    course_id = input("course id> ")
+    enrollments = fetch(f"{url}/api/v1/courses/{course_id}/enrollments")
+
+    for enrollment in enrollments:
+        enrollment_id = enrollment['id']
+        skiplist = ['51237','58362','237']
+        if str(enrollment_id) in skiplist:  # the API returns ids as ints; compare as strings
+            continue
+
+        # Build the URL for the DELETE API call
+        api_url = f"{url}/api/v1/courses/{course_id}/enrollments/{enrollment_id}"
+
+        # Make the DELETE request
+        response = requests.delete(api_url, headers=header)
+
+        # Check the response
+        if response.status_code == 200:
+            print(f"Successfully unenrolled student with id {enrollment_id} from course {course_id}.")
+        else:
+            print(f"Failed to unenroll student with id {enrollment_id} from course {course_id}. Error: {response.text}")
 
 
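A note on the DELETE call in bulk_unenroll(): the Canvas enrollments endpoint also accepts a task argument that controls whether the enrollment is concluded, deactivated, or deleted outright. A minimal sketch, assuming the same url and header globals the script already uses; task values are from the Canvas REST docs, and conclude keeps the grade history:

    import requests

    def unenroll(course_id, enrollment_id, task='conclude'):
        # task: 'conclude' keeps the grade record, 'delete' removes it,
        # 'inactivate' hides the student but keeps the enrollment row
        api_url = f"{url}/api/v1/courses/{course_id}/enrollments/{enrollment_id}"
        r = requests.delete(api_url, headers=header, params={'task': task})
        r.raise_for_status()
        return r.json()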
@@ -1907,6 +1928,7 @@ if __name__ == "__main__":
                 40: ['Enroll GOTT Workshops', enroll_gott_workshops_su23],
                 42: ['Add teacher to many shells', teacher_to_many_shells],
+                43: ['Bulk unenroll from course', bulk_unenroll],
                 # 24: ['Add course evals to whole semester',instructor_list_to_activate_evals],
                 # 21: ['Add announcements to homepage', change_course_ann_homepage],
                 # TODO wanted: group shell for each GP (guided pathway) as a basic student services gateway....
@@ -14,8 +14,11 @@ displaynames = []
 
 from canvas_secrets import cq_user, cq_pasw
 
+from outcomes import quick_add_course_outcomes
+
 CQ_URL = "https://secure.curricunet.com/scripts/webservices/generic_meta/clients/versions/v4/gavilan.cfc"
+CQ_URL = "https://mws.services.curriqunet.com/scripts/webservices/generic_meta/clients/versions/v4/gavilan.cfc"
 PARAM = "?returnFormat=json&method=getCourses"
 
 user = cq_user
@@ -669,6 +672,7 @@ def another_request(url,startat):
     newparam = "&skip=" + str(startat)
     print((url+newparam))
     r = requests.get(url+newparam, auth=(user,pasw))
+    #print(r.text)
     try:
         mydata = json.loads(r.text, strict=False)
     except Exception as e:
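another_request() pages through the curriQunet API by bumping the skip offset. A sketch of the full paging loop, under the assumption that the service returns an empty list once the offset passes the last record:

    import json, requests

    def fetch_all(base_url, user, pasw):
        results, startat = [], 0
        while True:
            r = requests.get(base_url + "&skip=" + str(startat), auth=(user, pasw))
            page = json.loads(r.text, strict=False)
            if not page:
                break
            results.extend(page)
            startat += len(page)
        return results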
@@ -851,11 +855,11 @@ if __name__ == "__main__":
     options = { 1: ['fetch all courses', fetch_all_classes],
                 2: ['process all classes', path_style_test],
                 3: ['courses - path style to html catalog', course_path_style_2_html],
-                4: ['courses - rank by all versions', course_rank],
-                5: ['fetch all programs', fetch_all_programs],
-                6: ['process all programs', path_style_prog],
-                9: ['show course outcomes', all_outcomes],
-                10: ['programs - path style to html catalog', path_style_2_html],
+                4: ['show course outcomes', all_outcomes],
+                5: ['courses - rank by all versions', course_rank],
+                10: ['fetch all programs', fetch_all_programs],
+                11: ['process all programs', path_style_prog],
+                12: ['programs - path style to html catalog', path_style_2_html],
               }
 
     print ('')
@@ -13,6 +13,8 @@ f = codecs.open('cache/slo/log.txt','w','utf-8')
 
 VERBOSE = 1
 
+TERM = '180'
+
 SLO_CURRENT_SOURCE = 'cache/slo/2018_slo.csv' # term 21
 #SLO_CURRENT_SOURCE = 'cache/slo/2020_slo.csv'
@@ -294,7 +296,7 @@ def create_dept_group(short):
     r = requests.post(t,data=new_group, headers=header)
     print(r.text)
 
-def outcomes_attached_to_courses(term=65,limitdept=''):
+def outcomes_attached_to_courses(term=TERM,limitdept=''):
     # For each class in a term, check to see if it has outcomes and/or
     # an outcome group attached to it.
     courses = getCoursesInTerm(term,show=0,active=0)
@@ -840,7 +842,7 @@ def all_linked_outcomes_in_term(termid=''):
     #terms = [172,174,176,178]
     #for t in terms:
    #     all_linked_outcomes_in_term_sub(str(t))
-    all_linked_outcomes_in_term_sub('180')
+    all_linked_outcomes_in_term_sub(TERM)
 
 
 def all_linked_outcomes_in_term_sub(termid=''):
@@ -1277,7 +1279,7 @@ def parse_ilearn_course_names_ALLSEMESTERS():
 
 
 
-def parse_ilearn_course_names(term='178',fresh=0,log=0):
+def parse_ilearn_course_names(term=TERM,fresh=0,log=0):
     non_matches = []
 
     courses = getCoursesInTerm(term,get_fresh=fresh)
outcomes2022.py (414 lines changed)
@@ -13,7 +13,8 @@
 # + Whether they are present in the relevant classes in iLearn
 # + Insert SLO into course if not present
 # + Mark as inactive (change name) if necessary
 #
+# - Update shell with correct outcomes
 
 # - Issue:
 # + Course naming / sections joined...
@@ -21,16 +22,16 @@
 import concurrent.futures
 import pandas as pd
 from pipelines import fetch, url, header
-from courses import getCoursesInTerm
-import codecs, json
+from outcomes import quick_add_course_outcomes, code_from_ilearn_name, all_linked_outcomes_in_term
+from courses import getCoursesInTerm, getCourses
+import codecs, json, sys, re, csv, requests, textwrap
 from path_dict import PathDict
 
-NUM_THREADS = 20
-get_fresh = 0
-
-TERM = 180 # fa23
-sem_courses = getCoursesInTerm(TERM,get_fresh)
+outputfile = ''
+csvwriter = ''
+
+TERM = 180
 
 def escape_commas(s):
     if ',' in s:
@@ -38,15 +39,34 @@ def escape_commas(s):
     else:
         return s
 
-# shorter list for test?
-#sem_courses = sem_courses[:50]
-
-print("Got %i courses in current semester." % len(sem_courses))
-
-outputfile = codecs.open(f'cache/slo/outcomes_bycourse_{TERM}.output.txt','w','utf-8')
-outputfile.write( "coursename,assessed,courseid,outcome_id,points,title,displayname,description,guid\n")
+def add_outcome_to_course(shell_id=''):
+    if shell_id == '':
+        shell_id = input("Enter shell id > ")
+    course = getCourses(str(shell_id))
+    dept, code, crn = code_from_ilearn_name(course['name'])
+    print(f"{dept} {code} {crn} for course named: {course['name']}")
+    #xyz = input(f"Using: {code} for {course['name']}. Enter a different code, q to skip or press enter to continue > ")
+    #if xyz == 'q':
+    #    return
+    #if xyz != '':
+    #    code = xyz
+    cq_course_id = find_recent_cqcourseid(code)
+    oc = codecs.open('cache/courses/alloutcomes.csv','r','utf-8')
+    reader = csv.reader(oc)
+    cols = next(reader) # skip header
+
+    # Filter rows matching the code
+    rows = [row for row in reader if row[1] == cq_course_id]
+    rows_dicts = [ {cols[i]: r[i] for i in range(len(cols))} for r in rows ]
+    #abc = input(f"Using outcomes:\n{rows_dicts}\n\nPress enter to continue > ")
+
+    quick_add_course_outcomes(shell_id, rows_dicts)
 
 
 def course_slo_getter(q):
+    global outputfile, csvwriter
     (name,id) = q
     info = {'ilearnname':name,'ilearnid':id}
     print(" + Thread getting %s %s" % (str(name),str(id)))
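The header-zip in add_outcome_to_course() (cols from next(reader), then a dict comprehension per row) is what csv.DictReader provides directly. A sketch; the 'cqcourseid' column name is an assumption (the code above matches on index 1, and the real header of alloutcomes.csv is not shown here):

    import codecs, csv

    with codecs.open('cache/courses/alloutcomes.csv', 'r', 'utf-8') as oc:
        rows_dicts = [row for row in csv.DictReader(oc)
                      if row['cqcourseid'] == cq_course_id]  # hypothetical column name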
@@ -70,6 +90,8 @@ def course_slo_getter(q):
             og['full_outcomes'][this_outcome['id']] = this_outcome
             saveme = [name, this_outcome['assessed'], id, this_outcome['id'], this_outcome['points_possible'], this_outcome['title'], this_outcome['display_name'], this_outcome['description'], this_outcome['vendor_guid'] ]
             saveme2 = [escape_commas(str(x)) for x in saveme]
+
+            csvwriter.writerow([id, name, og['id'], this_outcome['id'], this_outcome['vendor_guid'], this_outcome['points_possible'], this_outcome['mastery_points'], this_outcome['assessed'], this_outcome['description']])
             outputfile.write(",".join(saveme2) + "\n")
             outputfile.flush()
     if type(og_for_course) == list:
@@ -79,25 +101,43 @@ def course_slo_getter(q):
     print(" - Thread %s DONE" % str(id))
     return og_for_course
 
-raw_log = codecs.open('cache/outcome_raw_log.txt','w','utf-8')
-#raw_log.write( json.dumps(output,indent=2) )
-
-output = []
-with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
-    futures = []
-    for C in sem_courses:
-        print("Adding ", C['name'], C['id'], " to queue")
-        futures.append( pool.submit(course_slo_getter, [C['name'], C['id']] ) )
-    for future in concurrent.futures.as_completed(futures):
-        output.append(future.result())
-        print(future.result())
-        raw_log.write( json.dumps(future.result(),indent=2) + "\n" )
-#print("-- Done")
-#print("results array has %i items" % len(results))
-
-#for r in concurrent.futures.as_completed(results):
-#    output.append(r.result())
+# I duplicate???
+def outcomes_in_shell(course_id):
+    print(f"Getting root outcome group for course id {course_id}")
+    root_og = fetch(f"{url}/api/v1/courses/{course_id}/root_outcome_group")
+    print("Getting outcomes")
+    u1 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{root_og['id']}/outcomes"
+    outcomes_list = fetch( u1 )
+    the_outcomes = []
+
+    if 'errors' in outcomes_list:
+        print(f"Error: {outcomes_list}")
+
+    if len(outcomes_list):
+        for oo in outcomes_list:
+            print(f"Getting outcome id {oo['outcome']['id']}")
+            outcome = fetch( url + '/api/v1/outcomes/%s' % str(oo['outcome']['id']) )
+            outcome['parent_group'] = root_og['id']
+            the_outcomes.append(outcome)
+
+    u2 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{root_og['id']}/subgroups"
+    g2 = fetch( u2 )
+
+    for subgroup in g2:
+        print("doing subgroup id %s" % str(subgroup['id']))
+        u3 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{subgroup['id']}/outcomes"
+        outcomes_list = fetch( u3 )
+
+        if 'errors' in outcomes_list:
+            print(f"Error: {outcomes_list}")
+            continue
+
+        if len(outcomes_list):
+            for oo in outcomes_list:
+                outcome = fetch( f"{url}/api/v1/outcomes/{oo['outcome']['id']}" )
+                outcome['parent_group'] = subgroup['id']
+                the_outcomes.append(outcome)
+
+    return root_og, the_outcomes, g2
 
 
 def ilearn_shell_slo_to_csv(shell_slos):
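outcomes_in_shell() walks the root group plus one level of subgroups. Canvas outcome groups can nest deeper, so a recursive variant covers the general case; a sketch reusing the same fetch/url helpers from pipelines:

    def walk_outcome_group(course_id, group_id, acc):
        # collect outcomes linked directly to this group
        listing = fetch(f"{url}/api/v1/courses/{course_id}/outcome_groups/{group_id}/outcomes")
        if isinstance(listing, list):
            for oo in listing:
                outcome = fetch(f"{url}/api/v1/outcomes/{oo['outcome']['id']}")
                outcome['parent_group'] = group_id
                acc.append(outcome)
        # then recurse into each subgroup
        subgroups = fetch(f"{url}/api/v1/courses/{course_id}/outcome_groups/{group_id}/subgroups")
        if isinstance(subgroups, list):
            for sub in subgroups:
                walk_outcome_group(course_id, sub['id'], acc)
        return acc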
@@ -146,11 +186,325 @@ def ilearn_shell_slo_to_csv(shell_slos):
 
     df.to_csv('cache/outcome.csv')
     print(df)
 
-ilearn_shell_slo_to_csv(output)
+
+def get_outcomes_term_index():
+    global outputfile, csvwriter
+    NUM_THREADS = 20
+    get_fresh = 0
+
+    sem_courses = getCoursesInTerm(TERM,get_fresh)
+
+    # shorter list for test?
+    #sem_courses = sem_courses[:50]
+
+    print("Got %i courses in current semester." % len(sem_courses))
+
+    outputfile = codecs.open(f'cache/slo/outcomes_bycourse_{TERM}.output.txt','w','utf-8')
+    outputfile.write( "coursename,assessed,courseid,outcome_id,points,title,displayname,description,guid\n")
+
+    csvfile = codecs.open(f'cache/slo/linked_slos_term_{TERM}_compact.csv','w','utf-8')
+    csvwriter = csv.writer(csvfile)
+    csvwriter.writerow('courseid coursename ogid oid vendorguid points mastery assessed desc'.split(' '))
+
+    raw_log = codecs.open('cache/outcome_raw_log.txt','w','utf-8')
+    #raw_log.write( json.dumps(output,indent=2) )
+
+    output = []
+    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
+        futures = []
+        for C in sem_courses:
+            print("Adding ", C['name'], C['id'], " to queue")
+            futures.append( pool.submit(course_slo_getter, [C['name'], C['id']] ) )
+        for future in concurrent.futures.as_completed(futures):
+            output.append(future.result())
+            print(future.result())
+            raw_log.write( json.dumps(future.result(),indent=2) + "\n" )
+
+    csvfile.close()
+    ilearn_shell_slo_to_csv(output)
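get_outcomes_term_index() shares csvwriter across all worker threads, and csv writers are not synchronized, so concurrent writerow() calls can interleave rows. A minimal guard, assuming the same global names as above:

    import threading

    csv_lock = threading.Lock()

    def safe_writerow(row):
        # serialize writes coming from course_slo_getter() threads
        with csv_lock:
            csvwriter.writerow(row)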
+def classify_shell(lines):
+    # given a list of lines like this, determine status of shell
+    # (from linked_slos_term_180_compact.csv) outcomes.py all_linked_outcomes_in_term()
+    #
+    # courseid,coursename,ogid,oid,vendorguid,points,mastery,assessed,desc
+    # 16909,AH11 FA23 10003/10014/12251,10860,819,,5,3,False,Use scientific facts and principles to critically analyze nutrition information and use the information to assess personal diet and the diets of other cultures.
+    # 16909,AH11 FA23 10003/10014/12251,10860,820,,5,3,False,Evaluate nutrition information for accuracy and reliability.
+    # 16909,AH11 FA23 10003/10014/12251,10860,821,,5,3,False,Analyze and identify the relationship between nutrition and health.
+    # 16909,AH11 FA23 10003/10014/12251,10860,822,,5,3,False,Differentiate among food habits and practices related to traditional foods and preparation techniques in selected cultures or religions.
+    # 16909,AH11 FA23 10003/10014/12251,10860,823,,5,3,False,Analyze nutritional problems of selected cultures and create a nutritionally balanced menu.
+    #
+    # 1. number of outcomes
+    # 2. points are correct (max=3,mastery=2) or incorrect (max=5,mastery=3)
+    # 3. assessed or not
+
+    course_status = {'outcome_count':0, 'id':0, 'name':'', 'assessed_count':0, 'points_ok':1}
+
+    for L in lines:
+        #print(L)
+        #L = L.split(',')
+        course_status['outcome_count'] += 1
+        course_status['id'] = L[0]
+        course_status['name'] = L[1]
+        outcome_status = {'courseid':L[0],'coursename':L[1],'ogid':L[2],'oid':L[3],'vendorguid':L[4],'points':L[5],'mastery':L[6],'assessed':L[7],'desc':L[8], 'pointscorrect':0}
+        if L[5] == '5' and L[6] == '3':
+            outcome_status['pointscorrect'] = 0
+            course_status['points_ok'] = 0
+        elif (L[5] == '3.0' or L[5] == '3') and L[6] == '2':
+            outcome_status['pointscorrect'] = 1
+        else:
+            outcome_status['pointscorrect'] = -1
+        if L[7] == 'True':
+            course_status['assessed_count'] += 1
+
+    return course_status
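For reference, classify_shell() run against two of the sample rows quoted in its own comment (fields already split, descriptions elided):

    rows = [
        ['16909', 'AH11 FA23 10003/10014/12251', '10860', '819', '', '5', '3', 'False', '...'],
        ['16909', 'AH11 FA23 10003/10014/12251', '10860', '820', '', '5', '3', 'False', '...'],
    ]
    print(classify_shell(rows))
    # {'outcome_count': 2, 'id': '16909', 'name': 'AH11 FA23 10003/10014/12251',
    #  'assessed_count': 0, 'points_ok': 0}   # 5/3 points flag the shell for fixing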
+def find_recent_cqcourseid(code):
+    # code example: CSIS42
+    with open('cache/courses/all_courses_ranked.csv', 'r') as f:
+        reader = csv.reader(f)
+        next(reader) # skip header
+
+        # Filter rows matching the code
+        rows = [row for row in reader if row[0] == code]
+
+    print(f"All entries for {code}:\n{rows}")
+
+    if not rows:
+        raise ValueError(f"No rows found for code {code}")
+
+    # Sort by 'termineffect', in descending order
+    rows.sort(key=lambda row: row[3], reverse=True)
+
+    # Return cqcourseid of the first row
+    myrow = rows[0][1]
+    print(f"Using: {myrow}")
+    return myrow
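One caveat in find_recent_cqcourseid(): rows.sort(key=lambda row: row[3]) compares termineffect as text, so '99' sorts above '180'. If that column is always numeric, an integer key keeps the newest version first (treating blanks as oldest is an assumption):

    rows.sort(key=lambda row: int(row[3] or 0), reverse=True)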
+def remove_old_outcomes(course_id):
+    root_og, current_outcomes, subgroups = outcomes_in_shell(course_id)
+    print(f"Got {len(current_outcomes)} outcomes for course id {course_id}")
+    print(f"Current outcomes:\n{json.dumps(current_outcomes,indent=2)}")
+
+    # Try deleting them
+    if 1:
+        for deleted_outcome in current_outcomes:
+            print(f"Deleting outcome id {deleted_outcome['id']}")
+            u9 = f"{url}/api/v1/courses/{course_id}/outcome_groups/{deleted_outcome['parent_group']}/outcomes/{deleted_outcome['id']}"
+            print(u9)
+
+            # make the DELETE request
+            response = requests.delete(u9, headers=header)
+
+            # check the status of the request
+            if response.status_code == 200:
+                print(' Delete operation was successful')
+            else:
+                print(' Failed to delete, response code:', response.status_code)
+                print(' Response message:', response.text)
+def repair_outcome_points(course_id):
+    # Compare to what Outcomes SHOULD be
+    course = getCourses(course_id)
+    dept, code, crn = code_from_ilearn_name(course['name'])
+    xyz = input(f"Using: {code} for {course['name']}. Enter a different code or press enter to continue > ")
+    if xyz != '':
+        code = xyz
+    cq_course_id = find_recent_cqcourseid(code)
+    oc = codecs.open('cache/courses/alloutcomes.csv','r','utf-8')
+    reader = csv.reader(oc)
+    cols = next(reader) # skip header
+
+    # Filter rows matching the code
+    rows = [row for row in reader if row[1] == cq_course_id]
+    rows_dicts = [ {cols[i]: r[i] for i in range(len(cols))} for r in rows ]
+    abc = input(f"Using outcomes:\n{json.dumps(rows_dicts,indent=2)}\n\nPress enter to continue > ")
+
+    return  # WIP: the update below is unreachable until this return is removed
+
+    outcome_id = 0
+
+    data = {
+        'mastery_points': '2',
+        'calculation_method': 'decaying_average',
+        'calculation_int': '65',
+        'ratings[0][description]': 'Exceeds Expectations',
+        'ratings[0][points]': '3',
+        'ratings[1][description]': 'Meets Expectations',
+        'ratings[1][points]': '2',
+        'ratings[2][description]': 'Does Not Meet Expectations',
+        'ratings[2][points]': '0'
+    }
+
+    response = requests.put(f'{url}/api/v1/outcomes/{outcome_id}.json', headers=header, data=data)
+
+    if response.status_code == 200:
+        print(f"Successfully updated outcome with id {outcome_id}.")
+    else:
+        print(f"Failed to update outcome with id {outcome_id}. Error: {response.text}")
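repair_outcome_points() currently stops at the bare return, so its PUT never fires. Once it works, the same payload could be applied across a whole shell by reusing outcomes_in_shell(); a sketch, assuming the same url/header globals:

    def repair_all_points(course_id):
        data = {
            'mastery_points': '2',
            'ratings[0][description]': 'Exceeds Expectations',
            'ratings[0][points]': '3',
            'ratings[1][description]': 'Meets Expectations',
            'ratings[1][points]': '2',
            'ratings[2][description]': 'Does Not Meet Expectations',
            'ratings[2][points]': '0',
        }
        _, outcomes, _ = outcomes_in_shell(course_id)
        for o in outcomes:
            r = requests.put(f"{url}/api/v1/outcomes/{o['id']}.json", headers=header, data=data)
            print(o['id'], r.status_code)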
+def add_o_dept_dry_run():
+    add_o_dept(1)
+
+
+def add_o_dept(dry_run=0):
+    d = input("Enter dept or depts separated with a space > ")
+    d_list = d.split(' ')
+    course_groups = full_term_overview(0)
+
+    dept_shells_to_add = [ a for a in course_groups['no outcomes'] if a['dept'] in d_list ]
+    sorted_dept_shells_to_add = sorted(dept_shells_to_add, key=lambda x: f"{x['dept']}{x['code']}")
+
+    print(f"Adding to {len(sorted_dept_shells_to_add)} shells.")
+
+    for shell in sorted_dept_shells_to_add:
+        print(f"Adding outcomes to {shell['name']}")
+        if not dry_run:
+            try:
+                add_outcome_to_course(shell['id'])
+            except Exception as e:
+                print(f"Failed on {shell['id']}: {e}")
+        else:
+            print(" Dry run, not adding")
+def remove_all_bad_points():
+    course_groups = full_term_overview(0)
+
+    dept_shells_to_zap = [ a for a in course_groups['fix_points'] ]
+    for shell in dept_shells_to_zap:
+        print(f"Removing outcomes from {shell['name']}")
+        remove_old_outcomes(shell['id'])
+def full_term_overview(verbose=1):
+    out2 = codecs.open(f'cache/slo_status_{TERM}.json','w','utf-8')
+    out3 = codecs.open(f'cache/slo_status_{TERM}.txt','w','utf-8')
+    fn1 = f"cache/courses_in_term_{TERM}.json"
+    all_courses = json.loads(codecs.open(fn1,'r','utf-8').read())
+    all_courses_status = {}
+
+    # default values for all courses
+    for C in all_courses:
+        dept,code,crn = code_from_ilearn_name(C['name'])
+        all_courses_status[str(C['id'])] = {'outcome_count':0, 'id':C['id'], 'name':C['name'], 'dept':dept, 'code':code, 'crn':crn, 'assessed_count':0, 'points_ok':1}
+
+    # read the existing ilearn outcomes and group by shell
+    filename = f"cache/slo/linked_slos_term_{TERM}_compact.csv"
+    with open(filename, 'r') as csvfile:
+        reader = csv.reader(csvfile)
+        next(reader) # skip header
+
+        # Read the rows into a list
+        rows = list(reader)
+
+    # Sort the rows based on a specific column (e.g., column 0)
+    sorted_rows = sorted(rows, key=lambda x: x[0])
+
+    groups = []
+    current_group = []
+    last_courseid = None
+    for row in sorted_rows:
+        courseid = row[0]
+        if last_courseid != courseid and current_group:
+            # courseid changed from last row to current row
+            groups.append(current_group)
+            current_group = []
+        current_group.append(row)
+        last_courseid = courseid
+
+    # append the last group if any
+    if current_group:
+        groups.append(current_group)
+
+    for g in groups:
+        classified = classify_shell(g)
+        dept,code,crn = code_from_ilearn_name(g[0][1])
+        classified['dept'] = dept
+        classified['code'] = code
+        classified['crn'] = crn
+        all_courses_status[str(classified['id'])] = classified
+
+    #for C in all_courses_status:
+    #    print(all_courses_status[C])
+
+    course_groups = {
+        'no outcomes': [],
+        'ok': [],
+        'fix_points_and_scores': [],
+        'fix_points': []
+    }
+
+    for d in all_courses_status.values():
+        outcome_count = d['outcome_count']
+        points_ok = d['points_ok']
+        assessed_count = d['assessed_count']
+
+        if outcome_count == 0:
+            course_groups['no outcomes'].append(d)
+        elif points_ok == 1 and assessed_count > 0:
+            course_groups['fix_points_and_scores'].append(d)
+        elif points_ok == 0 and assessed_count > 0:
+            course_groups['fix_points_and_scores'].append(d)
+        elif points_ok == 1:
+            course_groups['ok'].append(d)
+        elif points_ok == 0:
+            course_groups['fix_points'].append(d)
+
+    # Print out the groups
+    out2.write(json.dumps(course_groups,indent=2))
+    if verbose:
+        for group, dicts in course_groups.items():
+            sorted_dicts = sorted(dicts, key=lambda x: f"{x['dept']}{x['code']}")
+            print(f"{group} - {len(sorted_dicts)} item(s)")
+            out3.write(f"{group} - {len(sorted_dicts)} item(s)\n")
+            for d in sorted_dicts:
+                print(d)
+                out3.write(str(d) + "\n")
+            print("\n")
+            out3.write("\n")
+
+    return course_groups
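The manual group-by-courseid loop in full_term_overview() is equivalent to itertools.groupby once the rows are sorted on column 0:

    from itertools import groupby

    groups = [list(g) for _, g in groupby(sorted_rows, key=lambda r: r[0])]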
+def fetch_term_outcomes_and_report():
+    get_outcomes_term_index()
+    full_term_overview()
+
+
+if __name__ == "__main__":
+    options = { 1: ['Refresh term outcome list & report', fetch_term_outcomes_and_report],
+                3: ['Add outcomes to course id', add_outcome_to_course],
+                4: ['Fix outcome points', remove_old_outcomes],
+                5: ['Add outcomes to dept, dry run', add_o_dept_dry_run],
+                6: ['Add outcomes to dept', add_o_dept],
+                7: ['Remove all outcomes with wrong points', remove_all_bad_points],
+              }
+    print ('')
+
+    if len(sys.argv) > 1 and re.search(r'^\d+',sys.argv[1]):
+        resp = int(sys.argv[1])
+        print("\n\nPerforming: %s\n\n" % options[resp][0])
+
+    else:
+        print ('')
+        for key in options:
+            print(str(key) + '.\t' + options[key][0])
+        print('')
+        resp = input('Choose: ')
+
+    # Call the function in the options dict
+    options[ int(resp)][1]()
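Usage note: the menu above also runs non-interactively when the option number is passed on the command line, e.g.

    python outcomes2022.py 1    # refresh term outcome list & report
    python outcomes2022.py 5    # dry run: add outcomes for a department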