# Outcomes 2023
# Tasks:
#
# - List all courses (in semester) in iLearn:
#   + SLOs associated with the course
#   + Whether they are current or inactive
#   + Whether they are attached to an assessment
#   + Whether, and by how many students, they have been assessed
#
# - Fetch most current SLOs from Curricunet
#   + Assemble multiple versions of a (CQ) course and determine which semesters they apply to
#   + Whether they are present in the relevant classes in iLearn
#   + Insert SLO into course if not present
#   + Mark as inactive (change name) if necessary
# - Issue:
#   + Course naming / sections joined...

import concurrent.futures
import pandas as pd
from pipelines import fetch, url, header
from courses import getCoursesInTerm
import codecs, json
from path_dict import PathDict

NUM_THREADS = 20
get_fresh = 0

# Term 178 is the current semester; get_fresh=0 allows a cached course list.
sem_courses = getCoursesInTerm(178, get_fresh)

def escape_commas(s):
    # Minimal CSV quoting: only fields containing a comma are quoted,
    # with embedded quotes doubled per the CSV convention.
    if ',' in s:
        return '"' + s.replace('"', '""') + '"'
    else:
        return s

# shorter list for test?
#sem_courses = sem_courses[:50]

print("Got %i courses in current semester." % len(sem_courses))

outputfile = codecs.open('cache/slo/outcomes2022.output.txt', 'w', 'utf-8')
outputfile.write("coursename,assessed,courseid,outcome_id,points,title,displayname,description,guid\n")

def course_slo_getter(q):
    # Worker run in a thread: fetch the outcome groups and outcomes for one
    # course shell, append a CSV row per outcome, and return the raw structure.
    (name, id) = q
    info = {'ilearnname': name, 'ilearnid': id}
    print(" + Thread getting %s %s" % (str(name), str(id)))

    # Get GROUPS for a course
    u1 = url + "/api/v1/courses/%s/outcome_groups" % str(id)
    og_for_course = fetch(u1)

    if len(og_for_course):
        # There is a GROUP...
        for og in og_for_course:
            if "outcomes_url" in og:
                # There are OUTCOMES...
                outcomes = fetch(url + og["outcomes_url"])
                og['outcomes'] = outcomes
                og['full_outcomes'] = {}
                for oo in outcomes:
                    print("   -> " + url + oo['outcome']['url'])
                    this_outcome = fetch(url + oo['outcome']['url'])
                    og['full_outcomes'][this_outcome['id']] = this_outcome
                    saveme = [name,
                              this_outcome['assessed'],
                              id,
                              this_outcome['id'],
                              this_outcome['points_possible'],
                              this_outcome['title'],
                              this_outcome['display_name'],
                              this_outcome['description'],
                              this_outcome['vendor_guid']]
                    saveme2 = [escape_commas(str(x)) for x in saveme]
                    outputfile.write(",".join(saveme2) + "\n")
                    outputfile.flush()

    # Prepend/merge the course identity so the result is self-describing.
    if type(og_for_course) == list:
        og_for_course.insert(0, info)
    else:
        og_for_course.update(info)

    print(" - Thread %s DONE" % str(id))
    return og_for_course

raw_log = codecs.open('cache/outcome_raw_log.txt', 'w', 'utf-8')
#raw_log.write( json.dumps(output,indent=2) )

# Fetch every course's outcomes concurrently; log each raw result as it completes.
output = []
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
    futures = []
    for C in sem_courses:
        print("Adding ", C['name'], C['id'], " to queue")
        futures.append(pool.submit(course_slo_getter, [C['name'], C['id']]))
    for future in concurrent.futures.as_completed(futures):
        output.append(future.result())
        print(future.result())
        raw_log.write(json.dumps(future.result(), indent=2) + "\n")

#print("-- Done")
#print("results array has %i items" % len(results))
#for r in concurrent.futures.as_completed(results):
#    output.append(r.result())

def ilearn_shell_slo_to_csv(shell_slos):
    # Flatten the per-course results into a wide table: one row per course,
    # with columns for up to ten outcomes (id, vendor_guid, description, assessed).
    L = ['canvasid', 'name', 'crn', 'has_outcomes', ]
    for i in range(1, 11):
        L.append("o%i_id" % i)
        L.append("o%i_vendor_guid" % i)
        L.append("o%i_desc" % i)
        L.append("o%i_assd" % i)
    df = pd.DataFrame(columns=L)

    for S in shell_slos:
        short = S[0]
        this_crs = {'canvasid': short['ilearnid'],
                    'name': short['ilearnname'],
                    'has_outcomes': 0, }
        if len(S) > 1:
            full = S[1]
            this_crs['has_outcomes'] = 1
            i = 1
            for o in full['outcomes']:
                try:
                    this_id = int(o['outcome']['id'])
                    this_crs['o%i_id' % i] = o['outcome']['id']
                except Exception as e:
                    this_crs['o%i_id' % i] = '!'
                try:
                    this_crs['o%i_desc' % i] = full['full_outcomes'][this_id]['description']
                except Exception as e:
                    this_crs['o%i_desc' % i] = '!'
                try:
                    # 'assessed' is normally a JSON boolean; tolerate the string form too.
                    assessed = 0
                    if full['full_outcomes'][this_id]['assessed'] in (True, 'True'):
                        assessed = 1
                    this_crs['o%i_assd' % i] = assessed
                except Exception as e:
                    this_crs['o%i_assd' % i] = '!'
                try:
                    this_crs['o%i_vendor_guid' % i] = full['full_outcomes'][this_id]['vendor_guid']
                except Exception as e:
                    this_crs['o%i_vendor_guid' % i] = '!'
                i += 1
        df2 = pd.DataFrame(this_crs, columns=df.columns, index=[0])
        df = pd.concat([df, df2], ignore_index=True)

    df.to_csv('cache/outcome.csv')
    print(df)

#ilearn_shell_slo_to_csv(output)
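
# ---------------------------------------------------------------------------
# Optional helper (a sketch, not part of the pipeline above): the raw log is a
# concatenation of pretty-printed JSON objects, one per course, so it cannot be
# reloaded with a single json.load(). The hypothetical load_raw_log() below
# re-parses it so ilearn_shell_slo_to_csv() could be re-run from the cached log
# without hitting the Canvas API again. The default path matches raw_log above.
def load_raw_log(path='cache/outcome_raw_log.txt'):
    decoder = json.JSONDecoder()
    results = []
    with codecs.open(path, 'r', 'utf-8') as f:
        text = f.read()
    pos = 0
    while pos < len(text):
        # Skip the whitespace/newlines separating consecutive objects.
        while pos < len(text) and text[pos].isspace():
            pos += 1
        if pos >= len(text):
            break
        obj, pos = decoder.raw_decode(text, pos)
        results.append(obj)
    return results

# e.g. rebuild the per-course CSV offline:
#ilearn_shell_slo_to_csv(load_raw_log())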