#from sqlite3 import paramstyle
#from time import strptime
#from util import UnicodeDictReader
import codecs, json, requests, re, csv, datetime, pysftp, os, os.path, jsondiff
import sys, shutil, hmac, hashlib, base64, schedule, time, pathlib
#import pdb
from datetime import timedelta
#from collections import defaultdict
from semesters import short_to_long
from canvas_secrets import apiKey, apiSecret, FTP_SITE, FTP_USER, FTP_PW, url, domain, account_id, header, header_media, g_id, g_secret
from canvas_secrets import instructure_url, instructure_username, instructure_private_key

import asyncio
from dap.api import DAPClient
from dap.dap_types import Credentials
from dap.integration.database import DatabaseConnection
from dap.replicator.sql import SQLReplicator

"""
Everything to do with fetching data:
- from iLearn, via token
- current roster uploads from Instructure's sftp site
- raw logs and other files from the Canvas Data repo
- from SSB, using Firefox to scrape the schedule

And some subsequent processing:
- raw roster files, into a more compact json format
- raw logs into something more useful
"""

verbose = False

users = {}
users_by_id = {}

# todo: all these constants for SSB -- line 1008
#
# todo: https://stackoverflow.com/questions/42656247/how-can-i-use-canvas-data-rest-api-using-python

sys.setrecursionlimit(100000)

local_data_folder = 'cache/canvas_data/'
mylog = codecs.open(local_data_folder + 'temp_log.txt', 'w')


class FetchError(Exception):
    pass


DEBUG = 0

def d(s, end=''):
    global DEBUG
    if end and DEBUG:
        print(s, end=end)
    elif DEBUG:
        print(s)


################
################ CANVAS API MAIN FETCHING FUNCTIONS
################
################
################

# Main canvas querying fxn. If there are more results, recursively call
# myself, adding on to the results.
def fetch(target, verbose=0, params=0, media=0):
    results = 0
    count = 0  # stays 0 if the response fails to parse
    if target[0:4] != "http":
        target = url + target
    if verbose:
        print("++ Fetching: " + target)
    if media:
        r2 = requests.get(target, headers=header_media)
    elif params:
        r2 = requests.get(target, headers=header, params=params)
    else:
        r2 = requests.get(target, headers=header)
    #if verbose:
    #    print("++ Got: " + r2.text)
    try:
        results = json.loads(r2.text)
        count = len(results)
    except Exception:
        print("-- Failed to parse: ", r2.text)
    if verbose:
        print("Got %i results" % count)
    if verbose > 1:
        print(r2.headers)
    tempout = codecs.open('cache/fetchcache.txt', 'a', 'utf-8')
    tempout.write(r2.text + "\n\n")
    tempout.close()
    if 'link' in r2.headers and count > 0:
        links = r2.headers['link'].split(',')
        for L in links:
            ll = L.split(';')
            link = ll[0].replace("<", "").replace(">", "")
            if re.search(r'next', ll[1]):
                if verbose:
                    print("++ More link: " + link)
                #link = re.sub(r'per_page=10$', 'per_page=100', link)
                #link = link.replace('per_page=10','per_page=500')
                nest = fetch(link, verbose, params, media)
                if isinstance(results, dict):
                    results.update(nest)
                else:
                    results.extend(nest)
    return results
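
# A minimal usage sketch for fetch(). The endpoint path and per_page value
# are illustrative assumptions (any Canvas REST path relative to `url` from
# canvas_secrets works the same way); pagination via the Link header is
# followed automatically by fetch() itself.
def _example_fetch_usage():
    # Hypothetical: list courses in the account, 100 per page.
    courses = fetch('accounts/%s/courses?per_page=100' % account_id, verbose=1)
    for c in courses:
        print(c['id'], c.get('name'))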
# Main canvas querying fxn - stream version - don't die on big requests.
# While there is a next page, keep fetching and yield one page at a time.
def fetch_stream(target, verbose=0):
    results = 0
    while target:
        if target[0:4] != "http":
            target = url + target
        if verbose:
            print("++ Fetching: " + target)
        r2 = requests.get(target, headers=header)
        if r2.status_code == 502:
            raise FetchError()
        count = 0  # stays 0 if the response fails to parse
        try:
            results = json.loads(r2.text)
            count = len(results)
        except Exception:
            print("-- Failed to parse: ", r2.text)
        if verbose:
            print("Got %i results" % count)
        if verbose > 1:
            print(r2.headers)
        tempout = codecs.open('cache/fetchcache.txt', 'a', 'utf-8')
        tempout.write(r2.text + "\n\n")
        tempout.close()
        next_link_found = 0
        if 'link' in r2.headers and count > 0:
            links = r2.headers['link'].split(',')
            for L in links:
                ll = L.split(';')
                link = ll[0].replace("<", "").replace(">", "")
                if re.search(r'next', ll[1]):
                    target = link
                    next_link_found = 1
                    break
        if not next_link_found:
            target = 0
        yield results


# For dicts with one key, collapse that one key out, because paging makes
# problems... example: enrollment_terms.
def fetch_collapse(target, collapse='', verbose=0):
    # If there are more results, recursively call myself, adding on to the results.
    results = 0
    if target[0:4] != "http":
        target = url + target
    if verbose:
        print("++ Fetching: " + target)
    r2 = requests.get(target, headers=header)
    try:
        results = json.loads(r2.text)
    except Exception:
        print("-- Failed to parse: ", r2.text)
    if verbose:
        print(r2.headers)
    if collapse and collapse in results:
        results = results[collapse]
    if 'link' in r2.headers:
        links = r2.headers['link'].split(',')
        for L in links:
            ll = L.split(';')
            link = ll[0].replace("<", "").replace(">", "")
            if re.search(r'next', ll[1]):
                if verbose:
                    print("++ More link: " + link)
                nest = fetch_collapse(link, collapse, verbose)
                if isinstance(results, dict):
                    results.update(nest)
                else:
                    results.extend(nest)
    return results


################
################ CANVAS DATA
################
################
################

# Get canvas data 2024 style.
def canvas_data_2024_run():
    print("Updating all tables.")
    asyncio.run(canvas_data_2024())
    print("Done with all tables.")


async def canvas_data_2024():
    base_url: str = os.environ["DAP_API_URL"]
    client_id: str = os.environ["DAP_CLIENT_ID"]
    client_secret: str = os.environ["DAP_CLIENT_SECRET"]
    connection_string: str = "postgresql://postgres:rolley34@192.168.1.6/db"
    desired_tables = "users,courses,communication_channels,context_modules,conversation_message_participants,conversation_messages,conversation_participants,conversations,course_sections,enrollment_states,enrollment_dates_overrides,enrollment_terms,enrollments,learning_outcome_groups,learning_outcome_question_results,learning_outcomes,pseudonyms,quizzes,scores,submissions,submission_versions,wiki_pages,wikis".split(',')
    credentials = Credentials.create(client_id=client_id, client_secret=client_secret)
    async with DatabaseConnection(connection_string).open() as db_connection:
        async with DAPClient(base_url, credentials) as session:
            #tables = await session.get_tables("canvas")
            for table in desired_tables:
                print(f"  trying to update {table}")
                try:
                    #await SQLReplicator(session, db_connection).initialize("canvas", table)
                    await SQLReplicator(session, db_connection).synchronize("canvas", table)
                except Exception as e:
                    print(f"  - skipping {table} because {e}")
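
# A minimal sketch of configuring the DAP client used above. These are the
# three variables canvas_data_2024() reads; the values are placeholders (the
# URL is Instructure's documented default gateway, but verify it for your
# region), and normally they would be set in the shell, not in code.
def _example_dap_env_setup():
    os.environ.setdefault("DAP_API_URL", "https://api-gateway.instructure.com")
    os.environ.setdefault("DAP_CLIENT_ID", "<your client id>")
    os.environ.setdefault("DAP_CLIENT_SECRET", "<your client secret>")
    canvas_data_2024_run()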
# Get canvas data 2024 style - one-time setup of the local tables.
def setup_canvas_data_2024_run():
    print("Setting up all tables.")
    asyncio.run(setup_canvas_data_2024())
    print("Done with all tables.")


async def setup_canvas_data_2024():
    base_url: str = os.environ["DAP_API_URL"]
    client_id: str = os.environ["DAP_CLIENT_ID"]
    client_secret: str = os.environ["DAP_CLIENT_SECRET"]
    connection_string: str = "postgresql://postgres:rolley34@192.168.1.6/db"
    desired_tables = "users,courses,communication_channels,context_modules,conversation_message_participants,conversation_messages,conversation_participants,conversations,course_sections,enrollment_states,enrollment_dates_overrides,enrollment_terms,enrollments,learning_outcome_groups,learning_outcome_question_results,learning_outcomes,pseudonyms,quizzes,scores,submissions,submission_versions,wiki_pages,wikis".split(',')
    credentials = Credentials.create(client_id=client_id, client_secret=client_secret)
    async with DatabaseConnection(connection_string).open() as db_connection:
        async with DAPClient(base_url, credentials) as session:
            #tables = await session.get_tables("canvas")
            for table in desired_tables:
                print(f"  {table}")
                try:
                    await SQLReplicator(session, db_connection).initialize("canvas", table)
                except Exception as e:
                    print(f"  - skipping {table} because {e}")


################
################ ROSTERS AND REGISTRATION
################
################
################

# todo: the pipeline is disorganized. Organize it to have
# a hope of taking all this to a higher level.
#
# todo: where does this belong in the pipeline? compare with recent_schedules()

# Take the generically named roster upload files, move them to a semester
# folder, and give them a date. See the usage sketch below.
def move_to_folder(sem, year, folder, files):
    semester = year + sem
    semester_path = 'cache/rosters/%s' % semester
    now = datetime.datetime.now().strftime('%Y-%m-%dT%H-%M')
    print("+ Moving roster files to folder: %s" % semester_path)
    if not os.path.isdir(semester_path):
        print("+ Creating folder: %s" % semester_path)
        os.makedirs(semester_path)
    if 'courses.csv' in files:
        os.rename('cache/rosters/courses-%s.csv' % folder,
                  'cache/rosters/%s/courses.%s.csv' % (semester, now))
    if 'enrollments.csv' in files:
        os.rename('cache/rosters/enrollments-%s.csv' % folder,
                  'cache/rosters/%s/enrollments.%s.csv' % (semester, now))
    if 'users.csv' in files:
        os.rename('cache/rosters/users-%s.csv' % folder,
                  'cache/rosters/%s/users.%s.csv' % (semester, now))
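
# A minimal usage sketch for move_to_folder(). All values are hypothetical:
# `folder` is whatever timestamp suffix the downloaded csv files were saved
# with, and the file list mirrors what fetch_current_rosters() passes in.
def _example_move_to_folder():
    move_to_folder('spring', '2024', '2024-03-01-08',
                   ['courses.csv', 'users.csv', 'enrollments.csv'])
    # Results land in e.g. cache/rosters/2024spring/courses.<timestamp>.csv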
# Take raw upload (csv) files and make one big json out of them.
# This relates to enrollment files, not the schedule.
def convert_roster_files(semester="", year="", folder=""):
    if not semester:
        semester = input("the semester? (ex: spring) ")
        folder = input("Folder? (ex 2020-02-25-14-58-20) ")
    uf = open('cache/rosters/users-' + folder + '.csv', 'r')
    cf = open('cache/rosters/courses-' + folder + '.csv', 'r')
    ef = open('cache/rosters/enrollments-' + folder + '.csv', 'r')
    u = csv.DictReader(uf)
    c = csv.DictReader(cf)
    e = csv.DictReader(ef)
    uu = [i for i in u]
    cc = [i for i in c]
    ee = [i for i in e]
    uf.close()
    cf.close()
    ef.close()
    myrosterfile = 'cache/rosters/roster_%s_%s.json' % (year, semester)
    if os.path.exists(myrosterfile):
        print(" -- Moving previous combined roster json file. opening %s ..." % myrosterfile)
        last_fileobj = open(myrosterfile, 'r')
        last_file = json.load(last_fileobj)
        last_fileobj.close()
        info = last_file[3]
        last_date = info['date_filestring']
        print(' -- writing: cache/rosters/%s%s/roster_%s.json ...' % (year, semester, last_date))
        try:
            os.rename(myrosterfile,
                      'cache/rosters/%s%s/roster_%s.json' % (year, semester, last_date))
            print(' -- ok')
        except Exception as e:
            print(" ** Failed because I couldn't move the previous roster file: %s" % myrosterfile)
            print(e)
            # Fall back to a sibling filename. (Prefixing the whole path would
            # point into a nonexistent "new_cache/" directory.)
            myrosterfile = 'cache/rosters/new_roster_%s_%s.json' % (year, semester)
        #os.remove('cache/old_rosters/roster_'+semester+'.'+last_date+'.json')
        #os.rename(myrosterfile, 'cache/old_rosters/roster_'+semester+'.'+last_date+'.json')
    newinfo = {'date_filestring': datetime.datetime.now().strftime('%Y-%m-%dT%H-%M')}
    try:
        new_roster = codecs.open(myrosterfile, 'w', 'utf-8')
        new_roster.write(json.dumps([uu, cc, ee, newinfo], indent=2))
        new_roster.close()
        print(" -- Wrote roster info to: %s." % myrosterfile)
    except Exception as e:
        print(" ** Failed to write the new roster file: %s" % myrosterfile)
        print(" ** " + str(e))


# Return True if `name` is not already present as a non-empty file in the
# rosters cache (the folder fetch_current_rosters() downloads into).
def file_doesnt_exist(name, folder='cache/rosters'):
    # Keep only non-empty regular files.
    files = [f for f in os.listdir(folder)
             if os.path.isfile(os.path.join(folder, f))
             and os.path.getsize(os.path.join(folder, f)) > 0]
    if name in files:
        print(f" * file: {name} already exists. not downloading.")
    else:
        print(f" * file: {name} downloading.")
    return name not in files
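
# A minimal sketch of reading the combined roster json written above by
# convert_roster_files(): a four-element list of users, courses, enrollments,
# and an info dict. The year/semester in the filename are hypothetical.
def _example_read_roster_json():
    with codecs.open('cache/rosters/roster_2024_spring.json', 'r', 'utf-8') as f:
        uu, cc, ee, info = json.load(f)
    print(info['date_filestring'], len(uu), len(cc), len(ee))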
# From instructure sftp site.
def fetch_current_rosters():
    cnopts = pysftp.CnOpts()
    cnopts.hostkeys = None
    with pysftp.Connection(instructure_url, username=instructure_username,
                           private_key=instructure_private_key, cnopts=cnopts) as sftp:
        sftp.chdir('SIS')
        files = sftp.listdir()
        ff = open('cache/pipeline.log.txt', 'a')
        now = datetime.datetime.now()
        exact_time = now.strftime('%Y-%m-%d-%H-%M-%S')
        # Round to the nearest hour so repeated runs in the same window
        # produce the same filenames.
        rounded_hour = (now.replace(second=0, microsecond=0, minute=0, hour=now.hour)
                        + timedelta(hours=now.minute // 30))
        rounded_time = rounded_hour.strftime('%Y-%m-%d-%H')
        if len(files) > 0:  # and 'users.csv' in files:
            print(f"--> {exact_time}: I see these files at instructure ftp site:")
            [print(f"  - {f}") for f in files]
            i = 0
            seen_files = []
            check = ['login', 'users', 'courses', 'enrollments']
            for checking in check:
                try:
                    if f'{checking}.csv' in files and file_doesnt_exist(f'{checking}-{rounded_time}.csv'):
                        sftp.get(f'{checking}.csv', f'cache/rosters/{checking}-{rounded_time}.csv')
                        i += 1
                        seen_files.append(f'{checking}.csv')
                except Exception:
                    print(f' * {checking}.csv not present')
            print(' Saved %i data files in rosters folder.' % i)
            ff.write(f" Saved {i} data files: {seen_files}")
            if i > 2:
                if 'courses.csv' in seen_files:
                    courses = open(f'cache/rosters/courses-{rounded_time}.csv', 'r')
                    courses.readline()
                    a = courses.readline()
                    print(a)
                    courses.close()
                    parts = a.split(',')
                    year = parts[1][0:4]
                    ss = parts[1][4:6]
                    # Term code suffix -> semester name (e.g. 202430 = spring 2024).
                    sem = {'30': 'spring', '50': 'summer', '70': 'fall'}
                    this_sem = sem[ss]
                    print(f"  -> This semester is: {this_sem}, {year}")
                    print(f"  -> Building data file... {rounded_time}")
                    convert_roster_files(this_sem, year, rounded_time)
                    print('  -> moving files...')
                    ff.write(f" Moved files to folder: {this_sem} {year} {rounded_time}\n")
                    move_to_folder(this_sem, year, rounded_time, seen_files)
                else:
                    print(" * No courses file. Not moving files.")
                    ff.write(" * No courses file. Not moving files.\n")
        else:
            print(f"--> {exact_time}: Don't see files.")
        ff.close()
        sftp.close()


def fetch_current_rosters_auto():
    fetch_minute = "56,57,58,59,00,01,02,03,04,05,06".split(",")
    for m in fetch_minute:
        schedule.every().hour.at(f":{m}").do(fetch_current_rosters)
    #schedule.every().day.at("12:35").do(sync_non_interactive)
    #schedule.every().day.at("21:00").do(sync_non_interactive)
    #print(f"running every hour on the :{fetch_minute}\n")
    while True:
        try:
            schedule.run_pending()
            time.sleep(4)
        except Exception as e:
            import traceback
            print(" ---- * * * Failed with: %s" % str(e))
            ff = open('cache/pipeline.log.txt', 'a')
            ff.write(datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + "\n")
            ff.write(traceback.format_exc() + "\n---------\n\n")
            ff.close()
            #schedule.CancelJob
            time.sleep(1)


# Canvas data, download all new files.
def sync_non_interactive():
    resp = do_request('/api/account/self/file/sync')
    mylog.write(json.dumps(resp, indent=4))
    #mylog.close()
    gotten = os.listdir(local_data_folder)
    wanted = []
    i = 0
    for x in resp['files']:
        filename = x['filename']
        exi = "No "
        if filename in gotten:
            exi = "Yes"
        else:
            wanted.append(x)
        print(str(i) + '.\tLocal: %s\tRemote: %s' % (exi, filename))
        i += 1
    print("I will attempt to download %i files." % len(wanted))
    #answer = input("Press enter to begin, or q to quit ")
    #if not answer == '': return
    good_count = 0
    bad_count = 0
    for W in wanted:
        print("Downloading: " + W['filename'])
        response = requests.request(method='GET', url=W['url'], stream=True)
        if response.status_code != 200:
            print('Request response went bad. Got back a %s code, meaning the request was %s' %
                  (response.status_code, response.reason))
            print('URL: ' + W['url'])
            bad_count += 1
        else:
            # Use the downloaded data.
            with open(local_data_folder + W['filename'], 'wb') as fd:
                for chunk in response.iter_content(chunk_size=128):
                    fd.write(chunk)
            print("Success")
            good_count += 1
    print("Out of %i files, %i succeeded and %i failed." % (len(wanted), good_count, bad_count))


## OLD STYLE CANVAS DATA

# Get something from Canvas Data: build the HMAC-SHA256 signature the API
# expects, then GET the path and return the parsed json.
def do_request(path):
    # Set up the request pieces.
    method = 'GET'
    host = 'api.inshosteddata.com'
    apiTime = datetime.datetime.utcnow().strftime('%a, %d %b %y %H:%M:%S GMT')
    apiContentType = 'application/json'
    msgList = []
    msgList.append(method)
    msgList.append(host)
    msgList.append(apiContentType)
    msgList.append('')
    msgList.append(path)
    msgList.append('')
    msgList.append(apiTime)
    msgList.append(apiSecret)
    msgStr = bytes("".join("%s\n" % k for k in msgList).strip(), 'utf-8')
    sig = base64.b64encode(hmac.new(key=bytes(apiSecret, 'utf-8'), msg=msgStr, digestmod=hashlib.sha256).digest())
    sig = sig.decode('utf-8')
    headers = {}
    headers['Authorization'] = 'HMACAuth {}:{}'.format(apiKey, sig)
    headers['Date'] = apiTime
    headers['Content-type'] = apiContentType
    # Submit the request / get a response.
    uri = "https://" + host + path
    print(uri)
    print(headers)
    response = requests.request(method='GET', url=uri, headers=headers, stream=True)
    # Check to make sure the request was ok.
    if response.status_code != 200:
        print('Request response went bad. Got back a %s code, meaning the request was %s' %
              (response.status_code, response.reason))
    else:
        # Use the downloaded data.
        jsonData = response.json()
        #print(json.dumps(jsonData, indent=4))
        return jsonData
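
# A minimal usage sketch for do_request(). The path is assumed from the
# Canvas Data 1 docs (list the available dumps); the signature logic is
# exactly what do_request() builds above: HMAC-SHA256 over the newline-joined
# request pieces, base64-encoded into the HMACAuth Authorization header.
def _example_do_request_usage():
    dumps = do_request('/api/account/self/dump')
    if dumps:
        print(json.dumps(dumps, indent=2)[:500])  # peek at the first part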
################
################ SENDING DATA AWAY
################
################
################

# Upload a json file to www. See the usage sketch below.
def put_file(remotepath, localpath, localfile, prompt=1):
    show_all = 0
    folder = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    cnopts = pysftp.CnOpts()
    cnopts.hostkeys = None
    with pysftp.Connection(FTP_SITE, username=FTP_USER, password=FTP_PW, cnopts=cnopts) as sftp:
        #todo: these paths
        #files = sftp.listdir()
        #print(folder + "\tI see these files on remote: ", files, "\n")
        sftp.chdir(remotepath)
        files = sftp.listdir()
        if show_all:
            print(folder + "\tI see these files on remote: ", files, "\n")
        localf = os.listdir(localpath)
        if show_all:
            print("I see these local: ", localf)
        if prompt:
            input('ready to upload')
        sftp.put(localpath + localfile, localfile, preserve_mtime=True)
        sftp.close()
    """
    # copy files and directories from local static, to remote static,
    # preserving modification times on the files
    for f in localf:
        print("This local file: " + f + " ", end=' ')
        if not f in files:
            sftp.put('video_srt/'+classfoldername+'/'+f, f, preserve_mtime=True)
            print("Uploaded.")
        else:
            print("Skipped.")
    """
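
# A minimal usage sketch for put_file(). The paths are hypothetical; the
# remote path is relative to the sftp login's home directory, and prompt=0
# skips the interactive confirmation.
def _example_put_file_usage():
    put_file('public_html/data/', 'cache/', 'roster_2024_spring.json', prompt=0)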
"+t+"
\n" def ul(t): return "