initial commit from canvasapp HDD

This commit is contained in:
Coding with Peter 2023-03-22 09:29:52 -07:00
commit 09fb625772
48 changed files with 26397 additions and 0 deletions

15
.gitignore vendored Normal file
View File

@ -0,0 +1,15 @@
secrets.py
*.bak
.ipynb_checkpoints
104ab42f11
__pycache__
cache
mergeme
qanda
qanda_student
sftp
static
ipython_log.*
completer.hist
*.zip
*.un~

0
__init__.py Normal file
View File

1619
apphelp.py Normal file

File diff suppressed because it is too large Load Diff

225
checker.py Normal file
View File

@ -0,0 +1,225 @@
# Common functions for checking web and canvas for accessibility
import os, sys, glob, codecs
import subprocess, re, pdb, html
from bs4 import BeautifulSoup, Comment
import html.entities
from datetime import datetime
import pdb
#from html.parser import HTMLParseError
# the following from: https://chase-seibert.github.io/blog/2011/01/28/sanitize-html-with-beautiful-soup.html#
# hasn't been tested yet
def safe_html(html):
    """Sanitize an HTML fragment for re-publishing.

    Keeps a small whitelist of tags (with attributes filtered through
    _attr_name_whitelisted), unwraps table markup so its text survives,
    removes <script>/<style> together with their contents, and strips
    HTML comments. Returns the cleaned markup as a string, or None for
    empty input or the degenerate ", -" result.
    """
    if not html:
        return None
    # remove these tags, complete with contents.
    blacklist = ["script", "style"]
    whitelist = [
        "div", "span", "p", "br", "pre", "a",
        "blockquote",
        "ul", "li", "ol",
        "b", "em", "i", "strong", "u", "iframe", "img",
        "h1", "h2", "h3", "h4", "h5", "h6"
    ]
    try:
        # BeautifulSoup is catching out-of-order and unclosed tags, so markup
        # can't leak out of comments and break the rest of the page.
        soup = BeautifulSoup(html, 'lxml')
    except Exception:
        # No special handling yet; re-raise without chaining a new frame so
        # the original traceback is preserved.
        raise
    # Table wrappers are dropped but their contents are kept (unwrap below).
    removelist = ['table', 'tbody', 'thead', 'th', 'tr', 'td']
    # now strip HTML we don't like.
    for tag in soup.findAll():
        name = tag.name.lower()
        if name in ('iframe', 'img'):
            # embeds and images pass through untouched, attributes included
            continue
        if name in blacklist:
            # blacklisted tags are removed in their entirety
            tag.extract()
        elif name in whitelist:
            # tag is allowed: drop every attribute that is not whitelisted.
            # (The original also had an `else: tag.attrs[k] = v` branch that
            # reassigned the value to itself -- a no-op, removed.)
            for k in list(tag.attrs):
                if not _attr_name_whitelisted(k):
                    tag.attrs.pop(k)
        elif name in removelist:
            tag.unwrap()
        else:
            # not a whitelisted tag: replace it with its children.
            tag.unwrap()
    # scripts can be executed from comments in some cases
    comments = soup.findAll(text=lambda text: isinstance(text, Comment))
    for comment in comments:
        comment.extract()
    cleaned = str(soup)  # renamed: the original shadowed the function name
    if cleaned == ", -":
        return None
    return cleaned
def _attr_name_whitelisted(attr_name):
return attr_name.lower() in ["href", "src","width","height","alt","target","title","class","id"]
def safe_css(attr, css):
    """Strip width/height declarations from a style attribute value.

    Non-style attributes pass through unchanged.
    """
    if attr != "style":
        return css
    return re.sub("(width|height):[^;]+;", "", css)
def plaintext(input):
    """Converts HTML to plaintext, preserving whitespace.

    Runs safe_html() first, extracts the text nodes, then decodes numeric
    and named HTML entities.
    """
    # from http://effbot.org/zone/re-sub.htm#unescape-html
    def _unescape(text):
        def fixup(m):
            text = m.group(0)
            if text[:2] == "&#":
                # numeric character reference (decimal or hex)
                try:
                    if text[:3] == "&#x":
                        return chr(int(text[3:-1], 16))
                    else:
                        return chr(int(text[2:-1]))
                except ValueError:
                    pass
            else:
                # named entity
                try:
                    text = chr(html.entities.name2codepoint[text[1:-1]])
                except KeyError:
                    pass
            return text  # leave as is
        # raw string: "\w" in a plain literal is an invalid escape in Python 3
        return re.sub(r"&#?\w+;", fixup, text)
    input = safe_html(input)  # basic sanitation first
    # explicit lxml parser, consistent with the rest of this module
    text = "".join(BeautifulSoup("<body>%s</body>" % input, "lxml").body(text=True))
    text = text.replace("xml version='1.0' encoding='%SOUP-ENCODING%'", "")  # strip BS meta-data
    return _unescape(text)
#os.system("node node_modules/pa11y/bin/pa11y.js --standard Section508 http://www.gavilan.edu/student/online")
def check_folder(fname, path):
    """Run pa11y (Section 508) over every file in path+fname.

    Returns (error_count, html_report). Best-effort: if the folder is
    missing or pa11y output is unexpected, whatever was accumulated so
    far is returned.
    """
    report = '<h2>' + fname + '</h2>\n'
    count = 0
    try:
        for F in os.listdir(path + fname):
            cmd = "/usr/bin/node " + \
                  "/home/phowell/Documents/access/node_modules/pa11y/bin/pa11y.js --standard Section508 " + \
                  path + fname + "/" + F
            print(("" + path + fname + "/" + F))
            output = subprocess.run(cmd, stdout=subprocess.PIPE,
                                    universal_newlines=True, shell=True, check=False)
            report += "<h3>" + F + "</h3>\n"
            # pa11y's summary line sits two lines from the end of its output
            line = output.stdout.split('\n')[-3]
            if re.search(r'No\sissues', line):
                pass
            else:
                m = re.search(r'(\d+)\sErr', line)
                if m:
                    count += int(m.group(1))
            lines = output.stdout.split("\n")
            lines = lines[4:]  # skip pa11y's banner lines
            report += "<pre>" + html.escape("\n".join(lines)) + "</pre>\n\n\n"
    except Exception as e:
        # folder missing or a file failed mid-run; return what we have
        print('finished with error or folder missing')
        print(e)
    return int(count), report
def check_class(folder):
    """Build an accessibility report for one downloaded course folder.

    Runs check_folder over the 'assignments' and 'pages' subfolders and
    returns (total_error_count, html_report).
    """
    base = "/home/phowell/hdd/SCRIPTS/everything-json/course_temps/" + folder + "/"
    report = "<h1>Report on course: " + folder + "</h1>\n\n"
    total = 0
    for sub in ('assignments', 'pages'):
        cnt, rep = check_folder(sub, base)
        total += cnt
        report += rep
    return total, report
def check_all():
    """Run accessibility checks over every downloaded course folder.

    Writes a full report (report.html) and a per-course summary with
    deep links (summary.html) under the hard-coded everything-json path.
    """
    hd_path = '/home/phowell/hdd/SCRIPTS/everything-json/'
    rep_f = open(hd_path+'report.html','w')
    rep_s = open(hd_path+'summary.html','w')
    rep_f.write('<meta charset="utf-8"/>\n')
    listt = os.listdir('/home/phowell/hdd/SCRIPTS/everything-json/course_temps')
    #listt = ['course_4341',] # for testing
    for L in listt:
        print(('Directory is: '+L))
        # a course's *.txt file (if any) appears to carry its display name
        m = glob.glob('/home/phowell/hdd/SCRIPTS/everything-json/course_temps/' +L+'/*.txt')
        if m: name = m[0]
        else: name = 'unknown.txt'
        name = name.split('.')[0]
        name = name.split('/')[-1]
        print(('name is: ' + name))
        (cnt,rep) = check_class(L)
        # anchor per course so the summary can link into the big report
        rep_f.write("<a name='"+L+"'><h1>"+name+"</h1>\n"+rep+"\n\n<br /><br />\n\n")
        rep_f.flush()
        rep_s.write("("+str(cnt)+") Class: <a href='report.html#"+L+"'>"+name+"</a><br />\n")
        rep_s.flush()
if __name__ == "__main__":
    check_all()
    #print(('arguments: '+str(sys.argv)))
# test
# Retired manual-test code, kept disabled as a module-level string literal.
"""
file = 'course_temps/course_6862/pages/choose-the-right-browser.html'
dir = 'course_temps/course_6862/pages/'
#ff = open(file,'r').read()
#print safe_html(ff)
for file in os.listdir(dir):
    if re.search('_cleaned\.html',file):
        os.remove(dir+file)
for file in os.listdir(dir):
    if file.endswith(".html"):
        newfname = re.sub('\.html','_cleaned.html',file)
        ff = codecs.open(dir+file,'r','utf-8').read()
        print(file)
        print(newfname)
        newf = codecs.open(dir+newfname,'w','utf-8')
        newf.write(safe_html(ff))
        newf.close()
"""

860
content.py Normal file
View File

@ -0,0 +1,860 @@
#saved_titles = json.loads( codecs.open('cache/saved_youtube_titles.json','r','utf-8').read() )
import requests, codecs, os, re, json
from pipelines import header, fetch, url
from util import clean_title, to_file_friendly
from bs4 import BeautifulSoup as bs
from html.parser import HTMLParser
import tomd, checker
import html2markdown as h2m
import pypandoc
h = HTMLParser()
DBG = 1
def d(s):
    """Debug print: emit *s* only while the module-level DBG flag is set."""
    global DBG
    if not DBG:
        return
    print(s)
def stripper(s):
    """Remove presentational/scripting attributes from every tag in *s*.

    Returns the pretty-printed cleaned markup.
    """
    REMOVE_ATTRIBUTES = [
        'lang','language','onmouseover','onmouseout','script','style','font',
        'dir','face','size','color','style','class','width','height','hspace',
        'border','valign','align','background','bgcolor','text','link','vlink',
        'alink','cellpadding','cellspacing']
    soup = bs(s, features='lxml')
    for tag in soup.recursiveChildGenerator():
        try:
            # BUG FIX: the original called tag.attrs.iteritems() (Python 2
            # only); on Python 3 that raised AttributeError on EVERY tag and
            # was swallowed below, so no attribute was ever removed.
            # dict.items() restores the intended filtering.
            tag.attrs = {key: value for key, value in tag.attrs.items()
                         if key not in REMOVE_ATTRIBUTES}
        except AttributeError:
            # 'NavigableString' object has no attribute 'attrs'
            pass
    return soup.prettify()
def mycleaner(s):
    """Lightly normalise converted markup: <br/> becomes a newline, <b> tags
    are dropped, runs of spaces collapse to one, whitespace-only lines are
    blanked, and a single leading space is trimmed."""
    substitutions = (
        (r'<br\s?\/>', '\n', 0),
        (r'<\/?b>', '', 0),
        (r' +', ' ', 0),
        (r'^[\s\t\r\n]+$', '', re.MULTILINE),
        ('^ ', '', 0),
    )
    for pattern, replacement, flags in substitutions:
        s = re.sub(pattern, replacement, s, flags=flags)
    return s
def freshdesk():
    """Import a Freshdesk Solutions.xml export: print each article's title
    and markdown-converted body, and dump everything to cache/faqs.txt."""
    path = "C:\\Users\\peter\\Downloads\\freshdesk\\Solutions.xml"
    soup = bs( codecs.open(path,'r','utf-8').read() ,features="lxml")
    outpt = codecs.open('cache/faqs.txt','w')
    out = ""
    for a in soup.find_all('solution-article'):
        print("TITLE\n"+a.find('title').get_text())
        out += a.find('title').get_text()
        """for d in a.find_all('description'):
            #print(d)
            if d:
                d = h.unescape(d.get_text())
                e = stripper(d)
                m = tomd.convert( e )
                m = mycleaner(m)
                print("\nDESCRIPTION\n"+m)"""
        #print("\nWHAT IS THIS?\n" +
        # <desc-un-html> appears to hold the article body with entities
        # escaped: unescape, strip attributes, then convert to markdown.
        # (note: the local `d` here shadows the module-level debug helper d())
        hh = a.find('desc-un-html').get_text()
        d = h.unescape(hh)
        e = stripper(d)
        m = tomd.convert( e )
        m = mycleaner(m)
        print("\nDESCRIPTION\n"+m)
        out += "\n\n" + m + "\n\n"
        print("-----------\n\n")
    outpt.write(out)
# Download everything interesting in a course to a local folder
# Build a master file with the entire class content
def accessible_check(id=""):
    """Download a Canvas course (files, pages, assignments) into
    ../course_temps/course_<id>/ and assemble a single full-course document
    (raw html, html, md, docx) ordered by the course's modules.

    NOTE(review): `items_inorder` is built but never written out here; only
    `items` (placed via item_id_to_index) feeds the fullcourse output.
    """
    if not id:
        id = input("ID of course to check? ")
    pagebreak = '\n\n<!-- BREAK -->\n\n'
    verbose = 1
    # content-types worth downloading into the local files/ folder
    save_file_types = ['application/pdf','application/docx','image/jpg','image/png','image/gif','image/webp','application/vnd.openxmlformats-officedocument.wordprocessingml.document']
    courseinfo = fetch('/api/v1/courses/' + str(id), verbose )
    # maps a page url / content id to its slot in `items`, so content fetched
    # later can be dropped into module order
    item_id_to_index = {}
    items_inorder = ["<font size='24'>" + courseinfo['name'] + "</font>\n\n" + pagebreak,]
    running_index = 1
    modules = fetch('/api/v1/courses/' + str(id) + '/modules',verbose)
    items = []
    # pre-size the ordered-content list (assumes < 9000 module items)
    for x in range(9000): items.append(0)
    video_link_list = []
    for m in modules:
        items[running_index] = '<h2>%s</h2>%s\n' % ( m['name'], pagebreak )
        running_index += 1
        mod_items = fetch('/api/v1/courses/' + str(id) + '/modules/'+str(m['id'])+'/items', verbose)
        for I in mod_items:
            if I['type'] in ['SubHeader', 'Page', 'Quiz', 'Discussion', 'ExternalUrl' ] or 'content_id' in I:
                running_index += 1
                if I['type'] == 'SubHeader':
                    #print('subheader: ' + str(I))
                    items[running_index] = '<h3>%s</h3>\n' % str(json.dumps(I,indent=2))
                if I['type'] == 'Page':
                    item_id_to_index[ I['page_url'] ] = running_index
                if I['type'] == 'Quiz':
                    item_id_to_index[ I['content_id'] ] = running_index
                if I['type'] == 'Discussion':
                    item_id_to_index[ I['content_id'] ] = running_index
                if I['type'] == 'ExternalUrl':
                    items[running_index] = "<a href='%s'>%s</a><br />\n\n" % (I['external_url'], I['title'])
                # ?
                #if 'content_id' in I:
                #    item_id_to_index[ I['content_id'] ] = running_index
            else:
                print("What is this item? " + str(I))
                #items_inorder.append('<i>Not included: '+ I['title'] + '(a ' + I['type'] + ')</i>\n\n\n' )
    # I['title']
    # I['content_id']
    # I['page_url']
    # I['type']
    # I['published']
    # assignments and files have content_id, pages have page_url
    course_folder = '../course_temps/course_'+id
    index = []
    try:
        os.mkdir(course_folder)
    except:
        print("Course folder exists.")
    ###
    ### FILES
    ###
    files_f = course_folder + '/files'
    headered = 0
    print("\nFILES")
    try:
        os.mkdir(files_f)
    except:
        print(" * Files folder already exists.")
    files = fetch('/api/v1/courses/' + str(id) + '/files', verbose)
    print("LISTING COURSE FILES")
    for f in files:
        for arg in 'filename,content-type,size,url'.split(','):
            if arg=='size':
                # humanise the byte count (display only)
                f['size'] = str(int(f['size']) / 1000) + 'k'
        if f['content-type'] in save_file_types:
            d(' - %s' % f['filename'])
            if not os.path.exists(files_f + '/' + f['filename']):
                r = requests.get(f['url'],headers=header, stream=True)
                with open(files_f + '/' + f['filename'], 'wb') as fd:
                    for chunk in r.iter_content(chunk_size=128):
                        fd.write(chunk)
            else:
                d(" - already downloaded %s" % files_f + '/' + f['filename'])
            if not headered:
                index.append( ('<br /><b>Files</b><br />') )
                headered = 1
            index.append( ('files/' + f['filename'], f['filename']) )
    ###
    ### PAGES
    ###
    pages_f = course_folder + '/pages'
    headered = 0
    image_count = 0
    print("\nPAGES")
    try:
        os.mkdir(pages_f)
    except:
        print(" * Pages folder already exists.")
    pages = fetch('/api/v1/courses/' + str(id) + '/pages', verbose)
    for p in pages:
        d(' - %s' % p['title'])
        p['title'] = clean_title(p['title'])
        easier_filename = clean_title(p['url'])
        this_page_filename = "%s/%s.html" % (pages_f, easier_filename)
        #for a in 'title,updated_at,published'.split(','):
        #    print(str(p[a]), "\t", end=' ')
        if not headered:
            index.append( ('<br /><b>Pages</b><br />') )
            headered = 1
        index.append( ( 'pages/' + easier_filename + '.html', p['title'] ) )
        if os.path.exists(this_page_filename):
            d(" - already downloaded %s" % this_page_filename)
            this_page_content = open(this_page_filename,'r').read()
        elif re.search(r'eis-prod',p['url']) or re.search(r'gavilan\.ins',p['url']):
            d(' * skipping file behind passwords')
        else:
            t2 = fetch('/api/v1/courses/' + str(id) + '/pages/'+p['url'], verbose)
            if t2 and 'body' in t2 and t2['body']:
                bb = bs(t2['body'],features="lxml")
                # remember youtube links so the full-course file can list them
                a_links = bb.find_all('a')
                for A in a_links:
                    if re.search( r'youtu', A['href']):
                        video_link_list.append( (A['href'], A.text, 'pages/'+easier_filename + ".html") )
                page_images = bb.find_all('img')
                for I in page_images:
                    d(' - %s' % I['src'])
                    if re.search(r'eis-prod',I['src']) or re.search(r'gavilan\.ins',I['src']):
                        d(' * skipping file behind passwords')
                    else:
                        try:
                            r = requests.get(I['src'],headers=header, stream=True)
                            mytype = r.headers['content-type']
                            #print("Response is type: " + str(mytype))
                            r_parts = mytype.split("/")
                            ending = r_parts[-1]
                            with open(pages_f + '/' + str(image_count) + "." + ending, 'wb') as fd:
                                for chunk in r.iter_content(chunk_size=128):
                                    fd.write(chunk)
                            image_count += 1
                        except Exception as e:
                            d( ' * Error downloading page image, %s' % str(e) )
                try:
                    with codecs.open(this_page_filename, 'w','utf-8') as fd:
                        this_page_content = "<h2>%s</h2>\n%s" % ( t2['title'], t2['body'] )
                        fd.write(this_page_content)
                except:
                    d(' * problem writing page content')
                ## TODO include linked pages even if they aren't in module
            else:
                d(' * nothing returned or bad fetch')
        # write to running log of content in order of module
        # NOTE(review): if the skip/bad-fetch branches ran, this_page_content
        # may be stale from a previous iteration (or unbound on the first).
        if p and p['url'] in item_id_to_index:
            items[ item_id_to_index[ p['url'] ] ] = this_page_content +'\n\n'+pagebreak
        else:
            d(' -- This page didnt seem to be in the modules list.')
    ###
    ### ASSIGNMENTS
    ###
    headered = 0
    asm_f = course_folder + '/assignments'
    print("\nASSIGNMENTS")
    try:
        os.mkdir(asm_f)
    except:
        d(" - Assignments dir exists")
    asm = fetch('/api/v1/courses/' + str(id) + '/assignments', verbose)
    for p in asm:
        d(' - %s' % p['name'])
        try:
            friendlyfile = to_file_friendly(p['name'])
            this_assmt_filename = asm_f + '/' + str(p['id'])+"_"+ friendlyfile + '.html'
            if os.path.exists(this_assmt_filename):
                d(" - already downloaded %s" % this_assmt_filename)
                this_assmt_content = open(this_assmt_filename,'r').read()
            else:
                t2 = fetch('/api/v1/courses/' + str(id) + '/assignments/'+str(p['id']), verbose)
                with codecs.open(this_assmt_filename, 'w','utf-8') as fd:
                    this_assmt_content = "<h2>%s</h2>\n%s\n\n" % (t2['name'], t2['description'])
                    fd.write(this_assmt_content)
            if not headered:
                index.append( ('<br /><b>Assignments</b><br />') )
                headered = 1
            index.append( ('assignments/' + str(p['id'])+"_"+friendlyfile + '.html', p['name']) )
            # write to running log of content in order of module
            # NOTE(review): indexes by p['id'] but then reads p['url'], which
            # assignment payloads may not carry -- the except below would
            # swallow the resulting KeyError. Confirm intended key.
            if p['id'] in item_id_to_index:
                items[ item_id_to_index[ p['url'] ] ] = this_assmt_content+'\n\n'+pagebreak
        except Exception as e:
            d(' * Problem %s' % str(e))
    ###
    ### FORUMS
    ###
    # Retired forums/quizzes/external-links code, kept disabled as a string.
    """forum_f = course_folder + '/forums'
    headered = 0
    image_count = 0
    print("\nFORUMS")
    try:
    os.mkdir(forum_f)
    forums = fetch('/api/v1/courses/' + str(id) + '/discussion_topics', verbose)
    for p in forums:
    p['title'] = clean_title(p['title'])
    forum_id = p['id']
    easier_filename = p['title']
    for a in 'title,posted_at,published'.split(','):
    print(str(p[a]), "\t", end=' ')
    print("")
    t2 = fetch('/api/v1/courses/' + str(id) + '/discussion_topics/'+str(forum_id), verbose)
    #### REMOVED
    bb = bs(t2['body'],features="lxml")
    print("IMAGES IN THIS PAGE")
    page_images = bb.find_all('img')
    for I in page_images:
    r = requests.get(I['src'],headers=header, stream=True)
    mytype = r.headers['content-type']
    print("Response is type: " + str(mytype))
    r_parts = mytype.split("/")
    ending = r_parts[-1]
    with open(pages_f + '/' + str(image_count) + "." + ending, 'wb') as fd:
    for chunk in r.iter_content(chunk_size=128):
    fd.write(chunk)
    image_count += 1
    #### END REMOVED
    try:
    with codecs.open(forum_f + '/' + easier_filename + '.html', 'w','utf-8') as fd:
    fd.write("<h1>"+t2['title']+"</h1>\n")
    fd.write(t2['message'])
    if not headered: index.append( ('<br /><b>Discussion Forums</b><br />') )
    headered = 1
    index.append( ( 'forums/' + easier_filename + '.html', p['title'] ) )
    # write to running log of content in order of module
    if p['id'] in item_id_to_index:
    items_inorder[ item_id_to_index[ p['id'] ] ] = '<h1>'+t2['title']+'</h1>\n\n'+t2['message']+'\n\n'+pagebreak
    else:
    print(' This forum didnt seem to be in the modules list.')
    except Exception as e:
    print("Error here:", e)
    #print p
    #print results_dict
    except Exception as e:
    print("** Forum folder seems to exist. Skipping those.")
    print(e)
    ###
    ### QUIZZES
    ###
    # get a list external urls
    headered = 0
    t = url + '/api/v1/courses/' + str(id) + '/modules'
    while t: t = fetch(t)
    mods = results
    results = []
    for m in mods:
    results = []
    t2 = url + '/api/v1/courses/' + str(id) + '/modules/' + str(m['id']) + '/items'
    while t2: t2 = fetch(t2)
    items = results
    for i in items:
    #print i
    if i['type'] == "ExternalUrl":
    #print i
    for j in 'id,title,external_url'.split(','):
    print unicode(i[j]), "\t",
    print ""
    if not headered: index.append( ('<br /><b>External Links</b><br />') )
    headered = 1
    index.append( (i['external_url'], i['title']) )
    """
    # Create index page of all gathered items
    myindex = codecs.open(course_folder+'/index.html','w','utf-8')
    for i in index:
        if len(i)==2: myindex.write("<a href='"+i[0]+"'>"+i[1]+"</a><br />\n")
        else: myindex.write(i)
    # Full course content in single file
    print("Writing main course files...")
    mycourse = codecs.open(course_folder+'/fullcourse.raw.html','w','utf-8')
    for I in items:
        if I:
            mycourse.write( I )
    # debug dump of the bookkeeping structures
    temp = open('cache/coursedump.txt','w')
    temp.write( "items: " + json.dumps(items,indent=2) )
    temp.write("\n\n\n")
    temp.write( "index: " + json.dumps(index,indent=2) )
    temp.write("\n\n\n")
    temp.write( "items_inorder: " + json.dumps(items_inorder,indent=2) )
    temp.write("\n\n\n")
    temp.write( "item_id_to_index: " + json.dumps(item_id_to_index,indent=2) )
    if video_link_list:
        mycourse.write('\n<h1>Videos Linked in Pages</h1>\n<table>')
        for V in video_link_list:
            (url, txt, pg) = V
            mycourse.write("<tr><td><a target='_blank' href='"+url+"'>"+txt+"</a></td><td> on <a target='_blank' href='" + pg + "'>" + pg + "</a></td></tr>\n")
        mycourse.write("</table>\n")
    mycourse.close()
    # raw html -> cleaned html -> markdown -> docx
    output = pypandoc.convert_file(course_folder+'/fullcourse.raw.html', 'html', outputfile=course_folder+"/fullcourse.html")
    output1 = pypandoc.convert_file(course_folder+'/fullcourse.html', 'md', outputfile=course_folder+"/fullcourse.md")
    output2 = pypandoc.convert_file(course_folder+'/fullcourse.html', 'docx', outputfile=course_folder+"/fullcourse.docx")
def pan_testing():
    """Round-trip check: regenerate HTML from the markdown export of course 6862."""
    folder = '../course_temps/course_6862'
    pypandoc.convert_file(folder + '/fullcourse.md', 'html',
                          outputfile=folder + "/fullcourse.v2.html")
# Given course, new title, and new content, create a new page in that course
def create_page(course_num, new_title, new_content):
    """Create a new wiki page in a Canvas course, after interactive confirmation."""
    endpoint = url + '/api/v1/courses/' + str(course_num) + '/pages'
    print("Creating page: %s" % new_title)
    confirmation = input('type 1 to confirm: ')
    if confirmation != '1':
        return
    payload = {'wiki_page[title]': new_title, 'wiki_page[body]': new_content}
    response = requests.post(endpoint, headers=header, params=payload)
    print(response)
    print('ok')
def md_to_course():
    """Convert a per-course markdown file to HTML and upload it to Canvas,
    splitting on <h1> headings: each h1 starts a new wiki page."""
    #input = 'C:/Users/peter/Nextcloud/Documents/gavilan/student_orientation.txt'
    #output = 'C:/Users/peter/Nextcloud/Documents/gavilan/stu_orientation/student_orientation.html'
    id = "11214"
    infile = 'cache/pages/course_%s.md' % id
    output = 'cache/pages/course_%s_fixed.html' % id
    output3 = pypandoc.convert_file(infile, 'html', format='md', outputfile=output)
    xx = codecs.open(output,'r','utf-8').read()
    soup = bs( xx, features="lxml" )
    soup.encode("utf-8")
    current_page = ""
    current_title = ""
    for child in soup.body.children:
        if child.name == "h1" and not current_title:
            # first heading: just opens the first page
            current_title = child.get_text()
        elif child.name == "h1":
            # a later heading closes the previous page, then starts the next
            upload_page(id,current_title,current_page)
            current_title = child.get_text()
            current_page = ""
            print( "Next page: %s" % current_title )
        else:
            #print(dir(child))
            if 'prettify' in dir(child):
                current_page += child.prettify(formatter="html")
            else:
                current_page += child.string
    # final page: whatever accumulated after the last h1
    upload_page(id,current_title,current_page)
    print("Done")
# DL pages only
def grab_course_pages(course_num=-1):
    """Download every module Page of a course, appending sanitized HTML plus
    a markdown conversion to cache/pages/course_<id>.html / .md.

    The '### <title>### <page_url>' divider lines written here are the
    format put_course_pages() parses back.
    """
    global results, results_dict, url, header
    # course_num = raw_input("What is the course id? ")
    if course_num<0:
        course_num = input("Id of course? ")
    else:
        course_num = str(course_num)
    modpagelist = []
    modurllist = []
    # We want things in the order of the modules
    t4 = url + '/api/v1/courses/'+str(course_num)+'/modules?include[]=items'
    results = fetch(t4)
    i = 1
    pageout = codecs.open('cache/pages/course_'+str(course_num)+'.html','w','utf-8')
    pageoutm = codecs.open('cache/pages/course_'+str(course_num)+'.md','w','utf-8')
    divider = "\n### "
    for M in results:
        print("Module Name: " + M['name'])
        for I in M['items']:
            if I['type']=='Page':
                modpagelist.append(I['title'])
                modurllist.append(I['page_url'])
                pageout.write(divider+I['title']+'### '+I['page_url']+'\n')
                easier_filename = clean_title(I['page_url'])
                print(" " + str(i) + ". " + I['title'])
                t2 = url + '/api/v1/courses/' + str(course_num) + '/pages/'+I['page_url']
                print('Getting: ' + t2)
                mypage = fetch(t2)
                fixed = checker.safe_html(mypage['body'])
                if fixed:
                    #markdown = h2m.convert(fixed)
                    #p_data = pandoc.read(mypage['body'])
                    markdown = pypandoc.convert_text("\n<h1>" + I['title'] + "</h1>\n" + mypage['body'], 'md', format='html')
                    pageout.write(fixed+'\n')
                    pageoutm.write(markdown+'\n')
                pageout.flush()
                i += 1
    pageout.close()
    pageoutm.close()
# Re-upload course pages from a local dump. Appears to not be used.
def put_course_pages():
    """Upload pages parsed out of cache/pages/course_6862.html.

    The dump format is the one grab_course_pages writes: a
    '### <title>### <url>' marker line precedes each page body.
    """
    course_num = '6862'
    source = codecs.open('cache/pages/course_'+str(course_num)+'.html','r','utf-8')
    titles = []
    urls = []
    bodies = []
    seen_marker = 0
    body = ""
    for line in source.readlines():
        marker = re.search('^###\s(.*)###\s(.*)$',line)
        if marker:
            titles.append(marker.group(1))
            urls.append(marker.group(2))
            if seen_marker:
                # close out the body accumulated since the previous marker
                bodies.append(body)
                body = ""
            seen_marker = 1
        else:
            body += "\n" + line
    bodies.append(body)
    for position, page_url in enumerate(urls):
        upload_page(course_num, page_url, bodies[position])
# Also not used
def put_revised_pages():
    """Upload pages parsed out of fullcourse.v2.html, splitting on <h1> lines.

    NOTE(review): the regex '^<h1>(.*)</h1>.*$' captures only ONE group, so
    ma.group(2) below raises IndexError on the first header line -- as
    written (and as the comment above says, it is unused) this function
    cannot run to completion. Needs a second capture or a different source
    for the page url before it can be revived.
    """
    course_num = '6862'
    course_folder = '../course_temps/course_6862'
    filein = codecs.open(course_folder+'/fullcourse.v2.html','r','utf-8')
    my_titles = []
    my_urls = []
    my_bodys = []
    started = 0
    current_body = ""
    for L in filein.readlines():
        ma = re.search('^<h1>(.*)</h1>.*$',L)
        if ma:
            my_titles.append(ma.group(1))
            my_urls.append(ma.group(2))
            if started:
                my_bodys.append(current_body)
                current_body = ""
            started = 1
        else:
            current_body += "\n" + L
    my_bodys.append(current_body)
    i = 0
    for U in my_urls:
        # and now upload it....lol
        upload_page(course_num,U,my_bodys[i])
        i += 1
# Download, clean html, and reupload page
def update_page():
    """Interactively pick one module page of course 6862, sanitize its HTML
    with checker.safe_html, and upload the repaired body back to Canvas.

    NOTE(review): relies on the module globals `results`/`results_dict`
    being filled as a side effect of fetch(), and calls fetch_dict(), which
    is not defined in this file's visible code -- confirm it exists at
    runtime.
    """
    global results, results_dict, url, header
    # course_num = raw_input("What is the course id? ")
    course_num = '6862'
    t = url + '/api/v1/courses/' + str(course_num) + '/pages'
    # fetch() here appears to return the next pagination URL and accumulate
    # rows into the global `results`
    while t: t = fetch(t)
    pages = results
    results = []
    mypagelist = []
    myurllist = []
    modpagelist = []
    modurllist = []
    for p in pages:
        p['title'] = clean_title(p['title'])
        mypagelist.append(p['title'])
        myurllist.append(p['url'])
        easier_filename = clean_title(p['url'])
        #for a in 'title,updated_at,published'.split(','):
        #    print unicode(p[a]), "\t",
        #print ""
    # We want things in the order of the modules
    t4 = url + '/api/v1/courses/'+str(course_num)+'/modules?include[]=items'
    while t4: t4 = fetch(t4)
    mods = results
    results = []
    i = 1
    print("\nWhat page do you want to repair?")
    for M in mods:
        print("Module Name: " + M['name'])
        for I in M['items']:
            if I['type']=='Page':
                modpagelist.append(I['title'])
                modurllist.append(I['page_url'])
                print(" " + str(i) + ". " + I['title'])
                i += 1
    choice = input("\n> ")
    choice = int(choice) - 1
    chosen_url = modurllist[choice]
    print('Fetching: ' + modpagelist[choice])
    t2 = url + '/api/v1/courses/' + str(course_num) + '/pages/'+chosen_url
    print('From: ' + t2)
    results_dict = {}
    while(t2): t2 = fetch_dict(t2)
    mypage = results_dict
    fixed_page = checker.safe_html(mypage['body'])
    upload_page(course_num,chosen_url,fixed_page)
# Given course, page url, and new content, upload the new revision of a page
def upload_page(course_num, pageurl, new_content):
    """PUT a revised page body back to Canvas, after interactive confirmation."""
    print("Repaired page:\n\n")
    #print new_content
    print(pageurl)
    target = url + '/api/v1/courses/' + str(course_num) + '/pages/' + pageurl
    answer = input('Enter 1 to continue and send back to: ' + target + ': ')
    #answer = '1'
    if answer != '1':
        return
    payload = {'wiki_page[body]': new_content}
    response = requests.put(target, headers=header, params=payload)
    print(response)
    print('ok')
# Use template to build html page with homegrown subtitles
def build_srt_embed_php(data):
    """Fill template_srt_and_video.txt with the values in *data*.

    data keys: 'frameid', 'title', 'embedlink', 'srtfolderfile'.
    Returns the completed page as one string.
    """
    template = codecs.open('template_srt_and_video.txt','r','utf-8').readlines()
    # str.replace instead of re.sub: the substituted values are arbitrary
    # text (video titles, URLs) and must not be interpreted as regex
    # replacement patterns -- a backslash or '\g' in a title would corrupt
    # the output or raise from re.sub.
    replacements = (
        ('FRAMEID', data['frameid']),
        ('TITLE', data['title']),
        ('EMBEDLINK', data['embedlink']),
        ('SRTFOLDERFILE', data['srtfolderfile']),
    )
    result = ''
    for line in template:
        for placeholder, value in replacements:
            line = line.replace(placeholder, value)
        result += line
    return result
def yt_title(code):
    """Resolve a YouTube video id to its title, with a persistent cache.

    Caches hits in the module-level saved_titles dict and rewrites
    saved_youtube_titles.json on every miss.

    NOTE(review): the line that loads saved_titles at module top is
    commented out -- the first call will NameError unless saved_titles is
    initialised elsewhere. Confirm before use.
    """
    global saved_titles
    if code in saved_titles:
        return saved_titles[code]
    a = requests.get('https://www.youtube.com/watch?v=%s' % code)
    bbb = bs(a.content,"lxml")
    ccc = bbb.find('title').text
    # the page <title> is "<video title> - YouTube"; strip the suffix
    ccc = re.sub(r'\s\-\sYouTube','',ccc)
    saved_titles[code] = ccc
    codecs.open('saved_youtube_titles.json','w','utf-8').write(json.dumps(saved_titles))
    return ccc
def swap_youtube_subtitles():
    """Interactive tool: pair local .srt subtitle files with the youtube
    videos embedded in a course's pages, then generate one PHP embed page
    per match (from template_srt_and_video.txt) and upload the folder.

    NOTE(review): depends on webbrowser, minimal_string() and put_file(),
    none of which appear in this file's visible imports/definitions --
    confirm they are available at runtime.
    """
    # example here: http://siloor.github.io/youtube.external.subtitle/examples/srt/
    # srt folder, look at all filenames
    srtlist = os.listdir('video_srt')
    i = 0
    for V in srtlist:
        print(str(i) + '. ' + V)
        i += 1
    choice = input("Which SRT folder? ")
    choice = srtlist[int(choice)]
    srt_folder = 'video_srt/'+choice
    class_srt_folder = choice
    srt_files = os.listdir(srt_folder)
    srt_shorts = {}
    print("\nThese are the subtitle files: " + str(srt_files))
    for V in srt_files:
        if V.endswith('srt'):
            V1 = re.sub(r'(\.\w+$)','',V)
            # normalised key used for fuzzy filename <-> title matching
            srt_shorts[V] = minimal_string(V1)
    crs_id = input("What is the id of the course? ")
    grab_course_pages(crs_id)
    v1_pages = codecs.open('page_revisions/course_'+str(crs_id)+'.html','r','utf-8')
    v1_content = v1_pages.read()
    # a temporary page of all youtube links
    tp = codecs.open('page_revisions/links_' + str(crs_id) + '.html', 'w','utf-8')
    # course pages, get them all and look for youtube embeds
    title_shorts = {}
    title_embedlink = {}
    title_list = []
    print("I'm looking for iframes and youtube links.")
    for L in v1_content.split('\n'):
        if re.search('<a.*?href="https:\/\/youtu',L):
            print("Possibly there's a linked video instead of embedded:" + L)
        if re.search('iframe',L):
            # captures name="value" attribute pairs inside the tag
            ma = re.compile('(\w+)=(".*?")')
            #print "\n"
            this_title = ''
            for g in ma.findall(L):
                print(g)
                if g[0]=='title':
                    this_title = g[1].replace('"','')
                if g[0]=='src':
                    this_src = g[1].replace('"','')
                #print g
            if not this_title:
                # no title attribute: derive one from the youtube embed id
                tmp = re.search(r'embed\/(.*?)\?',this_src)
                if not tmp: tmp = re.search(r'embed\/(.*?)$',this_src)
                if tmp:
                    this_title = yt_title(tmp.groups()[0])
            title_shorts[this_title] = minimal_string(this_title)
            title_list.append(this_title)
            title_embedlink[this_title] = this_src
            print("%s\n" % this_title.encode('ascii','ignore'))
            tp.write( "%s<br><a target='_blank' href='%s'>%s</a><br /><br />" % (this_title, this_src, this_src) )
    # match them
    # lowercase, non alpha or num chars become a single space, try to match
    # if any srts remain unmatched, ask.
    tp.close()
    webbrowser.open_new_tab('file://C:/SCRIPTS/everything-json/page_revisions/links_'+str(crs_id)+'.html')
    matches = {} # key is Title, value is srt file
    for S,v in list(srt_shorts.items()):
        found_match = 0
        print(v, end=' ')
        for T, Tv in list(title_shorts.items()):
            if v == Tv:
                print(' \tMatches: ' + T, end=' ')
                found_match = 1
                matches[T] = S
                break
        #print "\n"
    print("\nThese are the srt files: ")
    print(json.dumps(srt_shorts,indent=2))
    print("\nThese are the titles: ")
    print(json.dumps(title_shorts,indent=2))
    print("\nThese are the matches: ")
    print(json.dumps(matches,indent=2))
    print(("There are %d SRT files and %d VIDEOS found. " % ( len(list(srt_shorts.keys())), len(list(title_shorts.keys())) ) ))
    # manual pass for any srt file the fuzzy match missed
    for S,v in list(srt_shorts.items()):
        if not S in list(matches.values()):
            print("\nDidn't find a match for: " + S)
            i = 0
            for T in title_list:
                if not T in list(matches.keys()): print(str(i+1) + ". " + T.encode('ascii', 'ignore'))
                i += 1
            print("Here's the first few lines of the SRT:")
            print(( re.sub(r'\s+',' ', '\n'.join(open(srt_folder+"/"+S,'r').readlines()[0:10]))+"\n\n"))
            choice = input("Which one should I match it to? (zero for no match) ")
            if int(choice)>0:
                matches[ title_list[ int(choice)-1 ] ] = S
                print("SRT clean name was: %s, and TITLE clean name was: %s" % (v,title_shorts[title_list[ int(choice)-1 ]] ))
    print("ok, here are the matches:")
    print(json.dumps(matches,indent=2))
    # construct subsidiary pages, upload them
    i = 0
    for m,v in list(matches.items()):
        # open template
        # do replacement
        i += 1
        data = {'frameid':'videoframe'+str(i), 'title':m, 'embedlink':title_embedlink[m], 'srtfolderfile':v }
        print(json.dumps(data,indent=2))
        file_part = v.split('.')[0]
        new_php = codecs.open(srt_folder + '/' + file_part + '.php','w','utf-8')
        new_php.write(build_srt_embed_php(data))
        new_php.close()
    #srt_files = os.listdir(srt_folder)
    put_file(class_srt_folder)
def test_swap():
    """Dry run of the iframe-scanning logic: list every iframe embed (src
    plus resolved title) found in the cached page dump for course 6923.
    Nothing is uploaded."""
    crs_id = '6923'
    # swap in embed code and re-upload canvas pages
    v2_pages = codecs.open('page_revisions/course_'+str(crs_id)+'.html','r','utf-8')
    v2_content = v2_pages.read()
    # captures name="value" attribute pairs inside a tag
    ma = re.compile('(\w+)=(".*?")')
    for L in v2_content.split('\n'):
        find = re.findall('<iframe(.*?)>',L)
        if find:
            print("Found: ", find)
            for each in find:
                #print "\n"
                this_title = ''
                this_src = ''
                for g in ma.findall(each):
                    #print g
                    if g[0]=='title':
                        this_title = g[1].replace('"','')
                    if g[0]=='src':
                        this_src = g[1].replace('"','')
                    #print g
                if not this_title:
                    # no title attribute: derive one from the youtube embed id
                    tmp = re.search(r'embed\/(.*?)\?',this_src)
                    if not tmp: tmp = re.search(r'embed\/(.*?)$',this_src)
                    if tmp:
                        this_title = yt_title(tmp.groups()[0])
                print("Found embed link: %s\n and title: %s\n" % (this_src,this_title.encode('ascii','ignore')))
def multiple_downloads():
    """Prompt for a space-separated list of course ids and check each one."""
    raw = input("What IDs? Separate with one space: ")
    for course_id in raw.split(" "):
        accessible_check(course_id)
if __name__ == "__main__":
    print ('')
    # interactive menu: number -> [description, entry-point function]
    options = { 1: ['download a class into a folder / word file', accessible_check] ,
        2: ['download multiple classes', multiple_downloads ],
        3: ['convert stuff', pan_testing ],
        4: ['convert md to html', md_to_course ],
        5: ['import freshdesk content', freshdesk ],
        6: ['download all a courses pages', grab_course_pages],
        }
    for key in options:
        print(str(key) + '.\t' + options[key][0])
    print('')
    resp = input('Choose: ')
    # Call the function in the options dict
    options[ int(resp)][1]()

1446
courses.py Normal file

File diff suppressed because it is too large Load Diff

48
cq_demo.py Normal file
View File

@ -0,0 +1,48 @@
import codecs, json, requests
from secrets import cq_token, ph_token
# Module-level configuration: Canvas instance + bearer-token auth header.
token = cq_token
url = 'https://ilearn.gavilan.edu'
header = {'Authorization': 'Bearer ' + token}
# Append-mode log that fetch() writes every JSON payload into.
output = codecs.open('cq_gav_test.txt','a','utf-8')
def fetch(target):
    """GET *target*, pretty-print the JSON payload, and append it to the
    module-level `output` log.

    Best-effort: network and parse failures are reported to stdout, never
    raised. Returns None in all cases.
    """
    print("Fetching %s..." % target)
    try:
        r2 = requests.get(target, headers=header)
    except Exception as e:
        # BUG FIX: the original fell through here with r2 unbound and then
        # raised NameError on r2.text below; bail out instead.
        print("-- Failed to get: ", e)
        return
    try:
        results = json.loads(r2.text)
        count = len(results)
        print("Got %i results" % count)
        print(json.dumps(results,indent=2))
        print()
        output.write("----\nGetting: %s\n" % target)
        output.write(json.dumps(results,indent=2))
        output.write("\n\n")
    except Exception:
        # non-JSON body (error page, HTML, ...) or log-write failure
        print("-- Failed to parse: ", r2.text)
# Ad-hoc endpoint probing. Note: exit() after the first pair means every
# call below it never runs -- kept as a scratch list of past queries.
fetch(url + '/api/v1/outcomes/270')
fetch(url + '/api/v1/outcomes/269')
exit()
fetch(url + '/api/v1/courses/15424/outcome_results')
fetch(url + '/api/v1/courses/15424/outcome_rollups')
exit()
fetch(url + '/api/v1/accounts/1/courses')
fetch(url + '/api/v1/courses/12820/sections')
fetch(url + '/api/v1/courses/12820/enrollments')

1
credentials.json Normal file
View File

@ -0,0 +1 @@
{"installed":{"client_id":"955378242514-m954fg4f0g1n1nb6kckp68ru001hpno0.apps.googleusercontent.com","project_id":"quickstart-1569874764316","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"kSxttNuwitwdCVcQxqNh0dif","redirect_uris":["urn:ietf:wg:oauth:2.0:oob","http://localhost"]}}

848
curric2022.py Normal file
View File

@ -0,0 +1,848 @@
import requests,json,os,re, bisect, csv, codecs, funcy, sys, shutil, time
from datetime import datetime
import sortedcontainers as sc
from collections import defaultdict
from toolz.itertoolz import groupby,sliding_window
from sortedcontainers import SortedList
#from durable.lang import *
#from durable.engine import *
from pampy import match, _
from bs4 import BeautifulSoup as bs

# Module-level scratch state (leafcount/displaynames appear unused in this file).
leafcount = 0
displaynames = []

from secrets import cq_user, cq_pasw  # local (gitignored) credentials

# CurricUNET generic-meta web-service endpoint and the default query (courses).
CQ_URL = "https://secure.curricunet.com/scripts/webservices/generic_meta/clients/versions/v4/gavilan.cfc"
PARAM = "?returnFormat=json&method=getCourses"
user = cq_user
pasw = cq_pasw
# Counter used to number curric2022failfile_*.txt dumps of unparseable responses.
err_fail_filecount = 1
def fetch_all_programs():
    """Download every Active program from CurricUNET into cache/programs/.

    Any existing cache/programs folder is first archived to a
    date-stamped sibling (cache/programs_YYYY_MM_DD). Results are paged
    100 at a time via another_request() and written as numbered
    programs_<n>.txt JSON files.
    """
    if os.path.isdir('cache/programs'):
        # Stamp the archive folder with the old cache's creation date.
        m = datetime.strptime(time.ctime(os.path.getctime('cache/programs')), "%a %b %d %H:%M:%S %Y")
        today = 'cache/programs_%s' % m.strftime('%Y_%m_%d')
        print("+ Creating folder: %s" % today)
        shutil.move('cache/programs', today)
    os.makedirs('cache/programs')
    size = 100
    endn = 0
    filen = 1
    # Local PARAM shadows the module-level (courses) query on purpose.
    PARAM = "?returnFormat=json&method=getPrograms&status=Active"
    # A short page (< 100 records) signals the end of the result set.
    while(size > 99):
        size, endn, items = another_request(CQ_URL+PARAM,endn)
        out = codecs.open('cache/programs/programs_'+str(filen)+'.txt','w', 'utf-8')
        out.write(json.dumps(items,indent=4))
        out.close()
        filen += 1
    print("Written to 'cache/programs....")
def nothing(x=0):
    """No-op placeholder (used as a pampy match fallback); ignores *x*."""
    return None
# Module-level memo of tag names already reported as stripped (print once each).
seen = []
def clean(st):
    """Strip HTML markup from `st`, keeping only a small inline whitelist.

    Tags not in `ok` are unwrapped (their children are kept, the tag
    itself removed); first sighting of each stripped tag name is printed.
    Returns the resulting markup as a stripped string.
    """
    #return st
    global seen
    ok = ['b','i','ul','li','ol','strong','br','u']
    soup = bs(st, features='lxml')
    # Disabled earlier implementation, kept for reference:
    """for tag in soup.recursiveChildGenerator():
        if isinstance(tag,bs.Tag) and tag.name not in ok:
            tag.unwrap()
    return soup.prettify()
    """
    # NOTE(review): unwrapping while iterating find_all() results relies on
    # find_all returning a pre-materialized list — confirm with the bs4
    # version in use.
    for T in soup.find_all(recursive=True):
        if not T.name in ok:
            if not T.name in seen:
                seen.append(T.name)
                print("- %s" % T.name)
            #print(seen)
            T.unwrap()
        else:
            #print("+ %s" % T.name)
            pass
    return str(soup).strip()
# Counter used to number failedcourse_*.json dumps of unrecognized records.
num_failed_course = 1
def single_course_parse(c):
    """Return (entityId, parsed lines) for one course record.

    NOTE(review): recur_matcher is not defined anywhere visible in this
    file — presumably defined elsewhere; confirm before calling.
    Unrecognized records are dumped to cache/programs/failedcourse_<n>.json
    and reported as ("-1", []).
    """
    global num_failed_course
    this_course = []
    if "attributes" in c and "entityId" in c["attributes"]:
        print(c["attributes"]["entityId"])
        return (c["attributes"]["entityId"], recur_matcher(c))
    else:
        print("I couldn't recognize a class in that")
        ooops = codecs.open('cache/programs/failedcourse_%i.json' % num_failed_course, 'w', 'utf-8')
        ooops.write(json.dumps(c,indent=2))
        ooops.close()
        num_failed_course = num_failed_course + 1
        return ("-1", [])
def match_style_test():
    """Parse every cached classes_* file with single_course_parse and dump
    the flattened results to cache/courses/curric2022test.json."""
    parsed = {}
    divider = "\n\n\n" + "-"*30 + "\n\n"
    sink = codecs.open("cache/courses/curric2022test.json","w","utf-8")
    candidates = [f for f in os.listdir('cache/courses') if re.search('classes_',f)]
    for fname in candidates:
        print(fname)
        raw = codecs.open('cache/courses/'+fname,'r','utf-8').read()
        for course in json.loads(raw):
            cid, lines = single_course_parse(course)
            parsed[cid] = "\n".join(lines)
            sink.write(parsed[cid])
            sink.write(divider)
            sink.flush()
def single_program_path_parse(c):
    """Return (entityId, path-style lines) for one program record.

    Records lacking attributes/entityId are dumped to a numbered
    cache/programs/failedcourse_*.json file and reported as ("-1", []).
    """
    global num_failed_course
    if "attributes" in c and "entityId" in c["attributes"]:
        entity_id = c["attributes"]["entityId"]
        print(entity_id)
        return (entity_id, pathstyle(c))
    print("I couldn't recognize a program in that")
    dump = codecs.open('cache/programs/failedcourse_%i.json' % num_failed_course, 'w', 'utf-8')
    dump.write(json.dumps(c,indent=2))
    dump.close()
    num_failed_course += 1
    return ("-1", [])
def path_style_prog():
    """Flatten every cached programs_* JSON file into path-style lines,
    written to cache/programs/allprogrampaths.txt."""
    rendered = {}
    divider = "\n\n\n" + "-"*30 + "\n\n"
    sink = codecs.open("cache/programs/allprogrampaths.txt","w","utf-8")
    for fname in os.listdir('cache/programs'):
        if not re.search('^programs_',fname):
            continue
        print(fname)
        raw = codecs.open('cache/programs/'+fname,'r','utf-8').read()
        for prog in json.loads(raw):
            pid, lines = single_program_path_parse(prog)
            rendered[pid] = "\n".join(lines)
            sink.write(rendered[pid])
            sink.write(divider)
            sink.flush()
def term_txt_to_code(t):
    """Convert a term label like 'Spring 2018' to a Banner-style code '201830'.

    Season suffixes: Spring->30, Summer->50, Fall->70.
    Returns '' for strings that are not '<Season> <Year>' or that use an
    unknown season (the original raised KeyError on unknown seasons).
    """
    term_codes = {'Spring':'30','Summer':'50','Fall':'70'}
    parts = t.split(" ")
    if len(parts)>1:
        yr = parts[1]
        # .get avoids a crash on seasons outside the mapping (e.g. 'Winter').
        sem = term_codes.get(parts[0])
        if sem is None:
            return ''
        return yr+sem
    return ''
def all_outcomes():
    """Extract per-course outcome (SLO) rows from allclasspaths.txt.

    Reads the flattened path-style dump and writes:
      - cache/courses/alloutcomes.csv          (one row per outcome)
      - cache/courses/all_active_outcomes.csv  (Active courses only)
      - cache/courses/alloutcomes.txt          (raw matching lines)
      - cache/courses/course_cq_index.json     (index of per-course metadata)

    Relies on lines for a course appearing contiguously: a change in the
    leading course id closes out the previous course's record.
    """
    csvfile = codecs.open('cache/courses/alloutcomes.csv','w','utf-8')
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow('code cqcourseid coursestatus termineffect dept num cqoutcomeid outcome'.split(' '))
    csvfile2 = codecs.open('cache/courses/all_active_outcomes.csv','w','utf-8')
    csvwriter2 = csv.writer(csvfile2)
    csvwriter2.writerow('code cqcourseid coursestatus termineffect dept num cqoutcomeid outcome'.split(' '))
    rr = codecs.open("cache/courses/allclasspaths.txt","r", "utf-8").readlines()
    ww = codecs.open("cache/courses/alloutcomes.txt","w", "utf-8")
    course_index = []
    # current_course keys: c=cq id, d=discipline, n=number, t=short title,
    # s=status, T=title, o=outcome texts, i=cq outcome id, a=approval date,
    # m=term code.
    current_course = {}
    current_course_num = 0
    term_counts = defaultdict(int)
    count = 0
    for L in rr:
        a = re.search('Course\/(\d+)',L)
        if a:
            course_num = a.group(1)
            #print(course_num, current_course_num)
            if (course_num != current_course_num):
                if current_course_num != 0:
                    # log the course info so we can know cq id numbers of courses
                    course_index.append(current_course)
                    # status
                    count += 1
                    #input('ok ')
                    if count % 100 == 0:
                        print(count)
                    #pass
                # Start a fresh record for the new course id.
                current_course_num = course_num
                #print(course_num)
                current_course = {'c':'','d':'','n':'','t':'','s':'','T':'','o':[],'i':'','a':'','m':''}
                current_course['c'] = course_num
        a = re.search('Course\/(\d+)\/1\/Course\ Description\/0\/Course\ Discipline\/(.*)$',L)
        if a:
            current_course['d'] = a.group(2)
        a = re.search('Course\/(\d+)\/1\/Course\ Description\/0\/Course\ Number\/(.*)$',L)
        if a:
            current_course['n'] = a.group(2)
        a = re.search('Course\/(\d+)\/1\/Course\ Description\/0\/Course\ Title\/(.*)$',L)
        if a:
            current_course['T'] = a.group(2)
        a = re.search('Course\/(\d+)\/1\/Course\ Description\/0\/Short\ Title\/(.*)$',L)
        if a:
            current_course['t'] = a.group(2)
        a = re.search('Course\ Description\/status\/(.*)$',L)
        if a:
            current_course['s'] = a.group(1)
        a = re.search('Course\ Content\/\d+\/Lecture\ Content\/Curriculum\ Approval\ Date:\s*(.*)$',L)
        if a:
            current_course['a'] = a.group(1)
        a = re.search('Course\ Description\/\d+\/Internal\ Processing\ Term\/(.*)$',L)
        if a:
            t_code = term_txt_to_code(a.group(1))
            current_course['m'] = t_code
            term_counts[t_code] += 1
        # Example source lines:
        # Course/10/10/Course Content/1/Lecture Content/Curriculum Approval Date: 02/24/2014
        # Course/3091/1/Course Description/0/Internal Processing Term/Spring 2018
        a = re.search('Learning\ Outcomes\/\d+\/(cqid_\d+)\/Learning\ Outcomes\/Description\/(.*)$',L)
        if a:
            # One CSV row per outcome; the active-only CSV filters on status.
            current_course['o'].append(a.group(2))
            current_course['i'] = a.group(1)
            csvwriter.writerow([current_course['d']+current_course['n'], current_course_num, current_course['s'], current_course['m'], current_course['d'], current_course['n'], current_course['i'], a.group(2)])
            if current_course['s']=='Active':
                csvwriter2.writerow([current_course['d']+current_course['n'], current_course_num, current_course['s'], current_course['m'], current_course['d'], current_course['n'], current_course['i'], a.group(2)])
        if re.search('Learning\ Outcomes\/Description\/',L):
            ww.write(L)
        if re.search('Description\/entityTitle\/',L):
            ww.write(L)
        if re.search('Description\/status\/',L):
            ww.write(L)
    # NOTE(review): the final course's record is never appended to
    # course_index (append only happens when the id changes) — confirm
    # whether that is acceptable for downstream consumers.
    xx = codecs.open("cache/courses/course_cq_index.json","w", "utf-8")
    xx.write(json.dumps(course_index, indent=2))
    #print(json.dumps(term_counts,indent=2))
def ddl():
    """Return a fresh ``defaultdict(list)``.

    Used as the default_factory of outer defaultdicts to get two-level
    auto-vivifying dicts (``defaultdict(ddl)``).
    """
    inner = defaultdict(list)
    return inner
def splitclassline(cl, id=''):
    """Parse one CurricUNET course line into a field dict.

    Expected shapes:
      "PHYS 4A - Physics for Scientists and Engineers I 4.000 *Active*"
      "MATH 1A - Calculus 3.000 - 5.000 *Active*"   (variable-unit range)

    Returns a dict with keys name/units/units_hi/code/status/sequence.
    Fields that cannot be parsed are left as ''.
    id: ordinal position of the course within its program block (string);
    '' (the default) maps to sequence 0.
    """
    dbg = 1
    # Bug fix: int('') raised ValueError whenever the default id was used.
    ret = {'name':'','units':'','units_hi':'','code':'','status':'',
           'sequence': int(id) if id else 0}
    # Split "<code> - <rest>" at the first " - ".
    p1 = re.search(r'^(.*?)\s\-\s(.*)$',cl)
    if p1:
        code = p1.groups()[0]
        ret['code'] = code
        rest = p1.groups()[1]
        # Variable-unit range: "<name> <lo> - <hi> *<status>*"
        p3 = re.search(r'^(.*)\s(\d+\.\d+)\s\-\s(\d+\.\d+)\s+\*(\w+)\*$',rest)
        if p3:
            ret['name'] = p3.groups()[0]
            ret['units'] = p3.groups()[1]
            ret['units_hi'] = p3.groups()[2]
            ret['status'] = p3.groups()[3]
            return ret
        # Fixed units: "<name> <units> *<status>*"
        p2 = re.search(r'^(.*)\s(\d+\.\d+)\s+\*(\w+)\*$',rest)
        if p2:
            ret['name'] = p2.groups()[0]
            ret['units'] = p2.groups()[1]
            ret['status'] = p2.groups()[2]
            return ret
        else:
            if dbg: print( "%s --- code: %s --------------------------------" % (cl,code))
    else:
        if dbg: print( "%s --- code:----------------------------------------" % cl)
    return ret
def path_style_2_html():
    """Build cache/programs/programs_built.json from the flattened
    path-style program dump (allprogrampaths.txt).

    Three phases:
      1. Join continuation lines and bucket records per program id and
         section name (-> programs_prebuild.json).
      2. Extract named fields (division, dept, titles, PLOs, ...) per
         program, applying catalog-title overrides from
         cache/program_published_names.csv.
      3. Parse the "Program Requirements" section into per-block course
         lists plus unit-calculation metadata (-> programs_built.json).
    """
    verbose = 1
    v = verbose
    # CSV rows: [dept, cq program title, publish title(, publish subtitle)].
    prog_title_subs = []
    with codecs.open('cache/program_published_names.csv', 'r','utf-8') as file:
        reader = csv.reader(file)
        for row in reader:
            prog_title_subs.append(row)
    oo = codecs.open("cache/programs/allprogrampaths.txt","r","utf-8").readlines()
    award_prebuild = defaultdict( ddl )
    last_line = ""
    # Phase 1: lines not starting with "Program" are continuations of the
    # previous record; a record is processed only when the next one starts.
    for L in oo:
        L = L.strip()
        if not re.search(r'^Program',L):
            last_line = last_line + " " + L
            continue
        else:
            if re.search(r'\/$',last_line):
                # ignore line with trailing slash - assume no data
                last_line = L
                continue
            if re.search(r'Curriculum\sDivision\s\d+', last_line):
                #print(last_line)
                pass
            test_1 = re.search(r'^Program\/(\d+)\/Course',last_line)
            if test_1:
                award_prebuild[ test_1.groups()[0] ]["Info"].append(last_line)
            test_2 = re.search(r'^Program\/(\d+)\/(\d+)\/([\w\s]+)\/',last_line)
            if test_2:
                award_prebuild[ test_2.groups()[0] ][test_2.groups()[2]].append(last_line)
            last_line = L
    output = codecs.open("cache/programs/programs_prebuild.json","w","utf-8")
    output.write( json.dumps(award_prebuild, indent=2) )
    award_build = defaultdict( ddl )
    # Phase 2: per-program field extraction, in ascending numeric id order.
    for AW in sorted(list(award_prebuild.keys()),key=int):
        v = 1
        aw = award_prebuild[AW]
        for line in aw["Program Description"]:
            t1 = re.search(r'Division\/(.*)$', line)
            if t1:
                award_build[AW]["division"] = t1.groups()[0]
            t1 = re.search(r'Department\/(.*)$', line)
            if t1:
                award_build[AW]["dept"] = t1.groups()[0]
            t1 = re.search(r'Program\sTitle\/(.*)$', line)
            if t1:
                award_build[AW]["program_title"] = t1.groups()[0]
            t1 = re.search(r'Award\sType\/(.*)$', line)
            if t1:
                award_build[AW]["award"] = t1.groups()[0]
            t1 = re.search(r'\/Description\/(.*)$', line)
            if t1:
                award_build[AW]["description"] = t1.groups()[0]
            t1 = re.search(r'Transfer\/CTE\/(.*)$', line)
            if t1:
                award_build[AW]["transfer_cte"] = t1.groups()[0]
            t1 = re.search(r'CTE\sProgram\?\/\/(.*)$', line)
            if t1:
                award_build[AW]["is_cte"] = t1.groups()[0]
        for line in aw["Info"]:
            t1 = re.search(r'Description\/status\/(.*)$', line)
            if t1:
                award_build[AW]["status"] = t1.groups()[0]
            t1 = re.search(r'Description\/proposalType\/(.*)$', line)
            if t1:
                award_build[AW]["proposal_type"] = t1.groups()[0]
        for line in aw["Codes"]:
            t1 = re.search(r'Banner\sCode\/(.*)$', line)
            if t1:
                award_build[AW]["banner_code"] = t1.groups()[0]
        # substitute in program names more suitable for publishing
        subbed = 0
        for L in prog_title_subs:
            if award_build[AW]["dept"] == L[0] and award_build[AW]["program_title"] == L[1]:
                award_build[AW]["publish_title"] = L[2]
                subbed = 1
                if v: print("SUBBED")
                if len(L)>3:
                    award_build[AW]["publish_title2"] = L[3]
                else:
                    award_build[AW]["publish_title2"] = ""
        if not subbed:
            award_build[AW]["publish_title"] = award_build[AW]["dept"]
            award_build[AW]["publish_title2"] = ""
        # Hard-coded catalog-title special case for one program.
        if award_build[AW]["program_title"] == "Liberal Arts: Computer Science &amp; Information Systems Emphasis":
            award_build[AW]["publish_title"] = "Computer Science and Information Studies"
            award_build[AW]["publish_title2"] = "Liberal Arts"
            if v: print("-----LIB ART CSIS")
        if v:
            print("%s / %s - %s" % (award_build[AW]["publish_title"],award_build[AW]["program_title"], award_build[AW]["award"]))
            v = 0
        for line in aw["Program Learning Outcomes"]:
            t1 = re.search(r'Program\sLearning\sOutcomes\/\d+\/Outcome\/(\d+)\/cqid_(\d+)\/Outcome\/Outcome\/(.*)$', line)
            if t1:
                if "PLO" in award_build[AW]:
                    award_build[AW]["PLO"].append( (t1.groups()[0], t1.groups()[2]) )
                else:
                    award_build[AW]["PLO"] = [ (t1.groups()[0], t1.groups()[2]), ]
        # Sort PLOs by their sort-order field, then keep only the text.
        st = lambda x: x[0]
        award_build[AW]["PLO"] = sorted( award_build[AW]["PLO"], key=st )
        award_build[AW]["PLO"] = [ x[1] for x in award_build[AW]["PLO"] ]
        req_prebuild = defaultdict(list)
        pbd_unit_calcs = {}
        # requirements table:
        # - most types have a 'units' column, which might be calculated
        #   - might be overridden
        #   - might be single number or a range min/max
        for line in aw["Program Requirements"]:
            # t1 establishes the current block number; later patterns file
            # their hits under that block.
            t1 = re.search(r'Program\sBlock\sDefinitions\/(\d+)/cqid_\d+/Program\sBlock\sDefinitions\/(.*)$', line)
            if t1:
                pbd_number = t1.groups()[0]
                if not pbd_number in pbd_unit_calcs:
                    pbd_unit_calcs[pbd_number] = {'unit_sum':0,'unit_sum_max':0,'override':0,'min':0,'max':0}
            t2 = re.search(r'Requirements\/\d+\/Program\sBlock\sDefinitions\/(\d+)\/cqid_\d+\/Program\sBlock\sDefinitions\/Course\sBlock\sDefinition\/(.*)$', line)
            if t2:
                req_prebuild[pbd_number].append( ('h3', '0', t2.groups()[1]) )
                continue
            t3 = re.search(r'Definitions\/\d+\/Program\sCourses\/(\d+)\/cqid_\d+\/Program\sCourses\/\d+\/\[Discipline\sand\sCourse\schained\scombo\]\/Course\/(.*)$',line)
            if t3:
                req_prebuild[pbd_number].append( ('course', t3.groups()[0], splitclassline( t3.groups()[1], t3.groups()[0] )) )
                continue
            t3a = re.search(r'Definitions\/\d+\/Program\sCourses\/(\d+)\/cqid_\d+/Program\sCourses\/\d+\/\[Condition\sSection\]\/Condition\/or$',line)
            if t3a:
                req_prebuild[pbd_number].append( ('or', t3a.groups()[0]) )
                continue
            t3b = re.search(r'Definitions\/\d+\/Program\sCourses\/(\d+)\/cqid_\d+/Program\sCourses\/\d+\/\[Condition\sSection\]\/Condition\/and$',line)
            if t3b:
                req_prebuild[pbd_number].append( ('and', t3b.groups()[0]) )
                continue
            t4 = re.search(r'Definitions\/(\d+)\/cqid_\d+/Program\sBlock\sDefinitions\/\d+\/Program\sCourses/(\d+)/cqid_\d+/Program\sCourses\/Non\-Course\sRequirements\/(.*)$',line)
            if t4:
                req_prebuild[pbd_number].append( ('noncourse', t4.groups()[1], t4.groups()[2]) )
                continue
            t5 = re.search(r'Definitions\/(\d+)\/cqid_\d+\/Program\sBlock\sDefinitions\/Override\sUnit\sCalculation\/1$',line)
            if t5:
                pbd_unit_calcs[pbd_number]['override'] = 1
                continue
            t6 = re.search(r'Definitions\/(\d+)\/cqid_\d+\/Program\sBlock\sDefinitions\/Unit\sMin\/(.*)$',line)
            if t6:
                pbd_unit_calcs[pbd_number]['min'] = t6.groups()[1]
                continue
            t7 = re.search(r'Definitions\/(\d+)\/cqid_\d+\/Program\sBlock\sDefinitions\/Unit\sMax/(.*)$',line)
            if t7:
                pbd_unit_calcs[pbd_number]['max'] = t7.groups()[1]
                continue
            t8 = re.search(r'chained\scombo\]\/Discipline',line)
            if t8:
                continue
            # NOTE(review): [Low|High] is a character class, not alternation —
            # matches "Units L", "Units o", etc. Probably intended (Low|High);
            # harmless here since it only skips lines.
            t8a = re.search(r'Units\s[Low|High]',line)
            if t8a:
                continue
            t9 = re.search(r'Definitions\/Block\sHeader\/(.*)$',line)
            if t9:
                req_prebuild[pbd_number].append( ('blockheader', t9.groups()[0]) )
                continue
            # Fallback assumes t1 matched this line; if it did not, t1 is
            # None and this raises AttributeError. NOTE(review): confirm
            # every otherwise-unmatched line also matches t1.
            req_prebuild[pbd_number].append( ('', t1.groups()[1]) )
        award_build[AW]["requirements"] = req_prebuild
        award_build[AW]["unit_calcs"] = pbd_unit_calcs
        # associate unit calculations with program blocks
        for block_key in req_prebuild.keys():
            if block_key in pbd_unit_calcs:
                req_prebuild[block_key].insert(0, pbd_unit_calcs[block_key])
            else:
                req_prebuild[block_key].insert(0, {'unit_sum':0,'unit_sum_max':0,'override':0})
        # do the unit calc math
        for block_key in req_prebuild.keys():
            this_block = req_prebuild[block_key]
            pad = this_block[0]
            if v: print("pad: ",pad)
            block_dict = {}
            for item in this_block[1:]:
                print(item)
                try:
                    if item[0] == "or":
                        block_dict[ item[1]+"or" ] = 1
                    if item[0] == "h3":
                        if v: print("+ ", item[1])
                    if item[0] == "blockheader":
                        if v: print(" ", item[1])
                    if not item[0] == "course":
                        continue
                    block_dict[ item[1] ] = item[2]
                    seq = int(item[1])
                    units = ''
                    if item[2]['units']: units = float( item[2]['units'] )
                except Exception as e:
                    # Interactive debugging pause on any malformed item.
                    print("ERROR ERROR\nERROR ERROR")
                    print(e)
                    xyz = input('hit return to continue')
            #print( "%i \t %f \t %s" % (seq,units, item[2]['name']))
            if v:
                for k in sorted( block_dict.keys() ):
                    print(k," ", block_dict[k])
            #for k in sliding_window(3, sorted( block_dict.keys() )):
            #    l,m,n = k
            #    if re.search(r'or$',m):
            #        print("OR")
            #        print(block_dict[l],"\n",block_dict[m],"\n",block_dict[n],"\n\n")
            #print()
    output = codecs.open("cache/programs/programs_built.json","w","utf-8")
    output.write( json.dumps(award_build, indent=2) )
def course_path_style_2_html():
    """Build courses_built.json / courses_active_built.json from the
    flattened path-style course dump (allclasspaths.txt).

    Phase 1 joins continuation lines and buckets records per course id
    and section (-> courses_prebuild.json); phase 2 maps lines to named
    fields via lookup_table regexes.
    """
    verbose = 1
    v = verbose
    oo = codecs.open("cache/courses/allclasspaths.txt","r","utf-8").readlines()
    course_prebuild = defaultdict( ddl )
    last_line = ""
    # Phase 1: lines not starting with "Course" are continuations; note
    # continuations are re-joined with " <br /> " (HTML output downstream).
    for L in oo:
        L = L.strip()
        if not re.search(r'^Course',L):
            last_line = last_line + " <br /> " + L
            continue
        else:
            if re.search(r'\/$',last_line):
                # ignore line with trailing slash - assume no data
                last_line = L
                continue
            test_1 = re.search(r'^Course\/(\d+)\/Course',last_line)
            if test_1:
                course_prebuild[ test_1.groups()[0] ]["Info"].append(last_line)
            test_2 = re.search(r'^Course\/(\d+)\/(\d+)\/(.*?)\/(.*)$',last_line)
            if test_2:
                course_prebuild[ test_2.groups()[0] ][test_2.groups()[2]].append(last_line)
            last_line = L
    output = codecs.open("cache/courses/courses_prebuild.json","w","utf-8")
    output.write( json.dumps(course_prebuild, indent=2) )
    all_courses = {}
    active_courses = {}
    # Regex fragment -> output field name. Keys matched specially below:
    # slo/desc/catalog/content/objectives; all others take the text after
    # the final slash.
    lookup_table = { 'entityTitle':'title', 'proposalType':'type',
        '\/Course\sDescription\/status':'status', 'Course\sDiscipline':'dept',
        'Course\sNumber':'number', 'Course\sTitle':'name',
        'Short\sTitle':'shortname', 'Internal\sProcessing\sTerm':'term', 'This\sCourse\sIs\sDegree\sApplicable':'degree_applicable',
        '\/Course\sDescription\/\d+\/Course\sDescription\/':'desc',
        'Minimum\sUnits':'min_units', 'Minimum\sLecture\sHour':'min_lec_hour', 'Minimum\sLab\sHour':'min_lab_hour', 'Course\shas\svariable\shours':'has_var_hours',
        'Number\sWeeks':'weeks',
        'Maximum\sUnits':'max_units', 'Credit\sStatus':'credit_status',
        'TOP\sCode':'top_code', 'Classification':'classification', 'Non\sCredit\sCategory':'noncredit_category', 'Stand-Alone\sClass?':'stand_alone',
        'Grade\sOption':'grade_option', 'Is\sRepeatable':'repeatable', 'Learning\sOutcomes\/Description':'slo',
        'Is\sThis\sCourse\sis\sRecommended\sfor\sTransfer\sto\sState\sUniversities\sand\sColleges?':'transfer_csu',
        'Is\sThis\sCourse\sis\sRecommended\sfor\sTransfer\sto\sUniversity\sof\sCalifornia?':'transfer_uc',
        '\/Catalog\sCourse\sSummary\sView\/':'catalog',
        '\/Course\sContent/\d+/Lecture\sContent\/':'content',
        '\/ASSIST\sPreview\/\d+\/Outcomes\sand\sObjectives\/':'objectives'}
    for C in sorted(list(course_prebuild.keys()),key=int):
        v = 0
        crs = course_prebuild[C]
        course_build = {'slo':{}} # defaultdict( ddl )
        if v: print(C)
        for K in crs.keys():
            if v: print("\t%s" % K)
            for line in crs[K]:
                # NOTE: loop variable `str` shadows the builtin within this
                # loop body (kept as-is; rename in a code-change pass).
                for (str,key) in lookup_table.items():
                    if re.search(str,line):
                        if key == 'slo':
                            # \s<br\s\/>\s
                            content_search = re.search(r'\/Learning\sOutcomes\/\d+\/cqid_(\d+)\/Learning\sOutcomes\/Description\/(.*?)$',line)
                            if content_search: course_build['slo'][content_search.groups()[0]] = content_search.groups()[1]
                            else:
                                print("NO SLO? %s" % line)
                        elif key == 'desc':
                            content_search = re.search(r'^Course\/\d+\/\d+\/Course\sDescription\/\d+\/Course\sDescription\/(.*)$',line)
                            course_build['desc'] = content_search.groups()[0]
                        elif key == 'catalog':
                            content_search = re.search(r'^Course\/\d+\/\d+\/General\sEducation\sPattern\/\d+\/Catalog\sCourse\sSummary\sView\/(.*)$',line)
                            course_build['catalog'] = content_search.groups()[0]
                        elif key == 'content':
                            content_search = re.search(r'^Course\/\d+\/\d+\/Course\sContent\/\d+\/Lecture\sContent\/(.*)$',line)
                            course_build['content'] = content_search.groups()[0]
                        elif key == 'objectives':
                            content_search = re.search(r'^Course\/\d+\/\d+\/ASSIST\sPreview\/\d+\/Outcomes\sand\sObjectives\/(.*)$',line)
                            course_build['objectives'] = content_search.groups()[0]
                        else:
                            content_search = re.search(r'^(.*)\/(.*?)$',line)
                            course_build[key] = content_search.groups()[1]
                        if v: print("\t\t%s - %s" % (key, course_build[key]))
                        continue
        all_courses[C] = course_build
        # NOTE(review): KeyError if no line matched the status pattern —
        # confirm every course record carries a status line.
        if course_build['status'] == 'Active':
            active_courses[C] = course_build
    output = codecs.open("cache/courses/courses_built.json","w","utf-8")
    output.write( json.dumps(all_courses, indent=2) )
    output2 = codecs.open("cache/courses/courses_active_built.json","w","utf-8")
    output2.write( json.dumps(active_courses, indent=2) )
#########
#########
#########
#########
def another_request(url,startat):
    """Fetch one page of a CurricUNET result set.

    url: full endpoint+query; startat: record offset (appended as &skip=).
    Returns (size, endn, items): page size, absolute index of the last
    record, and the list of entityInstances. On a JSON parse failure the
    raw response is dumped to cache/curric2022failfile_<n>.txt and
    (0, 0, []) is returned, which also terminates callers' paging loops.
    """
    global err_fail_filecount
    newparam = "&skip=" + str(startat)
    print((url+newparam))
    # Uses HTTP basic auth with the module-level CurricUNET credentials.
    r = requests.get(url+newparam, auth=(user,pasw))
    try:
        # strict=False tolerates control characters inside strings.
        mydata = json.loads(r.text, strict=False)
    except Exception as e:
        print("Couldn't read that last bit")
        #print((r.text))
        codecs.open('cache/curric2022failfile_%i.txt' % err_fail_filecount,'w','utf-8').write(r.text)
        err_fail_filecount += 1
        print(e)
        return 0,0,[]
    size = mydata['resultSetMetadata']['ResultSetSize']
    endn = mydata['resultSetMetadata']['EndResultNum']
    items = mydata['entityInstances']
    print((' Got ' + str(size) + ' instances, ending at item number ' + str(endn)))
    return size,endn,items
def fetch_all_classes():
    """Download every course from CurricUNET into cache/courses/classes_*.txt.

    Any existing cache/courses folder is first archived to a date-stamped
    sibling. Uses the module-level PARAM (method=getCourses) and pages via
    another_request().
    """
    if os.path.isdir('cache/courses'):
        # Stamp the archive folder with the old cache's creation date.
        m = datetime.strptime(time.ctime(os.path.getctime('cache/courses')), "%a %b %d %H:%M:%S %Y")
        today = 'cache/courses_%s' % m.strftime('%Y_%m_%d')
        print("+ Creating folder: %s" % today)
        shutil.move('cache/courses', today)
    os.makedirs('cache/courses')
    size = 100
    endn = 0
    filen = 1
    # A short page (< 100 records) signals the end of the result set.
    while(size > 99):
        size, endn, items = another_request(CQ_URL+PARAM,endn)
        out = codecs.open('cache/courses/classes_'+str(filen)+'.txt','w', 'utf-8')
        out.write(json.dumps(items,indent=2))
        out.close()
        filen += 1
    print("Written to 'cache/courses....")
#
#
# Main worker
#
def recur_path_matcher(item, path=[]):
    """Recursively flatten a CurricUNET entity tree into path-style lines.

    Each emitted line is "<path segments>/<field name>/<field value>".
    NOTE(review): mutable default argument — `path=[]` is shared across
    calls that omit it; in-file callers (pathstyle) always pass a fresh
    list, so this only bites if called bare. Confirm before reuse.
    """
    # Closure used as a match handler: extends the current path with the
    # section's sort order + name and records its lastUpdated value.
    def x2_path_update(x,y,z):
        path.extend([str(y),x])
        my_result_lines.append( '/'.join(path) + '/' + 'lastEdited' + '/' + z)
    # Path prefix for lines emitted at this level; collapse duplicate
    # slashes and runs of whitespace.
    path_str = "/".join(path) + "/"
    path_str = re.sub('\/+','/',path_str)
    path_str = re.sub('\s+',' ',path_str)
    my_result_lines = []
    if type(item) == type({}):
        original_path = path.copy()
        # The value-bearing patterns emit lines; the section patterns
        # extend `path` in place.
        match( item,
            {'attributes': {'displayName': _}, 'lookUpDisplay': _, },
            lambda x,y: my_result_lines.append("%s%s/%s" % (path_str, clean(str(x)), clean(str(y)))) ,
            {'attributes': {'displayName': _}, 'fieldValue': _, },
            lambda x,y: my_result_lines.append("%s%s/%s" % (path_str, clean(str(x)), clean(str(y)))) ,
            {'attributes': {'fieldName': _}, 'fieldValue': _, },
            lambda x,y: my_result_lines.append("%s%s/%s" % (path_str, clean(str(x)), clean(str(y)))) ,
            {'instanceId':_, 'sectionName': _, 'sectionSortOrder':_},
            lambda id,name,order: path.extend([str(order),'cqid_'+str(id),name]),
            {'instanceId':_, 'sectionName': _, 'instanceSortOrder':_},
            lambda id,name,order: path.extend([str(order),'cqid_'+str(id),name]),
            {'sectionName': _, 'sectionSortOrder':_, 'lastUpdated': _ },
            #lambda x,y,z: path.extend([str(y),x,z]),
            x2_path_update,
            {'sectionName': _, 'sectionSortOrder':_},
            lambda x,y: path.extend([str(y),x]),
            {'sectionName': _},
            lambda x: path.append(x),
            _, nothing #lambda x: path.append('')
        )
        # NOTE(review): the match handlers mutate the caller-shared list in
        # place, then this rebinds the local name to the pre-match copy for
        # the recursive calls — so the caller's list keeps the extensions
        # while recursion proceeds from the snapshot. Subtle; verify this
        # is the intended accumulation behavior before restructuring.
        path = original_path
        for K,V in list(item.items()):
            my_result_lines.extend(recur_path_matcher(V,path))
    elif type(item) == type([]):
        for V in item:
            my_result_lines.extend(recur_path_matcher(V,path))
    return my_result_lines
def pathstyle(theclass):
    """Render one CurricUNET entity (course or program) as path-style lines.

    The first four lines carry the record's metadata (title, type,
    proposal action, status); the remainder come from walking
    entityFormData via recur_path_matcher. Returns None (after printing a
    note) when the record has no entityMetadata.
    """
    if "entityMetadata" not in theclass:
        print("didn't seem to be a class.")
        return None
    meta = theclass["entityMetadata"]
    # Example metadata: entityId 4077, entityTitle "ENGL2B - American
    # Ethnic Literature", entityType "Course", proposalType "Deactivate
    # Course", status "Historical".
    typ = meta["entityType"]
    eid = str(meta["entityId"])
    prefix = [typ, eid, "Course Description"]
    result = [
        "/".join(prefix + ["entityTitle", meta["entityTitle"]]),
        "/".join(prefix + ["entityType", typ]),
        "/".join(prefix + ["proposalType", meta["proposalType"]]),
        "/".join(prefix + ["status", meta["status"]]),
    ]
    result.extend(recur_path_matcher(theclass["entityFormData"]["rootSections"], [typ, eid]))
    return result
def single_course_path_parse(c):
    """Return (entityId, path-style lines) for one course record.

    Records lacking attributes/entityId are dumped to a numbered
    cache/programs/failedcourse_*.json file and reported as ("-1", []).
    """
    global num_failed_course
    if "attributes" in c and "entityId" in c["attributes"]:
        entity_id = c["attributes"]["entityId"]
        print(entity_id)
        return (entity_id, pathstyle(c))
    print("I couldn't recognize a class in that")
    dump = codecs.open('cache/programs/failedcourse_%i.json' % num_failed_course, 'w', 'utf-8')
    dump.write(json.dumps(c,indent=2))
    dump.close()
    num_failed_course += 1
    return ("-1", [])
def path_style_test():
    """Flatten every cached classes_* JSON file into path-style lines,
    written to cache/courses/allclasspaths.txt."""
    rendered = {}
    divider = "\n\n\n" + "-"*30 + "\n\n"
    sink = codecs.open("cache/courses/allclasspaths.txt","w","utf-8")
    for fname in os.listdir('cache/courses'):
        if not re.search('^classes_',fname):
            continue
        print(fname)
        raw = codecs.open('cache/courses/'+fname,'r','utf-8').read()
        # strict=False tolerates control characters inside JSON strings.
        for course in json.loads(raw,strict=False):
            cid, lines = single_course_path_parse(course)
            rendered[cid] = "\n".join(lines)
            sink.write(rendered[cid])
            sink.write(divider)
            sink.flush()
def make_sl():
    """Factory: SortedList ordered by descending term code (the 'm' field).

    Newest course version first; used as a defaultdict factory in
    course_rank().
    """
    def newest_first(entry):
        return -int(entry['m'])
    return SortedList(key=newest_first)
def course_rank():
    """Rank every version of each course by term-in-effect (newest first).

    Reads cache/courses/course_cq_index.json (written by all_outcomes)
    and writes one CSV row per course version to
    cache/courses/all_courses_ranked.csv.
    """
    csvfile = codecs.open('cache/courses/all_courses_ranked.csv','w','utf-8')
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow("code,cqcourseid,coursestatus,termineffect,dept,num,numoutcomes".split(","))
    courses = json.loads(codecs.open('cache/courses/course_cq_index.json','r','utf-8').read())
    # NOTE: `all` shadows the builtin for the rest of this function.
    # Values are SortedLists keyed on descending term code (see make_sl).
    all = defaultdict(make_sl)
    for c in courses:
        code = c['d']+c['n']
        if not c['m']:
            # Missing term-in-effect sorts as a very old term (Spring 2000).
            c['m'] = '200030'
        all[code].add(c)
    for k in sorted(all.keys()):
        print("\n##",k)
        print(json.dumps(list(all[k]),indent=2))
        for version in all[k]:
            csvwriter.writerow( [ version['d']+version['n'], version['c'], version['s'], version['m'], version['d'], version['n'], len(version['o']) ])
if __name__ == "__main__":
    print ('')
    # Menu of entry points; each value is [label, zero-arg callable].
    options = { 1: ['fetch all courses', fetch_all_classes],
        2: ['process all classes', path_style_test],
        3: ['courses - path style to html catalog', course_path_style_2_html],
        4: ['courses - rank by all versions', course_rank],
        5: ['fetch all programs', fetch_all_programs],
        6: ['process all programs', path_style_prog],
        9: ['show course outcomes', all_outcomes],
        10: ['programs - path style to html catalog', path_style_2_html],
        }
    print ('')
    # A numeric argv[1] selects an action non-interactively; otherwise
    # show the menu and prompt.
    if len(sys.argv) > 1 and re.search(r'^\d+',sys.argv[1]):
        resp = int(sys.argv[1])
        print("\n\nPerforming: %s\n\n" % options[resp][0])
    else:
        print ('')
        for key in options:
            print(str(key) + '.\t' + options[key][0])
        print('')
        resp = input('Choose: ')
    # Call the function in the options dict
    # (raises KeyError/ValueError on unknown or non-numeric choices).
    options[ int(resp)][1]()

2252
curriculum.py Normal file

File diff suppressed because it is too large Load Diff

661
curriculum2020.py Normal file
View File

@ -0,0 +1,661 @@
from pampy import match, _
import json, pypandoc, requests,json,os,re, bisect, csv, codecs
import sortedcontainers as sc
from collections import defaultdict
from toolz.itertoolz import groupby
import pdb

# Scratch list of pampy patterns; only populated by the disabled block below.
pat8020 = []
# Field-name notes for the CurricUNET JSON shapes handled by cq_8020/cq_8021:
""" (programs) entityType entityTitle status proposalType sectionName lastUpdated lastUpdatedBy
fieldName displayName lookUpDisplay fieldValue instanceSortOrder
lookUpDataset (array of dicts, each has keys: name, value, and corresponding values.)
subsections or fields (arrays) - ignore for now just takem in order
(courses) same as above?
html values: markdown convert?
"""
# Disabled pattern registrations, kept for reference:
"""pat8020.append( {"displayName": _} )
pat8020.append( {"entityType": _} )
pat8020.append( {"entityTitle": _} )
pat8020.append( {"lookUpDisplay": _} )
pat8020.append( {"fieldValue": _} )
"""
# Last error text (only ever appended to in commented-out code).
err = "no error\n"
def to_md(s):
    """Convert an HTML fragment to Markdown via pandoc."""
    return pypandoc.convert_text(s, 'md', format='html')
def print_return(x):
    """Debug identity: announce a match hit (plus a blank line), then hand
    back *x* unchanged."""
    for msg in ('got a hit', ''):
        print(msg)
    return x
def cq_8020(root,indent=0):
    """Recursively extract labeled field strings from a CurricUNET tree.

    Tries a pampy match of known attribute/field shapes at this node; on a
    hit the formatted string is collected, otherwise dicts/lists are
    recursed with indentation increased by 2. Returns a flat list of
    indented display strings.
    """
    ret = []
    idt = " " * indent
    try:
        # NOTE(review): each handler is a conditional expression —
        # `idt + "X: " + x.strip() if x.strip() else ""` parses as
        # `(idt + "X: " + x.strip()) if x.strip() else ""`, i.e. empty
        # string for blank values. Precedence appears intended; verify.
        m = match( root,
            {"attributes": { "fieldName": "Department" },
             "lookUpDisplay": _ }, lambda x: idt + "Department: " + x.strip() if x.strip() else "",
            {"attributes": { "fieldName": "Division" },
             "lookUpDisplay": _ }, lambda x: idt + "Division: " + x.strip() if x.strip() else "",
            {"attributes": { "fieldName": "Discipline" },
             "lookUpDisplay": _ }, lambda x: idt + "Discipline: " + x.strip() if x.strip() else "",
            {"attributes": { "fieldName": "Program Title" },
             "lookUpDisplay": _ }, lambda x: idt + "Program Title: " + x.strip() if x.strip() else "",
            {"attributes": { "fieldName": "Outcome" },
             "fieldValue": _ }, lambda x: idt + "Outcome: " + x.strip() if x.strip() else "",
            {"attributes": { "fieldName": "Award Type" },
             "lookUpDisplay": _ }, lambda x: idt + "Award Type: " + x,
            {"attributes": { "fieldName": "Course" },
             "lookUpDisplay": _ }, lambda x: idt + "Course: " + x.strip() if x.strip() else "",
            {"attributes": { "fieldName": "Description" },
             "fieldValue": _ }, lambda x: idt + "Description: " + to_md(x),
            {"attributes": { "fieldName": "Justification" },
             "fieldValue": _ }, lambda x: idt + "Justification: " + x.strip() if x.strip() else "",
            {"fieldName": _}, lambda x: idt + "field name: " + x.strip() if x.strip() else "",
            {"fieldValue": _}, lambda x: idt + "field value: " + x.strip() if x.strip() else "",
            #{"entityType": _}, lambda x: idt + "entityType: " + x,
            {"entityTitle": _}, lambda x: idt + "entityTitle: " + x.strip() if x.strip() else "",
            {"lookUpDisplay": _}, lambda x: idt + "lookUpDisplay: " + to_md(x.strip()) if x.strip() else "",
            # Units
            { "name": "Max", "value": _ }, lambda x: "%sMax: %s" % (idt,x),
            { "name": "Min", "value": _ }, lambda x: "%sMin: %s" % (idt,x),
            { "name": "Text", "value": _ }, lambda x: "%sText: %s" % (idt,x),
            default=False )
        if m:
            print('case 1: ' + str(m) )
            ret.append(m)
    except Exception as e:
        # Swallow handler errors (e.g. pandoc failures in to_md) and fall
        # through to plain recursion.
        m = 0
        pass
        #print("GOT EXCEPTION.")
        #err += str(e)
    if (not m) and type(root) == type( {} ):
        # Disabled alternate per-key matching, kept for reference:
        """
        for K,V in list(root.items()):
            print( [K,V])
            m = match( [K,V],
                ["lookUpDisplay", _ ], lambda x: idt + "lookup display: " + to_md(str(x).strip()) if str(x).strip() else "",
                ["fieldName", _ ], lambda x: idt + "field name: " + x,
                ["fieldValue", _ ], lambda x: idt + "field value: " + to_md(str(x).strip()) if str(x).strip() else "",
                ["entityType", _ ], lambda x: idt + "entity type: " + x,
                ["entityTitle", _ ], lambda x: idt + "entity title: " + x,
                ["displayName", _ ], lambda x: idt + "display name: " + x,
                ["sectionSortOrder", _ ], lambda x: idt + "section sort order: " + str(x),
                default=False)
            if m:
                print('case 2 ' + str(m))
                ret.append(m)
            #else:
        """
        for V in root.values():
            m = cq_8020(V,indent+2)
            if m:
                print('case 4 ' + str(m))
                ret.extend(m)
    elif (not m) and type(root) == type([]):
        for V in root:
            m = cq_8020(V,indent+2)
            if m:
                print('case 3')
                ret.extend(m)
    return ret
def cq_8021(root,indent=0):
    """Recursively walk a CurricUNET JSON tree and collect recognized fields.

    Uses pampy's match() to recognize known field shapes at the current node;
    each lambda yields a {"key": ..., "value": ...} dict, or 0 when the value
    is blank (so blank fields are dropped).  On no match — or a match()
    exception, which happens for scalar nodes — it recurses into dict values
    and list elements.

    :param root: any node of the parsed JSON (dict / list / scalar)
    :param indent: recursion depth; only feeds idt, which the live code no
        longer uses (kept for the commented-out variant below)
    :return: flat list of {"key":..., "value":...} fragments found below root
    """
    ret = []
    idt = " " * indent  # unused by the live code; see dead block below
    m = 0
    try:
        # Try to recognize this node wholesale as one of the known shapes.
        m = match( root,
            {"attributes": { "fieldName": "Department" },
             "lookUpDisplay": _ }, lambda x: {"key":"Department", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Division" },
             "lookUpDisplay": _ }, lambda x: {"key":"Division", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Discipline" },
             "lookUpDisplay": _ }, lambda x: {"key":"Discipline", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Program Title" },
             "lookUpDisplay": _ }, lambda x: {"key":"Program Title", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Outcome" },
             "fieldValue": _ }, lambda x: {"key":"Outcome", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Award Type" },
             "lookUpDisplay": _ }, lambda x: {"key":"Award Type", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Course" },
             "lookUpDisplay": _ }, lambda x: {"key":"Course", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Description" },
             "fieldValue": _ }, lambda x: {"key":"Description", "value": to_md(x.strip()) } if x.strip() else 0,
            {"attributes": { "fieldName": "Justification" },
             "fieldValue": _ }, lambda x: {"key":"Justification", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Assessment" },
             "fieldValue": _ }, lambda x: {"key":"Assessment", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Disk Name" },
             "fieldValue": _ }, lambda x: {"key":"Disk Name", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Attached File Name" },
             "fieldValue": _ }, lambda x: {"key":"Attached File Name", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Title" },
             "fieldValue": _ }, lambda x: {"key":"Title", "value": x.strip() } if x.strip() else 0,
            # Generic fallbacks for fields not named above.
            {"fieldName": _}, lambda x: {"key": x.strip()} if x.strip() else 0,
            {"fieldValue": _}, lambda x: {"value": x.strip()} if x.strip() else 0,
            {"entityType": _}, lambda x: {"key": "Type", "value": x.strip()} if x.strip() else 0,
            {"entityTitle": _}, lambda x: {"key": "Title", "value": x.strip()} if x.strip() else 0,
            {"lookUpDisplay": _}, lambda x: {"value": x.strip()} if x.strip() else 0,
            # Units
            { "name": "Max", "value": _ }, lambda x: {"key": "max", "value": x.strip()} if x.strip() else 0,
            { "name": "Min", "value": _ }, lambda x: {"key": "min", "value": x.strip()} if x.strip() else 0,
            { "name": "Text", "value": _ }, lambda x: {"value": x.strip()} if x.strip() else 0,
            default=False )
        if m:
            print('case 1: ' + str(m) )
            ret.append(m)
    except Exception as e:
        # match() raising (non-dict node, or a lambda blowing up) is treated
        # the same as "no match": fall through to the recursive walk.
        m = 0
        pass
        #print("GOT EXCEPTION.")
        #err += str(e)
    if (not m) and type(root) == type( {} ):
        # Dead code: earlier per-item variant kept for reference.
        """
        for K,V in list(root.items()):
            print( [K,V])
            m = match( [K,V],
                ["lookUpDisplay", _ ], lambda x: idt + "lookup display: " + to_md(str(x).strip()) if str(x).strip() else "",
                ["fieldName", _ ], lambda x: idt + "field name: " + x,
                ["fieldValue", _ ], lambda x: idt + "field value: " + to_md(str(x).strip()) if str(x).strip() else "",
                ["entityType", _ ], lambda x: idt + "entity type: " + x,
                ["entityTitle", _ ], lambda x: idt + "entity title: " + x,
                ["displayName", _ ], lambda x: idt + "display name: " + x,
                ["sectionSortOrder", _ ], lambda x: idt + "section sort order: " + str(x),
                default=False)
            if m:
                print('case 2 ' + str(m))
                ret.append(m)
        #else:
        """
        # Recurse into every value of an unrecognized dict.
        for V in root.values():
            m = cq_8021(V,indent+2)
            if m:
                print('case 4 ' + str(m))
                ret.extend(m)
        #for mm in m:
        #    if 'key' in mm and 'value' in mm:
        #        ret.extend(mm)
    elif (not m) and type(root) == type([]):
        # Recurse into every element of an unrecognized list.
        for V in root:
            m = cq_8021(V,indent+2)
            if m:
                print('case 3')
                ret.extend(m)
    return ret
def cq_8021_start():
    """Driver for cq_8021(): parse the cached program JSON, extract the
    recognized key/value fragments, and dump them as JSON.

    Reads  cache/programs/programs_1.txt
    Writes cache/test_prog8020.txt (and truncates cache/test_prog8020err.txt,
    which was reserved for error capture and is currently unused).
    """
    # Context managers so handles are closed even if cq_8021 raises
    # (the original leaked all three handles).
    with open('cache/programs/programs_1.txt', 'r') as inp:
        root = json.loads(inp.read())
    result = cq_8021(root)
    with open('cache/test_prog8020.txt', 'w') as outt:
        outt.write(json.dumps(result, indent=2))
    # Opened (and truncated) to preserve the original side effect.
    with open('cache/test_prog8020err.txt', 'w') as outt_err:
        pass
        #outt_err.write( err )
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # #
##
## In this attempt I try to keep the data structure intact, but swapping in parts I recognize for a
## more compact version.
#
## Recursively do this....
##
## As I elaborate on it, the non-swapped parts will hopefully stand out more and more, and I can
## track down all the problems.
##
def cq_8022(root,indent=0):
    """Recursively simplify a CurricUNET JSON tree.

    Unlike cq_8020/cq_8021 (which collect fragments), this returns a tuple
    (changed, new_root): 'changed' counts substitutions made, and 'new_root'
    is the tree with every recognized fragment swapped for a compact
    {"key":..., "value":...} form (or the marker string "NULL" for empties).
    Unrecognized parts are returned untouched so they stand out.
    """
    ret = []        # unused here; kept from the cq_802x template
    idt = " " * indent  # unused; indentation for prints is built inline
    m = 0
    try:
        # Step 1: try to recognize this node wholesale via pampy patterns.
        m = match( root,
            # Clear empties
            { "attributes": { "fieldName": _ }, "fieldValue": "" }, "NULL",
            { "attributes": { "fieldName": _ }, "lookUpDisplay": "", "fieldValue": _ }, lambda x,y: {"key":x,"value":y},
            { "attributes": { "fieldName": _ }, "lookUpDisplay": _, "fieldValue": "" }, lambda x,y: {"key":x,"value":y},
            {"attributes": { "fieldName": "Exception Identifier" }, "fieldValue": _ }, lambda x: {"key":"Exception ID", "value": x},
            {"attributes": { "fieldName": "Department" },
             "lookUpDisplay": _ }, lambda x: {"key":"Department", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Division" },
             "lookUpDisplay": _ }, lambda x: {"key":"Division", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Discipline" },
             "lookUpDisplay": _ }, lambda x: {"key":"Discipline", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Program Title" },
             "fieldValue": _ }, lambda x: {"key":"Program Title", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Outcome" },
             "fieldValue": _ }, lambda x: {"key":"Outcome", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Award Type" },
             "lookUpDisplay": _ }, lambda x: {"key":"Award Type", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Course" },
             "lookUpDisplay": _ }, lambda x: {"key":"Course", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Description" },
             "fieldValue": _ }, lambda x: {"key":"Description", "value": to_md(x.strip()) } if x.strip() else 0,
            {"attributes": { "fieldName": "Justification" },
             "fieldValue": _ }, lambda x: {"key":"Justification", "value": x.strip() } if x.strip() else 0,
            # "-" is used as an explicit empty marker in the source data.
            {"attributes": { "fieldName": "Assessment" },
             "fieldValue": "-" }, lambda x: "NULL",
            {"attributes": { "fieldName": "Assessment" },
             "fieldValue": _ }, lambda x: {"key":"Assessment", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Disk Name" },
             "fieldValue": _ }, lambda x: {"key":"Disk Name", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Attached File Name" },
             "fieldValue": _ }, lambda x: {"key":"Attached File Name", "value": x.strip() } if x.strip() else 0,
            {"attributes": { "fieldName": "Title" },
             "fieldValue": _ }, lambda x: {"key":"Title", "value": x.strip() } if x.strip() else 0,
            {"entityType": _}, lambda x: {"key": "Type", "value": x.strip()} if x.strip() else 0,
            {"entityTitle": _}, lambda x: {"key": "Title", "value": x.strip()} if x.strip() else 0,
            {"lookUpDisplay": _}, lambda x: {"value": x.strip()} if x.strip() else 0,
            # NOTE(review): unreachable - the generic {"lookUpDisplay": _}
            # pattern above already matches these; kept as in original.
            {"attributes": { "fieldName": "Course" }, "lookUpDisplay": _ }, lambda x: {"key": "Course", "value": x.strip()} if x.strip() else 0,
            # Units
            { "name": "Max", "value": _ }, lambda x: {"key": "max", "value": x.strip()} if x.strip() else 0,
            { "name": "Min", "value": _ }, lambda x: {"key": "min", "value": x.strip()} if x.strip() else 0,
            { "name": "Text", "value": _ }, lambda x: {"value": x.strip()} if x.strip() else 0,
            # Programs
            { "attributes": { "fieldName": "Course Block Definition" },
              "fieldValue": _ }, lambda x: { "key":"Course block d.", "value": x.strip() },
            { "attributes": { "fieldName": "Unit Min" },
              "fieldValue": _ }, lambda x: { "key":"Unit min", "value": x },
            { "attributes": { "fieldName": "Unit Max" },
              "fieldValue": _ }, lambda x: { "key":"Unit max", "value": x },
            { "attributes": { "fieldName": "Units Low" },
              "fieldValue": _ }, lambda x: { "key":"Units low", "value": x },
            { "attributes": { "fieldName": "Units High" },
              "fieldValue": _ }, lambda x: { "key":"Units high", "value": x },
            { "attributes": { "fieldName": "Override Unit Calculation" },
              "fieldValue": _ }, lambda x: { "key":"override unit calc", "value": x },
            # NOTE(review): "Defalut" matches a typo in the upstream data;
            # do not "fix" the spelling here.
            { "attributes": { "fieldName": "Override Defalut Unit Calculations" },
              "fieldValue": _ }, lambda x: { "key":"override default unit calc", "value": x },
            # Empty section shells collapse to "NULL".
            { "attributes_unchanged": { "sectionOrInstance": "section" },
              "subsections_unchanged": [], "fields": [] }, lambda x: "NULL" ,
            { "attributes": { "sectionOrInstance": "section" },
              "subsections": [], "fields": [] }, lambda x: "NULL" ,
            { "attributes_unchanged": { "sectionOrInstance": "section" },
              "subsections": [], "fields": [] }, lambda x: "NULL" ,
            { "attributes": { "sectionName": "[Discipline and Course chained combo]", "sectionSortOrder": _ },
              "fields": _ }, lambda w,x: { "sortOrder":w, "key":"course", "value": x },
            #
            #{ "key": "_", "value": "_" }
            default=False )
        if m:
            print(' '*indent + 'case 1: ' + str(m) )
            return 1,m
    except Exception as e:
        # match() raising (e.g. scalar node) means "no pattern matched".
        m = 0
    if (not m) and type(root) == type( [] ):
        # an array that only has DICTS, which only have 2 (or 3) keys, key,value,(sortOrder)
        # we want to collapse it into a dict.
        this_change = 0
        maybe_new_dict = {}
        is_collapsable = 1
        for z in root:
            if type(z)==type({}):
                for ea in list(z.keys()):
                    if not ea in ['sortOrder','key','value']:
                        is_collapsable = 0
            else:
                is_collapsable = 0
            if not is_collapsable:
                break
            # Still collapsible so far: fold this element into the dict.
            if is_collapsable:
                kk = list(z.keys())
                if 'sortOrder' in kk and 'key' in kk and 'value' in kk:
                    # Prefix with sortOrder so parallel keys stay distinct.
                    maybe_new_dict[str(z['sortOrder'])+'_'+z['key']] = z['value']
                elif 'key' in kk and 'value' in kk:
                    maybe_new_dict[z['key']] = z['value']
                else:
                    maybe_new_dict['value'] = z['value']
        if is_collapsable:
            return 1,maybe_new_dict
        # Not collapsible: recurse into each element instead.
        my_list = []
        for x in root:
            changed, m = cq_8022(x, indent+1)
            this_change += changed
            if changed:
                if m != "NULL":  # drop fragments marked as empty
                    my_list.append(m)
                print(' '*indent + 'case 5: ' +str(m))
            else:
                my_list.append(x)
        if this_change:
            # Re-run on the simplified list until it settles.
            changed2,m2 = cq_8022(my_list,indent+1)
            return changed2+this_change , m2
    if (not m) and type(root) == type( {} ):
        my_d_clone = {}
        this_change = 0
        for k,V in root.items():
            changed,m = cq_8022(V,indent+1)
            this_change += changed
            # NOTE(review): this tests the cumulative this_change rather than
            # the per-key 'changed'; once any key changed, later unchanged
            # keys also take this branch (harmless, since then m == V).
            if this_change:
                print(' '*indent + 'case 4: ' +str(m))
                my_d_clone[k] = m
            else:
                #my_d_clone[k+'_unchanged'] = V
                my_d_clone[k] = V
        if this_change:
            # Re-run on the simplified dict until it settles.
            changed2,m2 = cq_8022(my_d_clone,indent+1)
            return changed2+this_change , m2
    return 0,root
    # Unreachable string literal below: dead draft kept for reference.
    """if not changed and k == "fields" and type(V) == list:
        #new_dict = {"err":[] }
        new_list = []
        for item in V:
            if item == "NULL": continue
            if type(item) == dict:
                if len(item.keys())==2 and ("key" in item.keys()) and ("value" in item.keys()):
                    #print("\n" + str(item.keys()))
                    #pdb.set_trace()
                    new_list.append( {"key": item["key"], "value": item["value"] } )
                else:
                    changed,m = cq_8022(item, indent+1)
                    this_change += changed
                    if changed:
                        new_list.append(m)
                    else:
                        new_list.append(item)
        m = new_list
        this_change += 1
    elif (not m) and type(root) == type([]):
        myclone = []
        this_change = 0
        for V in root:
            changed,m = cq_8022(V,indent+1)
            this_change += changed
            if m:
                print('case 3 (' + str(indent) + ') ' + str(m))
                myclone.append(m)
            else:
                myclone.append(V)
        if this_change:
            return cq_8022(myclone,indent+1)
        return this_change,myclone"""
def cq_8022_start():
    """Driver for cq_8022(): simplify the demo program JSON and dump it.

    Reads  cache/programs/programs_demo.txt
    Writes cache/test_prog8020.txt (and truncates cache/test_prog8020err.txt,
    reserved for error capture, currently unused).
    """
    # Context managers so handles are closed even on error
    # (the original leaked all three handles).
    with open('cache/programs/programs_demo.txt', 'r') as inp:
        root = json.loads(inp.read())
    #changed = 1
    #while changed:
    changed, result = cq_8022(root)
    with open('cache/test_prog8020.txt', 'w') as outt:
        outt.write(json.dumps(result, indent=2))
    # Opened (and truncated) to preserve the original side effect.
    with open('cache/test_prog8020err.txt', 'w') as outt_err:
        pass
        #outt_err.write( err )
# # # # # # # # # #
# # # #
# #
#
# May 2021
def sortable_class(li):
    """Build a lexicographically sortable course key like 'MATH042B'.

    li[1] is the department code; li[2] is the course number, which may be
    plain digits ('5'), digits plus a letter suffix ('12B'), or a letter
    prefix plus digits ('X42' - the prefix is discarded, the original
    "little error case").  The numeric part is zero-padded to 3 digits so
    string order matches numeric order.

    :param li: course info row (index 1 = dept, index 2 = number)
    :return: dept + zero-padded number + optional letter suffix
    """
    dept = li[1]
    rest = ''
    # little error case here: letter-prefixed numbers keep only the digits.
    n = re.match(r'([A-Za-z]+)(\d+)', li[2])
    if n:
        num = int(n.group(2))
    else:
        # Digits followed by a letter suffix, e.g. '12B' -> 12, 'B'.
        m = re.match(r'(\d+)([A-Za-z]+)$', li[2])
        if m:
            num = int(m.group(1))
            rest = m.group(2)
        else:
            num = int(li[2])
    # zfill(3) replaces the manual '00'/'0' padding if/elif chain.
    return dept + str(num).zfill(3) + rest
def c_name(c):
    """Extract a flat summary tuple from one CurricUNET course record.

    :param c: parsed course JSON (dict with entityMetadata / entityFormData)
    :return: (id, dept, num, active, title, min_units, max_units,
              delivery, hybridPct, desc, slos)

    dept/num/title/desc are only bound when the corresponding sections are
    present; a malformed record raises NameError at the return, matching
    the original behavior.
    """
    delivery = set()
    units = []          # built as [min_units, max_units] while scanning
    slos = []
    hybridPct = ''
    active = 'Active'
    id = c['entityMetadata']['entityId']
    if c['entityMetadata']['status'] != 'Active':
        active = 'Inactive'
        #return ()
    for r in c['entityFormData']['rootSections']:
        if r['attributes']['sectionName'] == 'Course Description':
            for ss in r['subsections']:
                for f in ss['fields']:
                    if f['attributes']['fieldName'] == 'Course Discipline':
                        dept = f['lookUpDisplay']
                    if f['attributes']['fieldName'] == 'Course Number':
                        num = f['fieldValue']
                    if f['attributes']['fieldName'] == 'Course Title':
                        title = f['fieldValue']
                    if f['attributes']['fieldName'] == 'Course Description':
                        # Flatten newlines so the description fits one row.
                        desc = re.sub(r'\n', ' ', f['fieldValue'])
        if r['attributes']['sectionName'] == 'Units/Hours/Status':
            for ss in r['subsections']:
                if ss['attributes']['sectionName'] == '':
                    for f in ss['fields']:
                        if f['attributes']['fieldName'] == 'Minimum Units' and f['fieldValue'] not in units:
                            units.insert(0, f['fieldValue'])
                        if f['attributes']['fieldName'] == 'Maximum Units' and f['fieldValue'] and f['fieldValue'] not in units:
                            units.append(f['fieldValue'])
        # Newer entered courses have this filled out
        if r['attributes']['sectionName'] == 'Distance Education Delivery':
            for ss in r['subsections']:
                if ss['attributes']['sectionName'] == 'Distance Education Delivery':
                    for ssa in ss['subsections']:
                        for f in ssa['fields']:
                            if f['attributes']['fieldName'] == 'Delivery Method':
                                delivery.add(f['lookUpDisplay'])
                if ss['attributes']['sectionName'] == "":
                    if ss['fields'][0]['attributes']['fieldName'] == "If this course is Hybrid, what percent is online?":
                        hybridPct = str(ss['fields'][0]['fieldValue'])
        # Older ones seem to have it this way
        if r['attributes']['sectionName'] == 'Distance Education':
            for ss in r['subsections']:
                for f2 in ss['fields']:
                    if 'fieldName' in f2['attributes'] and f2['attributes']['fieldName'] == 'Methods of Instruction':
                        if f2['fieldValue'] == 'Dist. Ed Internet Delayed':
                            delivery.add('Online')
        # SLO
        if r['attributes']['sectionName'] == 'Student Learning Outcomes':
            for ss in r['subsections']:
                if 'subsections' in ss:
                    if ss['attributes']['sectionName'] == 'Learning Outcomes':
                        for s3 in ss['subsections']:
                            for ff in s3['fields']:
                                if ff['attributes']['fieldName'] == 'Description':
                                    slos.append(ff['fieldValue'])
    if len(units) == 1: units.append('')
    if len(delivery) == 0: delivery.add('')
    u0 = 0
    try:
        u0 = units[0]
    except:
        pass
    u1 = 0
    try:
        # BUG FIX: was units[2], which is out of range for the normal
        # [min, max] list, so max-units silently stayed 0; max is units[1].
        u1 = units[1]
    except:
        pass
    # NOTE(review): '/'.join over a set is unordered when a course has
    # several delivery methods - confirm whether sorted output is wanted.
    return id, dept, num, active, title, u0, u1, '/'.join(delivery), hybridPct, desc, slos
def show_classes2020():
    # Placeholder - no implementation yet; the work lives in
    # show_classes2020_start(), which the __main__ menu calls directly.
    pass
def show_classes2020_start():
    """Build the 2021 course listings from the cached CurricUNET dumps.

    Scans cache/courses/classes_* files, summarizes each course via
    c_name(), keeps only the highest-id version per course key, and writes:
      cache/test_class2021_all.txt - every kept summary, in scan order
      cache/test_class2021.txt     - the deduplicated {course_key: info} map
    """
    max_active = {}   # course_key -> highest CurricUNET id seen so far
    used_course = {}  # course_key (possibly '_'-suffixed) -> info list
    slo_by_id = {}    # course id -> list of SLO strings
    #tmp = codecs.open('cache/course_temp.txt','w','utf-8')
    # 'with' so the output handle is flushed/closed even on error
    # (the original never closed its handles).
    with open('cache/test_class2021_all.txt', 'w') as outt:
        for f in os.listdir('cache/courses'):
            if re.search('classes_', f):
                print(f)
                with open('cache/courses/' + f, 'r') as fh:
                    cls = json.loads(fh.read())
                for c in cls:
                    dir_data = list(c_name(c))
                    #tmp.write(str(dir_data) + "\n\n")
                    slo_by_id[dir_data[0]] = dir_data[10]  # raw SLO list
                    info = list(map(str, dir_data[:10]))
                    info.append(dir_data[10])
                    course_key = sortable_class(info)
                    curqnt_id = int(info[0])
                    if course_key in max_active:
                        if curqnt_id < max_active[course_key]:
                            continue  # older version of this course: skip
                    max_active[course_key] = curqnt_id
                    if course_key in used_course:
                        # Keep superseded versions too, under '_'-suffixed keys.
                        while course_key in used_course:
                            course_key += '_'
                    used_course[course_key] = info
                    print("\t%s" % course_key)
                    outt.write(json.dumps(info, indent=2))
    with open('cache/test_class2021.txt', 'w') as out2:
        out2.write(json.dumps(used_course, indent=2))
if __name__ == "__main__":
    # Interactive menu: list the numbered actions, read a choice, run it.
    print('')
    menu = [
        (1, 'take 1 - programs', cq_8021_start),
        (2, 'take 2 - programs', cq_8022_start),
        (3, 'take 1 - classes', show_classes2020_start),
    ]
    options = {num: [label, fn] for num, label, fn in menu}
    for num, label, fn in menu:
        print('%s.\t%s' % (num, label))
    print('')
    resp = input('Choose: ')
    # Dispatch to the chosen action.
    options[int(resp)][1]()

481
curriculum_patterns.py Normal file
View File

@ -0,0 +1,481 @@
from pampy import _
curic_patterns = []
curic_patterns.append( {
"attributes": {
"fieldName": "Division",
"fieldId": 65000,
"isLookUpField": True,
"displayName": "Division"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
} )
def div1(a, b):
    """Format (and echo) a Division pattern match."""
    msg = "Division: %s, id: %s" % (a, b)
    print(msg)
    return msg
curic_patterns.append(div1)
curic_patterns.append( {
"attributes": {
"fieldName": "Department",
"fieldId": 65001,
"isLookUpField": True,
"displayName": "Department"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
})
def d2(a, b):
    """Format (and echo) a Department pattern match."""
    msg = "Department: %s, id: %s" % (a, b)
    print(msg)
    return msg
curic_patterns.append(d2)
curic_patterns.append({
"attributes": {
"fieldName": "Award Type",
"fieldId": 60221,
"isLookUpField": True,
"displayName": "Award Type"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
})
def d3(a, b):
    """Format (and echo) an Award Type pattern match."""
    msg = "Award: %s, id: %s" % (a, b)
    print(msg)
    return msg
curic_patterns.append(d3)
p1 = {
"attributes": {
"fieldName": "Description",
"fieldId": _,
"isLookUpField": False,
"displayName": "Description"
},
"dataTypeDetails": {
"type": "string"
},
"fieldValue": _
}
def pp1(a, b):
    """Format a Description field match (a = field id, b = text)."""
    return "Description (id:%s) %s" % (a, b)
curic_patterns.append(p1)
curic_patterns.append(pp1)
p2 = {"attributes": {
"fieldName": "Program Title",
"fieldId": _,
"isLookUpField": False,
"displayName": "Program Title"
},
"dataTypeDetails": {
"type": "string",
"maxLength": 250
},
"fieldValue":_
}
def pp2(a, b):
    """Format a Program Title field match (a = field id, b = title)."""
    return "Program (id:%s) %s" % (a, b)
curic_patterns.append(p2)
curic_patterns.append(pp2)
p3 = { "attributes": {
"fieldName": "Course",
"fieldId": _,
"isLookUpField": True,
"displayName": "Course"
},
"lookUpDataset": [
[
{
"name": "Max",
"value": _
},
{
"name": "IsVariable",
"value": _
},
{
"name": "Min",
"value": _
},
{
"name": "Text",
"value": _
}
]
],
"dataTypeDetails": {
"type": "lookup"
},
"lookUpDisplay": _,
"fieldValue": _
}
def pp3(a, b, c, d, e, f, g):
    """Format a Course lookup match (7 captures from the p3 pattern)."""
    return "Course (%s / %s) %s (%s), var? %s %s - %s" % (a, g, f, e, c, b, d)
curic_patterns.append(p3)
curic_patterns.append(pp3)
p4 = {
"attributes": {
"sectionOrInstance": "section",
"sectionName": "Unit Range",
"sectionSortOrder": 2,
"oneToManySection": False
},
"subsections": [],
"fields": [
{
"attributes": {
"fieldName": "Units Low",
"fieldId": 59608,
"isLookUpField": False,
"displayName": "Units Low"
},
"dataTypeDetails": {
"scale": 2,
"type": "numeric",
"precision": 6
},
"fieldValue": _
},
{
"attributes": {
"fieldName": "Units High",
"fieldId": 59609,
"isLookUpField": False,
"displayName": "Units High"
},
"dataTypeDetails": {
"scale": 2,
"type": "numeric",
"precision": 6
},
"fieldValue": _
}
]
}
def pp4(a, b):
    """Format a Unit Range match (a = low units, b = high units)."""
    return "Unit Range: %s - %s" % (a, b)
curic_patterns.append(p4)
curic_patterns.append(pp4)
p5 = {
"attributes": {
"fieldName": "Discipline",
"fieldId": _,
"isLookUpField": True,
"displayName": "Discipline"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
}
def pp5(a, b, c):
    """Format a Discipline lookup match."""
    return "Discipline (%s) %s / %s" % (a, b, c)
curic_patterns.append(p5)
curic_patterns.append(pp5)
p6 = { "attributes": {
"fieldName": "Course Block Definition",
"fieldId": _,
"isLookUpField": False,
"displayName": "Course Block Definition"
},
"dataTypeDetails": {
"type": "string"
},
"fieldValue": _
}
def pp6(a, b):
    """Format a Course Block Definition match (a = field id, b = text)."""
    return "Block (%s) %s" % (a, b)
p7 = {
"attributes": {
"fieldName": "Block Header",
"fieldId": _,
"isLookUpField": False,
"displayName": "Block Header"
},
"dataTypeDetails": {
"type": "string",
"maxLength": 4000
},
"fieldValue": _
}
def pp7(a, b):
    """Format a Block Header match; note the swapped order (b = value, a = id)."""
    return "Block Header (%s) %s" % (b, a)
p8 = {
"attributes": {
"fieldName": "Block Footer",
"fieldId": _,
"isLookUpField": False,
"displayName": "Block Footer"
},
"dataTypeDetails": {
"type": "string",
"maxLength": 4000
},
"fieldValue": _
}
def pp8(a, b):
    """Format a Block Footer match; note the swapped order (b = value, a = id)."""
    return "Block Footer (%s) %s" % (b, a)
curic_patterns.append(p6)
curic_patterns.append(pp6)
curic_patterns.append(p7)
curic_patterns.append(pp7)
curic_patterns.append(p8)
curic_patterns.append(pp8)
######################
###################### Trying to remove more junk
######################
j1 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"type": "string",
"maxLength": _,
},
"fieldValue": _,
}
def jj1(a, b, c, d, e):
    """Format a generic string field: name a, id b, value e (c, d unused)."""
    return "String Label: %s (id %s) Value: %s" % (a, b, e)
curic_patterns.append(j1)
curic_patterns.append(jj1)
j2 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": _,
"type": "numeric",
"precision": _,
},
"fieldValue": _,
}
def jj2(a, b, c, d, e, f):
    """Format a generic numeric field: name a, id b, display c, value f
    (d = scale and e = precision are ignored)."""
    return "Generic Num Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a, b, c, f)
j3 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": True,
"displayName": _,
},
"lookUpDisplay": _,
"dataTypeDetails":
{"type": "lookup",
},
"fieldValue": _,
}
def jj3(a, b, c, d, e):
    """Format a generic lookup field; %i requires b (the id) to be numeric.
    The trailing space in the template is intentional (kept byte-for-byte)."""
    return "Generic lookup Field: Name: %s / %s, ID: %i, Displayname: %s, Value: %s " % (a, c, b, d, e)
curic_patterns.append(j2)
curic_patterns.append(jj2)
curic_patterns.append(j3)
curic_patterns.append(jj3)
j4 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"type": "flag",
},
"fieldValue": _,
}
def jj4(a, b, c, d):
    """Format a generic flag field: name a, id b, display c, value d."""
    return "Generic Flag Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a, b, c, d)
curic_patterns.append(j4)
curic_patterns.append(jj4)
j5 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": _,
"type": "numeric",
"precision": _,
},
"fieldValue": _,
}
def jj5(a, b, c, d, e, f):
    """Format a numeric field match: name a, id b, display c, value f.

    BUG FIX: the original built r and fell off the end, returning None,
    so every jj5 pattern hit produced None instead of its label string
    (every sibling formatter jj1/jj2/jj4/jj6 returns its string).
    """
    r = "Numeric Field, Name: %s / %s Id: %s, Value: %s" % (a, c, b, f)
    return r
j6 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"type": "string", },
"fieldValue": _, }
def jj6(a, b, c, d):
    """Format a string field without maxLength: label a/c, value d, id b."""
    return "String+Label field. Label: %s / %s Value: %s Id: %s" % (a, c, d, b)
curic_patterns.append(j5)
curic_patterns.append(jj5)
curic_patterns.append(j6)
curic_patterns.append(jj6)
"""j2 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": 2,
"type": "numeric",
"precision": 6,
},
"fieldValue": _,
}
def jj2(a,b,c,d):
r = "Generic Num Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a,b,c,d)
print(r)
return r
j2 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": 2,
"type": "numeric",
"precision": 6,
},
"fieldValue": _,
}
def jj2(a,b,c,d):
r = "Generic Num Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a,b,c,d)
print(r)
return r
"""

1289
depricated.py Normal file

File diff suppressed because it is too large Load Diff

0
fa19_sched.json Normal file
View File

32
geckodriver.log Normal file
View File

@ -0,0 +1,32 @@
1558125801686 mozrunner::runner INFO Running command: "C:\\Program Files\\Mozilla Firefox\\firefox.exe" "-marionette" "-foreground" "-no-remote" "-profile" "C:\\Users\\phowell\\AppData\\Local\\Temp\\rust_mozprofile.IlmIOIgJLngr"
1558125802078 addons.webextension.screenshots@mozilla.org WARN Loading extension 'screenshots@mozilla.org': Reading manifest: Invalid extension permission: mozillaAddons
1558125802079 addons.webextension.screenshots@mozilla.org WARN Loading extension 'screenshots@mozilla.org': Reading manifest: Invalid extension permission: resource://pdf.js/
1558125802079 addons.webextension.screenshots@mozilla.org WARN Loading extension 'screenshots@mozilla.org': Reading manifest: Invalid extension permission: about:reader*
1558125802203 addons.xpi-utils WARN Add-on lsiwebhook@lakesidesoftware.com is not correctly signed.
1558125802204 addons.xpi-utils WARN Add-on lsiwebhook@lakesidesoftware.com is not correctly signed.
1558125802205 addons.xpi-utils WARN addMetadata: Add-on lsiwebhook@lakesidesoftware.com is invalid: Error: Extension lsiwebhook@lakesidesoftware.com is not correctly signed(resource://gre/modules/addons/XPIDatabase.jsm:2349:17) JS Stack trace: addMetadata@XPIDatabase.jsm:2349:17
processFileChanges@XPIDatabase.jsm:2700:21
checkForChanges@XPIProvider.jsm:2570:34
startup@XPIProvider.jsm:2148:25
callProvider@AddonManager.jsm:203:12
_startProvider@AddonManager.jsm:652:5
startup@AddonManager.jsm:805:9
startup@AddonManager.jsm:2775:5
observe@addonManager.js:66:9
1558125802205 addons.xpi-utils WARN Could not uninstall invalid item from locked install location
JavaScript error: resource://gre/modules/addons/XPIProvider.jsm, line 2614: TypeError: addon is null
1558125803597 Marionette INFO Listening on port 59067
1558125803830 Marionette WARN TLS certificate errors will be ignored for this session
1558125819374 Marionette INFO Stopped listening on port 59067
[Parent 7300, Gecko_IOThread] WARNING: pipe error: 109: file z:/build/build/src/ipc/chromium/src/chrome/common/ipc_channel_win.cc, line 332
[Child 6396, Chrome_ChildThread] WARNING: pipe error: 109: file z:/build/build/src/ipc/chromium/src/chrome/common/ipc_channel_win.cc, line 332
[Child 6396, Chrome_Child[Parent 7300, Gecko_IOThread] WARNING: pipe error: 109: file z:/build/build/src/ipc/chromium/src/chrome/common/ipc_channel_win.cc, line 332
[Child 6664, Chrome_ChildThread] WARNING: pipe error: 109: file z:/build/build/src/ipc/chromium/src/chrome/common/ipc_channel_win.cc, line 332
[Child 6664, Chrome_ChildThrea[Child 10084, Chrome_ChildThread] WARNING: pipe error: 109: file z:/build/build/src/ipc/chromium/src/chrome/common/ipc_channel_win.cc, line 332
[Child 10084, Chrome_ChildThread] WARNING[GPU 1
###!!! [Child][RunMessage] Error: Channel closing: too late to send/recv, messages will be lost
0312, Chr
###!!! [Child][MessageChannel::SendAndWait] Error: Channel error: cannot send/recv

28
gpt.py Normal file
View File

@ -0,0 +1,28 @@
# Minimal OpenAI completion CLI: prompt comes from argv (or a default),
# generated text is printed to stdout.
import os, json, sys
import openai
from secrets import openai_org, openai_api_key  # local secrets.py (gitignored)

# SECURITY FIX: the organization id and API key were hard-coded here (and
# committed) even though secrets.py was already imported; use the imported
# values. The previously committed key must be treated as leaked and revoked.
openai.organization = openai_org
openai.api_key = openai_api_key
#print(openai.Model.list())

my_prompt = "Write a series of texts trying to sell a pen to a stranger."
print(sys.argv)
# (removed a bare `exit` expression here - it was a no-op name reference,
# not a call, and had no effect)
if len(sys.argv) > 1:
    # Everything on the command line becomes the prompt.
    my_prompt = " ".join(sys.argv[1:])
else:
    print("Prompt: %s" % my_prompt)

my_model = "text-davinci-003"
# create a completion and print only the generated text
completion = openai.Completion.create(engine=my_model, prompt=my_prompt, max_tokens=1000, temperature=1, top_p=1)
#print(completion)
#print(json.dumps(completion,indent=2))
print(completion.choices[0].text)
print()

206
graphics.py Normal file
View File

@ -0,0 +1,206 @@
import cv2, sys, glob, os
# Face-cropping batch script: detect faces in cache/picsId/Pending/test/*.jpg
# and write 10px-padded crops into .../test3/.
cascPath = "haarcascade_frontalface_default.xml"
# Create the haar cascades (the eye cascade is loaded but currently unused).
#faceCascade = cv2.CascadeClassifier(cascPath)
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
folder = "cache/picsId/Pending/test/"
outfolder = "cache/picsId/Pending/test3/"
files = glob.glob(folder + "*.jpg")
for file in files:
    # Read the image
    fn = file.split("/")[-1]
    print(file)
    print(fn)
    image = cv2.imread(file)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = image.shape
    # BUG FIX: numpy shape is (rows, cols) = (height, width). The original
    # set ww = shape[0] (height) and hh = shape[1] (width) and then clamped
    # x against the height and y against the width, mangling crops near the
    # edges of non-square images.
    hh = img_size[0]   # image height (rows)
    ww = img_size[1]   # image width  (cols)
    print("Image size: " + str(img_size))
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(gray, minNeighbors=5, minSize=(70, 70))
    """
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30),
    flags = cv2.CASCADE_SCALE_IMAGE )
    """
    print("Found %d faces!" % len(faces))
    # NOTE(review): exit() aborts the whole batch on the first faceless
    # image - looks like a debugging stop; switch to 'continue' if the
    # full batch should run.
    if len(faces) == 0: exit()
    # Crop padding, pixels on each side.
    left = 10
    right = 10
    top = 10
    bottom = 10
    j = 0  # face index within this image; first face keeps the bare filename
    for (x, y, w, h) in faces:
        k = j
        if k == 0: k = ''
        else: k = str(k) + '_'
        # Debugging boxes
        # cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        # Pad the detection box, then clamp x to [0, width] and y to [0, height].
        new_x1 = x - left
        new_y1 = y - top
        new_x2 = x + w + right
        new_y2 = y + h + bottom
        x1 = max(new_x1, 0)
        y1 = max(new_y1, 0)
        x2 = min(new_x2, ww)
        y2 = min(new_y2, hh)
        xx1 = min(x1, x2)
        xx2 = max(x1, x2)
        yy1 = min(y1, y2)
        yy2 = max(y1, y2)
        print(x, y, w, h)
        print(ww, hh)
        print(xx1, xx2, yy1, yy2)
        #image = image[y-top:y+h+bottom, x-left:x+w+right]
        write_image = image[yy1:yy2, xx1:xx2]  # numpy indexing is [rows, cols] = [y, x]
        print("Writing: " + outfolder + k + fn)
        try:
            cv2.imwrite(outfolder + k + fn, write_image)
        except:
            print(" (failed. was image too small?)")
        j += 1
# print ("cropped_{1}{0}".format(str(file),str(x)))
# autocrop
#
"""
from PIL import Image
from autocrop import Cropper
cropper = Cropper()
# Get a Numpy array of the cropped image
cropped_array = cropper.crop('portrait.png')
# Save the cropped image with PIL
cropped_image = Image.fromarray(cropped_array)
cropped_image.save('cropped.png')
--------------
usage: [-h] [-o OUTPUT] [-i INPUT] [-w WIDTH] [-H HEIGHT] [-e EXTENSION] [-v]
Automatically crops faces from batches of pictures
optional arguments:
-h, --help
Show this help message and exit
-o, --output, -p, --path
Folder where cropped images will be placed.
Default: current working directory
-r, --reject
Folder where images without detected faces will be placed.
Default: same as output directory
-i, --input
Folder where images to crop are located.
Default: current working directory
-w, --width
Width of cropped files in px. Default=500
-H, --height
Height of cropped files in px. Default=500
--facePercent
Zoom factor. Percentage of face height to image height.
-e, --extension
Enter the image extension which to save at output.
Default: Your current image extension
-v, --version
Show program's version number and exit
autocrop -i . -o test4 -w 250 -H 333
"""
# smartcrop
#
#
# smartcrop -W 1140 -H 400 -i input.jpg -o output.jpg
#
#
# imagemagick jpeg compress
#
# convert -strip -interlace Plane -gaussian-blur 0.05 -quality 60% -adaptive-resize 60% img_original.jpg img_resize.jpg
#
#
# convert image.jpg -define jpeg:extent=150kb result.jpg

919
interactive.py Normal file
View File

@ -0,0 +1,919 @@
import curses
import heapq, re, csv, os, shutil, datetime, urllib
import itertools, time, markdown, csv, json, os.path, webbrowser, threading
from functools import wraps
from flask import Flask, request, send_from_directory, Response, render_template
from flask import send_file
from flask_socketio import SocketIO, emit
from werkzeug.routing import PathConverter
from queue import Queue
from importlib import reload
import server
import localcache
from server import *
from secrets import flask_secretkey
# Work queue handed to flask_thread() below.
q = Queue()
# NOTE(review): HOST_NAME is assigned three times; only the last value
# ('192.168.1.6') takes effect - the earlier lines act as quick toggles.
HOST_NAME = '127.0.0.1' #
HOST_NAME = '192.168.1.6' #
HOST_NAME = '192.168.1.6' #
PORT_NUMBER = 8080 # Maybe set this to 9000.
datafile = 'lambda.csv'
####
#### This little web server is going to work with the "gui" folder / vue app
####
####
def dict_generator(indict, pre=None):
    """Yield flattened path/value descriptions for a nested structure.

    Dict leaves come out as strings "<path> [key, value]\\n"; lists/tuples
    fan out per element under the same key.  A non-dict root yields both
    the raw path list and its string form (matching the original's
    double yield).
    """
    path = list(pre) if pre else []
    if not isinstance(indict, dict):
        yield path + [indict]
        yield "%s %s\n" % (path, [indict])
        return
    for key, value in indict.items():
        if isinstance(value, dict):
            yield from dict_generator(value, path + [key])
        elif isinstance(value, (list, tuple)):
            for item in value:
                yield from dict_generator(item, path + [key])
        else:
            yield "%s %s\n" % (path, [key, value])
def print_dict(v, prefix='', indent=''):
    """Render a nested dict/list as nested lists of "path = value" strings.

    Each dict key / list index is appended to *prefix* (followed by an HTML
    line break); scalars terminate the recursion as formatted strings.
    """
    if isinstance(v, dict):
        return [print_dict(child, "%s['%s']<br />" % (prefix, key), indent + " ")
                for key, child in v.items()]
    if isinstance(v, list):
        return [print_dict(child, "%s[%s]<br />" % (prefix, idx), indent + " ")
                for idx, child in enumerate(v)]
    return '%s = %r\n' % (prefix, v)
def walk_file():
    """Load the cached programs JSON and render it with print_dict().

    Fixed: the file handle was opened and never closed; a context manager
    now releases the descriptor deterministically.
    """
    with open('cache/programs/programs_2.txt', 'r') as fh:
        j = json.loads(fh.read())
    return print_dict(j)
def tag(x, y):
    """Wrap content *y* in an HTML element named *x*."""
    return "<{0}>{1}</{0}>".format(x, y)

def tagc(x, c, y):
    """Wrap content *y* in element *x* carrying CSS class *c*."""
    return '<{0} class="{1}">{2}</{0}>'.format(x, c, y)

def a(t, h):
    """Build an anchor with link text *t* pointing at href *h*."""
    return '<a href="{0}">{1}</a>'.format(h, t)
def server_save(key, value):
    """Append a "key=value" line to the server's scratch data file.

    Fixed: the codecs handle was opened and never closed; a context manager
    now guarantees the write is flushed and the descriptor released.
    NOTE(review): `codecs` is not imported in this module — it presumably
    arrives via `from server import *`; confirm.
    """
    with codecs.open('cache/server_data.txt', 'a') as fh:
        fh.write("%s=%s\n" % (str(key), str(value)))
def flask_thread(q):
    """Build and run the Flask + SocketIO app (blocking; run on a worker thread).

    q: the Queue handed in by serve(); accepted but not read by any route here.
    Most handlers delegate to helpers pulled in by `from server import *`.
    """
    #app = Flask(__name__, static_url_path='/cache',
    # static_folder='cache',)
    app = Flask(__name__)
    app.config['SECRET_KEY'] = flask_secretkey
    app.jinja_env.auto_reload = True
    socketio = SocketIO(app)
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    # Empty the jinja template cache before every request so edited templates
    # are picked up without restarting the server.
    def before_request():
        app.jinja_env.cache = {}
    app.before_request(before_request)
    # --- home-automation style routes; helpers presumably come from server.* ---
    @app.route('/clearscreens')
    def clears():
        clearscreens()
        return homepage()
    @app.route('/displaypi/on')
    def dpi():
        displaypi_on()
        return homepage()
    @app.route('/displaypi/off')
    def dpi2():
        displaypi_off()
        return homepage()
    @app.route('/screensoff')
    def screenoff_a():
        screenoff()
        return homepage()
    @app.route('/light')
    def light():
        desklight()
        return homepage()
    @app.route('/image/<filename>', methods=['GET','POST'])
    def do_image(filename):
        return image_edit(filename)
    @app.route('/imagecrop/<filename>/<x>/<y>/<w>/<h>/<newname>', methods=['GET','POST'])
    def do_image_crop(filename,x,y,w,h,newname):
        return image_crop(filename,x,y,w,h,newname)
    #
    # SAVING STUFF
    #
    @app.route('/save', methods=['POST'])
    def save_post():
        # Backs up the current file under older_copies/ before overwriting it
        # with the POSTed content.
        # NOTE(review): this module imports the `datetime` *module*, so
        # `datetime.now()` only works if `from server import *` re-binds
        # `datetime` to the class — confirm.
        now = datetime.now().strftime('%Y%m%dT%H%M')
        path = request.form['path']
        txt = request.form['content']
        o3 = codecs.open(server.writing_path + path, 'r', 'utf-8')
        orig_text = o3.read()
        o3.close()
        bu_filename = server.writing_path + 'older_copies/' + path + '_' + now + '.md'
        o2 = codecs.open( bu_filename, 'w', 'utf-8' )
        o2.write(orig_text)
        o2.close()
        print('wrote backup to %s.' % bu_filename)
        o1 = codecs.open(server.writing_path+path, 'w', 'utf-8')
        o1.write(txt)
        o1.close()
        return "<h1>Successfully Saved</h1><br>" + a('back to writing folder','/x/writing/index') + \
            " &nbsp; &nbsp; &nbsp; " + a('back to home','/')
    @app.route('/x/writing/images/<fname>')
    def writing_img(fname):
        img_path = "/media/hd2/peter_home/Documents/writing_img/"
        print(img_path + fname + " - writing images folder")
        # NOTE(review): every branch below returns the same call, so the
        # extension checks are effectively dead code.
        img_ext = fname.split('.')[-1]
        if img_ext == "gif":
            return send_from_directory(img_path, fname)
        if img_ext == "jpg":
            return send_from_directory(img_path, fname)
        if img_ext == "png":
            return send_from_directory(img_path, fname)
        return send_from_directory(img_path, fname)
    #
    # SERVER maintenance type stuff
    @app.route('/rl')
    def restart():
        # Hot-reload the two project modules without restarting the process.
        reload(server)
        reload(localcache)
        return "Server code reloaded"
    # Generic dispatchers: /x/... returns HTML, /api/... returns JSON; the
    # first path segment names a server_dispatch() function, the rest are args.
    @app.route("/x/<func>/<arg>/<arrg>")
    def dispatch3(func,arg,arrg):
        print("2 args")
        return "" + server_dispatch(func, arg, arrg)
    @app.route("/x/<func>/<arg>")
    def dispatch2(func,arg):
        print("1 arg")
        return "" + server_dispatch(func, arg)
    @app.route("/x/<func>")
    def dispatch(func):
        print("0 arg")
        return server_dispatch(func)
    @app.route("/api/<func>/<arg>/<arrg>")
    def dispatch3j(func,arg,arrg):
        print("json, 3 args")
        return Response(server_dispatch(func, arg, arrg), mimetype='text/json')
    @app.route("/api/<func>/<arg>")
    def dispatch2j(func,arg):
        print("json, 1 arg")
        return Response(server_dispatch(func, arg), mimetype='text/json')
    @app.route("/api/<func>")
    def dispatch1j(func):
        print("json, 0 arg")
        return Response(server_dispatch(func), mimetype='text/json')
    @app.route("/")
    def home():
        return server.homepage()
    #
    # STATIC ROUTES
    #
    @app.route('/data/<path:path>')
    def send_cachedata(path):
        #myfile = os.path.join('cache', path).replace('\\','/')
        print(path)
        #return app.send_static_file(myfile)
        return send_from_directory('cache', path)
    # Departments, classes in each, and students (with hits) in each of those.
    """@app.route('/iii/<path:path>')
def send_js(path):
return send_from_directory('gui/dist', path)"""
    """@app.route('/lib/<path:path>')
def send_jslib(path):
return send_from_directory('gui/lib', path)"""
    #@app.route('/hello/')
    #@app.route('/hello/<name>')
    @app.route("/save/<key>/<val>")
    def s(key,val):
        server_save(key,val)
        return tag('h1','Saved.') + "<br />" + tag('p', 'Saved: %s = %s' % (str(key),str(val)))
    @app.route("/sample")
    def do_sample():
        return sample()
    @app.route('/podcast/media/<string:file_id>')
    def media(file_id):
        # NOTE(review): `attachment_filename` was removed in Flask 2.x
        # (renamed `download_name`) — confirm the installed Flask version.
        return send_file(LECPATH + urllib.parse.unquote(file_id), attachment_filename=urllib.parse.unquote(file_id))
    @app.route("/podcast")
    def podcast():
        return lectures()
    @app.route("/lectures")
    def weblec():
        return web_lectures()
    @app.route("/crazy")
    def hello():
        # Demo page: renders the cached programs JSON via walk_file().
        r = '<link rel="stylesheet" href="static/bootstrap.min.css">'
        r += tag('style', 'textarea { white-space:nowrap; }')
        r += tag('body', \
            tagc('div','container-fluid', \
            tagc('div','row', \
            tagc( 'div', 'col-md-6', tag('pre', walk_file() ) ) + \
            tagc( 'div', 'col-md-6', 'Column 2' + a('Shut Down','/shutdown' ) ) ) ) )
        return r
    @app.route("/sd")
    def sd():
        # Ask the werkzeug dev server to shut itself down.
        print('SIGINT or CTRL-C detected. Exiting gracefully')
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
        return "Server has shut down."
    @socketio.on('my event', namespace='/test')
    def test_message(message):
        print('received and event: "my event" from page. message is: %s' % message)
        emit('my response', {'data': 'got it! it is MYEVENT'})
    # Blocks here, serving on all interfaces.
    socketio.run(app, host= '0.0.0.0')
def serve():
    """Launch the Flask/SocketIO app and the MQTT loop, each on its own thread."""
    for target, args in ((flask_thread, (q,)), (mqtt_loop, ())):
        worker = threading.Thread(target=target, args=args)
        worker.start()
    #webbrowser.open_new_tab("http://localhost:5000")

if __name__ == '__main__':
    serve()
"""class HelloWorldExample(object):
def make_teacher_rel(self, tchr, clss):
with self._driver.session() as tx:
tx.run("MERGE (tchr:Teacher {name: $tchr}) MERGE (tchr)-[:TEACHES]->(clss:Class {name: $clss})", \
tchr=tchr, clss=clss)
def __init__(self, uri, user, password):
self._driver = GraphDatabase.driver(uri, auth=(user, password))
def close(self):
self._driver.close()
def print_greeting(self, message):
with self._driver.session() as session:
greeting = session.write_transaction(self._create_and_return_greeting, message)
print(greeting)
@staticmethod
def _create_and_return_greeting(tx, message):
result = tx.run("CREATE (a:Greeting) "
"SET a.message = $message "
"RETURN a.message + ', from node ' + id(a)", message=message)
return result.single()[0]
"""
def make_teacher_rel(g, tchr, clss):
    """Ensure a Teacher node, a Class node and a TEACHES edge exist in graph *g*."""
    cypher = ("MERGE (tchr:Teacher {name: $tchr}) "
              "MERGE (tchr)-[:TEACHES]->(clss:Class {name: $clss})")
    g.run(cypher, tchr=tchr, clss=clss)
def testgraph():
    """Experiment: load the sp20 schedule JSON into neo4j via py2neo.

    NOTE(review): Graph/Node/Relationship are not imported in this module;
    they presumably arrive via `from server import *` — confirm.
    """
    gg = Graph("bolt://localhost:7687", auth=("neo4j", "asdf"))
    #gg.run("DROP CONSTRAINT ON (tchr:Teacher) ASSERT tchr.name IS UNIQUE")
    #gg.run("DROP CONSTRAINT ON (clss:Class) ASSERT clss.name IS UNIQUE")
    #gg.run("CREATE INDEX ON :Teacher(name)")
    #gg.run("CREATE INDEX ON :Class(name)")
    stuff = json.loads( open('output/semesters/2020spring/sp20_sched.json','r').read())
    # make lists of unique course code+name, teacher, locations
    tch = {}
    crs = {}
    loc = {}
    sem = Node("Semester", name="sp20")  # NOTE(review): created but never linked or saved
    for c in stuff:
        # Create each teacher/course/location node once, then one Section per
        # row, wired to all three.
        if not c['teacher'] in tch:
            tch[c['teacher']] = Node("Teacher", name=c['teacher'])
            gg.create(tch[c['teacher']])
        if not c['code'] in crs:
            crs[ c['code'] ] = Node("Course section", name=c['name'], code=c['code'])
            gg.create(crs[ c['code'] ])
        if not c['loc'] in loc:
            loc[ c['loc'] ] = Node("Location", loc=c['loc'])
            gg.create(loc[ c['loc'] ])
        sect = Node("Section", crn=int(c['crn']))
        gg.create(Relationship(tch[c['teacher']], "TEACHES", sect ))
        gg.create(Relationship(sect, "CLASS OF", crs[ c['code'] ] ))
        gg.create(Relationship( sect, "LOCATED AT", loc[ c['loc'] ] ))
    # Dead code preserved as a string literal (earlier raw-Cypher attempt).
    """
for c in stuff:
print(c['crn'])
q = "CREATE (section:Section { Name: "+c['name']+", Code: "+c['code']+", Crn: "+c['crn']+", Teacher: "+c['teacher']+" })"
q = 'CREATE (section:Section { Name: "%s", Code: "%s", Crn: "%s", Teacher: "%s" })' % \
(c['name'], c['code'], c['crn'], c['teacher'])
gg.run(q)
"""
#gg = HelloWorldExample("bolt://localhost:7687", "neo4j", "asdf")
#gg.print_greeting("hi there world")
# Dead example calls kept as a module-level string literal.
"""
make_teacher_rel(gg, "Peter Howell","CSIS 42")
make_teacher_rel(gg, "Alex Stoykov","CSIS 42")
make_teacher_rel(gg, "Sabrina Lawrence","CSIS 85")
make_teacher_rel(gg, "Peter Howell","CSIS 85")
"""
# Global handle to the curses screen; re-bound by MyRepl.startup() and shared
# by the drawing helpers below.
screen = 0

def Memoize(func):
    """
    Memoize decorator: caches results keyed by the positional-argument tuple.
    Arguments must be hashable.
    """
    cache = {}
    @wraps(func)
    def wrapper(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrapper

class MyRepl:
    """A curses-based REPL with tab completion, history and a suggestion box.

    `description` maps completable commands to help text; callers normally
    replace it via set_my_dict() before startup().
    """
    description = {
        "switch ": "Switch stream. You can use either 'switch public' or 'switch mine'",
        "home " : "Show your timeline. 'home 7' will show 7 tweet.",
        "harry " : "a guys name.",
        "homo " : "means the same.",
        "view " : "'view @mdo' will show @mdo's home.",
        "h " : "Show help.",
        "t " : "'t opps' will tweet 'opps' immediately.",
        "s " : "'s #AKB48' will search for '#AKB48' and return 5 newest tweets."
    }

    def startup(self, outfile):
        """Initialise curses, key tables, colour lambdas and the output log."""
        global screen  # make it self
        self.g = {}
        self.buf = {}
        screen = None
        self.enter_ary = [curses.KEY_ENTER, 10]
        self.delete_ary = [curses.KEY_BACKSPACE, curses.KEY_DC, 8, 127, 263]
        self.tab_ary = [9]
        self.up_ary = [curses.KEY_UP]
        self.down_ary = [curses.KEY_DOWN]
        # Init curses screen
        screen = curses.initscr()
        screen.keypad(1)
        curses.noecho()
        try:
            curses.start_color()
            curses.use_default_colors()
            for i in range(0, curses.COLORS):
                curses.init_pair(i + 1, i, -1)
        except curses.error:
            pass
        curses.cbreak()
        self.g['height'], self.g['width'] = screen.getmaxyx()
        # Init color functions.
        # Fixed: the lambdas previously called the bare name
        # `curses_print_word`, which does not exist at module level and would
        # raise NameError — they must go through the instance.
        s = self
        self.white = lambda x: s.curses_print_word(x, 7) #0)
        self.grey = lambda x: s.curses_print_word(x, 3) #3)1)
        self.red = lambda x: s.curses_print_word(x, 7) #2)
        self.green = lambda x: s.curses_print_word(x, 3) #3)
        self.yellow = lambda x: s.curses_print_word(x, 7) #4)
        self.blue = lambda x: s.curses_print_word(x, 3)
        self.magenta = lambda x: s.curses_print_word(x, 7) #6)
        self.cyan = lambda x: s.curses_print_word(x, 7) #7)
        self.colors_shuffle = [s.grey, s.red, s.green, s.yellow, s.blue, s.magenta, s.cyan]
        self.cyc = itertools.cycle(s.colors_shuffle[1:])
        self.index_cyc = itertools.cycle(range(1, 8))
        self.setup_command(outfile)

    def set_my_dict(self, d):
        """Replace the completion dictionary (command -> help text)."""
        self.description = d

    @Memoize
    def cycle_color(self, s):
        """
        Cycle the colors_shuffle (memoized per (instance, s) pair).
        """
        return next(self.cyc)

    def ascii_art(self, text):
        """
        Draw the Ascii Art banner for *text*.

        NOTE(review): figlet_format is not imported anywhere in this module,
        so calling this raises NameError — confirm pyfiglet was meant to be
        imported (the interactivex.py copy has this body commented out).
        """
        fi = figlet_format(text, font='doom')
        for i in fi.split('\n'):
            self.curses_print_line(i, next(self.index_cyc))

    def close_window(self):
        """
        Close screen and restore normal terminal modes.
        """
        global screen
        screen.keypad(0)
        curses.nocbreak()
        curses.echo()
        curses.endwin()

    def suggest(self, word):
        """
        Return candidates from `description` where any space-separated word
        starts with *word* (case-insensitive). Empty input yields no matches.
        """
        rel = []
        if not word:
            return rel
        word = word.lower()
        for candidate in self.description:
            ca = candidate.lower()
            #if ca.startswith(word): rel.append(candidate)
            for eachword in ca.split(" "):
                if eachword.startswith(word):
                    rel.append(candidate)
        return rel

    def curses_print_word(self, word, color_pair_code):
        """
        Print a word at the cursor in the given colour pair.
        """
        global screen
        word = word.encode('utf8')
        screen.addstr(word, curses.color_pair(color_pair_code))

    def curses_print_line(self, line, color_pair_code):
        """
        Print a line, scrolling first when near the bottom of the screen.
        """
        global screen
        line = line.encode('utf8')
        y, x = screen.getyx()
        if y - self.g['height'] == -3:
            self.scroll_down(2, y, x)
            screen.addstr(y, 0, line, curses.color_pair(color_pair_code))
            self.buf[y] = line, color_pair_code
        elif y - self.g['height'] == -2:
            self.scroll_down(3, y, x)
            screen.addstr(y-1, 0, line, curses.color_pair(color_pair_code))
            self.buf[y-1] = line, color_pair_code
        else:
            screen.addstr(y+1, 0, line, curses.color_pair(color_pair_code))
            self.buf[y+1] = line, color_pair_code

    def redraw(self, start_y, end_y, fallback_y, fallback_x):
        """
        Redraw rows [start_y, end_y) from the line buffer, then restore the cursor.
        """
        global screen
        for cursor in range(start_y, end_y):
            screen.move(cursor, 0)
            screen.clrtoeol()
            try:
                line, color_pair_code = self.buf[cursor]
                screen.addstr(cursor, 0, line, curses.color_pair(color_pair_code))
            except KeyError:
                # No buffered line for this row — leave it cleared.
                pass
        screen.move(fallback_y, fallback_x)

    def scroll_down(self, noredraw, fallback_y, fallback_x):
        """
        Scroll the line buffer up by noredraw-1 lines and repaint the screen.

        Fixed: the body referenced the bare names `buf` and `g`, which do not
        exist at module level (guaranteed NameError); they are the instance
        attributes self.buf / self.g.
        """
        global screen
        # Recreate buf
        # noredraw = n means that screen will scroll down n-1 line
        trip_list = heapq.nlargest(noredraw-1, self.buf)
        for i in self.buf:
            if i not in trip_list:
                self.buf[i] = self.buf[i+noredraw-1]
        for j in trip_list:
            self.buf.pop(j)
        # Clear and redraw
        screen.clear()
        self.redraw(1, self.g['height']-noredraw, fallback_y, fallback_x)

    def clear_upside(self, n, y, x):
        """
        Clear n lines above row y, then restore the cursor to (y, x).
        """
        global screen
        for i in range(1, n+1):
            screen.move(y-i, 0)
            screen.clrtoeol()
        screen.refresh()
        screen.move(y, x)

    def display_suggest(self, y, x, word):
        """
        Draw (or clear) the suggestion box for *word* near row y.

        The box goes above the cursor when there is no room below. Box
        heights: 0 lines = no suggestions, 3 = several, 5 = exactly one
        (the command plus its description).
        """
        global screen
        g = self.g
        side = 2
        # Check if need to print upside
        upside = y+6 > int(g['height'])
        # Redraw only when the suggestion set changed since last call.
        sug = self.suggest(word)
        if sug != self.g['prev']:
            if upside:
                # Clearing above is fiddly: branch on the (current, previous)
                # box heights.
                # now: 3-lines / previous : 0 line
                if len(sug) > 1 and not self.g['prev']:
                    self.clear_upside(3, y, x)
                # now: 0-lines / previous :3 lines
                elif not sug and len(g['prev']) > 1:
                    self.redraw(y-3, y, y, x)
                # now: 3-lines / previous :5 lines
                elif len(sug) > 1 == len(g['prev']):
                    self.redraw(y-5, y-3, y, x)
                    self.clear_upside(3, y, x)
                # now: 5-lines / previous :3 lines
                elif len(sug) == 1 < len(g['prev']):
                    self.clear_upside(3, y, x)
                # now: 0-lines / previous :5 lines
                elif not sug and len(g['prev']) == 1:
                    self.redraw(y-5, y, y, x)
                # now: 3-lines / previous :3 lines
                elif len(sug) == len(g['prev']) > 1:
                    self.clear_upside(3, y, x)
                # now: 5-lines / previous :5 lines
                elif len(sug) == len(g['prev']) == 1:
                    self.clear_upside(5, y, x)
                screen.refresh()
            else:
                # Clear downside
                screen.clrtobot()
                screen.refresh()
        self.g['prev'] = sug
        if sug:
            # More than 1 suggestion: a 3-line box listing up to 5 candidates.
            if len(sug) > 1:
                if len(sug) > 5:
                    sug = sug[:5]
                #needed_lenth = sum([len(i)+side for i in sug]) + side
                needed_lenth = max(self.g['width']-5, sum([len(i)+side for i in sug]) + side)
                # Fixed: removed the stdout debug prints that were here —
                # printing while curses owns the terminal corrupts the display.
                if upside:
                    win = curses.newwin(3, needed_lenth, y-3, 0)
                    win.erase()
                    win.box()
                    win.refresh()
                    cur_width = side
                    for i in range(len(sug)):
                        if cur_width+len(sug[i]) > self.g['width']:
                            break
                        screen.addstr(y-2, cur_width, sug[i], curses.color_pair(4))
                        cur_width += len(sug[i]) + side
                        if cur_width > self.g['width']:
                            break
                else:
                    win = curses.newwin(3, needed_lenth, y+1, 0)
                    win.erase()
                    win.box()
                    win.refresh()
                    cur_width = side
                    for i in range(len(sug)):
                        screen.addstr(y+2, cur_width, sug[i], curses.color_pair(4))
                        cur_width += len(sug[i]) + side
                        if cur_width > self.g['width']:
                            break
            # Only 1 suggestion: a 5-line box with the command and its help.
            else:
                can = sug[0]
                if upside:
                    win = curses.newwin(5, len(self.description[can])+2*side, y-5, 0)
                    win.box()
                    win.refresh()
                    screen.addstr(y-4, side, can, curses.color_pair(4))
                    screen.addstr(y-2, side, self.description[can], curses.color_pair(3))
                else:
                    win = curses.newwin(5, len(self.description[can])+2*side, y+1, 0)
                    win.box()
                    win.refresh()
                    screen.addstr(y+2, side, can, curses.color_pair(4))
                    screen.addstr(y+4, side, self.description[can], curses.color_pair(3))

    def inputloop(self):
        """
        Main input loop: reads keys, maintains the current word, history and
        suggestion box, and dispatches completed lines to handle_command().
        Typing 'q' then Enter exits the loop.
        """
        global screen
        word = ''
        screen.addstr("\n" + self.g['prefix'], curses.color_pair(7))
        while True:
            # Current position
            y, x = screen.getyx()
            # Get char
            event = screen.getch()
            try:
                char = chr(event)
            except ValueError:
                # getch() can return codes outside the valid chr() range.
                char = ''
            # Test curses_print_line
            if char == '?':
                self.buf[y] = self.g['prefix'] + '?', 0
                self.ascii_art('dtvd88')
            # TAB to complete
            elif event in self.tab_ary:
                # First tab starts a fresh cycle over the suggestions.
                try:
                    if not self.g['tab_cycle']:
                        self.g['tab_cycle'] = itertools.cycle(self.suggest(word))
                    suggestion = next(self.g['tab_cycle'])
                    # Clear current line
                    screen.move(y, len(self.g['prefix']))
                    screen.clrtoeol()
                    # Print out suggestion
                    word = suggestion
                    screen.addstr(y, len(self.g['prefix']), word)
                    self.display_suggest(y, x, word)
                    screen.move(y, len(word)+len(self.g['prefix']))
                except (StopIteration, curses.error):
                    # No suggestions to cycle, or the draw fell off-screen.
                    pass
            # UP key: previous history entry
            elif event in self.up_ary:
                if self.g['hist']:
                    # Clear current line
                    screen.move(y, len(self.g['prefix']))
                    screen.clrtoeol()
                    # Print out previous history
                    if self.g['hist_index'] > 0 - len(self.g['hist']):
                        self.g['hist_index'] -= 1
                    word = self.g['hist'][self.g['hist_index']]
                    screen.addstr(y, len(self.g['prefix']), word)
                    self.display_suggest(y, x, word)
                    screen.move(y, len(word)+len(self.g['prefix']))
            # DOWN key: next history entry
            elif event in self.down_ary:
                if self.g['hist']:
                    # clear current line
                    screen.move(y, len(self.g['prefix']))
                    screen.clrtoeol()
                    # print out previous history
                    if not self.g['hist_index']:
                        self.g['hist_index'] = -1
                    if self.g['hist_index'] < -1:
                        self.g['hist_index'] += 1
                    word = self.g['hist'][self.g['hist_index']]
                    screen.addstr(y, len(self.g['prefix']), word)
                    self.display_suggest(y, x, word)
                    screen.move(y, len(word)+len(self.g['prefix']))
            # Enter key #### I should get the command out of there?
            # #### Can I register a callback function?
            elif event in self.enter_ary:
                self.g['tab_cycle'] = None
                self.g['hist_index'] = 0
                self.g['hist'].append(word)
                if word == 'q':
                    self.cleanup_command()
                    break
                self.display_suggest(y, x, '')
                screen.clrtobot()
                self.handle_command(word)
                self.buf[y] = self.g['prefix'] + word, 0
                # Touch the screen's end
                if y - self.g['height'] > -3:
                    self.scroll_down(2, y, x)
                    screen.addstr(y, 0, self.g['prefix'], curses.color_pair(7)) ## SHOW NEW PROMPT
                else:
                    screen.addstr(y+1, 0, self.g['prefix'], curses.color_pair(7))
                word = ''
            # Delete / Backspace
            elif event in self.delete_ary:
                self.g['tab_cycle'] = None
                # Touch to line start
                if x < len(self.g['prefix']) + 1:
                    screen.move(y, x)
                    word = ''
                # Middle of line
                else:
                    word = word[:-1]
                    screen.move(y, x-1)
                    screen.clrtoeol()
                    self.display_suggest(y, x, word)
                    screen.move(y, x-1)
            # Any other key: echo it and extend the current word
            else:
                self.g['tab_cycle'] = None
                # Explicitly print char
                try:
                    screen.addstr(char)
                    word += char
                    self.display_suggest(y, x, word)
                    screen.move(y, x+1)
                except (ValueError, curses.error):
                    # e.g. resize / volume-key garbage mid-loop (seen in the wild)
                    pass
        # Reset terminal state
        self.close_window()

    def setup_command(self, outfile):
        """Open the output log (append mode) and load history from a previous session."""
        self.data = open(outfile, 'a')
        self.g['prev'] = None
        self.g['tab_cycle'] = None
        self.g['prefix'] = '[gav]: '
        self.g['hist_index'] = 0
        # Load history from previous session.
        # Fixed: the history file handle was never closed, and the bare
        # except has been narrowed to the actual "file missing" case.
        try:
            with open('completer.hist') as o:
                self.g['hist'] = [i.strip() for i in o.readlines()]
        except OSError:
            self.g['hist'] = []

    def cleanup_command(self):
        """Append this session's history to disk and close the output log."""
        o = open('completer.hist', 'a')
        o.write("\n".join(self.g['hist']))
        o.close()
        self.data.close()

    def handle_command(self, cmd):
        """Log *cmd*; 'n <mode>' starts a new log section and changes the prompt."""
        r1 = re.search(r'^n\s(.*)$', cmd)
        if r1:
            # new data collection mode
            mode = r1.group(1)
            self.g['prefix'] = "[" + mode + "]"
            self.data.write("\n\n# %s\n" % mode)
        else:
            #winsound.Beep(440,300)
            self.data.write(cmd + "\n")
        self.data.flush()
def repl_staff():
    """Run the curses REPL over the cached teacher list.

    Builds a completion dict of teacher name -> short blurb with the login
    id, then hands it to MyRepl. Fixed: the JSON file handle was opened and
    never closed.
    """
    with open('cache/teacherdata/teachers.json', 'r') as fh:
        tch = json.loads(fh.read())
    newdict = {T['name']: 'teacher with id ' + T['login_id'] for T in tch}
    c = MyRepl()
    c.set_my_dict(newdict)
    c.startup('cache/people_logs.txt')
    c.inputloop()
def repl_degs():
    """Build a completion dict from the attainment CSV (column 4 -> column 0).

    The first row is a header and is skipped; an empty column 0 maps to a
    single space. Fixed: the CSV file handle was opened and never closed.
    NOTE(review): startup()/inputloop() are still commented out, so this
    currently only constructs the REPL object without running it.
    """
    newdict = {}
    with open('cache/attainment_masterlist.csv', 'r') as fh:
        for num, row in enumerate(csv.reader(fh, delimiter=",")):
            if num == 0:
                continue  # skip header row
            newdict[row[4]] = row[0] if row[0] else ' '
    #print(newdict)
    #input('ready')
    c = MyRepl()
    c.set_my_dict(newdict)
    #c.startup('cache/g_path_cluster2020_.txt')
    # c.inputloop()
def repl():
    # NOTE(review): this first definition is immediately shadowed by the
    # second `repl` below, so it is dead code. It also references `newdict`,
    # which is not defined in this scope and would raise NameError if this
    # version were ever called. The body grouping is inferred — the original
    # paste lost its indentation; confirm against the HDD copy.
    repl_degs()
    #input('ready')
    c = MyRepl()
    c.set_my_dict(newdict)
    #c.startup('cache/g_path_cluster2020_.txt')
    # c.inputloop()

def repl():
    """Entry point: currently just runs repl_degs()."""
    repl_degs()

759
interactivex.py Normal file
View File

@ -0,0 +1,759 @@
import curses
import heapq, re, csv
import itertools, time, markdown, csv, json, os.path, webbrowser, threading
from functools import wraps
from flask import Flask, request, send_from_directory, Response
from werkzeug.routing import PathConverter
from queue import Queue
from flask import render_template
from importlib import reload
import server
import localcache
from server import *
# Module-level configuration.
q = Queue()  # work queue handed to the Flask thread (currently unused by routes)
# NOTE(review): HOST_NAME was reassigned three times in a row; only the last
# value ever took effect, so the dead assignments were removed.
HOST_NAME = '192.168.1.6' #
PORT_NUMBER = 8080 # Maybe set this to 9000.
datafile = 'lambda.csv'
####
#### This little web server is going to work with the "gui" folder / vue app
####
####
def dict_generator(indict, pre=None):
    """Walk a nested dict/list/tuple structure, yielding printable leaf entries.

    Leaves reached through a dict yield a "path [key, value]\n" string.
    A non-dict node (top level, or an element of a list/tuple) yields the raw
    path list first and then its string form — behaviour kept exactly as the
    original, including the double yield.
    """
    path = list(pre) if pre else []
    if not isinstance(indict, dict):
        yield path + [indict]
        yield "%s %s\n" % (str(path), str([indict]))
        return
    for key, value in indict.items():
        if isinstance(value, dict):
            yield from dict_generator(value, path + [key])
        elif isinstance(value, (list, tuple)):
            for item in value:
                yield from dict_generator(item, path + [key])
        else:
            yield "%s %s\n" % (str(path), str([key, value]))
def print_dict(v, prefix='', indent=''):
    """Render a nested dict/list as nested lists of "path = value" strings.

    Each dict key / list index is appended to *prefix* (followed by an HTML
    line break); scalars terminate the recursion as formatted strings.
    """
    if isinstance(v, dict):
        return [print_dict(child, "%s['%s']<br />" % (prefix, key), indent + " ")
                for key, child in v.items()]
    if isinstance(v, list):
        return [print_dict(child, "%s[%s]<br />" % (prefix, idx), indent + " ")
                for idx, child in enumerate(v)]
    return '%s = %r\n' % (prefix, v)
def walk_file():
    """Load the cached programs JSON and render it with print_dict().

    Fixed: the file handle was opened and never closed; a context manager
    now releases the descriptor deterministically.
    """
    with open('cache/programs/programs_2.txt', 'r') as fh:
        j = json.loads(fh.read())
    return print_dict(j)
def tag(x, y):
    """Wrap content *y* in an HTML element named *x*."""
    return "<{0}>{1}</{0}>".format(x, y)

def tagc(x, c, y):
    """Wrap content *y* in element *x* carrying CSS class *c*."""
    return '<{0} class="{1}">{2}</{0}>'.format(x, c, y)

def a(t, h):
    """Build an anchor with link text *t* pointing at href *h*."""
    return '<a href="{0}">{1}</a>'.format(h, t)
def server_save(key, value):
    """Append a "key=value" line to the server's scratch data file.

    Fixed: the codecs handle was opened and never closed; a context manager
    now guarantees the write is flushed and the descriptor released.
    NOTE(review): `codecs` is not imported in this module — it presumably
    arrives via `from server import *`; confirm.
    """
    with codecs.open('cache/server_data.txt', 'a') as fh:
        fh.write("%s=%s\n" % (str(key), str(value)))
def flask_thread(q):
    """Build and run the Flask app (blocking; run on a worker thread).

    q: the Queue handed in by serve(); accepted but not read by any route.
    NOTE(review): this file appears to be an older copy of interactive.py
    (no SocketIO, no secret key) — confirm whether it is still used.
    """
    #app = Flask(__name__, static_url_path='/cache',
    # static_folder='cache',)
    app = Flask(__name__)
    app.jinja_env.auto_reload = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    # Empty the jinja template cache before every request so edited templates
    # are picked up without restarting the server.
    def before_request():
        app.jinja_env.cache = {}
    app.before_request(before_request)
    #
    # SERVER maintenance type stuff
    @app.route('/rl')
    def restart():
        # Hot-reload the two project modules without restarting the process.
        reload(server)
        reload(localcache)
        return "Server code reloaded"
    # Generic dispatchers: /x/... returns HTML, /api/... returns JSON; the
    # first path segment names a server_dispatch() function, the rest are args.
    @app.route("/x/<func>/<arg>/<arrg>")
    def dispatch3(func,arg,arrg):
        print("2 args")
        return "" + server_dispatch(func, arg, arrg)
    @app.route("/x/<func>/<arg>")
    def dispatch2(func,arg):
        print("1 arg")
        return "" + server_dispatch(func, arg)
    @app.route("/x/<func>")
    def dispatch(func):
        print("0 arg")
        return server_dispatch(func)
    @app.route("/api/<func>/<arg>/<arrg>")
    def dispatch3j(func,arg,arrg):
        print("json, 3 args")
        return Response(server_dispatch(func, arg, arrg), mimetype='text/json')
    @app.route("/api/<func>/<arg>")
    def dispatch2j(func,arg):
        print("json, 1 arg")
        return Response(server_dispatch(func, arg), mimetype='text/json')
    @app.route("/api/<func>")
    def dispatch1j(func):
        print("json, 0 arg")
        return Response(server_dispatch(func), mimetype='text/json')
    @app.route("/")
    def home():
        return server.homepage()
    #
    # STATIC ROUTES
    #
    """@app.route('/lib/<path:path>')
def send_jslib(path):
return send_from_directory('gui/lib', path)"""
    @app.route('/data/<path:path>')
    def send_cachedata(path):
        #myfile = os.path.join('cache', path).replace('\\','/')
        print(path)
        #return app.send_static_file(myfile)
        return send_from_directory('cache', path)
    # Departments, classes in each, and students (with hits) in each of those.
    """@app.route('/iii/<path:path>')
def send_js(path):
return send_from_directory('gui/dist', path)"""
    #@app.route('/hello/')
    #@app.route('/hello/<name>')
    @app.route("/save/<key>/<val>")
    def s(key,val):
        server_save(key,val)
        return tag('h1','Saved.') + "<br />" + tag('p', 'Saved: %s = %s' % (str(key),str(val)))
    @app.route("/sample")
    def do_sample():
        return sample()
    @app.route("/crazy")
    def hello():
        # Demo page: renders the cached programs JSON via walk_file().
        r = '<link rel="stylesheet" href="static/bootstrap.min.css">'
        r += tag('style', 'textarea { white-space:nowrap; }')
        r += tag('body', \
            tagc('div','container-fluid', \
            tagc('div','row', \
            tagc( 'div', 'col-md-6', tag('pre', walk_file() ) ) + \
            tagc( 'div', 'col-md-6', 'Column 2' + a('Shut Down','/shutdown' ) ) ) ) )
        return r
    @app.route("/sd")
    def sd():
        # Ask the werkzeug dev server to shut itself down.
        print('SIGINT or CTRL-C detected. Exiting gracefully')
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()
        return "Server has shut down."
    # Blocks here, serving on all interfaces.
    app.run(host= '0.0.0.0')
def serve():
    """Start the Flask app on a worker thread, then open a browser tab at it."""
    worker = threading.Thread(target=flask_thread, args=(q,))
    worker.start()
    webbrowser.open_new_tab("http://localhost:5000")

if __name__ == '__main__':
    serve()
"""class HelloWorldExample(object):
def make_teacher_rel(self, tchr, clss):
with self._driver.session() as tx:
tx.run("MERGE (tchr:Teacher {name: $tchr}) MERGE (tchr)-[:TEACHES]->(clss:Class {name: $clss})", \
tchr=tchr, clss=clss)
def __init__(self, uri, user, password):
self._driver = GraphDatabase.driver(uri, auth=(user, password))
def close(self):
self._driver.close()
def print_greeting(self, message):
with self._driver.session() as session:
greeting = session.write_transaction(self._create_and_return_greeting, message)
print(greeting)
@staticmethod
def _create_and_return_greeting(tx, message):
result = tx.run("CREATE (a:Greeting) "
"SET a.message = $message "
"RETURN a.message + ', from node ' + id(a)", message=message)
return result.single()[0]
"""
def make_teacher_rel(g, tchr, clss):
    """Ensure a Teacher node, a Class node and a TEACHES edge exist in graph *g*."""
    cypher = ("MERGE (tchr:Teacher {name: $tchr}) "
              "MERGE (tchr)-[:TEACHES]->(clss:Class {name: $clss})")
    g.run(cypher, tchr=tchr, clss=clss)
# Global handle to the curses screen; re-bound by MyRepl.startup().
screen = 0

def Memoize(func):
    """
    Memoize decorator: caches results keyed by the positional-argument tuple.
    """
    cache = {}
    @wraps(func)
    def wrapper(*args):
        try:
            return cache[args]
        except KeyError:
            result = func(*args)
            cache[args] = result
            return result
    return wrapper
class MyRepl:
description = {
"switch ": "Switch stream. You can use either 'switch public' or 'switch mine'",
"home " : "Show your timeline. 'home 7' will show 7 tweet.",
"harry " : "a guys name.",
"homo " : "means the same.",
"view " : "'view @mdo' will show @mdo's home.",
"h " : "Show help.",
"t " : "'t opps' will tweet 'opps' immediately.",
"s " : "'s #AKB48' will search for '#AKB48' and return 5 newest tweets."
}
def startup(self, outfile):
global screen # make it self
self.g = {}
self.buf = {}
screen = None
self.enter_ary = [curses.KEY_ENTER,10]
self.delete_ary = [curses.KEY_BACKSPACE,curses.KEY_DC,8,127,263]
self.tab_ary = [9]
self.up_ary = [curses.KEY_UP]
self.down_ary = [curses.KEY_DOWN]
# Init curses screen
screen = curses.initscr()
screen.keypad(1)
curses.noecho()
try:
curses.start_color()
curses.use_default_colors()
for i in range(0, curses.COLORS):
curses.init_pair(i + 1, i, -1)
except curses.error:
pass
curses.cbreak()
self.g['height'] , self.g['width'] = screen.getmaxyx()
#print("Width: %i" % self.g['width'])
# Init color function
s = self
self.white = lambda x:curses_print_word(x,7) #0)
self.grey = lambda x:curses_print_word(x, 3) #3)1)
self.red = lambda x:curses_print_word(x,7) #2)
self.green = lambda x:curses_print_word(x, 3) #3)
self.yellow = lambda x:curses_print_word(x,7) #4)
self.blue = lambda x:curses_print_word(x,3)
self.magenta = lambda x:curses_print_word(x,7) #6)
self.cyan = lambda x:curses_print_word(x,7) #7)
self.colors_shuffle = [s.grey, s.red, s.green, s.yellow, s.blue, s.magenta, s.cyan]
self.cyc = itertools.cycle(s.colors_shuffle[1:])
self.index_cyc = itertools.cycle(range(1,8))
self.setup_command(outfile)
def set_my_dict(self,d):
self.description = d
@Memoize
def cycle_color(self, s):
"""
Cycle the colors_shuffle
"""
return next(self.cyc)
def ascii_art(self, text):
"""
Draw the Ascii Art
"""
return
#fi = figlet_format(text, font='doom')
#for i in fi.split('\n'):
#self.curses_print_line(i,next(self.index_cyc))
def close_window(self, ):
"""
Close screen
"""
global screen
screen.keypad(0);
curses.nocbreak();
curses.echo()
curses.endwin()
def suggest(self, word):
"""
Find suggestion
"""
rel = []
if not word: return rel
word = word.lower()
for candidate in self.description:
ca = candidate.lower()
#if ca.startswith(word): rel.append(candidate)
for eachword in ca.split(" "):
if eachword.startswith(word):
rel.append(candidate)
return rel
def curses_print_word(self, word,color_pair_code):
"""
Print a word
"""
global screen
word = word.encode('utf8')
screen.addstr(word,curses.color_pair(color_pair_code))
def curses_print_line(self, line,color_pair_code):
"""
Print a line, scroll down if need
"""
global screen
line = line.encode('utf8')
y,x = screen.getyx()
if y - self.g['height'] == -3:
self.scroll_down(2,y,x)
screen.addstr(y,0,line,curses.color_pair(color_pair_code))
self.buf[y] = line, color_pair_code
elif y - self.g['height'] == -2:
self.scroll_down(3,y,x)
screen.addstr(y-1,0,line,curses.color_pair(color_pair_code))
self.buf[y-1] = line ,color_pair_code
else:
screen.addstr(y+1,0,line,curses.color_pair(color_pair_code))
self.buf[y+1] = line, color_pair_code
def redraw(self, start_y,end_y,fallback_y,fallback_x):
"""
Redraw lines from buf
"""
global screen
for cursor in range(start_y,end_y):
screen.move(cursor,0)
screen.clrtoeol()
try:
line, color_pair_code = self.buf[cursor]
screen.addstr(cursor,0,line,curses.color_pair(color_pair_code))
except:
pass
screen.move(fallback_y,fallback_x)
def scroll_down(self, noredraw,fallback_y,fallback_x):
"""
Scroll down 1 line
"""
global screen
# Recreate buf
# noredraw = n means that screen will scroll down n-1 line
trip_list = heapq.nlargest(noredraw-1,buf)
for i in buf:
if i not in trip_list:
self.buf[i] = self.buf[i+noredraw-1]
for j in trip_list:
buf.pop(j)
# Clear and redraw
screen.clear()
self.redraw(1,g['height']-noredraw,fallback_y,fallback_x)
def clear_upside(self, n,y,x):
"""
Clear n lines upside
"""
global screen
for i in range(1,n+1):
screen.move(y-i,0)
screen.clrtoeol()
screen.refresh()
screen.move(y,x)
def display_suggest(self, y,x,word):
"""
Display box of suggestion
"""
global screen
g = self.g
side = 2
# Check if need to print upside
upside = y+6 > int(g['height'])
# Redraw if suggestion is not the same as previous display
sug = self.suggest(word)
if sug != self.g['prev']:
# 0-line means there is no suggetions (height = 0)
# 3-line means there are many suggetions (height = 3)
# 5-line means there is only one suggetions (height = 5)
# Clear upside section
if upside:
# Clear upside is a bit difficult. Here it's seperate to 4 case.
# now: 3-lines / previous : 0 line
if len(sug) > 1 and not self.g['prev']:
self.clear_upside(3,y,x)
# now: 0-lines / previous :3 lines
elif not sug and len(g['prev'])>1:
self.redraw(y-3,y,y,x)
# now: 3-lines / previous :5 lines
elif len(sug) > 1 == len(g['prev']):
self.redraw(y-5,y-3,y,x)
self.clear_upside(3,y,x)
# now: 5-lines / previous :3 lines
elif len(sug) == 1 < len(g['prev']):
self.clear_upside(3,y,x)
# now: 0-lines / previous :5 lines
elif not sug and len(g['prev'])==1:
self.redraw(y-5,y,y,x)
# now: 3-lines / previous :3 lines
elif len(sug) == len(g['prev']) > 1:
self.clear_upside(3,y,x)
# now: 5-lines / previous :5 lines
elif len(sug) == len(g['prev']) == 1:
self.clear_upside(5,y,x)
screen.refresh()
else:
# Clear downside
screen.clrtobot()
screen.refresh()
self.g['prev'] = sug
if sug:
# More than 1 suggestion
if len(sug) > 1:
if len(sug) > 5: sug = sug[:5]
#needed_lenth = sum([len(i)+side for i in sug]) + side
needed_lenth = max( self.g['width']-5, sum([len(i)+side for i in sug]) + side)
print(self.g['width'])
print(word)
print(sug)
print(needed_lenth)
if upside:
win = curses.newwin(3,needed_lenth,y-3,0)
win.erase()
win.box()
win.refresh()
cur_width = side
for i in range(len(sug)):
if cur_width+len(sug[i]) > self.g['width']: break
screen.addstr(y-2,cur_width,sug[i],curses.color_pair(4))
cur_width += len(sug[i]) + side
if cur_width > self.g['width']:
break
else:
win = curses.newwin(3,needed_lenth,y+1,0)
win.erase()
win.box()
win.refresh()
cur_width = side
for i in range(len(sug)):
screen.addstr(y+2,cur_width,sug[i],curses.color_pair(4))
cur_width += len(sug[i]) + side
if cur_width > self.g['width']:
break
# Only 1 suggestion
else:
can = sug[0]
if upside:
win = curses.newwin(5,len(self.description[can])+2*side,y-5,0)
win.box()
win.refresh()
screen.addstr(y-4,side,can,curses.color_pair(4))
screen.addstr(y-2,side,self.description[can],curses.color_pair(3))
else:
win = curses.newwin(5,len(self.description[can])+2*side,y+1,0)
win.box()
win.refresh()
screen.addstr(y+2,side,can,curses.color_pair(4))
screen.addstr(y+4,side,self.description[can],curses.color_pair(3))
def inputloop(self, ):
    """
    Main loop input

    Reads one key at a time from the curses screen and implements a small
    line editor: TAB completion (cycling through suggestions), UP/DOWN
    history recall, ENTER dispatch via handle_command, and backspace.
    Typing 'q' then ENTER leaves the loop; close_window() runs on exit.
    NOTE(review): indentation reconstructed from a flattened dump — confirm
    against the original file.
    """
    global screen
    word = ''
    screen.addstr("\n" + self.g['prefix'],curses.color_pair(7))
    while True:
        # Current position
        y,x = screen.getyx()
        # Get char
        event = screen.getch()
        try :
            char = chr(event)
        except:
            # Non-character key codes (function keys etc.) have no chr().
            char = ''
        # Test curses_print_line
        if char == '?':
            self.buf[y] = self.g['prefix'] + '?', 0
            self.ascii_art('dtvd88')
        # TAB to complete
        elif event in self.tab_ary:
            # First tab
            try:
                if not self.g['tab_cycle']:
                    # First TAB press: build an endless cycle over the
                    # suggestions so repeated TABs rotate candidates.
                    self.g['tab_cycle'] = itertools.cycle(self.suggest(word))
                suggestion = next(self.g['tab_cycle'])
                # Clear current line
                screen.move(y,len(self.g['prefix']))
                screen.clrtoeol()
                # Print out suggestion
                word = suggestion
                screen.addstr(y,len(self.g['prefix']),word)
                self.display_suggest(y,x,word)
                screen.move(y,len(word)+len(self.g['prefix']))
            except:
                # Best-effort: any completion failure (e.g. no suggestions)
                # is silently ignored.
                pass
        # UP key
        elif event in self.up_ary:
            if self.g['hist']:
                # Clear current line
                screen.move(y,len(self.g['prefix']))
                screen.clrtoeol()
                # Print out previous history
                # hist_index walks backwards (0, -1, -2, ...) bounded by
                # the history length.
                if self.g['hist_index'] > 0 - len(self.g['hist']):
                    self.g['hist_index'] -= 1
                word = self.g['hist'][self.g['hist_index']]
                screen.addstr(y,len(self.g['prefix']),word)
                self.display_suggest(y,x,word)
                screen.move(y,len(word)+len(self.g['prefix']))
        # DOWN key
        elif event in self.down_ary:
            if self.g['hist']:
                # clear current line
                screen.move(y,len(self.g['prefix']))
                screen.clrtoeol()
                # print out previous history
                if not self.g['hist_index']:
                    self.g['hist_index'] = -1
                if self.g['hist_index'] < -1:
                    self.g['hist_index'] += 1
                word = self.g['hist'][self.g['hist_index']]
                screen.addstr(y,len(self.g['prefix']),word)
                self.display_suggest(y,x,word)
                screen.move(y,len(word)+len(self.g['prefix']))
        # Enter key #### I should get the command out of there?
        # #### Can I register a callback function?
        elif event in self.enter_ary:
            self.g['tab_cycle'] = None
            self.g['hist_index'] = 0
            self.g['hist'].append(word)
            if word== 'q':
                self.cleanup_command()
                break;
            self.display_suggest(y,x,'')
            screen.clrtobot()
            self.handle_command(word)
            self.buf[y] = self.g['prefix'] + word, 0
            # Touch the screen's end
            if y - self.g['height'] > -3:
                self.scroll_down(2,y,x)
                screen.addstr(y,0,self.g['prefix'],curses.color_pair(7)) ## SHOW NEW PROMPT
            else:
                screen.addstr(y+1,0,self.g['prefix'],curses.color_pair(7))
            word = ''
        # Delete / Backspace
        elif event in self.delete_ary:
            self.g['tab_cycle'] = None
            # Touch to line start
            if x < len(self.g['prefix']) + 1:
                screen.move(y,x)
                word = ''
            # Midle of line
            else:
                word = word[:-1]
                screen.move(y,x-1)
                screen.clrtoeol()
                self.display_suggest(y,x,word)
                screen.move(y,x-1)
        # Another keys
        else:
            self.g['tab_cycle'] = None
            # Explicitly print char
            try:
                screen.addstr(char)
                word += char
                self.display_suggest(y,x,word)
                screen.move(y,x+1)
            except ValueError as e: # got errors here when i adjusted the volume....
                pass
    # Reset
    self.close_window()
def setup_command(self,outfile):
    """Open the output log and initialise the REPL's per-session state.

    Args:
        outfile: path of the data log; opened in append mode so repeated
            sessions accumulate into the same file (handle kept on
            ``self.data`` and closed by ``cleanup_command``).
    """
    self.data = open(outfile,'a')
    self.g['prev'] = None
    self.g['tab_cycle'] = None
    self.g['prefix'] = '[gav]: '
    self.g['hist_index'] = 0
    # Load history from previous session.  Use a context manager so the
    # handle is closed (the original leaked it), and catch only OSError —
    # a missing/unreadable history file just means we start empty.
    try:
        with open('completer.hist') as histfile:
            self.g['hist'] = [line.strip() for line in histfile]
    except OSError:
        self.g['hist'] = []
def cleanup_command(self):
    """Persist the command history and close the output log.

    History is appended to ``completer.hist`` (the file ``setup_command``
    reads on the next run).  A context manager guarantees the history
    handle is closed even if the write fails; the original left it open.
    """
    with open('completer.hist','a') as histfile:
        histfile.write("\n".join(self.g['hist']))
    self.data.close()
def handle_command(self, cmd):
    """Process one completed input line.

    A line of the form ``n <mode>`` switches to a new data-collection
    mode: the prompt becomes ``[<mode>]`` and a section header is written
    to the log.  Any other line is appended verbatim to the log.
    """
    mode_match = re.match(r'^n\s(.*)$', cmd)
    if mode_match is None:
        # Plain data line: log it as-is.
        #winsound.Beep(440,300)
        self.data.write(cmd + "\n")
        self.data.flush()
    else:
        # new data collection mode
        mode = mode_match.group(1)
        self.g['prefix'] = "[" + mode + "]"
        self.data.write("\n\n# %s\n" % mode)
def repl_staff():
    """Launch the completer REPL primed with teacher names.

    Builds a name -> description mapping from the cached teacher JSON and
    hands it to a MyRepl instance logging to cache/people_logs.txt.
    """
    teachers = json.loads( open('cache/teacherdata/teachers.json','r').read() )
    roster = {t['name']: 'teacher with id ' + t['login_id'] for t in teachers}
    console = MyRepl()
    console.set_my_dict(roster)
    console.startup('cache/people_logs.txt')
    console.inputloop()
def repl_degs():
    """Prime the completer REPL with degree/attainment data.

    Reads cache/attainment_masterlist.csv, skipping the header row, and
    maps column 4 to column 0 (a single space when column 0 is empty).
    The startup/inputloop calls remain disabled, as in the original.
    """
    reader = csv.reader( open('cache/attainment_masterlist.csv','r'),delimiter=",")
    lookup = {}
    for rownum, row in enumerate(reader):
        if rownum > 0:
            lookup[row[4]] = row[0] if row[0] else ' '
    #print(lookup)
    #input('ready')
    console = MyRepl()
    console.set_my_dict(lookup)
    #console.startup('cache/g_path_cluster2020_.txt')
    # console.inputloop()
def repl():
    """Entry point: run the degree/attainment completer REPL.

    The original file defined ``repl`` twice; the first version went on to
    reference an undefined ``newdict`` (a guaranteed NameError) and was
    dead code shadowed by the second definition.  Only the working form is
    kept.
    """
    repl_degs()

2065
localcache.py Normal file

File diff suppressed because it is too large Load Diff

141
main.py Normal file
View File

@ -0,0 +1,141 @@
"""
Main entry point for Gavilan Canvas Tools App.
"""
#import datetime
from HTMLParser import HTMLParseError
from bs4 import BeautifulSoup as bs
from bs4 import Comment
from collections import defaultdict
from datetime import date
from datetime import datetime
from datetime import timedelta
from dateutil import parser
from dateutil import tz
from itertools import groupby
from sets import Set
from time import strptime, mktime
import base64
import codecs
import collections
import csv
import gzip
import hashlib
import hmac
import html
import htmlentitydefs
import html2markdown as h2m
import imghdr
import isoweek
import json
import math
import numpy as np
import os
import sys
import glob
import pandas as pd
import pdb
import pysftp
import pytz
import re
import requests
import shutil
import sqlite3
import subprocess
import time
import urllib
import webbrowser
import xlwt
import checker
from pipelines import *
from stats import *
from users import *
from util import *
from courses import *
from tasks import *
from outcomes import *
from content import *
#from upload_to_web import put_file
if __name__ == "__main__":
print ("")
options = {
# Basic info & random questions
39:['List all terms', getTerms],
1: ['Current Activity',getCurrentActivity] ,
3: ['List Course Info', getCourses] ,
5: ['List users in a course', getUsersInCourse] ,
8: ['List courses in a term', getCoursesInTerm] ,
12:['Get current classes', class_logs],
13:['Logs for one user', user_logs],
14:['Recent logins, last 5 min', recent_logins],
# User tracking
#4: ['List all users with a gavilan.edu address (employees)', getTeacherRoles] ,
27:['Grades summary of a semester', grades_rundown] ,
30:['List inactive teachers in term', getInactiveTeachersInTerm] ,
6: ['List all teachers', getAllTeachers] ,
15:['All teachers in a term',getAllTeachersInTerm],
16:['Make log of teacher activity',teacherActivityLog],
# Sched or Semester info
#17:['Construct schedule from html file',constructSchedule],
18:['List late-start classes',list_latestarts], ###
19:['External tools',externaltool],
# Tasks
9: ['Upload a photo', uploadPhoto],
10:['Download new photos', downloadPhoto],
11:['Check for avatar',checkForAvatar],
25:['X-List 190 sections', xlist_cwe] , ###
28:['Check accessibility of a course', accessible_check] ,
29:['Switch enrollments of a shell to all teachers', switch_enrol] ,
35:['Enroll user to all active courses in a semester', enroll_accred],
36:['Fix an older course so it can be enrolled again, add accred', unrestrict_course],
38:['Modify external tool', modify_x_tool],
# Post semester
2: ['Positive Attendance Report', pos_atten] ,
26:['20/60 hours calculation', hours_calc] ,
# Outcomes
20:['List outcomes and groups at account level',outcome_groups],
21:['Get outcome results',outcome_report2],
22:['List outcomes attached to classes', outcomes_attached_to_courses] ,
23:['Read the SLOs local file', read_slo_source] ,
24:['Outcome overview and sync', outcome_overview] ,
# Content editing or pulling
31:['Auto update a canvas page to remove fancy html', update_page] ,
32:['Download a courses pages for offline updating', grab_course_pages] ,
33:['Upload course pages back to a class', put_course_pages],
34:['Swap course youtube embeds', swap_youtube_subtitles],
37:['Test the iframe swap', test_swap],
}
for key in options:
print str(key) + '.\t' + options[key][0]
resp = raw_input('\nChoose: ')
results = []
results_dict = {}
# Call the function in the options dict
x = options[ int(resp)][1]()

57
myconsole.py Normal file
View File

@ -0,0 +1,57 @@
import importlib, signal
import outcomes
import pipelines
import curric2022
def handler(signum, frame):
    """SIGINT handler: acknowledge the interrupt and terminate with status 1."""
    print("\n\nCancelled.\n\n")
    raise SystemExit(1)
def mainloop():
    """Show the tools menu, run the chosen action, and repeat.

    Worker modules are re-imported on every pass so code edits are picked
    up without restarting the console.  Uses ``while True`` instead of the
    original tail-recursive ``mainloop()`` self-call, which grew the stack
    on every menu choice and would eventually raise RecursionError.
    """
    while True:
        print('')
        options = { 1: ['run sample course', curric2022.sampleclass],
            2: ['pattern matcher style', curric2022.matchstyle],
            3: ['pattern matcher - test on all classes', curric2022.match_style_test],
            4: ['process all classes', curric2022.path_style_test],
            5: ['process all programs', curric2022.path_style_prog],
            6: ['show course outcomes', curric2022.all_outcomes],
            7: ['Main outcome show & modify for a semester', outcomes.outcome_overview],
            8: ['The outcome groups and links in iLearn', outcomes.outcome_groups],
            9: ['Outcome report #2 sample', outcomes.outcome_report2],
            10: ['Outcome groups dump', outcomes.outcome_groups_dump],
            11: ['All outcomes attached to courses', outcomes.outcomes_attached_to_courses],
            0: ['exit', exit],
        }
        for key in options:
            print(str(key) + '.\t' + options[key][0])
        print('')
        #resp = eval(input('Choose: '))
        resp = input('Choose: ')
        # Reload so the menu always dispatches to the latest saved code.
        importlib.reload(outcomes)
        importlib.reload(pipelines)
        importlib.reload(curric2022)
        # Call the function in the options dict
        options[ int(resp)][1]()
        print("\n\n\n\n")
# Install the Ctrl-C handler so an interrupt exits cleanly rather than
# dumping a KeyboardInterrupt traceback, then start the menu loop.
signal.signal(signal.SIGINT, handler)
mainloop()

68
new flex app.md Normal file
View File

@ -0,0 +1,68 @@
## Ideas and links
This, spacy: https://www.analyticsvidhya.com/blog/2020/06/nlp-project-information-extraction/
Stackexchange: https://datascience.stackexchange.com/questions/12053/a-few-ideas-to-parse-events-from-a-text-document
https://stackoverflow.com/questions/2587663/natural-language-parsing-of-an-appointment
Sherlock (javascript): https://github.com/neilgupta/Sherlock
Plain python: dateutil.parser.parse("today is 21 jan 2016", fuzzy=True)
NLTK reference: https://stackoverflow.com/questions/10340540/using-the-nltk-to-recognise-dates-as-named-entities?noredirect=1&lq=1
Multiple plain python packages: https://stackoverflow.com/questions/19994396/best-way-to-identify-and-extract-dates-from-text-python?noredirect=1&lq=1
Chrono - nat lang date parser javascript - https://github.com/wanasit/chrono
https://github.com/wanasit/chrono-python
Natty - java nat language date parser - https://github.com/joestelmach/natty
## Snippets that I'd like my program to find the fields and put them in the right slots automatically (separated by --)
Join Better Together Labs for an exclusive Zoom workshop:
Creative and Playful Ways to Engage Learners on Zoom, Part 3
Tuesday, August 11, 2020, 1:00pm Pacific/4:00pm Eastern, 90 minutes
This hands-on workshop will explore Zoom games and activities that foster engagement, provide a sense of community, and leave your learners looking forward to more.
--
Multiple Dates
https://livetraining.zoom.us/webinar/register/8915786869708/WN_Qkc7KpkNSFOdlTwpZkGFlQ
Topic
Getting Started with Zoom Meetings
Description
Ready to start using Zoom, but need some help? Drop-in for our daily (Mon-Fri) quick starts! A Zoom expert will take you through a 45-minute high-level tour of Zoom and cover the basics to get you up and running. Its as simple as logging in, scheduling a meeting, and finding the controls. Start Zooming today! Stick around to get all your burning questions answered through live Q&A!
--
Monday @ 2pm
Tuesday @ 10am and 2pm
Thursday @ 10am and 2pm
Friday @ 10am
(All times in PST)
Join Zoom expert Raul Montes to learn the Zoom basics: scheduling, recording, screen sharing, and more.
Register Now: https://zoom.us/webinar/register/weeklylivedemo
--

1577
notebook.ipynb Normal file

File diff suppressed because it is too large Load Diff

1340
outcomes.py Normal file

File diff suppressed because it is too large Load Diff

130
outcomes2022.py Normal file
View File

@ -0,0 +1,130 @@
# Outcomes 2023
# Tasks:
#
# - List all courses (in semester) in iLearn:
# + SLOs associated with the course
# + Whether they are current or inactive
# + Whether they are attached to an assessment
# + Whether and by how many students, they have been assessed
#
# - Fetch most current SLOs from Curricunet
# + Assemble multiple versions of a (CQ) course and determine which semesters they apply to
# + Whether they are present in the relevant classes in iLearn
# + Insert SLO into course if not present
# + Mark as inactive (change name) if necessary
# - Issue:
# + Course naming / sections joined...
import concurrent.futures
import pandas as pd
from pipelines import fetch, url, header
from courses import getCoursesInTerm
import codecs, json
from path_dict import PathDict
# Worker-thread count for the Canvas API fan-out below.
NUM_THREADS = 8
# 0 lets getCoursesInTerm serve from its cache; presumably nonzero forces
# a refetch — TODO confirm against courses.getCoursesInTerm.
get_fresh = 0
# 176 appears to be the Canvas enrollment-term id being reported on —
# TODO confirm.
sem_courses = getCoursesInTerm(176,get_fresh)
# shorter list for test?
#sem_courses = sem_courses[:50]
print("Got %i courses in current semester." % len(sem_courses))
def course_slo_getter(q):
    """Fetch every outcome group (and its outcomes) for one course.

    Args:
        q: a (name, id) pair for the course; passed as a single argument
            and unpacked here because the thread pool submits it that way.

    Returns:
        The list of outcome-group dicts from the Canvas API, each enriched
        with 'outcomes' and a 'full_outcomes' id->detail map, with an info
        dict {'ilearnname', 'ilearnid'} prepended at index 0 so consumers
        can always read element 0.
    NOTE(review): indentation reconstructed from a flattened dump; the
    parameter unpacking shadows the builtin ``id`` — confirm.
    """
    (name,id) = q
    info = {'ilearnname':name,'ilearnid':id}
    print(" + Thread getting %s %s" % (str(name),str(id)))
    u1 = url + "/api/v1/courses/%s/outcome_groups" % str(id)
    og_for_course = fetch(u1)
    if len(og_for_course):
        for og in og_for_course:
            if "outcomes_url" in og:
                # Resolve the group's outcome links, then fetch each
                # outcome's full record keyed by its id.
                outcomes = fetch(url + og["outcomes_url"])
                og['outcomes'] = outcomes
                og['full_outcomes'] = {}
                for oo in outcomes:
                    print(" -> " + url + oo['outcome']['url'])
                    this_outcome = fetch( url + oo['outcome']['url'] )
                    og['full_outcomes'][this_outcome['id']] = this_outcome
    # Prepend the course info so element 0 is always the identity record.
    og_for_course.insert(0,info)
    print(" - Thread %s DONE" % str(id))
    return og_for_course
# Fan out one API-fetch job per course across NUM_THREADS workers; results
# are gathered as each future completes, so the order of `output` is
# nondeterministic.
output = []
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as pool:
    results = []
    for C in sem_courses:
        print("Adding ", C['name'], C['id'], " to queue")
        # (name, id) travels as one list argument, unpacked in
        # course_slo_getter.
        results.append( pool.submit(course_slo_getter, [C['name'], C['id']] ) )
    print("-- Done")
    print("results array has %i items" % len(results))
    for r in concurrent.futures.as_completed(results):
        output.append(r.result())
# Keep a raw JSON dump of everything fetched, for debugging/auditing.
raw_log = codecs.open('cache/outcome_raw_log.txt','w','utf-8')
raw_log.write( json.dumps(output,indent=2) )
def ilearn_shell_slo_to_csv(shell_slos):
    """Flatten per-course SLO data into a wide CSV (cache/outcome.csv).

    Args:
        shell_slos: list of per-course lists as produced by
            course_slo_getter — element 0 is the course info dict; element
            1, when present, is the first outcome group with 'outcomes'
            and 'full_outcomes' attached.

    Each course becomes one row with up to 10 outcome slots
    (o<i>_id / o<i>_vendor_guid / o<i>_desc / o<i>_assd); '!' marks a
    field that could not be extracted.  The 'crn' column is declared but
    never populated, as in the original.
    """
    cols = ['canvasid','name','crn','has_outcomes',]
    for i in range(1,11):
        cols.append("o%i_id" % i)
        cols.append("o%i_vendor_guid" % i)
        cols.append("o%i_desc" % i)
        cols.append("o%i_assd" % i)
    rows = []
    for S in shell_slos:
        short = S[0]
        this_crs = {'canvasid':short['ilearnid'], 'name':short['ilearnname'], 'has_outcomes':0, }
        if len(S)>1:
            full = S[1]
            this_crs['has_outcomes'] = 1
            for i, o in enumerate(full['outcomes'], start=1):
                try:
                    this_id = int(o['outcome']['id'])
                    this_crs['o%i_id' % i] = o['outcome']['id']
                except Exception:
                    this_crs['o%i_id' % i] = '!'
                try:
                    this_crs['o%i_desc' % i] = full['full_outcomes'][this_id]['description']
                except Exception:
                    this_crs['o%i_desc' % i] = '!'
                try:
                    # NOTE(review): compares 'assessed' to the *string*
                    # 'True'; if the API returns a real boolean this is
                    # always 0 — confirm.
                    assessed = 0
                    if full['full_outcomes'][this_id]['assessed'] == 'True':
                        assessed = 1
                    this_crs['o%i_assd' % i] = assessed
                except Exception:
                    this_crs['o%i_assd' % i] = '!'
                try:
                    this_crs['o%i_vendor_guid' % i] = full['full_outcomes'][this_id]['vendor_guid']
                except Exception:
                    this_crs['o%i_vendor_guid' % i] = '!'
        rows.append(this_crs)
    # Build the frame once from the accumulated rows.  The original called
    # pd.concat inside the loop, copying the whole frame per course
    # (quadratic); this is equivalent and linear.
    df = pd.DataFrame(rows, columns=cols)
    df.to_csv('cache/outcome.csv')
    print(df)

ilearn_shell_slo_to_csv(output)

27
patterns_8020.py Normal file
View File

@ -0,0 +1,27 @@
from pampy import _
# Match patterns for the 80/20 curriculum JSON export (pampy's `_` wildcard).
#
# (programs) entityType entityTitle status proposalType sectionName lastUpdated lastUpdatedBy
# fieldName displayName lookUpDisplay fieldValue instanceSortOrder
# lookUpDataset (array of dicts, each has keys: name, value, and corresponding values.)
# subsections or fields (arrays) - ignore for now just takem in order
# (courses) same as above?
# html values: markdown convert?
pat8020 = [
    {"fieldName": _},
    {"displayName": _},
    {"entityType": _},
    {"entityTitle": _},
    {"lookUpDisplay": _},
    {"fieldValue": _},
    # Award-type lookup: matched on the nested attribute name.
    {"attributes": {"fieldName": "Award Type"},
     "lookUpDisplay": _},
]

560
patterns_topdown.py Normal file
View File

@ -0,0 +1,560 @@
from pampy import _
# pampy pattern/handler pairs for walking the CurricUNET "top-down" JSON
# export.  `pat` holds alternating [pattern, handler, pattern, handler, ...]
# entries; each handler packages the values captured by the `_` wildcards
# into a (kind, fields) tuple.
pat = []
# lookup field
p0 = { "attributes": {
        "fieldName": _,
        "fieldId": _,
        "isLookUpField": True,
        "displayName": _
    },
    "lookUpDisplay": _,
    "dataTypeDetails": {
        "type": "lookup"
    },
    "fieldValue": _ }
def pp0(a,b,c,d,e):
    """Package a matched lookup field as ('lookup field', fields)."""
    r = ("lookup field", {'fieldname':a,'id':b,'displayname':c,'lookupdisplay':d,'value':e})
    #print(r)
    return r
# num field
p1 = {"attributes":
        {"fieldName": _,
        "fieldId": _,
        "isLookUpField": False,
        "displayName": _,
        },
    "dataTypeDetails":
        {"scale": _,
        "type": "numeric",
        "precision": _,
        },
    "fieldValue": _,
    }
def pp1(a,b,c,d,e,f):
    """Package a matched numeric field (d/e capture scale and precision,
    which are deliberately dropped)."""
    #r = "Generic Num Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a,b,c,f)
    r = ("num field", {'fieldname':a,'id':b,'displayname':c,'value':f})
    #print(r)
    return r
# string field
p2 = {"attributes":
        {"fieldName": _,
        "fieldId": _,
        "isLookUpField": False,
        "displayName": _,
        },
    "dataTypeDetails":
        {"type": "string",
        "maxLength": _,
        },
    "fieldValue": _,
    }
def pp2(a,b,c,d,e):
    """Package a matched string field (d captures maxLength, dropped)."""
    #r = "String Label: %s (id %s) Value: %s" % (a,b,e)
    r = ("string field", {'fieldname':a,'id':b,'displayname':c,'value':e})
    #print(r)
    return r
# flag field
p3 = {"attributes":
        {"fieldName": _,
        "fieldId": _,
        "isLookUpField": False,
        "displayName": _,
        },
    "dataTypeDetails":
        {"type": "flag",
        },
    "fieldValue": _,
    }
def pp3(a,b,c,d):
    """Package a matched boolean/flag field."""
    #r = "Generic Flag Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a,b,c,d)
    r = ("flag field", {'fieldname':a,'id':b,'displayname':c,'value':d})
    #print(r)
    return r
# attributes
p4 = {"attributes": _,
    "subsections": _,
    "fields": _
    }
def pp4(a,b,c):
    """Package a generic attributes/subsections/fields container node."""
    r = ("attributes", {'attributes':a, 'subsections':b, 'fields':c})
    #print(r)
    return r
# section
p5 = {"sectionOrInstance": "section",
    "sectionName": _,
    "sectionSortOrder": _,
    "oneToManySection": _,
    }
def pp5(a,b,c):
    """Package a section header node."""
    r = ("section", {'name':a, 'sortorder':b, 'onetomanysection':c})
    #print(r)
    return r
# sectionInstance
p6 = {"instanceId": _,
    "sectionOrInstance": "sectionInstance",
    "instanceSortOrder": _,
    "sectionName": _,
    }
def pp6(a,b,c):
    """Package a section-instance node."""
    r = ("sectioninstance", {'id':a, 'sortorder':b, 'name':c })
    #print(r)
    return r
# Register pattern/handler pairs in match order (most specific first).
pat.append( p0 )
pat.append( pp0 )
pat.append( p1 )
pat.append( pp1 )
pat.append( p2 )
pat.append( pp2 )
pat.append( p3 )
pat.append( pp3 )
pat.append( p4 )
pat.append( pp4 )
pat.append( p5 )
pat.append( pp5 )
pat.append( p6 )
pat.append( pp6 )
"""
curic_patterns.append( {
"attributes": {
"fieldName": "Division",
"fieldId": 65000,
"isLookUpField": True,
"displayName": "Division"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
} )
def div1(a,b):
r = "Division: %s, id: %s" % (a,b)
print(r)
return(r)
curic_patterns.append(div1)
curic_patterns.append( {
"attributes": {
"fieldName": "Department",
"fieldId": 65001,
"isLookUpField": True,
"displayName": "Department"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
})
def d2(a,b):
r = "Department: %s, id: %s" % (a,b)
print(r)
return r
curic_patterns.append(d2)
curic_patterns.append({
"attributes": {
"fieldName": "Award Type",
"fieldId": 60221,
"isLookUpField": True,
"displayName": "Award Type"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
})
def d3(a,b):
r = "Award: %s, id: %s" % (a,b)
print(r)
return r
curic_patterns.append(d3)
p1 = {
"attributes": {
"fieldName": "Description",
"fieldId": _,
"isLookUpField": False,
"displayName": "Description"
},
"dataTypeDetails": {
"type": "string"
},
"fieldValue": _
}
def pp1(a,b):
r = "Description (id:%s) %s" % (a,b)
#print(r[:40])
return r
curic_patterns.append(p1)
curic_patterns.append(pp1)
p2 = {"attributes": {
"fieldName": "Program Title",
"fieldId": _,
"isLookUpField": False,
"displayName": "Program Title"
},
"dataTypeDetails": {
"type": "string",
"maxLength": 250
},
"fieldValue":_
}
def pp2(a,b):
r = "Program (id:%s) %s" % (a,b)
#print(r)
return r
curic_patterns.append(p2)
curic_patterns.append(pp2)
p3 = { "attributes": {
"fieldName": "Course",
"fieldId": _,
"isLookUpField": True,
"displayName": "Course"
},
"lookUpDataset": [
[
{
"name": "Max",
"value": _
},
{
"name": "IsVariable",
"value": _
},
{
"name": "Min",
"value": _
},
{
"name": "Text",
"value": _
}
]
],
"dataTypeDetails": {
"type": "lookup"
},
"lookUpDisplay": _,
"fieldValue": _
}
def pp3(a,b,c,d,e,f,g):
r = "Course (%s / %s) %s (%s), var? %s %s - %s" % (a,g, f, e, c, b, d)
#print(r)
return r
curic_patterns.append(p3)
curic_patterns.append(pp3)
p4 = {
"attributes": {
"sectionOrInstance": "section",
"sectionName": "Unit Range",
"sectionSortOrder": 2,
"oneToManySection": False
},
"subsections": [],
"fields": [
{
"attributes": {
"fieldName": "Units Low",
"fieldId": 59608,
"isLookUpField": False,
"displayName": "Units Low"
},
"dataTypeDetails": {
"scale": 2,
"type": "numeric",
"precision": 6
},
"fieldValue": _
},
{
"attributes": {
"fieldName": "Units High",
"fieldId": 59609,
"isLookUpField": False,
"displayName": "Units High"
},
"dataTypeDetails": {
"scale": 2,
"type": "numeric",
"precision": 6
},
"fieldValue": _
}
]
}
def pp4(a,b):
r = "Unit Range: %s - %s" % (a,b)
return r
curic_patterns.append(p4)
curic_patterns.append(pp4)
p5 = {
"attributes": {
"fieldName": "Discipline",
"fieldId": _,
"isLookUpField": True,
"displayName": "Discipline"
},
"lookUpDisplay": _,
"dataTypeDetails": {
"type": "lookup"
},
"fieldValue": _
}
def pp5(a,b,c):
r = "Discipline (%s) %s / %s" % (a,b,c)
#print(r)
return r
curic_patterns.append(p5)
curic_patterns.append(pp5)
p6 = { "attributes": {
"fieldName": "Course Block Definition",
"fieldId": _,
"isLookUpField": False,
"displayName": "Course Block Definition"
},
"dataTypeDetails": {
"type": "string"
},
"fieldValue": _
}
def pp6(a,b):
r = "Block (%s) %s" % (a,b)
#print(r)
return r
p7 = {
"attributes": {
"fieldName": "Block Header",
"fieldId": _,
"isLookUpField": False,
"displayName": "Block Header"
},
"dataTypeDetails": {
"type": "string",
"maxLength": 4000
},
"fieldValue": _
}
def pp7(a,b):
r = "Block Header (%s) %s" % (b,a)
#print(r)
return r
p8 = {
"attributes": {
"fieldName": "Block Footer",
"fieldId": _,
"isLookUpField": False,
"displayName": "Block Footer"
},
"dataTypeDetails": {
"type": "string",
"maxLength": 4000
},
"fieldValue": _
}
def pp8(a,b):
r = "Block Footer (%s) %s" % (b,a)
#print(r)
return r
curic_patterns.append(p6)
curic_patterns.append(pp6)
curic_patterns.append(p7)
curic_patterns.append(pp7)
curic_patterns.append(p8)
curic_patterns.append(pp8)
######################
###################### Trying to remove more junk
######################
curic_patterns.append(j1)
curic_patterns.append(jj1)
j3 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": True,
"displayName": _,
},
"lookUpDisplay": _,
"dataTypeDetails":
{"type": "lookup",
},
"fieldValue": _,
}
def jj3(a,b,c,d,e):
r = "Generic lookup Field: Name: %s / %s, ID: %i, Displayname: %s, Value: %s " % (a,c,b,d,e)
#print(r)
return r
curic_patterns.append(j2)
curic_patterns.append(jj2)
curic_patterns.append(j3)
curic_patterns.append(jj3)
curic_patterns.append(j4)
curic_patterns.append(jj4)
j5 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": _,
"type": "numeric",
"precision": _,
},
"fieldValue": _,
}
def jj5(a,b,c,d,e,f):
r = "Numeric Field, Name: %s / %s Id: %s, Value: %s" % (a,c,b,f)
#print(r)
return r
curic_patterns.append(j5)
curic_patterns.append(jj5)
curic_patterns.append(j6)
curic_patterns.append(jj6)
"""
"""j2 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": 2,
"type": "numeric",
"precision": 6,
},
"fieldValue": _,
}
def jj2(a,b,c,d):
r = "Generic Num Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a,b,c,d)
print(r)
return r
j2 = {"attributes":
{"fieldName": _,
"fieldId": _,
"isLookUpField": False,
"displayName": _,
},
"dataTypeDetails":
{"scale": 2,
"type": "numeric",
"precision": 6,
},
"fieldValue": _,
}
def jj2(a,b,c,d):
r = "Generic Num Field: Name: %s, ID: %s, Displayname: %s, Value: %s" % (a,b,c,d)
print(r)
return r
"""

1958
pipelines.py Normal file

File diff suppressed because it is too large Load Diff

188
queries.sql Normal file
View File

@ -0,0 +1,188 @@
-- NOTE(review): header comments converted from '#'/'##' to standard SQL
-- '--' line comments; '#' is not recognized by SQLite, which this repo's
-- local cache presumably uses (main.py imports sqlite3) — confirm.
-- TODO students enrolled in fall 2020
-- Fall 2020 students with how many classes they're taking
SELECT u.canvasid, u.name, u.sortablename, COUNT(e.id) AS num FROM enrollment AS e
JOIN users AS u ON e.user_id=u.id
JOIN courses AS c ON e.course_id=c.id
WHERE c.sis LIKE "202070-%"
AND e.workflow="active"
AND e."type"="StudentEnrollment"
GROUP BY u.canvasid
-- All sections offered in Fall 2020
SELECT c.id, c.canvasid, c.name, c.code FROM courses AS c
WHERE c.sis LIKE "202070-%"
AND NOT c.state="deleted"
ORDER BY c.code ASC
-- All Teachers teaching in Fall 2020
SELECT c.id, c.canvasid AS course_cid, c.name, c.code, u.name, u.sortablename, u.canvasid AS user_cid FROM courses AS c
JOIN enrollment AS e ON e.course_id=c.id
JOIN users AS u ON u.id=e.user_id
WHERE c.sis LIKE "202070-%"
AND NOT c.state="deleted"
AND e."type"="TeacherEnrollment"
ORDER BY u.sortablename
--- ORDER BY c.code ASC
-- All Teachers teaching in Fall 2020 -> how many classes each has
SELECT u.name, u.sortablename, COUNT(c.id) AS num, GROUP_CONCAT(c.code, ", ") AS courses, u.canvasid AS user_cid FROM courses AS c
JOIN enrollment AS e ON e.course_id=c.id
JOIN users AS u ON u.id=e.user_id
WHERE c.sis LIKE "202070-%"
AND NOT c.state="deleted"
AND e."type"="TeacherEnrollment"
GROUP BY user_cid
ORDER BY courses
--- ORDER BY u.sortablename
--- ORDER BY c.code ASC
-- ## Fall 2020 teachers with NO ACTIVITY
SELECT c.id AS courseid, u.id AS userid, c.code, u.name FROM courses AS c
JOIN enrollment AS e ON e.course_id=c.id
JOIN users AS u ON u.id=e.user_id
WHERE c.sis LIKE "202070-%"
AND NOT c.state="deleted"
AND e."type"="TeacherEnrollment"
AND u.id NOT IN (
	SELECT r.userid FROM requests_sum1 AS r
)
ORDER BY u.sortablename
-- ## Activity of Fall 2020 teachers
SELECT t.code, t.name, SUM(r.viewcount) FROM requests_sum1 AS r
JOIN (
	SELECT c.id AS courseid, u.id AS userid, c.code, u.name FROM courses AS c
	JOIN enrollment AS e ON e.course_id=c.id
	JOIN users AS u ON u.id=e.user_id
	WHERE c.sis LIKE "202070-%"
	AND NOT c.state="deleted"
	AND e."type"="TeacherEnrollment"
	--GROUP BY u.id
	ORDER BY u.sortablename
) AS t ON r.userid=t.userid AND r.courseid=t.courseid
GROUP BY r.userid, r.courseid
-- Students who are new in FALL 2020
SELECT u.canvasid, u.name, u.sortablename, GROUP_CONCAT(c.code), COUNT(e.id) AS num FROM enrollment AS e
JOIN users AS u ON e.user_id=u.id
JOIN courses AS c ON e.course_id=c.id
WHERE c.sis LIKE "202070-%"
AND e.workflow="active"
AND e."type"="StudentEnrollment"
AND u.canvasid NOT IN (
	SELECT u.canvasid FROM enrollment AS e
	JOIN users AS u ON e.user_id=u.id
	JOIN courses AS c ON e.course_id=c.id
	WHERE c.sis NOT LIKE "202070-%"
	AND e.workflow="active"
	AND e."type"="StudentEnrollment"
	GROUP BY u.canvasid
)
GROUP BY u.canvasid
ORDER BY num DESC, u.sortablename
-- Students who are new in 2020
SELECT u.canvasid, u.name, u.sortablename, GROUP_CONCAT(c.code), COUNT(e.id) AS num FROM enrollment AS e
JOIN users AS u ON e.user_id=u.id
JOIN courses AS c ON e.course_id=c.id
WHERE c.sis LIKE "202070-%"
AND e.workflow="active"
AND e."type"="StudentEnrollment"
AND u.canvasid NOT IN (
	SELECT u.canvasid FROM enrollment AS e
	JOIN users AS u ON e.user_id=u.id
	JOIN courses AS c ON e.course_id=c.id
	WHERE c.sis NOT LIKE "202070-%"
	AND c.sis NOT LIKE "202050-%"
	AND c.sis NOT LIKE "202030-%"
	AND e.workflow="active"
	AND e."type"="StudentEnrollment"
	GROUP BY u.canvasid
)
GROUP BY u.canvasid
ORDER BY num DESC, u.sortablename
-- Students who are new in FALL 2020 -> how many students are taking how many classes
SELECT num, COUNT(num) AS class_count FROM (
	SELECT e.id, COUNT(e.id) AS num FROM enrollment AS e
	JOIN users AS u ON e.user_id=u.id
	JOIN courses AS c ON e.course_id=c.id
	WHERE c.sis LIKE "202070-%"
	AND e.workflow="active"
	AND e."type"="StudentEnrollment"
	AND u.canvasid NOT IN (
		SELECT u.canvasid FROM enrollment AS e
		JOIN users AS u ON e.user_id=u.id
		JOIN courses AS c ON e.course_id=c.id
		WHERE c.sis NOT LIKE "202070-%"
		AND e.workflow="active"
		AND e."type"="StudentEnrollment"
		GROUP BY u.canvasid
	)
	GROUP BY u.id
)
GROUP BY num
ORDER BY num DESC
-- All FALL 2020 students -> how many students are taking how many classes
SELECT num, COUNT(num) AS class_count FROM (
	SELECT e.id, COUNT(e.id) AS num FROM enrollment AS e
	JOIN users AS u ON e.user_id=u.id
	JOIN courses AS c ON e.course_id=c.id
	WHERE c.sis LIKE "202070-%"
	AND e.workflow="active"
	AND e."type"="StudentEnrollment"
	GROUP BY u.id
)
GROUP BY num
ORDER BY num DESC
-- Students who are NOT enrolled in Fall 2020
SELECT u.canvasid, u.name, u.sortablename FROM enrollment AS e
JOIN users AS u ON e.user_id=u.id
JOIN courses AS c ON e.course_id=c.id
WHERE c.sis NOT LIKE "202070-%"
AND e.workflow="active"
AND e."type"="StudentEnrollment"
GROUP BY u.canvasid

61
requirements.2019.txt Normal file
View File

@ -0,0 +1,61 @@
beautifulsoup4==4.6.3
bs4==0.0.1
cachetools==3.1.1
certifi==2018.8.24
docx==0.2.4
durable-rules==2.0.11
google-api-python-client==1.7.11
google-auth==1.7.1
google-auth-httplib2==0.0.3
google-auth-oauthlib==0.4.1
html2text==2018.1.9
html5lib==1.0.1
httplib2==0.14.0
idna==2.7
ipython==7.0.1
ipython-genutils==0.2.0
jsondiff==1.2.0
lxml==4.2.5
Markdown==3.0.1
numpy==1.17.3
O365==2.0.5
oauthlib==3.1.0
oyaml==0.9
packaging==19.2
pampy==0.2.1
pandas==0.25.2
paramiko==2.6.0
pickleshare==0.7.5
Pillow==6.2.1
pkginfo==1.5.0.1
prompt-toolkit==2.0.5
pyfiglet==0.8
PyJWT==1.7.1
PyNaCl==1.3.0
pyparsing==2.4.5
pysftp==0.2.9
PySocks==1.6.8
python-dateutil==2.8.0
python-docx==0.8.10
pytoml==0.1.21
pytz==2018.9
PyYAML==5.1.2
requests==2.19.1
requests-oauthlib==1.2.0
rsa==4.0
simplegeneric==0.8.1
simpy==3.0.11
sortedcontainers==2.1.0
stringcase==1.2.0
structlog==19.2.0
tabulate==0.8.6
textdistance==4.1.5
toolz==0.10.0
twilio==6.24.0
tzlocal==2.0.0
uritemplate==3.0.0
urllib3==1.23
wcwidth==0.1.7
webencodings==0.5.1
windows-curses==2.1.0
yarg==0.1.9

288
requirements.txt Normal file
View File

@ -0,0 +1,288 @@
alabaster==0.7.10
anaconda-client==1.6.5
anaconda-navigator==1.6.9
anaconda-project==0.8.0
asn1crypto==0.22.0
astroid==1.5.3
astropy==2.0.2
atomicfile==1.0.1
attrs==18.2.0
Automat==0.7.0
Babel==2.5.0
backports.shutil-get-terminal-size==1.0.0
bcrypt==3.1.7
beautifulsoup4==4.6.0
bitarray==0.8.1
bkcharts==0.2
blaze==0.11.3
bleach==2.0.0
blinker==1.4
bokeh==0.12.10
boto==2.48.0
boto3==1.10.20
botocore==1.13.20
Bottleneck==1.2.1
certifi==2019.9.11
cffi==1.10.0
chardet==3.0.4
clfparser==0.3
click==6.7
cloudpickle==0.4.0
clyent==1.2.2
coinmarketcap==5.0.3
colorama==0.3.9
colorlog==4.0.2
conda==4.3.30
conda-build==3.0.27
conda-verify==2.0.0
constantly==15.1.0
contextlib2==0.5.5
cryptography==2.5
cssselect==1.0.3
cycler==0.10.0
cymem==2.0.2
Cython==0.28.5
cytoolz==0.9.0.1
dask==0.15.3
datashape==0.5.4
DAWG-Python==0.7.2
decorator==4.1.2
deepdiff==5.0.2
deeppavlov==0.1.6
dill==0.2.9
distributed==1.19.1
dnspython==2.0.0
docopt==0.6.2
docutils==0.14
docx==0.2.4
durable-rules==2.0.28
email-validator==1.1.1
emoji==0.5.4
en-core-web-lg==2.0.0
en-core-web-sm==2.0.0
entrypoints==0.2.3
et-xmlfile==1.0.1
fastcache==1.0.2
filelock==2.0.12
flasgger==0.9.1
Flask==1.0.2
Flask-Caching==1.9.0
Flask-Cors==3.0.6
Flask-HTTPAuth==4.1.0
Flask-Login==0.5.0
Flask-Mail==0.9.1
Flask-SocketIO==4.3.1
Flask-SQLAlchemy==2.4.4
Flask-User==1.0.2.2
Flask-WTF==0.14.3
funcy==1.14
fuzzywuzzy==0.16.0
gensim==3.8.3
gevent==1.2.2
gevent-websocket==0.10.1
glob2==0.5
glob3==0.0.1
gmpy2==2.0.8
graphviz==0.15
greenlet==0.4.12
h5py==2.8.0
heapdict==1.0.0
html2text==2018.1.9
html5lib==0.999999999
hyperlink==18.0.0
idna==2.8
imageio==2.2.0
imagesize==0.7.1
importlib-metadata==2.0.0
incremental==17.5.0
inflection==0.3.1
ipdb==0.13.4
ipykernel==4.6.1
ipython==6.1.0
ipython-genutils==0.2.0
ipywidgets==7.0.0
isort==4.2.15
itsdangerous==0.24
jdcal==1.3
jedi==0.10.2
Jinja2==2.10
jmespath==0.9.4
jsondiff==1.2.0
jsonschema==2.6.0
jupyter-client==5.1.0
jupyter-console==5.2.0
jupyter-core==4.3.0
jupyterlab==0.27.0
jupyterlab-launcher==0.4.0
Keras==2.2.0
Keras-Applications==1.0.2
Keras-Preprocessing==1.0.1
lazy-object-proxy==1.3.1
llvmlite==0.20.0
locket==0.2.0
lxml==4.1.0
Markdown==3.3.3
MarkupSafe==1.0
matplotlib==2.1.0
mccabe==0.6.1
mistune==0.7.4
mmh3==2.5.1
more-itertools==5.0.0
mpmath==0.19
msgpack==0.5.6
msgpack-numpy==0.4.3.2
msgpack-python==0.4.8
multipledispatch==0.4.9
murmurhash==1.0.2
mysql-connector==2.1.6
mysql-connector-python==8.0.15
navigator-updater==0.1.0
nbconvert==5.3.1
nbformat==4.4.0
ndg-httpsclient==0.5.1
networkx==2.0
nltk==3.2.5
nose==1.3.7
notebook==5.0.0
nrepl-python-client==0.0.3
numba==0.35.0+10.g143f70e90
numexpr==2.6.2
numpy==1.14.5
numpydoc==0.7.0
odo==0.5.1
olefile==0.44
openpyxl==2.4.8
ordered-set==4.0.2
ortools==7.1.6720
overrides==1.9
packaging==16.8
paho-mqtt==1.5.0
pampy==0.3.0
pandas==0.23.1
pandas-datareader==0.8.1
pandocfilters==1.4.2
paramiko==2.7.1
parsel==1.5.1
partd==0.3.8
passlib==1.7.2
path.py==10.3.1
pathlib==1.0.1
pathlib2==2.3.0
patsy==0.4.1
peewee==3.9.5
pep8==1.7.0
pervane==0.0.66
pexpect==4.2.1
pickleshare==0.7.4
Pillow==4.2.1
pkginfo==1.4.1
plac==0.9.6
plotly==4.14.1
ply==3.10
preshed==2.0.1
prompt-toolkit==1.0.15
protobuf==3.7.1
psutil==5.4.0
ptyprocess==0.5.2
py==1.4.34
pyasn1==0.4.5
pyasn1-modules==0.2.4
pycodestyle==2.3.1
pycosat==0.6.2
pycparser==2.18
pycrypto==2.6.1
pycurl==7.43.0
pydbus==0.6.0
PyDispatcher==2.0.5
pyflakes==1.6.0
Pygments==2.2.0
PyHamcrest==1.9.0
pylint==1.7.4
pymorphy2==0.8
pymorphy2-dicts==2.4.393442.3710985
pymorphy2-dicts-ru==2.4.404381.4453942
PyNaCl==1.3.0
pync==2.0.3
pyodbc==4.0.17
pyOpenSSL==18.0.0
pypandoc==1.4
pyparsing==2.2.0
pysftp==0.2.9
PySocks==1.6.7
pyTelegramBotAPI==3.5.2
pytest==3.2.1
python-dateutil==2.6.1
python-engineio==3.13.2
python-socketio==4.6.0
pytz==2017.2
PyWavelets==0.5.2
PyYAML==3.12
pyzmq==16.0.2
QtAwesome==0.4.4
qtconsole==4.3.1
QtPy==1.3.1
Quandl==3.4.8
queuelib==1.5.0
rake-nltk==1.0.4
readline==6.2.4.1
regex==2018.1.10
requests==2.22.0
requests-cache==0.5.2
retrying==1.3.3
rope==0.10.5
ruamel-yaml==0.11.14
rusenttokenize==0.0.4
s3transfer==0.2.1
schedule==0.6.0
scikit-image==0.13.0
scikit-learn==0.19.1
scipy==1.1.0
Scrapy==1.6.0
seaborn==0.8
service-identity==18.1.0
simplegeneric==0.8.1
singledispatch==3.4.0.3
six==1.12.0
smart-open==3.0.0
snowballstemmer==1.2.1
sortedcollections==0.5.3
sortedcontainers==1.5.7
spacy==2.0.18
Sphinx==1.6.3
sphinxcontrib-websupport==1.0.1
spyder==3.2.4
SQLAlchemy==1.1.13
statsmodels==0.8.0
striprtf==0.0.11
summa==1.2.0
sympy==1.1.1
tables==3.4.2
tblib==1.3.2
terminado==0.6
testpath==0.3.1
textdistance==4.2.0
thinc==6.12.1
toolz==0.8.2
tornado==4.5.2
tqdm==4.23.4
traitlets==4.3.2
Twisted==18.9.0
typing==3.6.2
ujson==1.35
unicodecsv==0.14.1
urllib3==1.25.7
w3lib==1.20.0
wcwidth==0.1.7
webencodings==0.5.1
Werkzeug==0.14.1
widgetsnbextension==3.0.2
wrapt==1.10.11
WTForms==2.3.1
xlrd==1.1.0
XlsxWriter==1.0.2
xlwt==1.3.0
yattag==1.11.1
youtube-dl==2019.11.5
zict==0.1.3
zipp==3.4.0
zope.interface==4.6.0

94
sched.py Normal file
View File

@ -0,0 +1,94 @@
import requests, re, csv, json, funcy, sys
def dates(s):
    """Normalize an ISO date string 'YYYY-MM-DD' to 'MM/DD'.

    Any string that does not start with an ISO date passes through unchanged.
    """
    hit = re.match(r'(\d\d\d\d)\-(\d\d)\-(\d\d)', s)
    if hit:
        return "%s/%s" % (hit.group(2), hit.group(3))
    return s
# "Course Code","Start Date","End Date",Term,Delivery,CRN,Status,"Course Name","Course Description","Units/Credit hours","Instructor Last Name","Instructor First Name",Campus/College,"Meeting Days and Times","Pass/No Pass available?","Class Capacity","Available Seats","Waitlist Capacity","Current Waitlist Length","Meeting Locations","Course Notes",ZTC
# ACCT103,2021-06-14,2021-07-23,"Summer 2021",Online,80386,Active,"General Office Accounting","This course is designed to prepare students for entry-level office accounting positions. Emphasis is on practical accounting applications. This course has the option of a letter grade or pass/no pass. ADVISORY: Eligible for Mathematics 430."," 3.00","Valenzuela Roque",Karla,"Gavilan College"," ",T," 30"," 18"," 20"," 0",,,
def parse_www_csv_sched():
    """Fetch the public schedule CSV and print it as JSON plus summaries.

    Downloads current_schedule.csv from www, renames the verbose CSV headers
    to the short internal keys, normalizes instructor names, date ranges,
    credits and online sections, then prints the whole structure, a per-term
    count, and a one-line-per-section dump of Fall 2021.
    """
    # Parallel lists mapping original CSV header names -> short internal keys.
    old_keys = [ "CRN","Course Code","Units/Credit hours","Course Name","Meeting Days and Times","Class Capacity","Available Seats","Waitlist Capacity","Current Waitlist Length","Instructor Last Name","Start Date","Meeting Locations","ZTC","Delivery","Campus/College","Status","Course Description","Pass/No Pass available?","Course Notes" ]
    new_keys = [ "crn", "code","cred", "name", "days", "cap", "rem", "wl_cap", "wl_act", "teacher", "date", "loc", "ztc", "type", "site","status","desc","pnp","note" ]
    url = "https://gavilan.edu/_files/php/current_schedule.csv"
    sched_txt = requests.get(url).text.splitlines()
    sched = {"Fall 2021":[], "Spring 2022":[], "Winter 2022":[], "Summer 2021":[]}
    shortsems = {"Fall 2021":"fa21", "Spring 2022":"sp22", "Winter 2022":"wi22", "Summer 2021":"su21","Summer 2022":"su22","Fall 2022":"fa22"}
    for row in csv.DictReader(sched_txt):
        d = dict(row)
        for (old_key, new_key) in zip(old_keys, new_keys):
            d[new_key] = d.pop(old_key).strip()
        # Fold first+last instructor name, and start+end dates, into one field each.
        d['teacher'] = d.pop('Instructor First Name').strip() + " " + d['teacher']
        d['date'] = dates(d['date']) + '-' + dates(d.pop('End Date').strip())
        term_name = d.pop('Term')
        # Unknown terms fall back to the raw term name instead of raising KeyError.
        d['term'] = shortsems.get(term_name, term_name)
        if d['cred'] == ".00":
            d['cred'] = "0"
        if d['type'] == "Online":
            d["loc"] = "ONLINE"
            d["site"] = "Online"
            d["type"] = "online"
        # BUG FIX: terms like "Summer 2022"/"Fall 2022" are in shortsems but
        # were not pre-seeded in sched, so the original lookup raised KeyError.
        sched.setdefault(term_name, []).append(d)
    print( json.dumps(sched,indent=2))
    for k,v in sched.items():
        print("%s: %i" % (k,len(v)))
    for v in sched.get("Fall 2021", []):
        print("%s\t %s\t %s\t %s" % ( v['code'], v['days'], v['type'], v['loc'] ))
def parse_json_test_sched():
    """Pretty-print each JSON-encoded line of the cached test schedule file."""
    for line in open('cache/classes_json.json', 'r').readlines():
        parsed = json.loads(line)
        print(json.dumps(parsed, indent=2))
if __name__ == "__main__":
    print ('')
    # Menu of runnable actions: number -> [description, function to call].
    options = {
        1: ['fetch and parse the csv on www.', parse_www_csv_sched],
        2: ['parse the test json file.', parse_json_test_sched ],
    }
    # A numeric argv[1] selects an action non-interactively ...
    if len(sys.argv) > 1 and re.search(r'^\d+',sys.argv[1]):
        resp = int(sys.argv[1])
        print("\n\nPerforming: %s\n\n" % options[resp][0])
    else:
        # ... otherwise print the menu and prompt on stdin.
        print ('')
        for key in options:
            print(str(key) + '.\t' + options[key][0])
        print('')
        resp = input('Choose: ')
    # Call the function in the options dict
    options[ int(resp)][1]()

679
server.py Normal file
View File

@ -0,0 +1,679 @@
import json, codecs, re, markdown, os, pypandoc, striprtf, sqlite3, random, urllib
import subprocess, html
from striprtf.striprtf import rtf_to_text
from flask import render_template, Response
from flask import send_from_directory
import hashlib, funcy, platform, requests
from datetime import datetime
from orgpython import to_html
from localcache import sqlite_file, db # personnel_meta # personnel_fetch
from localcache import user_enrolled_in
from localcache import arrange_data_for_web, depts_with_classcounts, dept_with_studentviews, course_quick_stats
from yattag import Doc
LECPATH = "/media/hd2/peter_home_offload/lecture/"
host = 'http://192.168.1.6:5000'
import paho.mqtt.client as mqtt
#################################################################################################################
#################################################################################################################
######
###### mqtt
######
# Module-level MQTT state: `client` starts as the falsy placeholder 0 and is
# replaced by a real mqtt.Client in the connect loop below.
client = 0
# 1 until a broker connection succeeds; cleared by the connect loop.
mqtt_offline = 1
# Seconds passed to client.loop() on each poll of the network loop.
mqtt_time = 0.1
def mqtt_loop():
    """Service the MQTT client's network loop forever.

    NOTE(review): while `client` is unset or mqtt_offline is truthy the body
    does nothing, so this busy-spins at full CPU -- confirm whether a sleep
    was intended for that case.
    """
    while 1:
        if client and not mqtt_offline:
            client.loop(mqtt_time)
# called when MQTT server connects
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: log the result code, subscribe to local/#."""
    print("Connected with result code " + str(rc))
    client.subscribe("local/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """paho-mqtt message callback: log topic and decoded payload with a timestamp."""
    stamp = datetime.now().strftime('%Y %m %d %H %M')
    print(" %s mqtt msg: %s data: %s" % (stamp, msg.topic, msg.payload.decode()))
# Keep retrying the broker until a connection succeeds.  NOTE: this blocks
# module import until the broker at 192.168.1.6 is reachable.
import time  # BUG FIX: time.sleep() was used below but `time` was never imported
while(mqtt_offline):
    try:
        client = mqtt.Client()
        client.on_connect = on_connect
        client.on_message = on_message
        client.connect("192.168.1.6", 1883, 60)
        mqtt_offline = 0
    except OSError as oe:
        # Broker unreachable (no network yet?) -- wait and retry.
        print('no internet? try again in 5 seconds.')
        time.sleep(5)
def _publish_local(topic):
    """Publish the fixed payload 'webserver' to *topic* on the shared MQTT client.

    Factored out of five identical copy-pasted sender functions.
    """
    global client
    client.publish(topic, 'webserver')
    print("sent %s" % topic)

def displaypi_on():
    """Turn the Pi photo-frame display on."""
    _publish_local('local/frame_on')

def displaypi_off():
    """Turn the Pi photo-frame display off."""
    _publish_local('local/frame_off')

def desklight():
    """Toggle the desk lamps."""
    _publish_local('local/peter/desklamps')

def clearscreens():
    """Ask all listeners to clear/blank their screens."""
    _publish_local('local/clearscreens')

def screenoff():
    """Toggle the monitors."""
    _publish_local('local/peter/monitors')
#################################################################################################################
#################################################################################################################
######
###### writing & knowledgebase
######
# Filesystem roots for the personal knowledge base.  Windows paths point at a
# Nextcloud sync folder; Linux paths at the second hard drive.
news_path = '/media/hd2/peter_home/Documents/scripts/browser/'
if platform.system() == 'Windows':
    writing_path = 'c:/users/peter/Nextcloud/Documents/writing/'
else:
    writing_path = '/media/hd2/peter_home/Documents/writing/'
img_path = '/media/hd2/peter_home/Documents/writing_img/'
if platform.system() == 'Windows':
    pics_path = 'c:/users/peter/Nextcloud/misc/'
else:
    pics_path = '/media/hd2/peter_home/misc/'
# Small HTML shorthands used by the page builders below.
br = "<br />"
nl = "\n"
# Shared <style> block plus SimpleMDE assets injected at the top of rendered pages.
style = """<style> body { line-height: 2.3em; margin: 4em; }
</style><link rel="stylesheet" href="/data/gui/public/simplemde.min.css">
<script src="/data/gui/public/simplemde.min.js"></script>"""
## I implement the backend of a web based GUI for the canvas functions
## and dashboard stuff.
## Stories / Next Steps
## 1. browse my writings. Follow links in markdown format to other files
## in this folder. html, md, rtf are handled so far.
## 2a. Sort by date, title, topic/tag
## 2b. Use w/ larger topic modeling project to suggest relations
## 2. Run through everything in the folder, and index the 'backlinks'.
## Append those to each file.
## 3 (interrupted) Use vue and implement editing. Q: convert back to
## original format? Or maintain new one. A: Convert back.
## 3. Do similar for tags.
## 4. Do similar but automatically, using nlp and keywords.
def tag(x, y):
    """Wrap *y* in the HTML element named *x*."""
    return "<{0}>{1}</{0}>".format(x, y)

def tagc(x, c, y):
    """Wrap *y* in element *x* carrying CSS class *c*."""
    return '<{0} class="{1}">{2}</{0}>'.format(x, c, y)

def a(t, h):
    """Return an anchor linking to *h* with link text *t*."""
    return '<a href="{0}">{1}</a>'.format(h, t)
def homepage():
    """Landing page: a plain-HTML menu of server actions and reports."""
    parts = [
        tag('h1', 'This is my server.'), "<br />",
        a('Toggle light', '/light'), br, br,
        a('Clear screens', '/clearscreens'), br, br,
        a('Toggle monitors', '/screensoff'), br, br,
        a('Pi frame on', '/displaypi/on'), br, br,
        a('Pi frame off', '/displaypi/off'), br, br,
        a('Knowledge Base', '/x/writing/index'), br,
        a('Graph of users', '/x/user/1'), "<br />",
        a('Courses in a dept', '/x/dept/csis'), "<br />",
        a('People in a course', '/x/roster/10633'), br, br,
        a('Summarize courses a user has been seen in', '/x/user_course_history_summary/9964'), br, br,
        a('Peters Lecture Series', '/lectures'), br, br,
        a('Reload server', '/rl'), "<br />",
        a('want to shut down?', '/sd'), "<br />",
        a('', ''), br,
    ]
    return "".join(parts)
def orgline(L):
    """Convert one line of org-mode-ish text to an HTML fragment.

    '* heading'    -> <h2> heading
    'TODO [#A]...' -> bolded priority-1 todo with trailing break
    '*** line'     -> bare text (no trailing break)
    '** line'      -> unwrapped text with trailing break
    blank line     -> ''
    anything else  -> the line followed by a break
    """
    # BUG FIX: the original called L.strip() and discarded the result, so
    # lines with leading whitespace never matched the ^-anchored patterns.
    L = L.strip()
    if re.search(r"^\s*$", L): return ""
    # (local renamed from `a`, which shadowed the module-level a() helper)
    m = re.search( r'^\*\s(.*)$', L)
    if m: return "<h2>%s</h2>\n" % m.group(1)
    b = re.search( r'TODO\s\[\#A\](.*)$', L)
    if b: return "<b><i>Todo - Priority 1</i>: %s</b>" % b.group(1) + br + nl
    d = re.search( r'^\*\*\*\s(.*)$', L)
    if d: return d.group(1)
    d = re.search( r'^\*\*\s(.*)$', L)
    if d: L = d.group(1)
    return L + br + nl
def editor(src):
    """Return *src* inside a SimpleMDE-backed <textarea>, pushed down by three breaks."""
    widget = """<textarea name='content' id='editor'>%s</textarea><script>
var simplemde = new SimpleMDE({ element: document.getElementById("editor") });
</script>""" % src
    return br + br + br + widget
def in_form(txt, path):
    """Wrap *txt* in the POST form handled by /save, carrying the target *path*."""
    pieces = [
        '<form method="post" action="/save"><input type="hidden" name="what" value="writing">',
        '<input type="hidden" name="path" value="' + path + '" />',
        txt,
        '<input type="submit" value="Save" name="Save" /></form>',
    ]
    return ''.join(pieces)
def mytime(fname):
    """Modification time (epoch seconds) of *fname* inside the writing folder."""
    full_path = os.path.join(writing_path, fname)
    return os.path.getmtime(full_path)
def index():
    """Return an HTML listing of the writing folder, newest-modified first.

    Each entry links to the bare filename (resolved by writing()) and shows
    its mtime formatted as 'YYYY-MM-DD HH'.
    """
    #f = [ os.path.join(writing_path, x) for x in os.listdir(writing_path) ]
    f = os.listdir(writing_path)
    # Sort ascending by mtime, then reverse -> newest first.
    f.sort(key=mytime)
    f.reverse()
    return "<br /><br />\n".join( ["<a href='%s'>%s</a> (%s)" % (x,x,datetime.fromtimestamp(mytime(x)).strftime('%Y-%m-%d %H')) for x in f ] )
def writing(fname):
    """Render one file from the writing folder as HTML, dispatching on extension.

    py/php -> escaped <pre>; html -> raw; md -> rendered Markdown plus an
    editable form; org -> orgpython; rtf -> stripped text; docx -> pandoc
    with media extracted to a hash-named folder; anything else -> treated as
    org-ish lines via orgline().  'index' renders the folder listing instead.
    """
    if fname == 'index': return index()
    inp = codecs.open(writing_path + fname, 'r', 'utf-8')
    ext = fname.split('.')[-1]
    if ext == "py" or ext == "php":
        src = inp.read()
        return "<pre>" + html.escape(src) + "</pre>"
    if ext == "html":
        src = inp.read()
        return src
    if ext == "md":
        src = inp.read()
        return style + markdown.markdown(src) + in_form(editor(src),fname)
    if ext == "org":
        src = inp.read()
        return to_html(src, toc=True, offset=0, highlight=True)
    if ext == "rtf":
        text = "<br />\n".join( rtf_to_text(inp.read()).split('\n') )
        return style + text
    if ext == "docx":
        # sha1 of a fixed string -> stable 10-char folder name for extracted media
        hash = hashlib.sha1("my message".encode("UTF-8")).hexdigest()
        hash = hash[:10]
        #output = pypandoc.convert_file('C:/Users/peter/Nextcloud/Documents/writing/' + fname, 'html',
        output = pypandoc.convert_file(writing_path + fname, 'html',
            extra_args=['--extract-media=%s' % hash ]) # file:///c:/Users/peter/Nextcloud/Documents/writing
        return style + output
    return style + markdown.markdown( "".join( [ orgline(x) for x in inp.readlines() ] ) )
#################################################################################################################
#################################################################################################################
######
###### kiosk display
######
def dashboard():
    """Serve the static slideshow page used by the kiosk display."""
    # Context manager closes the handle promptly (the original leaked it until GC).
    with open('static/slides.html', 'r') as fh:
        return fh.read()
def dash():
    """Serve the static dashboard page."""
    # Context manager closes the handle promptly (the original leaked it until GC).
    with open('static/dashboard.html', 'r') as fh:
        return fh.read()
def mycalendar():
    """Calendar view -- currently just renders the dashboard page.

    NOTE(review): the Google-calendar URL below was assigned to an unused
    local; kept as a comment in case it documents the intended data source.
    """
    # ics = 'https://calendar.google.com/calendar/u/0?cid=cGV0ZXIuaG93ZWxsQGdtYWlsLmNvbQ'
    return dash()
def most_recent_file_of(target, folder):
    """Return the newest filename in *folder* whose name matches regex *target*.

    Returns '' when nothing matches.  Also prints the matching candidates,
    newest first, for debugging.
    """
    entries = sorted(
        os.listdir(folder),
        key=lambda fn: os.stat(os.path.join(folder, fn)).st_mtime,
        reverse=True,
    )
    matches = [fn for fn in entries if re.search(target, fn)]
    print("file list is: " + str(matches))
    return matches[0] if matches else ''
def news():
    """Random image URL from the newest dated (YYYYMMDD-named) news folder."""
    folder = most_recent_file_of(r'\d\d\d\d\d\d\d\d', news_path)
    candidates = os.listdir(news_path + folder)
    return '/static/news/' + folder + '/' + random.choice(candidates)
def randPic():
    """First 15 minutes of each hour -> a news image; otherwise a random slideshow image."""
    if datetime.now().minute < 15:
        return news()
    choice = random.choice(os.listdir('static/images'))
    return '/static/images/' + choice
def do_img_crop(im):
    """Stub: intended to crop image *im* but currently does nothing useful.

    NOTE(review): `im` is unused, the command run is `ls -l`, and the decoded
    output `rr` is discarded, so this returns None with no effect -- looks
    like an unfinished subprocess experiment.
    """
    result = subprocess.run(['ls', '-l'], stdout=subprocess.PIPE)
    rr = result.stdout.decode('utf-8')
#################################################################################################################
#################################################################################################################
######
###### db info helpers
######
def sample():
    """Trivial endpoint used to verify the dispatcher works."""
    return "<h1>I am a sample</h1>"
def sample2(a=""):
    """Placeholder endpoint; ignores its optional argument."""
    return "I'm a placeholder"
# Filter a stream of loglines for those that match a course's url / id
def has_course(stream, courseid):
    """Yield log lines from *stream* that mention /courses/<courseid>/.

    Args:
        stream: any object whose .readline() returns '' at EOF.
        courseid: course id, int or numeric string.
    """
    regex = '/courses/%i/' % int(courseid)
    while True:
        L = stream.readline()
        # BUG FIX: readline() returns '' forever at EOF, so the original
        # generator looped infinitely instead of terminating.
        if not L:
            return
        if re.search(regex, L): yield L
def js(s):
    """JSON-encode *s* with 2-space indentation."""
    return json.dumps(s, indent=2)
def sem_from_array_crn(crn):
    """Return the 6-char term prefix (e.g. '202270') of a course row's SIS id.

    *crn* is a sequence whose index 2 holds the SIS id; returns '' when that
    slot is empty or None.
    """
    sis = crn[2]
    if not sis:
        return ""
    return sis[:6]
#################################################################################################################
#################################################################################################################
######
###### db ilearn course / user / hits
######
def user_courses(uid):
    """JSON list of the courses user *uid* is enrolled in (via localcache.user_enrolled_in)."""
    return js(user_enrolled_in(uid))
def user_course_history_summary(usr_id):
    """Per-course view-count summary for one user, as JSON.

    Returns rows of (timeblock, viewcount, sis, code, canvasid), one per
    course the user has been seen in, most-viewed first.
    """
    # Parameterized query: the original interpolated usr_id straight into the
    # SQL text, which is injection-prone for ids arriving from the web GUI.
    q = """SELECT r.timeblock, r.viewcount, c.sis, c.code, c.canvasid FROM requests_sum1 AS r
    JOIN users AS u ON r.userid=u.id
    JOIN courses AS c ON c.id=r.courseid
    WHERE u.canvasid=?
    GROUP BY r.courseid ORDER BY r.viewcount DESC;"""
    (conn,cur) = db()
    # int() preserves the original's numeric-literal comparison semantics.
    cur.execute(q, (int(usr_id),))
    r = cur.fetchall()
    return js(r)
    # Removed: a dead block that regrouped rows by semester -- it sat after
    # the return above and could never execute.
def roster(crn):
    """JSON roster (names, ids, enrollment state/type) for canvas course *crn*.

    Uses a parameterized query -- the original interpolated *crn* into the
    SQL text, which is injection-prone for ids arriving from the web GUI.
    """
    q = """SELECT u.name, u.sortablename, u.canvasid as user_id, c.canvasid as course_id, e.workflow, e."type" FROM enrollment AS e
    JOIN users AS u ON e.user_id=u.id
    JOIN courses AS c ON c.id=e.course_id
    WHERE c.canvasid=? ;"""
    (conn,cur) = db()
    # str() preserves the original's quoted-string comparison semantics.
    cur.execute(q, (str(crn),))
    return js(cur.fetchall())
def user_course_hits(usr,courseid):
    """All cached log lines for user *usr* that mention course *courseid*.

    Reads the per-user CSV log from cache/users/logs/ and filters it through
    the has_course() generator.
    """
    return list(has_course( codecs.open('cache/users/logs/%s.csv' % usr, 'r', 'utf-8'), courseid))
    #return "\n".join( [x for x in next(gen)] )
def profiles(id=1, b=2, c=3):
    """HTML strip of every cached Canvas profile picture as 45x45 thumbnails."""
    pics = os.listdir('cache/picsCanvas')
    cells = ["<img height='45' width='45' hspace='5' vspace='5' src='/cache/picsCanvas/%s' />" % s
             for s in pics]
    return ''.join(cells)
# Departments, classes in each, and students (with hits) in each of those.
def enrollment(a):
    """JSON summary of departments with per-class student counts (arg unused)."""
    return js(depts_with_classcounts())
# All the classes in this dept, w/ all the students in each, with count of their views.
def dept(d=''):
    """JSON of classes in department *d* with per-student view counts.

    With no argument, returns the localcache default (all departments).
    """
    if not d: return js(dept_with_studentviews())
    return js(dept_with_studentviews(d))
def user(canvas_id=None):
    """Render hello.html for the cached user record cache/users/<canvas_id>.txt."""
    info = json.loads( codecs.open( 'cache/users/%s.txt' % canvas_id, 'r', 'utf-8').read() )
    return render_template('hello.html', id=canvas_id, name=info['name'])
#################################################################################################################
#################################################################################################################
######
###### podcast feed
######
def lectures():
    """Build the podcast RSS feed for every .mp3 file in LECPATH.

    NOTE: the local yattag `tag` context manager shadows the module-level
    tag() helper inside this function.
    """
    fi = os.listdir(LECPATH)
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    doc.asis('<rss xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" version="2.0">')
    with tag('channel'):
        with tag('title'): text("Peter's Lecture Series")
        with tag('description'): text("Since 2019")
        with tag('link'): text(host)
        for f in fi:
            if f.endswith('.mp3'):
                #print(f)
                with tag('item'):
                    name = f.split('.')[0]
                    ff = re.sub('\s','%20',f)
                    with tag('title'): text(name)
                    with tag('guid'): text(f)
                    # enclosure length attribute = file size in bytes
                    b = os.path.getsize(LECPATH+f)
                    doc.stag('enclosure', url=host+'/podcast/media/'+urllib.parse.quote(ff), type='audio/mpeg',length=b)
    doc.asis('</rss>')
    #doc.asis('</xml>')
    return doc.getvalue()
def web_lectures():
    """Plain-HTML index of the mp3 lecture files with direct download links."""
    output = "<h1>Lectures</h1>\n"
    for fname in os.listdir(LECPATH):
        if not fname.endswith('.mp3'):
            continue
        title = fname.split('.')[0]
        quoted = urllib.parse.quote(fname)
        #ff = re.sub('\s','%20',f)
        output += '<a href="%s">%s</a><br />\n' % ( host + '/podcast/media/' + quoted, title)
    return output
#################################################################################################################
#################################################################################################################
######
###### editing personnel app
######
# personnel_fetch, personnel_meta
# todo: update: dept, title, any of the other fields.
# insert: new dept, new title,
# update a value: dept id of a personnel id
def update_pers_title(pid, tid):
    """Set personnel row *pid*'s title to *tid*; returns a JSON success blob.

    Uses a parameterized query -- the original built the SQL with
    %-interpolation, which is SQL-injection-prone since these values come
    from the personnel-editing web GUI.
    """
    q = "UPDATE personnel SET `title`=? WHERE `id`=?"
    (conn,cur) = db()
    result = cur.execute(q, (str(tid), str(pid)))
    conn.commit()
    return js( {'result': 'success'} )
# update a value: dept id of a personnel id
def update_pers_dept(pid, did):
    """Set personnel row *pid*'s primary department to *did*; returns a JSON success blob.

    Uses a parameterized query -- the original built the SQL with
    %-interpolation, which is SQL-injection-prone since these values come
    from the personnel-editing web GUI.
    """
    q = "UPDATE personnel SET `dept1`=? WHERE `id`=?"
    (conn,cur) = db()
    result = cur.execute(q, (str(did), str(pid)))
    conn.commit()
    return js( {'result': 'success'} )
def user_edit(canvas_id='2'):
    """Render the personnel editor page for the cached user *canvas_id*."""
    info = json.loads( codecs.open( 'cache/users/%s.txt' % str(canvas_id), 'r', 'utf-8').read() )
    return render_template('personnel.html', id=canvas_id, name=info['name'])
def staff_dir(search=''):
    """Render the staff directory page (search term currently unused)."""
    return render_template('dir.html')
######
###### handling images
######
def find_goo(n):
    """Extract the six digits following '00' (a G00 number) from filename *n*, or ''."""
    hit = re.search('00(\d\d\d\d\d\d)', n)
    return hit.groups()[0] if hit else ''

def byname(x):
    """Sort key: preferred display name for a staff record dict."""
    if 'conf_name' in x:
        return x['conf_name']
    if 'first_name' in x and 'last_name' in x:
        return x['first_name'] + " " + x['last_name']
    return ''

def fn_to_struct(n, staff):
    """Match filename *n* to a staff record by its embedded G00 number.

    Returns the matching record from *staff*, a placeholder dict when the
    number matches nobody, or 0 when *n* contains no G00 number at all.
    """
    goo = find_goo(n)
    if not goo:
        return 0
    for record in staff:
        if record['conf_goo'] == goo:
            return record
    return { "conf_goo": goo, "conf_name": "unknown - " + n }
def image_edit(filename=''):
    """Photo-matching GUI backend: pair cached photos with staff records.

    Pulls the staff list from the hhh.gavilan.edu directory API, matches each
    cached photo filename to a record via its embedded G00 number, and renders
    images.html with matched / unmatched / already-cropped lists as JSON.
    The badges/web flags select which photo cache is being worked on.
    """
    url = "https://hhh.gavilan.edu/phowell/map/dir_api_tester.php?a=list/staffsemester"
    staff = json.loads( requests.get(url).text )
    badges = 0
    web = 1
    if web:
        files = sorted(os.listdir('cache/picsStaffdir') )
        done_files = [ x[:-4] for x in sorted(os.listdir('cache/picsStaffdir/cropped') ) ]
    if badges:
        files = sorted(os.listdir('cache/picsId/originals_20211022') )
        done_files = [ x[:6] for x in sorted(os.listdir('cache/picsId/2021crop') ) ]
    files_match = []
    files_no_match = []
    raw_filenames = files
    for f in files:
        sa = fn_to_struct(f,staff)
        if sa:
            # copy so the shared staff record is not mutated when we attach a filename
            ss = sa.copy()
        else:
            ss = sa
        if ss:
            ss['filename'] = f
            files_match.append(ss)
        else: files_no_match.append(f)
    fm = json.dumps( sorted(files_match,key=byname) )
    fnm = json.dumps(files_no_match)
    sm = json.dumps(staff)
    return render_template('images.html', staff=sm, matches=fm, nomatches=fnm, checked=done_files)
def image_crop(filename,x,y,w,h,newname=''):
    """Crop a cached photo to box (x,y,w,h), resize to 250x333, save as <newname>.jpg.

    Rotates 270 degrees first when EXIF orientation is 6 (camera held
    sideways).  Returns a JSON blob describing what happened.  The badges/web
    flags select which photo cache is read and written.
    """
    from PIL import Image
    import piexif
    badges = 0
    web = 1
    if not newname: newname = filename
    if web:
        im = Image.open('cache/picsStaffdir/%s' % filename)
        savepath = 'cache/picsStaffdir/cropped/%s.jpg' % newname
    if badges:
        im = Image.open('cache/picsId/originals_20211022/%s' % filename)
        savepath = 'cache/picsId/2021crop/%s.jpg' % newname
    out = { 'im': str(im) }
    # Crop box arrives as strings from the URL route.
    x = int(x)
    y = int(y)
    w = int(w)
    h = int(h)
    if "exif" in im.info:
        exif_dict = piexif.load(im.info['exif'])
        #out['exif'] = exif_dict
        #print(exif_dict)
        if piexif.ImageIFD.Orientation in exif_dict['0th']:
            #exif_dict['0th'][piexif.ImageIFD.Orientation] = 3
            print(piexif.ImageIFD.Orientation)
            print(exif_dict['0th'])
            out['rotation'] = 'messed up'
            if exif_dict['0th'][piexif.ImageIFD.Orientation] == 6:
                im = im.rotate(270, expand=True)
    #im.save('cache/picsId/originals_20211022/crotated_%s' % filename, quality=95)
    im_crop = im.crop((x,y,x+w,y+h))
    img_resize = im_crop.resize((250, 333))
    img_resize.save(savepath, quality=95)
    return json.dumps( out )
    #if filename=='list':
    #    #return '<br />\n'.join([ "<a href='/data/picsId/originals_20211022/%s'>%s</a>" % ( x,x ) for x in
    #    return '<br />\n'.join([ "<a href='/image/%s'>%s</a>" % ( x,x ) for x in sorted(os.listdir('cache/picsId/originals_20211022')) ])
#################################################################################################################
#################################################################################################################
######
###### server infrastructure
######
def server_save(key,value):
    """Append a 'key=value' line to the server's data file.

    NOTE(review): `datafile2` is not defined anywhere in this module's visible
    code -- confirm it exists as a module global elsewhere, otherwise this
    raises NameError.  The file handle is also never explicitly closed.
    """
    codecs.open(datafile2,'a').write( "%s=%s\n" % (str(key),str(value)))
def server_dispatch_json(function_name,arg='', arg2=''):
    """Call module-level function *function_name* with 2, 1, or 0 arguments.

    Tries (arg, arg2) first; on ANY exception falls back to (arg), then ().
    The '"" +' prefix deliberately forces a TypeError when the target returns
    a non-string, which cascades to the next arity.  Returns the function's
    string result, or a JSON error blob listing all three exceptions.
    NOTE(review): byte-identical twin of server_dispatch below -- candidates
    for merging.
    """
    print("Looking for function: %s. arg:%s. arg2:%s." % (function_name, arg, arg2))
    try:
        result = "" + globals()[function_name](arg, arg2)
        print("doing 2 args")
        return result
    except Exception as e:
        print("Error with that: %s" % str(e))
        try:
            result = "" + globals()[function_name](arg) #
            print("doing 1 arg")
            return result
        except Exception as f:
            print("Error with that: %s" % str(f))
            try:
                result = globals()[function_name]()
                print("doing 0 arg")
                return result
            except Exception as gg:
                print("Error with that: %s" % str(gg))
                return json.dumps({'result':'failed: exception', 'e1':str(e), 'e2':str(f), 'e3':str(gg)}, indent=2)
def server_dispatch(function_name,arg='', arg2=''):
    """Call module-level function *function_name* with 2, 1, or 0 arguments.

    Tries (arg, arg2) first; on ANY exception falls back to (arg), then ().
    The '"" +' prefix deliberately forces a TypeError when the target returns
    a non-string, which cascades to the next arity.  Returns the function's
    string result, or a JSON error blob listing all three exceptions.
    NOTE(review): byte-identical twin of server_dispatch_json above.
    """
    print("Looking for function: %s. arg:%s. arg2:%s." % (function_name, arg, arg2))
    try:
        result = "" + globals()[function_name](arg, arg2)
        print("doing 2 args")
        return result
    except Exception as e:
        print("Error with that: %s" % str(e))
        try:
            result = "" + globals()[function_name](arg) #
            print("doing 1 arg")
            return result
        except Exception as f:
            print("Error with that: %s" % str(f))
            try:
                result = globals()[function_name]()
                print("doing 0 arg")
                return result
            except Exception as gg:
                print("Error with that: %s" % str(gg))
                return json.dumps({'result':'failed: exception', 'e1':str(e), 'e2':str(f), 'e3':str(gg)}, indent=2)

223
stats.py Normal file
View File

@ -0,0 +1,223 @@
def grades_rundown():
    """Python 2 script: dump per-course teacher/grade stats for a term to CSV.

    Walks every active course in term 23, collects teacher names and G-numbers,
    and writes one row per course to grades_out.csv plus a comma-separated
    teacher id list.  The per-student grade statistics block is currently
    disabled (the triple-quoted string below).  Relies on module globals:
    load_users, users_by_id, getCoursesInTerm, course_enrollment, csv, pd.
    """
    global results, users_by_id
    load_users()
    results = []
    all_sem_courses = []
    ids_out = open('all_teachers_by_goo','w')
    all_ids = {}
    # for the current or given semester's shells (really, only active ones)
    with open('grades_out.csv','wb') as f:
        w = csv.DictWriter(f, 'id,name,teacher,mean,median,count,count_gt70,grades,avg_activity_time'.split(','))
        w.writeheader()
        #for c in all_sem_courses:
        courses = getCoursesInTerm(term=23,show=0,active=1)
        for C in courses:
            activity_time_total = 0.0
            course_info = {'id':str(C['id']),'name':C['name'],'grades':[], 'teacher':[] }
            #print str(C['id']) + "\t " + C['name']
            emts = course_enrollment(C['id'])
            for k,E in emts.items():
                #print E
                if E['type'] == 'TeacherEnrollment':
                    course_info['teacher'].append(users_by_id[E['user_id']]['name'])
                    all_ids[E['sis_user_id']] = 1
            # Disabled grade-statistics block kept verbatim for future revival:
            """ if 'grades' in E and E['grades']['current_score']:
            #print str(E['grades']['final_score']) + ", ",
            #print str(E['grades']['current_score']) + ", ",
            course_info['grades'].append(E['grades']['current_score'])
            activity_time_total += E['total_activity_time']
            if course_info['grades']:
            s = pd.Series(course_info['grades'])
            course_info['mean'] = s.mean()
            course_info['median'] = s.median()
            course_info['count'] = len(s.values)
            course_info['count_gt70'] = (s > 70.0).count()
            course_info['avg_activity_time'] = activity_time_total / len(s.values)
            else:
            course_info['mean'] = 0
            course_info['median'] = 0
            course_info['count'] = 0
            course_info['count_gt70'] = 0
            course_info['avg_activity_time'] = 0"""
            #print course_info
            all_sem_courses.append(course_info)
            w.writerow(course_info)
            f.flush()
    # get a grade (final? current?) for each student
    for k,v in all_ids.items():
        if k: ids_out.write(k + ', ')
    # sanity check to make sure grading is actually happening in the shell
    # report an average, median, and buckets
def class_logs():
    """Python 2: rank this term's published courses by average student login recency.

    Fetches all published courses in enrollment term 14, then each course's
    recent_students list, averages how long ago students logged in, and
    prints courses sorted most-recently-active first.  Relies on module
    globals: url, fetch, results, how_long_ago.
    """
    global results
    # 1. Search the current semester and the misc semesters for a list of courses
    # that we want to check for users/activity.
    #target = url + '/api/v1/accounts/1/terms' # list the terms
    target = url + '/api/v1/accounts/1/courses?published=true&enrollment_term_id=14'
    print "Getting term classes."
    while target:
        target = fetch(target)
    print "\n\n\n"
    term_results = results
    full_results = []
    for x in term_results:
        results = []
        # now see who's logged in recently:
        target = url + '/api/v1/courses/' + str(x['id']) + '/recent_students'
        print "Getting class id: ", str(x['id'])
        fetch(target)
        if len(results):
            #print results
            LL = [ how_long_ago(z['last_login']) for z in results ]
            # 9999 = sentinel average when a course has no recent logins
            avg = 9999
            if len(LL): avg = sum(LL) / len(LL)
            d = { 'id':x['id'], 'avg':avg, 'name':x['name'] }
            full_results.append(d)
    sorted_results = sorted(full_results, key=lambda k: k['avg'])
    for x in sorted_results:
        print x['id'], "\t", str(x['avg']), "\t", x['name']
def user_logs():
    """Python 2: dump one hard-coded user's Canvas page_views to CSV.

    NOTE(review): writes through `f`, which is never opened in this function
    or anywhere visible in the module -- confirm it is a global file handle
    opened elsewhere, otherwise this raises NameError after fetching.
    """
    global url, users_by_id, results
    # hard-coded target user id
    target_user = "6357"
    load_users()
    results = []
    target = url + '/api/v1/users/' + target_user + '/page_views?per_page=200'
    while target:
        print target
        target = fetch(target)
    # have all student's hits. Filter to only this class
    #results = filter(match59,results)
    times = []
    print users_by_id[ int(target_user) ]
    f.write(str(users_by_id[ int(target_user) ]) + "\n")
    f.write( "link,updated_at,remote_ip,url,context_type,user_agent,action\n")
    for hit in results:
        L = [hit['links']['user'],hit['updated_at'],hit['remote_ip'],hit['url'],hit['context_type'],hit['user_agent'],hit['action']]
        L = map(str,L)
        f.write( ",".join(L) + "\n" )
def recent_logins():
    """Python 2: fetch authentication audit events for a fixed 5-minute window.

    Relies on module globals: url, fetch_dict, results_dict.
    """
    global results, url, results_dict
    # hard-coded audit window
    p = { 'start_time':'2017-08-31T00:00:00Z', 'end_time':'2017-08-31T00:05:00Z'}
    target = url + "/api/v1/audit/authentication/accounts/1"
    results_dict = {}
    resp = fetch_dict(target,p)
    print resp
    print results_dict
def userHitsThisSemester(uid=2):
    """Python 2: print all Canvas page views for user *uid* since the semester start.

    Relies on module globals: url, fetch, results.
    """
    # hard-coded semester start timestamp
    begin = "20170820T0000"
    t = url + "/api/v1/users/" + str(uid) + "/page_views?start_time=" + str(begin)
    while(t): t = fetch(t)
    print json.dumps(results, indent=4, sort_keys=True)
def getCurrentActivity(): # a dict
    """Python 2: merge daily activity (views/participations) across several terms.

    Fetches /analytics/terms/<id>/activity for a fixed list of term ids
    (sp17, su17, su17b, fa17, sp18, committee, dev shells), sums the per-date
    counters, and writes the merged series to canvas/daily.json in the legacy
    {'by_date': [...]} shape.  Relies on module globals: url, fetch_dict,
    results_dict.
    """
    # CURRENT ACTIVITY
    #r = requests.get(url + '/api/v1/accounts/1/analytics/current/activity', headers = header )
    #t = url + '/api/v1/accounts/1/users?per_page=500'
    # analytics/terms/:term_id/activity
    #t = url + '/api/v1/accounts/1/analytics/current/statistics'
    global results_dict
    t = url + '/api/v1/accounts/1/analytics/terms/11/activity'
    while(t): t = fetch_dict(t)
    sp17 = results_dict['by_date']
    results_dict = {}
    t = url + '/api/v1/accounts/1/analytics/terms/14/activity'
    while(t): t = fetch_dict(t)
    su17 = results_dict['by_date']
    results_dict = {}
    t = url + '/api/v1/accounts/1/analytics/terms/15/activity'
    while(t): t = fetch_dict(t)
    su17b = results_dict['by_date']
    results_dict = {}
    t = url + '/api/v1/accounts/1/analytics/terms/18/activity'
    while(t): t = fetch_dict(t)
    fa17 = results_dict['by_date']
    results_dict = {}
    t = url + '/api/v1/accounts/1/analytics/terms/21/activity'
    while(t): t = fetch_dict(t)
    sp18 = results_dict['by_date']
    results_dict = {}
    t = url + '/api/v1/accounts/1/analytics/terms/7/activity'
    while(t): t = fetch_dict(t)
    cmte = results_dict['by_date']
    results_dict = {}
    t = url + '/api/v1/accounts/1/analytics/terms/6/activity'
    while(t): t = fetch_dict(t)
    dev = results_dict['by_date']
    results_dict = {}
    # Sum views/participations per calendar date across all fetched terms.
    master_list_by_date = {}
    for sem in [sp17,su17,su17b,fa17,sp18,cmte,dev]:
        #print sem
        for record in sem:
            print record
            date = record['date']
            if date in master_list_by_date:
                master_list_by_date[date]['participations'] += record['participations']
                master_list_by_date[date]['views'] += record['views']
            else:
                master_list_by_date[date] = {}
                master_list_by_date[date]['date'] = date
                master_list_by_date[date]['participations'] = record['participations']
                master_list_by_date[date]['views'] = record['views']
    out = open('canvas/daily.json','w')
    # want to match the old, funny format
    by_date = []
    my_out = {'by_date':by_date}
    for day in master_list_by_date.keys():
        by_date.append(master_list_by_date[day])
    out.write(json.dumps(my_out,indent=2))
def externaltool(): # a list
    """List every external (LTI) tool configured on Canvas account 1 and
    print the accumulated ``results`` list.

    Pages via the module-level ``fetch``.  The commented-out PUT shows how
    a tool's course-navigation settings were once updated by hand.
    Fix: print statement parenthesized for Python 3 compatibility.
    """
    #mydata = { "course_navigation[text]": "Video Chat",
    #           "course_navigation[default]": "false" }
    #t = url + '/api/v1/accounts/1/external_tools/704?course_navigation[text]=Video Chat&course_navigation[default]=false'
    #r = requests.put(t, headers=header)
    #print r.text
    t = url + '/api/v1/accounts/1/external_tools/'
    while t:
        t = fetch(t)
    print(results)

1418
tasks.py Normal file

File diff suppressed because it is too large Load Diff

52
temp.py Normal file
View File

@ -0,0 +1,52 @@
"""
fname = 'cache/teacherdata/activity/G00101483.json'
import json
import codecs
from collections import defaultdict as ddict
from dateutil.parser import parse as parse_dt
allact = json.loads( codecs.open(fname,'r','utf-8').read() )
unique_urls = set(funcy.pluck('url',allact))
date_hits = sorted(funcy.pluck('updated_at',allact))
date_hits = list(map(parse_dt, date_hits))
dontcare = open('cache/urls_i_dont_care.txt','r').readlines()
dd = ddict(int)
for k in allact: dd[ k['url'] ] += 1
dits = ddict(int)
for j in
urls_by_freq = [ (k, v) for k, v in sorted(ddd.items(), key=lambda item: item[1],reverse=True)]
top_five = [ (k, v) for k, v in sorted(ddd.items(), key=lambda item: item[1],reverse=True)][:5]
"""
import csv
ilearn_version = csv.reader(open('cache\teacherdata\staff_main_table.csv','r').read())
old_dir = csv.reader(open('cache/personnel2020_04_12.csv'), delimiter=',')
dept1_crxn = {r[0]:r[1] for r in csv.reader(open('cache/dir_corrections.csv'), delimiter=',') }
dept2_crxn = {r[0]:r[2] for r in csv.reader(open('cache/dir_corrections.csv'), delimiter=',') }
title_crxn = {r[0]:r[3] for r in csv.reader(open('cache/dir_corrections.csv'), delimiter=',') }
newfile = open('cache/dir_new.txt','w')
depts = []
for r in old_dir:
old_dept = r[2]
if old_dept in dept1_crxn:
new_one = dept1_crxn[old_dept]
if dept2_crxn[old_dept]: new_one += '/' + dept2_crxn[old_dept]
if title_crxn[old_dept]: new_one += '/' + title_crxn[old_dept]
r[2] = new_one
newfile.write('\t'.join(r) + '\n')

136
tempget.py Normal file
View File

@ -0,0 +1,136 @@
#
#
# Fetcher for my otter and pinterest accounts. And whatever else.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import re
import time
from secrets import banner_url1, banner_url2, GOO, GOO_PIN, otter_login, otter_pw
# Use Firefox and log in to ssb and get full schedule
def login():
    """Drive Firefox through the Banner self-service login and the
    class-search flow, returning the final results page's HTML ('' on
    failure).

    Credentials and URL come from secrets (banner_url2, GOO, GOO_PIN).
    NOTE(review): ``SEMESTER`` is not defined anywhere in this file -- the
    ``select_by_visible_text(SEMESTER)`` call will raise NameError when the
    term dropdown is found; confirm where it should come from.
    NOTE(review): if ``webdriver.Firefox()`` itself raises, ``driver`` is
    unbound and the ``finally`` block's ``driver.quit()`` raises NameError.
    """
    #todo: my data here.... secret
    url = banner_url2
    un = GOO
    pw = GOO_PIN
    text = ''
    try:
        driver = webdriver.Firefox()
        driver.get(url)
        # Sign-on form: fill username/password, submit the login form.
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys(un)
        driver.find_element_by_name("password").send_keys(pw)
        driver.find_element_by_name("loginForm").submit()
        driver.implicitly_wait(5)
        print(driver.title)
        # Navigate: Students -> Registration -> Search for Classes.
        driver.find_element_by_link_text("Students").click()
        driver.implicitly_wait(5)
        print(driver.title)
        driver.find_element_by_link_text("Registration").click()
        driver.implicitly_wait(5)
        print(driver.title)
        driver.find_element_by_link_text("Search for Classes").click()
        driver.implicitly_wait(15)
        print(driver.title)
        # Pick the term from the p_term dropdown, then submit the term form.
        dd = Select(driver.find_element_by_name("p_term"))
        if (dd):
            dd.select_by_visible_text(SEMESTER)
        driver.find_element_by_xpath("/html/body/div/div[4]/form").submit()
        driver.implicitly_wait(15)
        print(driver.title)
        # Search-form buttons located positionally by XPath / name.
        driver.find_element_by_xpath("/html/body/div/div[4]/form/input[18]").click()
        driver.implicitly_wait(10)
        print(driver.title)
        driver.find_element_by_name("SUB_BTN").click()
        driver.implicitly_wait(10)
        print(driver.title)
        text = driver.page_source
    except Exception as e:
        print("Got an exception: ", e)
    finally:
        print("")
        driver.quit()
    return text
def filename_friendly(str):
    """Return *str* made safe for use in a filename: runs of whitespace
    become single underscores and everything that is not a letter, digit
    or underscore is dropped.

    Fix: the character filter previously kept ' ' (space) instead of '_',
    so the underscores just inserted by the substitution were deleted
    again and words ran together ("a b" -> "ab").  It now keeps '_'.
    (Parameter name kept for backward compatibility although it shadows
    the builtin.)
    """
    str1 = re.sub(r'\s+', '_', str)
    return "".join([c for c in str1 if c.isalpha() or c.isdigit() or c == '_']).rstrip()
def otter():
    """Log in to otter.ai with Selenium and save every conversation to
    otter/<index>.txt (title line, keywords line, then transcript text).

    NOTE(review): ``myfile`` builds a nicer date+title filename but is
    never used -- transcripts are actually written by list index.
    """
    driver = webdriver.Firefox()
    driver.get("https://otter.ai/signin")
    #assert "Python" in driver.title
    # Two-step login: email field first, then the password field appears.
    elem = driver.find_element_by_css_selector('#mat-input-0')
    elem.clear()
    elem.send_keys(otter_login)
    elem.send_keys(Keys.RETURN)
    elem = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#mat-input-1")))
    elem.clear()
    elem.send_keys(otter_pw)
    elem.send_keys(Keys.RETURN)
    time.sleep(5)
    #driver.implicitly_wait(15)
    driver.get("https://otter.ai/my-notes")
    driver.implicitly_wait(10)
    # Collect the conversation titles shown on the notes list page.
    items = driver.find_elements_by_css_selector('div.__conversation-title')
    print("I found %i conversations" % len(items))
    titles = []
    for i in items:
        print(i.text)
        titles.append(i.text)
    count = len(items)
    n = 0
    # Visit each conversation; the list is re-fetched after every visit
    # because the previous element handles go stale on navigation.
    while n < count:
        items[n].click()
        element = WebDriverWait(driver, 15).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "div.conversation-detail__content")))
        date_elem = driver.find_element_by_css_selector('.conversation-detail__title__meta')
        kw_elem = driver.find_element_by_css_selector('.conversation-detail__title__keywords-list')
        # NOTE(review): computed but unused (see docstring).
        myfile = filename_friendly(date_elem.text) + '_' + filename_friendly(titles[n]) + '.txt'
        ff = open('otter/%i.txt' % n, 'w')
        ff.write("Title: %s\n" % titles[n])
        ff.write("Keywords: %s\n\n" % kw_elem.text)
        ff.write(element.text)
        ff.close()
        driver.get("https://otter.ai/my-notes")
        driver.implicitly_wait(10)
        items = driver.find_elements_by_css_selector('div.__conversation-title')
        n += 1
    driver.close()
    print("OK")
#otter()
# Script entry point: run the Banner login flow and dump the page HTML.
print(login() )

444
templates.py Normal file
View File

@ -0,0 +1,444 @@
import os, re, codecs
from pipelines import get_doc, get_doc_generic, put_file
# build web pages from fragments
output_type = ".php" # use php when uploading
output_type2 = ".html" # plain-HTML copy written alongside the .php version
which_template = "/template.html"   # site template for top-level folders
which_template2 = "/template2.html" # template used when the folder is nested (depth > 0)
masonry_template = """ <div class="masonry-item">
<div class="masonry-content">
<a href="%%LINK%%">
<img src="%%IMGSRC%%"
alt="%%IMGALT%%"></a>
<h3 class="masonry-title">%%TITLE%%</h3>
<p class="masonry-description">%%CARDBODY%%</p>
</div>
</div>"""
def item_to_masonry(item):
# link title img imgalt desc
#print(item)
#print(item[1] + "\n" )
ix = {'TITLE': "<a href='%s'>%s</a>" % (item[0], item[1]),
'IMGSRC': item[2] or 'http://www.gavilan.edu/_files/img/blank.gif', 'IMGALT': item[3],
'CARDBODY': item[4], 'LINK': item[0] }
output = ''
for L in masonry_template.split('\n'):
match = re.search(r'%%(\w+)%%',L)
if match:
tag = match.group(1)
#print("Found a tag: %s" % tag)
line = re.sub(r'%%\w+%%', ix[tag], L)
output += line
else:
output += L
return output
def try_untemplate():
    """Interactively convert saved www-mirror .php pages into template
    fragments (template/<name>.html) containing BREADCRUMB=, TITLE= and
    ARTICLE= lines that make()/do_template() can re-render.

    Workflow: choose a mirror folder, choose one or more .php files,
    mask out their PHP blocks, then pull breadcrumb / h1 / <article>
    content with BeautifulSoup and write the fragment.
    """
    from bs4 import BeautifulSoup as bs
    import bs4
    dir1 = 'C:/Users/peter/Documents/gavilan/www mirror'
    # Numbered menu of candidate folders; student/ and staff/ are listed
    # one level deeper.
    j = 0
    j_in_dir = []
    for x in os.listdir(dir1):
        if x in ['student','staff']:
            for xy in os.listdir(dir1+'/'+x):
                j+= 1
                print("%i.\t%s" % (j,x+'/'+xy))
                j_in_dir.append(x+'/'+xy)
        else:
            j+= 1
            print("%i.\t%s" % (j,x))
            j_in_dir.append(x)
    dir2 = j_in_dir[int(input("Choose a folder to look in: "))-1]
    dir = dir1 + '/' + dir2
    # Numbered menu of the .php files inside the chosen folder.
    i = 0
    f_in_dir = []
    for x in os.listdir(dir):
        if x.endswith('php'):
            i+= 1
            print("%i.\t%s" % (i,x))
            f_in_dir.append(x)
    choices = input("Choose inputs. Separate with a space: ")
    for C in choices.split(" "):
        #choice = int( input("Choose a page to make into template: ") ) - 1
        choice = int( C ) - 1
        print(f_in_dir[choice])
        raw_html_in = open(dir + "/" + f_in_dir[choice],'r').read()
        # PHP blocks would confuse the HTML parser: swap each one for a
        # sentinel string now and splice them back after parsing.
        php_sig = '!!!PHP!!!'
        php_elements = []
        def php_remove(m):
            # re.sub callback: stash the PHP block, leave the sentinel.
            php_elements.append(m.group())
            return php_sig
        def php_add(m):
            # re.sub callback: restore blocks in their original order.
            return php_elements.pop(0)
        # Pre-parse HTML to remove all PHP elements
        html = re.sub(r'<\?php.*?\?>', php_remove, raw_html_in, flags=re.S+re.M)
        # Try just poppin the first php tag. We probably leave it behind...
        php_elements.pop(0)
        bb = bs(html,'html.parser')
        if not os.path.isdir(dir + '/template'):
            os.mkdir(dir + '/template',0o777)
        output_f = '.'.join(f_in_dir[choice].split('.')[:-1]) + '.html'
        output = open( dir + '/template/' + output_f, 'w', encoding='utf-8')
        # Breadcrumb trail: collapse whitespace, keep only the last crumb.
        b = bb.find(id='breadcrumbs').get_text()
        b = re.sub(r'\s+',' ',b)
        parts = b.split(' > ')
        b = parts[-1]
        c = bb.find('h1',class_='page-heading').get_text()
        a = bb.find('article')
        a.div.extract() # the first div has the h1 header
        # Serialize the article children; comments and bare strings need
        # special handling because prettify() only exists on tags.
        a_out = ""
        for ea in a.contents:
            try:
                a_out += ea.prettify(formatter="html")
            except:
                if type(ea) == bs4.element.Comment:
                    a_out += "<!-- %s -->\n" % ea.string
                else:
                    a_out += ea.string + "\n"
        # some article cleanup
        a_out = re.sub( r'\n{3,}','\n\n',a_out)
        a_out = re.sub( r'(&#160;)+',' ',a_out)
        a_out = re.sub(php_sig, php_add, a_out)
        print("breadcrumb: %s" % b)
        print("\n\ntitle: %s\n" % c)
        #print("\n\narticle: %s" % a_out.strip())
        output.write("BREADCRUMB=%s\n" % b)
        output.write("TITLE=%s\n" % c)
        output.write("ARTICLE=%s" % a_out)
        output.close()
def do_template(temp, source, side):
    """Fill the %%TAG%% placeholders in the template lines *temp* from the
    KEY=value lines in *source*, returning the rendered page as a string.

    Source lines before 'ARTICLE=' define substitutions (ITEM= lines are
    rendered through item_to_masonry and collected); everything after the
    ARTICLE= line is the article body, inside which %%ITEMS%% expands to
    the collected cards.  *side* is joined to form the SIDEBAR value.
    Template lines of 200+ characters are copied through untouched.
    """
    subs = {'BANNER': 'http://www.gavilan.edu/_files/img/blank.gif',
            'SIDEBAR': ''.join(side)}
    in_article = 0
    items = ""
    rendered = ""
    for line in source:
        if in_article:
            # Article body: expand %%ITEMS%% in place, pass the rest through.
            if re.search('%%ITEMS%%', line):
                subs['ARTICLE'] += items
            else:
                subs['ARTICLE'] += line
        else:
            parts = line.split('=', 1)
            if parts[0] == 'ITEM':
                items += item_to_masonry(parts[1].split('|')) + "\n"
            if parts[0] == 'ARTICLE':
                subs['ARTICLE'] = ""
                in_article = 1
            else:
                # NOTE: also runs for ITEM lines, recording the raw value.
                subs[parts[0].strip()] = parts[1].strip()
    #subs['ITEMS'] = items
    #print("Building page with this: " + str(subs))
    for tline in temp:
        hit = None
        if len(tline) < 200:
            hit = re.search(r'%%(\w+)%%', tline)
        if hit:
            rendered += re.sub(r'%%\w+%%', subs[hit.group(1)], tline)
        else:
            rendered += tline
    return rendered
def remove_filetype(f):
    """Strip the final dot-extension from filename *f*.

    "page.old.php" -> "page.old"; a name with no dot yields "".
    """
    pieces = f.split('.')
    return '.'.join(pieces[:-1])
def make():
    """Render every fragment in <folder>/template/ through the site
    template, writing both a .php and a .html copy into <folder>.

    Interactive: lists the mirror folders (student/ and staff/ one level
    deeper), asks which to build, then feeds each template/*.html page
    plus the sidebar fragment through do_template().
    """
    dir1 = 'C:/Users/peter/Documents/gavilan/www mirror'
    # Numbered folder menu, same scheme as try_untemplate().
    j = 0
    j_in_dir = []
    for x in os.listdir(dir1):
        if x in ['student','staff']:
            for xy in os.listdir(dir1+'/'+x):
                j += 1
                print("%i.\t%s" % (j,x+'/'+xy))
                j_in_dir.append(x+'/'+xy)
        else:
            j+= 1
            print("%i.\t%s" % (j,x))
            j_in_dir.append(x)
    dir2 = j_in_dir[int(input("Choose a folder to look in: "))-1]
    in_dir = dir1 + '/' + dir2
    #in_dir = r"C:/Users/peter/Documents/gavilan/www mirror/finaid_2019"
    # how many slashes? Whats the depth? Any more than zero, start adding ../ s.
    depth = dir2.count("/")
    print("Depth is %i\n\n" % depth)
    src = in_dir + r"/template/"
    sidebar = ""
    # Nested folders use the second template (relative ../ asset paths).
    template = dir1 + which_template
    if depth:
        template = dir1 + which_template2
    # Partition template/ contents into the sidebar fragment and the pages.
    pages = []
    for F in os.listdir(src):
        if re.search(r'sidebar',F):
            sidebar = F
        #elif re.search(r'template',F):
        #    template = F
        elif F.endswith('.html'):
            pages.append(F)
    print("Template: %s\nSidebar: %s\nPages: %s" % (template,sidebar,str(pages)))
    template_text = open(template,'r').readlines()
    side_txt = open(src + sidebar, 'r').readlines()
    for P in pages:
        in1_text = open(src + P, 'r').readlines()
        out_file = open(in_dir + "/" + remove_filetype(P) + output_type , 'w')
        out_file2 = open(in_dir + "/" + remove_filetype(P) + output_type2 , 'w')
        print(P)
        out_file.write( do_template( template_text, in1_text, side_txt) )
        out_file.close()
        out_file2.write( do_template( template_text, in1_text, side_txt) )
        out_file2.close()
def txt_2_table():
    """Convert the counseling who-to-call text file into HTML.

    Tiny markup language in the input: the token 'NT' starts a new table
    (header row first), 'ET' ends a table, '|' separates cells; lines
    outside a table become <p> paragraphs.  Output is written to
    who_call_out.txt.html next to the input.
    """
    input = open("C:/Users/peter/Documents/gavilan/www mirror/counseling_2019/who_call.txt",'r').readlines()
    output = ''
    # state: '' = outside a table, 'begintable' = header row of a new
    # table, 'intable' = inside a table body.
    state = ''
    for L in input:
        parts = L.split(r' ')
        if state=='begintable': state= 'intable'
        if state=='': output += "<p>"
        if state=='intable': # in a table and a line is beginning
            output += "<tr><td>"
        for P in parts:
            P = P.strip()
            print(P)
            if P=='NT':
                # New table: close the previous one if we were inside it.
                if state=='intable':
                    output += "</td></tr></table>\n"
                output += "<br /><table><tr><td>\n"
                state = 'begintable'
                continue
            elif P=='ET':
                output += '</td></tr></table>\n'
                state = ''
                continue
            elif P=='|': # between cells
                output += "</td><td>"
                continue
            output += " " + P # the normal case - input to output
        if state=='intable': # in a table and a line just ended
            output += "</td></tr>\n"
        if state=='begintable': # in a table and the header just ended
            output += "</td></tr>\n"
            state = 'intable'
        if state=='': output += "</p>"
    output = open("C:/Users/peter/Documents/gavilan/www mirror/counseling_2019/who_call_out.txt.html",'w').write(output)
# https://docs.google.com/document/d/1Jw3rSGxuCkujMLrm-5p_zxSzCQavfwo_7Esthjzg0rQ/edit?usp=sharing
def studenttech_faq():
    """Regenerate the student tech FAQ JSON from its Google Doc and upload
    it to the website.

    The commented-out block below is the older approach that spliced the
    doc into an HTML template; kept for reference::

        f = "../www mirror/student/online/template/tech_faq.html"
        input = open(f,'r')
        lines = input.readlines()
        input.close()
        output = open(f,'w')
        for L in lines:
            output.write(L )
            if re.search('<!--START-->',L):
                break
        output.write( get_doc('1Jw3rSGxuCkujMLrm-5p_zxSzCQavfwo_7Esthjzg0rQ', 1) )

    Fix: the output file is now closed deterministically via ``with``.
    """
    with codecs.open('qanda_student/public/questions.json', 'w', 'utf-8') as out:
        out.write(get_doc_generic('1Jw3rSGxuCkujMLrm-5p_zxSzCQavfwo_7Esthjzg0rQ', bracket=0, verbose=0))
    put_file('/gavilan.edu/student/', 'qanda_student/public/', 'questions.json')
    print("I uploaded the questions, but remember to do the images too if they changed.")
# https://docs.google.com/document/d/1tI_b-q75Lzu25HcA0GCx9bGfUt9ccM8m2YrrioDFZcA/edit?usp=sharing
def de_faq():
    """Regenerate the Distance Ed (Canvas help) FAQ JSON from its Google
    Doc and upload it.

    The commented-out block below is the older template-splice approach;
    kept for reference::

        f = "cache/faq_template.html"
        input = codecs.open(f,'r','utf-8')
        lines = input.readlines()
        input.close()
        output = codecs.open('cache/de_teach_faq.html','w','utf-8')
        for L in lines:
            output.write(L )
            if re.search('<!-- BEGIN -->',L):
                output.write( get_doc_generic('1tI_b-q75Lzu25HcA0GCx9bGfUt9ccM8m2YrrioDFZcA', bracket=0,verbose=1))

    Fix: the output file is now closed deterministically via ``with``.
    """
    with codecs.open('qanda/public/questions.json', 'w', 'utf-8') as out:
        out.write(get_doc_generic('1tI_b-q75Lzu25HcA0GCx9bGfUt9ccM8m2YrrioDFZcA', bracket=0, verbose=0))
    put_file('/gavilan.edu/staff/tlc/canvas_help/', 'qanda/public/', 'questions.json')
    print("I uploaded the questions, but remember to do the images too if they changed.")
def degwork_faq():
    """Rebuild the DegreeWorks FAQ page: copy the template up to and
    including the <!--START--> marker, then append a UTF-8 meta tag plus
    the FAQ body pulled from Google Docs.

    Fix: both file handles are now closed deterministically via ``with``
    (the rewritten page was previously never closed/flushed explicitly).
    """
    f = "../www mirror/counseling_2019/template/degreeworks.html"
    with open(f, 'r') as src:
        lines = src.readlines()
    with open(f, 'w') as output:
        for L in lines:
            output.write(L)
            if re.search('<!--START-->', L):
                break
        output.write('<meta charset="utf-8" />\n' + get_doc('1ctmPkWwrIJ1oxlj8Z8UXYjijUzMW2VxnsVDSE1KfKME'))
def vrc_faq():
    """Rebuild the Veterans Resource Center FAQ page: copy the template up
    to and including the <!--START--> marker, then append a UTF-8 meta
    tag plus the FAQ body pulled from Google Docs.

    Fix: file handles closed deterministically via ``with``.
    """
    # https://docs.google.com/document/d/1anAmnSusL-lTSAz-E4lcjlzq1CA8YJyUfUHxnKgmJEo/edit?usp=sharing
    f = "../www mirror/student/veterans/template/faq.html"
    with open(f, 'r') as src:
        lines = src.readlines()
    with open(f, 'w') as output:
        for L in lines:
            output.write(L)
            if re.search('<!--START-->', L):
                break
        output.write('<meta charset="utf-8" />\n' + get_doc('1anAmnSusL-lTSAz-E4lcjlzq1CA8YJyUfUHxnKgmJEo', verbose=1))
def counseling_faq():
    """Rebuild the Counseling FAQ page: keep the first three lines of the
    existing template file, then append the FAQ body from Google Docs.

    Fix: file handles closed deterministically via ``with`` (the output
    was previously never closed).
    """
    f = "../www mirror/counseling_2019/template/faq.html"
    with open(f, 'r') as src:
        lines = src.readlines()
    with open(f, 'w') as output:
        for L in lines[0:3]:
            output.write(L)
        output.write(get_doc('101iOplZearjv955FX2FX9AM6bUnkcryo7BShKuzE9tI'))
def finaid_faq():
    """Rebuild the Financial Aid FAQ page: keep the first three lines of
    the existing template file, then append the FAQ body from Google Docs.

    Fixes: file handles closed deterministically via ``with``; unused
    debug line counter removed.
    """
    f = "../www mirror/finaid_2019/template/faq.html"
    with open(f, 'r') as src:
        lines = src.readlines()
    with open(f, 'w') as output:
        for L in lines[0:3]:
            output.write(L)
        output.write(get_doc('1-FarjfyzZceezdSBXDHpP2cF_vaa9Qx6HvnIqwipmA4'))
def coun_loc():
    """Rebuild the Counseling location page: keep the first three lines of
    the existing template file, then append the body from Google Docs.

    Fixes: file handles closed deterministically via ``with``; unused
    debug line counter removed.
    """
    f = "../www mirror/counseling_2019/template/location.html"
    with open(f, 'r') as src:
        lines = src.readlines()
    with open(f, 'w') as output:
        for L in lines[0:3]:
            output.write(L)
        output.write(get_doc('1hxQZ9iXMWvQQtaoVlRgor9v4pdqdshksjeHD2Z4E6tg'))
def tutor_faq():
    """Rebuild the Tutoring (Learning Commons) FAQ page: keep the first
    three lines of the existing template file, then append the FAQ body
    from Google Docs.

    Fixes: file handles closed deterministically via ``with``; unused
    debug line counter removed.
    """
    f = "../www mirror/student/learningcommons/template/faq.html"
    with open(f, 'r') as src:
        lines = src.readlines()
    with open(f, 'w') as output:
        for L in lines[0:3]:
            output.write(L)
        output.write(get_doc('1gCYmGOanQ2rnd-Az2HWFjYErBm_4tp_RuJs6a7MkYrE', 1))
def test_repl():
    """Smoke-test the interactive REPL helper with a tiny sample dict."""
    from interactive import MyRepl
    c = MyRepl()
    c.set_my_dict( { "Peter": "thats me", "Mike": "a VP", "Pablo": "isn't here", "Mary": "Far away" })
    c.inputloop()
if __name__ == "__main__":
print ('')
options = { 1: ['Build www pages', make] ,
2: ['De-template an existing page', try_untemplate],
3: ['Text to table', txt_2_table],
4: ['Pull the Counseling FAQ from gdocs', counseling_faq] ,
5: ['Pull the DegreeWorks FAQ from gdocs', degwork_faq] ,
6: ['Pull the Finaid FAQ from gdocs', finaid_faq] ,
7: ['Pull the Tutoring FAQ from gdocs', tutor_faq] ,
8: ['Pull the Counseling Location page from gdocs', coun_loc] ,
9: ['Pull the student tech faq page from gdocs', studenttech_faq] ,
10: ['Pull the DE faq page from gdocs', de_faq] ,
11: ['Pull the VRC faq page from gdocs', vrc_faq] ,
12: ['Test a REPL', test_repl ],
}
for key in options:
print(str(key) + '.\t' + options[key][0])
print('')
resp = input('Choose: ')
# Call the function in the options dict
options[ int(resp)][1]()

171
templates/dir.html Normal file
View File

@ -0,0 +1,171 @@
<style>
/* Staff directory: compact sans-serif layout; each .line row shows six
   equal-width columns, sortable headers get a pointer cursor. */
body, select { font-family: arial; font-size: 0.75rem; }
.line span { display: inline-block; width: 16%; }
div.line { border-bottom: 1px solid lightgrey; margin-bottom: 4px; }
th { text-align:left; border-bottom: 1px solid lightgrey; cursor: pointer; }
.linehead span { font-weight: bold; }
</style>
<!--<script src='/data/gui/lib/lodash.js'></script>-->
<script src='/data/gui/lib/underscore-min.js'></script>
<script src='/data/gui/lib/vue.js'></script>
<script src='/data/gui/lib/jacks.min.js'></script> <!-- https://github.com/jccazeaux/jacks -->
<script src='/data/gui/lib/vue-good-table.min.js'></script>
<script>
// NOTE(review): nothing on this page draws a chart, so `margin` looks like
// a leftover from the d3 page this was copied from -- confirm before removal.
var margin = {top: 20, right: 20, bottom: 70, left: 40}
// Vue app state: `users` is filled from /api/personnel_fetch (below),
// `depts`/`titles` from /api/personnel_meta; `columns` configures the
// vue-good-table staff listing.
var appdata = { users:[], depts:[], titles:[],
columns: [
{
label: ' ',
field: 'picture',
},
{
label: 'Last',
field: 'last_name',
},
{
label: 'First',
field: 'first_name',
},
{
label: 'Department',
field: 'd1',
},
{
label: 'Phone',
field: 'phone_number',
},
{
label: 'Email',
field: 'email',
},
{
label: 'Room',
field: 'room',
},
],
}
Vue.config.devtools = true;
</script>
<div id="staff_dir">
<vue-good-table
:columns="columns"
:rows="users"
:search-options="{
enabled: true}">
</vue-good-table>
<br /><br />
<!--<div class="line linehead">
<span>Name</span>
<span>Title</span>
<span>Department</span>
<span>Old Department</span>
<span>Email</span>
<span>Phone</span>
</div><user-line v-for="p,i in users" v-bind:user="p"
v-bind:key="'usr_'+i"></user-line>-->
</div>
<script>
// popup-field: department <select> for one staff row; persists a change
// immediately via GET /api/update_pers_dept/<userid>/<selected>.
// Delimiters are [[ ]] -- presumably so server-side {{ }} templating can
// coexist with Vue (confirm; this page itself has no {{ }} markup).
Vue.component('popup-field', {
props: ['fieldname', 'values', // an array of objs: {'text':'t', 'value':'v' }
'init_val', 'userid'],
data: function() { return {'selected':this.init_val, 'a':'b'} },
delimiters: ['[[', ']]'],
methods: {
new_val: function() {
jacks().get("/api/update_pers_dept/" + this.userid + "/" + this.selected)
.header("Accepts", "application/json")
.send( function(resp) { console.log("Tried to update department.")
console.log(resp) } )
},
},
template: `<select v-model="selected" @change="new_val()" >
<option v-for="k in values" :value="k.id">[[ k.name ]]</option>
</select>`
})
// popup-title-field: identical widget, but saves via /api/update_pers_title.
Vue.component('popup-title-field', {
props: ['fieldname', 'values',
'init_val', 'userid'],
data: function() { return {'selected':this.init_val, 'a':'b'} },
delimiters: ['[[', ']]'],
methods: {
new_val: function() {
jacks().get("/api/update_pers_title/" + this.userid + "/" + this.selected)
.header("Accepts", "application/json")
.send( function(resp) { console.log("Tried to update title.")
console.log(resp) } )
},
},
template: `<select v-model="selected" @change="new_val()" >
<option v-for="k in values" :value="k.id">[[ k.name ]]</option>
</select>`
})
// user-line: one row of the hand-rolled listing (its markup is currently
// commented out above); title/department selects persist edits through
// the popup components.
Vue.component('user-line', {
props: [ 'user', ],
data: function () {
return { "a": "a", "b": "b" }
},
delimiters: ['[[', ']]'],
methods: {
// Debounced variant of the department update (300 ms).
update2: _.debounce( function(column, row, newval) {
jacks().get("/api/update_pers_dept/" + row + "/" + newval)
.header("Accepts", "application/json")
.send( function(resp) { console.log("Tried to update department.")
console.log(resp) } )
}, 300),
update: function(column, row, newval) { }
},
template: `<div class="line">
<span>[[ user.first_name ]] [[ user.last_name ]]</span>
<span>
<popup-title-field :fieldname="'title'" :values="this.$root.titles"
:init_val="user.titleid" :userid="user.id" ></popup-title-field>
</span>
<span>
<popup-field :fieldname="'dept1'" :values="this.$root.depts"
:init_val="user.dept1" :userid="user.id" ></popup-field>
</span>
<span>[[ user.old_dept ]]</span>
<span>[[ user.email ]]</span>
<span>[[ user.phone_number ]]</span>
</div>`
})
var vm = new Vue({
data: appdata,
el: '#staff_dir',
delimiters: ['[[', ']]'],
methods: {
pretty_session: function(ses) { },
remove: function (event) { console.log(event); console.log(this);
}},
computed: {
}
});
jacks().get("/api/personnel_fetch")
.header("Accepts", "application/json")
.send( function(resp) { vm.users = JSON.parse(resp.response)
console.log(vm.users) } )
jacks().get("/api/personnel_meta")
.header("Accepts", "application/json")
.send( function(resp) { var meta = JSON.parse(resp.response)
vm.depts = meta.depts
vm.titles = meta.titles
console.log(vm.depts) } )
console.log(filename)
</script>

112
templates/hello.html Normal file
View File

@ -0,0 +1,112 @@
<!doctype html>
<title>Welcome To Gavilan College</title>
<!-- Jinja: when a user's name/id is supplied, show the per-user header. -->
{% if name %}
<h1>iLearn Hits for: {{ name }}</h1>
<p> <b>canvas id:</b> {{ id }}
{% else %}
<h1>Hello, World!</h1>
{% endif %}
<!-- Presumably stops the local dev server -- confirm the /sd route. -->
<a href="/sd">Shutdown</a>
<script src='/data/gui/lib/d3.js'></script>
<script src='/data/gui/lib/lodash.js'></script>
<script>
// Bar chart of one user's iLearn page views per day, drawn with d3.
// Data source: /data/users/logs/<id>.csv (falls back to user 241).
var margin = {top: 20, right: 20, bottom: 70, left: 40},
filename= "/data/users/logs/{% if id %}{{ id }}{% else %}241{% endif %}.csv",
width = 600 - margin.left - margin.right,
height = 300 - margin.top - margin.bottom;
var parseDate = d3.timeParse("%Y-%m-%dT%H:%M:%S%Z") /* 2020-07-13T12:44:54Z,2020-07-13T12:44:54Z */
var parseDate2 = d3.timeParse("%Y-%m-%d") /* 2020-07-13 */
// Parse the date / time
var x = d3.scaleTime().range([0, width]);
var y = d3.scaleLinear().range([height, 0]);
// SVG canvas shifted by the margins.
var svg = d3.select("body").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform",
"translate(" + margin.left + "," + margin.top + ")");
d3.csv(filename).then(function(data) {
// Tag each hit with its calendar day (first 10 chars of created_at).
data.forEach(function(d) {
d.day = d.created_at.substring(0,10)
d.date = parseDate(d.created_at);
d.value = 1;
});
// Roll hits up to a count per day (d3.nest: the v4/v5 collection API).
var by_day = d3.nest().key( function(d) { return d.day }).rollup(function(v){return v.length;})
.entries(data);
by_day.forEach(function(d) {
d.key = parseDate2(d.key)
});
// Only chart activity from 2020 onward.
by_day = by_day.filter(function(d) { return d.key > parseDate2("2020-01-01")} )
console.log(by_day)
// Scale the range of the data
x.domain(d3.extent(by_day, function(d) { return d.key; }));
y.domain([0, d3.max(by_day, function(d) { return d.value; })]);
// Add the valueline path.
/*svg.append("path")
.data([by_day])
.attr("class", "line")
.attr("d", valueline);
*/
// Add the X Axis
svg.append("g")
.attr("class", "axis")
.attr("transform", "translate(0," + height + ")")
.call(d3.axisBottom(x)
.tickFormat(d3.timeFormat("%Y-%m-%d")))
.selectAll("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", ".15em")
.attr("transform", "rotate(-65)");
// Add the Y Axis
svg.append("g")
.attr("class", "axis")
.call(d3.axisLeft(y));
/*svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(xAxis)
.selectAll("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", "-.55em")
.attr("transform", "rotate(-90)" );
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 6)
.attr("dy", ".71em")
.style("text-anchor", "end")
.text("Value ($)");
*/
// One fixed-width bar per day.
svg.selectAll("bar")
.data(by_day)
.enter().append("rect")
.style("fill", "steelblue")
.attr("x", function(d) { return x(d.key); })
.attr("width", 10)
.attr("y", function(d) { return y(d.value); })
.attr("height", function(d) { console.log(height); console.log(d); console.log(height - y(d.value)); return height - y(d.value); });
});
/*d3.csv("/data/users/logs/241.csv").then(function(data) {
console.log(data[0]);
}); */
</script>

134
templates/images.html Normal file
View File

@ -0,0 +1,134 @@
<!doctype html>
<title>Welcome To Gavilan College</title>
<!-- Cropper.js provides the interactive crop box over the staff photo. -->
<link href="/data/gui/cropper.css" rel="stylesheet">
<script src="/data/gui/cropper.js"></script>
<style>
/* Two-pane layout: scrolling file list on the left (30%), editor right. */
body, select { font-family: arial; font-size: 0.75rem; }
.line span { display: inline-block; width: 16%; }
div.line { border-bottom: 1px solid lightgrey; margin-bottom: 4px; }
.linehead span { font-weight: bold; }
.clicky { cursor:pointer; padding:0.3em; }
button { padding:0.5em; margin:0.5em; }
#myapp img { max-width:100%; max-height:30rem; }
#list { width:30%; display:inline-block; position:absolute; top:0; left:0; height:100%; overflow:scroll; }
#ed { width:60%; display:inline-block; position:absolute; top:0; left:30%; height:90%;}
</style>
<script src='/data/gui/lib/lodash.js'></script>
<script src='/data/gui/lib/vue-max.js'></script>
<div id="pic_editor">
<div id="list">
<h1>Photo Cropper</h1>
<div class="clicky" v-for="ff,i in files_match" v-on:click="swap_image(i)">
<span v-if="is_checked(ff)"><b>x </b></span>
[[ff.conf_name]]
</div>
<div class="clicky" v-for="ff,i in files_no_match" v-on:click="swap_image_b(i)"><span v-if="is_checked(ff)"><b>x </b></span> [[ff]]</div>
</div>
<div id="ed">
<img id='target' src='' />
<button v-on:click="save_crop">Make Crop</button>
</div>
</div>
<script>
// Server-injected (Jinja) data: matched files carry staff-record fields,
// unmatched entries are bare filenames; `checked` lists already-cropped
// files.  autoescape is off because these are JSON blobs, not text.
{% autoescape false %}
var files_match = {{matches}}
var files_no_match = {{nomatches}}
var staff = {{staff}}
var checked = {{checked}}
{% endautoescape %}
var app = new Vue({
el: '#pic_editor',
data: {
msg: 'hello', active: false,
// NOTE(review): current_file holds a list *index* after swap_image()
// but a *filename* after swap_image_b() -- confirm which the
// /imagecrop route expects before relying on it.
current_file: 0,
//current_filename: '',
files_match: files_match,
files_no_match: files_no_match,
checked: checked,
picDir: '/data/picsStaffdir/', // '/data/picsId/originals_20211022/'
staff: staff,
cropper: '',
image: '',
},
delimiters: ['[[', ']]'],
watch: { },
methods: {
// current_filename: function() { return this.files_match[this.current_file].filename },
current_filename: function() { return this.files_no_match[this.current_file] },
is_checked: function(x) { console.log(x); return this.checked.includes(x) },
// Load a matched file (by index) into the cropper.
swap_image: function(i) {
console.log("Swap: " + i)
this.current_file = i
this.cropper.replace(this.picDir + this.files_match[i].filename)
//this.cropper.replace('/data/picsUpload/' + this.files_match[i].filename)
},
// Load an unmatched file into the cropper (stores the filename itself).
swap_image_b: function(i) {
console.log("Swap b: " + i)
// this.current_file = i
this.current_file = this.files_no_match[ i ]
this.cropper.replace(this.picDir + this.files_no_match[i])
},
// Send the integer crop box to the server:
// /imagecrop/<file>/<x>/<y>/<w>/<h>/<target>
// NOTE(review): current_file is sent as both <file> and <target>; the
// commented-out variant sent filename + conf_goo instead -- confirm.
save_crop: function() {
var self = this
console.log(this.cropper.getData(true))
cr = this.cropper.getData(true)
fetch('/imagecrop/' + this.current_file + '/' + cr.x + '/' + cr.y + '/' + cr.width + '/' + cr.height
+ '/' + this.current_file, { method: 'GET' }).then(function (response) {
// fetch('/imagecrop/' + this.current_filename() + '/' + cr.x + '/' + cr.y + '/' + cr.width + '/' + cr.height
//     + '/' + this.files_match[ this.current_file ].conf_goo, { method: 'GET' }).then(function (response) {
// The API call was successful!
if (response.ok) {
response.json().then( function(r2) {
console.log('I saved it yo')
console.log(r2.text)
self.checked.push(self.current_file)
} )
} else { return Promise.reject(response) }
}).then(function (data) {
}).catch(function (err) { console.warn('Something went wrong.', err); });
},
},
computed: { },
// Attach Cropper.js to the target <img> with a 3:4 portrait aspect.
mounted: function() {
this.image = document.getElementById('target');
this.cropper = new Cropper(this.image, {
aspectRatio: 3 / 4,
crop(event) { console.log(event) },
});
/*var self = this;
fetch('dir_api_tester.php', { method: 'GET' }).then(function (response) {
// The API call was successful!
if (response.ok) {
response.json().then( function(r2) {
self.user = r2;
} )
} else { return Promise.reject(response) }
}).then(function (data) {
}).catch(function (err) { console.warn('Something went wrong.', err); });
*/
}
})
</script>

197
templates/personnel.html Normal file
View File

@ -0,0 +1,197 @@
<!doctype html>
<title>Welcome To Gavilan College</title>
<!-- Jinja header: personalized when name/id are passed in. -->
{% if name %}
<h1>Editor for: {{ name }}</h1>
<p> <b>canvas id:</b> {{ id }}
{% else %}
<h1>Hello, World!</h1>
{% endif %}
<p>
This is a page. Vue is: 1. set up your data. fetch json of either 1 item or the whole list.
</p>
<style>
/* Same compact listing styles as the staff-directory page. */
body, select { font-family: arial; font-size: 0.75rem; }
.line span { display: inline-block; width: 16%; }
div.line { border-bottom: 1px solid lightgrey; margin-bottom: 4px; }
.linehead span { font-weight: bold; }
</style>
<!-- d3/lodash/vue plus jacks (XHR helper), zeroupload and vue-good-table. -->
<script src='/data/gui/lib/d3.js'></script>
<script src='/data/gui/lib/lodash.js'></script>
<script src='/data/gui/lib/vue.js'></script>
<script src='/data/gui/lib/jacks.min.js'></script> <!-- https://github.com/jccazeaux/jacks -->
<script src='/data/gui/lib/zeroupload.min.js'></script> <!-- https://github.com/jhuckaby/zeroupload -->
<script src='/data/gui/lib/vue-good-table.min.js'></script>
<script>
var margin = {top: 20, right: 20, bottom: 70, left: 40}
// CSV of this user's activity log (defaults to user 241); logged at the
// bottom of the page.
var filename= "/data/users/logs/{% if id %}{{ id }}{% else %}241{% endif %}.csv"
// App state: live personnel rows (users/depts/titles, loaded via the API
// calls at the bottom) plus a hard-coded demo table (columns/rows) for
// the vue-good-table example.
var appdata = { users:[], depts:[], titles:[],
columns: [
{
label: 'Name',
field: 'name',
},
{
label: 'Age',
field: 'age',
type: 'number',
},
{
label: 'Created On',
field: 'createdAt',
type: 'date',
dateInputFormat: 'yyyy-MM-dd',
dateOutputFormat: 'MMM do yy',
},
{
label: 'Percent',
field: 'score',
type: 'percentage',
},
],
rows: [
{ id:1, name:"John", age: 20, createdAt: '2020-09-08', score: 0.03343 },
{ id:2, name:"Jane", age: 24, createdAt: '2011-10-31', score: 0.03343 },
{ id:3, name:"Susan", age: 16, createdAt: '2011-10-30', score: 0.03343 },
{ id:4, name:"Chris", age: 55, createdAt: '2011-10-11', score: 0.03343 },
{ id:5, name:"Dan", age: 40, createdAt: '2011-10-21', score: 0.03343 },
{ id:6, name:"John", age: 20, createdAt: '2011-10-31', score: 0.03343 },
], }
Vue.config.devtools = true;
</script>
<h3>I'm making a vue app. Again. And I like it. </h3>
<p>1.1 Make your main div with id, and custom tags in it.</p>
<div id="sample">
<!-- Demo grid bound to the hard-coded rows above, followed by the
     editable per-user listing built from user-line components. -->
<vue-good-table
:columns="columns"
:rows="rows"></vue-good-table>
<br /><br />
<div class="line linehead">
<span>Name</span>
<span>Title</span>
<span>Department</span>
<span>Old Department</span>
<span>Email</span>
<span>Phone</span>
</div><user-line v-for="p,i in users" v-bind:user="p" v-bind:key="'usr_'+i"></user-line>
</div>
<p>2. Make some components</p>
<script>
// popup-field: department <select> for one staff row; persists a change
// immediately via GET /api/update_pers_dept/<userid>/<selected>.
Vue.component('popup-field', {
props: ['fieldname', 'values', // an array of objs: {'text':'t', 'value':'v' }
'init_val', 'userid'],
data: function() { return {'selected':this.init_val, 'a':'b'} },
delimiters: ['[[', ']]'],
methods: {
new_val: function() {
jacks().get("/api/update_pers_dept/" + this.userid + "/" + this.selected)
.header("Accepts", "application/json")
.send( function(resp) { console.log("Tried to update department.")
console.log(resp) } )
},
},
template: `<select v-model="selected" @change="new_val()" >
<option v-for="k in values" :value="k.id">[[ k.name ]]</option>
</select>`
})
// popup-title-field: identical widget, but saves via /api/update_pers_title.
Vue.component('popup-title-field', {
props: ['fieldname', 'values',
'init_val', 'userid'],
data: function() { return {'selected':this.init_val, 'a':'b'} },
delimiters: ['[[', ']]'],
methods: {
new_val: function() {
jacks().get("/api/update_pers_title/" + this.userid + "/" + this.selected)
.header("Accepts", "application/json")
.send( function(resp) { console.log("Tried to update title.")
console.log(resp) } )
},
},
template: `<select v-model="selected" @change="new_val()" >
<option v-for="k in values" :value="k.id">[[ k.name ]]</option>
</select>`
})
</script>
<p>3. Including the one that corresponds to the html / main div above. </p>
<script>
// <user-line>: one row of the personnel table -- name, editable title and
// department dropdowns, previous department, email, phone.
Vue.component('user-line', {
props: [ 'user', ],
// 'a'/'b' placeholders appear unused.
data: function () {
return { "a": "a", "b": "b" }
},
// [[ ]] avoids clashing with server-side {{ }} templating.
delimiters: ['[[', ']]'],
methods: {
// Debounced (300 ms) department save.
// NOTE(review): appears unused -- the popup components save themselves
// through their own new_val() handlers.
update2: _.debounce( function(column, row, newval) {
jacks().get("/api/update_pers_dept/" + row + "/" + newval)
.header("Accepts", "application/json")
.send( function(resp) { console.log("Tried to update department.")
console.log(resp) } )
}, 300),
// Intentionally a no-op placeholder.
update: function(column, row, newval) { }
},
template: `<div class="line">
<span>[[ user.first_name ]] [[ user.last_name ]]</span>
<span>
<popup-title-field :fieldname="'title'" :values="this.$root.titles"
:init_val="user.titleid" :userid="user.id" ></popup-title-field>
</span>
<span>
<popup-field :fieldname="'dept1'" :values="this.$root.depts"
:init_val="user.dept1" :userid="user.id" ></popup-field>
</span>
<span>[[ user.old_dept ]]</span>
<span>[[ user.email ]]</span>
<span>[[ user.phone_number ]]</span>
</div>`
})
// Root instance: renders #sample from the shared `appdata` object.
var vm = new Vue({
data: appdata,
el: '#sample',
delimiters: ['[[', ']]'],
methods: {
// Placeholder -- not implemented yet.
pretty_session: function(ses) { },
// Debug-only handler: logs the event and component.
remove: function (event) { console.log(event); console.log(this);
}},
computed: {
}
});
// Populate the table: fetch the personnel list, then dept/title metadata.
jacks().get("/api/personnel_fetch")
.header("Accepts", "application/json")
.send( function(resp) { vm.users = JSON.parse(resp.response)
console.log(vm.users) } )
jacks().get("/api/personnel_meta")
.header("Accepts", "application/json")
.send( function(resp) { var meta = JSON.parse(resp.response)
vm.depts = meta.depts
vm.titles = meta.titles
console.log(vm.depts) } )
// NOTE(review): `filename` is not defined in this script block -- this
// likely throws a ReferenceError unless another script defines it first.
console.log(filename)
</script>

View File

@ -0,0 +1,194 @@
<!doctype html>
<title>Welcome To Gavilan College</title>
{% if name %}
<h1>Editor for: {{ name }}</h1>
<p> <b>canvas id:</b> {{ id }}
{% else %}
<h1>Hello, World!</h1>
{% endif %}
<p>
This page demonstrates Vue, step 1: set up your data, then fetch JSON for either one item or the whole list.
</p>
<script>
Vue.config.devtools = true;
// Root state for the survey page.
// NOTE(review): `times`, `headers`, `colors` and the payload object `x`
// must be defined by an earlier (server-rendered) script -- they are not
// visible in this template; confirm they exist before this runs.
var appdata = { "allsessions": [],
"goo":"", "name":"", "uid":"",
"mysessions": [],
"questions": [],
"times":times, "headers":headers, "colors":colors, };
// Unpack the server payload `x` into the reactive state.
appdata.goo=x['user']['goo']; appdata.name = x['user']['name'];
appdata.uid = x['user']['id']; appdata.mysessions = x['mysessions'];
appdata.other = x['other'];
// Index survey questions by session id for per-card lookup.
appdata.questions = _.groupBy(x['questions'], function(z) { return z.ses_id; });
</script>
<p>1.1 Make your main div with id, and custom tags in it.</p>
<div id="sample">
<post-session v-for="ses in orderedSessions" :ses="ses" :qmodel="questions[parseInt(ses.session)]"></post-session>
</div>
<p>2. Make some components</p>
<script>
// A single text style question
// <t-question>: free-text survey question rendered as a <textarea>.
// Emits 'update' with (qid, newValue) whenever the answer text changes,
// so the parent <post-session> can persist it.
Vue.component('t-question', {
props: [ 'qq' ],
// NOTE(review): local 'answer' appears unused -- the textarea v-models
// qq.answer directly, mutating the prop (a Vue anti-pattern); confirm.
data: function () {
return {
"answer": ""
}
},
watch: {
// Relay prop edits upward as an 'update' event.
"qq.answer": function (val, oldVal) { this.$emit('update', this.qq.qid, val); },
},
template: `<div><span class="question">{{ qq.question }}</span><br />
<textarea v-model="qq.answer"></textarea>
<br />
</div>`
});
</script>
<p>3. Including the one that corresponds to the html / main div above. </p>
<script>
// <post-session>: card for one conference session. Shows a status badge,
// session details, its survey questions, and a certification checkbox.
// Answers and certification are saved via GET requests to conf3.php.
Vue.component('post-session', {
props: [ 'ses','qmodel' ],
data: function () {
// state machine: 'wait' (session not over yet), 'need' (survey due),
// 'ok' (already surveyed), 'ok_saved' (just saved), 'loading'.
var st = "wait";
if ( this.passed(this.ses) && this.ses.surveyed ) { st = "ok";}
else if ( this.passed(this.ses) ) { st = "need"; }
var is_cert = true;
if (this.ses.certified_at == null) { is_cert = false; }
return { "is_cert": is_cert, "state": st }
},
methods: {
// Save (c truthy) or clear (c falsy) the user's certification flag.
docert: function(c) { var card = this;
var d = 1;
if (!c) { d = null; }
var save_answer_url = "conf3.php?u=" + card.ses.user + "&cert=" + card.ses.ses_id + "&q=" + d;
console.log(save_answer_url);
$.ajax({
url: save_answer_url,
context: document.body
}).done(function(r) {
card.state = "ok_saved";
console.log('ajax returned'); console.log(r);
}); },
// Debounced (300 ms) save of one survey answer.
// NOTE(review): encodeURI leaves '&' and '+' unescaped, so answers
// containing them would corrupt the query string; confirm whether
// encodeURIComponent was intended.
update: _.debounce( function(qid,newval) { var card = this;
var save_answer_url = "conf3.php?u=" + card.ses.user + "&s=" + card.ses.ses_id + "&q=" + qid + "&answer=" + encodeURI(newval);
console.log(save_answer_url);
$.ajax({
url: save_answer_url, // u s q answer
context: document.body
}).done(function(r) {
card.state = "ok_saved";
// console.log('ajax returned'); console.log(r);
});
}, 300),
format_dt: format_dt,
// True once the session's start time is in the past (local clock).
passed: function(s) {
var t = s['starttime'].split(/[- :]/);
var d = new Date(t[0], t[1]-1, t[2], t[3], t[4], t[5]);
var milli = Date.now() - d;
if (milli > 0) { return true; }
return false; },
// Survey needed: session is over but not yet surveyed.
needs_svy(s) {
if ( this.passed(s) && ! s.surveyed ) { return true;}
return false;
}
},
template: `<div id="single_session" class="ses_card">
<div class="card_flags">
<template v-if="state=='need'">
<span class="badge badge-warning">Needs Survey</span>
<img src="map/images/icons/alert-circle.svg" alt="" width="22" height="22" title="Click to answer survey">
</template>
<template v-if="state=='ok'">
<span class="badge badge-success">OK</span>
<img src="map/images/icons/check.svg" alt="" width="22" height="22" title="OK">
</template>
<template v-if="state=='wait'">
<span class="badge badge-success">Not yet</span>
<img src="map/images/icons/check.svg" alt="" width="22" height="22" title="Wait till after session">
</template>
<template v-if="state=='loading'">
<span class="badge badge-success">OK</span>
<img src="map/images/spinner.gif" alt="" width="22" height="22" title="OK">
</template>
<template v-if="state=='ok_saved'">
<span class="badge badge-success">SAVED</span>
<img src="map/images/icons/check.svg" alt="" width="22" height="22" title="Saved">
</template>
</div>
<h3>Event: {{ ses.title }} </h3>
<span class="minor">Date:</span> {{ format_dt(ses) }} &nbsp; &nbsp; &nbsp;
<span class="minor">Location:</span> {{ ses.location }} <br />
<span class="minor">{{ ses.desc }}</span><br /><br />
<template v-for="q in qmodel">
<t-question v-if="q.type==1" :qq="q" @update="update"></t-question><br />
<n-question v-if="q.type==2" :qq="q" @update="update"></n-question><br />
</template><br />
<icertify :c="is_cert"></icertify>
<!--<template v-if=" ! passed(ses)">
</template>
<p v-else>This session hasn't occured yet. Check back after it to do the survey.</p>-->
</div>`
})
// Root instance for the survey page; renders the ordered session cards.
var vm = new Vue({
data: appdata, el: '#sample',
methods: {
format_dt: format_dt,
pretty_track: pretty_track,
// Placeholder -- not implemented yet.
pretty_session: function(ses) { },
// Legacy removal handler: extracts the numeric id from the element id
// (e.g. "ses_42" -> "42") and removes that record when logged in.
remove: function (event) { console.log(event); console.log(this);
if (g_login) {
var re = /\w+_(\d+)/;
var match = re.exec(event.target.id);
remove(match[1]); } }, },
computed: {
// The user's own sessions, ordered by start time.
orderedSessions: function () {
return _.sortBy(this.mysessions, 'starttime');
},
// All sessions minus id "1002", grouped by track then start time.
groupedSessions: function () {
var only = _.without( this.allsessions, _.findWhere(this.allsessions, { id:"1002" }));
return _.sortBy ( _.groupBy( _.sortBy(only, 'track'), 'starttime' ), function(x) { return x[0].starttime; });
}
}
});
</script>
<script src='/data/gui/lib/d3.js'></script>
<script src='/data/gui/lib/lodash.js'></script>
<script src='/data/gui/lib/vue.js'></script>
<script>
// Layout margins for the d3 chart and the per-user activity-log CSV path.
// The Jinja conditional falls back to user 241 when no id was supplied.
var margin = {top: 20, right: 20, bottom: 70, left: 40}
var filename= "/data/users/logs/{% if id %}{{ id }}{% else %}241{% endif %}.csv"
console.log(filename)
</script>

35
timer.py Normal file
View File

@ -0,0 +1,35 @@
# Demo script: schedule a one-shot threading.Timer to fire at 11:<mm> PM today.
from threading import Timer
import time, datetime

# Minutes past 11 PM at which the timer should fire.
mm = 18
t = datetime.datetime.today()
# NOTE(review): if it is already past 23:<mm>, `future` is in the past,
# `delta` is negative, and the Timer fires immediately.
future = datetime.datetime(t.year,t.month,t.day,23,mm)
diff = future - t
delta = diff.total_seconds()
print("waiting until 11:%i PM, which is %i seconds from now." % (mm,delta))

def func(a, b):
    # Callback run on the timer's worker thread once the delay elapses.
    print("Called function")
    return a * b

# Schedule a timer for 5 seconds
# We pass arguments 3 and 4
# NOTE(review): the two comments above are stale -- the delay is `delta`
# seconds, not 5, and `t` is rebound from a datetime to the Timer here.
t = Timer(delta, func, [3, 4])
start_time = time.time()
# Start the timer
t.start()
end_time = time.time()
# NOTE(review): this measures how long t.start() itself took (always near
# zero), not whether the timer has fired -- so the first branch is taken
# in practice, and "%i seconds" below interpolates the minutes constant
# `mm`, not an elapsed time. Confirm intent.
if end_time - start_time < 5.0:
    print("Timer will wait for sometime before calling the function")
else:
    print("%i seconds already passed. Timer finished calling func()" % mm)

BIN
token.pickle Normal file

Binary file not shown.

2203
users.py Normal file

File diff suppressed because it is too large Load Diff

156
util.py Normal file
View File

@ -0,0 +1,156 @@
import re, csv
from collections import defaultdict
def print_table(table):
    """Pretty-print *table* (a list of equal-length rows) to stdout.

    Each column is right-aligned to the width of its longest cell plus
    3 spaces of padding. Cells are rendered with str().

    Fix: an empty table now prints nothing instead of raising IndexError
    on ``table[0]``.
    """
    if not table:
        return
    longest_cols = [
        max(len(str(row[i])) for row in table) + 3
        for i in range(len(table[0]))
    ]
    row_format = "".join("{:>" + str(width) + "}" for width in longest_cols)
    for row in table:
        print(row_format.format(*row))
def remove_nl(s):
    """Return *s* with trailing whitespace (including the newline) removed.

    Fix: the parameter was named ``str``, shadowing the builtin; renamed
    to ``s`` (the function is only ever useful positionally).
    """
    return s.rstrip()
def UnicodeDictReader(utf8_data, **kwargs):
    """Yield each row of a CSV stream as a plain dict of str -> str.

    Historically this decoded bytes to unicode for Python 2. Under
    Python 3 csv.DictReader already yields str keys and values, and the
    old ``str(key, 'utf-8')`` calls raised TypeError (str takes no
    encoding argument when given a str). Fix: yield the row directly.

    :param utf8_data: an iterable of text lines (e.g. an open text file)
    :param kwargs: passed through to csv.DictReader
    """
    csv_reader = csv.DictReader(utf8_data, **kwargs)
    for row in csv_reader:
        yield dict(row)
def minimal_string(s):
    """Normalize *s* for fuzzy matching: lowercase, replace every
    non-alphanumeric character with a space, collapse whitespace runs to
    a single space, and strip the ends."""
    lowered = s.lower()
    alnum_only = re.sub(r'[^a-zA-Z0-9]', ' ', lowered)
    collapsed = re.sub(r'(\s+)', ' ', alnum_only)
    return collapsed.strip()
def to_file_friendly(st):
    """Turn *st* into a filename-safe slug: lowercase, with every run of
    characters outside [a-z0-9] replaced by a single underscore."""
    lowered = st.lower()
    return re.sub(r"[^a-z0-9]+", "_", lowered)
def clean_title(st):
    """Sanitize a title for display: characters outside alphanumerics and
    ``.-!`` become spaces, and anything longer than 50 characters is cut
    off with a trailing '...'. If sanitizing yields an empty string the
    original is kept."""
    cleaned = re.sub(r"[^a-zA-Z0-9\.\-\!]", " ", st)
    if cleaned:
        st = cleaned
    if len(st) > 50:
        return st[:50] + '...'
    return st
def match59(x):
    """Return True when the record's ``links.context`` is course 7959."""
    return x['links']['context'] == 7959
def item_2(x):
    """Sort-key helper: the element at index 2 of *x*."""
    return x[2]
# Convert an aware datetime to milliseconds since the Unix epoch, with the
# epoch itself localized to US/Pacific so the subtraction is aware-aware.
# NOTE(review): `pytz` and `datetime` are not imported at the top of this
# file (only re/csv/defaultdict are) -- confirm the caller's module
# provides them, otherwise this raises NameError at call time.
def unix_time_millis(dt):
    wst = pytz.timezone("US/Pacific")
    epoch = datetime.datetime.fromtimestamp(0)
    epoch = wst.localize(epoch)
    return (dt - epoch).total_seconds() * 1000.0
# ENGL250 returns ENGL
# ENGL250 returns ENGL
def dept_from_name(n):
    """Extract the department prefix from a course name, e.g. 'ENGL250' -> 'ENGL'.

    Accepts an optional single space between the letters and the number
    ('MATH 205' -> 'MATH'). Returns '' (after printing a warning) when no
    department can be found.

    Fix: the regex is now a raw string -- the original non-raw '\\s' /
    '\\d' escapes trigger DeprecationWarning (a SyntaxWarning on modern
    Python).
    """
    m = re.search(r'^([a-zA-Z]+)\s?[\d\/]+', n)
    if m:
        return m.group(1)
    print("Couldn't find dept from: " + n)
    return ''
def most_common_item(li):
    """Return the most frequent element of *li*.

    Ties on frequency are broken in favor of the larger element (the
    ranking key is (count, element), descending)."""
    counts = defaultdict(int)
    for item in li:
        counts[item] += 1
    ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    return ranked[0][0]
# Python-2 style cmp() comparator for ISO timestamp strings: parses both,
# converts them to local time, and returns the signed difference in seconds
# (positive when a is later than b).
# NOTE(review): `dateutil` and `tz` are not imported in this file -- confirm
# the importing module provides them. Also note partition() passes this to
# sorted() as a positional cmp argument, which Python 3 no longer supports
# (it needs functools.cmp_to_key).
def srt_times(a,b):
    HERE = tz.tzlocal()
    da = dateutil.parser.parse(a)
    da = da.astimezone(HERE)
    db = dateutil.parser.parse(b)
    db = db.astimezone(HERE)
    diff = da - db
    return diff.seconds + diff.days * 24 * 3600
def how_long_ago(a): # number of hours ago 'a' was...
    """Return roughly how many hours ago timestamp string *a* was.

    Falsy input returns the sentinel 9999. Both datetimes are stripped of
    tzinfo before subtracting, then a constant 8 hours is added.

    NOTE(review): the hard-coded +8 assumes the input is UTC and local time
    is Pacific Standard Time; during daylight saving (UTC-7) this is off by
    an hour -- confirm intended. `dateutil` and `tz` are not imported in
    this file; confirm the importing module provides them.
    """
    if not a: return 9999
    HERE = tz.tzlocal()
    d_now = datetime.datetime.now()
    d_now = d_now.replace(tzinfo=None)
    #d_now = d_now.astimezone(HERE)
    d_then = dateutil.parser.parse(a)
    d_then = d_then.replace(tzinfo=None)
    #d_then = d_then.astimezone(HERE)
    diff = d_now - d_then
    return (diff.seconds/3600) + (diff.days * 24) + 8 # add 8 hours to get back from UTC timezone
def partition(times_list):
    """Cluster raw hit timestamps into usage "sessions".

    Takes a list of timestamp strings (e.g. '2017-02-14T17:01:46Z') and
    groups consecutive hits: a gap of more than 26 minutes starts a new
    session. Sessions with more than 2 hits are returned as
    ``[start_label, hit_count, minutes, labels]`` where *minutes* is the
    session span padded by 5 minutes. Also writes a d3-timeline-style
    JSON list of 1-minute intervals (one per hit) to the module-global
    file handle ``dd``.

    Fix: ``sorted(times_list, srt_times)`` used the Python-2 positional
    cmp argument, which raises TypeError on Python 3; the comparator is
    now wrapped with functools.cmp_to_key.

    NOTE(review): depends on names not defined in this file (dd, json,
    timedelta, dateutil, tz, unix_time_millis) -- confirm the importing
    module provides them.
    """
    from functools import cmp_to_key  # srt_times is a py2-style comparator

    global dd
    mm = ['x','Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
    start = ""
    last = ""
    hits = 0
    minutes_till_new_session = 26
    delta = timedelta(minutes=minutes_till_new_session)
    HERE = tz.tzlocal()
    sessions = []
    # BUG FIX: Python 3 removed sorted()'s positional cmp parameter.
    sorted_times_list = sorted(times_list, key=cmp_to_key(srt_times))
    current_set = []
    timeline_times = []
    for T in sorted_times_list:
        dt_naive = dateutil.parser.parse(T)
        dt = dt_naive.astimezone(HERE)
        # One 1-minute interval per hit, for the timeline visualization.
        timeline_st = unix_time_millis(dt)
        timeline_et = timeline_st + (1 * 60 * 1000)  # always end 1 minute later....
        timeline_times.append({'starting_time': timeline_st,
                               'ending_time': timeline_et})
        month = mm[int(dt.strftime("%m"))]
        formatted = month + " " + dt.strftime("%d %H:%M")
        if not start:  # first hit: open a new session
            start = dt
            start_f = formatted
            last = dt
            current_set.append(formatted)
            hits = 1
        else:
            if dt > last + delta:  # gap too long: close session, open another
                minutes = (last - start)
                minutes = (minutes.seconds / 60) + 5  # pad each session by 5 min
                if hits > 2:  # drop sessions with too few hits
                    sessions.append([start_f, hits, minutes, current_set])
                start = dt
                start_f = formatted
                last = dt
                hits = 1
                current_set = [formatted]
            else:  # still within the same session
                last = dt
                current_set.append(formatted)
                hits += 1
    # flush the final open session, if any
    if (last):
        minutes = (last - start)
        minutes = (minutes.seconds / 60) + 5
        if hits > 2:
            sessions.append([start_f, hits, minutes, current_set])
    dd.write(json.dumps(timeline_times))
    return sessions