summer 24 changes

phowell 2024-07-22 15:19:31 -07:00
parent 9ca707a479
commit 6da470ad1f
7 changed files with 462 additions and 260 deletions

View File

@@ -158,6 +158,7 @@ def accessible_check(id=""):
     if not id:
         id = input("ID of course to check? ")
     verbose = 1
+    PAGES_ONLY = 1
     save_file_types = ['application/pdf','application/docx','image/jpg','image/png','image/gif','image/webp','application/vnd.openxmlformats-officedocument.wordprocessingml.document']
@@ -232,6 +233,7 @@ def accessible_check(id=""):
     ###
     ### FILES
     ###
+    if not PAGES_ONLY:
         files_f = course_folder + '/files'
         headered = 0
         print("\nFILES")
@@ -295,8 +297,9 @@ def accessible_check(id=""):
         if os.path.exists(this_page_filename):
             d(" - already downloaded %s" % this_page_filename)
             this_page_content = codecs.open(this_page_filename,'r','utf-8').read()
-        elif re.search(r'eis-prod',p['url']) or re.search(r'gavilan\.ins',p['url']):
-            d(' * skipping file behind passwords')
+        #elif re.search(r'eis-prod',p['url']) or re.search(r'gavilan\.ins',p['url']):
+        #elif re.search(r'eis-prod',p['url']):
+        #    d(' * skipping file behind passwords')
         else:
             t2 = fetch('/api/v1/courses/' + str(id) + '/pages/'+p['url'], verbose)
             if t2 and 'body' in t2 and t2['body']:
@@ -314,9 +317,9 @@ def accessible_check(id=""):
                 src = I.get('src')
                 if src:
                     d(' - %s' % src)
-                    if re.search(r'eis-prod', src) or re.search(r'gavilan\.ins', src):
-                        d(' * skipping file behind passwords')
-                    else:
+                    #if re.search(r'eis-prod', src) or re.search(r'gavilan\.ins', src):
+                    #    d(' * skipping file behind passwords')
+                    #else:
                     try:
                         r = requests.get(src,headers=header, stream=True)
                         mytype = r.headers['content-type']
@@ -350,6 +353,8 @@ def accessible_check(id=""):
     ###
     ### ASSIGNMENTS
     ###
+    if not PAGES_ONLY:
         headered = 0
         asm_f = course_folder + '/assignments'
         print("\nASSIGNMENTS")

View File

@@ -1,9 +1,11 @@
+from ast import Try
 import json, re, requests, codecs, sys, time, funcy, os
 import pandas as pd
 from datetime import datetime
 import pytz
 from dateutil import parser
 from datetime import datetime
+#from symbol import try_stmt
 from util import print_table, int_or_zero, float_or_zero, dept_from_name, num_from_name
 from pipelines import fetch, fetch_stream, getSemesterSchedule, fetch_collapse, header, url, shortToLongSem
 from pipelines import sems
@@ -971,12 +973,16 @@ def enroll_id_list_to_shell(id_list, shell_id, v=0):
             print("Something went wrong with id %s, %s, %s" % (j, str(s), str(e)))

+# multiple semesters
 def enroll_stem_students_live():
+    semesters = [183,184]
+    for S in semesters:
+        enroll_stem_students_live_semester(S)
+
+def enroll_stem_students_live_semester(the_term, do_removes=0):
     import localcache2
-    the_term = '181' # su23 fa23 = 180
-    do_removes = 0
     depts = "MATH BIO CHEM CSIS PHYS PSCI GEOG ASTR ECOL ENVS ENGR".split(" ")
     users_to_enroll = users_in_by_depts_live(depts, the_term) # term id
@@ -1317,23 +1323,15 @@ def course_search_by_sis():
 def course_by_depts_terms(section=0):
-    """s = [ x.strip() for x in codecs.open('cache/fa22_eval_sections.csv','r').readlines()]
-    s = list(funcy.flatten(s))
-    s.sort()
-    xyz = input('hit return to continue')
-    """
-    #c = getCoursesInTerm(168,0,1)
-    #c = getCoursesInTerm(174,0,1) # sp22
-    #c = getCoursesInTerm(176,0,1) # fa22
-    get_fresh = 1
-    SP_TERM = 181
-    WI_TERM = 182
-    SEM = "sp24"
+    get_fresh = 0
+    #SP_TERM = 181
+    #WI_TERM = 182
+    TERM = 183
+    SEM = "su24"
     make_changes = 1
-    make_changes_LS = 1
+    do_all = 0
     winter_start_day = 2
     aviation_start_day = 11
@@ -1341,16 +1339,20 @@ def course_by_depts_terms(section=0):
     spring_start_day = 29
     if get_fresh:
-        c = getCoursesInTerm(SP_TERM,0,0)
-        codecs.open(f'cache/courses_in_term_{SP_TERM}.json','w','utf-8').write(json.dumps(c,indent=2))
+        print(f"Getting list of courses in {SEM}")
+        c = getCoursesInTerm(TERM,0,0)
+        codecs.open(f'cache/courses_in_term_{TERM}.json','w','utf-8').write(json.dumps(c,indent=2))
     else:
-        c = json.loads( codecs.open(f'cache/courses_in_term_{SP_TERM}.json','r','utf-8').read() )
+        c = json.loads( codecs.open(f'cache/courses_in_term_{TERM}.json','r','utf-8').read() )
     crn_to_canvasid = {}
     for C in c:
+        #print(C['name'])
         if 'sis_course_id' in C and C['sis_course_id']:
+            print( f"{C['name']} -> {C['sis_course_id'][7:13]}" )
             crn_to_canvasid[C['sis_course_id'][7:13]] = str(C['id'])
+        else:
+            print( f"---NO CRN IN: {C['name']} -> {C}" )
     #print(crn_to_canvasid)
     #return
@@ -1361,19 +1363,47 @@ def course_by_depts_terms(section=0):
         start = re.sub( r'\-','/', S['start']) + '/20' + SEM[2:4]
         d_start = datetime.strptime(start,"%m/%d/%Y")
-        if d_start.month > 5:
-            print("Ignoring ", d_start, " starting too late...")
+        try:
+            this_id = crn_to_canvasid[S['crn']]
+        except Exception as e:
+            print(f"DIDN'T FIND CRN - {start} {d_start} - {S['code']} {S['crn']} {S['name']}" )
             continue
-        if d_start.month == 1 and d_start.day == aviation_start_day:
-            print("- Aviation ", start, d_start, " - ", S['code'], " ", S['crn'] )
+        print(f" - {start} {d_start} - id: {this_id} - {S['code']} {S['crn']} {S['name']}" )
+        if 1:
+            if d_start.month < 5 or d_start.month > 7:
+                print(f" Ignoring {d_start}, starting too far away...")
                 continue
-        if d_start.month == 1 and d_start.day == nursing_start_day:
-            print("- Nursing ", start, d_start, " - ", S['code'], " ", S['crn'] )
+            #if d_start.month == 1 and d_start.day == aviation_start_day:
+            #    print("- Aviation ", start, d_start, " - ", S['code'], " ", S['crn'] )
+            #    continue
+            #if d_start.month == 1 and d_start.day == nursing_start_day:
+            #    print("- Nursing ", start, d_start, " - ", S['code'], " ", S['crn'] )
+            #    continue
+            if d_start.month == 5 and d_start.day == 28:
+                print(" Ignoring, term start date" )
                 continue
-        if d_start.month == 1 and d_start.day == winter_start_day:
+            else:
+                print(" Adjust course start day?")
+                if make_changes:
+                    if do_all != 'a':
+                        do_all = input(' -> adjust? [enter] for yes, [a] to do all remaining. [n] to quit. >')
+                    if do_all == 'n':
+                        exit()
+                    if do_all == '' or do_all == 'a':
+                        data = {'course[start_at]':d_start.isoformat(), 'course[restrict_student_future_view]': True,
+                                'course[restrict_enrollments_to_course_dates]':True }
+                        u2 = f"https://gavilan.instructure.com:443/api/v1/courses/{this_id}"
+                        r3 = requests.put(u2, headers=header, params=data)
+                        print(" updated.. OK")
+        """if d_start.month == 1 and d_start.day == winter_start_day:
             print("+ winter session: ", d_start, " - ", S['code'])
             data = {'course[term_id]':WI_TERM}
             u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
@@ -1381,19 +1411,12 @@ def course_by_depts_terms(section=0):
             r3 = requests.put(u2, headers=header, params=data)
             print(" updated.. OK")
             #print(r3.text)
-            continue
-        if d_start.month == 1 and d_start.day == spring_start_day:
-            # normal class
-            continue
-        print("- Late start? ", start, d_start, " - ", S['code'], " ", S['crn'] )
-        if make_changes_LS:
-            data = {'course[start_at]':d_start.isoformat(), 'course[restrict_student_future_view]': True,
-                    'course[restrict_enrollments_to_course_dates]':True }
-            u2 = "https://gavilan.instructure.com:443/api/v1/courses/%s" % crn_to_canvasid[S['crn']]
-            r3 = requests.put(u2, headers=header, params=data)
-            print(" updated.. OK")
+            continue"""
+        #if d_start.month == 1 and d_start.day == spring_start_day:
+        #    # normal class
+        #    continue
     return

View File

@@ -341,6 +341,8 @@ def create_schedule_table_if_not_exists():
 # Populate schedule table and correlate to courses table
 def courses_to_sched():
+    # TODO: fix units when they are variable... change to float in between range. round to 0.5 unit.
     EXECUTE = 1
     seasons = {'10':'wi','30':'sp','50':'su','70':'fa'}
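The TODO above describes the intended handling of variable units: convert to a float midway through the range, rounded to the nearest 0.5 unit. A minimal sketch of that conversion, assuming unit strings such as '1-3' or '1.5/3' (the formats the schedule queries later in this commit search for); the parse_units helper is hypothetical and not part of this commit:

import re

def parse_units(units):
    # Hypothetical helper: turn a schedule 'units' value into a float.
    # Variable ranges like '1-3' or '1.5/3' become the midpoint of the range,
    # rounded to the nearest 0.5 unit; plain values pass through as floats.
    parts = re.split(r'[-/]', str(units).strip())
    try:
        values = [float(p) for p in parts if p != '']
    except ValueError:
        return 0.0
    if not values:
        return 0.0
    midpoint = (min(values) + max(values)) / 2
    return round(midpoint * 2) / 2

# e.g. parse_units('3') -> 3.0, parse_units('1-3') -> 2.0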

View File

@@ -30,7 +30,7 @@ from path_dict import PathDict
 outputfile = ''
 csvwriter = ''
-TERM = 181
+TERM = 183

 def escape_commas(s):
@@ -149,6 +149,7 @@ def ilearn_shell_slo_to_csv(shell_slos):
         L.append("o%i_assd" % i)
     df = pd.DataFrame(columns=L)
     for S in shell_slos:
+        try:
             short = S[0]
             this_crs = {'canvasid':short['ilearnid'], 'name':short['ilearnname'], 'has_outcomes':0, }
             if len(S)>1:
@@ -180,10 +181,12 @@ def ilearn_shell_slo_to_csv(shell_slos):
                 this_crs['o%i_vendor_guid' % i] = '!'
                 i += 1
             df2 = pd.DataFrame(this_crs, columns = df.columns, index=[0])
             df = pd.concat( [df, df2], ignore_index = True )
+        except Exception as e:
+            print(f"*** Exception {e} with {S}\n\n")
     df.to_csv('cache/outcome.csv')
     print(df)

stats.py
View File

@@ -1393,6 +1393,11 @@ def report_student_stats():
     # Save the figure in an HTML file
     pio.write_html(fig, 'cache/student_pct_onlinecourse.html')

+def test_rpy():
+    pass
+
+'''
 def test_rpy():
     from rpy2 import robjects
     from rpy2.robjects import Formula, Environment
@@ -1439,8 +1444,162 @@ def test_rpy2():
     utils = importr('utils')
     pi = robjects.r['pi']
     print(f"pi={pi[0]}")
+'''
+
+import pandas as pd
+from sklearn.cluster import KMeans
+from sklearn.preprocessing import StandardScaler
+from sklearn.tree import DecisionTreeClassifier
+from sklearn.metrics import silhouette_score
+from sklearn.tree import DecisionTreeClassifier, export_graphviz
+import graphviz
+from joblib import dump, load
+
+def cluster_by_mode_1():
+    # Load the data from a CSV file
+    data = pd.read_csv('cache/students_bymode.csv')
+
+    # Extract the relevant features
+    features = data[['num_semesters', 'num_units', 'inperson_units', 'hybrid_units', 'online_units']]
+
+    # Standardize the features
+    scaler = StandardScaler()
+    scaled_features = scaler.fit_transform(features)
+
+    # Perform clustering with different numbers of clusters
+    for n_clusters in range(4, 12):
+        kmeans = KMeans(n_clusters=n_clusters, random_state=42)
+        kmeans.fit(scaled_features)
+
+        # Add the cluster labels to the original data
+        data[f'cluster_{n_clusters}'] = kmeans.labels_
+
+        print(f"Clustering with {n_clusters} clusters:")
+        print(data.groupby(f'cluster_{n_clusters}').size())
+        print()
+
+    # Save the updated data with cluster labels to a new CSV file
+    data.to_csv('cache/students_bymode_with_clusters_1.csv', index=False)
+
+def cluster_by_mode():
+    data = pd.read_csv('cache/students_bymode.csv')
+
+    # Split features and target
+    X = data.drop('g_number', axis=1)
+    y = data['g_number']
+
+    # Train decision tree classifier
+    clf = DecisionTreeClassifier()
+    clf.fit(X, y)
+
+    # Visualize decision tree
+    dot_data = export_graphviz(clf, out_file=None,
+                               feature_names=X.columns,
+                               class_names=y.unique(),
+                               filled=True, rounded=True,
+                               special_characters=True)
+    graph = graphviz.Source(dot_data)
+    graph.render('decision_tree', view=True)
+
+    data.to_csv('cache/students_bymode_with_dt.csv', index=False)
+
+def cluster_by_mode_2():
+    # Load the data from a CSV file
+    data = pd.read_csv('cache/students_bymode.csv')
+
+    # Extract the features (excluding the 'g_number' column)
+    features = data.drop('g_number', axis=1)
+
+    # Scale the features to have zero mean and unit variance
+    scaler = StandardScaler()
+    scaled_features = scaler.fit_transform(features)
+
+    # Determine the ideal number of clusters using the elbow method
+    inertias = []
+    for k in range(4, 40): # Try different values of k (e.g., 1 to 10)
+        kmeans = KMeans(n_clusters=k, random_state=42)
+        kmeans.fit(scaled_features)
+        inertias.append(kmeans.inertia_)
+
+    # Plot the elbow curve
+    import matplotlib.pyplot as plt
+    plt.plot(range(4, 40), inertias, marker='o')
+    plt.xlabel('Number of Clusters (k)')
+    plt.ylabel('Inertia')
+    plt.title('Elbow Method')
+    plt.show()
+
+    # Choose the ideal number of clusters based on the elbow curve
+    ideal_k = 12 # Adjust this based on your observation
+
+    # Perform clustering with the ideal number of clusters
+    kmeans = KMeans(n_clusters=ideal_k, random_state=42)
+    kmeans.fit(scaled_features)
+
+    # Get the cluster labels for each data point
+    labels = kmeans.labels_
+
+    # Add the cluster labels to the original data
+    data['Cluster'] = labels
+
+    # Save the cluster labels to a new CSV file
+    data.to_csv('cache/students_bymode_with_clusters_2.csv', index=False)
+
+    # Get the cluster centers (centroids)
+    centroids = scaler.inverse_transform(kmeans.cluster_centers_)
+
+    # Print the cluster centers
+    for i, centroid in enumerate(centroids):
+        print(f"Cluster {i} center:")
+        for feature, value in zip(features.columns, centroid):
+            print(f"{feature}: {value}")
+        print()
+
+    # Save the trained objects to files
+    dump(kmeans, 'kmeans.joblib')
+    dump(scaler, 'scaler.joblib')
+
+    # Load the saved objects for future use
+    loaded_kmeans = load('kmeans.joblib')
+    loaded_scaler = load('scaler.joblib')
+
+    # Use the loaded objects for predictions
+    new_data_scaled = loaded_scaler.transform(new_data)
+    predictions = loaded_kmeans.predict(new_data_scaled)
+
+def cluster_with_new_data():
+    ## NOT TESTED
+    # need to save the kmeans and scaler objects from previous step.
+    # Load the new data
+    new_data = pd.read_csv('new_data.csv')
+
+    # Extract the features from the new data
+    new_features = new_data.drop('g_number', axis=1)
+
+    # Scale the new features using the fitted scaler
+    scaled_new_features = scaler.transform(new_features)
+
+    # Predict the cluster labels for the new data
+    new_labels = kmeans.predict(scaled_new_features)
+
+    # Add the cluster labels to the new data
+    new_data['Cluster'] = new_labels
+
 if __name__ == "__main__":
     options = { 1: ['get all historical grades from ilearn',get_all] ,
                 2: ['process grades csv file',process_grades] ,
@@ -1462,6 +1621,7 @@ if __name__ == "__main__":
                 30: ['visualize course modes multi semester', visualize_course_modes_multi_semester],
                 31: ['Report on student stats', report_student_stats],
                 32: ['test rpy', test_rpy],
+                33: ['cluster students by mode', cluster_by_mode],
                 }
     print ('')
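The cluster_with_new_data stub added above is marked NOT TESTED and refers to kmeans and scaler objects from the previous step. A minimal sketch of how the kmeans.joblib and scaler.joblib files dumped by cluster_by_mode_2 might be reloaded and applied to a new CSV; the function name, file path, and the assumption that the new file has the same columns as cache/students_bymode.csv are hypothetical:

import pandas as pd
from joblib import load

def assign_clusters_to_new_data(csv_path='new_data.csv'):
    # Reload the fitted objects saved by cluster_by_mode_2()
    kmeans = load('kmeans.joblib')
    scaler = load('scaler.joblib')
    # Assumes the new CSV has the same feature columns plus a 'g_number' id column
    new_data = pd.read_csv(csv_path)
    features = new_data.drop('g_number', axis=1)
    new_data['Cluster'] = kmeans.predict(scaler.transform(features))
    return new_data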

View File

@@ -66,6 +66,14 @@ where (s.type='online' or s.type='hybrid' or s.type='online line')
 and not cc.path='sstaff@gavilan.edu'
 order by u.sortable_name;
+
+-- names that are uppercase
+SELECT *
+FROM canvas.users
+WHERE REGEXP_LIKE(name, '^[A-Z]+[[:space:]]')
+order by last_logged_out ;

 -- for outlook
 select string_agg(distinct LOWER(cc.path), '; ') from canvas.courses c
@@ -112,7 +120,12 @@ group by u.sortable_name
 order by total desc, online desc, onlinelive desc, hybrid desc;
+
+-- find (fix?) rows where units are variable
+SELECT * FROM canvas.schedule
+WHERE units LIKE '%-%';
+SELECT * FROM canvas.schedule
+WHERE units LIKE '%/%';

 -- num units
 select u.sortable_name, p.sis_user_id,
@@ -145,6 +158,76 @@ order by total desc;
+-- students.csv: each student, num_semesters, num_units, num_f2f, num_online, num_hybrid
+SELECT
+    p.sis_user_id as g_number,
+    COUNT(DISTINCT s.sem) AS num_semesters,
+    SUM(s.units::FLOAT) AS num_units,
+    sum(CASE WHEN s.type = 'in-person' THEN s.units::FLOAT ELSE 0 end) AS inperson_units,
+    sum(CASE WHEN s.type = 'hybrid' THEN s.units::FLOAT ELSE 0 end) AS hybrid_units,
+    sum(CASE WHEN s.type = 'online' or s.type = 'online live' THEN s.units::FLOAT ELSE 0 end) AS online_units
+FROM
+    canvas.users u
+    JOIN canvas.enrollments e ON u.id = e.user_id
+    JOIN canvas.courses c ON e.course_id = c.id
+    JOIN canvas.schedule s ON c.id = s.canvascourse
+    JOIN canvas.pseudonyms p ON u.id = p.user_id
+WHERE
+    e.workflow_state = 'active'
+    AND e.type = 'StudentEnrollment'
+    AND u.id IN (
+        SELECT u.id FROM canvas.enrollments AS e
+        JOIN canvas.users AS u ON e.user_id=u.id
+        JOIN canvas.courses AS c ON e.course_id=c.id
+        WHERE (c.sis_source_id LIKE '202450-%%' or c.sis_source_id LIKE '202470-%%')
+        AND e.workflow_state='active'
+        AND e.type='StudentEnrollment'
+        GROUP BY u.id
+    )
+GROUP BY
+    p.sis_user_id
+ORDER BY
+    num_semesters, p.sis_user_id;
+
+-- students.csv: each student, num_semesters, num_units
+SELECT
+    p.sis_user_id as g_number,
+    COUNT(DISTINCT s.sem) AS num_semesters,
+    SUM(s.units::FLOAT) AS num_units
+FROM
+    canvas.users u
+    JOIN canvas.enrollments e ON u.id = e.user_id
+    JOIN canvas.courses c ON e.course_id = c.id
+    JOIN canvas.schedule s ON c.id = s.canvascourse
+    JOIN canvas.pseudonyms p ON u.id = p.user_id
+WHERE
+    e.workflow_state = 'active'
+    AND e.type = 'StudentEnrollment'
+    AND u.id IN (
+        SELECT u.id FROM canvas.enrollments AS e
+        JOIN canvas.users AS u ON e.user_id=u.id
+        JOIN canvas.courses AS c ON e.course_id=c.id
+        WHERE (c.sis_source_id LIKE '202450-%%' or c.sis_source_id LIKE '202470-%%')
+        AND e.workflow_state='active'
+        AND e.type='StudentEnrollment'
+        GROUP BY u.id
+    )
+GROUP BY
+    p.sis_user_id
+ORDER BY
+    num_semesters, p.sis_user_id;

 -- each class
 select u.sortable_name, c.course_code, s.type, s.units::FLOAT

View File

@@ -1796,8 +1796,8 @@ def track_user(id=0,qid=0):
     url_addition = ""
     if 1: # hard code dates
-        start_date = "2023-08-01T00:00:00-07:00"
-        end_date = "2024-01-01T00:00:00-07:00"
+        start_date = "2024-01-01T00:00:00-07:00"
+        end_date = "2024-07-01T00:00:00-07:00"
         url_addition = f"?start_time={start_date}&end_time={end_date}"
     elif 'last_days_log' in info:
         print("There's existing log data for %s (%s)" % (info['name'] , info['sis_user_id']))
@@ -2041,80 +2041,6 @@ def find_new_teachers():
     for J in jj:
         print( J['teacher'])

-def user_db_sync():
-    # currently in db
-    conusr = fetch("http://192.168.1.6:8080/dir_api.php?users=1")
-    conusr_emails = set([x.lower() for x in funcy.pluck('email',conusr)])
-    #fetch all staff from ilearn ILRN unique emails
-    ilrn = json.loads(codecs.open("cache/ilearn_staff.json","r","utf-8").read())
-    ilrn_emails = set([x.lower() for x in funcy.pluck('email',ilrn)])
-    for e in ilrn_emails:
-        if not (e in conusr_emails) and e.endswith('@gavilan.edu'):
-            E = funcy.first(funcy.where(ilrn,email=e))
-            goo = E['login_id'][3:]
-            #print("not in conf_user: %s \t %s \t %s" % (e,E['short_name'], E['login_id']) )
-            print("INSERT INTO conf_users (goo,email,name) VALUES ('%s', '%s', '%s');" % (goo,e,E['short_name']) )
-
-def user_db_sync2():
-    #fetch all personnel dir entries from dir_api.php. PERSL unique emails
-    persl = fetch("http://hhh.gavilan.edu/phowell/map/dir_api.php?personnel=1")
-    persl_emails = set([x.lower() for x in funcy.pluck('email',persl)])
-    #persl_ids = set([x.lower() for x in funcy.pluck('email',persl)])
-    #
-    #fetch all staff from ilearn ILRN unique emails
-    ilrn = json.loads(codecs.open("cache/ilearn_staff.json","r","utf-8").read())
-    ilrn_emails = set([x.lower() for x in funcy.pluck('email',ilrn)])
-    #
-    #fetch all conf_users from dir_api.php CONUSR unique emails
-    conusr = fetch("http://hhh.gavilan.edu/phowell/map/dir_api.php?users=1")
-    conusr_emails = set([x.lower() for x in funcy.pluck('email',conusr)])
-    #fetch all gavi_personnel_ext from dir_api.php GPEREXT must have column 'personnel' or 'c_users' or both.
-    gperext = fetch("http://hhh.gavilan.edu/phowell/map/dir_api.php?personnelext=1")
-    all_emails = set(persl_emails)
-    all_emails.update(ilrn_emails)
-    all_emails.update(conusr_emails)
-    all_emails = list(all_emails)
-    all_emails.sort()
-    fout = codecs.open('cache/db_staff_report.csv','w','utf-8')
-    fout.write('email,personnel_dir,ilearn,conf_user\n')
-    for e in all_emails:
-        if e in ilrn_emails and not (e in conusr_emails) and e.endswith('@gavilan.edu'):
-            E = funcy.first(funcy.where(ilrn,email=e))
-            goo = E['login_id'][3:]
-            #print("not in conf_user: %s \t %s \t %s" % (e,E['short_name'], E['login_id']) )
-            print("INSERT INTO conf_users (goo,email,name) VALUES ('%s', '%s', '%s');" % (goo,e,E['short_name']) )
-            # goo (minus G00) email, and name go into conf_users
-        fout.write(e+',')
-        if e in persl_emails:
-            fout.write('1,')
-        else:
-            fout.write('0,')
-        if e in ilrn_emails:
-            fout.write('1,')
-        else:
-            fout.write('0,')
-        if e in conusr_emails:
-            fout.write('1,')
-        else:
-            fout.write('0,')
-        fout.write('\n')
-    fout.close()
-    #
-    #print( json.dumps( [persl,ilrn,conusr,gperext], indent=2 ) )
-    print('done')
-
 import traceback