sept 2023 updates

parent 021a8ea7e9
commit 5ccf69b740

courses.py | +112
@@ -1685,6 +1685,116 @@ def remove_n_analytics(section=0):
def fetch_rubric_scores(course_id=16528, assignment_id=1):
    api_url = f'{url}/api/v1/courses/{course_id}'
    course_info = fetch(api_url)

    out = codecs.open('cache/rubric_scores.txt', 'w', 'utf-8')

    #print(course_info)

    # Extract course details
    course_name = course_info['name']
    course_short_name = course_info['course_code']
    course_semester = course_info['enrollment_term_id']

    # Write course information
    out.write(f"Course Name: {course_name}\n")
    out.write(f"Short Name: {course_short_name}\n")
    out.write(f"Semester: {course_semester}\n")

    api_url = f'{url}/api/v1/courses/{course_id}/assignments'
    assignments_list = fetch(api_url)

    #print(assignments_list)

    assignments_dict = {}
    ratings_dict = {}

    # Iterate through the list of assignments and populate both dictionaries
    for assignment in assignments_list:
        assignment_id = assignment['id']
        assignment_name = assignment['name']
        rubric = assignment.get('rubric', [])  # rubric criteria (empty list if not present)

        has_rubric = 'yes' if rubric else 'no'
        out.write(f"  Asmt Name: {assignment_name} ID: {assignment_id} Rubric: {has_rubric}\n")

        # Save assignment details, including the rubric
        assignments_dict[assignment_id] = {
            'name': assignment_name,
            'rubric': rubric
        }

        # Index every rating by id so submission scores can be matched back
        # to criterion and rating descriptions later
        if rubric:
            print("RUBRIC:")
            print(json.dumps(rubric, indent=2))
            for r in rubric:
                for rat in r.get('ratings', []):
                    ratings_dict[rat['id']] = {
                        'rub_description': r['description'],
                        'rat_description': rat['description'],
                        'points': rat['points']
                    }

    # Write both dictionaries for reference
    out.write(json.dumps(assignments_dict, indent=2) + '\n\n\n')
    out.write(json.dumps(ratings_dict, indent=2) + '\n\n\n')

    # Loop through assignments with rubrics and report on grades
    for assignment in assignments_list:
        if not assignment.get('rubric', []):
            continue

        assignment_id = assignment['id']
        out.write(f"  Asmt Name: {assignment['name']} ID: {assignment_id}\n")

        # The include[]=rubric_assessment parameter asks Canvas to return
        # rubric assessments along with each submission
        api_url = f'{url}/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions?include[]=rubric_assessment'
        submissions_data = fetch(api_url)

        # Iterate through the submissions and report rubric scores and comments
        for submission in submissions_data:
            user_id = submission['user_id']
            rubric = submission.get('rubric_assessment', {})  # dict keyed by criterion id (empty if not assessed)
            comments = submission.get('submission_comments', '')
            score = submission.get('score', -1)

            if rubric:
                print(json.dumps(submission, indent=2))

                out.write(f"\nSubmission User ID/Assignment ID: {user_id}/{assignment_id}\n")
                out.write(f"Score: {score}\n")
                out.write(f"Submission Comments: {comments}\n")
                out.write("Rubric:\n")
                for k, v in rubric.items():
                    rub_desc = '?'
                    rat_desc = '?'
                    max_points = '?'
                    if v.get('rating_id') in ratings_dict:
                        rub_rating = ratings_dict[v['rating_id']]
                        rub_desc = rub_rating['rub_description']
                        rat_desc = rub_rating['rat_description']
                        max_points = rub_rating['points']
                    out.write(f"  {rub_desc} - {rat_desc} ({v.get('rating_id')}): {v['points']}/{max_points} points: {v.get('comments', '')}\n")
                out.write("---\n")  # separator between submissions
                out.flush()
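
The hunk above leans on a fetch() helper defined elsewhere in courses.py, which this commit does not show. As a minimal sketch, it presumably wraps an authenticated GET against the Canvas API, along these lines (TOKEN is an assumed module-level Canvas API token, and real Canvas list endpoints paginate via the Link header, which this sketch ignores):

import requests

def fetch(api_url):
    # Hypothetical stand-in for the fetch() defined elsewhere in courses.py.
    # Assumes a module-level TOKEN holding a Canvas API token.
    headers = {'Authorization': f'Bearer {TOKEN}'}
    resp = requests.get(api_url, headers=headers)
    resp.raise_for_status()
    return resp.json()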
@ -1801,6 +1911,8 @@ if __name__ == "__main__":
        # 21: ['Add announcements to homepage', change_course_ann_homepage],
        # TODO wanted: group shell for each GP (guided pathway) as a basic student services gateway....
        #

        45: ['Fetch rubric scores and comments', fetch_rubric_scores],
        }
    print('')

(new file)
@@ -0,0 +1,118 @@
import codecs, os, regex, subprocess


def html_to_markdown(infile, out):
    cmd = f"pandoc -o \"./{out}\" -f html -t markdown \"./{infile}\""
    print(cmd)
    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        print(f"Error occurred: {result.stderr.decode('utf-8')}")
    else:
        print(f"Successfully converted '{infile}' to '{out}'")


def pdf_to_html(infile, out):
    # Despite the name, this writes the plain text that PyPDF2 extracts
    import PyPDF2

    pdf_file = open(infile, 'rb')
    pdf_reader = PyPDF2.PdfReader(pdf_file)

    text = ''
    for page_num in range(len(pdf_reader.pages)):
        page = pdf_reader.pages[page_num]
        text += page.extract_text()

    pdf_file.close()
    ofile = codecs.open(out, 'w', 'utf-8')
    ofile.write(text)
    ofile.close()


def pdf_to_html2(infile, out):
    from pdfminer.high_level import extract_pages
    from pdfminer.layout import LTTextContainer
    import html

    ofile = codecs.open(out, 'w', 'utf-8')

    print(infile)
    for page_layout in extract_pages(infile):
        for element in page_layout:
            if isinstance(element, LTTextContainer):
                text = html.escape(element.get_text())  # sanitize the text for HTML
                ofile.write(f"<p>{text}</p>")  # wrap in HTML paragraph tags
    ofile.close()


def convert(filename=""):
    target_dir = 'cache/docs'
    ff = os.listdir(target_dir)

    if filename:
        parts = filename.split('.')
        OUTFILE = f"{parts[0]}.html"
        pdf_to_html(target_dir + "/" + filename, target_dir + "/" + OUTFILE)
        html_to_markdown(target_dir + "/" + OUTFILE, target_dir + "/" + parts[0] + ".md")
    else:
        # No filename given: convert every PDF in the target directory
        for INFILE in ff:
            if INFILE.endswith(".pdf"):
                parts = INFILE.split('.')
                OUTFILE = f"{parts[0]}.html"
                pdf_to_html(target_dir + "/" + INFILE, target_dir + "/" + OUTFILE)
                html_to_markdown(target_dir + "/" + OUTFILE, target_dir + "/" + parts[0] + ".md")


def clean(fn):
    # Open file and read contents
    with open(fn, 'r', encoding='utf-8') as myfile:
        data = myfile.read()

    # Replace unicode non-breaking spaces with regular spaces; strip
    # soft hyphens and zero-width spaces
    data = data.replace('\u00A0', ' ')
    data = data.replace('\u00AD', '')
    data = data.replace('\u200B', '')

    # Write cleaned data back to file
    with open(fn, 'w', encoding='utf-8') as myfile:
        myfile.write(data)


def fix_line_breaks(fn):
    with codecs.open(fn, 'r', 'utf-8') as file:
        lines = file.readlines()

    new_lines = []
    paragraph = ''

    for line in lines:
        if line.strip() == '':
            # A blank line marks the end of a paragraph
            new_lines.append(paragraph.strip())
            paragraph = ''
        else:
            # Otherwise keep accumulating the paragraph (trailing space keeps words separated)
            paragraph += line.strip() + ' '

    # Handle the last paragraph
    if paragraph != '':
        new_lines.append(paragraph.strip())

    fout = codecs.open(fn, 'w', 'utf-8')
    fout.write('\n'.join(new_lines))
    fout.close()


fix_file = 'hyflex.md'
convert('hyflex.pdf')

clean(f'cache/docs/{fix_file}')

fix_line_breaks(f'cache/docs/{fix_file}')
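
One note on html_to_markdown above: building the pandoc command as a shell string breaks on filenames containing quotes or dollar signs. A variant sketch that passes the same flags as a list, avoiding the shell entirely (an alternative, not what the commit does):

def html_to_markdown_safe(infile, out):
    # Same pandoc invocation as html_to_markdown, but with list arguments
    # so no shell quoting is needed.
    result = subprocess.run(
        ['pandoc', '-o', f'./{out}', '-f', 'html', '-t', 'markdown', f'./{infile}'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        print(f"Error occurred: {result.stderr.decode('utf-8')}")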

stats.py | +24
@@ -880,9 +880,32 @@ def cluster_student_histories():
    df = pd.read_csv(infile)

def dept(s):
    # Department prefix of a course code, e.g. "CSIS 85" -> "CSIS"
    parts = s.split(' ')
    return parts[0]

def try_make_sched():
    term = "fa23"
    sched = requests.get(f"http://gavilan.cc/schedule/{term}_sched.json").json()
    #print(json.dumps(sched, indent=2))

    d = "CSIS"
    courses = [[x['code'], x['crn']] for x in sched if dept(x['code']) == d]
    teachers = {x['teacher'] for x in sched if dept(x['code']) == d}

    print(courses)
    print(teachers)


def section_stats():
    pass

    # for each course (e.g. ENG1A), how many students are enrolled across all its sections?
    # (and break down by mode, time, location, etc.)
    #
    # for each course, how many are first-semester Gavilan students?


if __name__ == "__main__":
    options = { 1: ['get all historical grades from ilearn', get_all],
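
section_stats() is only stubbed out above; a minimal sketch of the first question in its comments, counting sections per course from the same gavilan.cc schedule feed that try_make_sched() uses (field names taken from that function; enrollment itself would need a field this sketch does not assume the feed has):

from collections import Counter

def section_stats_sketch(term="fa23"):
    # Count how many sections each course code offers in the term.
    sched = requests.get(f"http://gavilan.cc/schedule/{term}_sched.json").json()
    sections_per_course = Counter(x['code'] for x in sched)
    for code, n in sections_per_course.most_common(10):
        print(f"{code}: {n} sections")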
@ -896,6 +919,7 @@ if __name__ == "__main__":
        9: ['semester startdates list', semester_dates],
        10: ['normalize course histories', normalize_course_histories],
        11: ['cluster student histories', cluster_student_histories],
        12: ['try to make a schedule', try_make_sched],
        }
    print('')

tasks.py | +32
@@ -1412,7 +1412,36 @@ def print_a_calendar():
def word_calendar():
    from docx import Document
    from docx.shared import Inches
    import datetime

    # Define the start date
    start_date = datetime.date(2023, 8, 28)

    # Prepare a list of 18 weeks beginning from the start date
    dates = [start_date + datetime.timedelta(weeks=x) for x in range(18)]

    # Initialize a Word document with a one-row, three-column table
    doc = Document()
    table = doc.add_table(rows=1, cols=3)

    # Set the headers
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = 'Week'
    hdr_cells[1].text = 'Date'
    hdr_cells[2].text = 'Events/Notes'

    # Add one row per week
    for i, date in enumerate(dates):
        cells = table.add_row().cells
        cells[0].text = str(i + 1)
        cells[1].text = date.strftime("%B %d")
        cells[2].text = ''

    # Save the document
    doc.save('cache/tasks_schedule.docx')
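
word_calendar() leaves the table in python-docx's default style, which renders without visible borders. If gridlines are wanted, applying the built-in 'Table Grid' style right after creating the table should do it:

    table = doc.add_table(rows=1, cols=3)
    table.style = 'Table Grid'  # built-in style in python-docx's default template; draws cell borders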
@ -1430,7 +1459,8 @@ if __name__ == "__main__":
        10: ['dumb rename images mistake', file_renamer],
        11: ['list auth', list_auth],
        12: ['update auth', update_auth],
-       13: ['print a calendar', print_a_calendar]
+       13: ['print a calendar', print_a_calendar],
+       14: ['create a week calendar in word', word_calendar],
        }

    if len(sys.argv) > 1 and re.search(r'^\d+', sys.argv[1]):