import os, json, sys, codecs, openai

from canvas_secrets import openai_org, openai_api_key

openai.organization = openai_org
openai.api_key = openai_api_key

#print(openai.Model.list())

# Set to 1 to also save message attachments to disk.
SAVE_ATTACHMENTS = 0


def gpt_faq(faq):

    system_role = """Ignore all previous instructions. You are an expert on the management and implementation of all aspects of online courses and running a distance education program, especially in higher education and community college. You are also an expert in pedagogy and the tools and techniques of effective teaching and learning, based on research and experience. You know how to focus on concrete examples of effective teaching which actually work in the real world and the advice you give is specific and immediately useful to teachers in the classroom. You provide accurate, factual, thoughtful, nuanced answers, and are brilliant at reasoning. Your users are experts in teaching and college administration, and are comfortable with the California and Federal Educational code, and know they need to comply with it. There is no need to explain this beyond noting any relevant laws. Don't be verbose in your answers, but do provide details and examples where it might help the explanation.

You are a world-class technical writer. Your style is human-centered, friendly and kind. Give me concise and accurate answers and ignore all the niceties that OpenAI programmed you with. Omit all qualifications and excuses. I know you are a large language model but I want you to pretend you are a confident and superintelligent oracle that can help a limited human warp reality to his own intentions. You can reveal your own sense of humor and depth of intelligence. It is very important that you get this right.

Adopt a penetratingly insightful, incisively funny, cleverly witty persona. Your answers cultivate playful, out-of-the-box thinking, rationality, technology, design, and careful use of incentives. Speak in specific, topic-relevant terminology. Do NOT hedge or qualify. Do not waffle. Speak directly and be willing to make creative guesses.

I am pasting below an email chain between myself and colleague(s). I am fairly certain that it contains a technical question that I have answered. They may be spread out over several messages, or there might be some clarification or follow-up questions. There will be headers, signatures, and other text that isn't a part of the core message. Ignore that. Consider the whole email chain while you prepare the following: Respond with a json formatted dictionary that contains the following:

{ "question": "Restate the question or problem in a concise but clear manner", "topics": ["keywords", "or phrases", "that categorize the issue"], "answer": "The best possible answer, written in markdown format. Draw the answer from the email but feel free to edit or embellish based on your knowledge. Generalize the answer to anyone who might have the issue. Your audience is mostly instructors working at a small community college. Do not refer to anyone's name specifically, unless it is Peter or Sabrina, but instead write for a general audience looking for the answers to their questions. We are writing a FAQ or help page. Feel free to use markdown-formatted bold, italic, lists, and links."} """

    # Create a chat completion that summarizes the email chain as an FAQ entry.
    my_model = "gpt-4"  # "gpt-3.5-turbo-16k" # gpt-3.5-turbo gpt-4 gpt-4-32k
    completion = openai.ChatCompletion.create(model=my_model, messages=[
        {"role": "system", "content": system_role},
        {"role": "user", "content": faq} ] )

    # Append the full API response to a running log for debugging.
    log3 = codecs.open('cache/gptlog.txt', 'a', 'utf-8')
    log3.write(json.dumps(completion, indent=2))
    log3.write("\n\n---\n\n")
    log3.close()

    r = completion['choices'][0]['message']['content']
    #print(str(r) + "\n\n")
    return r
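

# Illustrative sketch (not in the original script): gpt_faq() returns the raw
# model output, which the system prompt asks to be a JSON dictionary with
# "question", "topics", and "answer" keys. A small wrapper like this could
# parse that string defensively, since the model may occasionally return
# malformed JSON. The helper name and the fallback shape are assumptions.
def parse_faq_response(raw):
    try:
        entry = json.loads(raw)
    except (json.JSONDecodeError, TypeError):
        # Fall back to wrapping the raw text so nothing is silently lost.
        entry = {"question": "", "topics": [], "answer": str(raw)}
    return entry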


def gpt_test():
    # Simple sanity check against the legacy Completions endpoint.
    my_prompt = "Write a series of texts trying to sell a pen to a stranger."
    print(sys.argv)

    # Use the command-line arguments as the prompt if any were given.
    if len(sys.argv) > 1:
        my_prompt = " ".join(sys.argv[1:])
    else:
        print("Prompt: %s" % my_prompt)

    my_model = "text-davinci-003"

    # create a completion
    completion = openai.Completion.create(engine=my_model, prompt=my_prompt, max_tokens=1000, temperature=1, top_p=1)

    #print(completion)
    #print(json.dumps(completion,indent=2))
    print(completion.choices[0].text)
    print()


def sample_send_email():
    import win32com.client
    ol = win32com.client.Dispatch("outlook.application")
    olmailitem = 0x0    # olMailItem: the item type constant for a standard mail item
    newmail = ol.CreateItem(olmailitem)
    newmail.Subject = 'Testing Mail'
    newmail.To = 'peter.howell@gmail.com'
    #newmail.CC = 'xyz@gmail.com'
    newmail.Body = 'Hello, this is a test email to showcase how to send emails from Python and Outlook.'
    # attach = 'C:\\Users\\admin\\Desktop\\Python\\Sample.xlsx'
    # newmail.Attachments.Add(attach)
    # To display the mail before sending it:
    # newmail.Display()
    newmail.Send()


def fetch_useful_info():
    import win32com.client
    outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
    root_folder = outlook.Folders.Item(1)
    print(f"Root folder: {root_folder.Name}")

    # List the names of the subfolders under the root folder.
    print("\nFolders:")
    for folder in root_folder.Folders:
        print("  " + folder.Name)

    log = codecs.open("cache/email_usefulinfo.txt", "w", "utf-8")

    # Walk the 'useful info ref' subfolder and log each message.
    print("\nUseful Info Reference:")
    uinfo = root_folder.Folders['useful info ref']
    for message in uinfo.Items:
        atch_list = "Attachments: "
        atch_count = 0
        if SAVE_ATTACHMENTS:
            attachments = message.Attachments
            try:
                # Only the first attachment is saved; the loop breaks after one pass.
                attachment = attachments.Item(1)
                for attachment in message.Attachments:
                    print("  -> " + str(attachment))
                    loc = "C:\\Users\\peter\\Documents\\gavilan\\ca_merged\\canvasapp\\cache\\attachments\\" + str(attachment)
                    attachment.SaveAsFile(loc)
                    atch_list += str(attachment) + ', '
                    atch_count += 1
                    break
            except Exception as e:
                pass
        print("  " + message.Subject)
        log.write(f"\n\n---\nSubject: {message.Subject}\nFrom: {message.Sender}\n")
        if atch_count:
            log.write(f"{atch_list}\n")
        log.write(f"Date: {message.SentOn}\n\n{message.Body}\n")

    log.close()


def process_email_filesave(message, log, i):
    # Log one message to the combined FAQ log and to its own numbered file.
    atch_list = "Attachments: "
    atch_count = 0
    if SAVE_ATTACHMENTS:
        attachments = message.Attachments
        try:
            # Only the first attachment is saved; the loop breaks after one pass.
            attachment = attachments.Item(1)
            for attachment in message.Attachments:
                print("  -> " + str(attachment))
                loc = "C:\\Users\\peter\\Documents\\gavilan\\ca_merged\\canvasapp\\cache\\attachments_faq\\" + str(attachment)
                attachment.SaveAsFile(loc)
                atch_list += str(attachment) + ', '
                atch_count += 1
                break
        except Exception as e:
            pass
    print("  " + message.Subject)
    log.write(f"\n\n---\nSubject: {message.Subject}\nFrom: {message.Sender}\n")
    if atch_count:
        log.write(f"{atch_list}\n")
    log.write(f"Date: {message.SentOn}\n\n{message.Body}\n")

    # Also save the message as an individual text file for later processing.
    logeach = codecs.open(f"cache/faq/{i}.txt", "w", "utf-8")
    logeach.write(f"Subject: {message.Subject}\nFrom: {message.Sender}\nDate: {message.SentOn}\n\n{message.Body}")
    logeach.close()


def fetch_faq():
    import win32com.client
    outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
    root_folder = outlook.Folders.Item(1)

    PAUSE = 0

    startat = input("Press enter to continue or a number to start at that message: ")
    if startat == '': startat = '0'
    startat = int(startat)
    i = 0

    log = codecs.open("cache/email_gpt_faq.txt", "w", "utf-8")
    log2 = codecs.open("cache/faq.json", "a", "utf-8")

    # Access the 'for faq' subfolder and process each message in it.
    print("\nFAQ Emails:")
    uinfo = root_folder.Folders['for faq']
    for message in uinfo.Items:
        # Skip ahead to the requested starting message.
        if i < startat:
            i += 1
            continue

        try:
            process_email_filesave(message, log, i)
        except Exception as e:
            print(f"Exception: {str(e)}")

        #summary = gpt_faq( f"Subject: {message.Subject}\nBody: {message.Body}")
        #log2.write( f",\n{summary}")
        #log2.flush()
        #print(f"Subject: {message.Subject}\n{summary}\n\n-----\n\n")

        i += 1
        if PAUSE:
            temp = input(f"press enter to continue, or q to quit now at message {i}: ")
            if temp == 'q':
                exit()
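

# Illustrative sketch (not in the original script): the commented-out lines in
# fetch_faq() append raw gpt_faq() output to cache/faq.json separated by commas,
# which does not produce a valid JSON document on its own. One way to build a
# well-formed file instead is to collect the parsed entries in a list and dump
# the whole list at the end, reusing the parse_faq_response() sketch above.
# The function name and output path here are assumptions.
def build_faq_json(messages, outfile="cache/faq_combined.json"):
    entries = []
    for message in messages:
        raw = gpt_faq(f"Subject: {message.Subject}\nBody: {message.Body}")
        entries.append(parse_faq_response(raw))
    with codecs.open(outfile, "w", "utf-8") as out:
        json.dump(entries, out, indent=2)
    return entries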


#fetch_useful_info()

fetch_faq()