gpt

commit e87d19432b (parent 1de9cff762)

gpt.py: 59 changed lines
@@ -17,20 +17,61 @@ SAVE_ATTACHEMENTS = 1
 
 
 def gpt_chat(instruction, prompt, model=DEFAULT_MODEL):
+    # 1) Strip extremely long Outlook protection URLs first
+    try:
+        prompt = re.sub(r"\S*protection\.outlook\.com\S*", "", prompt, flags=re.I)
+    except Exception:
+        pass
+
+    # 2) Crude token estimation and truncation (target ~29k tokens)
+    # Approximates 1 token ≈ 4 characters.
+    def _est_tokens(s):
+        try:
+            return max(1, int(len(s) / 4))
+        except Exception:
+            return 1
+
+    MAX_TOKENS = 29000
+    # reserve some tokens for system/overhead
+    overhead = 200
+    inst_tokens = _est_tokens(instruction)
+    prompt_tokens = _est_tokens(prompt)
+    total = inst_tokens + prompt_tokens + overhead
+    if total > MAX_TOKENS:
+        allow = max(500, MAX_TOKENS - inst_tokens - overhead)
+        # convert back to characters
+        allow_chars = max(2000, allow * 4)
+        # keep last N chars under assumption latest content is most relevant
+        if len(prompt) > allow_chars:
+            prompt = prompt[-allow_chars:]
+
     messages = [
         {"role": "system", "content": instruction},
         {"role": "user", "content": prompt}
     ]
-    response = client.chat.completions.create(model=model, messages=messages)
-    result = response.choices[0].message.content
-    log3 = codecs.open('cache/gptlog.txt','a','utf-8')
-    log3.write(json.dumps({'prompt':prompt, 'result': result},indent=2))
-    log3.write("\n\n---\n\n")
-    log3.close()
+    try:
+        response = client.chat.completions.create(model=model, messages=messages)
+        result = response.choices[0].message.content
+    except Exception as e:
+        # Graceful failure; log and return empty JSON
+        try:
+            log3 = codecs.open('cache/gptlog.txt','a','utf-8')
+            log3.write(json.dumps({'prompt': prompt[:2000], 'error': str(e)}, indent=2))
+            log3.write("\n\n---\n\n")
+            log3.close()
+        except Exception:
+            pass
+        return "{}"
+
+    # Log success
+    try:
+        log3 = codecs.open('cache/gptlog.txt','a','utf-8')
+        log3.write(json.dumps({'prompt':prompt, 'result': result},indent=2))
+        log3.write("\n\n---\n\n")
+        log3.close()
+    except Exception:
+        pass
     return result
 
 def summarize_u_info(msg):
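
A few notes on the change, with illustrative sketches; none of the code below is part of the commit.

Step 1 drops any whitespace-delimited run of characters containing protection.outlook.com, since Outlook safelink-rewritten URLs can run to hundreds of characters each and inflate the prompt for no benefit. A quick demonstration of the same pattern (the sample URL is made up):

import re

text = ("See https://eur01.safelinks.protection.outlook.com/"
        "?url=https%3A%2F%2Fexample.com&data=05%7C01 for details.")
# Same regex as the commit: remove the whole non-whitespace run
# around "protection.outlook.com", case-insensitively.
print(re.sub(r"\S*protection\.outlook\.com\S*", "", text, flags=re.I))
# -> "See  for details."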
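Step 2's arithmetic: under the 1 token ≈ 4 characters rule, the 29,000-token budget is roughly 116,000 characters, less the instruction's share and the 200-token overhead reserve. A standalone sketch of the same budgeting logic (the function name and defaults are illustrative, not from the repository):

def truncate_to_budget(instruction, prompt, max_tokens=29000, overhead=200):
    # Crude estimate, as in the commit: 1 token ~= 4 characters.
    est = lambda s: max(1, len(s) // 4)
    if est(instruction) + est(prompt) + overhead <= max_tokens:
        return prompt
    # Convert the remaining token allowance back to characters and keep
    # the tail of the prompt, assuming the latest content matters most.
    allow = max(500, max_tokens - est(instruction) - overhead)
    return prompt[-max(2000, allow * 4):]

# A 200,000-character prompt with a 21-character instruction is cut to
# (29000 - 5 - 200) * 4 = 115,180 characters.
print(len(truncate_to_budget("Summarize this email.", "x" * 200000)))  # 115180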
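The failure path returns the string "{}" instead of raising, so a caller that parses the result as JSON degrades to an empty dict rather than crashing. A hypothetical caller relying on that contract (email_body is a stand-in for real input, and the model is assumed to return JSON on success):

import json

email_body = "Hi team, invoice #4411 is attached. -- Alice"
raw = gpt_chat('Return {"sender": ..., "topic": ...} as JSON.', email_body)
fields = json.loads(raw)        # never raises on the failure path
print(fields.get("sender"))     # None if the API call failed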
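Both paths append the same record shape to cache/gptlog.txt: a pretty-printed JSON object followed by a "\n\n---\n\n" separator, with the file opened in append mode each time. Since json.dumps escapes newlines inside strings, the separator can never appear inside an entry, which makes the log trivial to split back apart. An illustrative reader (not part of the commit):

import codecs, json

with codecs.open('cache/gptlog.txt', 'r', 'utf-8') as fh:
    chunks = fh.read().split("\n\n---\n\n")
# Each non-empty chunk is one call: {'prompt', 'result'} on success,
# {'prompt', 'error'} on failure.
entries = [json.loads(c) for c in chunks if c.strip()]
print(len(entries), "logged calls")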