# videos/servertoys.py


import os, json, funcy, re, sqlite3, operator, sys, socket
from flask import render_template
from collections import defaultdict
q = ''
DEEP_IP = '192.168.1.6'
this_machine = socket.gethostname()
if this_machine == 'ROGDESKTOP':
    HOST = "192.168.1.7"
    PORT = "9999"
    BASE = "\\\\%s\\hd2\\deep1_homedir\\Documents\\ooo\\" % DEEP_IP
    LIBBASE = "\\\\%s\\hd2\\peter_home\\Documents\\scripts\\ooopics\\lib\\" % DEEP_IP
    MEMEBASE = "\\\\%s\\hd2\\peter_home\\images\\Ok Pictures and Memes\\" % DEEP_IP
    CHANBASE = "\\\\%s\\hd2\\peter_home_offload\\Documents\\scripts\\chan\\" % DEEP_IP
    MOVBASE = "\\\\%s\\hd2\\nogood\\media\\" % DEEP_IP
    DOCBASE = "\\\\%s\\hd2\\bit_complete" % DEEP_IP
else:
    HOST = DEEP_IP
    PORT = "9999"
    BASE = "/media/hd2/deep1_homedir/Documents/ooo/"
    LIBBASE = "/media/hd2/peter_home/Documents/scripts/ooopics/lib/"
    MEMEBASE = "/media/hd2/peter_home/images/Ok Pictures and Memes/"
    CHANBASE = "/media/hd2/peter_home_offload/Documents/scripts/chan/"
    MOVBASE = "/media/hd2/nogood/media/"
    DOCBASE = "/media/hd2/bit_complete"
con = ''
cur = ''
def dict_factory(cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d
##########
########## Determining the contents of a folder
##########
movietype = "mkv,mp4,wmv,avi,webm,mpg,mpeg".split(",")
booktype = "epub,pdf,odt,docx,html,rtf,mobi,djvu,azw,azw3,chm".split(",")
musictype = "flac,mp3,wma,m3u".split(",")
imagetype = "jpg,jpeg,png,gif,webp".split(",")
archivetype = "zip,rar,iso,tar,gz".split(",")
types = {'movie':movietype, 'book':booktype, 'music':musictype, 'image':imagetype, 'archive':archivetype}
#to_keep = "flac,epub,pdf,mkv,mp4,wmv,mp3,wma,avi,webm,m3u,zip,odt,jpeg,jpg,png,html,rtf,txt,mobi,djvu,azw,docx,azw3,".split(",")
def cleantext(x):
    return x.encode('utf8').decode(sys.stdout.encoding)
def overview_folder(f):
    count_files = 0
    count_dirs = 0
    size = 0
    for root, dirs, files in os.walk(f):
        count_files += len(files)
        count_dirs += len(dirs)
        size += sum([os.path.getsize(os.path.join(root, x)) for x in files])
    return (count_files, count_dirs, size)
def ending(x): return x.split('.')[-1].lower()
def depth(x): return len(x.split('/')) - len(DOCBASE.split('/'))
def greater_type(x):
    for label, coll in types.items():
        if x in coll: return label
    return "unknown"
def count_types(filelist):
    endings = map(ending, filelist)
    gts = list(map(greater_type, endings))
    howmany = defaultdict(int)
    for G in gts: howmany[G] += 1
    sorted_d = sorted(howmany.items(), key=operator.itemgetter(1))
    return sorted_d
def most_common(filelist):
    sortedtypes = count_types(filelist)
    if len(sortedtypes) == 0: return ''
    y = sortedtypes[-1]
    if not y[0] == 'unknown': return y[0]
    if len(sortedtypes) > 1:
        return sortedtypes[-2][0]
    return ''
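# A minimal usage sketch (illustrative only, not part of the original script):
# it summarizes one folder with overview_folder() and reports the dominant file
# category from most_common(). The helper name and print format are made up here.
def describe_folder(folder):
    """Hypothetical helper: report size stats and the most common file type."""
    files = [x for x in os.listdir(folder) if os.path.isfile(os.path.join(folder, x))]
    (n_files, n_dirs, total_size) = overview_folder(folder)
    print("%s: %i files, %i subdirs, %i bytes, mostly %s" %
          (folder, n_files, n_dirs, total_size, most_common(files) or "unknown"))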
#### GOAL: picture manager. Show folder, and
####
#### - mark for hide or delete
#### - rate
#### - accommodate major sortings, like personal/fam, gav, meme, x3, etc
####
#### - refer to any pic from Z app.
####
#### - make an infinite / zoomable ideaboard
#### + make autolayout
#### + make auto slideshows
#### + multi-client slideshow for bigger effects (multi-projector)
####
#### - tag it
#### - possibly move all selected or all of a tag if the drives cooperate
#### - make hierarchy of tags so i can see or search a group of them
#### - and hide some by default
#### - framework for ai so i can practice
#### + face detection
#### + descriptions
#### + similar photos
#### + train fancier stuff, gans, face swaps, etc
####
# oopics look.py
# my torrent sort.py
# zettle app
# my bookmark
# app and
# chrome bkmks
# Serve the ooopics backend
def tag(x,y): return "<%s>%s</%s>" % (x,y,x)
def img(s): return "<img src='img/%s' />" % s
def tagc(x,c,y): return '<%s class="%s">%s</%s>' % (x,c,y,x)
def a(href,txt): return '<a href="%s">%s</a>' % (href,txt)
def isPic(s): return os.path.isfile(BASE+s)
def isPicPre(s, p):
    t = s.lower()
    if plain(t).startswith(p):
        return os.path.isfile(BASE + s)
    return False
def isVid(s): return s.name.endswith('.mp4') and os.path.isfile(MOVBASE+s.name)
def base(s): return BASE + s
def plain(s):
    s = s.lower()
    return re.sub(r'[^a-z0-9]', '', s)
def plain_sw(s, pre):
    s = plain(s)
    if s[:3] == pre[:3]:  # cheap pre-check on the first three characters
        print("%s startswith %s ?" % (s, pre))
        if s.startswith(pre):
            print('returning T')
            return True
    #print('returning F')
    return False
def first_n(s):  # / i m g / _5____
    n = 4
    prefixlen = 4
    s = plain(s)
    if len(s) > (n + prefixlen): return s[prefixlen:prefixlen + n]
    return s
def modtime(s):
    try:
        return os.path.getmtime(base(s))
    except:
        return ''
def main2():
    all = BASE  # "/home/phowell/hd2/deep1_homedir/Documents/ooo/"
    allp = os.listdir(all)
    allp = ['/img/' + x for x in filter(isPic, allp)]
    allp.sort(key=modtime)
    allj = json.dumps(allp, indent=2)
    return render_template('index.html', all_pics_json=allj, index="100")
def prefix():
    all = BASE  # "/home/phowell/hd2/deep1_homedir/Documents/ooo/"
    allp = os.listdir(all)
    allp = ['/img/' + x for x in filter(isPic, allp)]
    allp.sort()
    all_grps = [[g for g in k] for k in funcy.partition_by(first_n, allp)]
    all_label_grps = [(first_n(k[0]), len(k), k) for k in all_grps if len(k) > 3]
    print(all_label_grps)
    #return json.dumps(all_label_grps,indent=2)
    #all_label_grps.sort( key= lambda x: 0-x[1] )
    all_label_grps.sort(key=lambda x: x[0])
    all_keys = [k[0] for k in all_label_grps]
    amounts = [k[1] for k in all_label_grps]
    allk = json.dumps(all_keys, indent=2)
    alla = json.dumps(amounts, indent=2)
    allj = json.dumps(allp, indent=2)
    return render_template('prefix.html', prefix_json=allk, amounts=alla, all_pics_json=allj, index="1")
# secret Cles0wAwm9o4N_jnPNXOwgH2-DJXKw
# name stm21
def image_getter(thread_url):
    r = requests.get(thread_url)  # , auth=('user', 'pass'))
    soup = bs(r.text, 'html.parser')
    link = soup.select_one('a.title')  # assumed selector: first title link on the thread page
    img_url = link.attrs['href'] if link else ''
    folder = "tt1/"
    (head, tail) = os.path.split(img_url)
    if os.path.exists(os.path.join(folder, tail)):
        print(" + Image %s was already downloaded." % tail)
        return
    print(" getting %s" % img_url)
    r = requests.get(img_url, stream=True)
    if r.status_code == 200:
        with open(os.path.join(folder, tail), 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        print(" + Done with image.")
        time.sleep(0.75)
    else:
        print(" - Failed with image.")
# hd2/peter_home_offload/Documents/scripts/chan/
# all the folders
# display images in a folder, mark them if [yes] save to database.
# [clean] the folder, delete 'unmarked' ones
#
def get_pic_by_id(g):
    #global con, cur
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    con.row_factory = dict_factory
    cur = con.cursor()
    cur.execute("SELECT * FROM pics p JOIN pictag pt ON pt.pic=p.id JOIN tags t ON t.id=pt.tag WHERE p.id=?", (g,))
    return json.dumps(cur.fetchone())
def get_all_pics(j=0):
    #global con, cur
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    con.row_factory = dict_factory
    cur = con.cursor()
    #cur.execute("SELECT * FROM pics p JOIN pictag pt ON pt.pic=p.id JOIN tags t ON t.id=pt.tag GROUP BY p.path")
    cur.execute("SELECT * FROM pics")
    if j: return json.dumps(cur.fetchall())
    return cur.fetchall()
def get_all_tags(which=1):
    #global con, cur
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    con.row_factory = dict_factory
    cur = con.cursor()
    cur.execute("SELECT * FROM tags WHERE app=? ORDER BY label", (which,))
    return json.dumps(cur.fetchall())
def add_pic(path):
    #global con, cur
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    con.row_factory = dict_factory
    cur = con.cursor()
    cur.execute("INSERT INTO pics (path) VALUES (?)", (str(path),))
    con.commit()
    return json.dumps(cur.lastrowid)
def add_pic_tag(tag_id, pic_id):
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    cur = con.cursor()
    # TODO select and dont duplicate
    cur.execute("INSERT INTO pictag (pic,tag) VALUES (?, ?)", (str(pic_id), str(tag_id)))
    con.commit()
    return json.dumps({'status': 'success', 'id': cur.lastrowid})
def add_tag(tag):
    #global con, cur
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    con.row_factory = dict_factory
    cur = con.cursor()
    cur.execute("INSERT INTO tags (label) VALUES (?)", (tag,))
    con.commit()
    return cur.lastrowid
def add_pic_tag_str(pic_id, tag_str):
    #global con, cur
    con = sqlite3.connect(LIBBASE + 'ooo.db')
    con.row_factory = dict_factory
    cur = con.cursor()
    cur.execute("SELECT * FROM tags WHERE label=?", (str(tag_str),))
    result = cur.fetchone()
    if result:
        tag_id = result['id']
    else:
        tag_id = add_tag(tag_str)
    return add_pic_tag(pic_id, tag_id)
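# A hedged sketch tying together the review flow described above (show a folder,
# mark the keepers, save them to the database with a tag). The helper name,
# folder argument, and 'keep' tag label are illustrative, not part of the original.
def save_marked_pics(folder, marked_names, tag_str='keep'):
    """Hypothetical: insert each marked picture and attach one tag to it."""
    ids = []
    for name in marked_names:
        pic_id = json.loads(add_pic(os.path.join(folder, name)))  # add_pic returns the row id as JSON
        add_pic_tag_str(pic_id, tag_str)
        ids.append(pic_id)
    return ids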
def start_tt():
    global q
    url = 'https://www.reddit.com/r/TinyTits/top/?t=week'
    r = requests.get(url)  # , auth=('user', 'pass'))
    open('temp.html', 'w').write(r.text)
    soup = bs(r.text, 'html.parser')
    threads = []
    qqueue = []
    i = 0
    print(soup.get_text())
    for img in soup.select('a.title'):
        link = img.attrs['href']
        print(link)
        image_getter(link)
        i += 1
    # note: qqueue is never populated above, so the pool below has nothing to do
    print("There are %i images to fetch." % len(qqueue))
    pool = ThreadPoolExecutor(max_workers=5)
    for q in qqueue:
        q["total"] = len(qqueue)
        future = pool.submit(image_getter, q)
def plain_sort(index=''):
    root_len = len(DOCBASE)
    output = "<link href='lib/pics.css' rel='stylesheet' />\n"
    output += tag('h2', 'Downloaded Files')
    print(' --starting dir listing')
    folder_line = '<div class="pure-g"><div class="pure-u-%i-24"></div><div class="pure-u-%i-24"><b>%s</b></div></div>\n'
    file_line = '<div class="pure-g"><div class="pure-u-%i-24"></div><div class="pure-u-%i-24">%s</div></div>\n'
    for (root, dirs, files) in os.walk(DOCBASE, topdown=True):
        dirs.sort()
        files.sort()
        #output += str(root)
        #output += str(dirs)
        #output += str(files)
        print(' --%s' % root, end=", ")
        myroot = root[root_len:]
        parts = re.split(r'[\\/]', myroot)  # split on either path separator
        path_len = ""  # " " * len(parts[:-1])
        path_last = parts[-1]
        (fi, di, si) = overview_folder(root)  # num files, num dirs, size
        if si < 1024:
            si = "%s b" % si
        elif si < 1024 * 1024:
            si = "%s K" % int(si / 1024.0)
        elif si < (1024 * 1024 * 1024):
            si = "%s M" % int(si / (1024 * 1024.0))
        else:
            si = "%s G" % int(si / (1024 * 1024 * 1024.0))
        default_cmd = most_common(files)
        if not default_cmd: default_cmd = "NO"
        this_line = "%s\n\t%s \tDepth: %i \t Subdirs: %i \t # Files: %i \t # Total size: %s \n" % \
            (path_len + cleantext(path_last), default_cmd, depth(root), len(dirs), fi, si)
        mydepth = depth(root) + 1
        if mydepth > 22: mydepth = 22
        output += folder_line % (mydepth, 24 - mydepth, this_line)
        #moveit(files, root, default_cmd)
        try:
            #output += tag("p", "%i total files, in %i total folders, for %s" % (fi,di,si))
            for F in sorted(files,
                            key=lambda x: os.path.splitext(x)[1] + os.path.split(x)[1]):
                output += file_line % (mydepth, 24 - mydepth, cleantext(F))
        except Exception as e:
            print("**** Some sort of error with %s" % files, end=" ")
            print(e)
    output += tag("p", "All done with files.")
    return output
if __name__ == '__main__':
    from queue import Queue
    from bs4 import BeautifulSoup as bs
    from concurrent.futures import ThreadPoolExecutor
    import requests, os, json, shutil, time
    import requests.auth
    #import praw
    q = Queue()
def start_tt2():
    client_auth = requests.auth.HTTPBasicAuth('DDsmF856ZAookA', 'Cles0wAwm9o4N_jnPNXOwgH2-DJXKw')
    post_data = {"grant_type": "password", "username": "ssttmm2323", "password": "ssttmm2323"}
    headers = {"User-Agent": "Totally Chrome"}
    response = requests.post("https://www.reddit.com/api/v1/access_token", auth=client_auth, data=post_data, headers=headers)
    print(response.json())
    headers = {"Authorization": "bearer fhTdafZI-0ClEzzYORfBSCR7x3M", "User-Agent": "Totally Chrome"}
    response = requests.get("https://oauth.reddit.com/api/v1/me", headers=headers)
    print(response.json())
def start_tt3():
    reddit = praw.Reddit(client_id="DDsmF856ZAookA",
                         client_secret="Cles0wAwm9o4N_jnPNXOwgH2-DJXKw",
                         password="ssttmm2323",
                         user_agent="Totally Chrome",
                         username="ssttmm2323",)
# start_tt3()