index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,000 | 2f4eda909bfa59458dc36a23a6d665ca43e16173 | import os
import pytest
import redis
from falcon import testing
from msgr.app import create
from msgr.db import DbClient
def is_responsive(client):
    """Return True when the redis server behind *client* answers PING.

    Connection failures are treated as "not responsive"; any other
    exception propagates unchanged.
    """
    try:
        alive = client.ping()
    except redis.ConnectionError:
        return False
    return alive
@pytest.fixture(scope='session')
def redis_client(docker_ip, docker_services):
    """A fixture that starts a real redis instance via docker-compose.

    Blocks (up to 30s, polling every 0.1s) until the container answers
    PING.  `docker_ip` and `docker_services` come from the pytest-docker
    plugin.
    """
    # NOTE(review): connects to localhost rather than docker_ip -- works when
    # the compose port is published locally; confirm for remote docker hosts.
    client = redis.StrictRedis(host='localhost', port=6379, db=0)
    docker_services.wait_until_responsive(
        timeout=30.0, pause=0.1,
        check=lambda: is_responsive(client)
    )
    return client
@pytest.fixture(scope='session')
def docker_compose_file(pytestconfig):
    """Point pytest-docker at the compose file in the project root."""
    compose_path = os.path.join(str(pytestconfig.rootdir), 'docker-compose.yml')
    return compose_path
@pytest.fixture(scope='function')
def client(redis_client, request):
    """Falcon test client wired to the real redis instance.

    Registers a finalizer that flushes redis after each test so no state
    leaks between test functions.
    """
    def cleanup():
        """Remove everything from redis to ensure a clean slate between tests."""
        redis_client.flushall()
    request.addfinalizer(cleanup)
    return testing.TestClient(create(DbClient(client=redis_client)))
class MockDb:
    """In-memory stand-in for DbClient used in unit tests.

    Every operation succeeds and returns an empty or neutral value, so
    tests can exercise the app without a running redis.
    """

    def __init__(self):
        pass

    def add(self, key, value):
        # Pretend every write succeeds.
        return True

    def get_range(self, key, start, stop):
        # No stored messages -- always an empty slice.
        return []

    def get_unread(self, key):
        # Nothing is ever unread.
        return []

    def remove(self, key, elements):
        # Mirrors the real client's [removed, remaining] shape.
        return [0, 0]
|
984,001 | 24c3b86144925ca57f1616d7282db847b1473077 | #!/usr/bin/python
""" Combines multiple PDB files into one """
import sys
import glob
# Python 2 CLI: wrap each input PDB file in MODEL/ENDMDL records and print
# the result to stdout (redirect to build the combined model file).
if len(sys.argv) < 2:
    print "model.py *.pdb > model.pdb"
    sys.exit()
i = 1  # MODEL serial number; PDB models are 1-based
for filename in sys.argv[1:]:
    # NOTE(review): file handles are never closed explicitly -- acceptable
    # for a short-lived script, but leaks descriptors for huge inputs.
    print """MODEL %d
%s
ENDMDL
""" % (i, open(filename, 'r').read().strip())
    i += 1
984,002 | 7a51f69e73dfbeaae5cbb637b055ce8d94c1014f | from django.urls import path
from . import views
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
# URL routes for this app: a single poll endpoint keyed by the video id.
urlpatterns = [
    path('videopoll/<str:video_id>/',views.video_poll,name='video-poll')
]
|
984,003 | a85358d3979d0a5667a4ee81539d17c3029e6b1e | from wtforms import widgets
from wtforms.validators import ValidationError
import pkg_resources
from . import validators
from .fields import *
from .forms import Form, SecureForm
# Resolve the installed package version from setuptools metadata at import time.
__version__ = pkg_resources.get_distribution('pyramid_wtforms').version
|
984,004 | e6688d258df087875b33204276c35f7f675e3b38 | #!/usr/bin/python
import os,sys # to get path
from subprocess import call
import meter_db as mdb # for sql queries
def uploadEdata(dataFileName):
    """Load an uploaded electricity CSV into the DB and update statuses.

    *dataFileName* must start with the meta id followed by '_' (e.g.
    "123_data.csv").  Prints CGI-style HTML progress markup (Python 2).
    Skips the load entirely when data for the meta id already exists.
    """
    metaID = dataFileName.split('_')[0]
    print "<p>Meta ID: {}</p>".format(metaID)
    householdID = mdb.getHouseholdForMeta(metaID)
    print "<p>Household ID: {}</p>".format(householdID)
    if (checkExistence(metaID)):
        print "Data already exists - aborting"
    else:
        # data is new
        # NOTE(review): SQL built by string concatenation -- dataFileName and
        # metaID are interpolated unescaped (injection risk if ever
        # attacker-controlled).
        sqlq = "LOAD DATA INFILE '/home/meter/data/" + dataFileName + "' INTO TABLE Electricity FIELDS TERMINATED BY ',' (dt,Watt) SET Meta_idMeta = " + str(metaID) + ";"
        mdb.executeSQL(sqlq)
        mdb.commit()
        updateDataQuality(metaID, 1)
        # update status
        sqlq = """UPDATE Household
SET `status`= '6'
WHERE `idHousehold` ='{}';
""".format(householdID)
        mdb.executeSQL(sqlq)
        mdb.commit()
        print "<p>Status for HH {} set to 6</p>".format(householdID)
        # produce 1min and 10min data
        os.system('python /home/meter/Interface/el_downsample.py')
def updateDataQuality(idMeta, Quality):
    """Set the Quality flag on the Meta row identified by *idMeta*."""
    # XXX add for diaries
    # NOTE(review): values interpolated unquoted via %s -- assumes both
    # arguments are numeric; injection risk for string input.
    sqlq = "UPDATE Meta \
    SET `Quality`= %s \
    WHERE `idMeta` = %s;" % (Quality, idMeta)
    mdb.executeSQL(sqlq)
    mdb.commit()
def checkExistence(idMeta):
    """Return True when 10-minute electricity data already exists for *idMeta*.

    Fix: the original used a bare ``except`` that swallowed every error --
    including DB connectivity failures -- and reported "no data", which made
    the caller re-load the file and create duplicates.  Only the expected
    empty-result case is handled now; real DB errors propagate.  It also fell
    through to an implicit ``None`` when the row was falsy; a bool is always
    returned now (backward compatible under truth testing).
    """
    sqlq = """
    SELECT dt
    FROM Electricity_10min
    WHERE `Meta_idMeta` = {}
    LIMIT 1;
    """.format(idMeta)
    try:
        result = mdb.getSQL(sqlq)[0]
    except IndexError:
        # Empty result set: no data uploaded yet.
        return False
    return bool(result)
# CGI-style entry point: the first argument is the uploaded data file name.
uploadEdata(sys.argv[1])
print "<p>Upload complete</p>"
|
984,005 | 780405603817ca18d10a856dc9a4459ee2056a16 | import imgurpython
import urllib
import requests
import os
# escape file name
# if title throws error, append to "unknwon_title_" the id which is the last element in the string
def escape(name):
    """Sanitise *name* for use as a file name.

    Keeps only letters, digits and spaces (trailing whitespace stripped).
    If that fails, falls back to "unknown_title_<c>" where <c> is the last
    non-underscore character of *name* (typically the imgur id suffix).
    """
    response = "unknown_title_"
    try:
        kept = [ch for ch in str(name) if ch.isalpha() or ch.isdigit() or ch == ' ']
        response = "".join(kept).rstrip()
    except:
        for ch in reversed(name):
            if ch != "_":
                response += ch
                break
    return response
# download one image
# imgur give access to various formats e.g. png, jpg, so you can change it to your favorite format
def download_image(img, path):
    """Download a single imgur item to *path*, choosing extension by type.

    GIFs up to 2 MiB are saved as .gif; larger GIFs use imgur's .mp4
    rendition; everything else is fetched as .png.  Existing files are
    never re-downloaded.  (Uses the Python 2 urllib.urlretrieve API.)
    """
    if not img.get("type"):
        return
    two_mib = 2097152
    if img["type"] == "image/gif" and img["size"] <= two_mib:
        target, url = path + ".gif", img["link"]
    elif img["type"] == "image/gif" and img["size"] > two_mib:
        target, url = path + ".mp4", img["mp4"]
    else:
        target, url = path + ".png", img["link"] + ".png"
    if not os.path.exists(target):
        urllib.urlretrieve(url, target)
if __name__ == "__main__":
    # Interactive OAuth (pin flow), then walk the favorites pages and
    # download every image/album.
    print("You have to register a new app on api.imgur.com to get a client id and secret")
    client_id = str(input("Client id: "))
    client_secret = str(input("Client secret: "))
    username = str(input("Your username: "))
    client = imgurpython.ImgurClient(client_id, client_secret)
    # Authorization url, where you get a pin to get access to your profile
    authorization_url = client.get_auth_url('pin')
    # ... redirect user to `authorization_url`, obtain pin
    print("Please go here and copy the pin: " + authorization_url)
    pin = str(input('Paste the PIN:'))
    credentials = client.authorize(pin, 'pin')
    client.set_user_auth(credentials['access_token'], credentials['refresh_token'])
    # cannot get all favorites without the page parameter, so i have to iterate through the pages
    # maybe a bug
    for page in range(0, 100):
        res = requests.get("https://api.imgur.com/3/account/" + username + "/favorites/" + str(page),
                           headers={'Authorization': 'Bearer %s' % client.auth.get_current_access_token()})
        imgs = res.json()
        # problem with client.get_account_favorites(username) -> not getting all favorites
        # maybe a bug
        for index, img in enumerate(imgs["data"]):
            if img["is_album"] == False:
                print("Loading image" + str(index) + " from " + str(len(imgs["data"])))
                path = os.path.dirname(os.path.abspath(__file__)) + "\\images\\" + escape(img["title"]) + "_" + str(
                    img["id"])
                download_image(img, path)
            else:
                print("Loading album" + str(index) + " from " + str(len(imgs["data"])))
                album = img
                res = requests.get("https://api.imgur.com/3/album/" + img["id"] + "/images",
                                   headers={'Authorization': 'Bearer %s' % client.auth.get_current_access_token()})
                # Fix: the original reassigned `imgs` (and reused `img`) here,
                # clobbering the favorites page being enumerated above -- after
                # the first album, the outer loop iterated the album's images
                # instead of the remaining favorites.  Use distinct names.
                album_imgs = res.json()
                album_path = os.path.dirname(os.path.abspath(__file__)) + "\\images\\" + escape(album["title"])
                if not os.path.exists(album_path):
                    os.makedirs(album_path)
                for album_img in album_imgs["data"]:
                    img_path = album_path + "\\" + str(album_img["id"])
                    download_image(album_img, img_path)
|
984,006 | d7ec565f226dbe884edb3d1ddf5b85ae592ebf2b | from random import randint
# Write 100 random integers in [0, 100] to Input.txt (one per line) in the
# current working directory, echoing each value to stdout.
# Fix: use a context manager so the file is closed even if a write fails.
with open("Input.txt", "w+") as f:
    for _ in range(100):
        val = randint(0, 100)
        f.write("%d\n" % (val))
        print(val)
984,007 | e1f7c7ed13c294497950c520c8a92ae321173dcc | #!/usr/bin/python
# Python 2: read a line and print it reversed via a negative-step slice.
# NOTE(review): `string` shadows the stdlib module name -- harmless here.
string = raw_input("string: ")
print(string[::-1])
984,008 | 8dd0c84f3d5765ffd3bb67d094cf1364956fd8ce | import random
import re
from sys import argv
#import pdb
#import operator
# Corpus: the whole book as a flat list of whitespace-separated tokens.
source_text = open('book.txt').read().split()
def histogram(source_text):
    """Build a word -> relative-frequency map for *source_text*.

    Fix: the original called filter_text(word) but discarded its return
    value, so punctuation was never actually stripped and the raw word was
    counted instead.  The cleaned word is now used as the counting key.
    Frequencies remain normalised by vocabulary size (the original scheme).
    """
    counts = {}
    for raw_word in source_text:
        word = filter_text(raw_word)
        counts[word] = counts.get(word, 0) + 1
    vocab_size = len(counts)
    return {word: count / vocab_size for word, count in counts.items()}
def filter_text(word):
    """Strip non-word characters and surrounding underscores from *word*."""
    word = re.sub(r"\W+", "", word)
    word = word.lstrip('_')
    word = word.rstrip('_')
    return word
def compare(pair):
    """Sort key: the frequency element of a (word, frequency) tuple."""
    frequency = pair[1]
    return frequency
def random_sentence(word_count):
    """Return the *word_count* most frequent words of the module-level
    `source_text` corpus, shuffled into a random order.

    Fix: removed the unused `sentence_random` local from the original.
    """
    ranked = sorted(histogram(source_text).items(), reverse=True, key=compare)
    sentence = [word for word, _freq in ranked[:word_count]]
    # Optional randomisation: drop this line to keep frequency order.
    random.shuffle(sentence)
    return sentence
if __name__ == '__main__':
    # CLI: first argument is the number of words to emit.
    word_count = int(argv[1])
    print(*random_sentence(word_count))
|
984,009 | e086450de8ea64b660bfd24112c4f6cd012492be | import pandas as pd
import numpy as np
import cPickle as pk
from collections import Counter
def save_obj(obj, name):
    """Pickle *obj* to "<name>.pkl" using the highest protocol."""
    path = name + '.pkl'
    with open(path, 'wb') as f:
        pk.dump(obj, f, pk.HIGHEST_PROTOCOL)
def load_obj(name):
    """Load and return the object pickled at "<name>.pkl"."""
    path = name + '.pkl'
    with open(path, 'rb') as f:
        return pk.load(f)
disease=311
patient_file='/home/data/sensitive_disease/%s_Patient.csv' % (disease)
lab_file='/home/data/sensitive_disease/%s_Lab.csv' % (disease)
procedure_file='/home/data/sensitive_disease/%s_Procedure.csv' % (disease)
med_file='/home/data/sensitive_disease/%s_Med.csv' % (disease)
pt_data=pd.read_csv(patient_file)
lab_data=pd.read_csv(lab_file)
pr_data=pd.read_csv(procedure_file)
med_data=pd.read_csv(med_file)
pid=pt_data.loc[:,'person_id'].unique()
print "numeber of patients:",len(pid)
print "lab data size:",lab_data.shape
print "procedure data size:",pr_data.shape
print "med data size:",med_data.shape
# for lab data
print "mrd_lab_id unique number:",lab_data.mrd_lab_id.unique().size
print "lab_nm unique number:",lab_data.lab_nm.unique().size
key=zip(lab_data.mrd_lab_id,lab_data.lab_nm)
pair=Counter(key)
save_obj(pair,'/home/data/sensitvie_disease/csm/lab{}_idnm'.format(disease))
item=[(x[0][0],x[0][1],x[1]) for x in pair.items()]
df=pd.DataFrame.from_records(item,columns=['mrd_lab_id','lab_nm','counts'])
df.to_csv('/home/data/sensitive_disease/csm/lab{}_idnm.csv'.format(disease),index=False)
# for procedure_data
print "order_cpt_cd unique number:",pr_data.order_cpt_cd.unique().size
print "order_nm unique number:",pr_data.order_nm.unique().size
print "mrd_order_id unique number:",pr_data.mrd_order_id.unique().size
key=zip(pr_data.order_cpt_cd,pr_data.order_nm)
pair=Counter(key)
save_obj(pair,'/home/data/sensitvie_disease/csm/pr{}_cptnm'.format(disease))
item=[(x[0][0],x[0][1],x[1]) for x in pair.items()]
df=pd.DataFrame.from_records(item,columns=['order_cpt_cd','order_nm','counts'])
df.to_csv('/home/data/sensitive_disease/csm/pr{}_cptnm.csv'.format(disease),index=False)
# for med_data
print "mrd_med_id unique number:",med_data.mrd_med_id.unique().size
print "generic_nm unique number:",med_data.generic_nm.unique().size
#print "mrd_order_id unique number:",pr_data.mrd_order_id.unique().size
key=zip(med_data.mrd_med_id,med_data.generic_nm)
pair=Counter(key)
save_obj(pair,'/home/data/sensitvie_disease/csm/med{}_cptnm'.format(disease))
item=[(x[0][0],x[0][1],x[1]) for x in pair.items()]
df=pd.DataFrame.from_records(item,columns=['mrd_med_id','generic_nm','counts'])
df.to_csv('/home/data/sensitive_disease/csm/med{}_idnm.csv'.format(disease),index=False) |
984,010 | dc0fc078b40c8312dd7728fc503f14e77cdb500f | rint (12/2)+1;
|
984,011 | 03bbd6bfc03e5cb1a3cbd6da7edf78e605e1ae81 | from abc import ABC, abstractmethod
class Video:
    """Plain data holder for a video record.

    `id` and `compartidas` are optional because they are unknown until
    the record has been persisted / shared.
    """
    def __init__(self, titulo, duracion, canal, fecha, likes, vistas, descripcion, id=None, compartidas=None):
        self.Id = id
        self.Compartidas = compartidas
        self.Titulo = titulo
        self.Duracion = duracion
        self.NombreCanal = canal
        self.Fecha = fecha
        self.Likes = likes
        self.Vistas = vistas
        self.Descripcion = descripcion
class Categoria:
    """Category record; note the name is stored under `Titulo`."""
    def __init__(self, id, nombre):
        self.Id, self.Titulo = id, nombre
class AbstractRepo(ABC):
    """CRUD interface that video storage backends must implement."""
    @abstractmethod
    def GuardarVideo(self, video):
        """Persist *video*."""
        pass
    @abstractmethod
    def MostrarLista(self):
        """Return the list of stored videos."""
        pass
    @abstractmethod
    def MostrarVideo(self, id):
        """Return the video identified by *id*."""
        pass
    @abstractmethod
    def ModificarVideo(self, video):
        """Update an existing *video*."""
        pass
    @abstractmethod
    def BorrarVideo(self, id):
        """Delete the video identified by *id*."""
        pass
class AbstractYoutube(ABC):
    """Interface for fetching video metadata from YouTube."""
    @abstractmethod
    def InfoVideo(self, url):
        """Return metadata for the video at *url*."""
        pass
if __name__ == "__main__":
    # Smoke test: construct a category and print its stored name.
    cat = Categoria(24, "algo")
    print(cat.Titulo)
|
984,012 | cbd75104931c779adb863f808ffc5b117cb198cc | """--------------------------------------------------------------------------------------------------------------------------------------
MODULE
NetPaymentsforGrouper.
DESCRIPTION
    This module contains functions that net payments on a counterparty group level.
-----------------------------------------------------------------------------------------------------------------------------------------
HISTORY
=========================================================================================================================================
Date Change no Developer Requester Description
-----------------------------------------------------------------------------------------------------------------------------------------
2020-01-28 FAOPS-905 Metse Moshobane Gasant Thulsie Initial Implementation.
-----------------------------------------------------------------------------------------------------------------------------------------
"""
import acm
from logging import getLogger
LOGGER = getLogger(__name__)
def net_payments_for_grouper(eii):
    """
    Function used for netting payments according to the party selected from the
    'Net Payments' FMenuExtension.

    Nets all qualifying settlements under the selected grouper row into a
    single new settlement and voids the originals.
    Fixes: the deep accessor chain was evaluated twice (hoisted once now);
    "setlements" typo in the log message.
    """
    # Settlements hanging off the currently selected grouper row.
    row_settlements = (
        eii.ExtensionObject().ActiveSheet().Selection()
        .SelectedCells()[0].RowObject().Settlements()
    )
    if not row_settlements:
        LOGGER.info("The grouper selected has no settlements")
        return
    settlements = list(row_settlements)
    # The first settlement defines the netting key (ccy, value day, cpty).
    first_settlement = settlements[0]
    qualifying_settlements = [
        settlement for settlement in settlements
        if check_qualifying_settlements_for_netting(first_settlement, settlement)
    ]
    if len(qualifying_settlements) <= 1:
        LOGGER.info("There is either only one or no settlements to be netted")
        return
    net_amount = sum(settlement.Amount() for settlement in qualifying_settlements)
    settlement_attributes = get_settlement_attributes(qualifying_settlements)
    new_settlement = create_settlement(settlement_attributes, net_amount)
    set_parent_settlement(new_settlement, qualifying_settlements)
def check_qualifying_settlements_for_netting(first_settlement, settlement):
    """Return True when *settlement* may be netted with *first_settlement*.

    A settlement qualifies when it is Authorised, of a nettable type,
    acquired by the securities-lending desk, and matches the reference
    settlement's currency, value day and counterparty.
    """
    if settlement.Status() != 'Authorised':
        return False
    if settlement.Type() not in ('Loan Fee', 'Finder Fee', 'Payment Cash'):
        return False
    if settlement.AcquirerName() != 'SECURITY LENDINGS DESK':
        return False
    reference_key = (
        first_settlement.Currency().Name(),
        first_settlement.ValueDay(),
        first_settlement.Counterparty().Name(),
    )
    candidate_key = (
        settlement.Currency().Name(),
        settlement.ValueDay(),
        settlement.Counterparty().Name(),
    )
    return candidate_key == reference_key
def get_settlement_attributes(qualifying_settlements):
    """
    Extract the attributes shared by the netted settlements into a dict,
    reading them from the first settlement in *qualifying_settlements*.
    """
    first = qualifying_settlements[0]
    return {
        'status': first.Status(),
        'aquirer': first.AcquirerName(),
        'acquirer_acc_name': first.AcquirerAccName(),
        'acquirer_account': first.AcquirerAccount(),
        'currency': first.Currency().Name(),
        'value_day': first.ValueDay(),
        'settlement_type': first.Type(),
        'counter_party': first.Counterparty().Name(),
        'counter_party_account_ref': first.CounterpartyAccountRef(),
        'acquirer_account_network_name': first.AcquirerAccountNetworkName(),
    }
def create_settlement(settlement_dict, net_amount):
    """
    Creating a new settlement and setting up certain attributes.

    :param settlement_dict: shared attributes from get_settlement_attributes().
    :param net_amount: summed amount of the netted child settlements.
    :return: the committed acm.FSettlement, or None when creation fails.
    """
    # NOTE(review): on any failure this logs and implicitly returns None;
    # the caller (set_parent_settlement) would then parent children to None.
    try:
        new_settlement = acm.FSettlement()
        new_settlement.RegisterInStorage()
        new_settlement.Status = settlement_dict.get('status')
        new_settlement.AcquirerName = settlement_dict.get('aquirer')
        new_settlement.AcquirerAccName = settlement_dict.get('acquirer_acc_name')
        new_settlement.AcquirerAccount = settlement_dict.get('acquirer_account')
        new_settlement.Currency = settlement_dict.get('currency')
        new_settlement.ValueDay = settlement_dict.get('value_day')
        new_settlement.Type = settlement_dict.get('settlement_type')
        new_settlement.Counterparty = settlement_dict.get('counter_party')
        new_settlement.CounterpartyAccountRef = settlement_dict.get('counter_party_account_ref')
        new_settlement.AcquirerAccountNetworkName = settlement_dict.get('acquirer_account_network_name')
        new_settlement.Amount = net_amount
        # Mark as an ad-hoc net with no originating trade.
        new_settlement.RelationType('Ad Hoc Net')
        new_settlement.Trade(None)
        set_call_confirmation(new_settlement)
        new_settlement.Commit()
        return new_settlement
    except Exception as e:
        LOGGER.exception(e)
def set_parent_settlement(new_settlement, qualifying_settlements):
    """
    Setting up the parent settlement and changing the status of the children
    to void.  Runs inside one acm transaction; on any error the whole batch
    is rolled back so children are never partially voided.
    """
    acm.BeginTransaction()
    try:
        for settlement in qualifying_settlements:
            # Work on the storage image so the in-memory object stays intact
            # until commit.
            settlement_image = settlement.StorageImage()
            settlement_image.Parent(new_settlement)
            settlement_image.Status('Void')
            settlement_image.Commit()
        acm.CommitTransaction()
    except Exception as e:
        LOGGER.exception(e)
        acm.AbortTransaction()
def set_call_confirmation(settlement):
    """Stamp the Call_Confirmation add-info so the settlement is auto-released."""
    field_name, field_value = 'Call_Confirmation', 'SBLManualRelease'
    settlement.AddInfoValue(field_name, field_value)
    LOGGER.info("Auto-setting Call_Confirmation for Settlement with id {settlement}".format(settlement=settlement.Oid()))
    settlement.Commit()
|
984,013 | d469243b8e83450becb28d18702aa14d755dcbd3 | from pydantic import BaseModel
class PostBase(BaseModel):
    """Base schema: fields shared by all post representations."""
    content: str
class PostCreate(PostBase):
    """Request-body schema for creating a post (no extra fields)."""
    pass
class Post(PostBase):
    """Response schema: a persisted post, readable from ORM objects."""
    id: int
    user_id: int
    class Config:
        # Allow construction directly from SQLAlchemy ORM instances.
        orm_mode = True
984,014 | b012dfbd2edf86b8c9eb5fd637520fa10f0015a6 | num1 =float(input("Enter first number: ")) #they have to insert a number otherwise we get an error
qp = input("Enter operator: ")
num2 = float(input("Enter second number: "))
def myfunc(num1, num2, op=None):
    """Print the result of applying *op* to the two operands.

    Backward compatible: when *op* is omitted the module-level ``qp``
    (the operator last read from the user) is used, exactly as before.
    Fixes: the implicit global dependency is now an explicit optional
    parameter, and division by zero prints a message instead of crashing.
    """
    if op is None:
        op = qp  # preserve the original global-based behaviour
    if op == "+":
        print(num1 + num2)
    elif op == "-":
        print(num1 - num2)
    elif op == "/":
        if num2 == 0:
            print("Cannot divide by zero")
        else:
            print(num1 / num2)
    elif op == "*":
        print(num1 * num2)
    else:
        print("Invalid operator")
# Evaluate the first equation, then keep offering more until the user declines.
myfunc(num1, num2)
repeat = str(input("Would you like to input another equation? : ")).lower()
responses = ("yes", "y", "yeah", "yah", "si")
while True:
    if repeat in responses:
        num3 = float(input("Enter first number: "))
        qp = input("Enter operator: ")
        num4 = float(input("Enter second number: "))
        myfunc(num3, num4)  # prefixing with print() would just print None
        # Bug fix: the original stored the follow-up answer in `responses`,
        # clobbering the accepted-answer list, and never updated `repeat`,
        # so the loop could never terminate normally.
        repeat = input("Would you like to input another equation? :").lower()
    else:
        print("Dueces!!")
        break
|
984,015 | 49a0d3738f64b0945ec7b69d5d58081e449c1aad | import os
import time
import torch
import argparse
import matplotlib
import numpy as np
import matplotlib.patches as patches
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from datasets import *
from networks import *
from utils import *
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
from mytracker_private_detections import *
# ---- CLI configuration for the detector + tracker pipeline ----
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type = int, default = 1, help = "Image Batch Size")
parser.add_argument("--image_folder", type = str, default = "/home/pulkit/Datasets/MOT_Datasets/Detection_Datasets/MOT17Det/train/MOT17-13/img1/", help = "Path to the dataset folder")
parser.add_argument("--network_config", type = str, default = "config/yolov3-coco2014.cfg", help = "Patch to the file containing the network definition")
parser.add_argument("--use_coco2014_weights", type = int, default = 1, help = "Set this to 1 if you want to use the pretrained COCO2014 weights else use the custom weights file")
parser.add_argument("--coco2014_weights_path_detector", type = str, default = "checkpoints_coco2014/yolov3.weights", help = "Path to the weights file")
parser.add_argument("--custom_weights_path_detector", type = str, default = "checkpoints_800_mot1720Det/model_epoch_29.pth", help = "Path to the weights file")
parser.add_argument("--class_path", type = str, default = "data/classes.names", help = "Path to the class label file")
parser.add_argument("--conf_thresh", type = float, default = 0.5, help = "Object Confidence Threshold")
parser.add_argument("--nms_thresh", type = float, default = 0.5, help = "IOU threshold for Non-Maximum Suppression")
parser.add_argument("--iou_thresh", type = float, default = 0.3, help = "IOU threshold for tracking")
parser.add_argument("--n_cpu", type = int, default = 0, help = "Number of CPU threads to use for batch generation")
parser.add_argument("--inp_img_size", type = int, default = 800, help = "Dimension of input image to the network")
parser.add_argument("--tracks_folder_name", type = str, default = "outputs_simple/tracks_private_detections", help = "Enter the name of the folder where you want to save the tracks obtained")
parser.add_argument("--tracks_file_name", type = str, default = "MOT17-13.txt", help = "Enter the file name in which you want to store the object trajectories")
parser.add_argument("--output_images_folder_name", type = str, default = "outputs_simple/private_detections/MOT17-13", help = "Enter the folder name in which you want to save the images with tracks")
args = parser.parse_args()
# Outputs: one text file holding all trajectories + one folder of annotated frames.
os.makedirs(args.tracks_folder_name, exist_ok = True)
file_name = args.tracks_folder_name + '/' + args.tracks_file_name
file_out = open(file_name, 'w+')
os.makedirs(args.output_images_folder_name, exist_ok = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Model Initialization
model_config = args.network_config
img_size = args.inp_img_size
object_detector = Darknet(model_config, img_size)
# Loading the object detector checkpoint weights
if args.use_coco2014_weights:
    object_detector.load_darknet_weights(args.coco2014_weights_path_detector)
else:
    checkpoint_detector = torch.load(args.custom_weights_path_detector)
    object_detector_parameters = checkpoint_detector['model_state_dict']
    object_detector.load_state_dict(object_detector_parameters)
object_detector.to(device)
object_detector.eval()
images = ImageFolder(args.image_folder, img_size)
dataloader = DataLoader(images, batch_size = 1, shuffle = False, num_workers = args.n_cpu)
# Class labels, one per line in the names file.
with open(args.class_path, 'r') as class_name_file:
    names = class_name_file.readlines()
class_names = []
for name in names:
    class_names.append(name.rstrip().lstrip())
images_names = sorted(os.listdir(args.image_folder))
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Instantiating the Tracker
tracker = Tracker(object_detector, args.conf_thresh, args.nms_thresh, args.iou_thresh) # The instances of the YOLOv3 object detector will be passed
fig = plt.figure(figsize = (16,9))
ax = fig.add_subplot(111)
colors = np.random.rand(512,3) # used only for displaying the bounding box trackers
# Main loop: run detection + tracking per frame, draw boxes, and dump
# MOT-challenge-format rows to file_out.
for index, image in enumerate(dataloader):
    img = image.type(Tensor)
    orig_image = Image.open(args.image_folder + images_names[index])
    orig_image_size = orig_image.size
    ax.imshow(orig_image)
    ax.set_axis_off()
    tic = time.time()
    with torch.no_grad():
        trackers = tracker.step(img, orig_image)
    toc = time.time()
    print('Processing Time:', (toc - tic))
    # Each track is (x_min, y_min, x_max, y_max, track_id, ...).
    for track in trackers:
        x_min = int(track[0])
        y_min = int(track[1])
        x_max = int(track[2])
        y_max = int(track[3])
        width = x_max - x_min
        height = y_max - y_min
        object_num = int(track[4])
        ax.add_patch(patches.Rectangle((x_min, y_min), width, height, fill = False, lw = 3, ec = colors[(object_num % 512), :]))
        plt.text(x = x_min, y = y_min, s = str('%d'%object_num), fontsize = 8)
        # MOT format: frame, id, x, y, w, h, conf(-1), -1, -1
        print('%d,%d,%.2f,%.2f,%.2f,%.2f,-1,-1,-1'%((index+1), object_num, x_min, y_min, width, height),file = file_out)
    fig.savefig(args.output_images_folder_name + '/' + str(index + 1).zfill(6) + '.jpg', bbox_inches = 'tight', pad_inches = 0)
    # if index == 0:
    # break
    ax.cla()
984,016 | d885c9073a1563492b1e80f30506b9998cbeb35b | # simply EX 16 from around page 59
# I've expanded the program myself to taken in
# however many lines the user wants
# this is of course done with a simple while loop
# and makes the program much more useful than the
# hard coded 3 lines the book wanted me to type...
# run like this: python ex16_improved.py test.txt
# and in your pwd you'll have a test.txt file with
# your work in it!
from sys import argv
script, filename = argv
print "The file %r is going to be overwritten..." % filename
print "Back out now with: CTRL-C (^C)..."
print "Or onwards and upwards with: ENTER/RETURN..."
raw_input("?")
print "Loading the file..."
target = open(filename, 'w')
print "Truncating the file."
target.truncate()
#mod for asking how many lines are needed
num_of_lines = input("Enter number of lines you want: ")
i = 1
while i < num_of_lines + 1:
#what I want looped
s = str(i)
line = raw_input("line %s : " % s)
target.write(line)
target.write("\n")
#debug statement
#print(i)
#increment with plus assgin
i += 1
print "Saving data...do not turn off the power"
print "Sucess!"
target.close()
|
984,017 | 256786fb1d45c31850a9429641b08e5a0ad4a248 | """
53. Maximum Subarray
if dp[i-1] > 0: dp[i] = dp[i-1]+nums[i]
else: dp[i] = nums[i]
@date: 2017/02/07
"""
class Solution(object):
    def maxSubArray(self, nums):
        """Return the largest sum of any contiguous subarray (Kadane).

        dp recurrence: best-ending-here = nums[i] + max(prev, 0).

        :type nums: List[int]
        :rtype: int
        Fixes: the empty-input branch returned the list itself instead of an
        int (now returns 0); O(1) space instead of an O(n) dp array; the
        py2-only xrange is replaced with range.
        """
        if not nums:
            return 0
        best = current = nums[0]
        for value in nums[1:]:
            # Extend the running subarray only while it contributes positively.
            current = value + (current if current > 0 else 0)
            best = max(best, current)
        return best
984,018 | 799273915f088d4f4beb87cb470daf9639242024 | from typing import Tuple, Literal, Union, List, Dict
from .api import Sqlite3
from .tools import *
from .Table import Table
class Sqlite3ApiError(Exception):
    """Errors raised while working with the API wrapper."""
class API(Sqlite3):
def __init__(self, tables, db_path: str = None):
self._active = False
if db_path:
Sqlite3.__init__(self, db_path)
self._active = True
if type(tables).__name__ != 'module':
raise Sqlite3ApiError(
'Параметр `tables` должен быть '
'импортированным файлом с классами таблиц'
)
self._tables: Dict[str, Table] = tables
self._get_tables()
def save(self, *table_classes) -> str:
"""
Сохраняем все изменения.
:param table_classes: Объекты полученные из базы данных.
:return: "Successfully"
"""
self._check_active()
if len(table_classes) == 0:
raise Sqlite3ApiError('Не переданы классы таблиц')
table_classes: Tuple[Table]
for table_class in table_classes:
updated_fields = [] # Поля, которые изменили
old_table_class = self.filter(
table_name=type(table_class).__name__.lower(),
return_type='classes',
id=table_class.id
) # Старые данные
# Проверяем какие поля были изменены
for field_name, value in vars(table_class).items():
if not field_name.startswith('_'):
if value != old_table_class.__getattribute__(field_name):
updated_fields.append('{field_name}={value}'.format(
field_name=field_name,
value=convert_from_class(value)
))
if len(updated_fields) != 0:
self.execute(
"UPDATE '%s' SET %s WHERE id = %s" % (
type(table_class).__name__.lower(),
', '.join(updated_fields),
table_class.id
))
self.commit()
return 'Successfully'
def filter(
self,
table_name: str,
return_type: Literal['visual', 'classes'] = 'visual',
return_list: Literal[True, False] = False,
**where
) -> Union[
List[list], list,
List[Table], Table,
None
]:
"""
Функция выбирает данные из базы данных на основе указанных параметров.
:param table_name: Название таблицы, с которой мы работаем.
:param return_type:
Для "classes" - вернёт объект класса таблицы.
Для "visual" - вернёт данные в том виде,
в котором они хранятся в базе данных.
:param return_list:
Для True - вернёт список объектов независимо от их количества.
:param where: Параметры сортировки
"""
self._check_active()
table_name = table_name.lower()
table = self._get_table(table_name)
table_fields = table.get_fields()
conditions = []
# Формирование параметров сортировки
for key, value in where.items():
if '_' in key:
index = key.rfind('_')
try:
field = key[:index]
opt = OPT_MAP[key[index + 1:]]
except KeyError:
field = key
opt = '='
else:
field = key
opt = '='
if field not in table_fields and field != 'id':
raise Sqlite3ApiError(
f'Поле `{field}` не найдено в таблице `{table_name}`'
)
conditions.append(
f'{field} {opt} {str(convert_from_class(value))}'
)
# Получение данных
if len(conditions) != 0:
data = self.fetchall(
"SELECT * FROM '%s' WHERE %s" % (
table_name,
' and '.join(conditions)
))
else:
data = self.fetchall(
"SELECT * FROM '%s'" % table_name
)
if len(data) == 0:
return
if return_type == 'visual':
if return_list:
return data if isinstance(data, list) else [data]
return data[0] if len(data) == 1 else data
elif return_type == 'classes':
data = data if isinstance(data, list) else [data]
classes = [self.get_class(table_name, cls) for cls in data]
if not return_list:
return classes[0] if len(classes) == 1 else classes
return classes
def insert(self, table_name: str, **values) -> str:
"""
Функция добавляет данные в таблицу.
:param table_name: Название таблицы, с которой мы работаем.
:param values: Значения полей.
:return: "Successfully"
"""
self._check_active()
table_name = table_name.lower()
table = self._get_table(table_name)
table_fields = table.get_fields()
fields = table_fields.copy()
for filed, value in values.items():
if filed not in table_fields:
raise Sqlite3ApiError(
f'Поле `{filed}` не найдено в таблице `{table_name}`'
)
fields[fields.index(filed)] = str(convert_from_class(value))
table_fields.remove(filed)
if len(table_fields) != 0:
raise Sqlite3ApiError(
f'Не переданы значения для полей: '
f'{", ".join(table_fields)}'
)
self._cursor.execute(
"INSERT INTO '%s' (%s) VALUES (%s)" % (
table_name,
', '.join(table.get_fields()),
', '.join(fields)
))
self.commit()
return 'Successfully'
def get_class(self, table_name: str, data: Union[list, tuple]) -> Table:
"""
Возвращает объект класса таблицы на основе его данных `data`
:param table_name: Название таблицы с, которой мы работаем.
:param data: Данные которые хранились в базе данных.
"""
table = self._get_table(table_name.lower())
types = table.get_types()
fields = table.get_fields()
table.__setattr__('id', data[0])
data = data[1:]
for i, field in enumerate(fields):
if types[field][1] in ['list', 'dict']:
table.__setattr__(
field,
convert(convert_from_data(data[i]))
)
else:
table.__setattr__(
field,
convert_from_data(data[i])
)
return table
def add_field(self, table_name: str, field_name: str, start_value) -> str:
"""
Добавляет поле в таблицу.
:param table_name: Название таблицы, с которой мы работаем.
:param field_name: Название нового поля.
:param start_value: Значение нового поля.
:return: "Successfully"
"""
self._check_active()
table_name = table_name.lower()
table = self._get_table(table_name)
table_fields = table.get_fields()
if field_name not in table_fields:
raise Sqlite3ApiError(
f'Поле `{field_name}` не найдено '
f'в классе таблицы `{table_name}`'
)
self._cursor.execute(
"ALTER TABLE '%s' ADD %s %s" % (
table_name,
field_name,
table.get_types()[field_name][0] # Тип данных
)) # Добавление нового поля
self._cursor.execute(
"UPDATE '%s' SET %s = %s" % (
table_name,
field_name,
str(convert_from_class(start_value))
)) # Изменение стартового значения
self.commit()
return 'Successfully'
def create_db(self) -> str:
"""
Создание таблиц.
:return: "Successfully"
"""
self._check_active()
for table_name, table in self._tables.items():
fields = [
f'{key}{value[0]}'
for key, value in table.get_types().items()
if key != 'id' and not key.startswith('__')
]
self._cursor.execute(
"CREATE TABLE IF NOT EXISTS %s "
"(id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, %s)" % (
table_name,
', '.join(fields)
))
return 'Successfully'
def _get_tables(self):
excluded = [
'string', 'integer', 'list_', 'dict_', 'Table', 'data_bases',
'Dict', 'List'
]
tables = {
k.lower(): v
for k, v in vars(self._tables).items()
if not k.startswith('__') and k not in excluded
}
self._tables_names: List[str] = [
name for name in tables.keys()
]
self._tables = tables
def _check_active(self):
if not self._active:
raise Sqlite3ApiError('Файл базы данных не инициализирован')
def _get_table(self, table_name: str) -> Table:
if table_name in self._tables_names:
return self._tables[table_name]()
else:
raise Sqlite3ApiError(f'Таблица `{table_name}` не найдена')
@property
def cursor(self):
    """Sqlite3 cursor of the active connection (read-only).

    Raises ``Sqlite3ApiError`` (via ``_check_active``) if no database
    file has been initialized.
    """
    # The original defined a property whose getter returned None and then
    # immediately replaced the getter via @cursor.getter; a single
    # @property is equivalent and removes the dead definition.
    self._check_active()
    return self._cursor
|
984,019 | 400a1a8fda87e2ef28fc96bcf9fcd9763d857a37 | from mpi4py import MPI
from data.Stock import Stock
from pathlib import Path
from pprint import pprint
from statistics import mean
import json
import pickle
import socket
import pandas as pd
from flask import Flask
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# NASDAQ_LIST_FILE = "/mnt/d/Projects/stocker/db/companylist.csv"
# NASDAQ_LIST_FILE = "http://www.asisodia.com/datasets/companylist.csv"
NASDAQ_LIST_FILE = Path("stocker/db/companylist.csv")
PER_NODE_LIMIT = 5
STOCK_BUFFER = PER_NODE_LIMIT * 3 // 2
# TOTAL_NODES = int(MPI.INFO_ENV.get("soft"))
TOTAL_NODES = comm.Get_size()
API_KEYS = ["GUPPO7FAKF3SENRJ", "066F5B6VQS6TH2N0", "9BTNEH3XG5J4S7KW"]
KEY_IDX = 0
app = Flask(__name__)
@app.route("/")
def main():
# Rank 0 only: scatter shuffled stock batches to the worker ranks,
# gather each worker's best pick, and return everything as JSON.
print("hit1")
if rank == 0:
stocks = pd.read_csv(NASDAQ_LIST_FILE, engine='python').sample(frac=1)
for i in range(1, TOTAL_NODES):
comm.send(stocks[(i-1)*STOCK_BUFFER: i*STOCK_BUFFER], dest=i)
result = {}
best_stock = None
best_stock_profit = -1
best_stock_score = None
for i in range(1, TOTAL_NODES):
temp_result = comm.recv(source=i)
if not temp_result:
continue
else:
result[i] = temp_result
if result[i]["diff"] > best_stock_profit:
best_stock = result[i]["best_stock"]
best_stock_profit = result[i]["diff"]
best_stock_score = result[i]["score"]
# NOTE(review): workers report their score under "score" while the
# summary entry below uses "best_stock_score" — JSON consumers must
# handle both key names; confirm this asymmetry is intended.
result[0] = {
"name": socket.gethostname(),
"best_stock": best_stock,
# "best_stock_info": ,
"diff": best_stock_profit,
"best_stock_score": best_stock_score
}
# print("P", rank, best_stock, best_stock_profit)
# pprint(result)
# NOTE(review): on ranks != 0 this view returns None, which Flask
# rejects — presumably the app only ever runs on rank 0.
return json.dumps(result)
def work():
    """Worker-rank loop: score a batch of stock symbols received from
    rank 0 and send back the most profitable one (or False if none
    could be scored).
    """
    print("hit2")
    global KEY_IDX
    if rank != 0:
        key_idx = KEY_IDX
        stocks = []
        highest_diff = 0
        highest_diff_stock = None
        highest_diff_score = None
        for stock_symbol in comm.recv(source=0).symbol:
            stock = Stock(stock_symbol, API_KEYS[key_idx], True)
            # Rotate API keys to spread the rate limit across requests.
            key_idx = (key_idx + 1) % len(API_KEYS)
            try:
                prediction = stock.predict()
                diff = prediction[0][0] - prediction[1][0]
                if highest_diff < diff:
                    highest_diff = diff
                    highest_diff_stock = stock
                    highest_diff_score = (prediction[0][1], prediction[1][1])
                stocks.append(stock)
            except Exception:
                # Stocks whose prediction fails are skipped entirely.
                continue
            if len(stocks) == PER_NODE_LIMIT:
                break
        KEY_IDX = key_idx
        if highest_diff_stock is not None:
            result = {
                "name": socket.gethostname(),
                "stocks": [s.symbol for s in stocks],
                "best_stock": highest_diff_stock.symbol,
                # BUG FIX: report the profit of the best stock, not the
                # diff of whichever stock happened to be scored last.
                "diff": highest_diff,
                "score": highest_diff_score
            }
            comm.send(result, dest=0)
        else:
            comm.send(False, dest=0)
work()
# NOTE(review): work() runs here at import time for every rank AND again
# below under the __main__ guard for non-zero ranks — worker ranks appear
# to process a batch twice; confirm whether this module-level call is a
# leftover that should be removed.
# print(rank, len(stocks), mean(predictions), len(predictions))
# print(stocks[0].get_points().head())
if __name__ == "__main__":
if rank == 0:
# Rank 0 serves results over HTTP; the other ranks act as workers.
app.run()
else:
work()
# main()
|
def nod(number):
    """Return how many halvings are needed to bring `number` down to <= 1."""
    days = 0
    while number > 1.:
        number /= 2.
        days += 1
    return days
n = int(raw_input())
for i in xrange(n):
x = float(raw_input())
print "%d dias" %(nod(x))
|
984,021 | 6524f6bf9790ca11505225cdfcb33827f7306881 | from math import pi
# ClassCircle
# Write class called circle, with an instance variable, for the radius
# and methods to calculate the area and circumference of the circle.
class Circle:
    """A circle defined by its radius.

    The radius may be supplied at construction time; ``Circle()`` still
    starts with radius 0, so existing callers are unaffected.
    """

    def __init__(self, radius=0):
        self.radius = radius

    def area(self):
        """Return the area, pi * r**2."""
        return (self.radius ** 2) * pi

    def circumference(self):
        """Return the circumference, 2 * pi * r."""
        return self.radius * 2 * pi
984,022 | 2b4bf02bf194c10cb2620e051d007a16584b9f59 | from bs4 import BeautifulSoup
import re
import urllib.request
import random
import json
username = json.load(open('config.json'))["githubusername"]
def latestrepo(uname):
    """Return the URL of `uname`'s most recently listed GitHub repository."""
    page = urllib.request.urlopen(
        'https://github.com/' + uname + '?tab=repositories'
    ).read().decode("utf-8")
    soup = BeautifulSoup(page, "lxml")
    names = []
    # Repository names sit in anchors marked itemprop="name codeRepository".
    for anchor in soup.findAll("a", {"itemprop": "name codeRepository"}):
        name = re.findall('>([^"]*)<', str(anchor))[0]
        if name not in names:
            names.append(name)
    # The first entry on the page is the most recent repository.
    return 'https://github.com/' + uname + '/' + names[0].strip()
def latestmessage(url):
    """Return the most recent commit message shown on a repository page."""
    page = urllib.request.urlopen(url).read().decode("utf-8")
    soup = BeautifulSoup(page, "lxml")
    messages = []
    for anchor in soup.findAll("a", {"class": "message"}):
        text = re.findall('>([^"]*)<', str(anchor))[0]
        if text not in messages:
            messages.append(text)
    return messages[0]
def all():
# Convenience wrapper: latest commit message of the user's newest repo.
# NOTE(review): this shadows the built-in all() for the rest of the
# module; renaming would change the public interface, so only flagging.
return latestmessage(latestrepo(username))
def outsource(text):
    """Answer the latest-commit query when `text` contains the trigger
    phrase; otherwise return None."""
    if "what's my latest commit" not in text:
        return None
    return 'in your last commit message, you said ' + all()
# ANSI escape sequences for coloured terminal output.
cores = {
    'limpo': '\033[m',        # reset
    'Azul n': '\033[1;34m',   # bold blue
    'Azul s': '\033[4;33m',
    'Azul i': '\033[7;33m',
}
a = 'Olá Mundo!'
print('{}Olá Mundo!{}'.format(cores['Azul n'], cores['limpo']))
984,024 | 6b81049e2e3eb95711579db07e560ed5a856aae5 | from multiprocessing import Pool, TimeoutError
import multiprocessing as mp
from Queue import Queue
from threading import Thread
import threading
import subprocess
import time
import os,subprocess,sys
#input directory for the files to be analyzed
directory = "/dpm/in2p3.fr/home/cms/phedex/store/user/ccollard/HSCP/prodOct2019_CMSSW_10_6_2/SingleMuon/"
#list of periods corresponding to subdirectories in directory
periods = ["run2017B","run2017C","run2017D","run2017E","run2017F","run2018A","run2018B","run2018C","run2018D"]
#assume a structur like this directory/period/subdir/subsubdir/files_id.root
#periods = ["run2017B"]
#Name of the script description the job to be launch
# contents 3 keywords that will be replaced
# PEDIOD correspond to the periods above mentioned
# LABEL typically the integer of the first files ran
# FILES list of files to be analyzed
TemplateScript = "template.sh"
#abolute path to the script for submission
SubmitScript = "/home-pbs/echabert/HSCP/launch/submit.sh"
#number of files ran in a single job
NFilesPerJob = 5
# For each data-taking period: walk one sub/sub-sub directory via rfdir,
# batch the ROOT files NFilesPerJob at a time, instantiate the job
# template and submit each batch.
# NOTE(review): subprocess output is bytes on Python 3, so split('\n')
# and string concatenation below imply this script targets Python 2.
for period in periods:
cdir = directory+period
command = ["rfdir",cdir]
process = subprocess.Popen(command,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
#first first subdir
# NOTE(review): only the first listed sub-directory (9th rfdir column)
# is descended into; any siblings are ignored — confirm intended.
subdir = stdout.split()[8]
cdir +="/"+subdir
command = ["rfdir",cdir]
process = subprocess.Popen(command,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
#first subsubdir
subdir = stdout.split()[8]
print(stdout)
cdir +="/"+subdir
#directory has been updated
#find all files
command = ["rfdir",cdir] #,"|","awk","'{print $9}'"]
process = subprocess.Popen(command,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
#split per line
listFiles = stdout.split('\n')
#print(listFiles)
#search for the name (9th and last element in a line)
listFiles = [e.split()[8] for e in listFiles if len(e.split())==9]
print(listFiles)
print("In directory",cdir,"\t nof files = ",len(listFiles))
pairFiles = []
count = 1
gcount = 1
idForJob = 1
sfilesForJob = ""
for f in listFiles:
# File id comes from the 4th underscore-separated token of the name.
idfile = f.split("_")[3].split(".")[0]
if count == 1:
idForJob = idfile
sfilesForJob = ""
#print(idfile)
fullfilename = "root://sbgse1.in2p3.fr:/"+cdir+"/"+f
#print(idfile,fullfilename)
sfilesForJob+=" "+fullfilename
count+=1
gcount+=1
# NOTE(review): gcount starts at 1 and is incremented before this test,
# so `gcount==len(listFiles)` fires on the second-to-last file — the
# final file can start a new batch that is never submitted. Likely an
# off-by-one; confirm and compare against len(listFiles)+1 (or start
# gcount at 0).
if count==(NFilesPerJob+1) or gcount==len(listFiles):
#Ready !!
# NOTE(review): shfile is never closed — use a with-statement.
shfile = open(TemplateScript)
content = shfile.read()
#replace the keywords by the values
content = content.replace("PERIOD",period)
content = content.replace("FILES",sfilesForJob)
content = content.replace("LABEL",idForJob)
#write content in a file
ofilename = "jobs/job_"+period+"_"+idForJob+".sh"
ofile = open(ofilename,"w")
ofile.write(content)
ofile.close()
#ready to launch the job
command = ["source "+SubmitScript+" "+ofilename+" "+period+"_"+idForJob+" "+sfilesForJob]
print(command)
#subprocess.check_call(command)
process = subprocess.Popen(command,shell=True)
#process = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#stdout, stderr = process.communicate()
#print(stdout,stderr)
count = 1
|
984,025 | 7dac854d67bb35bf747c730df13a5ab7fcd99015 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 8 17:08:50 2019
@author: hbagaria
"""
# Print a rising staircase of stars (1..num per row) followed by a
# falling one (num-1 down to 0 — the last row is blank, matching the
# original while/break behaviour).
num = int(input("Enter a number"))
for i in range(1, num + 1):
    print("* " * i)
for i in range(num - 1, -1, -1):
    print("* " * i)
984,026 | 62589e4ecca0a049a48464e05374a608d07b8d00 | # This script requires reports "Scheduling" to be downloaded most recently from Repo and Reg. Generates a file 'sched.csv' to import into reg.
import pandas as pd
import numpy as np
import os
import glob
# set home directory so can be used on all OS
home = os.path.expanduser('~')
# find the most recent data file exported from the reg and repo and set it as the file
reg_file = max(glob.iglob(home+'/Downloads/RDRPRegistry_DATA_*'), key=os.path.getctime)
repo_file = max(glob.iglob(home+'/Downloads/RDRPRepository_DATA_*'), key=os.path.getctime)
# load data using the record_id as the index
reg = pd.read_csv(reg_file, index_col='record_id', dtype=object)
repo = pd.read_csv(repo_file, index_col='record_id', dtype=object)
# import time and get current time
#from time import gmtime, localtime
#now = strftime("%Y-%m-%d %H:%M", localtime())
from datetime import datetime
now = datetime.now().strftime('%d-%m-%Y %H:%M:%S')
# find all subs with upcoming appointments
upcoming = pd.unique(repo.loc[repo['visit_dt'] >= now].index)
# change recruitment and past sub status of those without upcoming appointments
for sub in reg.index:
if not sub in upcoming:
if reg.ix[sub, 'recruiting_status'] == '2':
reg.ix[sub, 'recruiting_status'] = 0
reg.ix[sub, 'past_sub'] = 1
# set subs with upcoming appointments as scheduled
for sub in upcoming:
reg.loc[reg.index == sub, 'recruiting_status']=2
reg.loc[reg.index == sub, 'past_sub'] = 1
# write out csv for upload to registry
reg.to_csv(home+'/Downloads/sched.csv')
|
984,027 | 933c82041d0145ec7dd77a14f51c72a3341c009d | from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.paginator import Paginator
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, UpdateView, CreateView, DeleteView
from vigil.models import LogicAlertAction
# Lists all LogicAlertAction objects (login required).
class ShowLogicAlertActionsView(LoginRequiredMixin, ListView):
model = LogicAlertAction
template_name = 'vigil/alert_action/logic/list.html'
# Detail page for one LogicAlertAction, including the first page of its
# task results (8 per page).
class LogicAlertActionDetailView(LoginRequiredMixin, DetailView):
model = LogicAlertAction
template_name = 'vigil/alert_action/logic/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Only page 1 is shown here; deeper pages are not reachable from this view.
context['task_results'] = Paginator(self.object.task_results.all(), 8).page(1)
return context
# Edit form for a LogicAlertAction; redirects back to its detail page.
class LogicAlertActionUpdateView(LoginRequiredMixin, UpdateView):
model = LogicAlertAction
template_name = 'vigil/alert_action/logic/update_form.html'
fields = ['name', 'description', 'task', 'expected_data', 'business_logic_data',
'notification_actions']
def get_success_url(self):
return reverse_lazy('logic_alert_action_detail', kwargs={'pk': self.object.pk})
# Creation form; on success sends the user straight to the update form so
# the required data fields can be filled in.
class LogicAlertActionCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):
model = LogicAlertAction
template_name = 'vigil/alert_action/logic/create_form.html'
fields = ['name', 'description', 'task', 'expected_data', 'business_logic_data',
'notification_actions']
success_message = '%(name)s was created. ' \
'Check the data fields for any required information'
def get_success_url(self):
return reverse_lazy('logic_alert_action_update', kwargs={'pk': self.object.pk})
# Confirmation page for deleting a LogicAlertAction.
class LogicAlertActionDeleteView(LoginRequiredMixin, DeleteView):
model = LogicAlertAction
template_name = 'vigil/alert_action/logic/confirm_delete.html'
success_url = reverse_lazy('logic_alert_actions_list')
|
984,028 | 5036936d4d04bfc37c2806d41710b2a906d301d9 | from typing import Tuple
import pytest
from requests.models import Response
from fastapi.testclient import TestClient
from project.api.main import app
client = TestClient(app)
class TestPartnerRoutes:
# Integration tests for the /partners API, organized BDD-style:
# Given (outer class) / When (inner class) / should (test method).
# NOTE(review): the `partner_json` fixture is not defined in this file —
# presumably it comes from conftest.py; confirm before running in isolation.
@pytest.fixture()
def partner_id(self) -> str:
return "5fd53ee325a51f5b8631730a"
@pytest.fixture()
def get_partner_response(self, partner_id: str) -> Response:
return client.get(f"/partners/{partner_id}/")
@pytest.fixture()
def insert_partner_response(self, partner_json: str) -> Response:
return client.post("/partners/", data=partner_json)
@pytest.fixture()
def search_partner_response(self, location: Tuple[float, float]) -> Response:
return client.get(f"/partners/search?long={location[0]}&lat={location[1]}")
class TestGivenThereIsNoPartner:
class TestWhenFetchingPartner:
def test_should_return_returns_404(
self, get_partner_response: Response
) -> None:
assert get_partner_response.status_code == 404
class TestWhenInsertingPartner:
def test_partner_is_inserted(
self, insert_partner_response: Response
) -> None:
assert insert_partner_response.status_code == 201
class TestWhenSearchingByLocation:
@pytest.fixture()
def location(self) -> Tuple[float, float]:
return -46.57421, -21.785741
def test_should_return_404(self, search_partner_response: Response) -> None:
assert search_partner_response.status_code == 404
class TestGivenThereIsPartners:
class TestWhenInsertingNewPartner:
# Overrides `partner_id` so the fetch targets the partner just inserted.
@pytest.fixture()
def partner_id(self, insert_partner_response: Response) -> str:
return insert_partner_response.text.strip('"')
def test_should_find_inserted_partner(
self, get_partner_response: Response
) -> None:
assert get_partner_response.status_code == 200
class TestWhenInsertingExistingPartner:
def test_should_raise_409(self, partner_json: str) -> None:
response = client.post("/partners/", data=partner_json)
assert response.status_code == 201
response = client.post("/partners/", data=partner_json)
assert response.status_code == 409
class TestWhenSearchingForNearPartnerCoveringLocation:
@pytest.fixture()
def location(self) -> Tuple[float, float]:
# A point expected to fall inside the inserted partner's coverage area.
return -44.014835357666016, -19.90638004921044
def test_should_return_partner(
self,
insert_partner_response: Response,
search_partner_response: Response,
) -> None:
assert insert_partner_response.status_code == 201
assert search_partner_response.status_code == 200
|
984,029 | 3e51bfb907c1b4322c8d3da33eb78589af8a6b4c | from config import *
# Layer sequences for the three early-exit branches of the network.
branch1 = ['conv1', 'relu0', 'pool0', 'norm0', 'convB1', 'relu1', 'convB2', 'relu2', 'pool1', 'linear']
branch2 = ['conv1', 'relu0', 'norm0', 'pool0', 'conv2', 'relu1', 'pool1', 'norm1', 'convB1', 'relu2', 'pool2',
'linear']
branch3 = ['conv1', 'relu0', 'norm0', 'pool0', 'conv2', 'relu1', 'norm1', 'pool1', 'conv3', 'relu2', 'conv4',
'relu3', 'conv5', 'relu4', 'pool2', 'classifier']
# Indices into the layer lists above marking the candidate split points
# (each one a pooling layer).
branch1_partition_index = [2, 8]
branch2_partition_index = [3, 6, 10]
branch3_partition_index = [3, 7, 14]
# partitiion point number for every branch
partition_point_number = [2, 2, 3]
branches_info = [(branch1, branch1_partition_index), (branch2, branch2_partition_index),
(branch3, branch3_partition_index)]
# Bytes
# Serialized size of each partitioned sub-model: L = device-side (left)
# half, R = server-side (right) half.
model_size = {
'branch1_part1L': 20087,
'branch1_part1R': 111804,
'branch1_part2L': 131559,
'branch1_part2R': 63325,
'branch2_part1L': 20074,
'branch2_part1R': 1457263,
'branch2_part2L': 1249966,
'branch2_part2R': 227393,
'branch2_part3L': 1471614,
'branch2_part3R': 5723,
'branch3_part1L': 20074,
'branch3_part1R': 93870657,
'branch3_part2L': 1249896,
'branch3_part2R': 92640835,
'branch3_part3L': 9806701,
'branch3_part3R': 84083973,
}
###############################################
# Mobile device side time prediction class
###############################################
class DeviceTime:
    """Latency model for running (part of) each branch on the mobile device.

    Each ``branchN`` dict maps a layer name to its predicted execution
    time in seconds, computed from linear-regression cost functions fit
    per layer type.
    """

    def __init__(self):
        self.branch1 = {
            'conv1': self.device_conv(3, (5 * 5 * 3) ** 2 * 64),
            # conv1 emits 64 feature maps, so relu0 sees 64*32*32 values;
            # the original `63 * 32 * 32` here was a typo (cf. branch2/3).
            'relu0': self.device_relu(64 * 32 * 32),
            'pool0': self.device_pool(64 * 32 * 32, 64 * 15 * 15),
            'norm0': self.device_lrn(64 * 15 * 15),
            'convB1': self.device_conv(64, (3 * 3 * 64) ** 2 * 32),
            'relu1': self.device_relu(32 * 15 * 15),
            'convB2': self.device_conv(32, (3 * 3 * 32) ** 2 * 32),
            'relu2': self.device_relu(32 * 15 * 15),
            'pool1': self.device_pool(32 * 15 * 15, 32 * 7 * 7),
            'linear': self.device_linear(1568, 10),
        }
        self.branch2 = {
            'conv1': self.device_conv(3, (5 * 5 * 3) ** 2 * 64),
            'relu0': self.device_relu(64 * 32 * 32),
            'norm0': self.device_lrn(64 * 32 * 32),
            'pool0': self.device_pool(64 * 32 * 32, 64 * 15 * 15),
            'conv2': self.device_conv(64, (5 * 5 * 64) ** 2 * 192),
            'relu1': self.device_relu(192 * 13 * 13),
            'pool1': self.device_pool(192 * 13 * 13, 192 * 6 * 6),
            'norm1': self.device_lrn(192 * 6 * 6),
            'convB1': self.device_conv(192, (3 * 3 * 192) ** 2 * 32),
            'relu2': self.device_relu(32 * 6 * 6),
            'pool2': self.device_pool(32 * 6 * 6, 32 * 2 * 2),
            'linear': self.device_linear(128, 10),
        }
        self.branch3 = {
            'conv1': self.device_conv(3, (5 * 5 * 3) ** 2 * 64),
            'relu0': self.device_relu(64 * 32 * 32),
            'norm0': self.device_lrn(64 * 32 * 32),
            'pool0': self.device_pool(64 * 32 * 32, 64 * 15 * 15),
            'conv2': self.device_conv(64, (5 * 5 * 64) ** 2 * 192),
            'relu1': self.device_relu(192 * 13 * 13),
            'norm1': self.device_lrn(192 * 13 * 13),
            'pool1': self.device_pool(192 * 13 * 13, 192 * 6 * 6),
            'conv3': self.device_conv(192, (3 * 3 * 192) ** 2 * 384),
            'relu2': self.device_relu(384 * 6 * 6),
            'conv4': self.device_conv(384, (3 * 3 * 384) ** 2 * 256),
            'relu3': self.device_relu(256 * 6 * 6),
            'conv5': self.device_conv(256, (3 * 3 * 256) ** 2 * 256),
            'relu4': self.device_relu(256 * 6 * 6),
            'pool2': self.device_pool(256 * 6 * 6, 256 * 2 * 2),
            'classifier': self.device_dropout(1024) + self.device_linear(1024, 4096) +
                          self.device_relu(4096) + self.device_dropout(4096) +
                          self.device_linear(4096, 4096) + self.device_relu(4096) +
                          self.device_linear(4096, 10)
        }
        self.branches = [self.branch1, self.branch2, self.branch3]

    # Per-layer-type regression models (seconds as a function of size).
    def device_lrn(self, data_size):
        return 9.013826444839453e-08 * data_size + 0.0013616842338199375

    def device_pool(self, input_data_size, output_data_size):
        return 1.1864462944013584e-08 * input_data_size - 2.031421398089179e-09 * output_data_size + 0.0001234705954153948

    def device_relu(self, input_data_size):
        return 6.977440389615429e-09 * input_data_size + 0.0005612587990019447

    def device_dropout(self, input_data_size):
        return 9.341929545685408e-08 * input_data_size + 0.0007706006740869353

    def device_linear(self, input_data_size, output_data_size):
        return 1.1681471979101294e-08 * input_data_size + 0.00029824333961563884 * output_data_size - 0.0011913997548602204

    def device_conv(self, feature_map_amount, compution_each_pixel):
        # compution_each_pixel stands for (filter size / stride)^2 * (number of filters)
        return 0.00020423363723714956 * feature_map_amount + 4.2077298118910815e-11 * compution_each_pixel + 0.025591776113868925

    def device_model_load(self, model_size):
        return 4.558441818370891e-09 * model_size + 0.001395207253916772

    # tool
    def predict_time(self, branch_number, partition_point_number):
        '''
        Sum the device-side (left-of-split) layer times for one branch.

        :param branch_number: the index of branch
        :param partition_point_number: the index of partition point
        :return: predicted time in seconds
        '''
        branch_layer, partition_point_index_set = branches_info[branch_number]
        partition_point = partition_point_index_set[partition_point_number]
        # layers in partitioned model (left part, inclusive of the split layer)
        layers = branch_layer[:partition_point + 1]
        time_dict = self.branches[branch_number]
        return sum(time_dict[layer] for layer in layers)
###############################################
# Edge server side time prediction class
###############################################
class ServerTime:
    """Latency model for running (part of) each branch on the edge server.

    Mirrors ``DeviceTime`` with server-side regression coefficients.
    """

    def __init__(self):
        self.branch1 = {
            'conv1': self.server_conv(3, (5 * 5 * 3) ** 2 * 64),
            # conv1 emits 64 feature maps; the original `63 * 32 * 32`
            # here was the same copy-pasted typo as in DeviceTime.
            'relu0': self.server_relu(64 * 32 * 32),
            'pool0': self.server_pool(64 * 32 * 32, 64 * 15 * 15),
            'norm0': self.server_lrn(64 * 15 * 15),
            'convB1': self.server_conv(64, (3 * 3 * 64) ** 2 * 32),
            'relu1': self.server_relu(32 * 15 * 15),
            'convB2': self.server_conv(32, (3 * 3 * 32) ** 2 * 32),
            'relu2': self.server_relu(32 * 15 * 15),
            'pool1': self.server_pool(32 * 15 * 15, 32 * 7 * 7),
            'linear': self.server_linear(1568, 10),
        }
        self.branch2 = {
            'conv1': self.server_conv(3, (5 * 5 * 3) ** 2 * 64),
            'relu0': self.server_relu(64 * 32 * 32),
            'norm0': self.server_lrn(64 * 32 * 32),
            'pool0': self.server_pool(64 * 32 * 32, 64 * 15 * 15),
            'conv2': self.server_conv(64, (5 * 5 * 64) ** 2 * 192),
            'relu1': self.server_relu(192 * 13 * 13),
            'pool1': self.server_pool(192 * 13 * 13, 192 * 6 * 6),
            'norm1': self.server_lrn(192 * 6 * 6),
            'convB1': self.server_conv(192, (3 * 3 * 192) ** 2 * 32),
            'relu2': self.server_relu(32 * 6 * 6),
            'pool2': self.server_pool(32 * 6 * 6, 32 * 2 * 2),
            'linear': self.server_linear(128, 10),
        }
        self.branch3 = {
            'conv1': self.server_conv(3, (5 * 5 * 3) ** 2 * 64),
            'relu0': self.server_relu(64 * 32 * 32),
            'norm0': self.server_lrn(64 * 32 * 32),
            'pool0': self.server_pool(64 * 32 * 32, 64 * 15 * 15),
            'conv2': self.server_conv(64, (5 * 5 * 64) ** 2 * 192),
            'relu1': self.server_relu(192 * 13 * 13),
            'norm1': self.server_lrn(192 * 13 * 13),
            'pool1': self.server_pool(192 * 13 * 13, 192 * 6 * 6),
            'conv3': self.server_conv(192, (3 * 3 * 192) ** 2 * 384),
            'relu2': self.server_relu(384 * 6 * 6),
            'conv4': self.server_conv(384, (3 * 3 * 384) ** 2 * 256),
            'relu3': self.server_relu(256 * 6 * 6),
            'conv5': self.server_conv(256, (3 * 3 * 256) ** 2 * 256),
            'relu4': self.server_relu(256 * 6 * 6),
            'pool2': self.server_pool(256 * 6 * 6, 256 * 2 * 2),
            'classifier': self.server_dropout(1024) + self.server_linear(1024, 4096) +
                          self.server_relu(4096) + self.server_dropout(4096) +
                          self.server_linear(4096, 4096) + self.server_relu(4096) +
                          self.server_linear(4096, 10)
        }
        self.branches = [self.branch1, self.branch2, self.branch3]

    # Per-layer-type regression models (seconds as a function of size).
    def server_lrn(self, data_size):
        return 2.111544033139625e-08 * data_size + 0.0285872721707483

    def server_pool(self, input_data_size, output_data_size):
        return -3.08201145e-10 * input_data_size + 1.19458883e-09 * output_data_size - 0.0010152380964514613

    def server_relu(self, input_data_size):
        return 2.332339368254984e-09 * input_data_size + 0.005070494191853819

    def server_dropout(self, input_data_size):
        return 3.962833398808942e-09 * input_data_size + 0.015458175165054516

    def server_linear(self, input_data_size, output_data_size):
        return 9.843676646891836e-12 * input_data_size + 4.0100716666407315e-07 * output_data_size + 0.015619779485748695

    def server_conv(self, feature_map_amount, compution_each_pixel):
        # compution_each_pixel stands for (filter size / stride)^2 * (number of filters)
        return 1.513486447521604e-06 * feature_map_amount + 4.4890001480985655e-12 * compution_each_pixel + 0.009816023641653768

    def server_model_load(self, model_size):
        return 7.753178793348365e-10 * model_size + 0.000678369983568624

    # tool
    def predict_time(self, branch_number, partition_point_number):
        '''
        Sum the server-side (right-of-split) layer times for one branch.

        :param branch_number: the index of branch
        :param partition_point_number: the index of partition point
        :return: predicted time in seconds
        '''
        branch_layer, partition_point_index_set = branches_info[branch_number]
        partition_point = partition_point_index_set[partition_point_number]
        # layers in partitioned model (right part, after the split layer)
        layers = branch_layer[partition_point + 1:]
        time_dict = self.branches[branch_number]
        return sum(time_dict[layer] for layer in layers)
# Output sizes of the candidate split (pooling) layers, used to estimate
# the transfer cost between device and server.
class OutputSizeofPartitionLayer:
# float32 which is 4B(32 bits)
# Each entry is (number of elements) * 32, i.e. the size in BITS.
branch1 = {
'pool0': 64 * 15 * 15 * 32,
'pool1': 32 * 7 * 7 * 32,
}
branch2 = {
'pool0': 64 * 15 * 15 * 32,
'pool1': 192 * 6 * 6 * 32,
'pool2': 32 * 2 * 2 * 32,
}
branch3 = {
'pool0': 64 * 15 * 15 * 32,
'pool1': 192 * 6 * 6 * 32,
'pool2': 256 * 2 * 2 * 32,
}
branches = [branch1, branch2, branch3]
@classmethod
def output_size(cls, branch_number, partition_point_number):
'''
Size of the tensor emitted at the chosen split layer.
:return:unit(bit)
'''
branch_layer, partition_point_index_set = branches_info[branch_number]
partition_point = partition_point_index_set[partition_point_number]
# layers in partitioned model
layer = branch_layer[partition_point]
outputsize_dict = cls.branches[branch_number]
return outputsize_dict[layer]
|
984,030 | 9882925b7598b3f6b29e37876485e4e8943721c8 | # Generated by Django 2.2.10 on 2020-04-02 08:04
import django.contrib.postgres.fields
from django.db import migrations, models
# Auto-generated by `makemigrations`: widens Indicator.subindicators to an
# ArrayField of CharField(255) with a list default. Applied migrations
# should not be hand-edited.
class Migration(migrations.Migration):
dependencies = [
('datasets', '0057_auto_20200402_0753'),
]
operations = [
migrations.AlterField(
model_name='indicator',
name='subindicators',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), blank=True, default=list, size=None),
),
]
|
984,031 | cd8600fd8de5dd77e5636bfb9b236ec80e94e9e9 | # microbit-module: touch@0.0.1
# MPR121 Touch Sensor Module
"""
Copyright (c) 2020 Roger Wagner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from microbit import i2c
from micropython import const
_MAKERBIT_MPR121 = const(0x5A) # MakerBit MPR121
# CalibrationLock
_BaselineTrackingOn = const(0b00)
_BaselineTrackingOff = const(0b01)
_BaselineTrackingAndInitializeFirst5MSB = const(0b10)
_BaselineTrackingAndInitialize = const(0b11)
# Touch
_TOUCH_DISABLED = const(0b0000)
_TOUCH_ELE_0 = const(0b0001)
_TOUCH_ELE_0_TO_1 = const(0b0010)
_TOUCH_ELE_0_TO_2 = const(0b0011)
_TOUCH_ELE_0_TO_3 = const(0b0100)
_TOUCH_ELE_0_TO_4 = const(0b0101)
_TOUCH_ELE_0_TO_5 = const(0b0110)
_TOUCH_ELE_0_TO_6 = const(0b0111)
_TOUCH_ELE_0_TO_7 = const(0b1000)
_TOUCH_ELE_0_TO_8 = const(0b1001)
_TOUCH_ELE_0_TO_9 = const(0b1010)
_TOUCH_ELE_0_TO_10 = const(0b1011)
_TOUCH_ELE_0_TO_11 = const(0b1100)
# Proximity
_PROXMITY_DISABLED = const(0b00)
_PROXMITY_ELE0_TO_1 = const(0b01)
_PROXMITY_ELE_0_TO_3 = const(0b10)
_PROXMITY_ELE_0_TO_11 = const(0b11)
# Config register
_MHDR = const(0x2b)
_NHDR = const(0x2c)
_NCLR = const(0x2d)
_FDLR = const(0x2e)
_MHDF = const(0x2f)
_NHDF = const(0x30)
_NCLF = const(0x31)
_FDLF = const(0x32)
_NHDT = const(0x33)
_NCLT = const(0x34)
_FDLT = const(0x35)
_MHDPROXR = const(0x36)
_NHDPROXR = const(0x37)
_NCLPROXR = const(0x38)
_FDLPROXR = const(0x39)
_MHDPROXF = const(0x3a)
_NHDPROXF = const(0x3b)
_NCLPROXF = const(0x3c)
_FDLPROXF = const(0x3d)
_NHDPROXT = const(0x3e)
_NCLPROXT = const(0x3f)
_FDLPROXT = const(0x40)
_E0TTH = const(0x41)
_E0RTH = const(0x42)
_E1TTH = const(0x43)
_E1RTH = const(0x44)
_E2TTH = const(0x45)
_E2RTH = const(0x46)
_E3TTH = const(0x47)
_E3RTH = const(0x48)
_E4TTH = const(0x49)
_E4RTH = const(0x4a)
_E5TTH = const(0x4b)
_E5RTH = const(0x4c)
_E6TTH = const(0x4d)
_E6RTH = const(0x4e)
_E7TTH = const(0x4f)
_E7RTH = const(0x50)
_E8TTH = const(0x51)
_E8RTH = const(0x52)
_E9TTH = const(0x53)
_E9RTH = const(0x54)
_E10TTH = const(0x55)
_E10RTH = const(0x56)
_E11TTH = const(0x57)
_E11RTH = const(0x58)
_E12TTH = const(0x59)
_E12RTH = const(0x5a)
_DTR = const(0x5b)
_AFE1 = const(0x5c)
_AFE2 = const(0x5d)
_ECR = const(0x5e)
_CDC0 = const(0x5f)
_CDC1 = const(0x60)
_CDC2 = const(0x62)
_CDC4 = const(0x63)
_CDC5 = const(0x64)
_CDC6 = const(0x65)
_CDC7 = const(0x66)
_CDC8 = const(0x67)
_CDC9 = const(0x68)
_CDC10 = const(0x69)
_CDC11 = const(0x6a)
_CDC12 = const(0x6b)
_CDT_0_1 = const(0x6c)
_CDT_2_3 = const(0x6d)
_CDT_4_5 = const(0x6e)
_CDT_6_7 = const(0x6f)
_CDT_8_9 = const(0x70)
_CDT_10_11 = const(0x71)
_CDT_12 = const(0x72)
_GPIO_CTL0 = const(0x73)
_GPIO_CTL1 = const(0x74)
_GPIO_DIR = const(0x76)
_GPIO_EN = const(0x77)
_GPIO_SET = const(0x78)
_GPIO_CLR = const(0x79)
_GPIO_TOG = const(0x7a)
_AUTO_CONFIG_0 = const(0x7b)
_AUTO_CONFIG_1 = const(0x7c)
_AUTO_CONFIG_USL = const(0x7d)
_AUTO_CONFIG_LSL = const(0x7e)
_AUTO_CONFIG_TL = const(0x7f)
def write(register, value):
    """Write one byte `value` into MPR121 register `register` over I2C."""
    i2c.write(_MAKERBIT_MPR121, bytearray((register, value)))
def reset():
# Soft-reset the MPR121 by writing the magic value 0x63 to register 0x80.
write(0x80, 0x63)
def stop():
# Clearing the electrode configuration register halts all measurement.
write(_ECR, 0x0)
def start(cl, eleprox, ele):
# ECR layout: bits [7:6] calibration lock, [5:4] proximity enable,
# [3:0] how many touch electrodes are enabled.
write(_ECR, (cl << 6) | (eleprox << 4) | ele)
def read():
    """Return the 16-bit touch-status word (low byte first on the wire)."""
    low, high = i2c.read(_MAKERBIT_MPR121, 2)
    return (high << 8) | low
def is_touched(sensor):
    """Return True/False for touch sensors T5..T16, or None if `sensor`
    is outside that range."""
    if not 5 <= sensor <= 16:
        return None
    # T5 maps to the highest electrode bit, T16 to the lowest.
    mask = 0b100000000000 >> (sensor - 5)
    return (mask & read()) != 0
def get_sensor():
    """Return the lowest-numbered touched sensor (T5..T16), or None."""
    status = read()
    for sensor in range(5, 17):
        # Same bit mapping as is_touched(): T5 -> bit 11 ... T16 -> bit 0.
        if status & (0b100000000000 >> (sensor - 5)):
            return sensor  # first hit wins
    return None
def init():
# Reset the MPR121, load the default filter/threshold configuration,
# then start continuous capture on electrodes 0-11. The register write
# order below matters: configuration must happen while capture is stopped.
reset()
stop()
#
# Start capturing with default configuration
# Input filter for rising state
write(_MHDR, 0x01)
write(_NHDR, 0x01)
write(_NCLR, 0x10)
write(_FDLR, 0x20)
# Input filter for falling state
write(_MHDF, 0x01)
write(_NHDF, 0x01)
write(_NCLF, 0x10)
write(_FDLF, 0x20)
# Input filter for touched state
write(_NHDT, 0x01)
write(_NCLT, 0x10)
write(_FDLT, 0xff)
# Unused proximity sensor filter
write(_MHDPROXR, 0x0f)
write(_NHDPROXR, 0x0f)
write(_NCLPROXR, 0x00)
write(_FDLPROXR, 0x00)
write(_MHDPROXF, 0x01)
write(_NHDPROXF, 0x01)
write(_NCLPROXF, 0xff)
write(_FDLPROXF, 0xff)
write(_NHDPROXT, 0x00)
write(_NCLPROXT, 0x00)
write(_FDLPROXT, 0x00)
# Debounce configuration (used primarily for interrupts)
write(_DTR, 0x11)
# Electrode clock frequency etc
write(_AFE1, 0xff)
write(_AFE2, 0x30)
# Enable autoconfiguration / calibration
write(_AUTO_CONFIG_0, 0x00)
write(_AUTO_CONFIG_1, 0x00)
# Tuning parameters for the autocalibration algorithm
write(_AUTO_CONFIG_USL, 0x00)
write(_AUTO_CONFIG_LSL, 0x00)
write(_AUTO_CONFIG_TL, 0x00)
# Set touch thresholds
# Touch threshold (60) above release threshold (20) gives hysteresis,
# preventing flicker around the trigger point.
for i in range(0, 12):
write(_E0TTH + i * 2, 60)
# Set release thresholds
for i in range(0, 12):
write(_E0RTH + i * 2, 20)
# Start capture
start(
_BaselineTrackingAndInitialize,
_PROXMITY_DISABLED,
_TOUCH_ELE_0_TO_11
)
init()
|
984,032 | 0d877b3a02a0a856600edef874ca19cdea00f708 | import asyncio
import sys
sys.path.append(r'../')
from server import ServerProtocol, ServerUdpProtocol
from cache import DnsCache
from config import Config
# in unix,test in windows
# import uvloop
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# if sys.platform == 'win32':
#
# l = asyncio.ProactorEventLoop()
# asyncio.set_event_loop(l)
# else:
# import uvloop
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
dns = DnsCache()
loop = asyncio.get_event_loop()
config_getter = Config(server=True)
servers = []
# One TCP server + one UDP endpoint per configured user port.
# NOTE(review): `udp` is overwritten each iteration and never awaited or
# collected — the datagram endpoints cannot be shut down cleanly.
for key, value in config_getter.full_config['users'].items():
port = value['port']
coro = loop.create_server(lambda: ServerProtocol(loop, dns, config_getter, True), '127.0.0.1', port)
server = loop.run_until_complete(coro)
udp = asyncio.ensure_future(loop.create_datagram_endpoint(lambda: ServerUdpProtocol(loop, dns, config_getter),
local_addr=('127.0.0.1', port)))
servers.append(server)
print("server created,waiting for local client's request")
print(servers)
try:
loop.run_forever()
except KeyboardInterrupt as e:
print('all tasks cancelled')
# NOTE(review): asyncio.Task.all_tasks was removed in Python 3.9 — use
# asyncio.all_tasks(loop) instead.
print(asyncio.gather(asyncio.Task.all_tasks()).cancel())
# NOTE(review): only the LAST server from the loop above is closed here;
# presumably every entry in `servers` should be closed. Also
# asyncio.wait() expects awaitables — Server objects are not awaitable,
# so this line likely raises; server.wait_closed() is the usual pattern.
server.close()
loop.run_until_complete(asyncio.wait(servers))
loop.close()
|
984,033 | d251f4146111eef027b201713158c7ea32566806 | from controllers.base.mirakl.offersdate.controllers.offersdate_upload import OffersDateUpload
class EgMiraklOffersDateUpload(OffersDateUpload):
    """Offers-date upload controller for the EG Mirakl marketplace.

    Resolves the production/test endpoints from the
    'miraklOffersdateUpload' sync section and the sync parameters from
    the 'mirakl' section.
    """

    def __init__(self, params=None):
        super().__init__("egmirakloffersdate", params)
        upload_cfg = self.get_param_sincro('miraklOffersdateUpload')
        # Endpoints for the offers-date feed.
        self.offers_url = upload_cfg['url']
        self.offers_test_url = upload_cfg['test_url']
        sync_cfg = self.get_param_sincro('mirakl')
        self.set_sync_params(sync_cfg)
|
984,034 | 9098906de474eeecdeb83a5b40db623e42cd0369 | import pygame
pygame.init()
# Create the window ONCE; the original re-created it with set_mode() on
# every frame, which causes flicker and needless work.
screen = pygame.display.set_mode((600, 600))
# One clock, created once: a fresh Clock per frame defeats tick()'s
# frame-rate limiting.
clock = pygame.time.Clock()

# Exactly one diagonal direction flag is active at a time; crossing a
# screen edge switches the square onto a new diagonal.
on = True
down_right = True
down_left = False
up_right = False
up_left = False
player_pos = [300, 300]

while on:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            on = False
    if not on:
        # Leave the loop before touching any pygame API again: the
        # original called pygame.quit() here and then kept drawing,
        # which raises pygame.error on close.
        break

    x = player_pos[0]
    y = player_pos[1]
    if x >= 580:  # right edge -> bounce down-left
        down_left = True
        down_right = False
        up_right = False
        up_left = False
    if down_left:
        x, y = player_pos[0] - 7, player_pos[1] + 4
        player_pos = [x, y]
    if y >= 580:  # bottom edge -> bounce up-left
        up_left = True
        down_left = False
        down_right = False
        up_right = False
    if up_left:
        x, y = player_pos[0] - 5, player_pos[1] - 8
        player_pos = [x, y]
    if x <= 0:  # left edge -> bounce up-right
        up_right = True
        up_left = False
        down_right = False
        down_left = False
    if up_right:
        x, y = player_pos[0] + 3, player_pos[1] - 10
        player_pos = [x, y]
    if y <= 0:  # top edge -> bounce down-right
        down_right = True
        up_right = False
        up_left = False
        down_left = False
    if down_right:
        x, y = player_pos[0] + 5, player_pos[1] + 4
        player_pos = [x, y]

    screen.fill((76, 153, 0))
    pygame.draw.rect(screen, (51, 255, 255), (player_pos[0], player_pos[1], 30, 30))
    clock.tick(30)
    pygame.display.update()

pygame.quit()
|
984,035 | f7661c280e0964a1f5a3ecfc444925b21af9bab4 | # Load library
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load image as grayscale
# (cv2.imread returns None when the path is missing; np.median would then fail)
image_gray = cv2.imread("images/plane_256x256.jpg", cv2.IMREAD_GRAYSCALE)
# Calculate median intensity
median_intensity = np.median(image_gray)
# Auto-Canny heuristic: thresholds at -33% / +33% of the median intensity,
# clamped to the valid 0..255 range.  (The previous comment said "one
# standard deviation", but the code uses a fixed 0.33 fraction of the median.)
lower_threshold = int(max(0, (1.0 - 0.33) * median_intensity))
upper_threshold = int(min(255, (1.0 + 0.33) * median_intensity))
# Apply canny edge detector
image_canny = cv2.Canny(image_gray, lower_threshold, upper_threshold)
# Show image
plt.imshow(image_canny, cmap="gray"), plt.axis("off")
plt.show()
|
984,036 | b3fea1a38fedd784f0ee4de3626d5ec4cdfa849d | import os
class vcf_correction:
def __init__(self,hap):
self.hapFile = os.path.join(hap.outputFolder ,hap.prefix + '.single.hap')
self.modVCFlog = os.path.join(hap.outputFolder , hap.prefix + '.modVCF.log')
self.vcf = hap.vcf
self.ploidy = hap.ploidy
return
def run (self):
def trim_tails():
region_ploidy = len(l)
import sys
mx_l = -1
mn_r = sys.maxint
hap_b = []
for i in l:
a = i.split()
mx_l = mx_l if int(a[2]) < mx_l else int(a[2])
mn_r = mn_r if int(a[2]) + len(a[3]) > mn_r else int(a[2]) + len(a[3])
hap_b.append((a[1],int(a[2]), a[3]))
cnt_block_len = 0
l_haps = [''] * region_ploidy
for j in range(mx_l, mn_r):
l_snp = []
for ii in range(region_ploidy):
pos = hap_b[ii][1]
hap = hap_b[ii][2]
l_snp.append(hap[j - pos])
if '-' in l_snp:
for k in range(region_ploidy):
l_haps[k] += '-'
else:
for k in range(region_ploidy):
l_haps[k] += l_snp[k]
h = []
hname = []
if len(l_haps[0]) != 0 :
for i in range(region_ploidy):
h += [l_haps[i]]
hname += [hap_b[i][0]]
return True, h,hname,mx_l
return False,[],[],-1
def snp_modification():
region_ploidy = len(haps)
print(haps)
for i in range(len(haps[0])):
mp_snp = {}
for j in range(region_ploidy):
al = haps[j][i]
if al in mp_snp:
mp_snp[al] += 1
else:
mp_snp[al] = 1
if '-' in mp_snp:
continue
if len(mp_snp) == 1:
mp[scfName][start+i] = 'D'
else:
l_t = []
cnt_e = 0
for k in mp_snp:
l_t.append((mp_snp[k], int(k)))
cnt_e += 1
snps = []
for k in sorted(l_t, key=lambda x:(-x[0],x[1])):
snps += [(k[1],k[0])]
mp[scfName][start+i] = ('M',snps)
self.deletion = 0
self.modified = 0
ploidy = self.ploidy
withPickle = False
if withPickle:
import pickle
if False:
hapFile = self.hapFile
mp = {}
for line in open(hapFile):
if line.startswith(">>>"):
a = line.split()
scfName = a[1]
scfLen = int(a[2])
mp[scfName] = {}
l = []
cnt = 0
continue
cnt +=1
l+=[line.rstrip()]
if cnt < ploidy:
continue
bo_empty, haps, hapsName, start = trim_tails()
snp_modification()
l = []
cnt = 0
pickle.dump( mp, open( "save.p", "wb" ) )
else:
mp = pickle.load( open( "save.p", "rb" ) )
else:
hapFile = self.hapFile
mp = {}
for line in open(hapFile):
print line.rstrip()
if line.startswith(">>>"):
a = line.split()
scfName = a[1]
scfLen = int(a[2])
mp[scfName] = {}
l = []
cnt = 0
continue
cnt +=1
region_ploidy = int(line.split()[0])
l+=[line.rstrip()]
if len(l) < region_ploidy:
continue
bo_empty, haps, hapsName, start = trim_tails()
print haps
snp_modification()
l = []
cnt = 0
vcfFile = self.vcf
modVCFFile = self.vcf[:-3] + 'mod.vcf'
fo = open(modVCFFile,'w')
fo_mod = open(self.modVCFlog,'w')
print 'modified VCF file:\t'+modVCFFile
print "log of modified SNPs:\t"+self.modVCFlog
scf = ''
fo.write("###Modified vcf file with ranbow haplotyper\n")
for line in open(vcfFile):
if line[0] == "#":
fo.write(line)
continue
b = line.split()
if scf != b[0]:
cnt = 0
scf = b[0]
else:
if end_of_prev_ref_allele > int(b[1]): # the overlapping SNPs in the code are ignored. the first SNP is maintained and rest of them are removed.
continue
cnt += 1
end_of_prev_ref_allele = int(b[1]) + len(b[3])
b_1 = b
scf_mp = mp.get(scf, -1)
if scf_mp == -1:
fo.write(line)
else:
pos_scf_mp = scf_mp.get(cnt,-1)
if pos_scf_mp == -1:
fo.write(line)
else:
if pos_scf_mp == 'D':
self.deletion += 1
fo_mod.write("D\t"+b[0]+'\t'+b[1]+'\n')
#print "D\t"+b[0]+'\t'+b[1]
continue
elif pos_scf_mp[0] == "M":
self.modified += 1
new_all = pos_scf_mp[1]
ans = ''
ans = b[0] + '\t' + b[1] + '\t'+ b[2]+'\t'
b1 = b[4].split(',')
alleles = [b[3]] + b1
mp_alleles = {}
for i,j in enumerate(alleles):
mp_alleles[i] = j
ans += mp_alleles[new_all[0][0]]+'\t'
GT = "0/"* new_all[0][1]
fo_mod.write("M\t"+b[0]+'\t'+b[1]+'\t'+str(pos_scf_mp)+'\n')
#print "M\t"+b[0]+'\t'+b[1]#+str(pos_scf_mp)
for i in xrange(1,len(new_all)):
ans += mp_alleles[new_all[i][0]]+','
GT += (str(i)+'/')*new_all[i][1]
ans = ans[:-1] + '\t20\t.\t'
freq_ref = new_all[0][1]
ans += 'AC=' + str(freq_ref) + ";"
ans += 'Af=' + str(freq_ref/float(ploidy)) + ";AN=6\tGT\t"+GT[:-1]+"\n"
fo.write(ans)
print "Modified SNPs:",self.modified, "\tDeleted SNPs:", self.deletion
|
984,037 | 5a87d22be094efd867347555f569b656f5c117fe | class Solution:
    def __init__(self):
        """Prepare the four neighbor offsets; the memo table is built per query."""
        # Up, down, left, right.
        self.directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
        self.cache = None  # memo table, (re)created by longestIncreasingPath
def dfs(self, matrix, row, col, rows, cols):
# if the value is present in cache, return it
if self.cache[row][col] > 0:
return self.cache[row][col]
maxVal = 0
# for every direction up down left right
for dir_ in self.directions:
x = row + dir_[0]
y = col + dir_[1]
# if we are in the bounds of the matrix and the path is increasing, update max val
if x >= 0 and y >= 0 and x < rows and y < cols and matrix[x][y] > matrix[row][col]:
maxVal = max(maxVal, self.dfs(matrix, x, y, rows, cols))
# increase max path by 1 and store it in cache
self.cache[row][col] = maxVal + 1
return self.cache[row][col]
def longestIncreasingPath(self, matrix):
# get the size of the matrix and resize the cache
rows = len(matrix)
if rows == 0:
return 0
cols = len(matrix[0])
self.cache = [[0 for i in range(cols + 1)] for i in range(rows + 1)]
maxPath = 0
# go a dfs and keep updating maxPath
for i in range(rows):
for j in range(cols):
maxPath = max(maxPath, self.dfs(matrix, i, j, rows, cols))
return maxPath
def main():
    """Demo entry point: print the longest increasing path for a sample grid."""
    grid = [[9,9,4],[6,6,8],[2,1,1]]
    solver = Solution()
    print("The max increasing path for Matrix ")
    print(grid)
    print("is " + str(solver.longestIncreasingPath(grid)))

if __name__ == "__main__":
    main()
984,038 | 811331560f622dbdc1e5af0c22413daf3e63d032 | cases = int(raw_input())
# Python 2 script: for each test case, reflect point1 through point2 and
# print the mirrored point, i.e. 2*p2 - p1.
for _ in range(cases):
    numbers = map(int, raw_input().split())  # py2: map() returns a list, so indexing works
    point1 = [numbers[0], numbers[1]]
    point2 = [numbers[2], numbers[3]]
    vec = []
    for i in range(2):
        vec.append(point2[i] - point1[i])  # displacement from p1 to p2
    # Extend the displacement past p2 to get the reflection.
    a = point2[0]+vec[0]
    b = point2[1]+vec[1]
    print a, b
984,039 | 81d1018d87d8ae3a318d55af9400ce5979b1150b | #importing numpy module
import numpy as np
from numpy import random
import math
#numpy version
print(np.__version__)
#task1
samplelist = [1,2,3,4,5]
samplelist.sort(reverse = True)
for i in range(len(samplelist)):
samplelist[i] = float(samplelist[i])
arr = np.array(samplelist)
print(arr)
print("\n")
#task2
random_matrix = random.randint(100, size=(4,3))
print('Original matrix')
print(random_matrix )
print("\n")
print('Reshaped matrix:')
reshaped_matrix = random_matrix.reshape(2,6) #reshaped matrix
print(reshaped_matrix)
#task3
mat1 = random.randint(50, size=(3,5))
mat2 = random.randint(50, size=(7,5))
print("\n Matrix 1:-")
print(mat1)
print("\n Matrix 2:-")
print(mat2)
#join matrix
join_mat = np.concatenate((mat1,mat2))
print("\n Concatenated matrix along axis 0:-")
print(join_mat)
print(f"\n Shape of concatenated array is: {np.shape(join_mat)}")
#task4
mat3 = random.randint(10, size=(3,3))
mat4 = random.randint(10, size=(3,3))
print("\n Matrix 1:-")
print(mat3)
print("\n Matrix 2:-")
print(mat4)
#inner product
inner_prod = np.inner(mat3, mat4)
#outer product
outer_prod = np.outer(mat3, mat4)
print(f"\n Inner product of 2 matrices is :- \n {inner_prod}")
print(f"\n Outer product of 2 matrices is :- \n{outer_prod}")
#task5
arr1 = np.array(random.randint(10,size=8))
print(f'\n1-D array is {arr1}')
print(f'\nReverse of arr1 is:- {arr1[::-1]}')
#task6
M = random.randint(10, size=(2,3))
print(f'\nMatrix M is :- \n{M}')
N = []
for row in range(2):
for col in range(3):
if row%2!=0 and col%2==0:
N.append(M[row,col])
print(f'\nMatrix N is: {N}')
#task7
mat = np.arange(25, dtype=float).reshape(5,5)
print(f'\nTask 7 Matrix is: \n{mat}')
del_mat = np.delete(mat,1,0)
print(f'\nAfter deleted 2nd row: \n{del_mat}')
new_mat = np.insert(del_mat,1,np.nan,0)
print(f'\nNew matrix after inserting NAN values: \n{new_mat}')
#task8
a = np.arange(9).reshape(3,3)
print(f'\nOriginal matrix: \n{a}')
sum_of_rows = a.sum(axis=1)
#print(sum_of_rows) array containing each row sum
#print(sum_of_rows[:,np.newaxis])
normalized_arr = a/sum_of_rows[:,np.newaxis]
print(f'\nNormalized form of matrix is: \n{normalized_arr}') |
984,040 | ba7dbfce6ed7080ce870e13fcef0d000e4f87fd5 | import cv2
import numpy as np
from track import BaseTrackObj
class Rect(BaseTrackObj.BaseTrackObj):
    """An axis-aligned detection box tracked across frames."""

    def __init__(self, xmin, ymin, xmax, ymax, frame_index, score):
        super(Rect, self).__init__()
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.frame_index = frame_index
        self.score = score
        self.box = [xmin, ymin, xmax, ymax]
        # Box center as float32, used for trajectory output and __str__.
        cx = (self.xmin + self.xmax) / 2
        cy = (self.ymin + self.ymax) / 2
        self.center = np.array([cx, cy], dtype=np.float32)

    @property
    def area(self):
        """Box area (width * height)."""
        return (self.xmax - self.xmin) * (self.ymax - self.ymin)

    @property
    def radius(self):
        """Side length of a square with the same area."""
        return self.area ** 0.5

    def output_traj(self):
        """Return one trajectory row: [track_id, frame_index, cx, cy]."""
        return [self.track_id, self.frame_index, self.center[0], self.center[1]]

    def show_in_frame(self, frame):
        """Draw the box and its track id onto frame and return it."""
        top_left = (int(self.xmin), int(self.ymin))
        bottom_right = (int(self.xmax), int(self.ymax))
        label_pos = (int((self.xmin + self.xmax) / 2), int(self.ymin - 10))
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0))
        cv2.putText(frame, '{}'.format(self.track_id), label_pos,
                    cv2.FONT_HERSHEY_COMPLEX, 0.8, (255, 255, 255))
        return frame

    def __str__(self):
        return '{} {}'.format(self.center[0], self.center[1])
984,041 | 9dd0077ceae4a1b7aa7b24fd9b9efded561e2b98 | class Square():
square_list=[]
def __init__(self, s1):
self.s1 = s1
self.square_list.append(s1)
def calculate_perimeter(self):
return self.s1 * 4
def change_size(self, new_size):
self.s1 += new_size
|
984,042 | e7ea4d4e9b07657b62e439857cd0c1e136a1af1d | from collections import Counter
N, P = map(int, input().split())
Ss = input()
if P == 2 or P == 5:
ans = 0
for i, S in enumerate(Ss):
if int(S)%P == 0:
ans += i+1
else:
As = [0]
A = 0
D = 1
for S in Ss[::-1]:
S = int(S)
A = (A + S*D) % P
As.append(A)
D = D*10 % P
cnt = Counter()
ans = 0
for A in As:
ans += cnt[A]
cnt[A] += 1
print(ans)
|
984,043 | bb9ed9b1e15e45d2f41d9b5f5bf71883bb598dfb | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 15:13:37 2019
@author: Shrutika
"""
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): hard-coded absolute Windows path -- breaks on any other machine.
df=pd.read_csv(r'C:\Users\Shrutika\Desktop\spyder programs\Clustering\income.csv')
df.columns  # no-op inspection leftover from interactive use
x=pd.DataFrame(df.Age)
y=pd.DataFrame(df.Income)
from sklearn.cluster import KMeans
obj=KMeans(n_clusters=4)
# Clusters are fit on Age only (a single feature).
obj.fit_predict(x)
b=obj.cluster_centers_
# NOTE(review): with a single feature, cluster_centers_ has shape (4, 1),
# so b[:,1] below should raise IndexError -- presumably the fit was meant
# to use both Age and Income; verify.
plt.scatter(x.Age,y.Income,c=obj.labels_,cmap='rainbow')
plt.scatter(b[:,0],b[:,1],marker='*',s=60,color='red')
|
984,044 | 7c6b92bf42bdaed078dba76c9ac14872db531054 | #Atividade 06 - LAB01
def perimetro(altura, comprimento):
    """Return the rectangle's perimeter (sum of its four sides)."""
    return (altura * 2) + (comprimento * 2)


def area(altura, comprimento):
    """Return the rectangle's area (height times width)."""
    return altura * comprimento


if __name__ == '__main__':
    # Read the dimensions and print perimeter and area; the guard makes
    # the computations importable/testable while keeping the script's
    # runtime behavior identical.
    altura = float(input('Digite a altura do retângulo: '))
    comprimento = float(input('Digite o comprimento do retângulo: '))
    print(f'O perímetro do retângulo é: {perimetro(altura, comprimento)}')
    print(f'A área do retângulo é: {area(altura, comprimento)}')
|
984,045 | 8398c7a1f96df1c7bcbc89434ad84c10efa229ef | from django.conf.urls import url
from .import views
app_name='list'
# URL routes for the list app; `name` values are used for reverse() lookups.
urlpatterns = [
    url(r'^about/$',views.about1,name="about"),
    # NOTE(review): pattern lacks the '^' anchor, so it also matches any
    # URL *ending* in 'create/' -- probably meant r'^create/$'.
    url(r'create/$',views.article_create, name="create"),
    # Catch-all slug route; must stay last or it would shadow the others.
    url(r'^(?P<slug>[\w-]+)/$',views.details,name="data"),
    #url(r'^$',views.homepage, name="test"),
]
|
984,046 | b3cc1335d7a8715be7e6cbe1544b224faa8e6891 | from django.contrib import admin
from books.models import Book, Author, CustomLog
# Expose the books app's models in the Django admin site (default ModelAdmin).
admin.site.register(Book)
admin.site.register(Author)
admin.site.register(CustomLog)
|
984,047 | e49c28808888f5040b7f8f2238a492f30614169a | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 19:28:30 2019
@author: Eastwind
"""
import csv
import numpy as np
import pandas as pd
from sklearn import preprocessing
def parse_data_file(data_file):
    """Load the arrhythmia CSV and return (X, y).

    X: feature DataFrame with non-numeric entries ('?') coerced to NaN,
       NaNs imputed with the column mean, then min-max scaled to [0, 1].
    y: the raw 'Class' target column, untouched.
    """
    # read in csv
    dataset = pd.read_csv(data_file)
    # extract actual classification from data
    y = dataset['Class']
    # Keyword `axis`: the positional form dataset.drop('Class', 1) was
    # deprecated and raises TypeError on pandas 2.0+.
    X = dataset.drop('Class', axis=1)
    # convert all values to numbers; '?' placeholders become NaN
    # (read from X for consistency -- same values as dataset[column] here)
    for column in X:
        X[column] = pd.to_numeric(X[column], errors='coerce')
    # fill missing with the column mean
    X = X.fillna(X.mean())
    # normalize each feature to [0, 1]
    X[:] = preprocessing.MinMaxScaler().fit_transform(X.values)
    return X, y


if __name__ == '__main__':
    parse_data_file('arrhythmia.csv')
|
984,048 | 19743ecd570479e67c28272862358daa5ebd31bb | from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from apps.usuarios import forms, models
from apps.core.models import Filial
class UsuariosTest(TestCase):
    """Tests for the usuarios app: user form behavior, Perfil model, login view."""
    def setUp(self):
        """Create one User with a linked Perfil/Filial before every test."""
        usuario = User.objects.create(
            username='Nathan Bahia',
            email='nathan@mail.com'
        )
        filial = Filial.objects.create(
            nome='Filial A',
            cidade='Cidade A'
        )
        models.Perfil.objects.create(
            usuario=usuario,
            filial=filial
        )
    def test_formulario_cadastro_usuario(self):
        """The form shows `password` only when creating (no instance bound)."""
        form = forms.UsuarioForm()
        self.assertIn('password', form.fields)
        usuario = User.objects.latest('id')
        form = forms.UsuarioForm(instance=usuario)
        self.assertNotIn('password', form.fields)
    def test_str_perfil(self):
        """str(Perfil) should match the linked user's first name."""
        # NOTE(review): first_name is never set in setUp (only username),
        # so both sides are presumably '' -- confirm Perfil.__str__ really
        # returns usuario.first_name.
        perfil = models.Perfil.objects.latest('id')
        self.assertEqual(perfil.usuario.first_name, str(perfil))
    def test_response_login_view(self):
        """Login view renders the registration/login.html template with a form."""
        response = self.client.get(reverse('usuarios:login'))
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, 'registration/login.html')
        self.assertIn('form', response.context)
|
984,049 | 246a5b2e60a91f470c9c1c9fb77b722cdd1a3749 | import wx, os
FONTSIZE = 14
class TextDocPrintout( wx.Printout ):
    """wx.Printout that paginates and prints plain text in a fixed-size font.

    NOTE(review): uses wxPython Classic APIs (wx.TELETYPE, wx.RectPP,
    DrawRectangleRect, SetClippingRect) that are not present in Phoenix;
    confirm the targeted wxPython version.
    """
    def __init__( self, text, title, margins ):
        wx.Printout.__init__( self, title )
        self.lines = text.split( '\n' )
        self.margins = margins          # (topLeft, bottomRight), used in mm below
        self.numPages = 1               # recomputed in OnPreparePrinting
    def HasPage( self, page ):
        """Tell the framework whether `page` exists."""
        return page <= self.numPages
    def GetPageInfo( self ):
        """Return (minPage, maxPage, pageFrom, pageTo)."""
        return ( 1, self.numPages, 1, self.numPages )
    def CalculateScale( self, dc ):
        """Scale the DC so text keeps its on-screen size; cache
        logical-units-per-millimetre in self.logUnitsMM."""
        ppiPrinterX, ppiPrinterY = self.GetPPIPrinter()
        ppiScreenX, ppiScreenY = self.GetPPIScreen()
        logScale = float( ppiPrinterX )/float( ppiScreenX )
        pw, ph = self.GetPageSizePixels()
        dw, dh = dc.GetSize()
        scale = logScale * float( dw )/float( pw )
        dc.SetUserScale( scale, scale )
        self.logUnitsMM = float( ppiPrinterX )/( logScale*25.4 )
    def CalculateLayout( self, dc ):
        """Compute the printable rectangle (x1,y1)-(x2,y2) from the margins
        and derive lineHeight / linesPerPage for the fixed-size font."""
        topLeft, bottomRight = self.margins
        dw, dh = dc.GetSize()
        self.x1 = topLeft.x * self.logUnitsMM
        self.y1 = topLeft.y * self.logUnitsMM
        self.x2 = ( dc.DeviceToLogicalXRel( dw) - bottomRight.x * self.logUnitsMM )
        self.y2 = ( dc.DeviceToLogicalYRel( dh ) - bottomRight.y * self.logUnitsMM )
        self.pageHeight = self.y2 - self.y1 - 2*self.logUnitsMM
        font = wx.Font( FONTSIZE, wx.TELETYPE, wx.NORMAL, wx.NORMAL )
        dc.SetFont( font )
        self.lineHeight = dc.GetCharHeight()
        self.linesPerPage = int( self.pageHeight/self.lineHeight )
    def OnPreparePrinting( self ):
        """Compute the total page count before printing starts."""
        dc = self.GetDC()
        self.CalculateScale( dc )
        self.CalculateLayout( dc )
        # NOTE(review): on Python 3 this is true division, making numPages
        # a float (HasPage's <= comparison still works, but this looks like
        # Python 2 code -- should be // under Python 3).
        self.numPages = len(self.lines) / self.linesPerPage
        if len(self.lines) % self.linesPerPage != 0:
            self.numPages += 1
    def OnPrintPage(self, page):
        """Draw one page: a frame around the printable area, then the slice
        of lines belonging to `page`, clipped to the frame."""
        dc = self.GetDC()
        self.CalculateScale(dc)
        self.CalculateLayout(dc)
        dc.SetPen(wx.Pen("black", 0))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        r = wx.RectPP((self.x1, self.y1), (self.x2, self.y2))
        dc.DrawRectangleRect(r)
        dc.SetClippingRect(r)
        line = (page-1) * self.linesPerPage
        x = self.x1 + self.logUnitsMM
        y = self.y1 + self.logUnitsMM
        while line < (page * self.linesPerPage):
            dc.DrawText(self.lines[line], x, y)
            y += self.lineHeight
            line += 1
            if line >= len(self.lines):
                break
        return True
|
984,050 | de97a8f865693f8bd7f2c615ed337df921f7eb5f | from PIL import Image, ImageDraw
from presentation.observer import Observer
from presentation.screens.layouts.default import Default
SCREEN_HEIGHT = 122
SCREEN_WIDTH = 250
class Picture(Observer):
    """Observer that renders each data update into a 1-bit image file."""

    def __init__(self, observable, filename, mode):
        super().__init__(observable=observable)
        self.filename = filename
        self.layout = Default(SCREEN_WIDTH, mode)

    def update(self, data):
        """Render `data` through the layout and write it to self.filename."""
        # '1' mode = 1-bit pixels; 255 gives a white background.
        canvas = Image.new('1', (SCREEN_WIDTH, SCREEN_HEIGHT), 255)
        drawer = ImageDraw.Draw(canvas)
        self.layout.form_image(drawer, data)
        canvas.save(self.filename)

    def close(self):
        """Nothing to release for a file-backed output."""
        pass
|
984,051 | 8e3aa2fc3e7d99043af78d4d4173e82e5038e4cb | _.error_on_external_run # unused attribute (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:20)
_.reuse_existing_virtualenvs # unused attribute (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:21)
_.stop_on_first_error # unused attribute (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:22)
test # unused function (/home/lschmelzeisen/Repositories/wallcrop/noxfile.py:25)
Config # unused class (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:34)
search_path # unused variable (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:35)
Config # unused class (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:39)
version # unused variable (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:41)
description # unused variable (/home/lschmelzeisen/Repositories/wallcrop/src/wallcrop/_cli.py:42)
|
984,052 | 798e4dbf462c084b1a0153aaebdb47f4b5dea870 | #Vanity miner (searching for an address containing a certain string)
import bitcoin
import os
import codecs
search = '1kid'
# Brute force: draw random 32-byte secrets until the derived address
# contains the target string.
while True:
    secret = os.urandom(32)
    address = bitcoin.privkey_to_address(secret)
    # NOTE(review): case-sensitive substring match *anywhere* in the
    # address, not a '1kid...' prefix match -- confirm that is intended.
    if search in address:
        break
print('Found vanity address! ', address)
print('Secret: ', codecs.encode(secret, 'hex').decode())
984,053 | 09ef6801df4e5aab52094c47ef9c193c8110c97d | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 17:14:44 2019
@author: Sheeza
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('aids1.csv')
X = dataset.iloc[:, :-1].values
# NOTE(review): y takes column index 1 while X is "all but the last
# column" -- the target is usually the last column (iloc[:, -1]);
# confirm the intended target column.
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set (80/20, seeded)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
|
984,054 | ce2ddd9b948b114017eeece75c9d05e8a13837c5 | class Solution:
def wordPattern(self, pattern, str):
cast = {}
strL = str.split()
Plength =len(pattern)
Slength = len(strL)
print(Plength)
print(Slength)
if Plength != Slength:
return False
for i in range(Plength):
if cast.__contains__(pattern[i]):
if cast[pattern[i]] != strL[i]:
return False
else:
if strL[i] in cast.values():
return False
cast[pattern[i]]=strL[i]
print(cast)
return True
# Ad-hoc smoke test: the first call should print True, the second False.
sol = Solution()
res = sol.wordPattern('abba','cat dog dog cat')
print(res)
res = sol.wordPattern('abba','cat dog fish cat')
print(res)
984,055 | b226ed4dd9ec09a957290383a126007b15b1b91f | import cv2
from modules.utils import set_memory_growth
from modules.dataset import load_cifar10_dataset
# set_memory_growth(): project helper from modules.utils -- presumably
# enables incremental GPU memory allocation; verify against its definition.
set_memory_growth()
# One image per batch, raw (un-normalized) pixel values, deterministic order.
dataset = load_cifar10_dataset(batch_size=1, split='train', shuffle=False,
                               using_normalize=False)
for (img, labels)in dataset:
    img = img.numpy()[0]  # drop the batch dimension
    print(img.shape, labels.shape, labels.numpy())
    # The dataset yields RGB; OpenCV windows expect BGR.
    cv2.imshow('img', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
    if cv2.waitKey(0) == ord('q'):  # 'q' quits, any other key shows the next image
        exit()
984,056 | 23ffe51b32d1d63862ac1f642ca8bb5861f1331a | #%%
import math
n_data = 21 # number simulated measurements to generate
b_true = 20.0 # b used to simulate data
a_true = math.log(2.0/b_true) # a used to simulate data
c_true = 1.0 / b_true # c used to simulate data
phi_true = math.log(0.1) # phi used to simulate data
rel_tol = 1e-5 # relative tolerance used to check optimal solution
#%%
# -------------------------------------------------------------------------
import sys
import pandas
import numpy
import scipy
import pdb
import curvefit
from curvefit.core.model import CurveModel
#
# model for the mean of the data
def generalized_error_function(t, params):
    """Mean-curve model: a scaled, shifted error function.

    params = (alpha, beta, p): alpha scales the slope, beta shifts the
    inflection point, p is the asymptotic level.  Returns
    0.5 * p * (1 + erf(alpha * (t - beta))).
    """
    # Import the submodule explicitly: the file only does `import scipy`,
    # which does not load scipy.special -- the original call worked only
    # if some other import pulled scipy.special in as a side effect.
    import scipy.special
    alpha = params[0]
    beta = params[1]
    p = params[2]
    return 0.5 * p * (1.0 + scipy.special.erf(alpha * (t - beta)))
#
# link function used for beta
def identity_fun(x) :
    """Identity link: return x unchanged."""
    return x
#
# link function used for alpha, p
def exp_fun(x) :
    """Exponential link: map an unconstrained value to a positive one."""
    return numpy.exp(x)
#
# inverse of function used for alpha, p
def log_fun(x) :
    """Inverse of exp_fun (natural logarithm)."""
    return numpy.log(x)
#
# true value for fixed effects
fe_true = numpy.array( [ a_true, b_true, c_true, phi_true ] )
num_fe = len(fe_true)
# -----------------------------------------------------------------------
# data_frame
independent_var = numpy.array(range(n_data)) * b_true / (n_data-1)
social_distance = numpy.zeros(n_data, dtype = float)
params_true = numpy.zeros((n_data, 3), dtype = float)
alpha_true = numpy.exp( a_true)
p_true = numpy.exp( phi_true )
for i in range(n_data) :
social_distance[i] = 0 if i < n_data / 2.0 else 1
beta_true = b_true + c_true * social_distance[i]
params_true[i] = [alpha_true, beta_true, p_true ]
params_true = numpy.transpose(params_true)
measurement_value = generalized_error_function(independent_var, params_true)
measurement_std = n_data * [ 0.1 ]
cov_one = n_data * [ 1.0 ]
data_group = n_data * [ 'world' ]
data_dict = {
'independent_var' : independent_var ,
'measurement_value' : measurement_value ,
'measurement_std' : measurement_std ,
'cov_one' : cov_one ,
'social_distance' : social_distance ,
'data_group' : data_group ,
}
data_frame = pandas.DataFrame(data_dict)
# ------------------------------------------------------------------------
# curve_model
col_t = 'independent_var'
col_obs = 'measurement_value'
col_covs = [ ['cov_one'], ['cov_one', 'social_distance'], ['cov_one'] ]
col_group = 'data_group'
param_names = [ 'alpha', 'beta', 'p' ]
link_fun = [ exp_fun, identity_fun, exp_fun ]
var_link_fun = num_fe * [ identity_fun ]
fun = generalized_error_function
col_obs_se = 'measurement_std'
#
curve_model = curvefit.core.model.CurveModel(
data_frame,
col_t,
col_obs,
col_covs,
col_group,
param_names,
link_fun,
var_link_fun,
fun,
col_obs_se
)
# -------------------------------------------------------------------------
#%% fit_params
#
fe_init = fe_true / 3.0
re_init = numpy.zeros( num_fe )
fe_bounds = [ [-numpy.inf, numpy.inf] ] * num_fe
re_bounds = [ [0.0, 0.0] ] * num_fe
options = {
'ftol' : 1e-12,
'gtol' : 1e-12,
}
#
curve_model.fit_params(
fe_init,
re_init,
fe_bounds,
re_bounds,
options=options
)
fe_estimate = curve_model.result.x[:num_fe]
curve_model.result
# -------------------------------------------------------------------------
#%% check result
for i in range(num_fe) :
rel_error = fe_estimate[i] / fe_true[i] - 1.0
assert abs(rel_error) < rel_tol
#
print('covariate.py: OK')
sys.exit(0) |
984,057 | 7b02f74ead6cf5e413798069dd8a6e6e1228b1c0 | # -*- coding: utf-8 -*-
import time
start = time.time()
def factorial(num):
    """Return num! for a non-negative integer num (0! == 1)."""
    product = 1
    for k in range(1, num + 1):
        product *= k
    return product
def comb(num1, num2):
    """Return the binomial coefficient C(num1, num2) as an exact integer.

    The original computed factorial(n) / (factorial(r) * factorial(n-r))
    with true division, producing a float and losing precision once the
    factorials exceed 2**53.  The multiplicative formula below stays in
    integer arithmetic: each prefix product is divisible by k, so // is exact.
    """
    result = 1
    for k in range(1, num2 + 1):
        result = result * (num1 - num2 + k) // k
    return result
# Project Euler 53: count the C(n, r) values exceeding one million for n <= 100.
count = 0
for n in range(1,101):
    # NOTE: range(0, n) skips r == n, but C(n, n) == 1 so the count is unaffected.
    for r in range(0,n):
        if(comb(n,r) > 1000000):
            count = count+1
print(count)
print(time.time()-start)
984,058 | b80f13350732e024a66c8d451809949f78e33b29 | """A Checkers game"""
import numpy as np
from .wrapped import CCheckersEnv
from ..agents import MCTSAgent
from ..graphics import GraphWin, Text, Point, Rectangle, Circle
from ..gui import Button
class PyCheckersEnv:
    """An environment for two-player checkers.

    Board encoding on an 8x8 grid: 0 = empty, +1/-1 = men of player 0 /
    player 1, +2/-2 = kings.  Player 0's men move toward higher rows and
    player 1's toward lower rows; pieces sit on the dark squares
    ((i + j) odd).  `self.actions` always lists the legal moves of the
    player to move, each as a ((from_row, from_col), (to_row, to_col)) pair.
    """

    def __init__(self):
        self.players = 2
        self.reset()

    def reset(self):
        """Initialize a new game: board, turn, and player 0's opening moves."""
        # Plain `int`: the np.int alias was removed in NumPy 1.24 and
        # raises AttributeError there.
        self.board = np.zeros((8, 8), dtype=int)
        self.done = False
        self.actions = []
        self.turn = 0
        for i in range(8):
            for j in range(8):
                if (i+j)%2!=0:
                    if i<3:
                        self.board[i,j]=1
                        if i==2:
                            # Only the front row has legal opening moves.
                            moves=(self.fdiag(i,j),self.fadiag(i,j))
                            for r in range(len(moves)):
                                if moves[r] is not None:
                                    self.actions.append(moves[r])
                    if i>4:
                        self.board[i,j]=-1

    def step(self, action):
        """Perform action and return new board, rewards, done, and turn."""
        if np.abs(action[0][0]-action[1][0])==2:
            # A two-row move is a jump: clear the captured piece between
            # source and destination.
            self.board[(action[0][0]+action[1][0])//2,(action[0][1]+action[1][1])//2]=0
        self.board[action[1]] = self.board[action[0]]
        if action[1][0]==0 or action[1][0]==7:
            # Reaching either back rank promotes the piece to a king.
            self.board[action[1]] = 2*np.sign(self.board[action[0]])
        self.board[action[0]] = 0
        self.turn = (self.turn + 1)%2
        # Recompute the legal moves for the player now on turn.
        self.actions=[]
        for i in range(8):
            for j in range(8):
                if np.sign(self.board[i,j])==(-1)**self.turn:
                    moves=(self.bdiag(i,j),self.badiag(i,j),self.fdiag(i,j),self.fadiag(i,j))
                    for r in range(4):
                        if moves[r] is not None:
                            self.actions.append(moves[r])
        winner = self.winner(action)
        if winner is not None:
            rewards = np.array([winner,(-1)*winner])
        else:
            rewards = np.array([0,0])
        self.done = winner is not None
        return self.board.copy(), rewards, self.done, self.turn

    def winner(self, action):
        """Return +1/-1 for the winner if the player to move has no reply, else None."""
        if len(self.actions)==0:
            return (-1)**(self.turn+1)
        return None

    def bdiag(self, row, col):
        """Move or jump up-left from (row, col), or None.  Forbidden for
        player 0's men (value 1), which cannot move backward."""
        if self.board[row,col]!=1:
            if row>0 and col>0 and self.board[row-1,col-1]==0:
                return ((row,col),(row-1,col-1))
            if row>1 and col>1 and self.board[row-2,col-2]==0 and np.sign(self.board[row-1,col-1])==(-1)*np.sign(self.board[row,col]):
                return ((row,col),(row-2,col-2))
        return None

    def badiag(self, row, col):
        """Move or jump up-right from (row, col), or None (not for player 0's men)."""
        if self.board[row,col]!=1:
            if row>0 and col<7 and self.board[row-1,col+1]==0:
                return ((row,col),(row-1,col+1))
            if row>1 and col<6 and self.board[row-2,col+2]==0 and np.sign(self.board[row-1,col+1])==(-1)*np.sign(self.board[row,col]):
                return ((row,col),(row-2,col+2))
        return None

    def fdiag(self, row, col):
        """Move or jump down-right from (row, col), or None (not for player 1's men)."""
        if self.board[row,col]!=-1:
            if row<7 and col<7 and self.board[row+1,col+1]==0:
                return ((row,col),(row+1,col+1))
            if row<6 and col<6 and self.board[row+2,col+2]==0 and np.sign(self.board[row+1,col+1])==(-1)*np.sign(self.board[row,col]):
                return ((row,col),(row+2,col+2))
        return None

    def fadiag(self, row, col):
        """Move or jump down-left from (row, col), or None (not for player 1's men)."""
        if self.board[row,col]!=-1:
            if row<7 and col>0 and self.board[row+1,col-1]==0:
                return ((row,col),(row+1,col-1))
            if row<6 and col>1 and self.board[row+2,col-2]==0 and np.sign(self.board[row+1,col-1])==(-1)*np.sign(self.board[row,col]):
                return ((row,col),(row+2,col-2))
        return None

    def copy(self):
        """Return an independent copy of the environment state.

        Fix: the original constructed `CheckersEnv()`, a name that does not
        exist in this module (the class is PyCheckersEnv), so copy() always
        raised NameError.
        """
        copy = PyCheckersEnv()
        copy.board = self.board.copy()
        copy.turn = self.turn
        copy.done = self.done
        copy.actions = self.actions.copy()
        return copy

    def render(self):
        """Print the raw board array."""
        print(self.board)

    def __eq__(self, other):
        return np.array_equal(self.board, other.board) and self.turn==other.turn
class CheckersApp:
"""Application for running a game of Checkers."""
def __init__(self, interface, implementation="c"):
self.env = PyCheckersEnv() if implementation == "python" else CCheckersEnv()
self.interface = interface
def run(self):
"""Start the application."""
while True:
self.interface.show_start()
choice = self.interface.want_to_play()
if choice[0].lower() == 'q':
self.interface.close()
break
else:
player = int(choice) - 1
agent = MCTSAgent()
self.play_games(player, agent)
def play_games(self, player, agent):
"""Play games between player and agent."""
self.interface.show_board()
while True:
self.env.reset()
total_rewards = np.zeros(self.env.players)
while not self.env.done:
self.interface.update_board(self.env.board)
if self.env.turn == player:
a=self.interface.get_action1(self.env.actions)
if a[0].lower() == 'q':
return
elif a[0].lower() == 'r':
self.env.reset()
total_rewards = np.zeros(self.env.players)
continue
else:
a=a.replace('(', '').replace(')', '').split(',')
a=(int(a[0]),int(a[1]))
b=self.interface.get_action2(self.env.actions,a).replace('(', '').replace(')', '').split(',')
b=(int(b[0]),int(b[1]))
action=(a,b)
else:
action = agent.act(self.env)
_, rewards, _, _ = self.env.step(action)
total_rewards += rewards
self.interface.update_board(self.env.board)
if total_rewards[0]==0:
self.interface.show_winner(0)
else:
self.interface.show_winner(np.argmax(total_rewards)+1)
choice = self.interface.want_to_replay()
if choice[0].lower() == 'q':
return
class CheckersBoard:
"""Widget for a Checkers board."""
def __init__(self, win, center):
self.win = win
self.background_color = "white"
self.frame_colors = ['white','brown']
self.piece_colors = ['black', 'red', 'gray', 'orange']
self.pieces = [[self._make_piece(Point(50+col*45,100+row*45), 45, (row+col)%2)
for col in range(8)]
for row in range(8)]
self.circles=[[Circle(Point(50+col*45,100+row*45), 17)
for col in range(8)]
for row in range(8)]
def _make_piece(self, center, size, var):
"""Set up the grid of pieces."""
piece = Rectangle(Point(center.getX()-size/2,center.getY()-size/2),Point(center.getX()+size/2,center.getY()+size/2))
piece.setFill(self.frame_colors[var])
return piece
def draw(self, win):
for row in range(8):
for col in range(8):
self.pieces[row][col].draw(win)
def undraw(self):
for row in range(8):
for col in range(8):
self.pieces[row][col].undraw()
self.circles[row][col].undraw()
def update(self, board):
"""Draw board state on this widget."""
for row in range(8):
for col in range(8):
if board[row, col] == -1:
self.circles[row][col].undraw()
self.circles[row][col].draw(self.win)
self.circles[row][col].setFill(self.piece_colors[0])
elif board[row, col] == -2:
self.circles[row][col].undraw()
self.circles[row][col].draw(self.win)
self.circles[row][col].setFill(self.piece_colors[2])
elif board[row, col] == 0:
self.circles[row][col].undraw()
self.pieces[row][col].setFill(self.frame_colors[(row+col)%2])
elif board[row, col] == 1:
self.circles[row][col].undraw()
self.circles[row][col].draw(self.win)
self.circles[row][col].setFill(self.piece_colors[1])
elif board[row, col] == 2:
self.circles[row][col].undraw()
self.circles[row][col].draw(self.win)
self.circles[row][col].setFill(self.piece_colors[3])
class CheckersGUI:
    """Top-level checkers UI: start menu, 8x8 board view and result screen.

    Relies on the graphics-style ``Text``/``Point``/``Button`` classes and
    the ``CheckersBoard`` widget defined elsewhere in this file.
    """
    def __init__(self, window):
        #self.window = GraphWin("Checkers", 400, 575)
        self.window=window
        self.window.setBackground("white")
        # Banner used for the title, turn prompts and the winner message.
        self.banner = Text(Point(200, 50), "")
        self.banner.setSize(25)
        self.banner.setFill("black")
        self.banner.setStyle("bold")
        self.banner.draw(self.window)
        # Start-menu buttons: choose which side to play, or quit.
        self.start_buttons = [
            Button(self.window, Point(200, 275), 150, 50, "Player 1"),
            Button(self.window, Point(200, 350), 150, 50, "Player 2"),
            Button(self.window, Point(200, 425), 150, 50, "Quit"),
        ]
        # One 45x45 button per square labelled "(row,col)"; callers parse
        # that label.  The last two entries are the in-game Restart/Quit.
        self.action_buttons = [Button(self.window, Point(50+j*45,100+i*45), 45, 45, "({},{})".format(i,j)) for i in range(8) for j in range(8)]
        self.action_buttons.append(Button(self.window, Point(100, 525), 150, 50, "Restart"))
        self.action_buttons.append(Button(self.window, Point(300, 525), 150, 50, "Quit"))
        self.board = CheckersBoard(self.window, Point(200, 250))
    def show_start(self):
        """Hide the board UI and display the start menu."""
        for b in self.action_buttons:
            b.undraw()
        self.board.undraw()
        self.banner.setText("Checkers")
        for b in self.start_buttons:
            b.draw(self.window)
    def want_to_play(self):
        """Block until a start-menu button is clicked.

        Returns '1' or '2' for the player buttons, or 'Quit'.
        """
        for b in self.start_buttons:
            b.activate()
        while True:
            p = self.window.getMouse()
            for b in self.start_buttons:
                if b.clicked(p):
                    label = b.getLabel()
                    if label != 'Quit':
                        label = label[-1]  # "Player 1" -> "1"
                    return label
    def show_board(self):
        """Hide the start menu and show the board with its buttons."""
        for b in self.start_buttons:
            b.undraw()
        self.banner.setText("")
        for b in self.action_buttons:
            b.draw(self.window)
        self.board.draw(self.window)
    def get_action1(self, actions):
        """Prompt for the square where a move starts.

        Only squares that begin some entry in *actions* are clickable,
        plus the Restart/Quit buttons.  Returns the clicked label.
        """
        self.banner.setText("Your turn")
        for i in range(8):
            for j in range(8):
                self.action_buttons[8*i+j].deactivate()
        for r in actions:
            self.action_buttons[8*r[0][0]+r[0][1]].activate()
        self.action_buttons[-1].activate()
        self.action_buttons[-2].activate()
        while True:
            p = self.window.getMouse()
            for b in self.action_buttons:
                if b.clicked(p):
                    self.banner.setText("")
                    return b.getLabel()
    def get_action2(self, actions, firstaction):
        """Prompt for the destination of the move begun at *firstaction*.

        Only destinations of actions starting at *firstaction* are
        clickable; Restart/Quit stay disabled during this half-move.
        """
        self.banner.setText("Your turn")
        for i in range(8):
            for j in range(8):
                self.action_buttons[8*i+j].deactivate()
        for r in range(len(actions)):
            if actions[r][0][0]==firstaction[0] and actions[r][0][1]==firstaction[1]:
                self.action_buttons[8*actions[r][1][0]+actions[r][1][1]].activate()
        #self.action_buttons[-1].activate()
        #self.action_buttons[-2].activate()
        while True:
            p = self.window.getMouse()
            for b in self.action_buttons:
                if b.clicked(p):
                    self.banner.setText("")
                    return b.getLabel()
    def update_board(self, board):
        """Redraw the board widget from *board* and clear the banner."""
        self.board.update(board)
        self.banner.setText("")
    def show_winner(self, winner):
        """Display the result; *winner* is 0 for a tie, else the player number."""
        if winner==0:
            self.banner.setText("Its a tie!")
        else:
            self.banner.setText("Player {} wins!".format(winner))
    def want_to_replay(self):
        """Block until any in-game button is clicked; return its label."""
        for b in self.action_buttons:
            b.activate()
        while True:
            p = self.window.getMouse()
            for b in self.action_buttons:
                if b.clicked(p):
                    return b.getLabel()
    def close(self):
        """Remove the banner and start-menu buttons from the window."""
        self.banner.undraw()
        for b in self.start_buttons:
            b.undraw()
        #self.window.close()
|
984,059 | cd1966553ffa9168fa8d73871c9f91af8e1178fb | from flask import Blueprint
from flask import render_template
from flask import request, url_for
from bookmark.blueprint.api.form import BookmarkForm
from werkzeug.contrib.atom import AtomFeed
from bookmark.service import get_list_bookmark
b = Blueprint('web', __name__)
@b.route('/', methods=['GET', ])
def index():
    """Render the landing page with an empty bookmark-creation form."""
    bookmark_form = BookmarkForm(create=True)
    return render_template('index.html', form=bookmark_form)
@b.route('feed/news.xml')
def new_bookmarks_feed():
    """Serve the 20 most recent bookmarks as an Atom feed.

    NOTE(review): this uses the Python 2 builtin ``unicode`` and
    ``werkzeug.contrib.atom`` (removed in modern Werkzeug), so the file
    targets Python 2 with an old Werkzeug -- confirm before porting.
    """
    feed = AtomFeed('Recent bookmarks', feed_url=request.url,
                    url=url_for('web.index'))
    # First page only; per_page caps the feed at 20 entries.
    bookmarks = get_list_bookmark(page=0, per_page=20)
    for bookmark in bookmarks:
        # No dedicated created timestamp on the model here, so
        # update_time is used for both updated and published.
        feed.add(bookmark.title, unicode(bookmark.description),
                 content_type='html', url=bookmark.link,
                 updated=bookmark.update_time,
                 published=bookmark.update_time)
    return feed.get_response()
|
984,060 | 318b0a4e0b1afcf11e93611661778654bda431bd | #!/bin/python
from datetime import date, timedelta

# Schedule calculator for a 40-class-day module split into four 10-day
# lessons, skipping weekends and the listed US holidays.  Python 2 script
# (print statements).
moduleStart=date(2014,12,29)
currentDay=moduleStart
moduleDays=1
# NOTE(review): several holidays predate moduleStart (e.g. 2014-10-13)
# and can never be hit with this start date -- confirm intended start.
holidayList=[date(2014,10,13),date(2014,11,11),date(2014,11,27),date(2014,12,25),date(2015,1,1),date(2015,1,19),date(2015,2,16),date(2015,5,25),date(2015,7,3),date(2015,9,7)]
##########################################################
#Below holidays are here simply for organization
#columbusDay=date(2014,10,13)
#veteransDay=date(2014,11,11)
#ThanksgivingDay=date(2014,11,27)
#christmasDay=date(2014,12,25)
#newYearsDay=date(2015,1,1)
#MLK=date(2015,1,19)
#presidentsDay=date(2015,2,16)
#memorialDay=date(2015,5,25)
#julyFourth=date(2015,7,3)
#laborDay=date(2015,9,7)
##########################################################
print "First day of lesson1 is %s" % currentDay
while moduleDays <= 40:
    #Make a list of holidays
    # NOTE(review): only one holiday is skipped per loop pass, so the
    # second of two back-to-back holidays would still count as a class
    # day -- confirm whether that can occur for this calendar.
    if currentDay in holidayList:
        print '%s is a holiday; no class' % currentDay
        tomorrow=currentDay+timedelta(days=1)
        currentDay=tomorrow
    # weekday() 5/6 are Saturday/Sunday; only weekdays count.
    if currentDay.weekday() != 5 and currentDay.weekday() != 6:
        #daysSinceStart+=1 Most likely not needed
        if moduleDays % 40 == 0:
            print "last day of lesson4 is %s \n" % currentDay
            break
        elif moduleDays % 30 == 0:
            # NOTE(review): no "First day of lesson4" line is printed at
            # this boundary, unlike the lesson2/lesson3 ones -- confirm.
            print "last day of lesson3 is %s \n" % currentDay
        elif moduleDays % 20 == 0:
            # NOTE(review): the "First day" line prints the same date as
            # the preceding "last day" line (printed before advancing).
            print "last day of lesson2 is %s \n" % currentDay
            print "First day of lesson3 is %s" % currentDay
        elif moduleDays % 10 == 0:
            print "last day of lesson1 is %s \n" % currentDay
            print "First day of lesson2 is %s " % currentDay
        tomorrow=currentDay+timedelta(days=1)
        currentDay=tomorrow
        moduleDays+= 1
    else:
        # Weekend: advance the calendar without consuming a module day.
        tomorrow=currentDay+timedelta(days=1)
        currentDay=tomorrow
|
984,061 | 0b9940b073ec9979bc9d19ece4e4a8c0b563a86d | from typing import Any, Dict, List, Tuple, Union
import vapoursynth as vs
from lvsfunc.misc import source
from lvsfunc.types import Range
from vardautomation import FileInfo, PresetBD, PresetFLAC, VPath
from project_module import encoder as enc
from project_module import flt
core = vs.core
# Sources
JP_BD = FileInfo(r'BDMV/Vol.01/BTOOOM 01/BDMV/STREAM/00011.m2ts', (24, -24),
idx=lambda x: source(x, cachedir=''), preset=[PresetBD, PresetFLAC])
JP_BD.name_file_final = VPath(fr"premux/{JP_BD.name} (Premux).mkv")
JP_BD.a_src_cut = JP_BD.name
JP_BD.do_qpfile = True
shift_chr: List[Range] = [ # Shift chroma ranges
]
str_grain: List[Range] = [ # Super strong stylistic grain
]
no_filter: List[Range] = [ # No filtering
]
zones: Dict[Tuple[int, int], Dict[str, Any]] = { # Zones for x265
}
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main filterchain"""
    import lvsfunc as lvf
    import rekt
    import vardefunc as vdf
    from awsmfunc import bbmod
    from muvsfunc import SSIM_downsample
    from vsutil import depth, get_y

    src = JP_BD.clip_cut
    # Edge cleanup: level-fix the outermost rows/columns, then balance
    # the remaining luma/chroma edges before working in 32-bit float.
    rkt = rekt.rektlvls(src, [0, -1], [17, 17], [0, -1], [17, 21])
    bb_y = bbmod(rkt, top=2, left=2, right=1, u=False, v=False)
    bb_uv = bbmod(bb_y, left=2, right=2, y=False)
    bb = depth(bb_uv, 32)
    # Descale luma to 720p and pair it with 4:4:4-upsampled chroma so
    # deblocking runs at the lower resolution.
    descale = lvf.kernels.Mitchell().descale(get_y(bb), 1280, 720)
    up_chr = vdf.scale.to_444(bb, 1920, 1080, True).resize.Bicubic(1280, 720)
    descale_merge = vdf.misc.merge_chroma(descale, up_chr)
    denoise_down = lvf.deblock.vsdpir(descale_merge, strength=5, mode='deblock', matrix=1, i444=True, cuda=True)
    # Rescale luma back to source resolution: nnedi3 doubling followed
    # by a sigmoid SSIM downsample.
    supersample = vdf.scale.nnedi3_upscale(get_y(denoise_down))
    downscaled = SSIM_downsample(supersample, bb.width, bb.height, smooth=((3 ** 2 - 1) / 12) ** 0.5,
                                 sigmoid=True, filter_param_a=0, filter_param_b=0)
    # Upsample denoised chroma back to 1080p; ranges in shift_chr get a
    # half-pixel left shift instead.
    den_chr_up = core.resize.Bicubic(denoise_down, bb.width, bb.height, bb.format.id)
    den_chr_up_shift = core.resize.Bicubic(denoise_down, bb.width, bb.height, bb.format.id, src_left=-0.5)
    den_chr_up = lvf.rfs(den_chr_up, den_chr_up_shift, shift_chr)
    scaled = vdf.misc.merge_chroma(downscaled, den_chr_up)
    scaled = depth(scaled, 16)
    # Debanding: shrink grain in bright areas first, then average three
    # differently tuned deband passes.
    decs = vdf.noise.decsiz(scaled, sigmaS=4, min_in=208 << 8, max_in=232 << 8)
    deband = core.average.Mean([
        flt.masked_f3kdb(decs, rad=17, thr=[20, 24], grain=[24, 12]),
        flt.masked_f3kdb(decs, rad=21, thr=[32, 24], grain=[24, 12]),
        flt.masked_placebo(decs, rad=6, thr=2.8, itr=2, grain=4)
    ])
    # Graining: a regular pass everywhere, replaced by a stronger pass
    # on the ranges listed in str_grain.
    grain = vdf.noise.Graigasm(
        thrs=[x << 8 for x in (32, 80, 128, 176)],
        strengths=[(0.25, 0.0), (0.20, 0.0), (0.15, 0.0), (0.0, 0.0)],
        sizes=(1.25, 1.20, 1.15, 1),
        sharps=(80, 70, 60, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=True)
        ]).graining(deband)
    grain_str = vdf.noise.Graigasm(
        thrs=[x << 8 for x in (32, 80, 128, 176)],
        strengths=[(0.35, 0.0), (0.30, 0.0), (0.25, 0.0), (0.0, 0.0)],
        sizes=(1.25, 1.20, 1.15, 1),
        sharps=(80, 70, 60, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=True)
        ]).graining(deband)
    grain = lvf.rfs(grain, grain_str, str_grain)
    return grain
if __name__ == '__main__':
FILTERED = filterchain()
enc.Encoder(JP_BD, FILTERED).run(clean_up=True, zones=zones) # type: ignore
elif __name__ == '__vapoursynth__':
FILTERED = filterchain()
if not isinstance(FILTERED, vs.VideoNode):
raise ImportError(
f"Input clip has multiple output nodes ({len(FILTERED)})! Please output just 1 clip"
)
else:
enc.dither_down(FILTERED).set_output(0)
else:
JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)
FILTERED = filterchain()
if not isinstance(FILTERED, vs.VideoNode):
for i, clip_filtered in enumerate(FILTERED, start=1):
clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
else:
FILTERED.std.SetFrameProp('node', intval=1).set_output(1)
|
984,062 | 224a8b8daeee195109ff947354ac80b85c4ff6d4 | import comtypes.client
def PPTtoPDF(inputFileName, outputFileName, formatType = 32):
    """Convert a PowerPoint presentation to PDF via COM automation.

    Args:
        inputFileName: path of the presentation to open.
        outputFileName: desired output path; a ``.pdf`` suffix is
            appended when missing (checked case-insensitively).
        formatType: PowerPoint ``SaveAs`` format code; 32 is PDF.

    Requires PowerPoint on Windows (``comtypes`` COM automation).
    """
    powerpoint = comtypes.client.CreateObject("Powerpoint.Application")
    powerpoint.Visible = 1
    # The original check compared only the last three characters
    # (``[-3:] != 'pdf'``), so "Report.PDF" gained a second suffix and
    # "reportpdf" was left without one.
    if not outputFileName.lower().endswith('.pdf'):
        outputFileName = outputFileName + ".pdf"
    try:
        deck = powerpoint.Presentations.Open(inputFileName)
        try:
            deck.SaveAs(outputFileName, formatType)  # 32 = PDF
        finally:
            deck.Close()
    finally:
        # Always release the PowerPoint process, even if Open/SaveAs
        # raises; the original leaked it on any failure.
        powerpoint.Quit()
# Mention the Presentation name and the desired PDF name
PPTtoPDF("Report.pptx","Report")
|
984,063 | 591dc42e9379cbf90b8648523ec3c8fc92f6f15e |
"""
Classes and functions for working with the components of \*MRS objects.
"""
import re
import logging
import warnings
from collections import namedtuple, MutableMapping
from itertools import starmap
from delphin.exceptions import (XmrsError, XmrsStructureError)
from delphin.util import deprecated
from .config import (
IVARG_ROLE, CONSTARG_ROLE, RSTR_ROLE,
UNKNOWNSORT, HANDLESORT, CVARSORT, QUANTIFIER_POS,
EQ_POST, HEQ_POST, NEQ_POST, H_POST,
BARE_EQ_ROLE
)
# The classes below are generally just namedtuples with extra methods.
# The namedtuples sometimes have default values. thanks:
# http://stackoverflow.com/a/16721002/1441112
# VARIABLES and LNKS
var_re = re.compile(r'^([-\w]*\D)(\d+)$')
def sort_vid_split(vs):
    """
    Split a valid variable string into its variable sort and id.

    Examples:
        >>> sort_vid_split('h3')
        ('h', '3')
        >>> sort_vid_split('ref-ind12')
        ('ref-ind', '12')
    """
    # Sort is everything up to (and including) the last non-digit; the
    # id is the trailing run of digits.
    match = re.match(r'^([-\w]*\D)(\d+)$', vs)
    if not match:
        raise ValueError('Invalid variable string: {}'.format(str(vs)))
    return match.groups()
def var_sort(v):
    """
    Return the sort of a valid variable string.

    Examples:
        >>> var_sort('h3')
        'h'
        >>> var_sort('ref-ind12')
        'ref-ind'
    """
    sort, _ = sort_vid_split(v)
    return sort
def var_id(v):
    """
    Return the integer id of a valid variable string.

    Examples:
        >>> var_id('h3')
        3
        >>> var_id('ref-ind12')
        12
    """
    _, vid = sort_vid_split(v)
    return int(vid)
class _VarGenerator(object):
    """
    Produce fresh variables, advancing the vid after each one.
    """

    def __init__(self, starting_vid=1):
        self.vid = starting_vid
        self.index = {}  # vid -> variable string created for it
        self.store = {}  # variable string -> its property list

    def new(self, sort, properties=None):
        """
        Create a new variable for the given *sort*.

        Returns a ``(varstring, properties)`` pair and records it in
        :attr:`index` and :attr:`store`.
        """
        if sort is None:
            sort = UNKNOWNSORT
        # Skip over any vids that were already handed out.
        vid = self.vid
        while vid in self.index:
            vid += 1
        varstring = '{}{}'.format(sort, vid)
        self.index[vid] = varstring
        self.store[varstring] = [] if properties is None else properties
        self.vid = vid + 1
        return (varstring, self.store[varstring])
class Lnk(namedtuple('Lnk', ('type', 'data'))):
"""
Surface-alignment information for predications.
Lnk objects link predicates to the surface form in one of several
ways, the most common of which being the character span of the
original string.
Args:
type: the way the Lnk relates the semantics to the surface form
data: the Lnk specifiers, whose quality depends on *type*
Attributes:
type: the way the Lnk relates the semantics to the surface form
data: the Lnk specifiers, whose quality depends on *type*
Note:
Valid *types* and their associated *data* shown in the table
below.
========= =================== =========
type data example
========= =================== =========
charspan surface string span (0, 5)
chartspan chart vertex span (0, 5)
tokens token identifiers (0, 1, 2)
edge edge identifier 1
========= =================== =========
Example:
Lnk objects should be created using the classmethods:
>>> Lnk.charspan(0,5)
'<0:5>'
>>> Lnk.chartspan(0,5)
'<0#5>'
>>> Lnk.tokens([0,1,2])
'<0 1 2>'
>>> Lnk.edge(1)
'<@1>'
"""
# These types determine how a lnk on an EP or MRS are to be
# interpreted, and thus determine the data type/structure of the
# lnk data.
CHARSPAN = 0 # Character span; a pair of offsets
CHARTSPAN = 1 # Chart vertex span: a pair of indices
TOKENS = 2 # Token numbers: a list of indices
EDGE = 3 # An edge identifier: a number
def __init__(self, type, data):
# class methods below use __new__ to instantiate data, so
# don't do it here
if type not in (Lnk.CHARSPAN, Lnk.CHARTSPAN, Lnk.TOKENS, Lnk.EDGE):
raise XmrsError('Invalid Lnk type: {}'.format(type))
@classmethod
def charspan(cls, start, end):
"""
Create a Lnk object for a character span.
Args:
start: the initial character position (cfrom)
end: the final character position (cto)
"""
return cls(Lnk.CHARSPAN, (int(start), int(end)))
@classmethod
def chartspan(cls, start, end):
"""
Create a Lnk object for a chart span.
Args:
start: the initial chart vertex
end: the final chart vertex
"""
return cls(Lnk.CHARTSPAN, (int(start), int(end)))
@classmethod
def tokens(cls, tokens):
"""
Create a Lnk object for a token range.
Args:
tokens: a list of token identifiers
"""
return cls(Lnk.TOKENS, tuple(map(int, tokens)))
@classmethod
def edge(cls, edge):
"""
Create a Lnk object for an edge (used internally in generation).
Args:
edge: an edge identifier
"""
return cls(Lnk.EDGE, int(edge))
def __str__(self):
if self.type == Lnk.CHARSPAN:
return '<{}:{}>'.format(self.data[0], self.data[1])
elif self.type == Lnk.CHARTSPAN:
return '<{}#{}>'.format(self.data[0], self.data[1])
elif self.type == Lnk.EDGE:
return '<@{}>'.format(self.data)
elif self.type == Lnk.TOKENS:
return '<{}>'.format(' '.join(map(str, self.data)))
def __repr__(self):
return '<Lnk object {} at {}>'.format(str(self), id(self))
def __eq__(self, other):
return self.type == other.type and self.data == other.data
class _LnkMixin(object):
    """
    Mixin adding `cfrom` and `cto` surface-span properties.

    Structures carrying a `lnk` attribute (e.g.
    :class:`ElementaryPredication`, :class:`Node`,
    :class:`~delphin.mrs.xmrs.Xmrs`) gain character-span accessors that
    default to `-1` whenever the Lnk is absent or is not a character
    span.
    """

    def _charspan_endpoint(self, i):
        # Missing lnk attribute and non-charspan lnk both fall back to -1.
        try:
            if self.lnk.type == Lnk.CHARSPAN:
                return self.lnk.data[i]
        except AttributeError:
            pass
        return -1

    @property
    def cfrom(self):
        """
        The initial character position in the surface string.

        Defaults to -1 if there is no valid cfrom value.
        """
        return self._charspan_endpoint(0)

    @property
    def cto(self):
        """
        The final character position in the surface string.

        Defaults to -1 if there is no valid cto value.
        """
        return self._charspan_endpoint(1)
# LINKS and CONSTRAINTS
class Link(namedtuple('Link', ('start', 'end', 'rargname', 'post'))):
"""
DMRS-style dependency link.
Links are a way of representing arguments without variables. A
Link encodes a start and end node, the role name, and the scopal
relationship between the start and end (e.g. label equality, qeq,
etc).
Args:
start: nodeid of the start of the Link
end: nodeid of the end of the Link
rargname (str): role of the argument
post (str): "post-slash label" indicating the scopal
relationship between the start and end of the Link;
possible values are `NEQ`, `EQ`, `HEQ`, and `H`
Attributes:
start: nodeid of the start of the Link
end: nodeid of the end of the Link
rargname (str): role of the argument
post (str): "post-slash label" indicating the scopal
relationship between the start and end of the Link
"""
def __new__(cls, start, end, rargname, post):
return super(Link, cls).__new__(
cls, start, end, rargname, post
)
def __repr__(self):
return '<Link object (#{} :{}/{}> #{}) at {}>'.format(
self.start, self.rargname or '', self.post, self.end, id(self)
)
def links(xmrs):
"""Return the list of Links for the *xmrs*."""
# Links exist for every non-intrinsic argument that has a variable
# that is the intrinsic variable of some other predicate, as well
# as for label equalities when no argument link exists (even
# considering transitivity).
links = []
prelinks = []
_eps = xmrs._eps
_hcons = xmrs._hcons
_vars = xmrs._vars
lsh = xmrs.labelset_heads
lblheads = {v: lsh(v) for v, vd in _vars.items() if 'LBL' in vd['refs']}
top = xmrs.top
if top is not None:
prelinks.append((0, top, None, top, _vars[top]))
for nid, ep in _eps.items():
for role, val in ep[3].items():
if role == IVARG_ROLE or val not in _vars:
continue
prelinks.append((nid, ep[2], role, val, _vars[val]))
for src, srclbl, role, val, vd in prelinks:
if IVARG_ROLE in vd['refs']:
tgtnids = [n for n in vd['refs'][IVARG_ROLE]
if not _eps[n].is_quantifier()]
if len(tgtnids) == 0:
continue # maybe some bad MRS with a lonely quantifier
tgt = tgtnids[0] # what do we do if len > 1?
tgtlbl = _eps[tgt][2]
post = EQ_POST if srclbl == tgtlbl else NEQ_POST
elif val in _hcons:
lbl = _hcons[val][2]
if lbl not in lblheads or len(lblheads[lbl]) == 0:
continue # broken MRS; log this?
tgt = lblheads[lbl][0] # sorted list; first item is most "heady"
post = H_POST
elif 'LBL' in vd['refs']:
if val not in lblheads or len(lblheads[val]) == 0:
continue # broken MRS; log this?
tgt = lblheads[val][0] # again, should be sorted already
post = HEQ_POST
else:
continue # CARGs, maybe?
links.append(Link(src, tgt, role, post))
# now EQ links unattested by arg links
for lbl, heads in lblheads.items():
# I'm pretty sure this does what we want
if len(heads) > 1:
first = heads[0]
for other in heads[1:]:
links.append(Link(other, first, BARE_EQ_ROLE, EQ_POST))
# If not, something like this is more explicit
# lblset = self.labelset(lbl)
# sg = g.subgraph(lblset)
# ns = [nid for nid, deg in sg.degree(lblset).items() if deg == 0]
# head = self.labelset_head(lbl)
# for n in ns:
# links.append(Link(head, n, post=EQ_POST))
def _int(x):
try:
return int(x)
except ValueError:
return 0
return sorted(
links,
key=lambda link: (_int(link.start), _int(link.end), link.rargname)
)
class HandleConstraint(
namedtuple('HandleConstraint', ('hi', 'relation', 'lo'))):
"""
A relation between two handles.
Arguments:
hi (str): hi handle (hole) of the constraint
relation (str): relation of the constraint (nearly always
`"qeq"`, but `"lheq"` and `"outscopes"` are also valid)
lo (str): lo handle (label) of the constraint
Attributes:
hi (str): hi handle (hole) of the constraint
relation (str): relation of the constraint
lo (str): lo handle (label) of the constraint
"""
QEQ = 'qeq' # Equality modulo Quantifiers
LHEQ = 'lheq' # Label-Handle Equality
OUTSCOPES = 'outscopes' # Outscopes
@classmethod
def qeq(cls, hi, lo):
return cls(hi, HandleConstraint.QEQ, lo)
def __repr__(self):
return '<HandleConstraint object ({} {} {}) at {}>'.format(
str(self.hi), self.relation, str(self.lo), id(self)
)
def hcons(xmrs):
    """Return the list of all HandleConstraints in *xmrs*."""
    # Order constraints by the numeric id of their hi handle.
    ordered = sorted(xmrs.hcons(), key=lambda hc: var_id(hc[0]))
    return [HandleConstraint(hi, reln, lo) for hi, reln, lo in ordered]
class IndividualConstraint(
namedtuple('IndividualConstraint', ['left', 'relation', 'right'])):
"""
A relation between two variables.
Arguments:
left (str): left variable of the constraint
relation (str): relation of the constraint
right (str): right variable of the constraint
Attributes:
left (str): left variable of the constraint
relation (str): relation of the constraint
right (str): right variable of the constraint
"""
def icons(xmrs):
    """Return the list of all IndividualConstraints in *xmrs*."""
    # Order constraints by the numeric id of their left variable.
    ordered = sorted(xmrs.icons(), key=lambda ic: var_id(ic[0]))
    return [IndividualConstraint(left, reln, right)
            for left, reln, right in ordered]
# PREDICATES AND PREDICATIONS
class Pred(namedtuple('Pred', ('type', 'lemma', 'pos', 'sense', 'string'))):
"""
A semantic predicate.
**Abstract** predicates don't begin with an underscore, and they
generally are defined as types in a grammar. **Surface** predicates
always begin with an underscore (ignoring possible quotes), and are
often defined as strings in a lexicon.
In PyDelphin, Preds are equivalent if they have the same lemma,
pos, and sense, and are both abstract or both surface preds. Other
factors are ignored for comparison, such as their being surface-,
abstract-, or real-preds, whether they are quoted or not, whether
they end with `_rel` or not, or differences in capitalization.
Hashed Pred objects (e.g., in a dict or set) also use the
normalized form. However, unlike with equality comparisons,
Pred-formatted strings are not treated as equivalent in a hash.
Args:
type: the type of predicate; valid values are
Pred.ABSTRACT, Pred.REALPRED, and Pred.SURFACE,
although in practice Preds are instantiated via
classmethods that select the type
lemma: the lemma of the predicate
pos: the part-of-speech; a single, lowercase character
sense: the (often omitted) sense of the predicate
Returns:
a Pred object
Attributes:
type: predicate type (Pred.ABSTRACT, Pred.REALPRED,
and Pred.SURFACE)
lemma: lemma component of the predicate
pos: part-of-speech component of the predicate
sense: sense component of the predicate
Example:
Preds are compared using their string representations.
Surrounding quotes (double or single) are ignored, and
capitalization doesn't matter. In addition, preds may be
compared directly to their string representations:
>>> p1 = Pred.surface('_dog_n_1_rel')
>>> p2 = Pred.realpred(lemma='dog', pos='n', sense='1')
>>> p3 = Pred.abstract('dog_n_1_rel')
>>> p1 == p2
True
>>> p1 == '_dog_n_1_rel'
True
>>> p1 == p3
False
"""
pred_re = re.compile(
r'_?(?P<lemma>.*?)_' # match until last 1 or 2 parts
r'((?P<pos>[a-z])_)?' # pos is always only 1 char
r'((?P<sense>([^_\\]|(?:\\.))+)_)?' # no unescaped _s
r'(?P<end>rel)$',
re.IGNORECASE
)
# Pred types (used mainly in input/output, not internally in pyDelphin)
ABSTRACT = GRAMMARPRED = 0 # only a string allowed (quoted or not)
REALPRED = 1 # may explicitly define lemma, pos, sense
SURFACE = STRINGPRED = 2 # quoted string form of realpred
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Pred):
other = Pred.surface(other)
return self.short_form().lower() == other.short_form().lower()
def __str__ (self):
return self.string
def __repr__(self):
return '<Pred object {} at {}>'.format(self.string, id(self))
def __hash__(self):
return hash(self.short_form())
@classmethod
@deprecated(final_version='1.0.0', alternative='Pred.surface()')
def stringpred(cls, predstr):
"""Instantiate a Pred from its quoted string representation."""
return cls.surface(predstr)
@classmethod
def surface(cls, predstr):
"""Instantiate a Pred from its quoted string representation."""
lemma, pos, sense, _ = split_pred_string(predstr)
return cls(Pred.SURFACE, lemma, pos, sense, predstr)
@classmethod
@deprecated(final_version='1.0.0', alternative='Pred.abstract()')
def grammarpred(cls, predstr):
"""Instantiate a Pred from its symbol string."""
return cls.abstract(predstr)
@classmethod
def abstract(cls, predstr):
"""Instantiate a Pred from its symbol string."""
lemma, pos, sense, _ = split_pred_string(predstr)
return cls(Pred.ABSTRACT, lemma, pos, sense, predstr)
@classmethod
@deprecated(final_version='1.0.0', alternative='Pred.surface_or_abstract()')
def string_or_grammar_pred(cls, predstr):
"""Instantiate a Pred from either its surface or abstract symbol."""
return cls.surface_or_abstract(predstr)
@classmethod
def surface_or_abstract(cls, predstr):
"""Instantiate a Pred from either its surface or abstract symbol."""
if predstr.strip('"').lstrip("'").startswith('_'):
return cls.surface(predstr)
else:
return cls.abstract(predstr)
@classmethod
def realpred(cls, lemma, pos, sense=None):
"""Instantiate a Pred from its components."""
string_tokens = [lemma]
if pos is not None:
string_tokens.append(pos)
if sense is not None:
sense = str(sense)
string_tokens.append(sense)
predstr = '_'.join([''] + string_tokens + ['rel'])
return cls(Pred.REALPRED, lemma, pos, sense, predstr)
def short_form(self):
"""
Return the pred string without quotes or a `_rel` suffix.
The short form is the same as the normalized form from
:func:`normalize_pred_string`.
Example:
>>> p = Pred.surface('"_cat_n_1_rel"')
>>> p.short_form()
'_cat_n_1'
"""
return normalize_pred_string(self.string)
def is_quantifier(self):
"""
Return `True` if the predicate has a quantifier part-of-speech.
*Deprecated since v0.6.0*
"""
warnings.warn(
'Deprecated; try checking xmrs.nodeids(quantifier=True)',
DeprecationWarning
)
return self.pos == QUANTIFIER_POS
def split_pred_string(predstr):
    """
    Split *predstr* and return the (lemma, pos, sense, suffix) components.

    Examples:
        >>> split_pred_string('_dog_n_1_rel')
        ('dog', 'n', '1', 'rel')
        >>> split_pred_string('quant_rel')
        ('quant', None, None, 'rel')
    """
    predstr = predstr.strip('"\'')  # surrounding quotes don't matter
    # The regex requires a _rel suffix, so append one temporarily when
    # missing and report None for the suffix afterwards.
    rel_added = not predstr.lower().endswith('_rel')
    if rel_added:
        logging.debug('Predicate does not end in "_rel": {}'
                      .format(predstr))
        predstr += '_rel'
    match = Pred.pred_re.search(predstr)
    if match is None:
        logging.debug('Unexpected predicate string: {}'.format(predstr))
        return (predstr, None, None, None)
    # _lemma_pos(_sense)?_end
    suffix = None if rel_added else match.group('end')
    return (match.group('lemma'), match.group('pos'),
            match.group('sense'), suffix)
def is_valid_pred_string(predstr):
    """
    Return `True` if *predstr* is a valid predicate string.

    Examples:
        >>> is_valid_pred_string('"_dog_n_1_rel"')
        True
        >>> is_valid_pred_string('_dog_n_1')
        True
        >>> is_valid_pred_string('_dog_noun_1')
        False
        >>> is_valid_pred_string('dog_noun_1')
        True
    """
    predstr = predstr.strip('"').lstrip("'")
    # Stricter than Pred.pred_re, but does not validate the POS value:
    # surface preds are _lemma_pos(_sense)?(_rel)?; abstract preds must
    # not begin with an underscore.
    surface = r'_([^ _\\]|\\.)+_[a-z](_([^ _\\]|\\.)+)?(_rel)?$'
    abstract = r'|[^_]([^ \\]|\\.)+(_rel)?$'
    return re.match(surface + abstract, predstr) is not None
def normalize_pred_string(predstr):
    """
    Normalize the predicate string *predstr* to a conventional form.

    This makes predicate strings more consistent by removing quotes and
    the `_rel` suffix, and by lowercasing them.

    Examples:
        >>> normalize_pred_string('"_dog_n_1_rel"')
        '_dog_n_1'
        >>> normalize_pred_string('_dog_n_1')
        '_dog_n_1'
    """
    # Keep only the lemma/pos/sense components that are present.
    parts = [p for p in split_pred_string(predstr)[:3] if p is not None]
    # Surface predicates retain their leading underscore.
    if predstr.lstrip('\'"').startswith('_'):
        parts.insert(0, '')
    return '_'.join(parts).lower()
class Node(
namedtuple('Node', ('nodeid', 'pred', 'sortinfo',
'lnk', 'surface', 'base', 'carg')),
_LnkMixin):
"""
A DMRS node.
Nodes are very simple predications for DMRSs. Nodes don't have
arguments or labels like :class:`ElementaryPredication` objects,
but they do have a property for CARGs and contain their variable
sort and properties in `sortinfo`.
Args:
nodeid: node identifier
pred (:class:`Pred`): semantic predicate
sortinfo (dict, optional): mapping of morphosyntactic
properties and values; the `cvarsort` property is
specified in this mapping
lnk (:class:`Lnk`, optional): surface alignment
surface (str, optional): surface string
base (str, optional): base form
carg (str, optional): constant argument string
Attributes:
Attributes:
pred (:class:`Pred`): semantic predicate
sortinfo (dict): mapping of morphosyntactic
properties and values; the `cvarsort` property is
specified in this mapping
lnk (:class:`Lnk`): surface alignment
surface (str): surface string
base (str): base form
carg (str): constant argument string
cfrom (int): surface alignment starting position
cto (int): surface alignment ending position
"""
def __new__(cls, nodeid, pred, sortinfo=None,
lnk=None, surface=None, base=None, carg=None):
if sortinfo is None:
sortinfo = {}
elif not isinstance(sortinfo, MutableMapping):
sortinfo = dict(sortinfo)
return super(Node, cls).__new__(
cls, nodeid, pred, sortinfo, lnk, surface, base, carg
)
def __repr__(self):
lnk = ''
if self.lnk is not None:
lnk = str(self.lnk)
return '<Node object ({} [{}{}]) at {}>'.format(
self.nodeid, self.pred.string, lnk, id(self)
)
# note: without overriding __eq__, comparisons of sortinfo will be
# be different if they are OrderedDicts and not in the same
# order. Maybe this isn't a big deal?
# def __eq__(self, other):
# # not doing self.__dict__ == other.__dict__ right now, because
# # functions like self.get_property show up there
# snid = self.nodeid
# onid = other.nodeid
# return ((None in (snid, onid) or snid == onid) and
# self.pred == other.pred and
# # make one side a regular dict for unordered comparison
# dict(self.sortinfo.items()) == other.sortinfo and
# self.lnk == other.lnk and
# self.surface == other.surface and
# self.base == other.base and
# self.carg == other.carg)
def __lt__(self, other):
warnings.warn("Deprecated", DeprecationWarning)
x1 = (self.cfrom, self.cto, self.pred.pos != QUANTIFIER_POS,
self.pred.lemma)
try:
x2 = (other.cfrom, other.cto, other.pred.pos != QUANTIFIER_POS,
other.pred.lemma)
return x1 < x2
except AttributeError:
return NotImplemented
@property
def cvarsort(self):
"""
The "variable" type of the predicate.
Note:
DMRS does not use variables, but it is useful to indicate
whether a node is an individual, eventuality, etc., so this
property encodes that information.
"""
return self.sortinfo.get(CVARSORT)
@cvarsort.setter
def cvarsort(self, value):
self.sortinfo[CVARSORT] = value
@property
def properties(self):
"""
Morphosemantic property mapping.
Unlike :attr:`sortinfo`, this does not include `cvarsort`.
"""
d = dict(self.sortinfo)
if CVARSORT in d:
del d[CVARSORT]
return d
def is_quantifier(self):
"""
Return `True` if the Node's predicate appears to be a quantifier.
*Deprecated since v0.6.0*
"""
warnings.warn(
'Deprecated; try checking xmrs.nodeids(quantifier=True)',
DeprecationWarning
)
return self.pred.is_quantifier()
def nodes(xmrs):
"""Return the list of Nodes for *xmrs*."""
nodes = []
_props = xmrs.properties
varsplit = sort_vid_split
for p in xmrs.eps():
sortinfo = None
iv = p.intrinsic_variable
if iv is not None:
sort, _ = varsplit(iv)
sortinfo = _props(iv)
sortinfo[CVARSORT] = sort
nodes.append(
Node(p.nodeid, p.pred, sortinfo, p.lnk, p.surface, p.base, p.carg)
)
return nodes
class ElementaryPredication(
        namedtuple('ElementaryPredication',
                   ('nodeid', 'pred', 'label', 'args', 'lnk', 'surface', 'base')),
        _LnkMixin):
    """
    An MRS elementary predication (EP).
    EPs combine a predicate with various structural semantic
    properties. They must have a `nodeid`, `pred`, and `label`.
    Arguments and other properties are optional. Note nodeids are not a
    formal property of MRS (unlike DMRS, or the "anchors" of RMRS), but
    they are required for Pydelphin to uniquely identify EPs in an
    :class:`~delphin.mrs.xmrs.Xmrs`. Intrinsic arguments (`ARG0`) are
    not required, but they are important for many semantic operations,
    and therefore it is a good idea to include them.
    Args:
        nodeid: a nodeid
        pred (:class:`Pred`): semantic predicate
        label (str): scope handle
        args (dict, optional): mapping of roles to values
        lnk (:class:`Lnk`, optional): surface alignment
        surface (str, optional): surface string
        base (str, optional): base form
    Attributes:
        nodeid: a nodeid
        pred (:class:`Pred`): semantic predicate
        label (str): scope handle
        args (dict): mapping of roles to values
        lnk (:class:`Lnk`): surface alignment
        surface (str): surface string
        base (str): base form
        cfrom (int): surface alignment starting position
        cto (int): surface alignment ending position
    """
    def __new__(cls, nodeid, pred, label, args=None,
                lnk=None, surface=None, base=None):
        # Create a fresh dict per instance; a shared mutable default
        # would alias args across EPs.
        if args is None:
            args = {}
        # else:
        #     args = dict((a.rargname, a) for a in args)
        return super(ElementaryPredication, cls).__new__(
            cls, nodeid, pred, label, args, lnk, surface, base
        )
    def __repr__(self):
        return '<ElementaryPredication object ({} ({})) at {}>'.format(
            self.pred.string, str(self.iv or '?'), id(self)
        )
    def __lt__(self, other):
        # Deprecated ordering: by surface span, quantifiers before
        # non-quantifiers at the same span, then by predicate lemma.
        warnings.warn("Deprecated", DeprecationWarning)
        x1 = (self.cfrom, self.cto, -self.is_quantifier(), self.pred.lemma)
        try:
            x2 = (other.cfrom, other.cto, -other.is_quantifier(),
                  other.pred.lemma)
            return x1 < x2
        except AttributeError:
            # *other* is not EP-like; let Python try the reflected operation.
            return NotImplemented
    # these properties are specific to the EP's qualities
    @property
    def intrinsic_variable(self):
        """
        The value of the intrinsic argument (likely `ARG0`).
        """
        if IVARG_ROLE in self.args:
            return self.args[IVARG_ROLE]
        return None
    #: A synonym for :attr:`ElementaryPredication.intrinsic_variable`
    iv = intrinsic_variable
    @property
    def carg(self):
        """
        The value of the constant argument.
        """
        return self.args.get(CONSTARG_ROLE, None)
    def is_quantifier(self):
        """
        Return `True` if this is a quantifier predication.
        """
        return RSTR_ROLE in self.args
def elementarypredications(xmrs):
    """
    Return the list of :class:`ElementaryPredication` objects in *xmrs*.
    """
    return [ElementaryPredication(*ep_data) for ep_data in xmrs.eps()]
def elementarypredication(xmrs, nodeid):
    """
    Retrieve the elementary predication with the given nodeid.
    Args:
        nodeid: nodeid of the EP to return
    Returns:
        :class:`ElementaryPredication`
    Raises:
        :exc:`KeyError`: if no EP matches
    """
    ep_data = xmrs.ep(nodeid)
    return ElementaryPredication(*ep_data)
|
984,064 | 96c4300ef01471bdb6cbc559c69ec94766ac1079 | from django.template import loader, Context, Template
from django.http import HttpResponse, Http404
from .models import Tag, StartUp, NewsLink
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import View, DetailView as OrigDetailView
from .forms import TagForm, StartUpForm, NewsLinkForm
from .utils import (
CreateView, ObjectUpdateMixin, ObjectDeleteMixin,
DetailView)
from django.urls import reverse_lazy, reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.contrib.auth.decorators import method_decorator, login_required
# def tag_list(request):
# tag_list = Tag.objects.all()
# template = loader.get_template('organizer/tag_list.html')
# rendered_template = template.render({'tag_list': tag_list})
# return HttpResponse(rendered_template)
class TagList(View):
    """Render the complete, unpaginated list of Tag objects."""
    template_name = 'organizer/tag_list.html'
    model = Tag

    def get(self, request, page_number=None):
        """GET: show every tag (``page_number`` is accepted but unused)."""
        tags = self.model.objects.all()
        context = {'tag_list': tags}
        return render(request, self.template_name, context)
class TagPageList(View):
    """Paginated tag list; the page number comes from the URL path
    (named route ``organizer_tag_page``)."""
    template_name = 'organizer/tag_list.html'
    paginate_by = 5  # tags per page
    def get(self, request, page_number):
        tags = Tag.objects.all()
        paginator = Paginator(tags, self.paginate_by)
        try:
            page = paginator.page(page_number)
        except PageNotAnInteger:
            # Non-numeric page value: fall back to the first page.
            page = paginator.page(1)
        except EmptyPage:
            # Out-of-range page value: clamp to the last page.
            page = paginator.page(paginator.num_pages)
        # Build previous/next URLs from the named route (None when absent).
        if page.has_previous():
            prev_url = reverse(
                'organizer_tag_page',
                args=(
                    page.previous_page_number(),
                ))
        else:
            prev_url = None
        if page.has_next():
            next_url = reverse(
                'organizer_tag_page',
                args=(
                    page.next_page_number(),
                ))
        else:
            next_url = None
        context = {
            'is_paginated':
                page.has_other_pages(),
            'next_page_url': next_url,
            'paginator': paginator,
            'previous_page_url': prev_url,
            'tag_list': page,
        }
        return render(
            request, self.template_name, context)
# def tag_detail(request, slug):
# print(str(type(request)).center(300, '|'))
# # try
# # tag = Tag.objects.get(slug__iexact='hi')
# # except Tag.DoesNotExist:
# # raise Http404
# tag = get_object_or_404(Tag, slug__iexact=slug)
# # template = loader.get_template('organizer/tag_detail.html')
# # rendered_template = template.render({'tag':tag})
# # return HttpResponse(rendered_template)
# return render(request, 'organizer/tag_detail.html', {'tag': tag})
class TagDetail(OrigDetailView):
    """Detail page for a single Tag via Django's generic DetailView."""
    template_name = 'organizer/tag_detail.html'
    context_object_name = 'tag'

    def get_queryset(self):
        """Look the tag up among all Tag rows."""
        queryset = Tag.objects.all()
        return queryset
class StartUpDetail(DetailView):
    """Detail page for a single StartUp (uses the project's DetailView
    helper from .utils)."""
    template_name = 'organizer/startup_detail.html'
    model = StartUp
    context_object_name = 'startup'
class StartUpList(View):
    """Paginated list of StartUp objects.

    The page number is read from the ``page`` query-string parameter;
    non-integer or out-of-range values are clamped to a valid page.
    """
    template_name = 'organizer/startup_list.html'
    # FIX: ``model`` was assigned twice; keep a single definition.
    model = StartUp
    paginate_by = 5
    page_kwarg = 'page'

    def get(self, request):
        paginator = Paginator(self.model.objects.all(), self.paginate_by)
        # FIX: default to page 1 when the parameter is absent (a missing
        # default handed ``None`` to the paginator and relied on the
        # PageNotAnInteger fallback).  Debug ``print`` calls removed.
        page_number = request.GET.get(self.page_kwarg, 1)
        try:
            page = paginator.page(page_number)
        except PageNotAnInteger:
            page = paginator.page(1)
        except EmptyPage:
            page = paginator.page(paginator.num_pages)
        if page.has_previous():
            prev_url = f'?{self.page_kwarg}={page.previous_page_number()}'
        else:
            prev_url = None
        if page.has_next():
            next_url = f'?{self.page_kwarg}={page.next_page_number()}'
        else:
            next_url = None
        return render(request, self.template_name, {
            'startup_list': page,
            'paginator': paginator,
            'is_paginated': page.has_other_pages(),
            'previous_page_url': prev_url,
            'next_page_url': next_url,
        })
# def tag_create(request):
# if request.method == 'POST':
# form = TagForm(request.POST)
# if form.is_valid():
# new_tag = form.save()
# return redirect(new_tag)
# else: # request.method != 'POST'
# form = TagForm()
# return render(request, 'organizer/tag_form.html', {'form': form})
class TagCreate(CreateView, View):
    """Login-required form view for creating a Tag."""
    form_class = TagForm
    template_name = 'organizer/tag_form.html'

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # BUG FIX: ``request`` must be forwarded to super().dispatch();
        # the original dropped it, shifting the argument list by one.
        # NOTE(review): this module imports ``method_decorator`` from
        # django.contrib.auth.decorators — it lives in
        # django.utils.decorators; confirm the import line above.
        return super().dispatch(request, *args, **kwargs)
class StartUpCreate(CreateView, View):
    """Form view for creating a StartUp."""
    form_class = StartUpForm
    # BUG FIX: the template path was missing the '.html' suffix used by
    # every other template reference in this module, so the template
    # loader could not find it.
    template_name = 'organizer/startup_form.html'
class StartUpDelete(ObjectDeleteMixin, View):
    """Confirm-and-delete view for StartUp; redirects to the startup
    list on success."""
    model = StartUp
    success_url = reverse_lazy('organizer_startup_list')
    template_name = 'organizer/startup_confirm_delete.html'
class NewsLinkCreate(CreateView, View):
    """Form view for creating a NewsLink."""
    form_class = NewsLinkForm
    template_name = 'organizer/newslink_form.html'
class NewsLinkUpdate(View):
    """Edit an existing NewsLink identified by primary key."""
    form_class = NewsLinkForm
    template_name = 'organizer/newslink_form_update.html'

    def get(self, request, pk):
        """Render the edit form pre-filled with the existing NewsLink."""
        newslink = get_object_or_404(NewsLink, pk=pk)
        # BUG FIX: bind the form to the fetched *instance*; the original
        # passed the NewsLink model class, so the form rendered empty.
        return render(request, self.template_name, {
            'form': self.form_class(instance=newslink),
            'newslink': newslink,
        })

    def post(self, request, pk):
        """Validate and save the edited NewsLink, or re-render with errors."""
        newslink = get_object_or_404(NewsLink, pk=pk)
        bound_form = self.form_class(request.POST, instance=newslink)
        if bound_form.is_valid():
            return redirect(bound_form.save())
        return render(request, self.template_name, {
            'form': bound_form,
            'newslink': newslink,
        })
class TagUpdate(ObjectUpdateMixin, View):
    """Edit view for Tag objects (form handling via ObjectUpdateMixin)."""
    form_class = TagForm
    model = Tag
    template_name = 'organizer/tag_form_update.html'
class TagDelete(ObjectDeleteMixin, View):
    """Confirm-and-delete view for Tag; redirects to the tag list on
    success."""
    model = Tag
    success_url = reverse_lazy('organizer_tag_list')
    template_name = 'organizer/tag_confirm_delete.html'
class StartUpUpdate(ObjectUpdateMixin, View):
    """Edit view for StartUp objects (form handling via ObjectUpdateMixin)."""
    form_class = StartUpForm
    model = StartUp
    # BUG FIX: the attribute was misnamed ``template`` — the sibling
    # views (e.g. TagUpdate) use ``template_name``, so the setting was
    # silently ignored by the mixin.
    # NOTE(review): 'start_form_update.html' may itself be a typo for
    # 'startup_form_update.html' — confirm against the templates directory.
    template_name = 'organizer/start_form_update.html'
class NewsLinkDelete(View):
    """Confirm-and-delete view for a NewsLink."""

    def get(self, request, pk):
        """Show the delete-confirmation page for the link."""
        link = get_object_or_404(NewsLink, pk=pk)
        context = {'newslink': link}
        return render(
            request, 'organizer/newslink_confirm_delete.html', context)

    def post(self, request, pk):
        """Delete the link, then redirect to its parent startup."""
        link = get_object_or_404(NewsLink, pk=pk)
        parent = link.startup
        link.delete()
        return redirect(parent)
|
984,065 | ed4aab18881afea20a514f04491e60c225566f61 | import pygame
from pygame import *
from ImageControl import *
from Physics import *
from Point import *
from Enemy import *
from Camera import *
class TerrainSurface(pygame.sprite.Sprite):
    """Static sprite holding the stage-4 terrain artwork, scaled via
    ImageControl; its alpha channel later drives the collision mask."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("../graphics/Stage4/stage4.png").convert_alpha()
        self.image = ImageControl.fixScale(self.image)
        self.rect = self.image.get_rect()
class Stage4:
    """Fourth game stage: side-scrolling terrain level with mask-based
    collision.

    ``update()`` runs the frame loop until the stage ends and returns
    ``(next_stage_key, True, False)`` to the stage controller.
    """
    def __init__(self, controller, window, sound, player, sprite, fonts):
        self.control = controller
        self.name = "Stage4"
        # Stays "Stage4" while the level runs; changed on win ("Stage5")
        # or on a terrain hit ("Menu").
        self.nextStageKey = "Stage4"
        self.window = window
        self.sound = sound
        self.player = player
        self.player.newLevel(50, 400)
        self.sprite = sprite
        level_dimensions = ImageControl.fixValues(4650, 1000)
        self.camera = Camera(self.window.camera, level_dimensions)
        self.entities = pygame.sprite.Group()
        self.entities.add(self.player)
        self.objects = pygame.sprite.Group()
        self.surface_terreno = pygame.Surface(ImageControl.fixValues(1000, 1000))
        self.mask_terreno = None
        self.background = None
        self.loadImages()
        self.player.physics = Physics()
        #self.define_enemies()
        self.i = True
    def define_enemies(self):
        """Spawn the fixed set of 'pelo' enemies (currently disabled)."""
        self.create_enemies(ImageControl.fixValues(300, 500))
        self.create_enemies(ImageControl.fixValues(700, 500))
        self.create_enemies(ImageControl.fixValues(700, -200), True)
    def create_enemies(self, pos, inverted=False):
        """Add one 'pelo' enemy at *pos*; *inverted* flips it vertically."""
        enemy = Enemy("pelo")
        if not inverted:
            enemy.image = self.peloimage
        else:
            enemy.image = pygame.transform.flip(self.peloimage, False, True)
        enemy.rect = self.peloimage.get_rect()
        enemy.rect.x = pos[0]
        enemy.rect.y = pos[1]
        self.objects.add(enemy)
    def loadImages(self):
        """Load and scale the stage artwork and build the terrain mask."""
        self.background = pygame.image.load("../graphics/Textures/skin3.jpg").convert_alpha()
        self.background = ImageControl.fixScale(self.background)
        self.surface_terreno = TerrainSurface()
        self.peloimage = pygame.image.load("../graphics/Plataformas/pelo.png").convert_alpha()
        self.peloimage = ImageControl.fixScale(self.peloimage)
        # self.entities.add(self.background)
        self.entities.add(self.surface_terreno)
        self.player.loadImages()
        self.mask_terreno = pygame.mask.from_surface(self.surface_terreno.image)
    def scene_imgs(self):
        """Blit rotated objects and all entities through the camera."""
        for o in self.objects:
            image = pygame.transform.rotate(o.image, o.angle)
            self.window.windowScreen.blit(image, self.camera.apply(o))
        for e in self.entities:
            self.window.windowScreen.blit(e.image, self.camera.apply(e))
        #ImageControl.setImageAt(self.window, self.background, (0, 0))
        #ImageControl.repeatImage(self.window, self.background, True)
    def object_actions(self):
        """Run each object's per-frame behaviour."""
        for o in self.objects:
            o.action()
    def collision_terrain(self):
        """Check the level-end line and player-vs-terrain mask overlap."""
        collision_pos = self.mask_terreno.overlap(self.player.mask_player, self.player.point.ixy())
        if collision_pos is None:
            # No terrain collision this frame.
            pass
        if self.player.point.xy()[0] > ImageControl.defineX(4600):  # reached the end of the stage
            self.nextStageKey = "Stage5"
        # FIX: idiomatic None test ('is not None' instead of '!= None').
        elif collision_pos is not None:
            # Terrain hit: back to the menu.
            self.nextStageKey = "Menu"
    def collision(self):
        """Object collisions slow the player down; then check the terrain."""
        for o in self.objects:
            collision_pos = pygame.sprite.collide_mask(o, self.player)
            if collision_pos is None:
                # No collision with this object.
                pass
            else:
                self.player.speed = 2
        self.collision_terrain()
    def update(self):
        """Run the 60 FPS stage loop; return (next_stage_key, True, False)."""
        # BUG FIX: compare the string with '==', not 'is' — identity on a
        # str literal only works via CPython interning and emits a
        # SyntaxWarning on modern interpreters.
        while self.nextStageKey == "Stage4":
            self.player.movement(self.control.checkPressed())
            self.player.update()
            self.camera.update(self.player)
            self.scene_imgs()
            self.object_actions()
            pygame.display.flip()
            pygame.time.Clock().tick(60)
            self.window.windowScreen.fill((255, 255, 255))
            self.collision()
        return self.nextStageKey, True, False
|
984,066 | 7bbb9c8e24b21b6ad3709285b0fc202c7857640f | def find_sum_of_lists(nums):
    """Print the negative and positive sums of *nums*, then report which
    side has the larger absolute value (ties count as negatives winning)."""
    positive = sum(filter(lambda x: x > 0, nums))
    negative = sum(filter(lambda x: x < 0, nums))
    print(negative)
    print(positive)
    # abs() because ``negative`` is <= 0.
    if positive > abs(negative):
        print("The positives are stronger than the negatives")
    else:
        print("The negatives are stronger than the positives")
# Script entry: read whitespace-separated integers from one stdin line.
numbers = list(map(int, input().split()))
find_sum_of_lists(numbers)
|
984,067 | 1b092146c890b3af60600e6b37f70a47ce6f59a4 | # -*- coding: utf-8 -*-
"""
Created on Sun May 23 19:13:16 2021
@author: subhankar
"""
'''
P: Prime number
G: G<P & G is primitive root of P
Key generation:-
XA: XA<P -> private key
YA: YA = G^(XA) mod P -> public key
XB: XB<P -> private key
YB: YB = G^(XB) mod P -> public key
secret key of:-
A: k = (YB)^XA mod P
B: k = (YA)^XB mod P
note: kA will be exactly equal to kB
'''
import sympy
import random
from math import sqrt
def generatePrime():
    """Return a random prime drawn from the interval [20000, 2000000)."""
    lower_bound, upper_bound = 20000, 2000000
    return sympy.randprime(lower_bound, upper_bound)
""" Iterative Function to calculate (x^n)%p
in O(logy) */"""
def power( x, y, p):
    """Modular exponentiation: return (x ** y) % p in O(log y) steps
    using iterative square-and-multiply."""
    result = 1
    base = x % p
    exponent = y
    while exponent > 0:
        # Multiply the result in whenever the current exponent bit is set.
        if exponent % 2 == 1:
            result = (result * base) % p
        exponent //= 2
        base = (base * base) % p
    return result
def findPrimefactors(s, n) :
    """Add the distinct prime factors of ``n`` to the set ``s``.

    Mutates ``s`` in place; returns None.
    """
    # Strip out every factor of 2 first so only odd candidates remain.
    while (n % 2 == 0) :
        s.add(2)
        n = n // 2
    # BUG FIX: the upper bound must include int(sqrt(n)) itself
    # (``+ 1``); otherwise perfect squares of odd primes (9, 25, 49, ...)
    # fell through to the final branch and were reported as their own
    # "prime" factor.
    for i in range(3, int(sqrt(n)) + 1, 2):
        while (n % i == 0) :
            s.add(i)
            n = n // i
    # Whatever remains above 2 is a single prime factor larger than
    # sqrt of the reduced n.
    if (n > 2) :
        s.add(n)
def findPrimitive(n):
    """Return the list of all primitive roots modulo prime ``n``, or -1
    when ``n`` is not prime or no root is found."""
    s = set()
    # Primitive roots are only sought for primes here.
    if(not sympy.isprime(n)):
        return -1
    phi = n - 1
    # Collect the distinct prime factors of phi = n - 1.
    findPrimefactors(s, phi)
    roots = []
    for r in range(2, phi + 1):
        flag = False
        # r is a primitive root iff r^(phi/q) != 1 (mod n) for every
        # prime factor q of phi.
        for it in s:
            if (power(r, phi // it, n) == 1):
                flag = True
                break
        if (flag == False):
            roots.append(r)
    if(len(roots)>0):
        return roots
    # If no primitive root found
    return -1
def generate_key():
    """Demonstrate a Diffie-Hellman key exchange.

    Picks a prime P and a primitive root G, derives both parties'
    private/public key pairs, and prints the shared secrets (which are
    equal by construction).
    """
    P = generatePrime()
    print("Prime number:", P)
    # Any primitive root of P works as the public generator.
    G = random.choice(findPrimitive(P))
    print("Primitive root:", G)
    # Party A: private XA < P, public YA = G^XA mod P.
    XA = random.randint(20000, P)
    YA = power(G,XA,P)
    print("private & public key of a:", XA, YA)
    # Party B: private XB < P, public YB = G^XB mod P.
    XB = random.randint(20000, P)
    YB = power(G,XB,P)
    print("private & public key of b:", XB, YB)
    # Shared secrets: A computes YB^XA, B computes YA^XB — identical.
    A = power(YB,XA,P)
    B = power(YA,XB,P)
    print("secret key of a & b:", A, B)
# generate_key() |
984,068 | 76948c56d7155f49d797a1c744a4c8551ef2149a | import torch
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
class Policy(torch.nn.Module):
    """Actor-critic network: one shared hidden layer feeding a Gaussian
    policy head (with a learned, state-independent sigma) and a scalar
    value head."""

    def __init__(self, state_space, action_space):
        super().__init__()
        self.state_space = state_space
        self.action_space = action_space
        self.hidden = 256
        self.fc1 = torch.nn.Linear(state_space, self.hidden)
        self.fc2_mean = torch.nn.Linear(self.hidden, action_space)
        # Critic head: shared features -> scalar state value.
        self.fc3 = torch.nn.Linear(self.hidden, 1)
        # Learned std-dev parameter; kept positive via softplus in forward().
        self.sigma = torch.nn.Parameter(torch.tensor([10.0], requires_grad=True))
        self.init_weights()

    def init_weights(self):
        """Normal-initialise weights and zero the biases of every linear layer."""
        for m in self.modules():
            if type(m) is torch.nn.Linear:
                torch.nn.init.normal_(m.weight)
                torch.nn.init.zeros_(m.bias)

    def evaluate(self, state, action):
        """Return (log-prob of *action* under the current policy, entropy).

        *action* is expected 1-D and is unsqueezed to match the (N, 1)
        batch shape of the action distribution.
        """
        action_dist, _ = self.forward(state)
        log_prob = action_dist.log_prob(action.unsqueeze(1))
        # BUG FIX: removed a leftover debug ``print(log_prob)`` that
        # flooded stdout on every optimisation minibatch.
        entropy = action_dist.entropy()
        return log_prob, entropy

    def forward(self, x):
        """Return (action distribution, state value) for a batch of states."""
        # Shared trunk
        x = self.fc1(x)
        x = F.relu(x)
        # Actor head
        action_mean = self.fc2_mean(x)
        sigma = F.softplus(self.sigma)  # strictly positive std-dev
        # Critic head
        value = self.fc3(x)
        action_dist = Normal(action_mean, sigma)
        return action_dist, value
class Agent(object):
    """
    Agent using a minimal actor-critic neural network and Proximal Policy Optimization
    """
    def __init__(self):
        # Hyperparameters are loaded from a simple "key = value" text file
        # whose first line must be the literal header below.
        self.name = "pong_bot"
        self.policy_file_suffix = "ppo_policy.pth"
        hyperparam_file = "ppo_params.txt"
        with open(hyperparam_file) as file:
            lines = file.readlines()
        if lines[0].strip() != "PPO hyperparameters":
            raise ValueError("Incorrect file identifier")
        lines = lines[1:]
        params = {line.split("=")[0].strip(): line.split("=")[1].strip() for line in lines}
        self.learning_rate = float(params["learning_rate"])
        self.gamma = float(params["gamma"])
        self.epochs = int(params["epochs"])
        self.clip_epsilon = float(params["clip_epsilon"])
        self.vf_coeff = float(params["vf_coeff"])
        self.ent_coeff = float(params["ent_coeff"])
        self.norm_adv = params["norm_adv"].lower() == "true"
        self.clip_vloss = params["clip_vloss"].lower() == "true"
        self.max_grad_norm = float(params["max_grad_norm"])
        self.gae = params["gae"].lower() == "true"
        self.gae_lambda = float(params["gae_lambda"])
        self.training_device = "cuda" if torch.cuda.is_available() else "cpu"
        # Set up the policy
        self.state_space = 4
        self.action_space = 1
        self.policy = Policy(self.state_space, self.action_space)
        self.policy = self.policy.to(self.training_device)
        # old_policy collects rollouts; policy is the one being optimised.
        self.old_policy = Policy(self.state_space, self.action_space).to(self.training_device)
        self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self.learning_rate)
        self.MseLoss = torch.nn.MSELoss()
        # State transition buffers
        self.states = None
        self.state_values = None
        self.actions = None
        self.action_probs = None
        self.rewards = None
        self.dones = None
    def init_memory(self, num_steps):
        """Allocate zeroed rollout buffers of length ``num_steps`` on the
        training device."""
        self.states = torch.zeros((num_steps, self.state_space )).to(self.training_device)
        self.state_values = torch.zeros((num_steps, 1)).to(self.training_device)
        self.actions = torch.zeros((num_steps, 1)).to(self.training_device)
        self.action_probs = torch.zeros((num_steps, 1)).to(self.training_device)
        self.rewards = torch.zeros((num_steps, 1)).to(self.training_device)
        self.dones = torch.zeros((num_steps, 1)).to(self.training_device)
    def get_action(self, state):
        """
        Given the observation, stochastically choose an action following the old policy.
        :param observation: observed state, has the shape of the environment state space vector.
        :return: chosen action, logarithmic probability of the action, and distribution entropy
        """
        state = torch.from_numpy(state).float().to(self.training_device)
        action_dist, _ = self.old_policy.forward(state)
        action = action_dist.sample()
        return action
    def evaluate(self, state, action):
        """Return (log-prob, entropy) of *action*: uses the current policy
        when *state* is already a tensor (optimisation path), the old
        policy when *state* is a numpy array (rollout path)."""
        if type(state) is torch.Tensor:
            #print('new')
            obs = state
            return self.policy.evaluate(obs, action)
        else:
            #print('old')
            obs = torch.from_numpy(state).float().to(self.training_device)
            return self.old_policy.evaluate(obs, action)
    def update_policy(self, minibatch_size):
        """
        Update the policy with PPO. Gets the necessary data from state transition buffers.
        :param minibatch_size: size of the minibatch for optimization
        :return: Nothing
        """
        steps = self.rewards.shape[0]
        batch_size = self.rewards.shape[0] * self.rewards.shape[1]
        #steps = 500
        #batch_size = 500
        #print(steps)
        #print(batch_size)
        # Compute advantages
        '''
        with torch.no_grad():
            if self.gae:
                advantages = torch.zeros_like(self.rewards).to(self.training_device)
                lastgaelam = 0
                for t in reversed(range(steps)):
                    if t == steps - 1:
                        nextnonterminal = 1.0 - self.dones[t]
                        nextvalues = self.state_values[t]
                    else:
                        nextnonterminal = 1.0 - self.dones[t + 1]
                        nextvalues = self.state_values[t + 1]
                    delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]
                    advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam
                returns = advantages + self.state_values
            else:
                returns = torch.zeros_like(self.rewards).to(self.training_device)
                for t in reversed(range(steps)):
                    if t == steps - 1:
                        nextnonterminal = 1.0 - self.dones[t]
                        next_return = self.state_values[t]
                    else:
                        nextnonterminal = 1.0 - self.dones[t+1]
                        next_return = returns[t+1]
                    returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return
                advantages = returns - self.state_values
        '''
        # Discounted returns computed backwards; episode boundaries are
        # masked out via (1 - done).
        returns = torch.zeros_like(self.rewards).to(self.training_device)
        for t in reversed(range(steps)):
            if t == steps - 1:
                nextnonterminal = 1.0 - self.dones[t]
                next_return = self.state_values[t]
            else:
                nextnonterminal = 1.0 - self.dones[t+1]
                next_return = returns[t+1]
            returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return
        advantages = returns - self.state_values
        # flatten the batch
        #b_obs = self.states.reshape((-1,) + self.state_space)
        #print(self.states.shape)
        b_obs = self.states.reshape((-1,4)).detach()
        b_logprobs = self.action_probs.reshape(-1,1).detach()
        b_actions = self.actions.reshape((-1,)).detach()
        b_advantages = advantages.reshape(-1,1)
        b_returns = returns.reshape(-1,1)
        b_values = self.state_values.reshape(-1,1)
        # Optimize policy and value network for K epochs, run optimization in minibatches
        inds = np.arange(batch_size)
        for i_epoch_pi in range(self.epochs):
            np.random.shuffle(inds)
            for start in range(0, batch_size, minibatch_size):
                end = start + minibatch_size
                minibatch_ind = inds[start:end]
                mb_advantages = b_advantages[minibatch_ind]
                if self.norm_adv:
                    mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
                #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])
                newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])
                #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()
                # Importance ratio pi_new / pi_old (in log space).
                ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))
                # Stats
                approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()
                # Policy loss
                pg_loss1 = -mb_advantages * ratio
                pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)
                pg_loss = torch.max(pg_loss1, pg_loss2).mean()
                entropy_loss = entropy.mean()
                # Value loss
                _, new_values = self.policy.forward(b_obs[minibatch_ind])
                if self.clip_vloss:
                    v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])
                    #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)
                    v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],
                                                                      -self.clip_epsilon, self.clip_epsilon)
                    #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2
                    v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])
                    v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                    #v_loss = 0.5 * v_loss_max.mean()
                    v_loss = 0.5 * v_loss_max
                else:
                    #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()
                    v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])
                loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss
                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
                self.optimizer.step()
        # Copy new weights into old policy:
        self.old_policy.load_state_dict(self.policy.state_dict())
    def update_(self):
        """Alternative full-batch PPO update (no minibatching): normalised
        returns, clipped surrogate objective plus value and entropy terms."""
        steps = self.rewards.shape[0]
        batch_size = self.rewards.shape[0] * self.rewards.shape[1]
        #compute advantage
        returns = torch.zeros_like(self.rewards).to(self.training_device)
        for t in reversed(range(steps)):
            if t == steps - 1:
                nextnonterminal = 1.0 - self.dones[t]
                next_return = self.state_values[t]
            else:
                nextnonterminal = 1.0 - self.dones[t+1]
                next_return = returns[t+1]
            returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return
        returns = (returns - returns.mean()) / (returns.std() + 1e-5)
        advantages = returns - self.state_values
        b_obs = self.states.reshape((-1,4)).detach()
        b_logprobs = self.action_probs.reshape(-1,1).detach()
        b_actions = self.actions.reshape((-1,)).detach()
        b_advantages = advantages.reshape(-1,1)
        b_returns = returns.reshape(-1,1)
        b_values = self.state_values.reshape(-1,1)
        for i_epoch_pi in range(self.epochs):
            #mb_advantages = b_advantages
            newlogproba, entropy = self.evaluate(b_obs, b_actions)
            _,state_v = self.policy(b_obs)
            state_v = state_v.reshape(-1,1)
            # Advantages recomputed against the current critic each epoch.
            e_advantages = returns - state_v.detach()
            ratio = torch.exp((newlogproba - b_logprobs.detach()))
            surr1 = ratio * e_advantages
            surr2 = torch.clamp(ratio, 1-self.clip_epsilon, 1+self.clip_epsilon) * e_advantages
            loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_v, b_returns) - 0.01*entropy
            # take gradient step
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
        self.old_policy.load_state_dict(self.policy.state_dict())
    def store_outcome(self, step, state, action, action_prob, reward, done):
        """
        Store the outcome of a timestep into the state transition buffers.
        :param step: the current timestep, i.e. index in memory
        :param state: the state where the action was taken
        :param action: the action that was taken
        :param action_prob: logarithmic probability of the action
        :param reward: immediate reward
        :param done: true if next_state is terminal
        :return: Nothing
        """
        state = torch.from_numpy(state).float()
        self.states[step] = state.to(self.training_device)
        self.actions[step] = action.to(self.training_device)
        self.action_probs[step] = action_prob.to(self.training_device)
        # Rewards are clipped to [-1, 1] before storage.
        self.rewards[step] = torch.from_numpy(np.asarray(np.clip(reward, -1, 1))).float().to(self.training_device)
        with torch.no_grad():
            # Cache the critic's value estimate at storage time.
            _, state_values = self.policy.forward(state)
            self.state_values[step] = state_values
        self.dones[step] = torch.Tensor([done]).to(self.training_device)
|
984,069 | 55e2fe7080e472cc594c8400645421183b82465f | print('-' * 23)
# ATM cash-dispenser exercise: break a withdrawal amount into banknotes
# of 100, 50, 20, 10, 5 and 1, greedily from largest to smallest.
print('{:-^23}'.format(' Exercício 071 '))
print('-' * 23)
print('*' * 30)
print('{:^30}'.format('BANCO K9'))
print('*' * 30)
value = int(input('Que valor quer sacar? R$'))
total = value
ballot = 100  # current banknote denomination
total_ballot = 0  # how many notes of the current denomination
while True:
    if total >= ballot:
        # Dispense one more note of the current denomination.
        total -= ballot
        total_ballot += 1
    else:
        # Report the count for this denomination, then step down.
        if total_ballot > 0:
            print(f'Total de {total_ballot} cédulas de R${ballot}')
        if ballot == 100:
            ballot = 50
        elif ballot == 50:
            ballot = 20
        elif ballot == 20:
            ballot = 10
        elif ballot == 10:
            ballot = 5
        elif ballot == 5:
            ballot = 1
        total_ballot = 0
        if total == 0:
            break
print('=' * 30)
print('Volte sempre ao BANCO K9! \nTenha um bom dia!')
|
984,070 | 7ca3c4a93116a3e613ae397a76c7247b062d9e0f | import copy
from typing import List
class Solution:
    """LeetCode 931: minimum falling path sum.

    From each cell the path may move down-left, down, or down-right;
    solved bottom-up with a DP table the same shape as the matrix.
    """
    def minFallingPathSum(self, matrix: List[List[int]]) -> int:
        M = len(matrix)
        N = len(matrix[0])
        dirs = [(1, -1), (1, 0), (1, 1)]
        # dp[r][c] = minimum falling path sum starting at (r, c).
        dp = copy.deepcopy(matrix)
        # BUG FIX: a single-row matrix's answer is its smallest entry —
        # the original returned max(matrix[0]).  (Debug print removed.)
        if M == 1:
            return min(matrix[0])
        for r in range(M - 2, -1, -1):
            for c in range(0, N):
                minv = float('inf')
                for d in dirs:
                    dr, dc = d
                    if self.isvalid(r + dr, c + dc, M, N):
                        tem = dp[r + dr][c + dc] + matrix[r][c]
                    else:
                        tem = float('inf')
                    minv = min(minv, tem)
                dp[r][c] = minv
        return min(dp[0])

    def isvalid(self, r, c, M, N):
        """Return True iff (r, c) lies inside an M x N grid."""
        return 0 <= r < M and 0 <= c < N
984,071 | dc7684809c40660c82321d4c3903c84b3fe0795a | #!/usr/bin/env python
import os
import argparse
import subprocess
from demosthenes import INVENTORY, DEMOSTHENES_CONFIG
# Skeleton directory layout created inside a new project root.
SKEL_PATHS = [
    os.path.join('ansible', INVENTORY, 'group_vars', 'all'),
    os.path.join('ansible', INVENTORY, 'host_vars'),
    os.path.join('ansible', 'playbooks'),
    os.path.join('ansible', 'roles'),
]
# Default contents of .demosthenes.cfg (all options commented out).
DEFAULT_CONFIG = """\
# -*- conf -*-
# FIXME, need better section name
[repos]
;playbooks_repo: https://github.com/debops/debops-playbooks.git
;playbooks_dirname: debops-playbooks
[paths]
;data-home: /var/lib/demosthenes
[ansible defaults]
;callback_plugins = /path/to/plugins/callback
;roles_path = /path/to/roles
[ansible paramiko]
;record_host_keys = True
[ansible ssh_connection]
;ssh_args = -o ControlMaster=auto -o ControlPersist=60s
"""
# emacs, vim, $EDITOR ignores belong in
# global gitignore
DEFAULT_GITIGNORE = """\
ansible/{SECRET_NAME}
{SECRET_NAME}
{ENCFS_PREFIX}{SECRET_NAME}
ansible.cfg
#-- python
*.py[co]
"""
# Header + body for the generated Ansible inventory 'hosts' file.
HOSTS_FILE_HEADER = """\
# This is an Ansible inventory file.
# List your hosts here. Read about section headers here: #FIXME
"""
HOSTS_FILE_CONTENT = """\
# Uncomment below to use this machine as a controller.
[ansible_controllers]
#%s ansible_connection=local
"""
def write_file(filename, content):
    """Create *filename* containing *content* unless it already exists.

    Existing files are deliberately left untouched so user edits
    survive re-runs.
    """
    if not os.path.isfile(filename):
        # FIX: the Python-2-only ``file()`` builtin is replaced by
        # ``open()``, which is the documented spelling on Python 2 and
        # the only one available on Python 3.
        with open(filename, 'w') as outfile:
            outfile.write(content)
def write_config_files(project_root):
    """Create the default config, gitignore, and inventory hosts files
    under *project_root* (existing files are preserved by write_file)."""
    targets = [
        ('.demosthenes.cfg', DEFAULT_CONFIG),
        ('.gitignore', DEFAULT_GITIGNORE),
        (os.path.join('ansible', INVENTORY, 'hosts'),
         HOSTS_FILE_HEADER + HOSTS_FILE_CONTENT),
    ]
    for rel_path, content in targets:
        write_file(os.path.join(project_root, rel_path), content)
def main():
    """Create the skeleton directory tree and default config files.

    Refuses to run when a demosthenes config already exists in the
    target project. Reads the module-level ``args`` parsed below.
    Returns 0 on success; raises RuntimeError when the config exists.
    """
    project_root = os.path.abspath(args.project_dir)
    config_path = os.path.join(project_root, DEMOSTHENES_CONFIG)
    if os.path.exists(config_path):
        # FIX: Python-3-compatible raise syntax (the original used the
        # Python-2-only ``raise RuntimeError, msg`` statement form).
        raise RuntimeError("%s exists." % config_path)
    for skel_path in SKEL_PATHS:
        abspath = os.path.join(project_root, skel_path)
        if not os.path.isdir(abspath):
            os.makedirs(abspath)
    write_config_files(project_root)
    return 0
# Command-line interface: one project directory argument.
parser = argparse.ArgumentParser()
# FIX: a bare positional ignores ``default``; nargs='?' makes it
# optional so the current-directory default actually applies.
parser.add_argument('project_dir', nargs='?', default=os.curdir)
args = parser.parse_args()
# NOTE(review): main() is never invoked in this chunk — confirm whether
# an ``if __name__ == '__main__': sys.exit(main())`` guard is missing.
|
984,072 | a6ad06caa9504ed1121c3d9fd77c294a5e9d0d09 | ##########################################################################
#
# Copyright (c) 2019, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class SetVisualiserTest( GafferSceneTest.SceneTestCase ) :
	def testDefaultAction ( self ) :
		# Make sure we don't affect the scene by default: with no filter
		# attached, no gl:surface attribute should appear anywhere.
		inScene = self.__basicSphereScene()
		visualiser = GafferScene.SetVisualiser()
		visualiser["sets"].setValue( '*' )
		visualiser["in"].setInput( inScene["setC"]["out"] )
		self.assertTrue( "gl:surface" not in visualiser["out"].attributes( "/group" ) )
		self.assertTrue( "gl:surface" not in visualiser["out"].attributes( "/group/sphere" ) )
		self.assertTrue( "gl:surface" not in visualiser["out"].attributes( "/group/sphere1" ) )
		self.assertTrue( "gl:surface" not in visualiser["out"].attributes( "/group/sphere2" ) )
	def testOutSets( self ) :
		# The internal __outSets plug should mirror the scene's set names
		# and assign a unique colour per set.
		inScene = self.__basicSphereScene()
		visualiser = GafferScene.SetVisualiser()
		visualiser["sets"].setValue( "*" )
		visualiser["in"].setInput( inScene["setC"]["out"] )
		f = self.__addMatchAllFilter( visualiser )
		outSets = visualiser["__outSets"].getValue()
		# set names are interned strings which don't sort well as is
		inSetNames = sorted([ str(s) for s in visualiser["out"].setNames() ])
		self.assertListEqual( inSetNames, list(outSets["names"]) )
		# Make sure we are returning unique colors for each set
		colors = outSets["colors"]
		for c in colors:
			self.assertEqual( colors.count(c), 1 )
	def testSetFilter( self ) :
		# The 'sets' plug is a match pattern: empty selects nothing,
		# explicit names and wildcards select (deduplicated) matches.
		inScene = self.__basicSphereScene()
		visualiser = GafferScene.SetVisualiser()
		visualiser["in"].setInput( inScene["setC"]["out"] )
		f = self.__addMatchAllFilter( visualiser )
		self.assertEqual( visualiser["sets"].getValue(), "" )
		self.assertEqual( len(visualiser["__outSets"].getValue()["names"]), 0 )
		visualiser["sets"].setValue( "setA setB setC" )
		self.assertEqual( list(visualiser["__outSets"].getValue()["names"]), self.__testSetNames )
		visualiser["sets"].setValue( "set*" )
		self.assertEqual( list(visualiser["__outSets"].getValue()["names"]), self.__testSetNames )
		# Overlapping patterns must not duplicate entries.
		visualiser["sets"].setValue( "set* setA" )
		self.assertEqual( list(visualiser["__outSets"].getValue()["names"]), self.__testSetNames )
		sceneSets = sorted([ str(s) for s in inScene["setC"]["out"].setNames() ])
		visualiser["sets"].setValue( "*" )
		self.assertEqual( list(visualiser["__outSets"].getValue()["names"]), sceneSets )
	def testShadersAssignedToAllLocations( self ) :
		# With a match-all filter, every location gains a gl:surface
		# attribute that the input scene did not have.
		inScene = self.__basicSphereScene()
		self.assertTrue( "gl:surface" not in inScene["setC"]["out"].attributes( "/group" ) )
		self.assertTrue( "gl:surface" not in inScene["setC"]["out"].attributes( "/group/sphere" ) )
		self.assertTrue( "gl:surface" not in inScene["setC"]["out"].attributes( "/group/sphere1" ) )
		self.assertTrue( "gl:surface" not in inScene["setC"]["out"].attributes( "/group/sphere2" ) )
		visualiser = GafferScene.SetVisualiser()
		visualiser["sets"].setValue( 'set*' )
		visualiser["in"].setInput( inScene["setC"]["out"] )
		f = self.__addMatchAllFilter( visualiser )
		self.assertTrue( "gl:surface" in visualiser["out"].attributes( "/group" ) )
		self.assertTrue( "gl:surface" in visualiser["out"].attributes( "/group/sphere" ) )
		self.assertTrue( "gl:surface" in visualiser["out"].attributes( "/group/sphere1" ) )
		self.assertTrue( "gl:surface" in visualiser["out"].attributes( "/group/sphere2" ) )
def testInherited( self ) :
    """`includeInherited` controls whether ancestor set membership counts.

    With the scene from __basicSphereScene: /group is in setA only, sphere2
    is additionally in setB and setC, so colour counts differ depending on
    whether the inherited setA membership is included.
    """
    inScene = self.__basicSphereScene()
    visualiser = GafferScene.SetVisualiser()
    visualiser["sets"].setValue( 'set*' )
    visualiser["in"].setInput( inScene["setC"]["out"] )
    f = self.__addMatchAllFilter( visualiser )
    # Inherited membership is included by default.
    self.assertTrue( visualiser["includeInherited"].getValue() )
    self.assertEqual( visualiser["out"].attributes( "/group" )["gl:surface"].outputShader().parameters["numColors"].value, 1 )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere" )["gl:surface"].outputShader().parameters["numColors"].value, 1 )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere1" )["gl:surface"].outputShader().parameters["numColors"].value, 2 )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere2" )["gl:surface"].outputShader().parameters["numColors"].value, 3 )
    # Turning inheritance off removes the membership inherited from /group.
    visualiser["includeInherited"].setValue( False )
    self.assertEqual( visualiser["out"].attributes( "/group" )["gl:surface"].outputShader().parameters["numColors"].value, 1 )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere" )["gl:surface"].outputShader().parameters["numColors"].value, 0 )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere1" )["gl:surface"].outputShader().parameters["numColors"].value, 1 )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere2" )["gl:surface"].outputShader().parameters["numColors"].value, 2 )
def testColors( self ) :
    """Colour overrides should appear in each location's shader colour list."""
    red = imath.Color3f( 1.0, 0.0, 0.0 )
    green = imath.Color3f( 0.0, 1.0, 0.0 )
    blue = imath.Color3f( 0.0, 0.0, 1.0 )
    inScene = self.__basicSphereScene()
    visualiser = GafferScene.SetVisualiser()
    visualiser["in"].setInput( inScene["setC"]["out"] )
    visualiser["sets"].setValue( '*' )
    f = self.__addMatchAllFilter( visualiser )
    self.assertEqual( visualiser["includeInherited"].getValue(), True )
    # Pin each test set to a known colour.
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setA", red, True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setB", green, True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setC", blue, True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    # Each location's colour list mirrors its (inherited) set memberships.
    self.assertEqual( visualiser["out"].attributes( "/group" )["gl:surface"].outputShader().parameters["colors"][0], red )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere" )["gl:surface"].outputShader().parameters["colors"][0], red )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere1" )["gl:surface"].outputShader().parameters["colors"][0], red )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere1" )["gl:surface"].outputShader().parameters["colors"][1], blue )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere2" )["gl:surface"].outputShader().parameters["colors"][0], red )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere2" )["gl:surface"].outputShader().parameters["colors"][1], green )
    self.assertEqual( visualiser["out"].attributes( "/group/sphere2" )["gl:surface"].outputShader().parameters["colors"][2], blue )
def testColorOverrides( self ) :
    """Overrides should support exact names, wildcards, and being disabled."""
    inScene = self.__basicSphereScene()
    visualiser = GafferScene.SetVisualiser()
    visualiser["in"].setInput( inScene["setC"]["out"] )
    visualiser["sets"].setValue( 'set*' )
    f = self.__addMatchAllFilter( visualiser )
    ## We never generate white so we can use it as a safe test value
    white = imath.Color3f( 1.0 )
    self.assertNotIn( white, visualiser["__outSets"].getValue()["colors"] )
    self.assertEqual( len(visualiser["colorOverrides"].children()), 0 )
    def colorForSetName( name ):
        # Helper: look up the colour currently assigned to a named set.
        d = visualiser["__outSets"].getValue()
        i = d["names"].index( name )
        return d["colors"][i]
    # Exact-name override only affects that set.
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setA", white, True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    self.assertEqual( colorForSetName( "setA" ), white )
    self.assertNotEqual( colorForSetName( "setB" ), white )
    self.assertNotEqual( colorForSetName( "setC" ), white )
    # Wildcard override affects every matching set...
    visualiser["colorOverrides"].children()[0]["name"].setValue( "set*" )
    self.assertEqual( colorForSetName( "setA" ), white )
    self.assertEqual( colorForSetName( "setB" ), white )
    self.assertEqual( colorForSetName( "setC" ), white )
    # ...but not the scene's default (non-matching) sets.
    sceneSets = inScene["setC"]["out"].setNames()
    defaultSets = [ s for s in sceneSets if s not in self.__testSetNames ]
    for s in defaultSets :
        self.assertNotEqual( colorForSetName( s ), white )
    # Disabling the override restores the generated colours.
    visualiser["colorOverrides"].children()[0]["enabled"].setValue( False )
    self.assertNotEqual( colorForSetName( "setA" ), white )
    self.assertNotEqual( colorForSetName( "setB" ), white )
    self.assertNotEqual( colorForSetName( "setC" ), white )
def testOverrideValidation( self ) :
    """Only non-Color3f override values should cause an error."""
    inScene = self.__basicSphereScene()
    visualiser = GafferScene.SetVisualiser()
    visualiser["in"].setInput( inScene["setC"]["out"] )
    visualiser["sets"].setValue( 'set*' )
    f = self.__addMatchAllFilter( visualiser )
    # None of these should error as empty names or disabled should be fine
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setA", imath.Color3f( 1.0 ), True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    visualiser["__outSets"].getValue()
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setB", imath.Color3f( 1.0 ), False, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    visualiser["__outSets"].getValue()
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "", imath.Color3f( 1.0 ), True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    visualiser["__outSets"].getValue()
    # Non-color3f types should error
    visualiser["colorOverrides"].addChild( Gaffer.NameValuePlug( "setB", "notAColor", True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
    self.assertRaises( RuntimeError, visualiser["__outSets"].getValue )
# Names of the sets created by __basicSphereScene, in declaration order.
__testSetNames = [ 'setA', 'setB', 'setC' ]
def __basicSphereScene( self ) :
    """Build a group of three spheres with three overlapping sets.

    Membership: setA = /group, setB = /group/sphere2,
    setC = /group/sphere1 and /group/sphere2.
    Returns a dict of the nodes so callers keep them alive.
    """
    sphere = GafferScene.Sphere()
    group = GafferScene.Group()
    group["in"][0].setInput( sphere["out"] )
    group["in"][1].setInput( sphere["out"] )
    group["in"][2].setInput( sphere["out"] )
    # For safety, make sure we don't already have any sets with our names
    # creating any sets with our test names
    defaultSetNames = group["out"].setNames()
    for s in defaultSetNames :
        self.assertFalse( s.startswith( "set" ), msg = "Default set '%s' conflicts with the test case" % s )
    setA = GafferScene.Set()
    setA["in"].setInput( group["out"] )
    setA["name"].setValue( 'setA' )
    setA["paths"].setValue( IECore.StringVectorData( [ '/group' ] ) )
    setB = GafferScene.Set()
    setB["in"].setInput( setA["out"] )
    setB["name"].setValue( 'setB' )
    setB["paths"].setValue( IECore.StringVectorData( [ '/group/sphere2' ] ) )
    setC = GafferScene.Set()
    setC["in"].setInput( setB["out"] )
    setC["name"].setValue( 'setC' )
    setC["paths"].setValue( IECore.StringVectorData( [ '/group/sphere1', '/group/sphere2' ] ) )
    self.assertSceneValid( setC["out"] )
    # So they don't all get deleted here
    return {
        "sphere" : sphere,
        "group" : group,
        "setA" : setA,
        "setB" : setB,
        "setC" : setC
    }
def __addMatchAllFilter( self, node ):
    """Attach a PathFilter matching every location ('...') to node's filter plug."""
    f = GafferScene.PathFilter()
    f["paths"].setValue( IECore.StringVectorData( [ '...' ] ) )
    node["filter"].setInput( f["out"] )
    return f
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
984,073 | 109c40e9cc53882f9982422b9247b3b192aaf7fe | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-06 07:16
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django 1.11 migration.

    Adds Project.match_data_time and tightens defaults / verbose names on
    existing Project and UserInfo fields.

    NOTE(review): the datetime defaults below were frozen at generation time
    (2017-11-06 07:16 UTC); they are one-off literal defaults, not "now()".
    """

    dependencies = [
        ('backend', '0030_auto_20171105_0129'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='match_data_time',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 6, 7, 16, 26, 97879, tzinfo=utc), verbose_name='比赛时间'),
        ),
        migrations.AlterField(
            model_name='project',
            name='contact_name',
            field=models.CharField(default='郭志芃', max_length=30, verbose_name='紧急联系人姓名'),
        ),
        migrations.AlterField(
            model_name='project',
            name='contact_tel',
            field=models.CharField(default='18813040000', max_length=30, verbose_name='紧急联系人电话'),
        ),
        migrations.AlterField(
            model_name='project',
            name='ddl_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 6, 7, 16, 26, 97879, tzinfo=utc), verbose_name='报名截止日期'),
        ),
        migrations.AlterField(
            model_name='project',
            name='group_project',
            field=models.BooleanField(default=False, verbose_name='是否为团体项目'),
        ),
        migrations.AlterField(
            model_name='project',
            name='max_reg',
            field=models.IntegerField(default=100, verbose_name='报名人数限制'),
        ),
        migrations.AlterField(
            model_name='project',
            name='project_hot',
            field=models.IntegerField(default=0, verbose_name='当前报名人数'),
        ),
        migrations.AlterField(
            model_name='project',
            name='project_name',
            field=models.CharField(max_length=30, verbose_name='项目名称'),
        ),
        migrations.AlterField(
            model_name='project',
            name='project_text',
            field=models.CharField(max_length=200, verbose_name='项目描述'),
        ),
        migrations.AlterField(
            model_name='project',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 11, 6, 7, 16, 26, 97879, tzinfo=utc), verbose_name='发布时间'),
        ),
        migrations.AlterField(
            model_name='userinfo',
            name='birth_date',
            field=models.DateField(default=datetime.datetime(2017, 11, 6, 7, 16, 26, 97879, tzinfo=utc), verbose_name='出生日期'),
        ),
        migrations.AlterField(
            model_name='userinfo',
            name='clothes_size',
            field=models.CharField(choices=[('M', 'M'), ('XS', 'XS'), ('S', 'S'), ('XL', 'XL'), ('L', 'L')], max_length=2, verbose_name='衣服尺寸'),
        ),
    ]
|
984,074 | af145e302757424b24b9ec28a8f5d21442638d99 | with open('E:/Py_scr/stat_grad/Informatics/in2010402/files/24/24.txt', 'r') as data:
# --- still inside the with-block opened on the previous line ---
    # Scan the file, keeping the line that contains the fewest 'N' characters.
    minStr = data.readline()
    minCount = minStr.count('N')
    for line in data:
        localCount = line.count('N')
        if localCount < minCount:
            minCount = localCount
            minStr = line
# Count every uppercase letter in that line and report the most frequent one.
# The <= comparison below means ties are won by the alphabetically later letter.
alphabet = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0,
            'M': 0, 'N': 0, 'O': 0, 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0, 'Y': 0, 'Z': 0}
maxCount = 0
let = ''
for letter, counter in alphabet.items():
    alphabet[letter] = minStr.count(letter)
    if maxCount <= alphabet[letter]:
        maxCount = alphabet[letter]
        let = letter
print(let, maxCount)
|
984,075 | 3a51dcf148422bbd2bcc2984950599458e30fc79 | # -*- coding:utf-8 -*-
class lexicon(object):
    """Tokenizer that tags every word of a sentence with a lexical category.

    Known words fall into one of four closed vocabularies; anything else is
    either a number (tagged with its int value) or an 'error' token.
    """

    def __init__(self):
        self.DIRECTIONS = ("north", "south", "east", "west",
                           "down", "up", "left", "right", "back")
        self.VERBS = ("go", "stop", "kill", "eat")
        self.STOPS = ("the", "in", "of", "from", "at", "it")
        self.NOUNS = ("door", "bear", "princess", "cabinet")

    def scan(self, str):
        """Split *str* on whitespace and return a list of (category, token) pairs."""
        categories = (
            ('direction', self.DIRECTIONS),
            ('verb', self.VERBS),
            ('stop', self.STOPS),
            ('noun', self.NOUNS),
        )
        result = []
        for word in self.DealWithString(str):
            # Vocabulary membership wins over the numeric check, in the
            # fixed order above.
            for label, vocab in categories:
                if word in vocab:
                    result.append((label, word))
                    break
            else:
                if self.Is_Number(word):
                    result.append(('number', int(word)))
                else:
                    result.append(('error', word))
        return result

    def DealWithString(self, s):
        """Whitespace-split *s* into a list of words."""
        return s.split()

    def Is_Number(self, s):
        """True when *s* parses as an int or is a single unicode numeric character."""
        try:
            int(s)
        except ValueError:
            pass
        else:
            return True
        try:
            import unicodedata
            unicodedata.numeric(s)
        except (TypeError, ValueError):
            return False
        return True
l = lexicon()
print(l.scan("hh 123")) |
984,076 | 5e4e8ac074d6e9de0266a9fbf1cdb897a2c39567 | import re
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from fantasy_baseball.items import TeamPlayerItem
class DailyPlayerSpider(BaseSpider):
    """Scrape ESPN fantasy-baseball free-agency stat tables (Python 2 / legacy Scrapy).

    Starts from four pre-built stat views (last 7/15/30 days and
    batter-vs-pitcher) and pages through results 50 players at a time by
    rewriting the startIndex query parameter.
    """
    name = "fbb-players"
    allowed_domains = ["games.espn.go.com"]
    start_urls = [
        "http://games.espn.go.com/flb/playertable/prebuilt/freeagency?leagueId=17692&teamId=8&seasonId=2013&avail=-1&view=stats&context=freeagency&startIndex=0&version=last7",
        "http://games.espn.go.com/flb/playertable/prebuilt/freeagency?leagueId=17692&teamId=8&seasonId=2013&avail=-1&view=stats&context=freeagency&startIndex=0&version=last15",
        "http://games.espn.go.com/flb/playertable/prebuilt/freeagency?leagueId=17692&teamId=8&seasonId=2013&avail=-1&view=stats&context=freeagency&startIndex=0&version=last30",
        "http://games.espn.go.com/flb/playertable/prebuilt/freeagency?leagueId=17692&teamId=8&seasonId=2013&avail=-1&view=stats&context=freeagency&startIndex=0&version=bvp",
    ]

    def make_requests_from_url(self, url):
        # Route every start URL through the table parser below.
        return Request(url, callback=self.parse_players)

    def parse_players(self, response):
        """Parse one page of the player table, then queue the next page."""
        hxs = HtmlXPathSelector(response)
        num_str = ''
        # Pull the current startIndex out of the URL and bump it by one page.
        regex = re.compile('(.*)(startIndex=)(\d+)(.*)')
        m = regex.match(response.url)
        if m:
            num_str = m.group(3)
            next_num = 50 + int(num_str)
            print next_num
            # Stop paging after startIndex 550 (12 pages of 50).
            if next_num < 551:
                key_str = m.group(2)
                print key_str
                val_str = str(next_num)
                print val_str
                replace_str = key_str + val_str
                print replace_str
                next_req_url = re.sub(regex, r'\1' + replace_str + r'\4', response.url)
                print next_req_url
                yield Request(next_req_url, callback=self.parse_players)
        # Rows 1-2 of the table are headers; everything after is a player row.
        pRow = hxs.select("//table[@id='playertable_0']/tr[position()>2]")
        for player in pRow:
            item = TeamPlayerItem()
            pId = player.select('td[position()=1]/@id').extract()
            item ['playerId'] = pId
            # NOTE(review): extract() returns a list, so the str + list
            # concatenations in these prints look like they raise TypeError
            # at runtime -- confirm before relying on this spider.
            print 'id is ' + pId
            pName = player.select('td[position()=1]/a[position()=1]/text()').extract()
            item['playerName'] = pName
            print 'name is ' + item['playerName']
            yield item
# Legacy Scrapy (<0.15) convention: expose a module-level spider instance.
SPIDER = DailyPlayerSpider()
|
984,077 | 171d85f11e195179254089a630639d003e350248 | from datetime import timedelta
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
# Shared defaults applied to every task in the DAG below.
default_args = {
    'owner': 'admin',
    'start_date': airflow.utils.dates.days_ago(1),
    # 'end_date': datetime(2021, 2, 18),
    'depends_on_past': False,
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    # If a task fails, retry it once after waiting
    # at least 1 minute (see retry_delay below)
    'retries': 1,
    'retry_delay': timedelta(minutes=1),
}
dag = DAG(
    'spark_demo',
    default_args=default_args,
    description='A demo for spark data pipeline',
    # Continue to run DAG once per day
    schedule_interval=timedelta(days=1),
)
# Pipeline: ingest from three RDBMSes, stage into Hive, then load out to
# SQLite.  Every step shells out to spark-submit in local client mode.
t1 = BashOperator(
    task_id='ingestion_from_mysql',
    bash_command='spark-submit --master local --deploy-mode client --jars /usr/share/java/mysql-connector-java-5.1.48-bin.jar --class com.spark.scala.learning.IngestionFromMysql /home/fieldemployee/IdeaProjects/demo/target/scala-2.12/demo_2.12-0.1.jar',
    dag=dag,
)
t2 = BashOperator(
    task_id='ingestion_from_postgres',
    depends_on_past=False,
    bash_command='spark-submit --master local --deploy-mode client --jars /usr/local/software/postgresql-42.2.18.jar --class com.spark.scala.learning.IngestionFromPostgres /home/fieldemployee/IdeaProjects/demo/target/scala-2.12/demo_2.12-0.1.jar',
    dag=dag,
)
t3 = BashOperator(
    task_id='ingestion_from_oracle',
    depends_on_past=False,
    bash_command='spark-submit --master local --deploy-mode client --jars /usr/local/software/ojdbc6.jar --class com.spark.scala.learning.IngestionFromOracle /home/fieldemployee/IdeaProjects/demo/target/scala-2.12/demo_2.12-0.1.jar',
    dag=dag,
)
t4 = BashOperator(
    task_id='create_hive_tables',
    depends_on_past=False,
    bash_command='spark-submit --master local --deploy-mode client --class com.spark.scala.learning.CreateHiveTables /home/fieldemployee/IdeaProjects/demo/target/scala-2.12/demo_2.12-0.1.jar',
    dag=dag,
)
t5 = BashOperator(
    task_id='load_from_hive',
    depends_on_past=False,
    bash_command='spark-submit --master local --deploy-mode client --jars /usr/local/software/sqlite-jdbc-3.8.7.jar --class com.spark.scala.learning.LoadFromHive /home/fieldemployee/IdeaProjects/demo/target/scala-2.12/demo_2.12-0.1.jar',
    dag=dag,
)
# Strictly sequential pipeline.
t1 >> t2 >> t3 >> t4 >> t5
|
984,078 | 56ee13d8a6b4d43da4abe82b1664aa1307041b6b | #%%
import pandas as pd
import matplotlib.pyplot as plt
import riotConstant
# Language / Region / Champion / Tier
# will be passed as parameter from user's selection
# Read Data
def readData( language, region, championName, tier ):
    """Load the pre-aggregated match CSV for one champion as a DataFrame.

    CSV layout on disk: ./data/finalData/<region>/<tier>/champion_<id>.csv
    """
    constants = riotConstant.RIOTConstant()
    constants.setLanguage(language)
    champion_id = constants.getChampionId(championName)
    csv_path = './data/finalData/' + region + "/" + tier + "/champion_" + str(champion_id) + ".csv"
    return pd.read_csv(csv_path)
def dataSort( language, region, championName, tier ):
    """Canonicalise per-match columns of the champion CSV and write it back.

    Spells and primary/sub rune columns are sorted ascending per row, items
    descending (presumably to push empty item slots toward the end --
    TODO confirm the intent of the differing directions).  The normalised
    frame overwrites the source CSV.

    NOTE(review): to_csv() is called without index=False, so every run adds
    an extra unnamed index column to the file -- confirm whether that is
    intended.
    """
    # pd.options.mode.chained_assignment = None
    RIOTConstant = riotConstant.RIOTConstant()
    RIOTConstant.setLanguage(language)
    championId = RIOTConstant.getChampionId(championName)
    data = readData( language, region, championName, tier)
    # Changin the combination of spells in ascending order to check frequency
    print( data[['spell1Id', 'spell2Id']].head(5) )
    spell_combination = data[['spell1Id', 'spell2Id']]
    for i, spells in spell_combination.iterrows():
        if( spells['spell1Id'] > spells['spell2Id']):
            data.loc[i, ['spell1Id', 'spell2Id']] = [spells['spell2Id'], spells['spell1Id']]
    print( data[['spell1Id', 'spell2Id']].head(5) )
    # Items: sorted descending per row.
    print( data[['item0', 'item1', 'item2', 'item3', 'item4', 'item5', 'item6']].head(5) )
    item_combination = data[['item0', 'item1', 'item2', 'item3', 'item4', 'item5', 'item6']]
    for i, items in item_combination.iterrows():
        sorted_items = items.sort_values(ascending=False)
        data.loc[i, ['item0', 'item1', 'item2', 'item3', 'item4', 'item5', 'item6']] = sorted_items.to_numpy()
    print( data[['item0', 'item1', 'item2', 'item3', 'item4', 'item5', 'item6']].head(5) )
    # Primary rune page: sorted ascending per row.
    print( data[['perkPrimaryStyle', 'perk0', 'perk1','perk2','perk3']].head(5))
    prime_run_combination = data[['perkPrimaryStyle', 'perk0', 'perk1','perk2','perk3']]
    for i, prime_runes in prime_run_combination.iterrows():
        sorted_prime_runes = prime_runes.sort_values(ascending=True)
        data.loc[i, ['perkPrimaryStyle', 'perk0', 'perk1','perk2','perk3']] = sorted_prime_runes.to_numpy()
    print( data[['perkPrimaryStyle', 'perk0', 'perk1','perk2','perk3']].head(5) )
    # Secondary rune page: sorted ascending per row.
    print( data[['perkSubStyle', 'perk4', 'perk5']].head(5))
    sub_run_combination = data[['perkSubStyle', 'perk4', 'perk5']]
    for i, sub_runes in sub_run_combination.iterrows():
        sorted_sub_runes = sub_runes.sort_values(ascending=True)
        data.loc[i, ['perkSubStyle', 'perk4', 'perk5']] = sorted_sub_runes.to_numpy()
    print( data[['perkSubStyle', 'perk4', 'perk5']].head(5) )
    data.to_csv('./data/finalData/' + region + "/" + tier + "/champion_" + str(championId) + ".csv")
    return
def byWinRate( language, region, championName, tier ):
    """Stub: intended win-rate aggregation; currently only loads the data."""
    readData( language, region, championName, tier )
    return
def byPickRate( language, region, championName, tier ):
    """Stub: intended pick-rate aggregation; currently only loads the data."""
    readData( language, region, championName, tier )
    return
def spellByWinRate( language, region, championName, tier ):
    """Print summoner-spell-pair frequencies, split by win/loss and overall."""
    data = readData( language, region, championName, tier)
    # print( data[['win', 'spell1Id', 'spell2Id']].head(5) )
    print( data.groupby(["win", "spell1Id", "spell2Id"]).size() )
    print( data.groupby(["spell1Id", "spell2Id"]).size() )
def runeByWinRate( language, region, championName, tier ):
    """Print rune-combination frequencies, split by win/loss and overall."""
    data = readData( language, region, championName, tier)
    # print( data[['win', 'spell1Id', 'spell2Id']].head(5) )
    # Per win/loss: style pair, primary-page runes, secondary-page runes.
    print( data.groupby(["win", "perkPrimaryStyle", "perkSubStyle"]).size() )
    print( data.groupby(["win", "perk0", "perk1", "perk2", "perk3"]).size())
    print( data.groupby(["win", "perk4", "perk5"]).size())
    # Same three aggregates without the win/loss split.
    print( data.groupby(["perkPrimaryStyle", "perkSubStyle"]).size() )
    print( data.groupby(["perk0", "perk1", "perk2", "perk3"]).size())
    print( data.groupby(["perk4", "perk5"]).size())
# Ad-hoc driver: normalise the stored CSV once, then print the aggregates.
dataSort( "ko_KR", "BR1", "annie", "GOLD")
spellByWinRate( "ko_KR", "BR1", "annie", "GOLD" )
runeByWinRate( "ko_KR", "BR1", "annie", "GOLD" ) |
984,079 | 40ef9a1221d90d12c22de47af48b58411f66419e |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
data = pd.read_csv('dog_rates_tweets.csv')
data
# In[3]:
# Boolean mask: tweets containing an "<n>/10" (optionally decimal) rating.
datanew = data['text'].str.contains(r'(\d+(\.\d+)?)/10')
datanew
# In[4]:
data = data[datanew]
# In[5]:
dates = pd.to_datetime(data['created_at'])
dates
# In[6]:
# First capture group of the pattern is the numeric rating as a string.
ratings = data['text'].str.extract(r'(\d+(\.\d+)?)/10')[0]
# In[8]:
# Scatter of rating vs tweet date, clipped to a sane 0-25 range.
plt.scatter(dates.values, ratings)
plt.ylim(0,25)
plt.xticks(rotation=25)
plt.show()
|
984,080 | 80cda623a941a7f00c18f98645f2ea738c5a2ee2 | from staff import *
from search_staff import *
from create_staff_job_role import *
from create_staff_certificate import *
from create_staff_training import *
from add_staff_to_organisation import *
from add_staff_to_office import *
from add_staff_to_warehouse import *
from create_staff import * |
984,081 | 82b86cd868039ed21534e107e70360322a43321e | ## 2. Introduction to the Data ##
import pandas as pd
# Load both survey data sets and peek at their first rows.
all_ages = pd.read_csv('all-ages.csv')
recent_grads = pd.read_csv('recent-grads.csv')
print(all_ages[0:5])
print(recent_grads[0:5])
## 3. Summarizing Major Categories ##
# Unique values in Major_category column.
print(all_ages['Major_category'].unique())
aa_cat_counts = dict()
rg_cat_counts = dict()
def calculate_major_cat_totals(df):
    """Return {major category: summed 'Total' enrolment} for *df*."""
    return {
        category: df[df["Major_category"] == category]["Total"].sum()
        for category in df['Major_category'].unique()
    }
aa_cat_counts = calculate_major_cat_totals(all_ages)
rg_cat_counts = calculate_major_cat_totals(recent_grads)
## 4. Low-Wage Job Rates ##
low_wage_percent = 0.0
# Per-major proportion of graduates working low-wage jobs.
low_wage_proportion = recent_grads['Low_wage_jobs'] / recent_grads['Total']
print(low_wage_proportion)
# Overall proportion across all recent graduates.
low_wage_jobs_sum = recent_grads['Low_wage_jobs'].sum()
recent_grads_sum = recent_grads['Total'].sum()
low_wage_proportion_sum = low_wage_jobs_sum / recent_grads_sum
print(low_wage_proportion_sum)
## 5. Comparing Data Sets ##
# All majors, common to both DataFrames
majors = recent_grads['Major'].unique()
# Count majors where recent graduates have the lower unemployment rate.
rg_lower_count = 0
for major in majors:
    recent_grads_row = recent_grads[recent_grads['Major'] == major]
    all_ages_row = all_ages[all_ages['Major'] == major]
    rg_unemp_rate = recent_grads_row.iloc[0]['Unemployment_rate']
    aa_unemp_rate = all_ages_row.iloc[0]['Unemployment_rate']
    if rg_unemp_rate < aa_unemp_rate:
        rg_lower_count += 1
print(rg_lower_count) |
984,082 | 0e8847b26e39727213f9147764df6acb32f3d9a6 | #!/usr/bin/env python
# coding: utf-8
# ! conda activate <ACTIVATE_CONDA_ENVIRONMENT>
#
# run Trimmomatic (Bolger et al. 2014) to remove low-quality bases, artifacts and contamination like so:
#
# for PE:
# trimmomatic PE \
# -threads <NUMBER_THREADS> \
# -phred33 <PHRED_VALUE_THRESHOLD> \
# <INPUT_FORWARD_READS> <INPUT_REVERSE_READS> \
# <OUTPUT_FORWARD_READS> <OUTPUT_REVERSE_READS> \
# ILLUMINACLIP:TruSeq3-PE.fa:2:30:10:2:keepBothReads \
# LEADING:3 TRAILING:3 MINLEN:36 \
#
# for SE:
# trimmomatic SE \
# -threads <NUMBER_THREADS> \
# -phred33 <PHRED_VALUE_THRESHOLD> \
# <INPUT_SINGLE-END_READS> \
# ILLUMINACLIP:TruSeq3-SE:2:30:10 LEADING:3 TRAILING:3 \
# SLIDINGWINDOW:4:15 MINLEN:36 \
#
# --TruSeq3-PE are the ILLUMINA adapters's fasta sequences
# (https://github.com/timflutre/trimmomatic/tree/master
# /adapters).
#
# INPUT: raw reads and adapters's fasta sequences.
# OUTPUT: clean reads.
# In[ ]:
|
984,083 | 75a28aa5d5000cf753164067c3a0c22fd9a30b8c | """
[Topics]
deque
enumerate
[Sketch]
우선순위 점수랑 인덱스번호를 함께 저장
"""
#
def solution(priorities, location):
    """Return the print order (1-based) of the job at index *location*.

    Jobs are processed FIFO, but a job is sent to the back of the queue
    whenever any waiting job has a strictly higher priority (the classic
    printer-queue problem).
    """
    from collections import deque

    answer = 0
    # Pair each priority with its original index so we can recognise the
    # target job after requeueing.
    queue = deque((priority, index) for index, priority in enumerate(priorities))
    while queue:
        job = queue.popleft()
        # If any remaining job outranks this one, requeue it instead of
        # printing.  (A leftover debug print of max(queue)[0] was removed
        # here -- it polluted stdout on every requeue.)
        if queue and max(queue)[0] > job[0]:
            queue.append(job)
        else:
            answer += 1
            if job[1] == location:
                break
    return answer
#
|
984,084 | 34cb75c3c25d4bf9c4fa182c472080b451e5e185 | from ListNode import ListNode
def cyclically_right_shift_list(L, k):
    """Rotate singly linked list L right by k places and return the new head."""
    if not L:
        return L
    # One pass to measure the list and find its tail.
    length, tail = 1, L
    while tail.next:
        length += 1
        tail = tail.next
    k %= length
    if k == 0:
        return L
    # Close the list into a ring, walk to the node just before the new
    # head, then cut the ring there.
    tail.next = L
    new_tail = tail
    for _ in range(length - k):
        new_tail = new_tail.next
    new_head = new_tail.next
    new_tail.next = None
    return new_head
# Smoke tests: rotating a single-node list is a no-op (same object back),
# and rotating [1..5] right by 2 puts 4 at the head.
L = ListNode(1)
assert cyclically_right_shift_list(L, 2) is L
L = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5, None)))))
# Compare values with ==; "is" against an int literal only works because of
# CPython's small-int cache and is a SyntaxWarning on modern Pythons.
assert cyclically_right_shift_list(L, 2).data == 4
|
984,085 | 0c6258f32ed2c9f9a2cfd2d24155fbb21c8fa0b2 | from PIL import Image
from facenet import MTCNN, InceptionResnetV1
import torch
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Face detector/cropper and the pretrained VGGFace2 embedder (eval mode),
# both constructed once at import time.
mtcnn = MTCNN(image_size=160, margin=0, device=device)
resnet = InceptionResnetV1(pretrained='vggface2', device=device).eval()
def extract(image_path):
    """Return the face embedding of the image at *image_path* as a numpy
    array, or None when no face is detected.
    """
    image = Image.open(image_path)
    image_cropped = mtcnn(image)
    if image_cropped is None:
        return None
    # Move the crop to the module-level device.  The previous hard-coded
    # .cuda() crashed on CPU-only machines even though `device` had already
    # selected the CPU fallback.
    image_embedding = resnet(image_cropped.unsqueeze(0).to(device))
    return image_embedding.cpu().detach().numpy()
|
984,086 | ecfb8a8d524c26dd940d7d4390491cf436a0cb9b | """
A module in construction.
Create class which validates the strategy -
1. Ensures all methods are implemented and all looks good
2. Basic sanity checks
Add additional information -
1. Add a versioning information in the class automatically
"""
# Create class which validates the strategy -
# 1. Ensures all methods are implemented and all looks good
# 2. Basic sanity checks
# Add additional information
# 1. Add a versioning information in the class automatically
|
984,087 | 32b3f74bde9ecc8e9293c458d7e9deb92203f6b9 | class StackWithMin:
def __init__(self):
self.values = []
self.minValues = []
def isEmpty(self):
return self.values == []
def push(self, value):
if self.isEmpty() or value <= self.min():
self.minValues.append(value)
self.values.append(value)
def pop(self):
try:
value = self.values.pop()
if value == self.min():
self.minValues.pop()
except IndexError:
value = None
return value
def peek(self):
try:
value = self.values[-1]
except IndexError:
value = None
return value
def size(self):
    """Number of elements currently on the stack."""
    return len(self.values)
def min(self):
try:
minValue = self.minValues[-1]
except IndexError:
minValue = None
return minValue
|
984,088 | 714233145642a8223481c67f5ff60b8b6d847f4e | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 21 13:18:30 2021
@author: joann
"""
## 함수, 클래스
class Node():
    """A node for a singly linked list: a data payload plus a link to the
    next node.  (By convention, class names start with a capital letter.)
    """

    def __init__(self):
        # Created empty; the caller assigns .data and .link afterwards.
        self.data = self.link = None
## Globals
## Main: build a five-node list by hand and traverse it via the links.
node1=Node()
node1.data='다현'
node2=Node()
node2.data='정연'
node1.link = node2 # link created after node2's data exists
node3=Node()
node3.data='쯔위'
node2.link = node3
node4=Node()
node4.data='사나'
node3.link = node4
node5=Node()
node5.data='지효'
node4.link = node5
# Walk the chain from the head, printing each node's data on one line.
print(node1.data, end=' ')
print(node1.link.data, end=' ')
print(node1.link.link.data, end=' ')
print(node1.link.link.link.data, end=' ')
print(node1.link.link.link.link.data, end=' ') |
984,089 | d7267135d37fbeef2bf50862df748ce3dd6f23b3 | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Package metadata and build configuration for python-plaid.
setup(
    name='python-plaid',
    version='0.1.0',
    description='Simple Python API client for Plaid',
    long_description='',
    keywords='api, client, plaid',
    author='Chris Forrette',
    author_email='chris@chrisforrette.com',
    url='https://github.com/chrisforrette/python-plaid',
    license='MIT',
    # find_packages() expects an iterable of exclusion *patterns*; the bare
    # string 'tests' was iterated character-by-character ('t', 'e', ...) and
    # therefore excluded nothing.
    packages=find_packages(exclude=['tests']),
    package_data={'README': ['README.md']},
    install_requires=['requests==2.2.1'],
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Environment :: Web Environment",
    ]
)
|
984,090 | 55e8c432b02c1102dcd06867b879eb7519a29f34 | import os
import inspect
from experiments import *
from src.experiments.CountMWE import CountMWE
from src.experiments.CountPOS import CountPOS
from src.Classification import Classification
import src.utils.Stream as Stream
from src.utils.common import splitOptions, getOptions
# Root directory for all experiment input data.
DATA_PATH = os.path.expanduser("data")
def getFeatureGroups(names, dummy=False):
    """Resolve feature-group names/classes into initialized instances.

    Each entry may be a class, an instance, or a string naming one of the
    imported experiment classes (resolved via eval).  Classes are
    instantiated, and every group is initialized with DATA_PATH.

    NOTE(review): eval() on supplied names executes arbitrary code; this is
    acceptable only because it is an internal research tool.  (Python 2:
    uses basestring.)
    """
    global DATA_PATH
    groups = [eval(x) if isinstance(x, basestring) else x for x in names]
    for i in range(len(groups)): # Initialize classes
        if inspect.isclass(groups[i]):
            groups[i] = groups[i]()
        groups[i].initialize(DATA_PATH)
    return groups
# Command-line entry point (Python 2): runs up to three pipeline stages
# (build -> classify -> analyse) selected via -a.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Run University of Turku experiments for CAMDA 2015')
    parser.add_argument('-o', '--output', help='Output directory', default=None)
    #parser.add_argument('-d', "--debug", default=False, action="store_true", dest="debug")
    parser.add_argument('-a', "--action", default="build,classify,analyse", dest="action")
    groupE = parser.add_argument_group('build', 'Example Generation')
    #groupE.add_argument('-e', "--examples", default=False, action="store_true", dest="examples")
    groupE.add_argument('-e', '--experiment', help='Experiment class', default="RemissionMutTest")
    groupE.add_argument('-f', '--features', help='Feature groups (comma-separated list)', default=None)
    groupE.add_argument('-x', '--extra', default=None)
    groupC = parser.add_argument_group('classify', 'Example Classification')
    groupC.add_argument('-s', '--classification', help='', default="Classification")
    groupC.add_argument('-c','--classifier', help='', default=None)
    groupC.add_argument('-r','--classifierArguments', help='', default=None)
    groupC.add_argument('-m','--metric', help='', default="roc_auc")
    #groupC.add_argument('-i','--iteratorCV', help='', default='getStratifiedKFoldCV')
    #groupC.add_argument('-n','--numFolds', help='Number of folds in cross-validation', type=int, default=10)
    groupC.add_argument('-v','--verbose', help='Cross-validation verbosity', type=int, default=4)
    groupC.add_argument('-l', '--parallel', help='Cross-validation parallel jobs', type=int, default=1)
    groupC.add_argument("--hidden", default=False, action="store_true", dest="hidden")
    groupC.add_argument('--preDispatch', help='', default='2*n_jobs')
    groupA = parser.add_argument_group('Analysis', 'Analysis for classified data')
    groupA.add_argument("-y", "--analyses", default="ResultAnalysis")
    options = parser.parse_args()
    # Validate and split the requested stages; a fresh "build" wipes the log.
    actions = splitOptions(options.action, ["build", "classify", "analyse"])
    Stream.openLog(os.path.join(options.output, "log.txt"), clear = "build" in actions)
    print "Options:", options.__dict__
    if "build" in actions:
        print "======================================================"
        print "Building Examples"
        print "======================================================"
        # NOTE(review): eval() of CLI-supplied class names runs arbitrary
        # code; fine only for a trusted research tool.
        ExperimentClass = eval(options.experiment)
        if options.extra:
            e = ExperimentClass(**getOptions(options.extra))
        else:
            e = ExperimentClass()
        e.dataPath = DATA_PATH
        # The hidden (test) set is only touched when explicitly requested.
        e.includeSets = ("train", "test") if options.hidden else ("train",)
        featureGroups = (e.featureGroups if e.featureGroups != None else []) + (options.features.split(",") if options.features else [])
        print "Using feature groups:", featureGroups
        e.featureGroups = getFeatureGroups(featureGroups)
        taggers = e.taggers
        print "Using taggers:", taggers
        e.taggers = getFeatureGroups(taggers)
        e.run(options.output)
        e = None
    # NOTE(review): resultPath is computed but never used below -- confirm
    # whether it is consumed elsewhere or is dead code.
    resultPath = os.path.join(options.output, "classification.json")
    if "classify" in actions:
        print "======================================================"
        print "Classifying"
        print "======================================================"
        ClassificationClass = eval(options.classification)
        classification = ClassificationClass(options.classifier, options.classifierArguments, 10, options.parallel, options.metric, classifyHidden=options.hidden)
        classification.readExamples(options.output)
        classification.classify()
        classification = None
    if "analyse" in actions and options.analyses is not None:
        for analysisName in options.analyses.split(","):
            print "======================================================"
            print "Analysing", analysisName
            print "======================================================"
            # Dynamically import and instantiate each requested analysis.
            exec "from src.analyse." + analysisName + " import " + analysisName
            analysisClass = eval(analysisName)
            analysisObj = analysisClass(dataPath=DATA_PATH)
analysisObj.analyse(options.output, hidden=options.hidden) |
984,091 | 726184de8b8ac131e39c5e4a981bef227aa04d51 | par = [0]
# Component sizes, indexed by node; only entries at root nodes are meaningful.
sz = [1]
def uf_find(a):
    """Return the root representative of node a, compressing the path."""
    root = a
    while par[root] != root:
        root = par[root]
    # Second pass: point every node on the walked path directly at the root.
    while par[a] != root:
        par[a], a = root, par[a]
    return root
def uf_union(a, b):
    """Merge the components of a and b; return True if a merge happened."""
    root_a = uf_find(a)
    root_b = uf_find(b)
    if root_a == root_b:
        return False
    # Attach b's root under a's root and accumulate the component size.
    par[root_b] = root_a
    sz[root_a] += sz[root_b]
    return True
def uf_size(a):
    """Size of the component that contains node a."""
    root = uf_find(a)
    return sz[root]
|
984,092 | ec17976b43eaf907443fd371db646dc35d34894f | import random
class NodeData:
    """A directed-graph vertex: out/in adjacency maps plus traversal metadata."""

    def __init__(self, key: int, pos):
        self.key = key
        self.neighbors = {}      # out-edges: neighbour key -> edge weight
        self.neighbors_in = {}   # in-edges: neighbour key -> edge weight
        self.info = "White"      # colour marker used by traversals
        self.tag = -1
        self.weight = -1
        # pos is either None or an (x, y, z) sequence.
        self.pos = None if pos == None else Geo_location(pos[0], pos[1], pos[2])

    def get_key(self):
        return self.key

    def get_tag(self):
        return self.tag

    def get_info(self):
        return self.info

    def get_neighbors(self):
        return self.neighbors

    def get_weight(self):
        return self.weight

    def get_pos(self):
        return self.pos

    def get_neighbors_in(self):
        return self.neighbors_in

    def add_neighbor(self, key, weight):
        """Record (or overwrite) the out-edge to *key*."""
        self.neighbors[key] = weight

    def add_neighbor_in(self, key, weight):
        """Record (or overwrite) the in-edge from *key*."""
        self.neighbors_in[key] = weight

    def remove_neighbor(self, id):
        """Delete the out-edge to *id* if present; silently ignore otherwise."""
        self.neighbors.pop(id, None)

    def set_weight(self, weight):
        self.weight = weight

    def set_tag(self, tag):
        self.tag = tag

    def set_pos(self, x, y, z):
        self.pos = Geo_location(x, y, z)

    def set_info(self, info):
        self.info = info

    def __lt__(self, other):
        # Order nodes by weight so they can live in a priority queue.
        return self.weight < other.weight
class Geo_location:
    """Plain 3D coordinate container with simple accessors."""

    def __init__(self, x, y, z):
        """Store the three coordinates as given."""
        self.x, self.y, self.z = x, y, z

    def get_x(self):
        """Return the x coordinate."""
        return self.x

    def get_y(self):
        """Return the y coordinate."""
        return self.y

    def get_z(self):
        """Return the z coordinate."""
        return self.z
984,093 | 23f1b47e4efc34b1a2a1132de26f29b27ba5c9c9 | from mpmath import mp
from mpmath import lambertw
from decimal import Decimal
from decimal import getcontext
getcontext().prec = mp.dps = 80 # 78 digits for a maximum of 2^256-1, and 2 more digits for after the decimal point
def purchaseTargetAmount(supply, balance, weight, amount):
    """Bancor purchase: smart tokens minted for depositing `amount` reserve.

    Formula: supply * ((1 + amount/balance) ** (weight/1000000) - 1),
    with `weight` in ppm.  Arguments are converted to Decimal explicitly;
    the original `vars().values()` trick depended on locals-dict ordering.
    """
    supply, balance, weight, amount = (
        Decimal(supply), Decimal(balance), Decimal(weight), Decimal(amount))
    return supply*((1+amount/balance)**(weight/1000000)-1)
def saleTargetAmount(supply, balance, weight, amount):
    """Bancor sale: reserve tokens returned for destroying `amount` supply.

    Formula: balance * (1 - (1 - amount/supply) ** (1000000/weight)),
    with `weight` in ppm.  Explicit Decimal conversion replaces the fragile
    `vars().values()` reflection hack.
    """
    supply, balance, weight, amount = (
        Decimal(supply), Decimal(balance), Decimal(weight), Decimal(amount))
    return balance*(1-(1-amount/supply)**(1000000/weight))
def crossReserveTargetAmount(balance1, weight1, balance2, weight2, amount):
    """Bancor cross-reserve conversion: amount of reserve-2 received for
    `amount` of reserve-1.

    Formula: balance2 * (1 - (balance1/(balance1+amount)) ** (weight1/weight2)).
    Explicit Decimal conversion replaces the fragile `vars().values()` hack.
    """
    balance1, weight1, balance2, weight2, amount = (
        Decimal(balance1), Decimal(weight1), Decimal(balance2),
        Decimal(weight2), Decimal(amount))
    return balance2*(1-(balance1/(balance1+amount))**(weight1/weight2))
def fundCost(supply, balance, weights, amount):
    """Reserve cost of minting `amount` smart tokens against one reserve of a
    multi-reserve converter (`weights` = total reserve ratio in ppm).

    Formula: balance * (((supply+amount)/supply) ** (1000000/weights) - 1).
    Explicit Decimal conversion replaces the fragile `vars().values()` hack.
    """
    supply, balance, weights, amount = (
        Decimal(supply), Decimal(balance), Decimal(weights), Decimal(amount))
    return balance*(((supply+amount)/supply)**(1000000/weights)-1)
def fundSupplyAmount(supply, balance, weights, amount):
    """Smart tokens minted for depositing `amount` into one reserve of a
    multi-reserve converter (`weights` = total reserve ratio in ppm).

    Formula: supply * ((amount/balance + 1) ** (weights/1000000) - 1).
    Explicit Decimal conversion replaces the fragile `vars().values()` hack.
    """
    supply, balance, weights, amount = (
        Decimal(supply), Decimal(balance), Decimal(weights), Decimal(amount))
    return supply*((amount/balance+1)**(weights/1000000)-1)
def liquidateReserveAmount(supply, balance, weights, amount):
    """Reserve tokens released from one reserve when liquidating `amount`
    smart tokens (`weights` = total reserve ratio in ppm).

    Formula: balance * (1 - ((supply-amount)/supply) ** (1000000/weights)).
    Explicit Decimal conversion replaces the fragile `vars().values()` hack.
    """
    supply, balance, weights, amount = (
        Decimal(supply), Decimal(balance), Decimal(weights), Decimal(amount))
    return balance*(1-((supply-amount)/supply)**(1000000/weights))
def power(baseN, baseD, expN, expD, precision):
    """Reference implementation of fixed-point power:
    (baseN/baseD) ** (expN/expD), scaled by 2**precision.

    Explicit Decimal conversion replaces the fragile `vars().values()` hack.
    """
    baseN, baseD, expN, expD, precision = (
        Decimal(baseN), Decimal(baseD), Decimal(expN), Decimal(expD),
        Decimal(precision))
    return (baseN/baseD)**(expN/expD)*2**precision
def lambertPos(x, precision):
    """Fixed-point W(z)/z for z = x / 2**precision (positive branch),
    scaled back by 2**precision.

    The mpmath result is routed through str() so the mpf value converts
    cleanly to Decimal.  Explicit Decimal conversion replaces the fragile
    `vars().values()` hack.
    """
    x, precision = Decimal(x), Decimal(precision)
    return Decimal(str(lambertw(x/2**precision)))/(x/2**precision)*2**precision
def lambertNeg(x, precision):
    """Fixed-point W(-z)/(-z) for z = x / 2**precision (negative argument),
    scaled back by 2**precision.

    Explicit Decimal conversion replaces the fragile `vars().values()` hack.
    """
    x, precision = Decimal(x), Decimal(precision)
    return Decimal(str(lambertw(-x/2**precision)))/(-x/2**precision)*2**precision
|
984,094 | b30de5409d9cc884eb7d8342e6fdafed4c5037dd | /home/sunny/anaconda3/lib/python3.6/weakref.py |
984,095 | 4cf3a504ecd2a17ff00eafa4cb35b7a07e1a400a | # -*- coding: utf-8 -*-
"""
Run Oracle Query and save query to DF
@author: 105028218
"""
import pandas as pd
import os
import sys
print(os.getcwd())
path = "c:\\Temp\\"
os.environ['PATH'] = 'C:\Oracle\instantclient_18_5'
sys.path.append("c:\\users\\105028218\\box sync\\fb pc sync\\python\\venv\\lib\\site-packages")
import cx_Oracle as cx
con_PO = cx.connect('GEOG_SOU_SC/rfs4375tf@oscaroltp-db.og.ge.com:10110/oscaroltp')
con_CYB = cx.connect('APPSV/VAPPS@ogoelamsdbp-scan.og.ge.com:1521/ORPOGOP6')
con_CDI = cx.connect('APPSV/VAPPS@ogoelamsdbp-scan.og.ge.com:1521/ORPOGOP6')
con_DWH = cx.connect('SSO105028218/Fab_123s@argo-prod-db.og.ge.com:10110/POGG1O')
con_ITO = cx.connect('SSO105028218/S23dS23dS23d@ogtmflitptcdb01.corporate.ge.com:1521/ONNTMP10')
## Check connection
#print (con_CDI.version)
#con.close()
fd = open('Expired_PD_sql_Bot.sql', 'r')
sql = fd.read()
fd.close()
df_PO = pd.read_sql(sql, con_DWH)
print(df_PO.shape)
#
with pd.ExcelWriter(r'C:\-FB-DATA-\_FB_Wk_Python\tmp1_EXP_PD\output.xlsx') as writer:
df_PO.to_excel(writer, sheet_name='All')
print("fine")
#with con:
# cur = con.cursor()
# cur.execute(sql_PO)
# rows = cur.fetchall()
# df = pd.read_sql(sql_PO, con)
# print(df.shape) |
984,096 | 377f9300adb3d39f1c9e10ae28a6c6d17d86cb1b | from Classes.GetFastFindMajorsList import GetFastFindMajorsList
from Classes.GrantForwardLeads import GrantForwardLeads
from Classes.ProcessGrantForwardLeads import ProcessGrantForwardLeads
class GrantForwardRunMajorsList(object):
    """Runs the GrantForward lead pipeline for every major in the FastFind list.

    For each major, search results are fetched, converted into a lead array,
    and handed to ProcessGrantForwardLeads.  In test mode only the first five
    majors are processed.
    """

    def __init__(self, isTest=False):
        """:param isTest: when True, limit the run to the first five majors."""
        self.isTest = isTest
        self.fastFindMajorsList = GetFastFindMajorsList.getGrantForwardItemsList()
        # The original duplicated the whole loop body across the test and
        # production branches; slice once so the pipeline exists in one place.
        majors = self.fastFindMajorsList[:5] if self.isTest else self.fastFindMajorsList
        for major in majors:
            majorSpecificGrantForwardLeads = GrantForwardLeads(major).processSearchResultsAndMakeLeadArray()
            ProcessGrantForwardLeads(majorSpecificGrantForwardLeads)
984,097 | 33cc5e31b29f4642e8e0cd14cd33e4b8e996b951 | def AddOne(args):
    # Expect a dict payload with an int under key "a"; anything else is rejected.
    if(isinstance(args,dict)):
        if(isinstance(args["a"],int)):
            # NOTE(review): returns a + 2 even though the handler is named
            # AddOne -- confirm whether "+2" is intentional or an off-by-one bug.
            return {'result':args["a"]+2}
    # Fallback for a non-dict payload or a non-int "a" value.
    return "args error"
984,098 | 5dc8ff8630c9abcd99e8b1ff7f71b87320caef33 | from pico2d import *
import gfw
import gobj
import pattern
from player import Player
from boss import Boss
from background import VertScrollBackground
import life_gauge
import highscore
MAX_PATTERN = 2  # total number of boss patterns registered in enter(); reaching this index ends the run
STATE_IN_GAME, STATE_PAUSED, STATE_GAME_OVER = range(3)  # scene state machine values
def enter():
    """Scene entry hook: build the world layers, spawn the player and boss,
    load backgrounds, font, sounds and boss patterns, then start the game."""
    gfw.world.init(['bg', 'missile', 'player', 'bullet', 'boss', 'ui'])
    pattern.init()
    global player
    player = Player()
    gfw.world.add(gfw.layer.player, player)
    global boss
    boss = Boss()
    gfw.world.add(gfw.layer.boss, boss)
    global bg
    bg = VertScrollBackground('./res/world02.png')
    leaf = VertScrollBackground('./res/world02c2.png')  # second scrolling layer drawn over the base bg
    gfw.world.add(gfw.layer.bg, bg)
    gfw.world.add(gfw.layer.bg, leaf)
    global font
    font = gfw.font.load('res/ConsolaMalgun.ttf', 20)
    global sound_damage, sound_pldead, music_bg
    music_bg = load_music('res/bg_sound.mp3')
    sound_damage = load_wav('res/se_damage00.wav')
    sound_pldead = load_wav('res/se_pldead00.wav')
    sound_damage.set_volume(20)
    sound_pldead.set_volume(20)
    music_bg.set_volume(50)
    global pattern_index
    pattern_index = 0  # index into pattern.patterns; the run ends when it reaches MAX_PATTERN
    p1 = pattern.Pattern1()
    pattern.add(p1)
    p2 = pattern.Pattern2()
    pattern.add(p2)
    life_gauge.load()
    highscore.load()
    start_game()
def exit():
    """Scene exit hook (name mandated by the gfw framework, though it shadows
    the builtin); nothing to release here."""
    pass
def start_game():
    """(Re)start a run: clear leftover projectiles and restart the music."""
    global game_state
    game_state = STATE_IN_GAME
    gfw.world.clear_at(gfw.layer.missile)
    gfw.world.clear_at(gfw.layer.bullet)
    # gfw.world.remove(highscore)
    music_bg.repeat_play()
def pause_game():
    """Freeze the simulation (see update()) and pause the background music."""
    global game_state
    game_state = STATE_PAUSED
    music_bg.pause()
def resume_game():
    """Resume from a pause: unfreeze the simulation and the music."""
    global game_state
    game_state = STATE_IN_GAME
    music_bg.resume()
def end_game():
    """Finish the run: stop the music, record the death count as the score
    and show the highscore overlay."""
    global game_state
    print('Dead')  # console trace
    game_state = STATE_GAME_OVER
    music_bg.stop()
    highscore.add(player.death)
    gfw.world.add(gfw.layer.ui, highscore)
def update():
    """Per-frame logic: advance the world and the current boss pattern;
    once every pattern (MAX_PATTERN of them) is cleared, end the game."""
    global game_state
    if game_state != STATE_IN_GAME:
        return  # paused or game over: freeze the simulation
    gfw.world.update()
    if MAX_PATTERN != pattern_index:
        pattern.patterns[pattern_index].update()
    else:
        end_game()
    check_collsion(boss)
def draw():
    """Render the world, then overlay the player's death counter."""
    gfw.world.draw()
    font.draw(700, get_canvas_height() - 45, 'Death: %d' % player.death)
def handle_event(e):
    """Dispatch a pico2d/SDL event: window quit, escape-to-pop, then forward
    everything to the player."""
    # Bug fix: SDL_Quit is the SDL *shutdown function* re-exported by pico2d;
    # comparing e.type against it was always False, so closing the window
    # never quit.  The quit event type constant is SDL_QUIT.
    if e.type == SDL_QUIT:
        gfw.quit()
    if e.type == SDL_KEYDOWN:
        if e.key == SDLK_ESCAPE:
            gfw.pop()
    player.handle_event(e)
def check_collsion(Boss):
    """AABB collision pass (name typo kept -- callers use this spelling).

    Handles three interactions per frame:
      * player vs boss body    -> player death (once per invulnerability window)
      * boss bullets vs player -> player death
      * player missiles vs boss-> boss damage; on boss death advance to the
        next pattern and refill boss life.  At most one missile hit is
        processed per frame (early return below).
    """
    global pattern_index
    if gobj.Collsion_AABB(player, Boss) and player.nodamage == False:
        player.nodamage = True  # open the invulnerability window
        sound_pldead.play()
        player.death += 1
    for b in gfw.world.objects_at(gfw.layer.bullet):
        if gobj.Collsion_AABB(b, player) and player.nodamage == False:
            sound_pldead.play()
            player.nodamage = True
            player.death += 1
    for b in gfw.world.objects_at(gfw.layer.missile):
        if gobj.Collsion_AABB(b, Boss) and Boss.nodamage == False:
            sound_damage.play()
            dead = Boss.decrease_life(b.power)
            if dead:
                pattern_index += 1  # advance to the next boss pattern
                Boss.life = 1000    # refill life for the new phase
                Boss.nodamage = True
            b.remove()
            return  # consume at most one missile hit per frame
if __name__ == '__main__':
    # Allow launching this scene module directly for quick testing.
    gfw.run_main()
|
984,099 | 7ff8ad7e669d4b05ae0038776574b21f2f58589a | import math
class Solution(object):
    def hasGroupsSizeX(self, deck):
        """Return True if the deck can be split into groups of size X >= 2,
        each group holding X identical cards.

        :type deck: List[int]
        :rtype: bool

        Such a partition exists exactly when the GCD of all card
        frequencies is at least 2.  (The original placed this docstring
        after the final return, making it unreachable dead code.)
        """
        from collections import Counter
        from functools import reduce
        from math import gcd

        if len(deck) < 2:
            return False
        # Counter replaces the hand-rolled frequency dict; reduce folds gcd
        # over the counts (the original seeded gcd_ and immediately
        # recomputed it, which was redundant).
        counts = Counter(deck)
        return reduce(gcd, counts.values()) >= 2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.