seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7662960040 | #Alex Black
import cv2
import numpy as np
import io
from PIL import Image
from threading import Thread
import collections
frame = cv2.VideoCapture( 0 )
kern = np.ones( ( 1, 1 ), np.uint8 )
i = 0
while True:
i+=1
print( i )
_,cimg = frame.read()
#cimg = cv2.imread( 'test.png' )
cimgh, cimgw, _ = cimg.shape
cimg = cv2.blur( cimg, ( 1, 1 ) ) #Quick blur removes some of the worst artifacts
bwcimg = cv2.cvtColor( cimg, cv2.COLOR_BGR2GRAY )
'''This section isolates areas'''
diff = cv2.cvtColor( cv2.bitwise_xor( cimg, cv2.blur( cimg, ( 5, 5 ) ) ), cv2.COLOR_BGR2GRAY )
white1 = np.array( [ 10 ] )
white2 = np.array( [ 256 ] )
maxdiff = cv2.cvtColor( cv2.inRange( cv2.blur( diff, ( 5, 5 ) ), white1, white2 ), cv2.COLOR_GRAY2BGR )
edge = cv2.bitwise_and( cv2.Canny( cv2.blur( cimg, ( 2, 2 ) ), 133, 100 ), cv2.cvtColor( cv2.bitwise_and( cimg, maxdiff ), cv2.COLOR_BGR2GRAY ) )
w1 = np.array( [ 50 ] )
w2 = np.array( [ 256 ] )
edge = cv2.cvtColor( cv2.inRange( edge, w1, w2 ), cv2.COLOR_GRAY2BGR )
dedge = cv2.dilate( edge, np.ones( ( 5, 5 ), np.uint8 ), iterations = 3 )
rcimg = cv2.bitwise_and( dedge, cimg )
'''This section is for contour identification'''
bwcimg = cv2.cvtColor( rcimg, cv2.COLOR_BGR2GRAY )
ret, thresh = cv2.threshold( bwcimg, 0, 255, 0 )
dthresh = cv2.dilate( thresh, np.ones( ( 5, 5 ), np.uint8 ), iterations = 5 )
im2, contours, hierarchy = cv2.findContours( thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
#cv2.drawContours( cimg, contours, -1, ( 0, 255, 0 ), 3 )
fMask = np.zeros( ( cimgh, cimgw ), np.uint8 )
cMask = np.zeros( ( cimgh, cimgw ), np.uint8 )
for o in contours:
cv2.fillPoly( cMask, pts = [ o ], color = 255 )
pcontavg = cv2.meanStdDev( bwcimg, mask = cMask )
pcontmodavg = cv2.meanStdDev( bwcimg, mask = cv2.bitwise_xor( cv2.dilate( cMask, np.ones( ( 5, 5 ), np.uint8 ), iterations = 3 ), cMask ) )
#For this if statement, it appears that the magic value for the blue star (greyscaled) example is between 10.0125 and 10.01, and the splotches of white tend to fall within a seperate but similar range
#Having an upper bound may not work later for different targets, notably white one
#A value of ten should pick up many dull objects in theory
if( abs( pcontavg[ 1 ][ 0 ] - pcontmodavg[ 1 ][ 0 ] ) > 10.0 ):
fMask = cv2.bitwise_or( fMask, cMask )
cMask = np.zeros( ( cimgh, cimgw ), np.uint8 )
'''Just some other things, display mostly'''
cv2.imshow( 'ori', cimg )
cv2.imshow( 'cut1', cv2.bitwise_and( cv2.cvtColor( dthresh, cv2.COLOR_GRAY2BGR ), cimg ) )
cv2.imshow( 'cut2', cv2.bitwise_and( cv2.cvtColor( fMask, cv2.COLOR_GRAY2BGR ), cimg ) )
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
frame.release()
| shihaocao/RC1 | areacompare.py | areacompare.py | py | 2,878 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.blur",
"line_numb... |
30903061705 | import requests
import bs4
from bs4.dammit import EncodingDetector
from urllib.request import Request, urlopen, urlretrieve
def get_flag(url):
try:
url = url.replace(" ", "%20")
useragent = ["Mozilla/5.0 (compatible; Googlebot/2.1; +http://google.com/bot.html)",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/42.0"]
headers = {"User-Agent": useragent[1]}
req = Request(url)
a = urlopen(req).read()
html_encoding = EncodingDetector.find_declared_encoding(a, is_html=True)
search_soup = bs4.BeautifulSoup(a, from_encoding=html_encoding, features="lxml")
resp = search_soup.find_all('a')
for i in resp:
filename = i.extract().get_text()
url_new = url+filename
url_new = url_new.replace(" ", "%20")
if (filename[-1] == '/' and filename[0] != '.'):
get_flag(url_new)
print(url_new)
except Exception as e:
print(e)
result = get_flag("https://www.google.com/search?client=firefox-b-d&q=questrade")
print(result) | cfowles27293/leetcode | venv/server_get_exploit.py | server_get_exploit.py | py | 1,332 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.Request",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.dammit.EncodingDetector.find_declared_encoding",
"line_number": 16,
"usage_type": "call... |
35076567124 | """
Factory for HID transport connections.
Currently supports only Cython/HIDAPI
"""
import platform
from logging import getLogger
from ..pyedbglib_errors import PyedbglibNotSupportedError
def hid_transport(library="hidapi"):
"""
Dispatch a transport layer for the OS in question
The transport layer is typically used to connect to a tool and then it is passed in as a parameter when creating
protocol objects. An example where the transport layer is used to create an instance of the housekeepingprotocol
for communication with the nEDBG debugger::
from pyedbglib.hidtransport.hidtransportfactory import hid_transport
transport = hid_transport()
connect_status = False
try:
connect_status = transport.connect(serial_number='', product='nedbg')
except IOError as error:
print("Unable to connect to USB device ({})".format(error))
if not connect_status:
print("Unable to connect to USB device")
housekeeper = housekeepingprotocol.Jtagice3HousekeepingProtocol(transport)
:param library: Transport library to use, currently only 'hidapi' is supported which will use the libusb hidapi
:type library: string
:returns: Instance of transport layer object
:rtype: class:cyhidapi:CyHidApiTransport
"""
logger = getLogger(__name__)
operating_system = platform.system().lower()
logger.debug("HID transport using library '{:s}' on OS '{:s}'".format(library, operating_system))
# HID API is the primary transport
if library == 'hidapi':
hid_api_supported_os = ['windows', 'darwin', 'linux', 'linux2']
if operating_system in hid_api_supported_os:
from .cyhidapi import CyHidApiTransport
return CyHidApiTransport()
msg = "System '{0:s}' not implemented for library '{1:s}'".format(operating_system, library)
logger.error(msg)
raise PyedbglibNotSupportedError(msg)
# Other transports may include cmsis-dap DLL, atusbhid (dll or so) etc
msg = "Transport library '{0}' not implemented.".format(library)
logger.error(msg)
raise PyedbglibNotSupportedError(msg)
| SpenceKonde/megaTinyCore | megaavr/tools/libs/pyedbglib/hidtransport/hidtransportfactory.py | hidtransportfactory.py | py | 2,186 | python | en | code | 471 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cyhidapi.CyHidApiTransport",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pyedbgli... |
8828354298 | #!/usr/bin/env python
# coding: utf-8
# In[38]:
# Importar las librerias
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import sklearn
# In[2]:
get_ipython().run_line_magic('cd', "'/home/jovyan/python/dataset'")
get_ipython().run_line_magic('ls', '')
# In[9]:
dir = '/home/jovyan/python/dataset/{}'.format('data4.csv')
data = pd.read_csv(dir)
# In[10]:
len(data)
# ### Dividir utilizando la distribución normal
# In[11]:
a = np.random.randn(len(data))
# In[12]:
plt.hist(a)
# In[23]:
check = (a <0.75)
# In[24]:
check
# In[25]:
plt.hist(check.astype(int))
# In[26]:
training = data[check]
testing = data[~check]
# In[27]:
len(training)
# In[28]:
len(testing)
# ### Con la libreria sklearn
# In[31]:
train, test = train_test_split(data, test_size = 0.2)
# In[32]:
len(train)
# In[33]:
len(test)
# ### Usando una función de shuffle
# In[36]:
data.head()
# In[41]:
data = sklearn.utils.shuffle(data)
# In[44]:
cut_id = int(0.75* len(data))
train_data = data[:cut_id ]
test_data = data[cut_id +1:]
# In[45]:
len(train_data)
# In[46]:
len(test_data)
# In[ ]:
| afnarqui/python | Testing.py | Testing.py | py | 1,221 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplo... |
32952177369 | from lib2to3.pgen2 import token
import requests
import json
#Variáveis iniciais
def get_logradouro():
inicio = 1
qtregistros = 99
mais_paginas = True
token_entidade = "xxxxxx"
conta_registros = 0
lote = 0
#Parametros da requisição
url = "https://e-gov.betha.com.br/glb/service-layer/v2/api/logradouros/"
querystring = {"iniciaEm": inicio,"nRegistros": qtregistros}
payload = ""
headers = {"Authorization": "Bearer "+ token_entidade}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
logradouros_json = json.loads(response.text)
mais_paginas = logradouros_json['maisPaginas']
print("Possui mais registros?:" + str(logradouros_json['maisPaginas']))
print("Quantidade de registros retornados no lote:" + str(len(logradouros_json['conteudo'])))
while mais_paginas:
lote += 1
querystring = {"iniciaEm": inicio,"nRegistros": qtregistros}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
logradouros_json = json.loads(response.text)
print("Lote = " + str(lote))
print("Registros = " + str(conta_registros))
print("Possui mais registros?:" + str(logradouros_json['maisPaginas']))
print("Quantidade de registros retornados no lote:" + str(len(logradouros_json['conteudo'])))
for logradouro in logradouros_json['conteudo']:
logradouros = open('logradouros.json', 'a', encoding='utf-8')
logradouros.write(str(logradouro).replace("'",'"'))
logradouros.write('\n')
inicio = logradouro['idGerado']['iLogradouros']
logradouros.close
conta_registros += 1
mais_paginas = logradouros_json['maisPaginas']
get_logradouro()
| ermescarletto/bth | get_logradouros.py | get_logradouros.py | py | 1,862 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "requests.request",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_n... |
146002694 | import os
import sys
import argparse
from spinalcordtoolbox.utils import Metavar, SmartFormatter, init_sct, extract_fname, printv
def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description='Transpose bvecs file (if necessary) to get nx3 structure.',
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-bvec',
metavar=Metavar.file,
required=True,
help="Input bvecs file. Example: bvecs.txt"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-o',
metavar=Metavar.file,
default='',
help="Output bvecs file. By default, input file is overwritten. Example: bvecs_t.txt"
)
optional.add_argument(
'-v',
choices=['0', '1', '2'],
default='1',
help="Verbose: 0 = nothing, 1 = basic, 2 = extended."
)
return parser
# MAIN
# ==========================================================================================
def main(args=None):
parser = get_parser()
if args:
arguments = parser.parse_args(args)
else:
arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
fname_in = arguments.bvec
fname_out = arguments.o
verbose = int(arguments.v)
init_sct(log_level=verbose, update=True) # Update log level
# get bvecs in proper orientation
from dipy.io import read_bvals_bvecs
bvals, bvecs = read_bvals_bvecs(None, fname_in)
# # Transpose bvecs
# printv('Transpose bvecs...', verbose)
# # from numpy import transpose
# bvecs = bvecs.transpose()
# Write new file
if fname_out == '':
path_in, file_in, ext_in = extract_fname(fname_in)
fname_out = path_in + file_in + ext_in
fid = open(fname_out, 'w')
for iLine in range(bvecs.shape[0]):
fid.write(' '.join(str(i) for i in bvecs[iLine, :]) + '\n')
fid.close()
# display message
printv('Created file:\n--> ' + fname_out + '\n', verbose, 'info')
# Start program
# =======================================================================================================================
if __name__ == "__main__":
init_sct()
# call main function
main()
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITHUB_REPOS/neuropoly@spinalcordtoolbox/scripts/sct_dmri_transpose_bvecs.py | sct_dmri_transpose_bvecs.py | py | 2,558 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "spinalcordtoolbox.utils.SmartFormatter",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_number": 14,
"usage_type": "call"
},
{
"a... |
41766349642 | from py2neo import Graph, Node, Relationship
from flask import jsonify
import json
import re
import chardet
from demjson import decode
import numpy as np
from neo4j import GraphDatabase, basic_auth, kerberos_auth, custom_auth, TRUST_ALL_CERTIFICATES
def build_nodes(node_record):
data = {'id': str(node_record['id(n)'])}
return {'data': data}
def build_edges(relation_record):
data = {'source': str(relation_record['id(k)']),
'target': str(relation_record['id(n)']),
'relationship': "生产"}
return {'data': data}
def old():
graph = Graph()
nodes = list(map(build_nodes, graph.run('MATCH (n:product) WHERE n.title=~".*索尼.*" RETURN id(n) LIMIT 50').data()))
nodes.append({'data': {'id': '14'}})
edges = list(map(build_edges, graph.run(
'MATCH (n:product) WHERE n.title=~".*索尼.*" MATCH (k)-[r]-(n) RETURN id(n),id(k) LIMIT 50').data()))
elements = {'nodes': nodes, 'edges': edges}
js = json.dumps(elements)
print(js)
def re_data():
graph = Graph()
data = graph.run('match (n:newNode)-[r]-(k:newNode) return n,r,k').data()
# print(type(data))
# print(data)
# for i in data:r'^("[^"]+")([^"]+)("[^"]+")'
# for e in i:
# if 'title' in e:
# print(i)
nodes = []
edges = []
# print(data)
for i in data:
for e in i:
if 'title' in i[e]:
id_pattern = re.compile(r'_(\d+):')
id = id_pattern.findall(str(i[e]))
label_pattern = re.compile(r':(\w+)')
label = label_pattern.findall(str(i[e]))
title_pattern = re.compile(r'title: \'(\S+)\'')
title = title_pattern.findall(str(i[e]))
t_title = eval(repr(title).replace('\\\\', '\\'))
s_title = str(t_title).replace("['", "").replace("']", "")
info_pattern = re.compile(r'\"(.+)\"')
info = info_pattern.findall(str(i[e]))
t_info = eval(repr(info).replace('\\\\', '\\'))
s_info = str(t_info).replace("['", "").replace("']", "")
data = {'id': str(id).replace("['", "").replace("']", ""),
'label': str(label).replace("['", "").replace("']", ""),
'title': s_title, 'info': s_info}
nodes.append({'data': data})
else:
target_pattern = re.compile(r'_(\d+)\)-')
target = target_pattern.findall(str(i[e]))
source_pattern = re.compile(r'->\(_(\d+)')
source = source_pattern.findall(str(i[e]))
rela_pattern = re.compile(r'\'(.+)\'')
rela = rela_pattern.findall(str(i[e]))
r = eval(repr(rela).replace('\\\\', '\\'))
s = str(r).replace("['", "").replace("']", "")
data = {'source': str(source).replace("['", "").replace("']", ""),
'target': str(target).replace("['", "").replace("']", ""),
'relationship': s}
edges.append({'data': data})
elements = {'nodes': nodes, 'edges': edges}
js = json.dumps(elements, ensure_ascii=False)
print(js)
class Solution:
def entityParser(self, text: str) -> str:
data = text.replace(""", "\"").replace("'", "\'").replace("&", "&").replace(">", ">").replace(
"<", "<").replace("⁄", "/")
return data
def jaccard_coefficient(terms_model, reference):
grams_reference = set(reference)
grams_model = set(terms_model)
temp = 0
for i in grams_reference:
if i in grams_model:
temp = temp + 1
dis = len(grams_model) + len(grams_reference) - temp
jaccard_res = float(temp / dis)
return jaccard_res
def dice_coefficient(a, b):
"""dice coefficient 2nt/na + nb."""
a_bigrams = set(a)
b_bigrams = set(b)
overlap = len(a_bigrams & b_bigrams)
return overlap * 2.0 / (len(a_bigrams) + len(b_bigrams))
def edit_distance(word1, word2):
len1 = len(word1)
len2 = len(word2)
dp = np.zeros((len1 + 1, len2 + 1))
for i in range(len1 + 1):
dp[i][0] = i
for j in range(len2 + 1):
dp[0][j] = j
for i in range(1, len1 + 1):
for j in range(1, len2 + 1):
delta = 0 if word1[i - 1] == word2[j - 1] else 1
dp[i][j] = min(dp[i - 1][j - 1] + delta, min(dp[i - 1][j] + 1, dp[i][j - 1] + 1))
return dp[len1][len2]
def print_title_of(tx, name):
for record in tx.run("MATCH (a:class)-[]->(f) "
"WHERE a.name = {name} "
"RETURN f.name", name=name):
print(record["f.name"])
if __name__ == '__main__':
uri = "bolt://localhost:7687"
driver = GraphDatabase.driver(uri, auth=("neo4j", "password"))
session = driver.session()
data = session.run("MATCH (m)-[r]->(n) RETURN m.title, r.relation, n.title LIMIT 100")
# print(data)
blists = []
out_j = []
out_d = []
for d in data:
bs = str(d[0])
blists.append(bs)
for i in range(len(blists)):
for j in range(0, i):
a = blists[i]
b = blists[j]
td_j = jaccard_coefficient(a, b)
td_d = dice_coefficient(a, b)
std = edit_distance(a, b) / max(len(a), len(b))
fy = 1 - std
avg_j = (td_j + fy) / 2
avg_d = (td_d + fy) / 2
if avg_j < 1:
# print(blists[i], blists[j])
# print('avg_sim: ', avg_j)
out_j.append(blists[i] + " " + blists[j] + " " + str(avg_j))
if avg_d < 1:
out_d.append(blists[i] + " " + blists[j] + " " + str(avg_d))
list_j = list(set(out_j))
list_d = list(set(out_d))
list_j.sort()
list_d.sort()
print(list_j)
| ownia/KGRM | kg_web/test.py | test.py | py | 5,914 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "py2neo.Graph",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "py2neo.Graph",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": ... |
2545116778 | import os
from typing import Dict, List, Optional
class CopySpec:
"""Copy specification of a single file or directory."""
def __init__(self, source_path: str, target_path: Optional[str] = None):
self.source_path = source_path
self.target_path = target_path
def get_target(self) -> str:
"""Get the target path of the copy spec.
Returns:
str: If a target_path is given explicitly, it will be returned.
Otherwise the source_path will be returned.
"""
if self.target_path is not None:
return self.target_path
return self.source_path
def copy_templates(
template_dir: str,
target_dir: str,
template_file_mapping: List[CopySpec],
variables: Dict[str, str],
) -> None:
"""Copy templates from the template dir to the target dir.
Args:
template_dir (str): Path to the directory containing the template files.
target_dir (str): Path to the target directory.
template_file_mapping (Dict[str, str]): A mapping of source path to target path.
variables (Dict[str, str]): Name to value mapping which will be replaced when parsing the template files.
"""
for file_to_copy in template_file_mapping:
with open(
os.path.join(
template_dir,
file_to_copy.source_path,
),
encoding="utf-8",
) as file_in:
target_file_path = os.path.join(target_dir, file_to_copy.get_target())
os.makedirs(os.path.split(target_file_path)[0], exist_ok=True)
with open(target_file_path, encoding="utf-8", mode="w") as file_out:
lines = file_in.readlines()
for line in lines:
for key, value in variables.items():
line = line.replace("${{ " + key + " }}", value)
file_out.write(line)
| eclipse-velocitas/devenv-devcontainer-setup | grpc-interface-support/src/util/templates.py | templates.py | py | 1,938 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_numbe... |
1438555767 | #!/usr/bin/env python3
import uuid
from typing import List
import click
import backoff
from bravado.exception import HTTPConflict, HTTPError
from neptune.new.internal.backends.hosted_neptune_backend import HostedNeptuneBackend
from neptune.new.internal.credentials import Credentials
class Invitation:
def __init__(self, _uuid: uuid.UUID, invitee: str):
self.uuid = _uuid
self.invitee = invitee
@backoff.on_exception(backoff.expo, HTTPError, max_tries=5)
def invite_member(backend, organization, invitee) -> List[Invitation]:
payload = {
"organizationIdentifier": organization,
"invitationsEntries": [
{
"invitee": invitee,
"invitationType": "emailRecipient",
"roleGrant": "member",
"addToAllProjects": False
}
]
}
try:
response = backend.backend_client.api.createOrganizationInvitations(
newOrganizationInvitations=payload,
**backend.DEFAULT_REQUEST_KWARGS)\
.response()
except HTTPConflict:
click.echo(f"ERROR: Pending invitation for '{invitee}'")
return []
return list(map(lambda r: Invitation(_uuid=r.id, invitee=r.invitee), response.result.invitations or []))
@backoff.on_exception(backoff.expo, HTTPError, max_tries=5)
def remove_member(backend: HostedNeptuneBackend, organization: str, username: str):
backend.backend_client.api.deleteOrganizationMember(
organizationIdentifier=organization,
userId=username,
**backend.DEFAULT_REQUEST_KWARGS)\
.response()
click.echo(f"Removed '{username}' from organization '{organization}'")
@click.group()
def cli():
pass
@cli.command()
@click.argument('organization')
@click.option('--invitee-email', 'email', help='Email to invite')
@click.option('--admin-api-token', envvar='NEPTUNE_API_TOKEN', help='API Token for organization Admin')
def invite(admin_api_token, email, organization):
credentials = Credentials(api_token=admin_api_token)
config_api_url = credentials.api_url_opt or credentials.token_origin_address
backend = HostedNeptuneBackend(credentials=credentials)
invitations = invite_member(backend=backend, organization=organization, invitee=email)
for invitation in invitations:
click.echo(f"{config_api_url}/-/invitations/organization/{invitation.uuid}")
@cli.command()
@click.argument('organization')
@click.option('--removed-username', 'username', help='User to removal')
@click.option('--admin-api-token', envvar='NEPTUNE_API_TOKEN', help='API Token for organization Admin')
def remove(admin_api_token, username, organization):
credentials = Credentials(api_token=admin_api_token)
backend = HostedNeptuneBackend(credentials=credentials)
remove_member(backend=backend, organization=organization, username=username)
if __name__ == '__main__':
cli()
| neptune-ai/neptune-admin-utils | manage_users.py | manage_users.py | py | 2,931 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "uuid.UUID",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "bravado.exception.HTTPConflict",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "click.echo",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "backoff.on_e... |
11595543761 | import torch
from gtts import gTTS
import os
import keyboard
from tqdm import tqdm
import random
import speech_recognition as sr
def recognize_speech_from_mic(recognizer, microphone):
"""Transcribe speech from recorded from `microphone`.
Returns a dictionary with three keys:
"success": a boolean indicating whether or not the API request was
successful
"error": `None` if no error occured, otherwise a string containing
an error message if the API could not be reached or
speech was unrecognizable
"transcription": `None` if speech could not be transcribed,
otherwise a string containing the transcribed text
"""
# check that recognizer and microphone arguments are appropriate type
if not isinstance(recognizer, sr.Recognizer):
raise TypeError("`recognizer` must be `Recognizer` instance")
if not isinstance(microphone, sr.Microphone):
raise TypeError("`microphone` must be `Microphone` instance")
# adjust the recognizer sensitivity to ambient noise and record audio
# from the microphone
with microphone as source:
print("Listening...")
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
print("cold mic...")
# set up the response object
response = {"success": True, "error": None, "transcription": None}
# try recognizing the speech in the recording
# if a RequestError or UnknownValueError exception is caught,
# update the response object accordingly
try:
response["transcription"] = recognizer.recognize_google(audio)
except sr.RequestError:
# API was unreachable or unresponsive
response["success"] = False
response["error"] = "API unavailable"
except sr.UnknownValueError:
# speech was unintelligible
response["error"] = "Unable to recognize speech"
return (
response
if response["success"]
else recognize_speech_from_mic(recognizer, microphone)
)
def generate(model, tokenizer, context):
# tokenize question and text as a pair
encodings = tokenizer.encode_plus(context)
input_ids = encodings["input_ids"]
attention_mask = encodings["attention_mask"]
output = model.generate(
inputs=torch.tensor([input_ids]),
attention_mask=torch.tensor([attention_mask]),
do_sample=True,
num_beams=3,
max_new_tokens=50,
temperature=1.8,
repetition_penalty=1.32,
)
return tokenizer.decode(output[0])
def speak(text):
myobj = gTTS(text=text, lang="en", slow=False)
myobj.save("output/speech.mp3")
# Playing the converted file
return os.system("mpg321 output/speech.mp3")
def sample_context(path):
with open(path, "r") as contexts:
lines = contexts.readlines()
return random.sample(lines, 1)[0]
def main():
# Model
with open("models/generator.pth", "rb") as f:
model = torch.load(f, map_location=torch.device('cpu'))
# Tokenizer
with open("models/tokenizer.pth", "rb") as f:
tokenizer = torch.load(f, map_location=torch.device('cpu'))
# create recognizer and mic instances
recognizer = sr.Recognizer()
microphone = sr.Microphone()
while True:
print("\n\n\n\n\n\n\n\n\n\n\n\nReady.")
if keyboard.read_key() == "space":
# context = recognize_speech_from_mic(recognizer, microphone)["transcription"]
context = sample_context("data/context.txt")
idx_start = len(context)
# Create output file
with open("output/crazy.txt", "w") as crazy:
# Generate while reseeding 5 times
for _ in tqdm(range(5)):
context = generate(model, tokenizer, str(context).strip())
context = context.replace("\n", "").replace("<|endoftext|>", "").replace("<pad>", "")
# Write completed generation to file
crazy.write(context)
# Display response
print(context[idx_start:])
# Create and play an audio file of the output
# speak(context[idx_start:])
main()
| jahkelr/machine-learning | Unsupervised/ToxBot/src/generator.py | generator.py | py | 4,254 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "speech_recognition.Recognizer",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "speech_recognition.RequestError",
"line_number": 47,
"usage_type": ... |
26681362352 | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from textblob import TextBlob
import numpy as np
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Histogram
from sklearn.externals import joblib
from sqlalchemy import create_engine
ONE_LABEL_COL = "child_alone"
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('tb_msg', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
## For genre analysis
# extract data needed for visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
pos_ratios = list((df[df.columns[4:]] > 0).mean(axis=1))
cat_names = list(df.columns[4:])
message_lengths = df.message.apply(lambda text: len(TextBlob(text).tokens)).values
# create visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts,
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
},
{
'data': [
Bar(
x=cat_names,
y=pos_ratios
)
],
'layout': {
'title': 'Distribution of Non-Zero labels in Each Category',
'yaxis': {
'title': "Ratio of Positive Instances"
},
'xaxis': {
'title': "Category Name"
}
}
},
{
'data': [
Histogram(
x=message_lengths,
xbins=dict(start=np.min(message_lengths), size=0.8, end=np.max(message_lengths))
)
],
'layout': {
'title': 'Distribution of Message Length',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Message Length"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0].tolist()
one_label_idx = df.columns[4:].tolist().index(ONE_LABEL_COL)
classification_labels.insert(one_label_idx, 0)
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| qiaochen/TextClsApp | app/run.py | run.py | py | 3,895 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "s... |
23206544173 | import robin_stocks.robinhood as r
import json
import pandas as pd
# Reads data about the previous trades from a JSON file and prints it
def read_trade_history(file_name):
print("read_trade_history()")
with open(file_name) as json_file:
data = json.load(json_file)
for sell_date, event in data.items():
print(sell_date + ": ")
for symbol, dict in event.items():
quantity, price, change, percent, purchase_date = str(int(float(dict.get("quantity")))), dict.get("price"), dict.get("equity_change"), dict.get("percent_change"), dict.get("purchase_date")
print("\tSold " + quantity + " shares of "+ symbol + " at " + price + ", " + change + " (" +
percent + "%) profit/loss, bought on " + purchase_date)
# Writes data about a trade to a JSON file, containing the sell date, buy date, buy price sell price, etc.
def update_trade_history(symbols, holdings_data, file_name):
print("update_trade_history()")
with open(file_name) as json_file:
data = json.load(json_file)
current_time = str(pd.Timestamp("now"))
data[current_time] = ({})
for symbol in symbols:
data[current_time].update({symbol: holdings_data[symbol]})
with open(file_name, 'w') as outfile:
json.dump(data, outfile)
# Returns the amount of money you've gained/lost through trading since the creation of your account, minus dividends
def get_total_gains_minus_dividends():
    """Return the total trading gain/loss since account creation, excluding dividends.

    Computed as current extended-hours equity minus dividends received minus net
    money invested (deposits - completed withdrawals).

    :return: float gain/loss in account currency
    """
    print("get_total_gains_minus_dividends()")
    profileData = r.load_portfolio_profile()
    print(profileData)
    allTransactions = r.get_bank_transfers()
    # NOTE(review): deposits intentionally(?) do not filter on state == 'completed',
    # matching the original behavior — confirm whether pending deposits should count.
    deposits = sum(float(x['amount']) for x in allTransactions if x['direction'] == 'deposit')
    withdrawals = sum(float(x['amount']) for x in allTransactions
                      if x['direction'] == 'withdraw' and x['state'] == 'completed')
    money_invested = deposits - withdrawals
    print(deposits)
    dividends = r.get_total_dividends()
    # Removed dead code: an unused `holding` list and an unused percent-dividend ratio.
    totalGainMinusDividends = float(profileData['extended_hours_equity']) - dividends - money_invested
    return totalGainMinusDividends
| messi618/TradingAlgorithm | vivekTradingBot/tradingStatistics.py | tradingStatistics.py | py | 2,230 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 2... |
17514167841 | from datetime import datetime
import pytz as pytz
import boto3
# Interactive S3 object-version viewer: pick a bucket, an object, and a cutoff
# date, then print the newest object version created strictly before that date.
boto3.setup_default_session(profile_name='s3-viewer')
s3 = boto3.client('s3')

# Let the user choose a bucket.
response = s3.list_buckets()
bucket_names = [bucket['Name'] for bucket in response['Buckets']]
print('Select the bucket:')
for name in bucket_names:
    print(name)
selected_bucket = input('Enter bucket name: ')

# Let the user choose an object inside the bucket.
response = s3.list_objects(Bucket=selected_bucket)
object_names = [obj['Key'] for obj in response['Contents']]
print('\nSelect the object:')
for name in object_names:
    print(name)
selected_object = input('\nEnter object key: ')

# List all versions of the object, oldest first.
response = s3.list_object_versions(Bucket=selected_bucket, Prefix=selected_object)
versions = response['Versions']
versions.sort(key=lambda version: version['LastModified'])
print('\nThe object has the following versions:')
for version in versions:
    print(version['LastModified'])

input_date = input("\nEnter the date to get the object in the next format: 'YYYY-MM-dd HH:mm:ss': ")
# S3 LastModified timestamps are timezone-aware, so the user date must be too.
date = datetime.strptime(input_date, '%Y-%m-%d %H:%M:%S').replace(tzinfo=pytz.UTC)

# Keep the last (newest) version whose LastModified is strictly before the date.
selected_version = None
for version in versions:
    if date > version['LastModified']:
        selected_version = version

if selected_version:
    response = s3.get_object(Bucket=selected_bucket, Key=selected_object, VersionId=selected_version['VersionId'])
    print(response['Body'].read().decode('utf-8'))
else:
    print('No such version exists')
| Aleh-Zamzhytski/cloudx | get_object_by_date.py | get_object_by_date.py | py | 1,407 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boto3.setup_default_session",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "dat... |
6123825523 | from PIL import Image
import numpy as np
import matplotlib.cm as cm
import math
# Render a colour map of an angle function over an 8x8 window of the complex
# plane at 1000x1000 pixels, and save it as img/z_squared.png.
ASPECT_RATIO = 1.0 / 1.0
FRAME_HEIGHT = 8.0
FRAME_WIDTH = FRAME_HEIGHT * ASPECT_RATIO
XRES = 1000
YRES = 1000
x_min = -FRAME_WIDTH/2
x_max = FRAME_WIDTH/2
y_min = -FRAME_HEIGHT/2
y_max = FRAME_HEIGHT/2
# Pixel-edge coordinates; each pixel is sampled at the centre of its cell.
x_values = np.linspace(x_min, x_max, XRES+1)
y_values = np.linspace(y_min, y_max, YRES+1)
#print(x_values)
#print(y_values)
pixels = []
# Iterate rows from largest y down so the image's top row is the top of the frame.
for i in range(len(y_values) - 1)[::-1]:
    pixels.append([])
    for j in range(len(x_values) - 1):
        x1, x2 = x_values[j:j + 2]
        y1, y2 = y_values[i:i + 2]
        x, y = (x1 + x2)/2, (y1 + y2)/2
        # Mirror the right half-plane vertically — presumably for visual
        # symmetry of the colour field; TODO confirm intent.
        if x > 0:
            y = -y
        # if -2 < x < 2 and -2 < y < 2:
        #     pixel = [0, 0, 0]
        # else:
        #     pixel = [255, 255, 255]
        z = (x+y*1j)
        if z.real != 0:
            # Shift atan's (-pi/2, pi/2) range into (0, 2*pi) for the colour map.
            theta = 2*(math.atan(z.imag/z.real) + np.pi/2)
        else:
            # Points on the imaginary axis get the mid-range angle.
            theta = np.pi
        # viridis returns RGBA floats in [0, 1]; scale to 8-bit channel values.
        pixel = [x * 255 for x in cm.viridis(theta/(2*np.pi))]
        pixels[-1].append(pixel)
array = np.array(pixels, dtype=np.uint8)
new_image = Image.fromarray(array)
new_image.save("img/z_squared.png")
new_image.show()
| vivek3141/videos | create_img.py | create_img.py | py | 1,195 | python | en | code | 132 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.atan",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number":... |
35466272972 | """Models and all supporting code"""
import cv2
import numpy as np
import utils
class OtsuThresholding(object):
    """Represents model for predicting i-contours given images and o-contours using Otsu's method"""

    def __init__(self, kernel_size=None):
        """Configures loader

        :param kernel_size: size of kernel for morphological closing, when `None` closing is not performed,
            defaults to `None`
        """
        self._kernel_size = kernel_size

    def predict(self, input):
        """Predicts i-contour

        :param input: tuple with images and o-contours
        :return: predicted i-contours (boolean array, same shape as the inputs)
        """
        images, o_contours = input
        assert images.ndim == o_contours.ndim == 3
        assert images.shape == o_contours.shape
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
        # equivalent dtype and works on all NumPy versions.
        assert o_contours.dtype == bool
        i_contours = []
        for image, o_contour in zip(images, o_contours):
            # Normalize to 8-bit, as required by OpenCV's Otsu implementation.
            image = (image / image.max() * 255).astype(np.uint8)
            # Threshold only the pixels inside the o-contour mask.
            _, i_contour_vector = cv2.threshold(image[o_contour], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            i_contour_vector = i_contour_vector.squeeze()
            i_contour = np.copy(o_contour)
            i_contour[o_contour] = i_contour_vector
            if self._kernel_size:
                # Optional morphological closing to fill small holes.
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (self._kernel_size, self._kernel_size))
                i_contour = cv2.morphologyEx(i_contour.astype(np.uint8), cv2.MORPH_CLOSE, kernel).astype(bool)
                i_contour[np.logical_not(o_contour)] = False  # ignore everything outside o-contour
            i_contours.append(i_contour)
        i_contours = np.array(i_contours)
        return i_contours

    def score(self, input, i_contours):
        """Computes mean IOU on the given data and labels

        :param input: tuple with images and o-contours
        :param i_contours: i-contours labels
        :return: mean IOU
        """
        i_contours_pred = self.predict(input)
        return utils.iou(i_contours, i_contours_pred)
| vshmyhlo/dicom-data-loader | models.py | models.py | py | 2,051 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.bool",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
... |
25507365738 | from django.shortcuts import render
from django.http import JsonResponse
import json
from django.shortcuts import get_object_or_404
from django.http import Http404
from django.views.decorators.csrf import csrf_exempt
from . import models
def index(request):
    """Render the landing page."""
    context = {}
    return render(request, 'main/index.html', context)
def charts(request, onlyChart, sickness):
ctx = {}
aux = {}
aux2 = {}
if sickness == 'all':
queryset = models.Case.objects.all()
else:
queryset = models.Case.objects.filter(sickness=sickness)
for case in queryset:
code = case.municipality.state.country.code
year = case.date.year
if aux.has_key(code) and aux[code].has_key(year):
aux[code][year] += 1
else:
aux[code] = {year : 1}
if aux2.has_key(year):
aux2[year] += 1
else:
aux2[year] = 1
world = []
for year in aux2:
world.append([year, aux2[year]])
countries = {}
data = []
for code in aux:
code_data = []
count = 0
for year in aux[code]:
code_data.append([year, aux[code][year]])
count += aux[code][year]
data.append({'code3': code, 'value':count})
countries[code]={
'code3': code,
'data': code_data
}
ctx['main_world'] = {
'data': data,
'all': world,
'countries': countries
}
world = {}
countries_data = {}
for case in queryset:
code = case.municipality.state.country.code
year = case.date.year
if countries_data.has_key(code) and countries_data[code].has_key(year):
if case.age < 13:
countries_data[code][year][0] += 1
elif case.age < 19:
countries_data[code][year][1] += 1
elif case.age < 31:
countries_data[code][year][2] += 1
else:
countries_data[code][year][3] += 1
else:
aux = [0, 0, 0, 0]
if case.age < 13:
aux[0] = 1
elif case.age < 19:
aux[1] = 1
elif case.age < 31:
aux[2] = 1
else:
aux[3] = 1
countries_data[code] = {year: aux}
if world.has_key(year):
if case.age < 13:
world[year][0] += 1
elif case.age < 19:
world[year][1] += 1
elif case.age < 31:
world[year][2] += 1
else:
world[year][3] += 1
else:
aux = [0, 0, 0, 0]
if case.age < 13:
aux[0] = 1
elif case.age < 19:
aux[1] = 1
elif case.age < 31:
aux[2] = 1
else:
aux[3] = 1
world[year] = aux
bar_chart_values = [[],[],[],[]]
bar_chart_totals = [0, 0, 0, 0]
for i in world.values():
for j in range(4):
bar_chart_values[j].append(i[j])
bar_chart_totals[j] += i[j]
countries_values = {}
countries_totals = {}
for code in countries_data:
values = [[],[],[],[]]
totals = [0, 0, 0, 0]
for i in countries_data[code].values():
for j in range(4):
values[j].append(i[j])
totals[j] += i[j]
countries_values[code] = values
countries_totals[code] = totals
ctx['bar_chart'] = {
'categories': world.keys(),
'data': bar_chart_values,
'totals': bar_chart_totals,
'countries_values': countries_values,
'countries_totals': countries_totals
}
aux = {}
aux2={}
for case in queryset.filter(recovered=False):
code = case.municipality.state.country.code
date = json.dumps(case.date.isoformat())
if aux.has_key(code) and aux[code].has_key(date):
aux[code][date].append(case.age)
else:
aux[code] = {date : [case.age]}
if aux2.has_key(date):
aux2[date].append(case.age)
else:
aux2[date] = [case.age]
not_recovered_total = []
for date in aux2:
for age in aux2[date]:
not_recovered_total.append([date, age])
countries_values = {}
countries_total = []
'''for code in aux:
code_data = []
count = 0
for year in aux[code]:
code_data.append([date, aux[code][year]])
count += aux[code][date]
countries_total.append({'code3': code, 'value':count})
countries_values[code]={
'code3': code,
'data': code_data
}'''
ctx['scatter_chart'] = {
'total': not_recovered_total,
'countries_values': countries_values,
'countries_totals': countries_totals
}
if not onlyChart:
return render(request, 'main/charts.html', ctx)
else:
return render(request, 'main/charts/main_chart.html', ctx) | jfsanchez91/CaseReportOpenData | main/views.py | views.py | py | 4,026 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "django.s... |
15534508158 | import gym
from stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv
from stable_baselines3 import PPO, SAC
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.utils import get_schedule_fn
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.distributions import DiagGaussianDistribution
from src.environment_wrappers.env_wrappers import RewardWrapper, SkillWrapper, SkillWrapperFinetune
from src.environment_wrappers.tasks_wrappers import HalfCheetahTaskWrapper, WalkerTaskWrapper, AntTaskWrapper
from src.utils import best_skill
from src.mi_lower_bounds import mi_lower_bound
from src.models.models import Discriminator, SeparableCritic, ConcatCritic
from src.replayBuffers import DataBuffer
from src.callbacks.callbacks import DiscriminatorCallback, VideoRecorderCallback, EvaluationCallback, MI_EvalCallback
from src.config import conf
import torch
import torch.nn as nn
import torch.optim as opt
from torch.utils.tensorboard import SummaryWriter
import copy
import time
from collections import namedtuple
import matplotlib.pyplot as plt
import seaborn as sns
# Global plotting style for any figures produced during training/analysis.
sns.set_theme(style="darkgrid")
sns.set(font_scale = conf.font_scale)
class DIAYN():
    '''
    An implementation of DIAYN with a model-free algorithm (PPO, SAC) as the optimization backbone.

    Workflow: `pretrain()` learns a skill-conditioned policy with an intrinsic
    (discriminator-based) reward; `finetune()` then adapts the best skill to
    the task reward with SAC.
    '''
    def __init__(self, params, alg_params, discriminator_hyperparams, env="MountainCarContinuous-v0", alg="ppo", directory="./", seed=10, conf=None, timestamp=None, checkpoints=False, args=None, task=None, adapt_params=None, n_samples=None):
        # Create the discriminator with the requested parametrization:
        # MLP / Linear classifier, or Separable / Concat critic for
        # variational MI lower bounds.
        state_dim = gym.make(env).observation_space.shape[0]
        skill_dim = params['n_skills']
        hidden_dims = conf.num_layers_discriminator*[conf.layer_size_discriminator]
        latent_dim = conf.latent_size
        temperature = discriminator_hyperparams['temperature']
        dropout = discriminator_hyperparams['dropout']
        if discriminator_hyperparams['parametrization'] == "MLP":
            self.d = Discriminator(state_dim, hidden_dims, skill_dim, dropout=discriminator_hyperparams['dropout']).to(conf.device)
            # NOTE(review): d_adapt is only created for the MLP parametrization.
            self.d_adapt = Discriminator(state_dim, hidden_dims, skill_dim, dropout=discriminator_hyperparams['dropout']).to(conf.device)
        elif discriminator_hyperparams['parametrization'] == "Linear":
            self.d = nn.Linear(state_dim, skill_dim).to(conf.device)
        elif discriminator_hyperparams['parametrization'] == "Separable":
            # SeparableCritic(state_dim, skill_dim, hidden_dims, latent_dim, temperature=1, dropout=None)
            self.d = SeparableCritic(state_dim, skill_dim, hidden_dims, latent_dim, temperature, dropout).to(conf.device)
        elif discriminator_hyperparams['parametrization'] == "Concat":
            # ConcatCritic(state_dim, skill_dim, hidden_dims, temperature=1, dropout=None)
            self.d = ConcatCritic(state_dim, skill_dim, hidden_dims, temperature, dropout).to(conf.device)
        else:
            raise ValueError(f"{discriminator_hyperparams['parametrization']} is invalid parametrization")
        # Buffer that feeds the discriminator's training data.
        self.buffer = DataBuffer(params['buffer_size'], obs_shape=state_dim)
        # tensorboard summary writer
        self.sw = SummaryWriter(
            log_dir=directory, comment=f"{alg} Discriminator, env_name:{env}")
        # save some attributes
        self.alg = alg
        self.params = params
        self.alg_params = alg_params
        self.discriminator_hyperparams = discriminator_hyperparams
        self.env_name = env
        self.directory = directory
        self.seed = seed
        self.timestamp = timestamp
        self.conf = conf
        self.checkpoints = checkpoints
        self.args = args
        self.task = task
        self.parametrization = discriminator_hyperparams['parametrization']
        self.adapt_params = adapt_params
        self.n_samples = n_samples

    def pretrain(self):
        '''
        Pretraining phase with an intrinsic reward
        '''
        if self.alg == "ppo":
            env = DummyVecEnv([lambda: Monitor(RewardWrapper(SkillWrapper(gym.make(
                self.env_name), self.params['n_skills'], max_steps=self.conf.max_steps), self.d, self.params['n_skills'], parametrization=self.parametrization), self.directory)]*self.alg_params['n_actors'])
            # create the model with the specified hyperparameters
            model = PPO('MlpPolicy', env, verbose=1,
                        learning_rate=self.alg_params['learning_rate'],
                        n_steps=self.alg_params['n_steps'],
                        batch_size=self.alg_params['batch_size'],
                        n_epochs=self.alg_params['n_epochs'],
                        gamma=self.alg_params['gamma'],
                        gae_lambda=self.alg_params['gae_lambda'],
                        clip_range=self.alg_params['clip_range'],
                        policy_kwargs=dict(activation_fn=nn.ReLU,
                                           net_arch=[dict(pi=[self.conf.layer_size_policy, self.conf.layer_size_policy], vf=[self.conf.layer_size_value, self.conf.layer_size_value])]),
                        tensorboard_log=self.directory,
                        seed=self.seed
                        )
            discriminator_callback = DiscriminatorCallback(self.d, self.buffer, self.discriminator_hyperparams,
                                                           sw=self.sw, n_skills=self.params['n_skills'], min_buffer_size=self.params['min_train_size'], save_dir=self.directory, on_policy=True)
            # TODO: Replace this when you implement the other MI lower bounds with on policy algorithms
            eval_env = RewardWrapper(SkillWrapper(gym.make(
                self.env_name), self.params['n_skills'], ev=True), self.d, self.params['n_skills'], parametrization=self.parametrization)
            eval_env = Monitor(eval_env, f"{self.directory}/eval_results")
            eval_callback = EvalCallback(eval_env, best_model_save_path=self.directory,
                                         log_path=f"{self.directory}/eval_results", eval_freq=5000,
                                         deterministic=True, render=False, n_eval_episodes=self.conf.eval_runs)
            # create the callback list
            if self.checkpoints:
                fineune_callback = EvaluationCallback(self.env_name, self.alg, self.d, self.adapt_params, self.params, self.parametrization, self.seed, self.directory, self.sw, n_samples=self.n_samples)
                callbacks = [discriminator_callback, eval_callback, fineune_callback]
            else:
                callbacks = [discriminator_callback, eval_callback]
            # train the agent
            model.learn(total_timesteps=self.params['pretrain_steps'], callback=callbacks, log_interval=3, tb_log_name="PPO Pretrain")
        elif self.alg == "sac":
            env = DummyVecEnv([lambda: Monitor(RewardWrapper(SkillWrapper(gym.make(self.env_name), self.params['n_skills'], max_steps=self.conf.max_steps),
                                                             self.d, self.params['n_skills'], parametrization=self.parametrization), self.directory)])
            # create the model with the specified hyperparameters
            model = SAC('MlpPolicy', env, verbose=1,
                        learning_rate=self.alg_params['learning_rate'],
                        batch_size=self.alg_params['batch_size'],
                        gamma=self.alg_params['gamma'],
                        buffer_size=self.alg_params['buffer_size'],
                        tau=self.alg_params['tau'],
                        ent_coef=self.alg_params['ent_coef'],
                        gradient_steps=self.alg_params['gradient_steps'],
                        learning_starts=self.alg_params['learning_starts'],
                        policy_kwargs=dict(net_arch=dict(pi=[self.conf.layer_size_policy, self.conf.layer_size_policy], qf=[
                            self.conf.layer_size_q, self.conf.layer_size_q]), n_critics=2),
                        tensorboard_log=self.directory,
                        seed=self.seed
                        )
            discriminator_callback = DiscriminatorCallback(self.d, None, self.discriminator_hyperparams, sw=self.sw,
                                                           n_skills=self.params['n_skills'], min_buffer_size=self.params['min_train_size'], save_dir=self.directory, on_policy=False)
            if self.parametrization in ["MLP", "Linear"]:
                eval_env = RewardWrapper(SkillWrapper(gym.make(
                    self.env_name), self.params['n_skills'], ev=True), self.d, self.params['n_skills'], parametrization=self.parametrization)
                eval_env = Monitor(eval_env, f"{self.directory}/eval_results")
                eval_callback = EvalCallback(eval_env, best_model_save_path=self.directory,
                                             log_path=f"{self.directory}/eval_results", eval_freq=self.conf.eval_freq, deterministic=True, render=False)
            elif self.parametrization in ["Separable", "Concat"]:
                # Variational lower-bound estimator bundle passed into learn().
                MI_estimator = namedtuple('MI_estimator', "estimator_func estimator_type log_baseline alpha_logit")
                mi_estimate = MI_estimator(mi_lower_bound, self.discriminator_hyperparams['lower_bound'], self.discriminator_hyperparams['log_baseline'], self.discriminator_hyperparams['alpha_logit'])
                eval_callback = MI_EvalCallback(self.env_name, self.d, self.params, self.sw, self.discriminator_hyperparams, mi_estimate, self.directory, eval_freq=self.conf.eval_freq*self.alg_params['n_actors'])
            # create the callback list
            if self.checkpoints:
                fineune_callback = EvaluationCallback(self.env_name, self.alg, self.d, self.adapt_params, self.params, self.parametrization, self.seed, self.directory, self.sw, n_samples=self.n_samples)
                callbacks = [discriminator_callback, eval_callback, fineune_callback]
            else:
                callbacks = [discriminator_callback, eval_callback]
            # train the agent
            if self.parametrization in ["Separable", "Concat"]:
                model.learn(total_timesteps=self.params['pretrain_steps'], callback=callbacks, log_interval=3, tb_log_name="SAC Pretrain", d=self.d, mi_estimator=mi_estimate)
            elif self.parametrization in ["MLP", "Linear"]:
                model.learn(total_timesteps=self.params['pretrain_steps'], callback=callbacks, log_interval=3, tb_log_name="SAC Pretrain")
        else:
            # BUGFIX: this branch is reached for an unsupported `alg`; the
            # original referenced the out-of-scope local
            # `discriminator_hyperparams` (raising NameError) with a message
            # about parametrization rather than the algorithm.
            raise ValueError(f"{self.alg} is an invalid algorithm")
        if self.checkpoints:
            results = {
                'steps': fineune_callback.steps,
                'intr_rewards': fineune_callback.intr_rewards,
                'extr_rewards': fineune_callback.extr_rewards,
            }
            return model, self.d, results
        return model, self.d

    # finetune the pretrained policy on a specific task
    def finetune(self):
        '''
        Adapt the pretrained model with task reward using SAC
        '''
        # For the generalization experiment
        if self.task:
            # TODO: add other environments
            if self.env_name == "HalfCheetah-v2":
                env = HalfCheetahTaskWrapper(gym.make("HalfCheetah-v2"), task=self.task)
                env = DummyVecEnv([lambda: SkillWrapper(env, self.params['n_skills'], max_steps=self.conf.max_steps)])
            elif self.env_name == "Walker2d-v2":
                env = WalkerTaskWrapper(gym.make("Walker2d-v2"), task=self.task)
                env = DummyVecEnv([lambda: SkillWrapper(env, self.params['n_skills'], max_steps=self.conf.max_steps)])
            elif self.env_name == "Ant-v2":
                env = AntTaskWrapper(gym.make("Ant-v2"), task=self.task)
                env = DummyVecEnv([lambda: SkillWrapper(env, self.params['n_skills'], max_steps=self.conf.max_steps)])
        else:
            env = DummyVecEnv([lambda: SkillWrapper(gym.make(
                self.env_name), self.params['n_skills'], max_steps=self.conf.max_steps)])
        model_dir = self.directory + "/best_model"
        # Load the pretrained model just to determine the best skill, then free it.
        if self.alg == "sac":
            model = SAC.load(model_dir, env=env, seed=self.seed)
        elif self.alg == "ppo":
            model = PPO.load(model_dir, env=env, clip_range=get_schedule_fn(
                self.alg_params['clip_range']), seed=self.seed)
        best_skill_index = best_skill(
            model, self.env_name, self.params['n_skills'])
        del model
        # define the environment for the adaptation model
        env, eval_env = self.adaptation_environment(best_skill_index)
        self.adaptation_model = SAC('MlpPolicy', env, verbose=1,
                                    learning_rate=self.adapt_params['learning_rate'],
                                    batch_size=self.adapt_params['batch_size'],
                                    gamma=self.adapt_params['gamma'],
                                    buffer_size=self.adapt_params['buffer_size'],
                                    tau=self.adapt_params['tau'],
                                    ent_coef=self.adapt_params['ent_coef'],
                                    gradient_steps=self.adapt_params['gradient_steps'],
                                    learning_starts=self.adapt_params['learning_starts'],
                                    policy_kwargs=dict(net_arch=dict(pi=[self.conf.layer_size_policy, self.conf.layer_size_policy], qf=[
                                        self.conf.layer_size_q, self.conf.layer_size_q]), n_critics=2),
                                    tensorboard_log=self.directory,
                                    seed=self.seed
                                    )
        eval_env = Monitor(eval_env, f"{self.directory}/finetune_eval_results")
        eval_callback = EvalCallback(eval_env, best_model_save_path=self.directory + f"/best_finetuned_model_skillIndex:{best_skill_index}",
                                     log_path=f"{self.directory}/finetune_eval_results", eval_freq=self.conf.eval_freq,
                                     deterministic=True, render=False)
        # If pretraining used SAC, copy actor and critic weights directly.
        if self.alg == "sac":
            sac_model = SAC.load(model_dir, env=env, tensorboard_log=self.directory)
            self.adaptation_model.actor.latent_pi.load_state_dict(sac_model.actor.latent_pi.state_dict())
            self.adaptation_model.actor.mu.load_state_dict(sac_model.actor.mu.state_dict())
            self.adaptation_model.actor.log_std.load_state_dict(sac_model.actor.log_std.state_dict())
            self.adaptation_model.actor.optimizer = opt.Adam(self.adaptation_model.actor.parameters(), lr=self.adapt_params['learning_rate'])
            self.adaptation_model.critic.qf0.load_state_dict(sac_model.critic.qf0.state_dict())
            self.adaptation_model.critic.qf1.load_state_dict(sac_model.critic.qf1.state_dict())
            self.adaptation_model.critic_target.load_state_dict(self.adaptation_model.critic.state_dict())
            self.adaptation_model.critic.optimizer = opt.Adam(self.adaptation_model.critic.parameters(), lr=self.adapt_params['learning_rate'])
            self.adaptation_model.learn(total_timesteps=self.params['finetune_steps'],
                                        callback=eval_callback, tb_log_name="SAC_FineTune", d=None, mi_estimator=None)
        # If pretraining used PPO, translate the PPO actor/value nets into the SAC model.
        elif self.alg == "ppo":
            self.ppo_model = PPO.load(model_dir, env=env, tensorboard_log=self.directory,
                                      clip_range=get_schedule_fn(self.alg_params['clip_range']))
            ppo_actor = self.ppo_model.policy
            self.load_state_dicts(ppo_actor)
            self.adaptation_model.learn(total_timesteps=self.params['finetune_steps'],
                                        callback=eval_callback, tb_log_name="PPO_FineTune", d=None, mi_estimator=None)
        return self.adaptation_model, best_skill_index

    def load_state_dicts(self, ppo_actor):
        '''
        load the pretrained model parameters into the adaptation model
        '''
        # Initialize the SAC actor from the PPO policy network; log_std starts fresh.
        self.adaptation_model.actor.latent_pi.load_state_dict(ppo_actor.mlp_extractor.policy_net.state_dict())
        self.adaptation_model.actor.mu.load_state_dict(ppo_actor.action_net.state_dict())
        self.adaptation_model.actor.log_std.load_state_dict(nn.Linear(in_features=self.conf.layer_size_policy, out_features=gym.make(self.env_name).action_space.shape[0], bias=True).state_dict())
        self.adaptation_model.actor.optimizer = opt.Adam(self.adaptation_model.actor.parameters(), lr=self.adapt_params['learning_rate'])
        # Build the Q-networks by reusing the PPO value-net hidden layers with
        # new input (state+skill+action) and scalar output layers.
        layers = [nn.Linear(gym.make(self.env_name).observation_space.shape[0] + self.params['n_skills'] + gym.make(self.env_name).action_space.shape[0], self.conf.layer_size_discriminator)]
        for l in range(len(self.ppo_model.policy.mlp_extractor.value_net)):
            if l != 0:
                layers.append(self.ppo_model.policy.mlp_extractor.value_net[l])
        layers.append(nn.Linear(self.conf.layer_size_discriminator, 1))
        seq = nn.Sequential(*layers)
        print(seq)
        self.adaptation_model.critic.qf0.load_state_dict(seq.state_dict())
        self.adaptation_model.critic.qf1.load_state_dict(seq.state_dict())
        self.adaptation_model.critic_target.load_state_dict(self.adaptation_model.critic.state_dict())
        self.adaptation_model.critic.optimizer = opt.Adam(self.adaptation_model.critic.parameters(), lr=self.adapt_params['learning_rate'])

    def adaptation_environment(self, best_skill_index):
        '''
        Adaptation environment according to the task reward
        '''
        if self.task:
            if self.env_name == "HalfCheetah-v2":
                env = HalfCheetahTaskWrapper(gym.make("HalfCheetah-v2"), task=self.task)
                env = DummyVecEnv([lambda: SkillWrapperFinetune(Monitor(env, f"{self.directory}/finetune_train_results"), self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, skill=best_skill_index)])
                eval_env = HalfCheetahTaskWrapper(gym.make(self.env_name), task=self.task)
                eval_env = SkillWrapperFinetune(eval_env, self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, r_seed=None, skill=best_skill_index)
            elif self.env_name == "Walker2d-v2":
                env = WalkerTaskWrapper(gym.make("Walker2d-v2"), task=self.task)
                env = DummyVecEnv([lambda: SkillWrapperFinetune(Monitor(env, f"{self.directory}/finetune_train_results"), self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, skill=best_skill_index)])
                eval_env = WalkerTaskWrapper(gym.make(self.env_name), task=self.task)
                eval_env = SkillWrapperFinetune(eval_env, self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, r_seed=None, skill=best_skill_index)
            elif self.env_name == "Ant-v2":
                env = AntTaskWrapper(gym.make("Ant-v2"), task=self.task)
                env = DummyVecEnv([lambda: SkillWrapperFinetune(Monitor(env, f"{self.directory}/finetune_train_results"), self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, skill=best_skill_index)])
                eval_env = AntTaskWrapper(gym.make(self.env_name), task=self.task)
                eval_env = SkillWrapperFinetune(eval_env, self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, r_seed=None, skill=best_skill_index)
        else:
            env = DummyVecEnv([lambda: SkillWrapperFinetune(Monitor(gym.make(
                self.env_name), f"{self.directory}/finetune_train_results"), self.params['n_skills'], r_seed=None, max_steps=gym.make(self.env_name)._max_episode_steps, skill=best_skill_index)])
            eval_env = SkillWrapperFinetune(gym.make(
                self.env_name), self.params['n_skills'], max_steps=gym.make(self.env_name)._max_episode_steps, r_seed=None, skill=best_skill_index)
        return env, eval_env
| FaisalAhmed0/SLUSD | src/diayn.py | diayn.py | py | 22,206 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "seaborn.set_theme",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "seaborn.set",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "src.config.conf.font_scale",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "src.con... |
41433954503 | import os
import mock
import json
import pika
import Queue
import logging
import greenlet
import unittest
import threading
from rackattack.tcp import publish
from rackattack.tcp import subscribe
from rackattack.tests import mock_pika
from rackattack.tests import one_threaded_publish
# Route test logging to stderr at INFO level so broker traffic is visible.
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)
class Host:
    """Minimal fake host object; always reports the same identifier."""

    _FIXED_ID = "myID"

    def id(self):
        """Return this host's (fixed) identifier."""
        return self._FIXED_ID
class FakeHostStateMachine:
    # Fake state machine stand-in used by the publish tests.
    def hostImplementation(self):
        # NOTE(review): returns a fresh Host on every call; tests compare
        # only via id(), which is constant.
        return Host()
class Test(unittest.TestCase):
    def setUp(self):
        # Swap pika for the in-process fake broker before the publisher connects.
        mock_pika.enableMockedPika(modules=[publish])
        self.tested = one_threaded_publish.OneThreadedPublish(mock_pika.DEFAULT_AMQP_URL)
        self.consumer = mock_pika.getBlockingConnectionToFakeBroker().channel()
    def tearDown(self):
        # Restore the real pika module so other test modules are unaffected.
        mock_pika.disableMockedPika(modules=[publish])
    def test_ThreadStarted(self):
        # Constructing the publisher must spawn its worker thread exactly once.
        self.tested.threadStartMock.assert_called_once_with(self.tested)
    def test_BadURL(self):
        # A publisher given an unparsable AMQP URL should abort the process;
        # os.kill is patched so the abort attempt is observed, not executed.
        killMock = mock.Mock()
        origKill = os.kill
        try:
            os.kill = killMock
            publishInstance = greenlet.greenlet(
                one_threaded_publish.OneThreadedPublish('invalid amqp url').run)
            publishInstance.switch()
        finally:
            os.kill = origKill
        self.assertEquals(killMock.call_count, 1)
    def test_AllocationProviderMessage(self):
        self.tested.continueWithServer()
        allocationID = 2
        message = 'alpha bravo tango'
        self.tested.allocationProviderMessage(allocationID, message)
        self.tested.continueWithServer()
        # The provider message must surface on the allocation-specific exchange.
        expectedMessage = dict(event='providerMessage', allocationID=allocationID, message=message)
        expectedExchange = publish.PublishSpooler.allocationExchange(allocationID)
        actualMessage = self._consume(expectedExchange)
        self.assertEquals(actualMessage, expectedMessage)
def test_AllocationRequested(self):
self.tested.continueWithServer()
allocationID = 1
fields = dict(requirements=dict(theseAre="myRequirements"),
allocationInfo="whatACoolAllocation")
self.tested.allocationRequested(**fields)
self.tested.continueWithServer()
expectedMessage = dict(fields, event='requested')
expectedExchange = publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME
actualMessage = self._consume(expectedExchange)
self.assertEquals(actualMessage, expectedMessage)
def test_AllocationRejected(self):
self.tested.continueWithServer()
allocationID = 1
self.tested.allocationRejected(reason="No Resources")
self.tested.continueWithServer()
expectedMessage = dict(event='rejected', reason="No Resources")
expectedExchange = publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME
actualMessage = self._consume(expectedExchange)
self.assertEquals(actualMessage, expectedMessage)
    def test_AllocationCreated(self):
        # A created allocation broadcasts node-name -> host-id mappings.
        self.tested.continueWithServer()
        allocationID = 1
        hostStateMachine = FakeHostStateMachine()
        allocated = {"node0": hostStateMachine}
        self.tested.allocationCreated(allocationID=allocationID, allocated=allocated)
        self.tested.continueWithServer()
        expectedAllocated = {"node0": hostStateMachine.hostImplementation().id()}
        expectedMessage = dict(event='created', allocated=expectedAllocated, allocationID=allocationID)
        expectedExchange = publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME
        actualMessage = self._consume(expectedExchange)
        self.assertEquals(actualMessage, expectedMessage)
def test_AllocationDone(self):
    """Done: a 'changedState' on the allocation's exchange, then a global 'done'."""
    self.tested.continueWithServer()
    allocationID = 1
    self.tested.allocationDone(allocationID)
    self.tested.continueWithServer()
    expectedExchange = publish.PublishSpooler.allocationExchange(allocationID)
    message = self._consume(expectedExchange)
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(message["event"], "changedState")
    expectedMessage = dict(event="done", allocationID=allocationID)
    expectedExchange = publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME
    actualMessage = self._consume(expectedExchange)
    self.assertEqual(actualMessage, expectedMessage)
def test_AllocationDied_NoWithdrawl(self):
    """A non-withdrawal death publishes 'changedState' then a global 'dead'."""
    self.tested.continueWithServer()
    allocationID = 1
    # Dropped the unused `client =` binding on the call below.
    self.tested.allocationDied(allocationID=allocationID, reason="freed", message="'sup")
    self.tested.continueWithServer()
    expectedExchange = publish.PublishSpooler.allocationExchange(allocationID)
    message = self._consume(expectedExchange)
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(message["event"], "changedState")
    expectedMessage = dict(event="dead", allocationID=allocationID, reason="freed", moreInfo="'sup")
    expectedExchange = publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME
    actualMessage = self._consume(expectedExchange)
    self.assertEqual(actualMessage, expectedMessage)
def test_AllocationDied_Withdrawn(self):
    """A withdrawal publishes a 'withdrawn' event before the global 'dead'."""
    self.tested.continueWithServer()
    allocationID = 1
    self.tested.allocationDied(allocationID=allocationID, reason="withdrawn", message="'sup")
    self.tested.continueWithServer()
    expectedExchange = publish.PublishSpooler.allocationExchange(allocationID)
    message = self._consume(expectedExchange)
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(message["event"], "withdrawn")
    expectedMessage = dict(event="dead", allocationID=allocationID, reason="withdrawn", moreInfo="'sup")
    expectedExchange = publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME
    actualMessage = self._consume(expectedExchange)
    self.assertEqual(actualMessage, expectedMessage)
def test_CleanupAllocationPublishResources(self):
    """After cleanup, the per-allocation exchange is gone; the global ones stay."""
    self.tested.continueWithServer()
    allocationID = 1
    self.tested.allocationDone(allocationID)
    self.tested.continueWithServer()
    allocationExchange = publish.PublishSpooler.allocationExchange(allocationID)
    self._consume(allocationExchange)
    expectedExchanges = ['',
                         allocationExchange,
                         publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME]
    for expectedExchange in expectedExchanges:
        # One membership check per exchange (the original asserted twice).
        self.assertIn(expectedExchange, self.consumer.exchanges)
    self.tested.cleanupAllocationPublishResources(allocationID)
    self.tested.continueWithServer()
    expectedExchanges = ['', publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME]
    # Compare sorted lists: on Python 3, dict.keys() is a view object and never
    # compares equal to a list, so the original assertion could not pass there.
    self.assertEqual(sorted(self.consumer.exchanges.keys()), sorted(expectedExchanges))
def test_TryCleaningUpResourcesForANonExistingAllocationDoesNotCrashServer(self):
    # Cleaning up an allocation that was never announced must not raise;
    # the test passes if continueWithServer() completes without error.
    self.tested.continueWithServer()
    allocationID = 1
    self.tested.cleanupAllocationPublishResources(allocationID)
    self.tested.continueWithServer()
def test_CleaningUpResourcesForADeadAllocationDoesNotCrashServer(self):
    """Cleaning up the same dead allocation twice must be an idempotent no-op."""
    self.tested.continueWithServer()
    allocationID = 1
    self.tested.allocationDone(allocationID)
    self.tested.continueWithServer()
    allocationExchange = publish.PublishSpooler.allocationExchange(allocationID)
    self._consume(allocationExchange)
    expectedExchanges = ['',
                         allocationExchange,
                         publish.Publish.ALL_HOSTS_ALLOCATIONS_EXCHANGE_NAME]
    for expectedExchange in expectedExchanges:
        # One membership check per exchange (the original asserted twice).
        self.assertIn(expectedExchange, self.consumer.exchanges)
    self.tested.cleanupAllocationPublishResources(allocationID)
    self.tested.continueWithServer()
    expectedExchanges.remove(allocationExchange)
    # Compare sorted lists: on Python 3, dict.keys() is a view object and never
    # compares equal to a list, so the original assertion could not pass there.
    self.assertEqual(sorted(self.consumer.exchanges.keys()), sorted(expectedExchanges))
    # Second cleanup of the already-cleaned allocation: must not crash or change state.
    self.tested.cleanupAllocationPublishResources(allocationID)
    self.tested.continueWithServer()
    self.assertEqual(sorted(self.consumer.exchanges.keys()), sorted(expectedExchanges))
def _consume(self, exchange):
    """Pull a single message from *exchange* and return it decoded from JSON."""
    return json.loads(self.consumer.basic_consume(exchange=exchange))
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| Stratoscale/rackattack-api | py/rackattack/tests/test_publish.py | test_publish.py | py | 8,433 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.StreamHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.get... |
21898163142 | import os
import csv
from utilities.utilities import *
from utilities.katuyou import Katuyou
from collections import defaultdict
class EmotionClass:
    """Maps feeling keywords found in a parsed Japanese sentence to canned response lines.

    Feeling keyword lists are loaded from text files under *feeling_folder_path*
    (one keyword per line; lines starting with '#' or '-' are skipped) and the
    response lines come from the CSV at *line_file_path*, keyed by each feeling
    file's base name.  Conjugation is delegated to the Katuyou helper.
    """
    def __init__(self, feeling_folder_path, line_file_path):
        # NOTE(review): attribute name keeps the original "foloder" typo;
        # other code may rely on it, so it is not renamed here.
        self.feeling_foloder_path = feeling_folder_path
        self.line_file_path = line_file_path
        files = os.listdir(feeling_folder_path)
        self.feeling_dict = defaultdict(list)  # key is file name, value is feelings
        self.line_dict = defaultdict(list)  # key is file name, value is line
        self.katuyou = Katuyou('./utilities/KatuyouCSVs')  # conjugation tables
        for file in files:
            name = file.split('.')[0]
            with open(os.path.join(self.feeling_foloder_path, file), 'r', encoding="utf-8") as f:
                feelings = f.readlines()
            for feeling in feelings:
                feeling = feeling.rstrip('\n')
                # '#'- and '-'-prefixed lines are comments / disabled entries.
                if feeling.startswith('#') or feeling.startswith('-'):
                    pass
                else:
                    self.feeling_dict[name].append(feeling)
        with open(self.line_file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            reader = csv.reader(lines)
            for row in reader:
                # Every CSV row must reference a known feelings file.
                assert row[0] in self.feeling_dict.keys(), 'Not found file name at {}'.format(row[0])
                self.line_dict[row[0]] = row[1:]
    def current2past(self, current):
        """Rewrite a present-tense response line into past tense.

        Lines containing any escape word are returned unchanged; otherwise a
        fixed-phrase substitution is tried first, falling back to verb
        conjugation via _verb_cur2past.
        """
        escape_words = ['さすが', '流石', '立派', '知的', '頭', 'どうして', '素敵', '大丈夫', 'お疲れ様']
        for word in escape_words:
            if word in current:
                return current
        if 'がない' in current:
            current = current.replace('がない', 'がなかった')
        elif 'が無い' in current:
            current = current.replace('が無い', 'が無かった')
        elif 'なんだ' in current:
            current = current.replace('なんだ', 'だったんだ')
        elif 'いいん' in current:
            current = current.replace('いいん', 'よかったん')
        elif 'いん' in current:
            current = current.replace('いん', 'かったん')
        elif 'いね' in current:
            current = current.replace('いね', 'かったね')
        else:
            current = self._verb_cur2past(current)
        return current
    def _verb_cur2past(self, line):  # convert the verb in the line to past tense
        # Scan tokens right-to-left; on the first auxiliary/verb/adjective,
        # conjugate it to the -ta (past) form and rebuild the line.
        tokens, positions = mecab_parse(line)
        res_line = ''
        for i, token, position in zip(range(len(tokens)-1, -1, -1), tokens[::-1], positions[::-1]):
            if position[0] == '助動詞' and i -1 >= 0 and (positions[i-1][0] not in ['動詞', '形容詞'] and tokens[i-1] not in ['ん']):
                token = self.katuyou.convert_base2ta(token)
                res_line = ''.join(tokens[:i]) + token + 'たんだ' + res_line
                break
            elif position[0] in ['形容詞', '動詞']:
                token = self.katuyou.convert_base2ta(token)
                res_line = ''.join(tokens[:i]) + token + 'た' + res_line
                break
            else:
                res_line = token + res_line
        return res_line
    def Emotion(self, phrases):
        """Pick a canned emotional response for the last (up to three) phrases.

        Returns (statement, apply): *statement* is a randomly chosen line for
        the first feeling tag found scanning right-to-left, and *apply* is True
        only when the match is not negated ('nai') nor passive ('reru').
        Assumes each phrase is a (tokens, positions) pair — TODO confirm
        against Preprocess; `random` and `get_flags` presumably come from the
        star-import of utilities.utilities — verify.
        """
        statement = ''
        apply = False
        # Concatenate the last up-to-three phrases into one token stream.
        if len(phrases) > 2:
            tokens = phrases[-3][0] + phrases[-2][0] + phrases[-1][0]
            positions = phrases[-3][1] + phrases[-2][1] + phrases[-1][1]
        elif len(phrases) == 2:
            tokens = phrases[-2][0] + phrases[-1][0]
            positions = phrases[-2][1] + phrases[-1][1]
        else:
            tokens, positions = phrases[-1]
        for i, token, position in zip(range(len(tokens)-1, -1, -1), tokens[::-1], positions[::-1]):
            if position[-1] in self.line_dict.keys():
                flag = get_flags(tokens[i:], positions[i:])
                if not flag['nai'] and not flag['reru']:
                    apply = True
                    statement = random.choice(self.line_dict[position[-1]])
                    if flag['ta']:
                        statement = self.current2past(statement)
                else:
                    pass
                break
        return statement, apply
    def passive_checker(self, tokens, positions):
        """Return True when the clause looks passive/benefactive (e.g. -reru,
        -te morau/kureru forms); False once a content noun is reached."""
        for i, token, position in zip(range(len(tokens)), tokens, positions):
            if position[0] == '動詞' and i + 2 < len(positions) and tokens[i + 1] in ['て', 'で'] \
                    and positions[i + 2][-3] in ['くださる', 'もらえる', 'くれる', 'もらう', '下さる']:
                return True
            elif position[0] == '動詞' and i + 1 < len(positions) and positions[i + 1][-3] in ['れる', 'られる', 'せる']:
                return True
            elif token in ['を'] and i + 1 < len(positions) and positions[i + 1][-3] in ['受ける', '迎える']:
                return True
            elif position[0] == '名詞' and position[1] not in ['非自立', '接尾']:
                return False
        return False
if __name__=='__main__':
    # Quick manual smoke test of the emotion matcher.
    emotion = EmotionClass('utilities/FEELINGS', 'utilities/feelings2line.csv')
    from Preprocess import Preprocess
    sentences = read_lines()
    # NOTE(review): this overrides the lines read above with a single fixed
    # sample sentence — presumably a leftover debugging shortcut.
    sentences = ['好きな男だった。']
    for sentence in sentences:
        sentence, apply, subject, conjunction, phrases, tokens, positions = Preprocess(sentence)
        print(phrases)
        print(emotion.Emotion(phrases))
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "utilities.... |
30724803476 | import pygame
from player import Player
class GameScreen(object):
    """Main game scene: owns the pygame window, the static board tiles, the
    moving obstacles (cars on road rows, logs on water rows) and the player."""

    def __init__(self, w, h, tilesize):
        pygame.init()
        # initialize all screen variables
        self.width = w
        self.height = h
        self.scene = pygame.display.set_mode((w, h))
        self.caption = pygame.display.set_caption("This is Game Title")
        self.icon = pygame.image.load("assets/icon.png")
        # actually displays the icon
        pygame.display.set_icon(self.icon)
        # tilesize setup
        self.tileSize = tilesize
        self.gridColor = (130, 98, 22)
        # background music; looped forever but muted (volume 0)
        self.track = pygame.mixer.music.load("assets/levelMusic.wav")
        self.m_loop = pygame.mixer.music.play(-1)
        self.m_volum = pygame.mixer.music.set_volume(0)
        # background colour
        self.bgc = (51, 51, 51)
        # define player (starting tile 5, 8)
        self.player = Player(self, 5, 8)
        # tile and sprite images
        self.water = pygame.image.load("assets/water.png")
        self.grass = pygame.image.load("assets/grass.png")
        self.street = pygame.image.load("assets/road.png")
        self.CL = pygame.image.load("assets/car_l.png")
        self.CR = pygame.image.load("assets/car_r.png")
        self.LL = pygame.image.load("assets/log_l.png")
        self.LM = pygame.image.load("assets/log_m.png")
        self.LR = pygame.image.load("assets/log_r.png")
        # roads: 5 rows x 10 cells of car markers ("CL"/"CR") or empty strings
        self.roads = []
        self.roads.append(["", "", "CL", "CR", "", "", "", "CL", "CR", ""])
        self.roads.append(["CR", "", "", "", "", "", "", "", "", "CL"])
        self.roads.append(["", "", "", "", "", "", "CL", "CR", "", ""])
        self.roads.append(["CL", "CR", "", "", "", "", "", "", "", ""])
        self.roads.append(["", "", "CL", "CR", "", "", "", "CL", "CR", ""])
        # logs: 5 rows x 10 cells of log markers ("LL"/"LM"/"LR") or empty strings
        self.logs = []
        self.logs.append(["LL", "LM", "LR", "", "", "", "", "", "", ""])
        self.logs.append(["LR", "", "", "", "", "", "", "", "", "LL"])
        self.logs.append(["", "", "LL", "LM", "LR", "", "", "", "", ""])
        self.logs.append(["", "LL", "LR", "", "", "", "", "LL", "LR", ""])
        self.logs.append(["", "", "", "", "LL", "LM", "LR", "", "", ""])
        self.clock = pygame.time.Clock()
        self.elapsedTime = 0  # ms since the obstacles last advanced

    def draw_grid(self):
        """Draw the tile grid lines over the whole scene."""
        for x in range(0, self.width, self.tileSize):
            pygame.draw.line(self.scene, self.gridColor, (x, 0), (x, self.height))
        for y in range(0, self.height, self.tileSize):
            pygame.draw.line(self.scene, self.gridColor, (0, y), (self.width, y))

    def setBackground(self):
        """Clear the scene to the background colour and redraw the grid."""
        self.scene.fill(self.bgc)
        self.draw_grid()

    def display_board(self):
        """Blit the static board: grass (row 0), water (rows 1-5), grass (row 6),
        road (rows 7-11), grass (row 12)."""
        for i in range(10):
            self.scene.blit(self.grass, (i * self.tileSize, 0 * self.tileSize))
        for i in range(1, 6):
            for u in range(10):
                self.scene.blit(self.water, (u * self.tileSize, i * self.tileSize))
        for i in range(10):
            self.scene.blit(self.grass, (i * self.tileSize, 6 * self.tileSize))
        for i in range(7, 12):
            for u in range(10):
                self.scene.blit(self.street, (u * self.tileSize, i * self.tileSize))
        for i in range(10):
            self.scene.blit(self.grass, (i * self.tileSize, 12 * self.tileSize))

    @staticmethod
    def _rotate_right(grid):
        """Rotate every row of *grid* one cell to the right, in place: the last
        cell wraps around to the front."""
        for row in grid:
            row.insert(0, row.pop())

    def advance_rows(self):
        """Advance all moving obstacles one tile (rows wrap around).

        Refactored: the original duplicated the identical pop/insert loop for
        roads and logs; both now share _rotate_right.
        """
        self._rotate_right(self.roads)
        self._rotate_right(self.logs)

    def display_objects(self):
        """Blit car sprites onto the road rows (screen rows i+7) and log
        sprites onto the water rows (screen rows i+1)."""
        for i in range(len(self.roads)):
            row = self.roads[i]
            for u in range(len(row)):
                if row[u] == "CL":
                    self.scene.blit(self.CL, (u * self.tileSize, (i + 7) * self.tileSize))
                elif row[u] == "CR":
                    self.scene.blit(self.CR, (u * self.tileSize, (i + 7) * self.tileSize))
        for i in range(len(self.logs)):
            row = self.logs[i]
            for u in range(len(row)):
                if row[u] == "LL":
                    self.scene.blit(self.LL, (u * self.tileSize, (i + 1) * self.tileSize))
                elif row[u] == "LM":
                    self.scene.blit(self.LM, (u * self.tileSize, (i + 1) * self.tileSize))
                elif row[u] == "LR":
                    self.scene.blit(self.LR, (u * self.tileSize, (i + 1) * self.tileSize))

    def update(self):
        """Render one frame, advancing the obstacles once per second.

        Returns True so the caller's main loop keeps running.
        """
        keepGoing = True
        self.setBackground()
        self.display_board()
        dt = self.clock.tick()  # ms since the previous tick
        self.elapsedTime += dt
        if self.elapsedTime > 1000:
            self.elapsedTime = 0
            self.advance_rows()
        self.display_objects()
        self.player.update()
        pygame.display.update()
        return keepGoing
| LSCCyberHawks/GenCyber2023 | Python/gamescreen.py | gamescreen.py | py | 5,715 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
16058498554 | from selenium.webdriver.chrome.service import Service
"""
@package base
WebDriver Factory class implementation
It creates a webdriver instance based on browser configurations
"""
from selenium import webdriver
class WebDriverFactory():
    """Creates a WebDriver instance based on the configured browser name."""

    def __init__(self, browser):
        """
        Inits WebDriverFactory class

        Args:
            browser: browser name, e.g. "chrome" or "firefox" (case-insensitive)

        Returns:
            None
        """
        self.browser = browser

    def get_web_driver_instance(self):
        """
        Get WebDriver Instance based on the browser configuration

        Returns:
            'WebDriver Instance' already navigated to the login page
        """
        base_url = "https://www.hudl.com/login"
        # Compare case-insensitively so "Chrome"/"CHROME" also get a Chrome
        # driver; any other value falls back to Firefox (original behaviour).
        if self.browser.lower() == "chrome":
            service = Service("./drivers/chromedriver.exe")
            driver = webdriver.Chrome(service=service)
        else:
            service = Service("./drivers/geckodriver.exe")
            driver = webdriver.Firefox(service=service)
        # Setting Driver Implicit Time out for An Element
        driver.implicitly_wait(3)
        # Maximize the window
        driver.maximize_window()
        # Loading browser with App URL
        driver.get(base_url)
        return driver
| ManuBoca92/hudl-tech-test | base/WebDriverFactory.py | WebDriverFactory.py | py | 1,156 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 33,
"usage_type": "name"
},
{... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 12 01:26:44 2023

@author: mingjunsun

Fit a random forest on 21 firm characteristics to predict next-month excess
stock returns (US equities, 1992 onward) after standard sample filters.
Hyperparameters were chosen on a 2005-2012 validation split (see the
commented-out grid search below).
"""

import pandas as pd
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import matplotlib.pyplot as plt
#from livelossplot import PlotLosses
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

#set working directory
os.chdir('/Users/mingjunsun/Library/CloudStorage/Dropbox/23 Summer/Data/')
data_original = pd.read_csv("Characteristics/usa.csv")
data = data_original
data_expl = data.head(105)
#exclude data before 1991-12-31 (dates are yyyymmdd integers)
data = data[~(data['date'] < 19920000)]
#exclude observations with missing me in month t and missing return in month t+1
data = data.dropna(subset=['me', 'ret_exc_lead1m'])
#exclude nano caps
data = data.loc[data['size_grp'] != 'nano']
#delete observation with more than 5 out of the 21 characteristics missing
cols = ["be_me", "ret_12_1", "market_equity", "ret_1_0", "rvol_252d", "beta_252d", "qmj_safety", "rmax1_21d", "chcsho_12m","ni_me", "eq_dur", "ret_60_12", "ope_be", "gp_at", "ebit_sale", "at_gr1", "sale_gr1", "at_be","cash_at", "age", "z_score"]
data["missing_num"] = data[cols].isna().sum(1)
data = data.loc[data['missing_num'] <= 5]
#impute the missing characteristics by replacing them with the cross-sectional median
for i in cols:
    data[i] = data[i].astype(float)
    data[i] = data[i].fillna(data.groupby('date')[i].transform('median'))
#check missing values of the 21 characteristics
#randomlist = random.sample(range(1, 310000), 150)
#data_expl = data.iloc[randomlist]
#cols1 = ["missing_num", "id", "date","be_me", "ret_12_1", "market_equity", "ret_1_0", "rvol_252d", "beta_252d", "qmj_safety", "rmax1_21d", "chcsho_12m","ni_me", "eq_dur", "ret_60_12", "ope_be", "gp_at", "ebit_sale", "at_gr1", "sale_gr1", "at_be","cash_at", "age", "z_score"]
#data_expl = data_expl[cols1]

#predict next month's excess return over the training period
#OLS pytorch
cols1 = ["date","ret_exc_lead1m", "be_me", "ret_12_1", "market_equity", "ret_1_0", "rvol_252d", "beta_252d", "qmj_safety", "rmax1_21d", "chcsho_12m","ni_me", "eq_dur", "ret_60_12", "ope_be", "gp_at", "ebit_sale", "at_gr1", "sale_gr1", "at_be","cash_at", "age", "z_score"]
data1 = data[cols1]
data1 = data1.dropna()
# Train: pre-2012; test: post-2012.  Dates equal to exactly 20120000 cannot
# occur in yyyymmdd data, so the split is exhaustive.
data_train = data1[(data1['date'] < 20120000)]
data_test = data1[(data1['date'] > 20120000)]
X_train_all = data_train[cols].to_numpy()
Y_train_all = data_train["ret_exc_lead1m"].to_numpy()
# split into training and validation (validation = 2005-2012)
data_validation = data_train[(data_train['date'] > 20050000)]
data_train = data_train[(data_train['date'] < 20050000)]
X_train = data_train[cols].to_numpy()
Y_train = data_train["ret_exc_lead1m"].to_numpy()
X_test = data_test[cols].to_numpy()
Y_test = data_test["ret_exc_lead1m"].to_numpy()
X_validation = data_validation[cols].to_numpy()
Y_validation = data_validation["ret_exc_lead1m"].to_numpy()
# Hyperparameter grid: features per split (P), max depth (D), min samples per split (M)
P = [7,14,21]
D = [1,2,3]
M = [2,10]
p = 7
d = 1
m = 2
'''
for p in P:
    for d in D:
        for m in M:
            rf = RandomForestRegressor(n_estimators = 500, max_depth = d, min_samples_split = m, max_features = p, bootstrap = True, max_samples = 0.5)
            # Train the model on training data
            rf.fit(X_train, Y_train)
            predictions = rf.predict(X_validation)
            mse_error = mean_squared_error(predictions, Y_validation)
            # Print out the mean absolute error (mae)
            print('P=',p,' D=', d, ' M=', m ,' Mean Squared Error:', round(mse_error, 6), 'degrees.')
'''
# we choose P =7, D = 1, M = 10
rf = RandomForestRegressor(n_estimators = 500, max_depth = 1, min_samples_split = 10, max_features = 7, bootstrap = True, max_samples = 0.5)
# Train the model on training data (full pre-2012 sample) and score on test
rf.fit(X_train_all, Y_train_all)
predictions = rf.predict(X_test)
mse_error = mean_squared_error(predictions, Y_test)
print(mse_error)
| Sho-Shoo/36490-F23-Group1 | example_code/random_forest.py | random_forest.py | py | 4,038 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.chdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "skle... |
from django.conf.urls import url

from . import views

# Every view below is exposed under "<name>/$" and url-named after itself;
# only the index matches the bare prefix.  Order is preserved from the
# original hand-written pattern list.
_ROUTE_NAMES = [
    'scrape_movies',
    'view_movies',
    'filter_movies',
    'mark_read_movies',
    'update_ratings',
    'remove_duplicates',
    'poll_state_scrape',
    'poll_state_rating',
    'poll_state_duplicates',
    'search_movies',
    'mark_read_bulk',
    'delete_bulk',
]

urlpatterns = [url(r'^$', views.index, name='index')]
urlpatterns += [
    url(r'^{}/$'.format(name), getattr(views, name), name=name)
    for name in _ROUTE_NAMES
]
| manujosephv/MovieScraper | movielistview/urls.py | urls.py | py | 1,055 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.co... |
17794645978 | # Lifted heavily from Mustard Mine, and may have some unnecessary guff
import base64
import collections
import datetime
import functools
import json
import os
import sys
import threading
import time
import pytz
from pprint import pprint, pformat
# Hack: Get gevent to do its monkeypatching as early as possible.
# I have no idea what this is actually doing, but if you let the
# patching happen automatically, it happens too late, and we get
# RecursionErrors and such. There's a helpful warning on startup.
from gevent import monkey; monkey.patch_all(subprocess=True)
from flask import Flask, request, redirect, session, url_for, g, render_template, jsonify, Response, Markup
from authlib.integrations.requests_client import OAuth2Session
import requests
try:
import config
except ImportError:
# Construct a config object out of the environment
import config_sample as config
failed = []
# Hack: Some systems like to give us a DATABASE_URL instead of a DATABASE_URI
if "DATABASE_URL" in os.environ: os.environ["DATABASE_URI"] = os.environ["DATABASE_URL"]
for var in dir(config):
if var.startswith("__"): continue # Ignore dunders
if var in os.environ: setattr(config, var, os.environ[var])
else: failed.append(var)
if failed:
print("Required config variables %s not found - see config_sample.py" % ", ".join(failed), file=sys.stderr)
sys.exit(1)
sys.modules["config"] = config # Make the config vars available elsewhere
import database
app = Flask(__name__)
# Fall back to an ephemeral random key (sessions won't survive restarts) if unset.
app.secret_key = config.SESSION_SECRET or base64.b64encode(os.urandom(12))

# Override Flask's forcing of Location headers to be absolute, since it
# gets stuff flat-out wrong. Also, the spec now says that relative
# headers are fine (and even when the spec said that the Location should
# to be absolute, everyone accepted relative URIs).
if os.environ.get("OVERRIDE_REDIRECT_HTTPS"):
    from werkzeug.middleware.proxy_fix import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app) # Grab info from Forwarded headers
    # Shadow Flask's redirect() so Location headers stay relative.
    _redirect = redirect
    def redirect(*a, **kw):
        resp = _redirect(*a, **kw)
        resp.autocorrect_location_header = False
        return resp
    # Shadow url_for() to force https in externally-visible URLs.
    _url_for = url_for
    def url_for(*a, **kw): return _url_for(*a, **kw).replace("http://", "https://")
REQUIRED_SCOPES = "channel:read:subscriptions channel_subscriptions" # Ensure that these are sorted
class TwitchDataError(Exception):
    """Error payload returned by the Twitch API, with its fields exposed
    as attributes on the exception (e.g. .message, .status)."""
    def __init__(self, error):
        # Mirror every field of the error dict onto the instance.
        for field, value in error.items():
            setattr(self, field, value)
        super().__init__(error["message"])
def query(endpoint, *, token, method="GET", params=None, data=None, auto_refresh=True):
    """Perform a Twitch API request and return the decoded JSON body.

    endpoint must start with "kraken/" or "helix/".  token selects the auth
    mode: None (no auth), "oauth"/"bearer" (session token with those header
    prefixes), "app" (client-credentials app token fetched on the fly), or a
    literal token string (sent as OAuth).  On a 401 with auto_refresh, the
    session token is refreshed once and the request retried.  A 403 raises
    TwitchDataError; a 204 returns {}.

    If this is called outside of a Flask request context, be sure to provide
    the auth token, and set auto_refresh to False.
    """
    # TODO: Tidy up all this mess of auth patterns. It'll probably be easiest
    # to migrate everything to Helix first, and then probably everything will
    # use Bearer or App authentication.
    if token is None:
        auth = None
    elif token == "oauth":
        auth = "OAuth " + session["twitch_token"]
    elif token == "bearer":
        auth = "Bearer " + session["twitch_token"]
    elif token == "app":
        r = requests.post("https://id.twitch.tv/oauth2/token", data={
            "grant_type": "client_credentials",
            "client_id": config.CLIENT_ID, "client_secret": config.CLIENT_SECRET,
        })
        r.raise_for_status()
        # BUGFIX: this previously rebound the `data` parameter (data = r.json()),
        # clobbering the caller's request body before it was sent below.
        token_info = r.json()
        auth = "Bearer " + token_info["access_token"]
        # TODO: Save the token so long as it's valid
        # expires = int(time.time()) + token_info["expires_in"] - 120
    else:
        auth = "OAuth " + token
    if not endpoint.startswith(("kraken/", "helix/")): raise ValueError("Need explicit selection of API (helix or kraken)")
    r = requests.request(method, "https://api.twitch.tv/" + endpoint,
        params=params, data=data, headers={
            "Accept": "application/vnd.twitchtv.v5+json",
            "Client-ID": config.CLIENT_ID,
            "Authorization": auth,
        })
    if auto_refresh and r.status_code == 401 and r.json()["message"] == "invalid oauth token":
        r = requests.post("https://id.twitch.tv/oauth2/token", data={
            "grant_type": "refresh_token",
            "refresh_token": session["twitch_refresh_token"],
            "client_id": config.CLIENT_ID, "client_secret": config.CLIENT_SECRET,
        })
        r.raise_for_status()
        resp = r.json()
        session["twitch_token"] = resp["access_token"]
        session["twitch_refresh_token"] = resp["refresh_token"]
        # Recurse for simplicity. Do NOT pass the original token, and be sure to
        # prevent infinite loops by disabling auto-refresh. Otherwise, pass-through.
        # (But DO pass the token-passing mode.)
        return query(endpoint, token="bearer" if token == "bearer" else "oauth",
            method=method, params=params, data=data, auto_refresh=False)
    if r.status_code == 403:
        # TODO: What if it *isn't* of this form??
        raise TwitchDataError(json.loads(r.json()["message"]))
    r.raise_for_status()
    if r.status_code == 204: return {}
    return r.json()
@app.route("/")
def mainpage():
    """Landing page: external redirect, login prompt, or subscriber dashboard."""
    dest = os.environ.get("REDIRECT_TO_OTHER")
    if dest: return redirect(dest)
    # NOTE: If we've *reduced* the required scopes, this will still force a re-login.
    # However, it'll be an easy login, as Twitch will recognize the existing auth.
    if "twitch_token" not in session or session.get("twitch_auth_scopes") != REQUIRED_SCOPES:
        return render_template("login.html")
    user = session["twitch_user"]
    return render_template("index.html",
        username=user["display_name"], subscribers=database.list_subscribers(user["_id"]),
    )
@app.route("/logout")
def logout():
    """Drop the Twitch credentials from the session and return home."""
    for key in ("twitch_token", "twitch_user"):
        session.pop(key, None)
    return redirect(url_for("mainpage"))
@app.route("/login")
def login():
    """Begin the OAuth2 authorization-code flow with Twitch."""
    twitch = OAuth2Session(config.CLIENT_ID, config.CLIENT_SECRET,
        scope=REQUIRED_SCOPES)
    uri, state = twitch.create_authorization_url("https://id.twitch.tv/oauth2/authorize",
        redirect_uri=os.environ.get("OVERRIDE_REDIRECT_URI") or url_for("authorized", _external=True))
    # Remember the CSRF state so /login/authorized can validate the callback.
    session["login_state"] = state
    return redirect(uri)
@app.route("/login/authorized")
def authorized():
    """OAuth callback: exchange the code for tokens and establish the session."""
    if "error" in request.args:
        # User cancelled the auth flow - discard auth (most likely there won't be any)
        session.pop("twitch_token", None)
        return redirect(url_for("mainpage"))
    twitch = OAuth2Session(config.CLIENT_ID, config.CLIENT_SECRET,
        state=session["login_state"])
    resp = twitch.fetch_access_token("https://id.twitch.tv/oauth2/token",
        code=request.args["code"],
        # For some bizarre reason, we need to pass this information along.
        client_id=config.CLIENT_ID, client_secret=config.CLIENT_SECRET,
        redirect_uri=os.environ.get("OVERRIDE_REDIRECT_URI") or url_for("authorized", _external=True))
    if "access_token" not in resp:
        # Something went wrong with the retrieval. No idea what or why,
        # so I'm doing a cop-out and just dumping to console.
        print("Unable to log in", file=sys.stderr)
        pprint(resp, stream=sys.stderr)
        print("Returning generic failure.", file=sys.stderr)
        raise Exception
    session["twitch_token"] = resp["access_token"]
    session["twitch_refresh_token"] = resp["refresh_token"]
    session["twitch_auth_scopes"] = " ".join(sorted(resp["scope"]))
    # kraken_user = query("kraken/user", token="oauth")
    # The Kraken response includes fields not in Helix, including created_at,
    # and email (though Helix gives us the latter if we add an OAuth scope).
    user = query("helix/users", token="bearer")["data"][0]
    user["_id"] = user["id"] # For now, everything looks for _id. Existing logins don't have user["id"].
    database.login_user(user["_id"], session["twitch_token"])
    session["twitch_user"] = user
    return redirect(url_for("mainpage"))
@app.route("/upload", methods=["POST"])
def upload_files():
    """Bulk-load every uploaded subscriber CSV for the logged-in broadcaster."""
    twitchid = session["twitch_user"]["_id"]
    for upload in request.files.getlist("csv"):
        print("Loading", upload.filename)
        database.bulk_load_subs(twitchid, upload.read().decode("UTF-8"))
    return redirect(url_for("mainpage"))
# TODO: JSON API endpoints for uploading a CSV, and forcing a recheck
# Hack. TODO: Have a UI to do this
@app.route("/ping-api")
def ping_api():
    """Debug endpoint: dump the raw Helix subscriptions payload for the
    logged-in broadcaster as preformatted text."""
    # Renamed from `id`, which shadowed the builtin of the same name.
    broadcaster_id = session["twitch_user"]["_id"]
    '''
    params = {"limit": 100, "offset": 0}
    subs = []
    while True:
        data = query("kraken/channels/%s/subscriptions" % id, token="oauth", params=params)
        if not data["subscriptions"]: break
        subs.extend(data["subscriptions"])
        params["offset"] += params["limit"]
    database.update_subs_from_api(id, subs)
    '''
    data = query("helix/subscriptions", token="bearer", params={"broadcaster_id": broadcaster_id})
    # TODO: Paginate the results
    return "<pre>" + pformat(data) + "</pre>"
if __name__ == "__main__":
    # Run directly: re-exec ourselves under gunicorn using the Procfile command.
    import logging
    logging.basicConfig(level=logging.INFO)
    # Load us up using gunicorn, configured via the Procfile
    with open("Procfile") as f: cmd = f.read().strip().replace("web: ", "")
    if "PORT" not in os.environ: os.environ["PORT"] = "5000" # hack - pick a different default port
    sys.argv = cmd.split(" ")[1:] # TODO: Split more smartly
    from gunicorn.app.wsgiapp import run; run()
else:
    # Worker startup. This is the place to put any actual initialization work
    # as it won't be done on master startup.
    pass
| Rosuav/sub-tracker | subtracker.py | subtracker.py | py | 8,962 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gevent.monkey.patch_all",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gevent.monkey",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
... |
28775868892 | from typing import Optional
from pydantic import Field, HttpUrl, validator
from app.model.response import Success
from app.model.base import DataModel, InCreateModel, InUpdateModel
from app.database.table.location_cabinet import CabinetStatus
from app.util.type.guid import GUID
from app.util.regex_pattern import NAME_PATTERN
from app.util.string_length import SHORT_LENGTH, LONG_LENGTH, URL_LENGTH
class _BaseCabinet(DataModel):
    """Shared schema for a storage cabinet: location, name, capacity and status.

    Field titles/descriptions are user-facing API docs and are intentionally
    kept in Chinese.  NOTE(review): current_number carries gt=0 here although
    CabinetInCreate defaults it to 0 (its override drops the constraint) —
    confirm the intended lower bound.
    """
    located_room: GUID = Field(
        ...,
        title="存储柜所在房间的 ID",
    )
    cabinet_name: str = Field(
        ...,
        max_length=SHORT_LENGTH,
        regex=NAME_PATTERN,
        title="存储柜名称",
        description="用来区分存储柜,只能使用中文、大小写字母、数字、下划线、中划线,默认为 Unnamed Cabinet",
        example="Cabinet_1",
    )
    cabinet_comment: Optional[str] = Field(
        None,
        max_length=LONG_LENGTH,
        title="存储柜备注",
        example="Some comment of the cabinet",
    )
    cabinet_image_url: Optional[HttpUrl] = Field(
        None,
        max_length=URL_LENGTH,
        title="存储柜图片的 URL",
        example="http://www.example.com/image.png",
    )
    max_number: int = Field(
        ...,
        gt=0,
        title="存储柜最大容量",
        description="存储柜最多能存放的物品数量,如果不希望被限制可以尝试使用一个非常大的数值",
        example=9999,
    )
    current_number: int = Field(
        ...,
        gt=0,
        title="存储柜当前容量",
        description="当前容量不能超过最大值,且当达到最大时会更新存储柜状态为满载",
        example=100,
    )
    status: CabinetStatus = Field(
        ...,
        title="存储柜状态",
        description="""
可选的存储柜状态为:
- DISABLED (0): 禁用
- ENABLED (1): 启用
- FULL_LOAD (2): 满载
新建立的存储柜默认为禁用( DISABLED ),可以在设置中修改。""",
    )
    @validator("current_number")
    def check_current_number(cls, value: int, values: dict) -> int:
        """Validate that the current load does not exceed the maximum capacity.

        Args:
            value (int): current number of stored items
            values (dict): previously validated fields of this model

        Raises:
            ValueError: if the current number is greater than max_number

        Returns:
            int: the validated current number
        """
        max_number: int | None = values.get("max_number")
        assert max_number is not None, "max_number is required"
        if value > max_number:
            raise ValueError(
                f"current_number ({value}) is greater than max_number ({max_number})"
            )
        return value
    @validator("status")
    def check_status(cls, value: CabinetStatus, values: dict) -> CabinetStatus:
        """Force the status to FULL_LOAD when the cabinet is at capacity.

        Args:
            value (CabinetStatus): the requested status
            values (dict): previously validated fields of this model

        Returns:
            CabinetStatus: FULL_LOAD if current_number equals max_number,
            otherwise the requested status unchanged
        """
        max_number: int | None = values.get("max_number")
        current_number: int | None = values.get("current_number")
        assert max_number is not None, "max_number is required"
        assert current_number is not None, "current_number is required"
        if max_number == current_number:
            return CabinetStatus.FULL_LOAD
        return value
class Cabinet(_BaseCabinet):
    """Full cabinet representation returned by the API; identical to the base model."""
    pass
class CabinetInCreate(InCreateModel, _BaseCabinet):
cabinet_name: Optional[str] = Field(
"Unnamed Cabinet",
max_length=SHORT_LENGTH,
regex=NAME_PATTERN,
title="存储柜名称",
description="用来区分存储柜,只能使用中文、大小写字母、数字、下划线、中划线,默认为 Unnamed Cabinet",
example="Cabinet_1",
)
current_number: Optional[int] = Field(
0,
title="存储柜当前容量",
description="当前容量不能超过最大值,且当达到最大时会更新存储柜状态为满载",
example=100,
)
status: Optional[CabinetStatus] = Field(
CabinetStatus.DISABLED,
title="存储柜状态",
description="""
可选的存储柜状态为:
- DISABLED (0): 禁用
- ENABLED (1): 启用
- FULL_LOAD (2): 满载
新建立的存储柜默认为禁用( DISABLED ),可以在设置中修改。""",
)
class CabinetInUpdate(InUpdateModel, _BaseCabinet):
located_room: Optional[GUID] = Field(
None,
title="存储柜所在房间的 ID",
)
cabinet_name: Optional[str] = Field(
None,
max_length=SHORT_LENGTH,
regex=NAME_PATTERN,
title="存储柜名称",
description="用来区分存储柜,只能使用中文、大小写字母、数字、下划线、中划线,默认为 Unnamed Cabinet",
example="Cabinet_1",
)
cabinet_comment: Optional[str] = Field(
None,
max_length=LONG_LENGTH,
title="存储柜备注",
example="Some comment of the cabinet",
)
cabinet_image_url: Optional[HttpUrl] = Field(
None,
max_length=URL_LENGTH,
title="存储柜图片的 URL",
example="http://www.example.com/image.png",
)
max_number: Optional[int] = Field(
None,
gt=0,
title="存储柜最大容量",
description="存储柜最多能存放的物品数量,如果不希望被限制可以尝试使用一个非常大的数值",
example=9999,
)
current_number: Optional[int] = Field(
None,
gt=0,
title="存储柜当前容量",
description="当前容量不能超过最大值,且当达到最大时会更新存储柜状态为满载",
example=100,
)
status: Optional[CabinetStatus] = Field(
None,
title="存储柜状态",
description="""
可选的存储柜状态为:
- DISABLED (0): 禁用
- ENABLED (1): 启用
- FULL_LOAD (2): 满载
新建立的存储柜默认为禁用( DISABLED )。""",
)
class CabinetInResponse(Success):
data: list[Cabinet]
| batu1579/instrument-management-service | app/model/location_cabinet.py | location_cabinet.py | py | 6,629 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "app.model.base.DataModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "app.util.type.guid.GUID",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pydan... |
24246196895 | #
# matplot graph class definition
#
# backend independent implementation
#
# Kazutomo Yoshii <ky@anl.gov>
#
import os, sys
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
#import matplotlib.collections as collections
import matplotlib.cm as cm
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.cbook import get_sample_data
from matplotlib._png import read_png
from listrotate import *
class plot_info:
def ypos(self,i):
return 1.0 - 0.05*i
def __init__(self, ax, params):
self.ax = ax
dir=os.path.abspath(os.path.dirname(sys.argv[0]))
ax.axis([0,1,0,1])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_frame_on=True
cfg = params['cfg']
info = params['info']
xoff = 0.05
l=1
# for p in range(info['npkgs']):
# ax.text( 0.1, self.ypos(l), 'CPU PKG%d' % p, color=params['pkgcolors'][p] )
# l += 1
# l += 2
ax.text(xoff, self.ypos(l), '[INFO]' )
l += 2
ax.text(xoff, self.ypos(l), 'Description : %s' % cfg['desc'] )
l += 1
ax.text(xoff, self.ypos(l), 'Node : %s' % info['nodeinfo'] )
l += 1
ax.text(xoff, self.ypos(l), 'Linux kernel : %s' % info['kernelversion'] )
l += 1
plt.text( xoff, self.ypos(l), 'Freq. driver : %s' % info['freqdriver'] )
l += 1
plt.text( xoff, self.ypos(l), 'Memory [GB] : %d' % (int(info['memoryKB'])/1024/1024) )
l += 1
plt.text( xoff, self.ypos(l), 'CPU model : %s' % info['cpumodel'] )
l += 1
plt.text( xoff, self.ypos(l), 'No of procs : %s' % info['ncpus'] )
l += 1
plt.text( xoff, self.ypos(l), 'No of pkgs : %s' % info['npkgs'] )
l += 1
plt.text( xoff, self.ypos(l), 'No of NUMA nodes: %d' % info['nnodes'] )
a = info['pkg0phyid']
ht = 'enabled'
if len(a) == len(set(a)):
ht = 'disabled'
l += 1
plt.text( xoff, self.ypos(l), 'Hyperthread : %s' % ht)
# l += 1
# plt.text( xoff, ypos(l), 'Powercap pkg0 : %d Watt' % s_powercap['p0'] )
# l += 1
# plt.text( xoff, ypos(l), 'Powercap pkg1 : %d Watt' % s_powercap['p1'] )
# l += 1
fn = get_sample_data("%s/coolr-logo-poweredby-48.png" % dir, asfileobj=False)
arr = read_png(fn)
imagebox = OffsetImage(arr, zoom=0.6)
ab = AnnotationBbox(imagebox, (0, 0),
xybox=(.8, .1),
xycoords='data',
boxcoords="axes fraction",
pad=0.5)
ax.add_artist(ab)
def update(self, n):
# self.text1.set_text('%d' % n)
s = 'dummy'
class plot_totpwr:
def __init__(self, ax, params, totpwrs):
self.ax = ax
self.update(params, totpwrs)
def update(self, params, totpwrs):
cfg = params['cfg']
cur_t = params['cur']
gxsec = params['gxsec']
self.ax.cla() # this is a brute-force way to update
self.ax.axis([cur_t-gxsec, cur_t, cfg['pwrmin'], cfg['acpwrmax']]) # [xmin,xmax,ymin,ymax]
x = totpwrs[0].getlistx()
y = totpwrs[0].getlisty()
self.ax.plot(x,y, scaley=False, color='black', label='RAPL total' )
if len(totpwrs) > 1:
x = totpwrs[1].getlistx()
y = totpwrs[1].getlisty()
self.ax.plot(x,y, '--', scaley=False, color='black', label='AC' )
self.ax.legend(loc='lower left', prop={'size':9})
self.ax.set_xlabel('Time [S]')
self.ax.set_ylabel('Power [W]')
# the following plots can be generalized
class plot_xsbench:
def __init__(self, ax, params, lps):
self.ax = ax
# too lazy to figure out axhspan's object. fix this later
self.update(params, lps)
def update(self, params, lps):
cfg = params['cfg']
cur_t = params['cur']
gxsec = params['gxsec']
self.ax.cla() # this is a brute-force way to update
self.ax.set_xlim([cur_t-gxsec, cur_t])
self.ax.set_ylim(bottom=0)
x = lps.getlistx()
y = lps.getlisty()
#self.ax.plot(x,y, scaley=False, color='black', label='' )
self.ax.bar(x,y, color='black', label='' )
self.ax.legend(loc='lower left', prop={'size':9})
self.ax.set_xlabel('Time [S]')
self.ax.set_ylabel('TTS [S]')
class plot_appperf:
def __init__(self, ax, params, lps):
self.ax = ax
# too lazy to figure out axhspan's object. fix this later
self.update(params, lps)
def update(self, params, lps):
cfg = params['cfg']
cur_t = params['cur']
gxsec = params['gxsec']
self.ax.cla() # this is a brute-force way to update
self.ax.set_xlim([cur_t-gxsec, cur_t])
self.ax.autoscale_view(scaley=True)
self.ax.set_ylim(bottom=0)
x = lps.getlistx()
y = lps.getlisty()
self.ax.plot(x,y, label='')
# self.ax.legend(loc='lower left', prop={'size':9})
self.ax.set_xlabel('Time [S]')
self.ax.set_ylabel('App performance')
class plot_runtime: # mean, std
def __init__(self, ax, params, pdata):
self.ax = ax
self.update(params, pdata)
def update(self, params, pdata, ptype = 'temp'):
cfg = params['cfg']
cur_t = params['cur']
gxsec = params['gxsec']
self.ax.cla()
self.ax.set_xlim([cur_t-gxsec, cur_t])
self.ax.autoscale_view(scaley=True)
x = pdata.getlistx()
y = pdata.getlisty()
e = pdata.getlisto()
self.ax.plot(x,y, scaley=True, label='')
self.ax.errorbar(x,y,yerr=e, lw=.2, label = '')
# we need to update labels everytime because of cla()
self.ax.set_xlabel('Time [S]')
self.ax.set_ylabel('Runtime')
# self.ax.legend(loc='lower left', prop={'size':9})
# ----------------------
class plot_rapl:
def __init__(self, ax, params, ppkg, pmem, titlestr=''):
self.ax = ax
self.titlestr = titlestr
# too lazy to figure out axhspan's object. fix this later
self.update(params, ppkg, pmem)
def update(self, params, ppkg, pmem):
cfg = params['cfg']
cur_t = params['cur']
gxsec = params['gxsec']
self.ax.cla() # this is a brute-force way to update
self.ax.set_xlim([cur_t-gxsec, cur_t])
self.ax.autoscale_view(scaley=True)
#self.ax.axis([cur_t-gxsec, cur_t, cfg['pwrmin'], cfg['pwrmax']]) # [xmin,xmax,ymin,ymax]
pkgid = 0
for t in ppkg:
x = t.getlistx()
ycap = t.getlisto()
#self.ax.plot(x,ycap, scaley=False, color='red', label='PKG%dlimit'%pkgid )
self.ax.plot(x,ycap, color='red', label='PKG%dlimit'%pkgid )
pkgid += 1
pkgid = 0
for t in ppkg:
x = t.getlistx()
y = t.getlisty()
#self.ax.plot(x,y,scaley=False,color=params['pkgcolors'][pkgid], label='PKG%d'%pkgid)
self.ax.plot(x,y,color=params['pkgcolors'][pkgid], label='PKG%d'%pkgid)
pkgid += 1
pkgid = 0
for t in pmem:
x = t.getlistx()
y = t.getlisty()
#self.ax.plot(x,y,scaley=False,color=params['pkgcolors'][pkgid], linestyle='--', label='PKG%ddram'%pkgid)
self.ax.plot(x,y,color=params['pkgcolors'][pkgid], linestyle='--', label='PKG%ddram'%pkgid)
pkgid += 1
self.ax.legend(loc='lower left', prop={'size':9})
self.ax.set_xlabel('Time [S]')
self.ax.set_ylabel('Power [W]')
if len(self.titlestr):
self.ax.set_title("%s" % self.titlestr)
class plot_line_err: # used for temp and freq (mean+std)
def __init__(self, ax, params, pdata, ptype = 'temp' ):
self.ax = ax
# unfortunately, I couldn't figure out how to update errorbar correctly
self.update(params, pdata, ptype)
def update(self, params, pdata, ptype = 'temp'):
cfg = params['cfg']
cur_t = params['cur']
gxsec = params['gxsec']
self.ax.cla() # this is a brute-force way to update. I don't know how to update errorbar correctly.
if ptype == 'temp':
self.ax.axis([cur_t-gxsec, cur_t, cfg['tempmin'], cfg['tempmax']]) # [xmin,xmax,ymin,ymax]
elif ptype == 'freq':
self.ax.axis([cur_t-gxsec, cur_t, cfg['freqmin'], cfg['freqmax']]) # [xmin,xmax,ymin,ymax]
plt.axhspan(cfg["freqnorm"], cfg["freqmax"], facecolor='#eeeeee', alpha=0.5)
else:
self.ax.axis([cur_t-gxsec, cur_t, 0, 100]) # [xmin,xmax,ymin,ymax]
pkgid = 0
for t in pdata:
x = t.getlistx()
y = t.getlisty()
e = t.getlisto()
self.ax.plot(x,y,scaley=False,color=params['pkgcolors'][pkgid], label='PKG%d'%pkgid)
self.ax.errorbar(x,y,yerr=e, lw=.2, color=params['pkgcolors'][pkgid], label = '')
pkgid += 1
# we need to update labels everytime because of cla()
self.ax.set_xlabel('Time [S]')
if ptype == 'temp':
self.ax.set_ylabel('Core temperature [C]')
elif ptype == 'freq':
self.ax.set_ylabel('Frequency [GHz]')
else:
self.ax.set_ylabel('Unknown')
self.ax.legend(loc='lower left', prop={'size':9})
# below are kind of examples
#
class plotline:
def __init__(self, ax, x, y):
self.ax = ax
self.line, = ax.plot(x,y)
self.ax.axhspan( 0.7, 1.0, facecolor='#eeeeee', alpha=1.0)
def update(self, x, y):
self.line.set_data(x, y)
class plotcolormap:
def __init__(self, ax, X):
self.ax = ax
self.im = self.ax.imshow(X, cmap=cm.jet, interpolation='nearest')
self.im.set_cmap('spectral')
self.im.set_clim(0, 1.5)
f = plt.gcf()
f.colorbar(self.im)
def update(self,X):
self.im.set_array(X)
class plotbar:
def __init__(self, ax, x, y):
self.ax = ax
self.rects = ax.bar(x, y)
def update(self, y):
for r, h in zip(self.rects, y):
r.set_height(h)
class ploterrorbar:
def __init__(self, ax, x, y, e):
self.ax = ax
l, (b, t), v = ax.errorbar(x, y, e)
self.line = l
self.bottom = b
self.top = t
self.vert = v
def update(self, x, y, e):
# XXX: this is a bit brute-force
# I couldn't figure out how to update vert
self.ax.cla()
self.ax.errorbar(x, y, e)
class plottext:
def ypos(self,i):
return 1.0 - 0.05*i
def __init__(self, ax, n):
self.ax = ax
dir=os.path.abspath(os.path.dirname(sys.argv[0]))
ax.axis([0,1,0,1])
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_frame_on=True
ax.plot( [ 0.1, 0.2], [0.96, 0.96], color='blue', linewidth=2 )
ax.plot( [ 0.1, 0.2], [0.91, 0.91], color='green', linewidth=2 )
ax.plot( [ 0.1, 0.2], [0.86, 0.86], color='red', linewidth=1 )
self.text1 = ax.text( 0.3, self.ypos(2), '%d' % n )
fn = get_sample_data("%s/coolr-logo-poweredby-48.png" % dir, asfileobj=False)
arr = read_png(fn)
imagebox = OffsetImage(arr, zoom=0.4)
ab = AnnotationBbox(imagebox, (0, 0),
xybox=(.75, .12),
xycoords='data',
boxcoords="axes fraction",
pad=0.5)
ax.add_artist(ab)
def update(self, n):
self.text1.set_text('%d' % n)
| UO-OACISS/tau2 | tools/src/pycoolr/src/pycoolrgui/pycoolr-plot/clr_matplot_graphs.py | clr_matplot_graphs.py | py | 11,842 | python | en | code | 34 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_num... |
73034041633 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Python Libs
import os
import random
import string
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath, expensiveTest
ensure_in_syspath('../../../')
# Import Salt Libs
import integration
from salt.config import cloud_providers_config
# Import Third-Party Libs
try:
import libcloud # pylint: disable=W0611
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def __random_name(size=6):
'''
Generates a random cloud instance name
'''
return 'CLOUD-TEST-' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(size)
)
# Create the cloud instance name to be used throughout the tests
INSTANCE_NAME = __random_name()
@skipIf(HAS_LIBCLOUD is False, 'salt-cloud requires >= libcloud 0.13.2')
class RackspaceTest(integration.ShellCase):
'''
Integration tests for the Rackspace cloud provider using the Openstack driver
'''
@expensiveTest
def setUp(self):
'''
Sets up the test requirements
'''
super(RackspaceTest, self).setUp()
# check if appropriate cloud provider and profile files are present
profile_str = 'rackspace-config:'
provider = 'rackspace'
providers = self.run_cloud('--list-providers')
if profile_str not in providers:
self.skipTest(
'Configuration file for {0} was not found. Check {0}.conf files '
'in tests/integration/files/conf/cloud.*.d/ to run these tests.'
.format(provider)
)
# check if api key, user, and tenant are present
path = os.path.join(integration.FILES,
'conf',
'cloud.providers.d',
provider + '.conf')
config = cloud_providers_config(path)
user = config['rackspace-config']['openstack']['user']
tenant = config['rackspace-config']['openstack']['tenant']
api = config['rackspace-config']['openstack']['apikey']
if api == '' or tenant == '' or user == '':
self.skipTest(
'A user, tenant, and an api key must be provided to run these '
'tests. Check tests/integration/files/conf/cloud.providers.d/{0}.conf'
.format(provider)
)
def test_instance(self):
'''
Test creating an instance on rackspace with the openstack driver
'''
# create the instance
instance = self.run_cloud('-p rackspace-test {0}'.format(INSTANCE_NAME))
ret = ' {0}'.format(INSTANCE_NAME)
# check if instance with salt installed returned successfully
try:
self.assertIn(ret, instance)
except AssertionError:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
raise
# delete the instance
delete = self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
ret = ' True'
try:
self.assertIn(ret, delete)
except AssertionError:
raise
def tearDown(self):
'''
Clean up after tests
'''
query = self.run_cloud('--query')
ret = ' {0}:'.format(INSTANCE_NAME)
# if test instance is still present, delete it
if ret in query:
self.run_cloud('-d {0} --assume-yes'.format(INSTANCE_NAME))
if __name__ == '__main__':
from integration import run_tests
run_tests(RackspaceTest)
| shineforever/ops | salt/tests/integration/cloud/providers/rackspace.py | rackspace.py | py | 3,678 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "salttesting.helpers.ensure_in_syspath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 34,
"usage_type": "attribute"
},
{
"a... |
13734241619 | import re
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from base.models import Lexicon, EXCLUDED_LEXICA
class ProfileEditForm(forms.Form):
lexiconChoices = Lexicon.objects.exclude(lexiconName__in=EXCLUDED_LEXICA)
defaultLexicon = forms.ModelChoiceField(
queryset=lexiconChoices,
label='Default Lexicon',
widget=forms.Select(attrs={'class': 'form-control'}),
empty_label=None)
profileText = forms.CharField(
widget=forms.Textarea(attrs={'class': 'form-control'}),
label='Your profile',
required=False)
disableChat = forms.BooleanField(label='Disable Chat', required=False)
default_language = forms.ChoiceField(
choices=settings.LANGUAGES,
required=False,
widget=forms.Select(attrs={'class': 'form-control'}),
)
class UsernameEditForm(forms.Form):
username = forms.CharField(label='Your desired username', required=True)
def clean(self):
u = self.cleaned_data.get('username')
if not u:
raise forms.ValidationError(_('You must enter a username'))
if not re.match(r'\w+$', u):
raise forms.ValidationError(
_('Your username must consist of alphanumeric characters.'))
# Case-insensitive username match
users = User.objects.filter(username__iexact=u)
if users.count() > 0:
raise forms.ValidationError(
_('This username already exists in our system!'))
| domino14/Webolith | djAerolith/accounts/forms.py | forms.py | py | 1,589 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "django.forms.Form",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "base.models.Lexicon.objects.exclude",
"line_number": 12,
"usage_type": "call"
},
{
"api_name"... |
21678788184 | # https://leetcode.com/problems/merge-two-sorted-lists/
# Definition for singly-linked list.
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
head = tail = ListNode(0)
while list1 and list2:
if list1.val < list2.val:
tail.next = list1
list1 = list1.next
else:
tail.next = list2
list2 = list2.next
tail = tail.next
tail.next = list1 or list2
return head.next
if __name__ == '__main__':
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next = ListNode(4)
l2 = ListNode(1)
l2.next = ListNode(3)
l2.next = ListNode(4)
solution = Solution()
print(solution.mergeTwoLists(l1, l2))
| webdastur/ProgrammingProblems | LeetCode/merge_two_sorted_lists.py | merge_two_sorted_lists.py | py | 946 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
}
] |
17670564445 | from django.utils import timezone
from rest_framework import filters
from rest_framework.response import Response
from rest_framework import status, viewsets, permissions
from url_filter.integrations.drf import DjangoFilterBackend
from .models import (
User, Card, Transaction,
)
from .serializers import (
UserSerializer, CardSerializer, TransactionSerializer,
)
class UserViewSet(viewsets.ModelViewSet):
"""
Вьюшка для регистрации пользователей
"""
permission_classes = [permissions.AllowAny]
serializer_class = UserSerializer
def list(self, request, *args, **kwargs):
"""
NOTE: Вьюшка используется только для создания, просматривать информацию о пользователях не нужно
"""
return Response([], status=status.HTTP_403_FORBIDDEN)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class CardViewSet(viewsets.ModelViewSet):
"""
Вьюшка для CRUD'a карт текущего пользователя
"""
serializer_class = CardSerializer
filter_backends = [filters.OrderingFilter, DjangoFilterBackend]
filter_fields = ['id', 'user', 'name', 'balance', ]
def get_queryset(self):
user = self.request.user
queryset = Card.objects\
.filter(user=user)\
.prefetch_related('user')
return queryset
class TransactionViewSet(viewsets.ModelViewSet):
"""
Вьюшка для CRUD'a транзакций
"""
serializer_class = TransactionSerializer
filter_backends = [filters.OrderingFilter, DjangoFilterBackend]
filter_fields = ['id', 'user', 'card', 'sum', 'operation_type']
def get_queryset(self):
user = self.request.user.id
queryset = Transaction.objects\
.select_related('card')\
.filter(user=user, card__user=user)\
.order_by('date')
return queryset
def create(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
try:
card = Card.objects.get(id=data['card'].id)
except Card.DoesNotExist:
return Response('Нет такой карты', status=status.HTTP_400_BAD_REQUEST)
if data['operation_type'] == 'income':
card.balance += data['sum']
elif data['operation_type'] == 'expense':
card.balance -= data['sum']
card.save()
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(
serializer.data, status=status.HTTP_201_CREATED, headers=headers
)
| legacy72/its-animals-backend | bank/views.py | views.py | py | 3,079 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 19,
"usage_type": ... |
11861469094 | # import the necessary packages
from Stitcher import Stitcher
from MotionDetector import MotionDetector
from imutils.video import VideoStream
from datetime import datetime
import numpy as np
import imutils
import time
import cv2
# initialize the video streams and allow them to warmup
print("[INFO] starting cameras...")
###############################################################################
## When standing behind the cameras, the leftStream should be the camera ##
## to your lefthand side and the rightStream should be the camera to your ##
## righthand side. ##
## ##
## VideoStream(src=0).start() # for default camera (eg. webcam) ##
## VideoStream(usePiCamera=True).start() # for Raspberry Pi camera ##
## VideoStream(src="http://10.42.164.131:8080/video").start() # for IP cam ##
## rightStream = leftStream # for mirroring ##
###############################################################################
leftStream = VideoStream(2).start()
rightStream = VideoStream(4).start()
time.sleep(2.0)
# initialize the image stitcher, motion detector, and total number of frames read
stitcher = Stitcher()
motion = MotionDetector(minArea=500)
frames = 0
# loop over frames from the video streams
while True:
# grab the frames from their respective video streams
left = leftStream.read()
right = rightStream.read()
# resize the frames
left = imutils.resize(left, height=480)
right = imutils.resize(right, height=480)
cv2.imshow("Left Frame", left)
cv2.imshow("Right Frame", right)
# wait for a keypress on first display
if frames == 0:
key = cv2.waitKey(0) & 0xFF
motion_detection = key == ord("m")
###########################################################################
## (frames % x) flushes the homography cache every x frames ##
###########################################################################
# start = datetime.now() # PROFILING
result = stitcher.stitch([left, right], flushCache=(frames % 100 == 0))
# time_ms = (datetime.now() - start).total_seconds() * 1000 # PROFILING
# print("[INFO] stitched frame #{} in {:.2f} ms".format(frames, time_ms)) # PROFILING
# keep trying to compute homograpy if it didn't work the first time
while result is None:
print("[INFO] homography could not be computed: frame #{}".format(frames))
result = stitcher.stitch([left, right], flushCache=(frames % 100))
if motion_detection:
# convert the panorama to grayscale, blur it slightly, update the motion detector
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
locs = motion.update(gray)
# only process the panorama for motion if a nice average has been built up
if frames > 32 and len(locs) > 0:
# initialize the minimum and maximum (x, y)-coordinates, respectively
(minX, minY) = (np.inf, np.inf)
(maxX, maxY) = (-np.inf, -np.inf)
# loop over the locations of motion and accumulate the
# minimum and maximum locations of the bounding boxes
for l in locs:
(x, y, w, h) = cv2.boundingRect(l)
(minX, maxX) = (min(minX, x), max(maxX, x + w))
(minY, maxY) = (min(minY, y), max(maxY, y + h))
# draw the bounding box
cv2.rectangle(result, (minX, minY), (maxX, maxY), (0, 0, 255), 3)
# increment the total number of frames
frames += 1
# show the output image
cv2.imshow("Result", result)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
leftStream.stop()
rightStream.stop()
| sohamroy19/TryangleCam | Python/TryangleCam.py | TryangleCam.py | py | 4,030 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "imutils.video.VideoStream",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "imutils.video.VideoStream",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "Stitch... |
3651344308 | import pycookiecheat
import requests
import re
from bs4 import BeautifulSoup as beaty
import smtplib
from email.message import EmailMessage
class CloudCancellation:
def __init__(self, cancelling, reason=None, success=None):
self.cancelling = cancelling
self.reason = reason
self.success = success
def server_cancellation(self):
existing_cloud = False
self.cancelling = ''.join(re.findall(r'c\d+.***.net', self.cancelling))
if self.cancelling:
existing_cloud = True
if existing_cloud:
self.reason = input('Reason for cancellation + TID#: ')
else:
print('Incorrect Cloud')
maco_url = 'https://***/'
cancellation_url = 'https://***'
cookies = pycookiecheat.chrome_cookies(
maco_url,
cookie_file='/Users/galin.velikov/Library/Application Support/Google/Chrome/Profile 1/Cookies')
payload = {'location': self.cancelling, 'reason': self.reason, 'hours_until_cancellation': '48'}
response = requests.post(cancellation_url, data=payload, cookies=cookies).text
output = beaty(response, "html.parser").find("div", {"class": "CoolErrorDiv"})
print(f'\n{output.text.strip()}')
if 'successfully' in output.text.strip():
cloud_cancellation.email_sending()
def email_sending(self):
body = f"The monitoring was disabled for the server {self.cancelling} and added for cancellation due to {self.reason}\n" \
"\nBest Regards,\n" \
"\nGalin Velikov" \
"\nSystem Administrator" \
"\n***"
sender_email = "g***@****.com"
password = 'trollface'
msg = EmailMessage()
msg.set_content(body)
msg['Subject'] = f'Monitoring disabled for cloud {self.cancelling}'
msg['From'] = "Galin Velikov"
msg['To'] = "s***@***.com"
msg['cc'] = "g***@**.com"
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login(sender_email, password)
server.send_message(msg)
server.quit()
print("Successfully sent email")
except Exception as ex:
print("Something went wrong brat … check it further", ex)
cloud = input('Cloud for cancellation: ')
cloud_cancellation = CloudCancellation(cloud)
cloud_cancellation.server_cancellation()
| galinvelikov/cloud_cancellation | cloud_cancellation.py | cloud_cancellation.py | py | 2,438 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.findall",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pycookiecheat.chrome_cookies",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSo... |
11810014858 | import logging
import logging.handlers
import os
import time
class CustomFormatter(logging.Formatter):
grey = "\x1b[38;20m"
white = "\x1b[37;20m"
yellow = "\x1b[33;20m"
red = "\x1b[31;20m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: grey + format + reset,
logging.INFO: white + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
class Log(object):
def __init__(self, logger=None, log_cate="search"):
"""指定保存日志的路径,日志级别,以及调用文件,将日志存入到指定的文件中"""
# 创建一个logger
# logging.getLogger("apscheduler").setLevel(logging.WARNING) # 对一些引用包的日志进行限制
self.logger = logging.getLogger(logger)
self.logger.setLevel(logging.DEBUG)
# 创建一个handler,用于写入日志文件
self.log_time = time.strftime("%Y_%m_%d")
file_dir = os.getcwd() + "/log"
if not os.path.exists(file_dir):
os.mkdir(file_dir)
self.log_path = file_dir # 文件路径
self.log_name = log_cate + "_" + self.log_time + ".log" # 文件名字
self.log_file_name = os.path.join(self.log_path, self.log_name).replace("\\", "/") # 可直接修改此处的path
# fh = logging.FileHandler(self.log_file_name, "a", encoding="utf-8") # 文件输出
fh = logging.handlers.RotatingFileHandler(self.log_file_name,
mode="a",
encoding="utf-8",
maxBytes=10 * 1024 * 1024,
backupCount=5) # 按照大小自动切割文件
fh.setLevel(logging.INFO)
# 再创建一个handler,用于输出到控制台
ch = logging.StreamHandler() # 控制台输出
ch.setLevel(logging.INFO)
# ch.addFilter(filter) # 添加过滤类
# 定义handler的输出格式
formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d : %(levelname)s --> %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(CustomFormatter())
# 给logger添加handler
self.logger.addHandler(fh)
self.logger.addHandler(ch)
# 添加下面一句,添加日志后移除句柄
# self.logger.removeHandler(ch)
# self.logger.removeHandler(fh)
# 关闭打开的文件
fh.close()
ch.close()
def get_logger(self):
return self.logger
if __name__ == "__main__":
logger = Log(__name__).get_logger()
logger.info("log config success!")
logger.warning("waring")
| doppler-motion/code-pub | Python/python_modules/logging_demo/logging_demo.py | logging_demo.py | py | 3,121 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.Formatter",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.WA... |
12837454897 | """pim URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from . import views
from django.urls import path, re_path
from django.conf.urls import url, include
urlpatterns = [
path(
'',
views.home,
name='home'),
path(
'article/',
views.article_search,
name='article_search'),
re_path(
'^article/(?P<gtin>[0-9]+)/',
views.article_details,
name='article_details'),
re_path(
'^article/history/(?P<gtin>[0-9]+)/',
views.article_history,
name='article_history'),
re_path(
'^article/edit/(?P<gtin>[0-9]+)/',
views.article_edit,
name='article_edit'),
re_path(
'^article/allergens/edit/(?P<gtin>[0-9]+)/',
views.article_allergens_edit,
name='article_allergens_edit'),
path(
'article/add/',
views.article_add,
name='article_add'),
re_path(
'article-image/history/(?P<id>[0-9]+)/',
views.article_image_history,
name='product_image_history'),
path(
'product/',
views.product_index,
name='product_index'),
path(
'product/search',
views.product_search,
name='product_search'),
path(
'product/recent/',
views.product_recent,
name='product_recent'),
path(
'product/incomplete/',
views.product_incomplete,
name='product_incomplete'),
path(
'product/import/',
views.product_import,
name='product_import'),
path(
'product/import/csv/',
views.product_import_csv,
name='product_import'),
path(
'product/add/gtin/',
views.product_add_gtin,
name='product_add_gtin'),
re_path(
'^product/view/(?P<gtin>[0-9]+)/$',
views.product_view,
name='product_view'),
re_path(
'^product/edit/(?P<gtin>[0-9]+)/$',
views.product_edit,
name='product_edit'),
re_path(
'^product/allergens/edit/(?P<gtin>[0-9]+)/$',
views.product_allergens_edit,
name='product_allergens_edit'),
re_path(
'^product/allergens/copy/(?P<gtin>[0-9]+)/$',
views.product_allergens_copy,
name='product_allergens_copy'),
re_path(
'product/history/(?P<gtin>[0-9]+)/',
views.product_history,
name='product_history'),
re_path(
'product-detail/edit/(?P<id>[0-9]+)/',
views.product_detail_edit,
name='product_detail_edit'),
re_path(
'product/delete/(?P<gtin>[0-9]+)/',
views.product_delete,
name='product_delete'),
re_path(
'product/tags/edit/(?P<gtin>[0-9]+)/',
views.product_tags_edit,
name='product_tags_edit'),
re_path(
'product-image/history/(?P<id>[0-9]+)/',
views.product_image_history,
name='product_image_history'),
re_path(
'product-image/add/(?P<gtin>[0-9]+)/',
views.product_image_add,
name='product_image_add'),
re_path(
'product-image/toogle-active/(?P<id>[0-9]+)/',
views.product_image_toggle_active,
name='product_image_toggle_active'),
re_path(
'product-image/main/(?P<id>[0-9]+)/',
views.product_image_main,
name='product_image_main'),
re_path(
'product-image/delete/(?P<id>[0-9]+)/',
views.product_image_delete,
name='product_image_delete'),
path(
'merchantarticle/',
views.merchantarticle_search,
name='merchantarticle_search'),
re_path(
'merchantarticle/(?P<id>[0-9]+)/',
views.merchantarticle,
name='merchantarticle'),
path(
'merchantarticle/add/',
views.merchantarticle_add,
name='merchantarticle_add'),
re_path(
'merchantarticle/edit/(?P<id>[0-9]+)/',
views.merchantarticle_edit,
name='merchantarticle_edit'),
path(
'tag/',
views.tag_search,
name='tag_search'),
re_path(
'tag/(?P<id>[0-9]+)/',
views.tag,
name='tag'),
path(
'tag/add/',
views.tag_add,
name='tag_add'),
re_path(
'tag/edit/(?P<id>[0-9]+)/',
views.tag_edit,
name='tag_edit'),
path(
'productcategory/',
views.productcategory_search,
name='productcategory_search'),
re_path(
'^productcategory/(?P<id>[0-9]+)/',
views.productcategory_details,
name='productcategory_details'),
path(
'productcategory/add/',
views.productcategory_add,
name='productcategory_add'),
re_path(
'productcategory/tags/edit/(?P<id>[0-9]+)/',
views.productcategory_tags_edit,
name='productcategory_tags_edit'),
re_path(
'productcategory/edit/(?P<categoryid>[0-9]+)/',
views.productcategory_edit,
name='productcategory_edit'),
url(
r'^auth/',
include('social_django.urls', namespace='social')),
path(
'login/',
views.login,
name="login"),
path(
'logout/',
views.logout,
name="logout")
]
| hackcasa/zappa_final | web/urls.py | urls.py | py | 5,762 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.urls.re_... |
36529140223 | import argparse
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
def main():
print("*" * 20)
print("Started HW1_ID1_ID2_old.py")
# Parsing script arguments
parser = argparse.ArgumentParser()
parser.add_argument('csv', type=str, help='Input csv file path')
parser.add_argument('k', type=int, help='k parameter')
parser.add_argument('p', type=float, help='p parameter')
args = parser.parse_args()
print("Processed input arguments:")
print(f"csv = {args.csv}, k = {args.k}, p = {args.p}")
print("Initiating KnnClassifier")
model = KNeighborsClassifier(n_neighbors=args.k, p=args.p)
print(f"Loading data from {args.csv}...")
data = pd.read_csv(args.csv, header=None)
print(f"Loaded {data.shape[0]} rows and {data.shape[1]} columns")
X = data[data.columns[:-1]].values.astype(np.float32)
y = pd.factorize(data[data.columns[-1]])[0].astype(np.uint8)
# print("Fitting...")
# model.fit(X, y)
# print("Done")
# print("Predicting...")
# y_pred = model.predict(X)
# print("Done")
# accuracy = np.sum(y_pred == y) / len(y)
# print(f"Train accuracy: {accuracy * 100 :.2f}%")
# print("*" * 20)
data_x, data_y = load_digits(return_X_y=True)
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.25, random_state=42)
print("Fitting...")
model.fit(train_x, train_y)
print("Done")
print("Predicting...")
y_pred = model.predict(test_x)
print(y_pred)
print("Done")
accuracy = np.sum(y_pred == test_y) / len(test_y)
print(f"test accuracy: {accuracy * 100 :.2f}%")
print("*" * 20)
if __name__ == "__main__":
main()
| omervered0708/hw1MLq2 | test_sklearn.py | test_sklearn.py | py | 1,818 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"ap... |
5927958459 | import logging
import math
from typing import List, Optional
import torch
import torch.nn as nn
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import conditional_grad
from ocpmodels.models.base import BaseModel
from ocpmodels.models.scn.smearing import GaussianSmearing
try:
pass
except ImportError:
pass
from .edge_rot_mat import init_edge_rot_mat
from .gaussian_rbf import GaussianRadialBasisLayer
from .input_block import EdgeDegreeEmbedding
from .layer_norm import (
EquivariantLayerNormArray,
EquivariantLayerNormArraySphericalHarmonics,
EquivariantRMSNormArraySphericalHarmonics,
EquivariantRMSNormArraySphericalHarmonicsV2,
get_normalization_layer,
)
from .module_list import ModuleListInfo
from .radial_function import RadialFunction
from .so3 import (
CoefficientMappingModule,
SO3_Embedding,
SO3_Grid,
SO3_LinearV2,
SO3_Rotation,
)
from .transformer_block import (
FeedForwardNetwork,
SO2EquivariantGraphAttention,
TransBlockV2,
)
# Statistics of IS2RE 100K
_AVG_NUM_NODES = 77.81317
_AVG_DEGREE = (
23.395238876342773 # IS2RE: 100k, max_radius = 5, max_neighbors = 100
)
@registry.register_model("equiformer_v2")
class EquiformerV2_OC20(BaseModel):
"""
Equiformer with graph attention built upon SO(2) convolution and feedforward network built upon S2 activation
Args:
use_pbc (bool): Use periodic boundary conditions
regress_forces (bool): Compute forces
otf_graph (bool): Compute graph On The Fly (OTF)
max_neighbors (int): Maximum number of neighbors per atom
max_radius (float): Maximum distance between nieghboring atoms in Angstroms
max_num_elements (int): Maximum atomic number
num_layers (int): Number of layers in the GNN
sphere_channels (int): Number of spherical channels (one set per resolution)
attn_hidden_channels (int): Number of hidden channels used during SO(2) graph attention
num_heads (int): Number of attention heads
attn_alpha_head (int): Number of channels for alpha vector in each attention head
attn_value_head (int): Number of channels for value vector in each attention head
ffn_hidden_channels (int): Number of hidden channels used during feedforward network
norm_type (str): Type of normalization layer (['layer_norm', 'layer_norm_sh', 'rms_norm_sh'])
lmax_list (int): List of maximum degree of the spherical harmonics (1 to 10)
mmax_list (int): List of maximum order of the spherical harmonics (0 to lmax)
grid_resolution (int): Resolution of SO3_Grid
num_sphere_samples (int): Number of samples used to approximate the integration of the sphere in the output blocks
edge_channels (int): Number of channels for the edge invariant features
use_atom_edge_embedding (bool): Whether to use atomic embedding along with relative distance for edge scalar features
share_atom_edge_embedding (bool): Whether to share `atom_edge_embedding` across all blocks
use_m_share_rad (bool): Whether all m components within a type-L vector of one channel share radial function weights
distance_function ("gaussian", "sigmoid", "linearsigmoid", "silu"): Basis function used for distances
attn_activation (str): Type of activation function for SO(2) graph attention
use_s2_act_attn (bool): Whether to use attention after S2 activation. Otherwise, use the same attention as Equiformer
use_attn_renorm (bool): Whether to re-normalize attention weights
ffn_activation (str): Type of activation function for feedforward network
use_gate_act (bool): If `True`, use gate activation. Otherwise, use S2 activation
use_grid_mlp (bool): If `True`, use projecting to grids and performing MLPs for FFNs.
use_sep_s2_act (bool): If `True`, use separable S2 activation when `use_gate_act` is False.
alpha_drop (float): Dropout rate for attention weights
drop_path_rate (float): Drop path rate
proj_drop (float): Dropout rate for outputs of attention and FFN in Transformer blocks
weight_init (str): ['normal', 'uniform'] initialization of weights of linear layers except those in radial functions
enforce_max_neighbors_strictly (bool): When edges are subselected based on the `max_neighbors` arg, arbitrarily select amongst equidistant / degenerate edges to have exactly the correct number.
avg_num_nodes (float): Average number of nodes per graph
avg_degree (float): Average degree of nodes in the graph
use_energy_lin_ref (bool): Whether to add the per-atom energy references during prediction.
During training and validation, this should be kept `False` since we use the `lin_ref` parameter in the OC22 dataloader to subtract the per-atom linear references from the energy targets.
During prediction (where we don't have energy targets), this can be set to `True` to add the per-atom linear references to the predicted energies.
load_energy_lin_ref (bool): Whether to add nn.Parameters for the per-element energy references.
This additional flag is there to ensure compatibility when strict-loading checkpoints, since the `use_energy_lin_ref` flag can be either True or False even if the model is trained with linear references.
You can't have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine.
"""
def __init__(
self,
num_atoms: int, # not used
bond_feat_dim: int, # not used
num_targets: int, # not used
use_pbc: bool = True,
regress_forces: bool = True,
otf_graph: bool = True,
max_neighbors: int = 500,
max_radius: float = 5.0,
max_num_elements: int = 90,
num_layers: int = 12,
sphere_channels: int = 128,
attn_hidden_channels: int = 128,
num_heads: int = 8,
attn_alpha_channels: int = 32,
attn_value_channels: int = 16,
ffn_hidden_channels: int = 512,
norm_type: str = "rms_norm_sh",
lmax_list: List[int] = [6],
mmax_list: List[int] = [2],
grid_resolution: Optional[int] = None,
num_sphere_samples: int = 128,
edge_channels: int = 128,
use_atom_edge_embedding: bool = True,
share_atom_edge_embedding: bool = False,
use_m_share_rad: bool = False,
distance_function: str = "gaussian",
num_distance_basis: int = 512,
attn_activation: str = "scaled_silu",
use_s2_act_attn: bool = False,
use_attn_renorm: bool = True,
ffn_activation: str = "scaled_silu",
use_gate_act: bool = False,
use_grid_mlp: bool = False,
use_sep_s2_act: bool = True,
alpha_drop: float = 0.1,
drop_path_rate: float = 0.05,
proj_drop: float = 0.0,
weight_init: str = "normal",
enforce_max_neighbors_strictly: bool = True,
avg_num_nodes: Optional[float] = None,
avg_degree: Optional[float] = None,
use_energy_lin_ref: Optional[bool] = False,
load_energy_lin_ref: Optional[bool] = False,
):
super().__init__()
import sys
if "e3nn" not in sys.modules:
logging.error(
"You need to install e3nn==0.4.4 to use EquiformerV2."
)
raise ImportError
self.use_pbc = use_pbc
self.regress_forces = regress_forces
self.otf_graph = otf_graph
self.max_neighbors = max_neighbors
self.max_radius = max_radius
self.cutoff = max_radius
self.max_num_elements = max_num_elements
self.num_layers = num_layers
self.sphere_channels = sphere_channels
self.attn_hidden_channels = attn_hidden_channels
self.num_heads = num_heads
self.attn_alpha_channels = attn_alpha_channels
self.attn_value_channels = attn_value_channels
self.ffn_hidden_channels = ffn_hidden_channels
self.norm_type = norm_type
self.lmax_list = lmax_list
self.mmax_list = mmax_list
self.grid_resolution = grid_resolution
self.num_sphere_samples = num_sphere_samples
self.edge_channels = edge_channels
self.use_atom_edge_embedding = use_atom_edge_embedding
self.share_atom_edge_embedding = share_atom_edge_embedding
if self.share_atom_edge_embedding:
assert self.use_atom_edge_embedding
self.block_use_atom_edge_embedding = False
else:
self.block_use_atom_edge_embedding = self.use_atom_edge_embedding
self.use_m_share_rad = use_m_share_rad
self.distance_function = distance_function
self.num_distance_basis = num_distance_basis
self.attn_activation = attn_activation
self.use_s2_act_attn = use_s2_act_attn
self.use_attn_renorm = use_attn_renorm
self.ffn_activation = ffn_activation
self.use_gate_act = use_gate_act
self.use_grid_mlp = use_grid_mlp
self.use_sep_s2_act = use_sep_s2_act
self.alpha_drop = alpha_drop
self.drop_path_rate = drop_path_rate
self.proj_drop = proj_drop
self.avg_num_nodes = avg_num_nodes or _AVG_NUM_NODES
self.avg_degree = avg_degree or _AVG_DEGREE
self.use_energy_lin_ref = use_energy_lin_ref
self.load_energy_lin_ref = load_energy_lin_ref
assert not (
self.use_energy_lin_ref and not self.load_energy_lin_ref
), "You can't have use_energy_lin_ref = True and load_energy_lin_ref = False, since the model will not have the parameters for the linear references. All other combinations are fine."
self.weight_init = weight_init
assert self.weight_init in ["normal", "uniform"]
self.enforce_max_neighbors_strictly = enforce_max_neighbors_strictly
self.device = "cpu" # torch.cuda.current_device()
self.grad_forces = False
self.num_resolutions: int = len(self.lmax_list)
self.sphere_channels_all: int = (
self.num_resolutions * self.sphere_channels
)
# Weights for message initialization
self.sphere_embedding = nn.Embedding(
self.max_num_elements, self.sphere_channels_all
)
# Initialize the function used to measure the distances between atoms
assert self.distance_function in [
"gaussian",
]
if self.distance_function == "gaussian":
self.distance_expansion = GaussianSmearing(
0.0,
self.cutoff,
600,
2.0,
)
# self.distance_expansion = GaussianRadialBasisLayer(num_basis=self.num_distance_basis, cutoff=self.max_radius)
else:
raise ValueError
# Initialize the sizes of radial functions (input channels and 2 hidden channels)
self.edge_channels_list = [int(self.distance_expansion.num_output)] + [
self.edge_channels
] * 2
# Initialize atom edge embedding
if self.share_atom_edge_embedding and self.use_atom_edge_embedding:
self.source_embedding = nn.Embedding(
self.max_num_elements, self.edge_channels_list[-1]
)
self.target_embedding = nn.Embedding(
self.max_num_elements, self.edge_channels_list[-1]
)
self.edge_channels_list[0] = (
self.edge_channels_list[0] + 2 * self.edge_channels_list[-1]
)
else:
self.source_embedding, self.target_embedding = None, None
# Initialize the module that compute WignerD matrices and other values for spherical harmonic calculations
self.SO3_rotation = nn.ModuleList()
for i in range(self.num_resolutions):
self.SO3_rotation.append(SO3_Rotation(self.lmax_list[i]))
# Initialize conversion between degree l and order m layouts
self.mappingReduced = CoefficientMappingModule(
self.lmax_list, self.mmax_list
)
# Initialize the transformations between spherical and grid representations
self.SO3_grid = ModuleListInfo(
"({}, {})".format(max(self.lmax_list), max(self.lmax_list))
)
for lval in range(max(self.lmax_list) + 1):
SO3_m_grid = nn.ModuleList()
for m in range(max(self.lmax_list) + 1):
SO3_m_grid.append(
SO3_Grid(
lval,
m,
resolution=self.grid_resolution,
normalization="component",
)
)
self.SO3_grid.append(SO3_m_grid)
# Edge-degree embedding
self.edge_degree_embedding = EdgeDegreeEmbedding(
self.sphere_channels,
self.lmax_list,
self.mmax_list,
self.SO3_rotation,
self.mappingReduced,
self.max_num_elements,
self.edge_channels_list,
self.block_use_atom_edge_embedding,
rescale_factor=self.avg_degree,
)
# Initialize the blocks for each layer of EquiformerV2
self.blocks = nn.ModuleList()
for i in range(self.num_layers):
block = TransBlockV2(
self.sphere_channels,
self.attn_hidden_channels,
self.num_heads,
self.attn_alpha_channels,
self.attn_value_channels,
self.ffn_hidden_channels,
self.sphere_channels,
self.lmax_list,
self.mmax_list,
self.SO3_rotation,
self.mappingReduced,
self.SO3_grid,
self.max_num_elements,
self.edge_channels_list,
self.block_use_atom_edge_embedding,
self.use_m_share_rad,
self.attn_activation,
self.use_s2_act_attn,
self.use_attn_renorm,
self.ffn_activation,
self.use_gate_act,
self.use_grid_mlp,
self.use_sep_s2_act,
self.norm_type,
self.alpha_drop,
self.drop_path_rate,
self.proj_drop,
)
self.blocks.append(block)
# Output blocks for energy and forces
self.norm = get_normalization_layer(
self.norm_type,
lmax=max(self.lmax_list),
num_channels=self.sphere_channels,
)
self.energy_block = FeedForwardNetwork(
self.sphere_channels,
self.ffn_hidden_channels,
1,
self.lmax_list,
self.mmax_list,
self.SO3_grid,
self.ffn_activation,
self.use_gate_act,
self.use_grid_mlp,
self.use_sep_s2_act,
)
if self.regress_forces:
self.force_block = SO2EquivariantGraphAttention(
self.sphere_channels,
self.attn_hidden_channels,
self.num_heads,
self.attn_alpha_channels,
self.attn_value_channels,
1,
self.lmax_list,
self.mmax_list,
self.SO3_rotation,
self.mappingReduced,
self.SO3_grid,
self.max_num_elements,
self.edge_channels_list,
self.block_use_atom_edge_embedding,
self.use_m_share_rad,
self.attn_activation,
self.use_s2_act_attn,
self.use_attn_renorm,
self.use_gate_act,
self.use_sep_s2_act,
alpha_drop=0.0,
)
if self.load_energy_lin_ref:
self.energy_lin_ref = nn.Parameter(
torch.zeros(self.max_num_elements),
requires_grad=False,
)
self.apply(self._init_weights)
self.apply(self._uniform_init_rad_func_linear_weights)
@conditional_grad(torch.enable_grad())
def forward(self, data):
self.batch_size = len(data.natoms)
self.dtype = data.pos.dtype
self.device = data.pos.device
atomic_numbers = data.atomic_numbers.long()
num_atoms = len(atomic_numbers)
(
edge_index,
edge_distance,
edge_distance_vec,
cell_offsets,
_, # cell offset distances
neighbors,
) = self.generate_graph(
data,
enforce_max_neighbors_strictly=self.enforce_max_neighbors_strictly,
)
###############################################################
# Initialize data structures
###############################################################
# Compute 3x3 rotation matrix per edge
edge_rot_mat = self._init_edge_rot_mat(
data, edge_index, edge_distance_vec
)
# Initialize the WignerD matrices and other values for spherical harmonic calculations
for i in range(self.num_resolutions):
self.SO3_rotation[i].set_wigner(edge_rot_mat)
###############################################################
# Initialize node embeddings
###############################################################
# Init per node representations using an atomic number based embedding
offset = 0
x = SO3_Embedding(
num_atoms,
self.lmax_list,
self.sphere_channels,
self.device,
self.dtype,
)
offset_res = 0
offset = 0
# Initialize the l = 0, m = 0 coefficients for each resolution
for i in range(self.num_resolutions):
if self.num_resolutions == 1:
x.embedding[:, offset_res, :] = self.sphere_embedding(
atomic_numbers
)
else:
x.embedding[:, offset_res, :] = self.sphere_embedding(
atomic_numbers
)[:, offset : offset + self.sphere_channels]
offset = offset + self.sphere_channels
offset_res = offset_res + int((self.lmax_list[i] + 1) ** 2)
# Edge encoding (distance and atom edge)
edge_distance = self.distance_expansion(edge_distance)
if self.share_atom_edge_embedding and self.use_atom_edge_embedding:
source_element = atomic_numbers[
edge_index[0]
] # Source atom atomic number
target_element = atomic_numbers[
edge_index[1]
] # Target atom atomic number
source_embedding = self.source_embedding(source_element)
target_embedding = self.target_embedding(target_element)
edge_distance = torch.cat(
(edge_distance, source_embedding, target_embedding), dim=1
)
# Edge-degree embedding
edge_degree = self.edge_degree_embedding(
atomic_numbers, edge_distance, edge_index
)
x.embedding = x.embedding + edge_degree.embedding
###############################################################
# Update spherical node embeddings
###############################################################
for i in range(self.num_layers):
x = self.blocks[i](
x, # SO3_Embedding
atomic_numbers,
edge_distance,
edge_index,
batch=data.batch, # for GraphDropPath
)
# Final layer norm
x.embedding = self.norm(x.embedding)
###############################################################
# Energy estimation
###############################################################
node_energy = self.energy_block(x)
node_energy = node_energy.embedding.narrow(1, 0, 1)
energy = torch.zeros(
len(data.natoms),
device=node_energy.device,
dtype=node_energy.dtype,
)
energy.index_add_(0, data.batch, node_energy.view(-1))
energy = energy / self.avg_num_nodes
# Add the per-atom linear references to the energy.
if self.use_energy_lin_ref and self.load_energy_lin_ref:
# During training, target E = (E_DFT - E_ref - E_mean) / E_std, and
# during inference, \hat{E_DFT} = \hat{E} * E_std + E_ref + E_mean
# where
#
# E_DFT = raw DFT energy,
# E_ref = reference energy,
# E_mean = normalizer mean,
# E_std = normalizer std,
# \hat{E} = predicted energy,
# \hat{E_DFT} = predicted DFT energy.
#
# We can also write this as
# \hat{E_DFT} = E_std * (\hat{E} + E_ref / E_std) + E_mean,
# which is why we save E_ref / E_std as the linear reference.
with torch.cuda.amp.autocast(False):
energy = energy.to(self.energy_lin_ref.dtype).index_add(
0,
data.batch,
self.energy_lin_ref[atomic_numbers],
)
###############################################################
# Force estimation
###############################################################
if self.regress_forces:
forces = self.force_block(
x, atomic_numbers, edge_distance, edge_index
)
forces = forces.embedding.narrow(1, 1, 3)
forces = forces.view(-1, 3)
if not self.regress_forces:
return energy
else:
return energy, forces
# Initialize the edge rotation matrics
def _init_edge_rot_mat(self, data, edge_index, edge_distance_vec):
return init_edge_rot_mat(edge_distance_vec)
@property
def num_params(self):
return sum(p.numel() for p in self.parameters())
def _init_weights(self, m):
if isinstance(m, torch.nn.Linear) or isinstance(m, SO3_LinearV2):
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
if self.weight_init == "normal":
std = 1 / math.sqrt(m.in_features)
torch.nn.init.normal_(m.weight, 0, std)
elif isinstance(m, torch.nn.LayerNorm):
torch.nn.init.constant_(m.bias, 0)
torch.nn.init.constant_(m.weight, 1.0)
def _uniform_init_rad_func_linear_weights(self, m):
if isinstance(m, RadialFunction):
m.apply(self._uniform_init_linear_weights)
def _uniform_init_linear_weights(self, m):
if isinstance(m, torch.nn.Linear):
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
std = 1 / math.sqrt(m.in_features)
torch.nn.init.uniform_(m.weight, -std, std)
@torch.jit.ignore
def no_weight_decay(self):
no_wd_list = []
named_parameters_list = [name for name, _ in self.named_parameters()]
for module_name, module in self.named_modules():
if (
isinstance(module, torch.nn.Linear)
or isinstance(module, SO3_LinearV2)
or isinstance(module, torch.nn.LayerNorm)
or isinstance(module, EquivariantLayerNormArray)
or isinstance(
module, EquivariantLayerNormArraySphericalHarmonics
)
or isinstance(
module, EquivariantRMSNormArraySphericalHarmonics
)
or isinstance(
module, EquivariantRMSNormArraySphericalHarmonicsV2
)
or isinstance(module, GaussianRadialBasisLayer)
):
for parameter_name, _ in module.named_parameters():
if isinstance(module, torch.nn.Linear) or isinstance(
module, SO3_LinearV2
):
if "weight" in parameter_name:
continue
global_parameter_name = module_name + "." + parameter_name
assert global_parameter_name in named_parameters_list
no_wd_list.append(global_parameter_name)
return set(no_wd_list)
| Open-Catalyst-Project/ocp | ocpmodels/models/equiformer_v2/equiformer_v2_oc20.py | equiformer_v2_oc20.py | py | 25,033 | python | en | code | 518 | github-code | 1 | [
{
"api_name": "ocpmodels.models.base.BaseModel",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "typing.Opti... |
12828451545 | # pygame python第三方游戏库 .pyd 动态模块(可以导入,但是看不到源码) .py 静态模块
import pygame # 官方推荐这样导入
from pygame.locals import *
import sys
# 定义常量记录数据 (常量特点: 字母全大写 一旦定义,不要修改记录的值)
WINDOW_H = 768
WINDOW_W = 512
def main(): # 一般将程序的入口定义为main函数
"""主函数"""
# 1. 创建窗口
window = pygame.display.set_mode((WINDOW_W, WINDOW_H))
# 2.贴背景图
# 加载图片
bg_img = pygame.image.load("res/img_bg_level_1.jpg")
plane_img = pygame.image.load("res/hero2.png")
# 定义变量记录飞机坐标
x = WINDOW_W / 2 - 60
y = WINDOW_H / 2 - 39
while True:
# 贴图(指定坐标,将图片绘制到窗口)
window.blit(bg_img, (0, 0))
# 贴飞机图
window.blit(plane_img, (x, y)) # 290 500
# 3.刷新窗口
pygame.display.update()
# 获取新事件
for event in pygame.event.get():
# 1. 鼠标点击关闭窗口事件
if event.type == QUIT:
print("点击关闭窗口按钮")
sys.exit() # 关闭程序
# 2. 键盘按下事件
if event.type == KEYDOWN:
# 判断用户按键
if event.key == K_SPACE:
print("space")
# 检测键盘长按事件
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_a] or pressed_keys[K_LEFT]:
x -= 5
if pressed_keys[K_d] or pressed_keys[K_RIGHT]:
x += 5
if __name__ == '__main__':
main() | OreoCookiesYeah/base2 | hm_03_飞机移动.py | hm_03_飞机移动.py | py | 1,705 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.... |
12733332965 | import pygame
import random
class Monster(pygame.sprite.Sprite):
def __init__(self, game):
super().__init__()
self.game= game
self.health = 100
self.max_health=100
self.attack = 0.1
self.velocity= random.randint(1,2)
self.image= pygame.image.load("assets/koo.png")
self.rect = self.image.get_rect()
self.rect.x = 1000 + random.randint(0, 300)
self.rect.y = 510
def damage(self,amount):
self.health -= amount
if self.health<=0 :
self.rect.x=1000 + random.randint(0,300)
self.health= self.max_health
def updat_health_bar(self, surface):
bar_color= (111, 210, 46 )
bar_color_a= (60,63,60)
bar_position= (self.rect.x+40,self.rect.y-20, self.health, 5)
bar_pos_a=(self.rect.x+40,self.rect.y-20, self.max_health, 5)
pygame.draw.rect(surface, bar_color_a,bar_pos_a)
pygame.draw.rect(surface, bar_color, bar_position)
def move(self):
if not self.game.check_collision(self,self.game.all_players):
self.rect.x -= self.velocity
else:
self.game.player.damage(self.attack)
| taha-khiari/zombie-game | monter.py | monter.py | py | 1,192 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.sprite",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.image",
... |
35335374713 | from typing import List
from .Entry import Entry
class EntryFilter:
def __init__(self):
pass
def _filter_by_words_count(
self,
filter_fn: callable,
entries: List[Entry],
) -> List[Entry] :
return filter(
lambda entry: filter_fn(len(entry.title.split())),
entries
)
def filter_by_word_count_and_order_by_comments(self, entries: List[Entry], words_count_gt: int = 5, ascending: bool = True) -> List[Entry]:
"""
Filters entries whose length is greater than `words_count_gt` and sorts them by comments count
- `words_count_gt`: Words count greather than... Default: 5
"""
return sorted(
self._filter_by_words_count(lambda words_count: words_count > words_count_gt, entries),
key=lambda entry: entry.comments_count or 0,
reverse=not ascending
)
def filter_by_word_count_and_order_by_points(self, entries: List[Entry], words_count_le: int = 5, ascending: bool = True) -> List[Entry]:
"""
Filters entries whose length is less than or equal to `words_count_le` and sorts them by points
- `words_count_le`: Words count less than or equal to... Default: 5
"""
return sorted(
self._filter_by_words_count(lambda words_count: words_count <= words_count_le, entries),
key=lambda entry: entry.points or 0,
reverse=not ascending
)
| miguel-martinr/stackcrawler | stackcrawler/EntryFilter.py | EntryFilter.py | py | 1,486 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "Entry.Entry",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "Entry.Entry",
"line_number": ... |
5724769308 | """
Примеры:
1)Ввод: 5 15
Вывод: 1+2+3+4+5=15
2)Ввод: 4 46
Вывод: 12+34=46
Введите максимльное число N последовательности: 10
Введите число M, которое необходимо получить в качестве ответа: 46
"""
from typing import List
import numpy as np
import itertools
""" если посмотреть пристально на задачку, то можно заметить, что мы не плюсики расставляем между числами, а 0 и 1 в матрице перестановок.
как сгенерировать такую матрицу, содержащую все возможные перестановки 0 и 1 в векторе, размерностью N - гуглится очень быстро.
на этой идее основано решение"""
def input_array(max_number) -> List[int]:
i = 1
while i < max_number + 1:
s = str(i)
for c in s:
yield c
i += 1
def masks(K):
for i in itertools.product([0, 1], repeat = K*1):
yield np.reshape(np.array(i), (K, 1)).flatten()
def plusik_moves(max_number, solution):
print(f"Searching for solution: {solution}")
numbers = list(input_array(max_number))
pluses = masks(len(numbers)-1)
for mask in pluses:
eq = ""
sum = 0
running_number = 0
for i, n in enumerate(numbers):
op = mask[i-1]
if op:
sum += running_number
running_number = int(n)
eq += (" + " if eq else "") + n
else:
running_number *= 10
running_number += int(n)
eq += n
sum += running_number
if sum == solution:
print(f"Found solution: {eq} == {solution}")
n = int(input("Введите максимльное число N последовательности: "))
m = int(input("Введите число M, которое необходимо получить в качестве ответа: "))
plusik_moves(n, m)
"""
tests:
plusik_moves(5, 1+2+3+4+5)
plusik_moves(5, 12345)
plusik_moves(5, 12+34+5)
plusik_moves(10, 1+2+3+4+5+678910)
plusik_moves(10, 1+2+3+4+5+67+8+910)
plusik_moves(10, 12345678910)
plusik_moves(10, 12+34+56+78+91+0)
plusik_moves(10, 46)
plusik_moves(15, 120)
""" | MasheraAnna/Test_tasks | test1_2try.py | test1_2try.py | py | 2,440 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_n... |
6006514705 | import cv2
import os
import time
from TutorialMurtaza.Util import BaseFunction
import HandTrackingModule as htm

# Finger-counting demo: counts raised fingers on a webcam feed, overlays a
# matching illustration in the top-left corner, and shows the current FPS.
#############
wCam, hCam = 640, 480  # requested capture resolution
#############
cap = cv2.VideoCapture(0)
cap.set(3, wCam)  # property 3 = frame width
cap.set(4, hCam)  # property 4 = frame height
folderPath = BaseFunction.getBaseUrl() + '/TutorialMurtaza/Resources/hand_counting'
myList = os.listdir(folderPath)
print(myList)
# One overlay image per finger count, loaded in directory-listing order.
overlayList = []
for imPath in myList:
    image = cv2.imread(f'{folderPath}/{imPath}')
    print(f'{folderPath}/{imPath}')
    overlayList.append(image)
print(len(overlayList))
pTime = 0  # timestamp of the previous frame, used for the FPS estimate
detector = htm.HandDetector(detectionCon=0.75)
# Landmark ids of the five fingertips (thumb, index, middle, ring, pinky).
tipIds = [4, 8, 12, 16, 20]
while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmList, bbox = detector.findPosition(img, draw=False)
    # print(lmList)
    if len(lmList) != 0:
        fingers = []
        # thumb
        # The thumb folds sideways, so its x coordinate is compared instead
        # of y.  NOTE(review): this assumes one hand orientation — confirm.
        if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:
            # print('Index Finger Open')
            fingers.append(1)
        else:
            fingers.append(0)
        # 4 other fingers
        for id in range(1, 5):
            # A finger counts as raised when its tip landmark is above the
            # joint two landmarks below it (smaller y = higher on screen).
            if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:
                # print('Index Finger Open')
                fingers.append(1)
            else:
                fingers.append(0)
        # print(fingers)
        totalFingers = fingers.count(1)
        print(totalFingers)
        h, w, c = overlayList[totalFingers - 1].shape
        # img[0:y, 0:x] img[height, width] image size is 200
        img[0:h, 0:w] = overlayList[totalFingers - 1]
        # in python if we choose -1 in list then in will choose last item of the list
        # show rectangle and text
        cv2.rectangle(img, (20, 225), (170, 425), (0, 225, 0), cv2.FILLED)
        cv2.putText(img, str(totalFingers), (45, 375), cv2.FONT_HERSHEY_PLAIN, 10, (255, 0, 0), 25)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'FPS : {int(fps)}', (400, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
    cv2.imshow('Image', img)
    cv2.waitKey(1)
| palindungan/2021_skripsi | OpencvTutorial/TutorialMurtaza/Src/Mediapipe/FingerCountingProject.py | FingerCountingProject.py | py | 2,053 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "TutorialMurtaza.Util.BaseFunction.getBaseUrl",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "TutorialMurtaza.Util.BaseFunction",
"line_number": 15,
"usage_type": "name"... |
23613766958 | # -*- coding: utf-8 -*-
import math
import torch
import torch.nn.functional as F
from torch import nn
from ..utils.nn import get_activation_fn
class FF(nn.Module):
    """A smart feedforward layer with activation support.

    Arguments:
        in_features(int): Input dimensionality.
        out_features(int): Output dimensionality.
        bias(bool, optional): Enable/disable bias for the layer. (Default: True)
        bias_zero(bool, optional): Start with a 0-vector bias. (Default: True)
        activ(str, optional): A string like 'tanh' or 'relu' to define the
            non-linearity type. `None` or `'linear'` is a linear layer (default).
    """
    def __init__(self, in_features, out_features, bias=True,
                 bias_zero=True, activ=None):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.use_bias = bias
        self.bias_zero = bias_zero
        self.activ_type = activ
        # Normalize both "no activation" spellings to the canonical 'linear'.
        if self.activ_type in (None, 'linear'):
            self.activ_type = 'linear'
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        # get_activation_fn maps the name to a callable (identity for linear).
        self.activ = get_activation_fn(activ)
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            # Registering None keeps ``self.bias`` accessible and makes
            # F.linear skip the bias term.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weight (and optionally bias) uniformly in
        [-1/sqrt(fan_in), 1/sqrt(fan_in)]; zero the bias if requested."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.use_bias:
            if self.bias_zero:
                self.bias.data.zero_()
            else:
                self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input):
        """Apply the affine transform followed by the configured activation."""
        return self.activ(F.linear(input, self.weight, self.bias))

    def __repr__(self):
        # Human-readable summary of the layer configuration.
        repr_ = self.__class__.__name__ + '(' \
            + 'in_features=' + str(self.in_features) \
            + ', out_features=' + str(self.out_features) \
            + ', activ=' + str(self.activ_type) \
            + ', bias=' + str(self.use_bias)
        if self.use_bias:
            repr_ += ', bias_zero=' + str(self.bias_zero)
        return repr_ + ')'
| lium-lst/nmtpytorch | nmtpytorch/layers/ff.py | ff.py | py | 2,163 | python | en | code | 391 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
19936644653 | from functools import wraps
from flask import Flask, request, jsonify, url_for, redirect
from wallet import Wallet
from error import Error
from jose import jwt
from urllib.request import urlopen
import sys
import os
import json
app = Flask(__name__)
env = os.environ
app.debug = env.get('ENVIRONMENT', 'development') == 'development'
AUTH0_DOMAIN = env.get("AUTH0_DOMAIN", "sumana.auth0.com")
API_IDENTIFIER = env.get("API_IDENTIFIER", "http://localhost:8080")
ALGORITHMS = ["HS256", "RS256"]
def get_token_auth_header():
    """Obtains the access token from the Authorization Header.

    Raises ``Error`` (401) when the header is missing or is not of the
    form ``Bearer <token>``.  Returns the raw token string.
    """
    auth = request.headers.get("Authorization", None)
    if not auth:
        raise Error({"code": "authorization_header_missing",
                     "description":
                         "Authorization header is expected"}, 401)
    parts = auth.split()
    # Exactly two whitespace-separated parts are allowed: "Bearer <token>".
    if parts[0].lower() != "bearer":
        raise Error({"code": "invalid_header",
                     "description":
                         "Authorization header must start with"
                         " Bearer"}, 401)
    elif len(parts) == 1:
        raise Error({"code": "invalid_header",
                     "description": "Token not found"}, 401)
    elif len(parts) > 2:
        raise Error({"code": "invalid_header",
                     "description":
                         "Authorization header must be"
                         " Bearer token"}, 401)
    token = parts[1]
    return token
def requires_auth(f):
    """Determines if the access token is valid.

    Decorator: verifies the request's bearer JWT against the Auth0 JWKS
    before invoking the wrapped view.  Raises ``Error`` (401) on any
    validation failure.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = get_token_auth_header()
        jsonurl = urlopen("https://"+AUTH0_DOMAIN+"/.well-known/jwks.json")
        jwks = json.loads(jsonurl.read())
        unverified_header = jwt.get_unverified_header(token)
        # Find the JWKS key whose key id matches the token's header.
        rsa_key = {}
        for key in jwks["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"]
                }
        if rsa_key:
            try:
                jwt.decode(
                    token,
                    rsa_key,
                    algorithms=ALGORITHMS,
                    audience=API_IDENTIFIER,
                    issuer="https://{domain}/".format(domain=AUTH0_DOMAIN)
                )
            except jwt.ExpiredSignatureError:
                raise Error({"code": "token_expired",
                             "description": "token is expired"}, 401)
            except jwt.JWTClaimsError:
                raise Error({"code": "invalid_claims",
                             "description":
                                 "incorrect claims,"
                                 " please check the audience and issuer"}, 401)
            except Exception:
                raise Error({"code": "invalid_header",
                             "description":
                                 "Unable to parse authentication token"}, 401)
        else:
            # BUG FIX: the original fell through and invoked the view even
            # when no matching JWKS key was found, i.e. without verifying
            # the token at all.  Reject such requests instead.
            raise Error({"code": "invalid_header",
                         "description":
                             "Unable to find appropriate key"}, 401)
        return f(*args, **kwargs)
    return decorated
def has_scope(required_scope):
    """Determines if the required scope is present in the Access Token.

    Args:
        required_scope (str): The scope required to access the resource

    Returns:
        bool: True when ``required_scope`` appears in the token's
        space-separated ``scope`` claim.  Note the claims are read
        *unverified* here; signature checking is requires_auth's job.
    """
    token = get_token_auth_header()
    unverified_claims = jwt.get_unverified_claims(token)
    if unverified_claims.get("scope"):
        token_scopes = unverified_claims["scope"].split()
        for token_scope in token_scopes:
            if token_scope == required_scope:
                return True
    return False
def handle_error(f):
    """Decorator: translate exceptions from a view into JSON error responses.

    ``Error`` keeps its own payload and status code; any other exception
    becomes a generic 500 response.
    """
    @wraps(f)
    def callback(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Error as e:
            response = jsonify(e.error)
            response.status_code = e.status_code
            return response
        except Exception as ex:
            response = jsonify({'description': str(ex), 'code': 'generic:error'})
            response.status_code = 500
            return response
    return callback
@app.route('/balance', methods=['POST', 'GET'])
@requires_auth
@handle_error
def balance():
    """GET: return a card's balance (scope read:balance).
    POST: add (scope add:balance) or subtract (scope delete:balance) an
    amount from a card.  Raises ``Error`` 401 when no branch applies.
    """
    if request.method == 'GET' and has_scope('read:balance'):
        card_id = int(request.args.get('card_id'))
        return jsonify({'balance': Wallet.get_balance(card_id), 'card_id': card_id})
    elif request.method == 'POST':
        body = request.get_json()
        card_id = int(body['card_id'])
        # NOTE: this local shadows the view function name ``balance``;
        # harmless here since the view is never re-entered recursively.
        balance = int(body['amount'])
        #print(card_id,balance)
        if balance > 0 and has_scope('add:balance'):
            #print("in add balance")
            Wallet.modify_balance(card_id, balance)
            return '', 204
        elif balance < 0 and has_scope('delete:balance'):
            #print("in delete balance")
            Wallet.modify_balance(card_id, balance)
            return '', 204
    # Reached when the method/scope combination is not permitted.
    raise Error({'code': 'missing:right', 'description': 'Invalid action'}, 401)
@app.route('/cards', methods=['GET', 'PUT', 'DELETE'])
@requires_auth
@handle_error
def cards():
    """GET: list the wallet (scope read:cards).
    PUT: add a card with an initial amount (scope add:card).
    DELETE: remove a card (scope delete:cards).
    Raises ``Error`` 401 when the method/scope combination is not allowed.
    """
    if request.method == 'GET' and has_scope('read:cards'):
        return jsonify(Wallet.get_wallet())
    elif request.method == 'PUT' and has_scope('add:card'):
        body = request.get_json()
        Wallet.add_card(int(body['card_id']), int(body['amount']))
        return '', 201
    elif request.method == 'DELETE' and has_scope('delete:cards'):
        body = request.get_json()
        Wallet.remove_card(int(body['card_id']))
        return '', 204
    raise Error({'code': 'missing:right', 'description': 'Invalid action'}, 401)
if __name__ == '__main__':
    # Port comes from the command line: ``python server.py <port>``.
    port = sys.argv[1]
    app.run(port=int(port),debug=True)
| SumanaMalkapuram/auth0 | api/server.py | server.py | py | 5,856 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request.headers.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request.h... |
29629691028 | import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
class K_Means:
    """Minimal k-means clustering over the rows of a pandas DataFrame.

    Attributes:
        k: number of clusters.
        df: the numeric feature DataFrame being clustered.
        C: dict mapping cluster id -> centroid (list of feature values),
           updated as training progresses.
    """

    def __init__(self, k, input):
        self.k = k
        self.df = input
        self.C = None

    def centroids(self):
        """Pick k distinct random data rows as the initial centroids."""
        import random
        rows = random.sample(range(len(self.df)), self.k)
        # BUG FIX: the original indexed ``self.df.values[i]`` (the cluster
        # id) instead of ``values[j]`` (the sampled row index), so it always
        # initialized from the first k rows regardless of the random sample.
        return {i: list(self.df.values[j]) for i, j in enumerate(rows)}

    def classify(self, C):
        """Return a copy of the data with one distance column per centroid
        and a ``Closet_Cluster`` column holding each row's nearest centroid id."""
        import copy
        cluster_df = copy.deepcopy(self.df)
        col_n = cluster_df.shape[1]  # number of original feature columns
        for i in C.keys():
            # Euclidean distance of every row to centroid i.  The slice
            # keeps only the original features, excluding distance columns
            # appended on earlier loop iterations.
            cluster_df["Distance_from_{}".format(i)] = np.linalg.norm(
                np.array(cluster_df)[:, :col_n] - C[i], axis=1)
        dist_cols = ["Distance_from_{}".format(i) for i in C.keys()]
        # idxmin yields the winning column name; strip the prefix back to
        # the integer cluster id (prefix contains no digits, so lstrip is safe).
        cluster_df["Closet_Cluster"] = cluster_df.loc[:, dist_cols].idxmin(axis=1).map(
            lambda x: int(x.lstrip("Distance_from_")))
        return cluster_df

    def update(self, C):
        """Recompute each centroid as the mean of the rows assigned to it."""
        c_df = self.classify(C)
        self.C = {
            i: [c for c in np.mean(self.df[c_df["Closet_Cluster"] == i], axis=0)]
            for i in c_df["Closet_Cluster"].unique()
        }
        return self.C

    def train_cluster(self):
        """Run Lloyd's iterations until the assignments stop changing.

        Returns (final centroids, assignment array, last classified frame).
        """
        assignments = None
        C = self.centroids()
        while True:
            # The original called classify(C) twice here (plus once more
            # inside update()); classify once and reuse the result.
            cluster_df = self.classify(C)
            new_assignments = list(cluster_df["Closet_Cluster"])
            new_C = self.update(C)
            if assignments == new_assignments:
                break
            assignments = new_assignments
            C = new_C
        return new_C, np.array(new_assignments), cluster_df
# Compare the hand-rolled K_Means above with sklearn's KMeans on the
# movie-genre feature matrix.
df = pd.read_csv('movieParsing.dat')
# Genre indicator columns used by the custom implementation.
X = df[["Action","Adventure","Animation","Children's","Comedy","Crime","Documentary","Drama","Fantasy","Film-Noir","Horror","Musical","Mystery","Romance","Sci-Fi","Thriller","War","Western"]]
# The sklearn variant runs on every column except the id, standardized.
Xs = np.array(df.drop(['MovieID'], 1).astype(float))
Xs= preprocessing.scale(Xs)
clf = KMeans(n_clusters=10)
clf.fit(Xs)
model1 = K_Means(10, X)
print(model1.train_cluster())
print(clf.labels_) | gospel306/Movie_recommend | data/kmeans.py | kmeans.py | py | 2,008 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.sample",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"lin... |
31637485511 | # coding=utf-8
import os
import unittest
import shutil
import zipfile
from mock import patch
from testfixtures import TempDirectory
from provider import cleaner
import activity.activity_OutputAcceptedSubmission as activity_module
from activity.activity_OutputAcceptedSubmission import (
activity_OutputAcceptedSubmission as activity_object,
)
from tests.activity.classes_mock import FakeLogger, FakeSession, FakeStorageContext
from tests.activity import helpers, settings_mock, test_activity_data
import tests.test_data as test_case_data
def input_data(file_name_to_change=""):
    """Return activity input data with ``file_name`` set.

    Copies the shared fixture before mutating it: the original assigned
    into the module-level ``ingest_accepted_submission_data`` dict in
    place, leaking state between tests.
    """
    activity_data = dict(test_case_data.ingest_accepted_submission_data)
    activity_data["file_name"] = file_name_to_change
    return activity_data
class TestOutputAcceptedSubmission(unittest.TestCase):
    """Tests for the OutputAcceptedSubmission activity (bucket I/O mocked)."""

    def setUp(self):
        # Instantiate the activity with mocked settings and a fake logger.
        fake_logger = FakeLogger()
        self.activity = activity_object(settings_mock, fake_logger, None, None, None)

    def tearDown(self):
        TempDirectory.cleanup_all()
        # clean the temporary directory completely
        shutil.rmtree(self.activity.get_tmp_dir())

    @patch.object(activity_module, "get_session")
    @patch.object(cleaner, "storage_context")
    @patch.object(activity_module, "storage_context")
    def test_do_activity(
        self, fake_storage_context, fake_cleaner_storage_context, fake_session
    ):
        """Happy path: the expanded zip is rebuilt and copied to the output bucket."""
        test_data = {
            "comment": "accepted submission zip file example",
            "filename": "30-01-2019-RA-eLife-45644.zip",
            "expected_result": True,
            "expected_download_status": True,
        }
        directory = TempDirectory()
        # copy files into the input directory using the storage context
        # expanded bucket files
        zip_file_path = os.path.join(
            test_activity_data.ExpandArticle_files_source_folder,
            test_data.get("filename"),
        )
        resources = helpers.expanded_folder_bucket_resources(
            directory,
            test_activity_data.accepted_session_example.get("expanded_folder"),
            zip_file_path,
        )
        # Separate destination folder stands in for the output bucket.
        dest_folder = os.path.join(directory.path, "files_dest")
        os.mkdir(dest_folder)
        fake_storage_context.return_value = FakeStorageContext(
            directory.path, resources, dest_folder=dest_folder
        )
        fake_cleaner_storage_context.return_value = FakeStorageContext(
            directory.path, resources
        )
        # mock the session
        fake_session.return_value = FakeSession(
            test_activity_data.accepted_session_example
        )
        # do the activity
        result = self.activity.do_activity(input_data(test_data.get("filename")))
        filename_used = input_data(test_data.get("filename")).get("file_name")
        # check assertions
        self.assertEqual(
            result,
            test_data.get("expected_result"),
            (
                "failed in {comment}, got {result}, filename (unknown), "
                + "input_file {input_file}"
            ).format(
                comment=test_data.get("comment"),
                result=result,
                input_file=self.activity.input_file,
                filename=filename_used,
            ),
        )
        self.assertEqual(
            self.activity.statuses.get("download"),
            test_data.get("expected_download_status"),
            "failed in {comment}".format(comment=test_data.get("comment")),
        )
        # check output bucket folder contents
        output_bucket_list = [
            file_name
            for file_name in os.listdir(dest_folder)
            if file_name != ".gitkeep"
        ]
        self.assertEqual(
            sorted(output_bucket_list),
            [test_data.get("filename")],
        )
        # check the contents of the zip file
        zip_file_path = os.path.join(
            dest_folder,
            test_data.get("filename"),
        )
        with zipfile.ZipFile(zip_file_path, "r") as open_zipfile:
            resources = open_zipfile.namelist()
        # Every file of the original submission must be present in the output.
        self.assertEqual(len(resources), 42)
        self.assertEqual(
            sorted(resources),
            [
                "30-01-2019-RA-eLife-45644/30-01-2019-RA-eLife-45644.pdf",
                "30-01-2019-RA-eLife-45644/30-01-2019-RA-eLife-45644.xml",
                "30-01-2019-RA-eLife-45644/Answers for the eLife digest.docx",
                "30-01-2019-RA-eLife-45644/Appendix 1.docx",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 1.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 10.pdf",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 11.pdf",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 12.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 13.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 14.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 15.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 2.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 3.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 4.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 5.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 6.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 7.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 8.png",
                "30-01-2019-RA-eLife-45644/Appendix 1figure 9.png",
                "30-01-2019-RA-eLife-45644/Figure 1.tif",
                "30-01-2019-RA-eLife-45644/Figure 2.tif",
                "30-01-2019-RA-eLife-45644/Figure 3.png",
                "30-01-2019-RA-eLife-45644/Figure 4.svg",
                "30-01-2019-RA-eLife-45644/Figure 4source data 1.zip",
                "30-01-2019-RA-eLife-45644/Figure 5.png",
                "30-01-2019-RA-eLife-45644/Figure 5source code 1.c",
                "30-01-2019-RA-eLife-45644/Figure 6.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 10_HorC.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 1_U crassus.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 2_U pictorum.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 3_M margaritifera.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 4_P auricularius.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 5_PesB.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 6_HavA.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 7_HavB.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 8_HavC.png",
                "30-01-2019-RA-eLife-45644/Figure 6figure supplement 9_HorB.png",
                "30-01-2019-RA-eLife-45644/Figure 6source data 1.pdf",
                "30-01-2019-RA-eLife-45644/Manuscript.docx",
                "30-01-2019-RA-eLife-45644/Potential striking image.tif",
                "30-01-2019-RA-eLife-45644/Table 2source data 1.xlsx",
                "30-01-2019-RA-eLife-45644/transparent_reporting_Sakalauskaite.docx",
            ],
        )

    @patch.object(activity_module, "get_session")
    @patch.object(cleaner, "storage_context")
    @patch.object(cleaner, "download_asset_files_from_bucket")
    @patch.object(activity_module, "storage_context")
    def test_do_activity_download_exception(
        self,
        fake_storage_context,
        fake_download,
        fake_cleaner_storage_context,
        fake_session,
    ):
        """A download failure is logged and the activity still returns True."""
        directory = TempDirectory()
        zip_filename = "30-01-2019-RA-eLife-45644.zip"
        zip_file_path = os.path.join(
            test_activity_data.ExpandArticle_files_source_folder,
            zip_filename,
        )
        resources = helpers.expanded_folder_bucket_resources(
            directory,
            test_activity_data.accepted_session_example.get("expanded_folder"),
            zip_file_path,
        )
        fake_cleaner_storage_context.return_value = FakeStorageContext(
            directory.path, resources
        )
        fake_storage_context.return_value = FakeStorageContext(
            directory.path, resources
        )
        # mock the session
        fake_session.return_value = FakeSession(
            test_activity_data.accepted_session_example
        )
        # Force the bucket download step to fail.
        fake_download.side_effect = Exception()
        # do the activity
        result = self.activity.do_activity(input_data(zip_filename))
        self.assertEqual(result, True)
        self.assertEqual(
            self.activity.logger.logexception,
            (
                (
                    "OutputAcceptedSubmission, exception in "
                    "download_all_files_from_bucket for file %s"
                )
                % zip_filename
            ),
        )
| elifesciences/elife-bot | tests/activity/test_activity_output_accepted_submission.py | test_activity_output_accepted_submission.py | py | 8,902 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "tests.test_data.ingest_accepted_submission_data",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tests.test_data",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 25,
"usage_type": "attribute"
}... |
34264370130 | # -*- coding: utf-8 -*-
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.listview import ListView
class basitListeUyg(App):
    """Minimal Kivy app: a label plus a list of programming languages.

    NOTE(review): ``kivy.uix.listview.ListView`` was deprecated/removed in
    later Kivy releases — confirm the target Kivy version.
    """
    def build(self):
        # Horizontal box: label on the left, scrolling list on the right.
        duzen=BoxLayout()
        programlama_dilleri=["Perl", "PHP", "Pure", "Python", "Rebol",
        "Rexx", "Ruby", "Scheme", "Tcl"]
        liste = ListView(item_strings=programlama_dilleri)
        duzen.add_widget(Label(text="Programlama Dilleri"))
        duzen.add_widget(liste)
        return duzen
| mustafa-altinisik/kivy-tr | docs/programlar/listeEylem/programlar/1/listeGorunumu.py | listeGorunumu.py | py | 593 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "kivy.app.App",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "kivy.uix.boxlayout.BoxLayout",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "kivy.uix.listview.ListView",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "k... |
15103892958 | import imp
from django.shortcuts import render
from mywatchlist.models import Watchlist
from django.http import HttpResponse
from django.core import serializers
# Create your views here.
def show_watchlist(request):
    """Render the watchlist page with a 'banyak'/'sedikit' watched verdict."""
    data_watchlist = Watchlist.objects.all()
    # Number of entries already marked as watched.
    counter_watched = sum(1 for show in data_watchlist if show.watched == True)
    # 'banyak' (many) only when every entry has been watched.
    tontonan = 'banyak' if counter_watched >= len(data_watchlist) else 'sedikit'
    context = {
        'watchlist': data_watchlist,
        'nama': 'Muhammad Ruzain',
        'npm' : '2106750250',
        'tontonan' : tontonan
    }
    return render(request, "watchlist.html", context)
def show_watchlist_xml(request):
    """Serialize every Watchlist entry to an XML response."""
    data_watchlist = Watchlist.objects.all()
    return HttpResponse(serializers.serialize("xml", data_watchlist), content_type="application/xml")
def show_watchlist_json(request):
data_watchlist = Watchlist.objects.all()
return HttpResponse(serializers.serialize("json", data_watchlist), content_type="application/json") | eruzetaien/PBPtugas2 | mywatchlist/views.py | views.py | py | 1,071 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mywatchlist.models.Watchlist.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mywatchlist.models.Watchlist.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mywatchlist.models.Watchlist",
"line_number": 9,
"usage_... |
36682252501 | import cv2
import mediapipe as mp
import time

# MediaPipe FaceMesh webcam demo: draws up to two face meshes plus an FPS counter.
wcam, hcam = 680, 480
cap = cv2.VideoCapture(0)
cap.set(3, wcam)  # property 3 = frame width
cap.set(4, hcam)  # property 4 = frame height
ptime = 0  # previous-frame timestamp for the FPS estimate
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
drawspecs = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
while True:
    success, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # BUG FIX: FaceMesh expects RGB input; the original converted the frame
    # but then passed the unconverted BGR ``img``, leaving imgRGB unused.
    results = faceMesh.process(imgRGB)
    print(results)
    if results.multi_face_landmarks:
        for landmarks in results.multi_face_landmarks:
            mpDraw.draw_landmarks(img, landmarks, mpFaceMesh.FACE_CONNECTIONS, drawspecs, drawspecs)
    ctime = time.time()
    fps = 1 / (ctime - ptime)
    ptime = ctime
    cv2.putText(img, f'FPS: {str(int(fps))}', (10,70), cv2.FONT_HERSHEY_PLAIN,3,(255,0,255),3)
    cv2.imshow("image", img)
cv2.waitKey(1) | kanojia-gaurav/Advance_opencv | FaceMesh/facemesh.py | facemesh.py | py | 880 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "mediapipe.solutions",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2... |
25869305063 | # -*- coding: utf-8 -*-
from dateutil import tz
import datetime
from django.contrib.auth.models import User, Group
from django.db import models
from django.db.models import Min
from django.utils.timezone import utc
from academy.models import Profile
class Attendance(models.Model):
    """One clock-in event for a profile; newest events sort first."""
    profile = models.ForeignKey(Profile)
    image = models.ImageField(upload_to="attendance/attendance", blank=True)
    # Field named ``datetime`` shadows the stdlib module name at class
    # scope; get_status re-imports the module locally because of this.
    datetime = models.DateTimeField(auto_now=True)
    date = models.DateField(auto_now=True, null=True, blank=True)

    class Meta:
        ordering = ['-datetime']

    def __unicode__(self):
        # Local ``datetime`` here is the field value in local time.
        datetime = self.datetime.astimezone(tz.tzlocal())
        username = self.profile.user.username if self.profile.user else "No username"
        return str(datetime.date().isoformat()) + " " + str(datetime.time().isoformat()) + " " + username + " (" + self.get_status() + ")"

    def get_status(self):
        """Return a status label based on whether this is the day's first
        event and how it compares to the policy's attend/leave times.

        NOTE(review): all four branches currently return the same string
        ("출석했습니다." = "attended") — the distinct late/left cases appear
        to have been collapsed; confirm whether that is intentional.
        """
        import datetime
        result = ""
        first_attendance = Attendance.objects.filter(profile=self.profile, date=self.date).earliest('datetime')
        is_first = first_attendance == self
        current_datetime = self.datetime.astimezone(tz.tzlocal())
        first_datetime = first_attendance.datetime.astimezone(tz.tzlocal())
        try:
            attend_time = self.profile.attendancemanager.get_attend_time()
            leave_time = self.profile.attendancemanager.get_leave_time()
        except:
            # No policy configured: fall back to the first event's time and
            # a leave time one hour later.
            attend_time = first_datetime.time()
            leave_time = (datetime.datetime(2000, 1, 1, attend_time.hour, attend_time.minute, attend_time.second) + datetime.timedelta(hours=1)).time()
        if is_first and current_datetime.time() <= attend_time:
            result = u"출석했습니다."
        elif is_first and current_datetime.time() > attend_time:
            result = u"출석했습니다."
        elif not is_first and current_datetime.time() < leave_time:
            result = u"출석했습니다."
        elif not is_first and current_datetime.time() >= leave_time:
            result = u"출석했습니다."
        return result
class AttendanceManager(models.Model):
    """Links a profile to its attendance policy and (optional) NFC card."""
    profile = models.OneToOneField(Profile, null=True)
    group = models.OneToOneField(Group, blank=True, null=True)
    policy = models.ForeignKey("AttendancePolicy", blank=True, null=True)
    nfc_id = models.CharField(max_length=50, blank=True, null=True)
    # phone_id = models.CharField(max_length=50, blank=True, null=True)

    def __unicode__(self):
        profile_name = self.profile.__unicode__() if self.profile else "No Profile"
        return profile_name + " --- " + (self.nfc_id if self.nfc_id else "no nfc card")

    def set_nfc(self, nfc_id, force_set=False):
        """Try to bind this manager to an NFC card.

        Returns (success, message); the '&'-joined message lists every
        applicable condition (Korean user-facing strings).  With
        ``force_set`` the card is stolen from any previous owner.
        """
        #TODO academy specific
        message = ""
        success = False
        if not nfc_id:
            message += "&카드 UID를 확인하세요."
        if self.nfc_id and self.nfc_id != nfc_id:
            message += "&이미 카드를 가지고 있습니다."
        if self.nfc_id and self.nfc_id == nfc_id:
            message += "&해당 카드에 이미 등록되어 있습니다."
        if not self.nfc_id and AttendanceManager.objects.filter(nfc_id=nfc_id).exists():
            message += "&타인이 해당 카드에 등록되어 있습니다."
        if force_set or (not self.nfc_id and not AttendanceManager.objects.filter(nfc_id=nfc_id).exists()):
            # Unbind any other manager holding this card, then take it.
            AttendanceManager.objects.filter(nfc_id=nfc_id).update(nfc_id=None)
            self.nfc_id = nfc_id
            self.save()
            success = True
            message += "&등록 되었습니다."
        return success, message
    #
    # def set_phone(self, phone_id, force_set=False):
    #
    #     message = ""
    #     success = False
    #     if not phone_id:
    #         message += "&휴대폰 단말기 ID를 확인하세요."
    #     if phone_id:
    #         self.phone_id = phone_id
    #         self.save()
    #         success = True
    #         message += self.profile.student.name + "& 학생의 출석 알람을 받아보실 수 있습니다."
    #     return success, message
    #
    def get_stu_id(self, nfc):
        """NOTE(review): appears broken — the model declares no ``user``
        field, the while-loop always returns on its first pass, and a falsy
        ``user`` returns None implicitly.  Verify before relying on this."""
        user = self.user
        while user:
            if user.nfc == nfc:
                return user.id
            else:
                return 0

    def get_attend_time(self):
        # Raises AttributeError when no policy is set (callers catch this).
        return self.policy.attend_time

    def get_leave_time(self):
        return self.policy.leave_time
class AttendancePolicy(models.Model):
    """Expected attend/leave times used to classify attendance events."""
    attend_time = models.TimeField()
    leave_time = models.TimeField()

    def __unicode__(self):
        return str(self.attend_time) + "/" + str(self.leave_time)
import signals | enoch2110/dodream | attendance/models.py | models.py | py | 4,670 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 12,
"usage_type": "call"
},
{
"api_name... |
44094509671 | from utils.timestamp_converter import TimestampConverter
class MarketSituation:
    """One offer snapshot, loadable from either a CSV row or a Kafka row.

    All fields default to None; exactly one of ``csv_row``/``kafka_row``
    may be given to populate them at construction time.
    """

    def __init__(self, csv_row=None, kafka_row=None):
        self.amount = None
        self.merchant_id = None
        self.offer_id = None
        self.price = None
        self.prime = None
        self.product_id = None
        self.quality = None
        self.shipping_time_prime = None
        self.shipping_time_standard = None
        self.timestamp = None
        # Parsed datetime counterpart of the raw ``timestamp`` string.
        self.timestamp_object = None
        self.triggering_merchant_id = None
        self.uid = None
        if csv_row:
            self.from_csv(csv_row)
        elif kafka_row:
            self.from_kafka(kafka_row)

    def from_csv(self, csv_row):
        """Populate fields from a CSV row (columns 0..11 in fixed order)."""
        self.amount = csv_row[0]
        self.merchant_id = csv_row[1]
        self.offer_id = csv_row[2]
        self.price = csv_row[3]
        self.prime = csv_row[4]
        self.product_id = csv_row[5]
        self.quality = csv_row[6]
        self.shipping_time_prime = csv_row[7]
        self.shipping_time_standard = csv_row[8]
        self.timestamp = csv_row[9]
        self.timestamp_object = TimestampConverter.from_string(self.timestamp)
        self.triggering_merchant_id = csv_row[10]
        self.uid = csv_row[11]

    def from_kafka(self, kafka_row):
        """Populate fields from a Kafka row.

        NOTE(review): indices start at 1 — presumably column 0 is a row
        id/offset in the Kafka layout; confirm against the producer schema.
        """
        self.amount = kafka_row[1]
        self.merchant_id = kafka_row[2]
        self.offer_id = kafka_row[3]
        self.price = kafka_row[4]
        self.prime = kafka_row[5]
        self.product_id = kafka_row[6]
        self.quality = kafka_row[7]
        self.shipping_time_prime = kafka_row[8]
        self.shipping_time_standard = kafka_row[9]
        self.timestamp = kafka_row[10]
        self.timestamp_object = TimestampConverter.from_string(self.timestamp)
        self.triggering_merchant_id = kafka_row[11]
self.triggering_merchant_id = kafka_row[11]
| marcelja/dynamicpricing | merchant/models/market_situation.py | market_situation.py | py | 1,799 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "utils.timestamp_converter.TimestampConverter.from_string",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.timestamp_converter.TimestampConverter",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "utils.timestamp_converter.TimestampConverte... |
73424587235 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import cv2
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from function_collection import custom_deal, my_utility
import function_collection.ImageProcessing.scaling as scaling
import function_collection.ImageProcessing.AffineTransformation as AffineTransformation
class Me(QWidget):
function_map_index = {1: '灰度图像反转', 2: '灰度直方图', 3: '直方图均衡化',
4: '直方图规定化', 5: '分段线性变换', 6: '幂律变换',
7: '平滑滤波器', 8: '中值滤波器', 9: '旋转变换',
10: '水平垂直平移变换', 11: '图像金字塔', 12: '图像三种插值改变图像大小',
13: '放射变换'}
function_map_key = dict(zip(list(function_map_index.values()), list(function_map_index.keys())))
function_widgets = [None]*14
file_addr = ''
def __init__(self):
super().__init__()
self.my_ui()
self.choose_id = 0
default_path = '/home/dr/图片/董睿一寸照.png'
self.anotherWidget = None
self.file_addr = None
self.q_text_editor = None
def my_ui(self):
v_box = QVBoxLayout()
grid = QGridLayout()
q_button = QPushButton('选择文件', self)
q_button.clicked.connect(self.show_dialog)
q_button.resize(q_button.sizeHint())
q_button.move(0, 0)
self.file_addr_text = QLineEdit()
q_func_button = QPushButton('选择功能')
q_func_button.setCheckable(False)
choose_button = QComboBox(self)
for i in range(1, len(self.function_map_index) + 1):
choose_button.addItem(self.function_map_index[i])
choose_button.move(0, 50)
choose_button.setCurrentIndex(1)
self.choose_id = 2
choose_button.activated[str].connect(self.on_activated)
grid.addWidget(q_button, 1, 0)
grid.addWidget(self.file_addr_text, 1, 1)
grid.addWidget(q_func_button, 2, 0)
grid.addWidget(choose_button, 2, 1)
q_run_button = QPushButton('演示')
q_run_button.clicked.connect(self.run_function)
self.q_text_editor = QPlainTextEdit('test')
self.q_text_editor.adjustSize()
v_box.addLayout(grid)
v_box.addWidget(q_run_button, alignment=Qt.AlignHCenter)
v_box.addStretch(2)
self.setLayout(v_box)
self.resize(500, 400)
self.center()
self.setWindowTitle('大项目')
self.setWindowIcon(QIcon('icon.png'))
self.show()
# 点击退出按钮触发函数
def closeEvent(self, q_close_event):
reply = QMessageBox.question(QMessageBox(self), 'message', '你确定要退出吗', QMessageBox.No | QMessageBox.Yes,
QMessageBox.Yes)
if reply == QMessageBox.Yes:
q_close_event.accept()
else:
q_close_event.ignore()
# 移动应用位置函数
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
pass
# 选择文件处理函数
def show_dialog(self):
file_name = QFileDialog.getOpenFileName(QFileDialog(self), 'open file', '/mnt/sdb/opencv/ImageMaterial')
self.file_addr_text.setText(str(file_name[0]))
self.file_addr = file_name[0]
print('file_addr: ', file_name[0])
# 功能列表选择触发函数
def on_activated(self, text):
print(text, self.function_map_key[text])
self.choose_id = self.function_map_key[text]
# 演示按钮绑定函数
def run_function(self):
self.deal(self.file_addr, self.choose_id)
# 功能处理函数
def deal(self, file_addr, function_index):
    """Run the demo numbered ``function_index`` on the image at ``file_addr``.

    Indices 1-11 draw matplotlib figures via ``my_utility``; indices 12 and
    13 open dedicated Qt widgets instead, so ``plt.show()`` is skipped for
    them.
    """
    origin_image = cv2.imread(file_addr, cv2.IMREAD_COLOR)
    if origin_image is None and function_index not in (12, 13):
        # cv2.imread returns None for a missing/unreadable file; bail out
        # instead of crashing inside a processing branch.
        print('cannot read image:', file_addr)
        return
    if function_index == 1:
        res_image = custom_deal.reversal(origin_image)
        my_utility.custom_show(origin_image, [1, 2, 1])
        my_utility.plt.title('origin image')
        my_utility.custom_show(res_image, [1, 2, 2])
        my_utility.plt.title('reversal image')
    elif function_index == 2:
        my_utility.custom_show(origin_image, [1, 2, 1])
        res_image = cv2.cvtColor(origin_image, code=cv2.COLOR_RGB2GRAY)
        my_utility.custom_show_hist(res_image, [1, 2, 2])
    elif function_index == 3:
        my_utility.custom_show(origin_image, [1, 2, 1])
        res_image = cv2.cvtColor(origin_image, code=cv2.COLOR_RGB2GRAY)
        res_image = custom_deal.grayscale_histogram(res_image)
        my_utility.custom_show(res_image, [1, 2, 2])
    elif function_index == 4:
        self.q_text_editor.setPlainText('还未实现')
        return
    elif function_index == 5:
        my_utility.plt.figure(1)
        my_utility.plt.plot([0, 10, 15, 25], [0, 5, 20, 25], 'ro-')
        my_utility.plt.figure(2)
        my_utility.custom_show(origin_image, [1, 3, 1])
        my_utility.plt.title('origin image')
        res_image = custom_deal.linear_translation(cv2.cvtColor(origin_image, code=cv2.COLOR_RGB2GRAY))
        my_utility.custom_show(res_image, [1, 3, 2])
        my_utility.plt.title('Contrast stretch image')
        ret, res_image = cv2.threshold(res_image, np.mean(res_image), 255, type=cv2.THRESH_BINARY)
        my_utility.custom_show(res_image, [1, 3, 3])
        my_utility.plt.title('threshold image')
    elif function_index == 6:
        my_utility.custom_show(origin_image, [1, 4, 1])
        my_utility.plt.title('origin image')
        # Same grayscale image under three gamma values, side by side.
        gray = cv2.cvtColor(origin_image, code=cv2.COLOR_RGB2GRAY)
        for slot, gamma in ((2, 0.6), (3, 0.4), (4, 0.3)):
            res_image = custom_deal.gamma_translation(gray, 1, gamma)
            my_utility.custom_show(res_image, [1, 4, slot])
            my_utility.plt.title(f'gamma = {gamma}')
    elif function_index == 7:
        my_utility.custom_show(origin_image, [1, 2, 1])
        my_utility.plt.title('origin image')
        res_image = custom_deal.smooth_fliter(cv2.cvtColor(origin_image, code=cv2.COLOR_RGB2GRAY))
        my_utility.custom_show(res_image, [1, 2, 2])
        my_utility.plt.title('3x3 smooth filter')
    elif function_index == 8:
        my_utility.custom_show(origin_image, [1, 2, 1])
        my_utility.plt.title('origin image')
        res_image = custom_deal.median_fliter(cv2.cvtColor(origin_image, code=cv2.COLOR_RGB2GRAY))
        my_utility.custom_show(res_image, [1, 2, 2])
        my_utility.plt.title('3x3 median filter')
    elif function_index == 9:
        my_utility.custom_show(origin_image, [1, 2, 1])
        my_utility.plt.title('origin image')
        res_image = custom_deal.rotate_translation(origin_image, 90)
        my_utility.custom_show(res_image, [1, 2, 2])
        # BUGFIX: the title said '60 degree' but the applied rotation is 90.
        my_utility.plt.title('90 degree')
    elif function_index == 10:
        my_utility.custom_show(origin_image, [1, 3, 1])
        my_utility.plt.title('origin image')
        res_image_h, res_image_v = custom_deal.vh_translation(origin_image)
        my_utility.custom_show(res_image_h, [1, 3, 2])
        my_utility.plt.title('horizontal')
        my_utility.custom_show(res_image_v, [1, 3, 3])
        my_utility.plt.title('vertical')
    elif function_index == 11:
        my_utility.custom_show(origin_image, [1, 3, 1])
        my_utility.plt.title('origin image')
        res_down_image, res_up_image = custom_deal.pyramid(origin_image)
        my_utility.custom_show(res_down_image, [1, 3, 2])
        my_utility.custom_show(res_up_image, [1, 3, 3])
    elif function_index == 12:
        self.function_widgets[function_index] = scaling.Me(file_addr)
    elif function_index == 13:
        self.function_widgets[function_index] = AffineTransformation.Me(file_addr)
    # BUGFIX: index 13 also opens its own widget (and creates no matplotlib
    # figure), so it must skip plt.show() just like index 12.
    if function_index not in (12, 13):
        my_utility.plt.show()
def nothing(self, x):
    """No-op callback placeholder (for widgets that require a handler)."""
    pass
# Script entry point: create the Qt application, show the main window,
# and hand control to the event loop.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    w = Me()
    sys.exit(app.exec_())
| freedomDR/pyqt5_project | src/qt.py | qt.py | py | 8,875 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "function_collection.custom_deal.reversal",
"line_number": 119,
"usage_type": "call"
},
{
"api_... |
34989838921 | import matplotlib
import matplotlib.pyplot as plt
import xlrd
file = u'./data.xlsx'
# BUGFIX: the workbook was opened twice and the first handle discarded;
# open it once.
data = xlrd.open_workbook(file)
table = data.sheet_by_name(u'data')  # 获得表格


def _column(index):
    """Return column ``index`` of the sheet without its header row."""
    values = table.col_values(index)
    values.pop(0)
    return values


x = _column(0)   # read for parity with the sheet; currently unused
y1 = _column(1)  # station counts — shared x-axis tick labels below
y2 = _column(2)  # GDP series
y3 = _column(3)  # growth series
y4 = _column(4)  # population series

# Use a CJK-capable font so the Chinese labels render, and keep the minus
# sign displayable with that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

plt.figure(12)
# One subplot per series: (subplot position, series, line style, legend label).
for position, series, style, label in (
        (221, y2, 'bo-', "gdp/亿元"),
        (222, y3, 'go-', "增长/百分比"),
        (212, y4, 'yo-', "人口/万人"),
):
    plt.subplot(position)
    plt.xlabel('地铁站数量')
    plt.ylabel('值')
    plt.plot(range(len(y1)), series, style, label=label)
    plt.xticks(range(len(y1)), y1, rotation=45)
    plt.grid(True)
    plt.legend(bbox_to_anchor=(1.0, 1), loc=1, borderaxespad=0.)
plt.show() | kismet-laoqiu/undergrad-rep | 2019KDD-邱柯铭-201693043-中国地铁数据分析-ver02 最新补充版/subway_analysis-master/plot.py | plot.py | py | 1,269 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "m... |
17979032121 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pyramid.config import Configurator
from TechLurker.models import RedditData, PyjobData, SecurityNewsData, TechRepublicData
import os
def main():
    """Create and return a new SQLAlchemy session bound to $DATABASE_URL."""
    settings = {'sqlalchemy.url': os.environ['DATABASE_URL']}
    config = Configurator(settings=settings)
    config.include('TechLurker.models')
    factory = config.registry["dbsession_factory"]
    return factory()
class AddTablePipeline(object):
    """Scrapy pipeline that persists scraped items into their matching table.

    The item's distinguishing key ('score', 'job_type', 'articleContent' or
    'votes') decides which model it maps to; unknown items pass through.
    """

    def __init__(self, url):
        """Store the database URL (the session itself is opened per spider)."""
        self.url = url

    def process_item(self, item, spider):
        """Insert the item into its table unless it is a repeat.

        Returns the item unchanged in all cases (Scrapy pipeline contract).
        """
        # BUGFIX: `record` used to be left unbound when no key matched,
        # raising NameError below; default to None and skip the insert.
        record = None
        if 'score' in item.keys():  # Reddit data
            record = RedditData(title=item['title'],
                                content=' '.join(item['content']),
                                score=item['score'])
        elif 'job_type' in item.keys():  # pyjob data
            record = PyjobData(title=item['title'],
                               descrip=' '.join(item['descrip']),
                               loc=item['loc'],
                               job_type=item['job_type'],
                               url=item['url'])
        elif 'articleContent' in item.keys():  # security news data
            record = SecurityNewsData(title=item['title'],
                                      articleContent=item['articleContent'],
                                      date=item['date'],
                                      url=item['url'])
        elif 'votes' in item.keys():  # tech republic data
            record = TechRepublicData(title=item['title'],
                                      content=item['content'],
                                      votes=item['votes'],
                                      from_forum=item['from_forum'])
        if record is not None and not check_repeat(self.session, record):
            try:
                self.session.add(record)
                self.session.commit()
            except Exception:
                # BUGFIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit; keep the rollback behavior
                # but only for ordinary errors.
                self.session.rollback()
        return item

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy hook: build the pipeline from the crawler environment."""
        return cls(
            url=os.environ['DATABASE_URL'])

    def open_spider(self, spider):
        """Open a database session when the spider starts."""
        self.session = main()

    def close_spider(self, spider):
        """Close the database session when the spider finishes."""
        self.session.close()
def check_repeat(db, record):
    """Return True if any table already holds a row with this record's title.

    BUGFIX: the original issued ``db.query(*models)`` — a cross join of all
    four tables — which is both O(n^4) and wrong: whenever any one table is
    empty the join is empty, so duplicates were never detected. Query each
    model separately instead, and return an explicit False.
    """
    models = [TechRepublicData, SecurityNewsData, PyjobData, RedditData]
    for model in models:
        if db.query(model).filter(model.title == record.title).count():
            return True
    return False
| han8909227/TechLurker | TechLurker/scripts/my_scraper/my_scraper/pipelines.py | pipelines.py | py | 2,800 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pyramid.config.Configurator",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "TechLurker.models.RedditData",
"line_number": 32,
"usage_type": "call"
},
{
"api_name... |
33951359126 | import tkinter as tk
from tkinter import ttk
from PIL import ImageTk, Image
from tkinter import messagebox
from datetime import datetime
import csv
from pizza import *
from sauce import *
# Italian specialities: display name -> Pizza subclass. These have fixed
# recipes, so the UI disables the ingredient/sauce sections for them.
italian_pizza = {"Pizza Napoletana": PizzaNapoletana, "Pizza Capricciosa": PizzaCapricciosa,
                 "Pizza Quattro Formaggi": PizzaQuattroFormaggi,
                 "Sardenara": Sardenara, "Pizza Margherita": PizzaMargherita}
# Pizzas the customer may customise with ingredients and a sauce.
normal_pizza = ['Classic', 'Turk Pizza', 'Regular Pizza']
# Sauce display name -> wrapper class applied around a Pizza when ordering.
sauces = {"Ketchup": Ketchup, "Mayonnaise": Mayonnaise, "Mustard": Mustard, "Ranch": Ranch}
def enter_only_digits(entry, action_type) -> bool:
    """Tk validatecommand: on an insert action ('1'), accept only digit text."""
    inserting = action_type == '1'
    return entry.isdigit() if inserting else True
class MainView(tk.Tk):
    """Root window: stacks every page in one container and raises one at a time."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        for page_cls in (StartPage, SecondPage):
            page = page_cls(container, self)
            self.frames[page_cls.__name__] = page
            page.grid(row=0, column=0, sticky="nsew")
        self.show_frame("StartPage")

    def show_frame(self, page_name):
        """Raise the page registered under ``page_name`` to the top."""
        self.frames[page_name].tkraise()
class StartPage(tk.Frame):
    """Landing page: logo image plus MENU / ORDER / EXIT buttons."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        img = ImageTk.PhotoImage(Image.open('images/Pizzaland.png').resize((250, 250), Image.ANTIALIAS))
        label1 = tk.Label(self, image=img)
        # Keep a reference on the widget so the image is not garbage-collected.
        label1.image = img
        label1.place(x="200", y="30")

        def menus():
            # Pop up a read-only window with pizza / ingredient / sauce price tables.
            newWindow = tk.Toplevel()
            newWindow.title("PizzaLand")
            newWindow.geometry(f'{650}x{340}+{350}+{80}')
            newWindow.resizable(False, False)
            newWindow.iconbitmap('images/Pizzaland.ico')
            upper_container = tk.Frame(newWindow)
            upper_container.pack()
            pizza_columns = ('PIZZA', 'COST')
            pizza_menu = [("Classic", "30"), ("Turk Pizza", "35"), ("Regular Pizza", "30"),
                          ("Napoletana", "60"), ("Pizza Capricciosa", "60"), ("Pizza Quattro Formaggi", "60"),
                          ("Sardenara", "60"), ("Pizza Margherita", "40")]
            # Left table: pizzas and prices.
            left_tree = ttk.Treeview(upper_container, columns=pizza_columns, show='headings', height=10)
            left_tree.heading('PIZZA', text='PIZZA')
            left_tree.heading('COST', text='COST')
            left_tree.column("PIZZA", anchor=tk.W, minwidth=150, width=150, stretch=tk.NO)
            left_tree.column("COST", anchor=tk.CENTER, minwidth=150, width=150, stretch=tk.NO)
            for menu in pizza_menu:
                left_tree.insert('', tk.END, values=menu)
            left_tree.pack(side=tk.LEFT)
            ingredients_column = ('INGREDIENT', 'COST')
            ingredients_menu = [("Black Olives", "2"), ("Mushrooms", "6"), ("Meat", "10"),
                                ("Onion", "2"), ("Corn", "2"), ("Mozzarella", "8"),
                                ("Tomatoes", "3"), ("Parmesan", "7"), ("Basil", "2"),
                                ("San Marzano Tomatoes", "4"), ("Tomato Sauce", "3"),
                                ("Gorgonzola", "4"), ("Parmigiano Reggiano", "4"),
                                ("Goat Cheese", "4"), ("Sardines", "10"), ("Red Onions", "3"),
                                ("Prosciutto Cotto (ham)", "6"), ("Parsley", "3")]
            # Right table: extra ingredients and prices.
            right_tree = ttk.Treeview(upper_container, columns=ingredients_column, show='headings')
            right_tree.heading('INGREDIENT', text='INGREDIENT')
            right_tree.heading('COST', text='COST')
            right_tree.column("INGREDIENT", anchor=tk.W, minwidth=150, width=150, stretch=tk.NO)
            right_tree.column("COST", anchor=tk.CENTER, minwidth=150, width=150, stretch=tk.NO)
            for menu in ingredients_menu:
                right_tree.insert('', tk.END, values=menu)
            right_tree.pack(side=tk.LEFT)
            sauces_menu = [("Ketchup", "1"), ("Mayonnaise", "2"),
                           ("Mustard", "3"), ("Ranch", "3")]
            sauces_column = ("SAUCE", "COST")
            # Bottom table: sauces and prices.
            lower_tree = ttk.Treeview(newWindow, columns=sauces_column, show='headings', height=5)
            lower_tree.heading('SAUCE', text='SAUCE')
            lower_tree.heading('COST', text='COST')
            lower_tree.column("SAUCE", anchor=tk.W, minwidth=150, width=150, stretch=tk.NO)
            lower_tree.column("COST", anchor=tk.CENTER, minwidth=150, width=150, stretch=tk.NO)
            for menu in sauces_menu:
                lower_tree.insert('', tk.END, values=menu)
            lower_tree.pack()

        menu_button = tk.Button(self,
                                text='MENU',
                                command=menus,
                                font=('Arial', 10),
                                width=20,
                                height=3,
                                activebackground="#caeffa")
        menu_button.place(x="250", y="350")
        order_button = tk.Button(self,
                                 text='ORDER',
                                 command=lambda: controller.show_frame("SecondPage"),
                                 font=('Arial', 10),
                                 width=20,
                                 height=3,
                                 activebackground="#caeffa")
        order_button.place(x="250", y="450")
        # NOTE(review): EXIT closes via the module-level `app` global;
        # controller.destroy() would avoid the global — confirm before changing.
        exit_button = tk.Button(self,
                                text='EXIT',
                                command=lambda: app.destroy(),
                                font=('Arial', 10),
                                width=20,
                                height=3,
                                activebackground="#caeffa")
        exit_button.place(x="250", y="550")
        # Dark labels used purely as decorative border strips around the buttons.
        v1_space_label = tk.Label(self, height=20, width=2, bg='#364859')
        v1_space_label.place(x="100", y="330")
        v2_space_label = tk.Label(self, height=20, width=2, bg='#364859')
        v2_space_label.place(x="548", y="330")
        h1_space_label = tk.Label(self, width=66, bg='#364859')
        h1_space_label.place(x="100", y="310")
        h2_space_label = tk.Label(self, width=66, bg='#364859')
        h2_space_label.place(x="100", y="628")
class SecondPage(tk.Frame):
    """Order form: pizza choice, ingredients, sauce, user details and submit."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # Tk validation hook: '%P' is the prospective text, '%d' the action type.
        vcmd = (self.register(enter_only_digits), '%P', '%d')

        def pizza_section():
            # Build the pizza picker row; returns (combobox, quantity entry).
            global pizza_frame
            pizza_frame = tk.LabelFrame(self, text="Choose Pizza", padx=62, pady=10)
            pizza_frame.grid(row=0, column=0, padx=20, pady=10)
            pizza_type = tk.StringVar()
            pizza_label = tk.Label(pizza_frame, text="Pizza")
            pizza_label.grid(row=0, column=0, sticky="W")
            pizza_combobox = ttk.Combobox(pizza_frame,
                                          values=["Classic", "Turk Pizza", "Regular Pizza",
                                                  "Pizza Napoletana", "Pizza Capricciosa",
                                                  "Pizza Quattro Formaggi", "Sardenara",
                                                  "Pizza Margherita"],
                                          textvariable=pizza_type)
            pizza_combobox.grid(row=0, column=1, sticky="NEWS")

            def restrict_ing(*args):
                # Italian specialities have fixed recipes: grey out the
                # ingredient and sauce sections while one is selected.
                y = pizza_type.get()
                if y in italian_pizza.keys():
                    for widget in ingredients_frame.winfo_children():
                        widget.configure(state='disabled')
                    for widgets in sauce_frame.winfo_children():
                        widgets.configure(state='disabled')
                else:
                    for widget in ingredients_frame.winfo_children():
                        widget.configure(state='normal')
                    for widgets in sauce_frame.winfo_children():
                        widgets.configure(state='normal')

            pizza_type.trace("w", restrict_ing)
            pizza_quantity = tk.Label(pizza_frame, text="Quantity")
            pizza_quantity.grid(row=1, column=0, sticky="W")
            pizza_quantity_entry = tk.Entry(pizza_frame, width=60, validate='key', validatecommand=vcmd)
            pizza_quantity_entry.grid(row=1, column=1, sticky="NEWS")
            for widget in pizza_frame.winfo_children():
                widget.grid_configure(padx=26, pady=5)
            return pizza_combobox, pizza_quantity_entry

        def ingredient_section():
            # Build the 18 ingredient checkboxes; returns {Checkbutton: name}.
            global ingredients_frame
            ingredients_frame = tk.LabelFrame(self, text="Ingredients", padx=2, pady=10)
            ingredients_frame.grid(row=1, column=0, padx=20, pady=10)
            ing_array = ('Meat', 'Onion', 'Mushrooms', 'Black Olives', 'Goat Cheese', 'San Marzano Tomatoes',
                         'Mozzarella', 'Tomatoes', 'Parmesan', 'Basil', 'Corn', 'Tomato Sauce',
                         'Gorgonzola', 'Parmigiano Reggiano', 'Sardines', 'Red Onions', 'Parsley', 'Prosciutto Cotto (ham)')
            checkboxes = {}
            # c/c2/c3 track the next free column of grid rows 1, 2 and 3.
            c = c2 = c3 = 0
            for Checkbox, i in enumerate(ing_array):  # Checkbox is the index; i (the value) is unused
                name = ing_array[Checkbox]
                current_var = tk.IntVar()
                current_box = tk.Checkbutton(ingredients_frame, text=name, variable=current_var)
                current_box.var = current_var
                # Lay the boxes out five per grid row.
                if Checkbox < 5:
                    current_box.grid(row=0, column=Checkbox, sticky="w")
                elif 5 <= Checkbox <= 9:
                    current_box.grid(row=1, column=c, sticky="w")
                    c += 1
                elif 10 <= Checkbox <= 14:
                    current_box.grid(row=2, column=c2, sticky="w")
                    c2 += 1
                else:
                    current_box.grid(row=3, column=c3, sticky="w")
                    c3 += 1
                checkboxes[current_box] = name  # so checkbutton object is the key and value is string
            for widget in ingredients_frame.winfo_children():
                widget.grid_configure(pady=5)
            return checkboxes

        def sauce_section():
            # Build the sauce radio buttons; returns (StringVar, quantity entry).
            global sauce_frame
            sauce_frame = tk.LabelFrame(self, text="Sauce", padx=70, pady=10)
            sauce_frame.grid(row=2, column=0, padx=20, pady=10)
            # Local (value, label) pairs — shadows the module-level `sauces`
            # dict only inside this function.
            sauces = (('Ketchup', 'Ketchup'),
                      ('Mayonnaise', 'Mayonnaise'),
                      ('Ranch', 'Ranch'),
                      ('Mustard', 'Mustard'))
            selected_sauce = tk.StringVar(value="Ketchup")
            for row_index, row in enumerate(sauces):
                r = tk.Radiobutton(sauce_frame, text=row[0], value=row[1], variable=selected_sauce)
                r.grid(row=0, column=row_index)
            sauce_quantity = tk.Label(sauce_frame, text="Quantity")
            sauce_quantity.grid(row=1, column=0, sticky="W")
            sauce_quantity_entry = tk.Entry(sauce_frame, width=60, validate='key', validatecommand=vcmd)
            sauce_quantity_entry.grid(row=1, column=1, columnspan=5)
            for widget in sauce_frame.winfo_children():
                widget.grid_configure(padx=17, pady=5)
            return selected_sauce, sauce_quantity_entry

        def user_info():
            # Build the user-details entries; all but the full name are digits-only.
            global user_info_frame
            user_info_frame = tk.LabelFrame(self, text="User Information", padx=70, pady=10)
            user_info_frame.grid(row=3, column=0, padx=20, pady=10)
            full_name_label = tk.Label(user_info_frame, text="Full Name: ")
            full_name_label.grid(row=0, column=0, sticky="W")
            full_name_entry = tk.Entry(user_info_frame, width=60)
            full_name_entry.grid(row=0, column=1)
            user_id_label = tk.Label(user_info_frame, text="ID: ")
            user_id_label.grid(row=1, column=0, sticky="W")
            user_id_entry = tk.Entry(user_info_frame, width=60, validate='key', validatecommand=vcmd)
            user_id_entry.grid(row=1, column=1)
            credit_card_label = tk.Label(user_info_frame, text="Credit Card: ")
            credit_card_label.grid(row=2, column=0, sticky="W")
            credit_card_entry = tk.Entry(user_info_frame, width=60, validate='key', validatecommand=vcmd)
            credit_card_entry.grid(row=2, column=1)
            password_label = tk.Label(user_info_frame, text="Password: ")
            password_label.grid(row=3, column=0, sticky="W")
            password_entry = tk.Entry(user_info_frame, width=60, validate='key', validatecommand=vcmd)
            password_entry.grid(row=3, column=1)
            for widget in user_info_frame.winfo_children():
                widget.grid_configure(padx=17, pady=5)
            return full_name_entry, user_id_entry, credit_card_entry, password_entry

        pizza_name, pizza_q = pizza_section()
        checkboxes = ingredient_section()
        sauce_name, sauce_q = sauce_section()
        user_name, user_id, credit_card, password = user_info()

        def get_order():
            # Validate the form, price the order, ask for confirmation, and
            # append the confirmed order to the CSV log.
            # XOR chain: true iff an odd number of the six fields are filled.
            validate = (bool(pizza_name.get()) ^ bool(pizza_q.get()) ^ bool(user_name.get()) ^
                        bool(user_id.get()) ^ bool(credit_card.get()) ^ bool(password.get()))
            validate_true = (bool(pizza_name.get()), bool(pizza_q.get()), bool(user_name.get()),
                             bool(user_id.get()), bool(credit_card.get()), bool(password.get())).count(True)
            if validate:
                messagebox.showinfo("PizzaLand", "Please Provide The Information ")
            # XNOR
            elif not validate:
                # Effectively: only proceed when all six fields are filled.
                if validate_true == 6:
                    output = []
                    now = datetime.now()
                    for box in checkboxes:
                        if box.var.get() == 1:
                            output.append(checkboxes[box])
                    if pizza_name.get() in italian_pizza.keys():
                        # Fixed-recipe pizza: no custom ingredients or sauce.
                        pizza = italian_pizza[pizza_name.get()](name=pizza_name.get(), ingredients=None, quantity=int(pizza_q.get()))
                        desc = pizza.get_description()
                        cost = pizza.get_cost()
                        ing = None
                        sauce_quant = None
                    elif pizza_name.get() in normal_pizza:
                        ing = output
                        if sauce_q.get():
                            # Wrap the pizza with the chosen sauce class.
                            sauce_quant = sauce_q.get()
                            pizza = sauces[sauce_name.get()](Pizza(name=pizza_name.get(), ingredients=ing, quantity=int(pizza_q.get())),
                                                             quantity=int(sauce_quant))
                            desc = pizza.get_description()
                            cost = pizza.get_cost()
                        else:
                            pizza = Pizza(name=pizza_name.get(), ingredients=ing, quantity=int(pizza_q.get()))
                            desc = pizza.get_description()
                            cost = pizza.get_cost()
                            sauce_quant = None
                    else:
                        # NOTE(review): an unrecognised pizza name leaves
                        # desc/cost undefined, so the askquestion below would
                        # raise — confirm the combobox cannot hold other values.
                        pass
                    msg = messagebox.askquestion("PizzaLand", f"You ordered {desc}\n"
                                                              f"Total: ${cost}\n"
                                                              "Complete the order?")
                    if msg == 'yes':
                        inputs = [pizza_name, pizza_q, sauce_q, user_name, user_id, credit_card, password]
                        order_date = now.strftime("%d/%m/%Y")
                        order_time = now.strftime("%H:%M:%S")
                        ing = ', '.join([str(i) for i in ing]) if ing != None else 'None'
                        sauce_quant = sauce_quant if sauce_quant != None else 'None'
                        header = ["Full Name", "User ID", "Credit Card", "Password", "Order Date",
                                  "Order Time", "Desc", "Cost", "Quantity of Pizza", "Ingredients", "Sauce Quantity"]
                        data = [user_name.get(), user_id.get(), credit_card.get(), password.get(), order_date,
                                order_time, desc, cost, pizza_q.get(), ing, sauce_quant]
                        with open('Orders_Database.csv', 'a', newline='') as file:
                            writer = csv.writer(file)
                            if file.tell() == 0:  # brand-new file: write the header first
                                writer.writerow(header)
                            writer.writerow(data)
                        # Clear the form for the next order.
                        for i in inputs:
                            i.delete(0, tk.END)
                        sauce_name.set(value='Ketchup')
                    else:
                        pass
                else:
                    messagebox.showinfo("PizzaLand", "Please Provide The Information ")
            else:
                # Unreachable: `validate` / `not validate` above are exhaustive.
                messagebox.showinfo("PizzaLand", "Please Provide The Information ")

        go_back_button = tk.Button(self,
                                   text='GO BACK',
                                   command=lambda: controller.show_frame("StartPage"),
                                   font=('Arial', 10),
                                   width=10,
                                   height=1,
                                   activebackground="#caeffa")
        go_back_button.place(x="200", y="630")
        order_button = tk.Button(self,
                                 text='ORDER',
                                 command=get_order,
                                 font=('Arial', 10),
                                 width=10,
                                 height=1,
                                 activebackground="#caeffa")
        order_button.place(x="350", y="630")
# Script entry point: build the root window, fix its size and position,
# then enter the Tk event loop (mainloop follows on the next line).
if __name__ == '__main__':
    app = MainView()
    app.iconbitmap('images/PizzaLand.ico')
    app.title("PizzaLand")
    app_height = 670
    app_width = 675
    app.geometry(f'{app_width}x{app_height}+{400}+{32}')
    app.resizable(False, False)
app.mainloop() | MelihGulum/Tkinter-Projects | Pizza Order System/main.py | main.py | py | 18,745 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "tkinter.Tk",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Tk.__init__",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame",
... |
41334774112 | import torch
import math
import numpy as np
# def dct1d(x, half_shift=None):
# N = x.shape[-1]
# if half_shift == None:
# half_shift = torch.stack([torch.tensor(-1j * math.pi * k / (2 * N)).exp() for k in range(N + 1)]).to(x.device)[None]
# x_ext = torch.cat([x, x.flip(-1)], -1)
# z = torch.fft.rfft(x_ext, norm='ortho') * half_shift
# return z.real
# def dct2d(x, half_shift_x, half_shift_y):
# Nx, Ny = x.shape[-2], x.shape[-1]
# if half_shift_x == None:
# half_shift_x = torch.stack([torch.tensor(-1j * math.pi * k / (2 * Nx)).exp()
# for k in range(2* Nx)]).to(x.device)[None, None, :, None]
# if half_shift_y == None:
# half_shift_y = torch.stack([torch.tensor(-1j * math.pi * k / (2 * Ny)).exp()
# for k in range(Ny + 1)]).to(x.device)[None, None, None]
# x_ext = torch.cat([x, x.flip(-1)], -1)
# x_ext = torch.cat([x_ext, x_ext.flip(-2)], -2)
# x_ext = torch.cat([x, x.flip(-1)], -1)
# z = torch.fft.rfft2(x_ext, norm='ortho') * half_shift_x * half_shift_y
# return z.real
def dct1d(x, norm=None):
    """DCT-II along the last dimension, computed via a single FFT.

    With norm='ortho' the scaling matches scipy.fft.dct(..., norm='ortho');
    with norm=None it matches scipy's unnormalized type-II DCT.
    """
    orig_shape = x.shape
    n = orig_shape[-1]
    flat = x.contiguous().view(-1, n)
    # Even-index samples followed by the reversed odd-index samples.
    reordered = torch.cat([flat[:, ::2], flat[:, 1::2].flip([1])], dim=1)
    spectrum = torch.view_as_real(torch.fft.fft(reordered, dim=1))
    # Half-sample phase shift: exp(-i * pi * k / (2n)).
    phase = -np.pi * torch.arange(n, dtype=x.dtype, device=x.device)[None, :] / (2 * n)
    cos_p = torch.cos(phase)
    sin_p = torch.sin(phase)
    out = spectrum[:, :, 0] * cos_p - spectrum[:, :, 1] * sin_p
    if norm == 'ortho':
        out[:, 0] /= 2 * np.sqrt(n)
        out[:, 1:] /= 2 * np.sqrt(n / 2)
    return 2 * out.view(*orig_shape)
def idct1d(X, norm=None):
    """Inverse of dct1d along the last dimension (a DCT-III via inverse rFFT).

    Defined so that idct1d(dct1d(x), norm) == x for matching norm.
    """
    orig_shape = X.shape
    n = orig_shape[-1]
    half = X.contiguous().view(-1, n) / 2
    if norm == 'ortho':
        half[:, 0] *= 2 * np.sqrt(n)
        half[:, 1:] *= 2 * np.sqrt(n / 2)
    # Undo the half-sample phase shift applied by the forward transform.
    phase = np.pi * torch.arange(n, dtype=X.dtype, device=X.device)[None, :] / (2 * n)
    cos_p = torch.cos(phase)
    sin_p = torch.sin(phase)
    real_part = half
    imag_part = torch.cat([half[:, :1] * 0, -half.flip([1])[:, :-1]], dim=1)
    rotated_r = real_part * cos_p - imag_part * sin_p
    rotated_i = real_part * sin_p + imag_part * cos_p
    packed = torch.cat([rotated_r.unsqueeze(2), rotated_i.unsqueeze(2)], dim=2)
    v = torch.fft.irfft(torch.view_as_complex(packed), n=packed.shape[1], dim=1)
    # De-interleave: first half fills even slots, reversed tail fills odd slots.
    out = v.new_zeros(v.shape)
    out[:, ::2] += v[:, :n - (n // 2)]
    out[:, 1::2] += v.flip([1])[:, :n // 2]
    return out.view(*orig_shape)
def dct2d(x, norm=None):
    """
    2-dimensional DCT-II over the last two dimensions, built by applying
    the 1-D transform along each axis in turn (the transform is separable).

    :param x: the input signal
    :param norm: the normalization, None or 'ortho'
    :return: the DCT-II of the signal over the last 2 dimensions
    """
    return dct1d(dct1d(x, norm=norm).transpose(-1, -2), norm=norm).transpose(-1, -2)
def idct2d(X, norm=None):
    """
    Inverse of dct2d over the last two dimensions, so that
    idct2d(dct2d(x)) == x; applies the 1-D inverse along each axis in turn.

    :param X: the input signal
    :param norm: the normalization, None or 'ortho'
    :return: the inverse transform over the last 2 dimensions
    """
    return idct1d(idct1d(X, norm=norm).transpose(-1, -2), norm=norm).transpose(-1, -2)
| DiffEqML/kairos | src/utils/numerics.py | numerics.py | py | 3,660 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "torch.cat",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.view_as_real",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.fft.fft",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.fft",
"line_numb... |
28335860302 | import abc
import uuid
from typing import TYPE_CHECKING, Any, Generic, Tuple, Type, TypeVar, Union
import pydantic
from typing_extensions import Self
import prefect
from prefect.blocks.core import Block
from prefect.client.utilities import inject_client
from prefect.exceptions import MissingContextError
from prefect.filesystems import LocalFileSystem, ReadableFileSystem, WritableFileSystem
from prefect.logging import get_logger
from prefect.serializers import Serializer
from prefect.settings import (
PREFECT_LOCAL_STORAGE_PATH,
PREFECT_RESULTS_DEFAULT_SERIALIZER,
PREFECT_RESULTS_PERSIST_BY_DEFAULT,
)
from prefect.utilities.annotations import NotSet
from prefect.utilities.asyncutils import sync_compatible
from prefect.utilities.pydantic import add_type_dispatch
if TYPE_CHECKING:
from prefect import Flow, Task
from prefect.client.orion import OrionClient
# Accepted ways to specify result storage: a writable filesystem block
# instance, or the string identifier of a saved block.
ResultStorage = Union[WritableFileSystem, str]
# Accepted ways to specify a serializer: an instance, or its type name.
ResultSerializer = Union[Serializer, str]
# Types whose values are represented inline as literals rather than
# serialized to storage.
LITERAL_TYPES = {type(None), bool}

logger = get_logger("results")

# from prefect.orion.schemas.states import State
R = TypeVar("R")
def get_default_result_storage() -> ResultStorage:
    """
    Return the default storage for results: a local file system rooted at
    the configured `PREFECT_LOCAL_STORAGE_PATH`.
    """
    basepath = PREFECT_LOCAL_STORAGE_PATH.value()
    return LocalFileSystem(basepath=basepath)
def get_default_result_serializer() -> ResultSerializer:
    """
    Return the default result serializer from settings.

    (Docstring fix: the previous one was copied from the storage helper and
    wrongly described a file system.)
    """
    return PREFECT_RESULTS_DEFAULT_SERIALIZER.value()
def get_default_persist_setting() -> bool:
    """
    Return the default result-persistence option from settings
    (`PREFECT_RESULTS_PERSIST_BY_DEFAULT`), not a hard-coded False.
    """
    return PREFECT_RESULTS_PERSIST_BY_DEFAULT.value()
def flow_features_require_result_persistence(flow: "Flow") -> bool:
    """
    Return `True` when the flow uses a feature that requires its own result
    to be persisted — currently, disabling in-memory result caching.
    """
    return not flow.cache_result_in_memory
def flow_features_require_child_result_persistence(flow: "Flow") -> bool:
    """
    Return `True` when the flow uses a feature that requires child flow and
    task runs to persist their results — currently, having retries enabled.
    """
    return bool(flow.retries)
def task_features_require_result_persistence(task: "Task") -> bool:
    """
    Return `True` when the task uses a feature that requires its result to
    be persisted: a cache key function, or in-memory caching disabled.
    """
    return bool(task.cache_key_fn) or not task.cache_result_in_memory
class ResultFactory(pydantic.BaseModel):
"""
A utility to generate `Result` types.
"""
persist_result: bool
cache_result_in_memory: bool
serializer: Serializer
storage_block_id: uuid.UUID
storage_block: WritableFileSystem
@classmethod
@inject_client
async def default_factory(cls, client: "OrionClient" = None, **kwargs):
"""
Create a new result factory with default options.
Keyword arguments may be provided to override defaults. Null keys will be
ignored.
"""
# Remove any null keys so `setdefault` can do its magic
for key, value in tuple(kwargs.items()):
if value is None:
kwargs.pop(key)
# Apply defaults
kwargs.setdefault("result_storage", get_default_result_storage())
kwargs.setdefault("result_serializer", get_default_result_serializer())
kwargs.setdefault("persist_result", get_default_persist_setting())
kwargs.setdefault("cache_result_in_memory", True)
return await cls.from_settings(**kwargs, client=client)
@classmethod
@inject_client
async def from_flow(
cls: Type[Self], flow: "Flow", client: "OrionClient" = None
) -> Self:
"""
Create a new result factory for a flow.
"""
from prefect.context import FlowRunContext
ctx = FlowRunContext.get()
if ctx:
# This is a child flow run
return await cls.from_settings(
result_storage=flow.result_storage or ctx.result_factory.storage_block,
result_serializer=flow.result_serializer
or ctx.result_factory.serializer,
persist_result=(
flow.persist_result
if flow.persist_result is not None
else
# !! Child flows persist their result by default if the it or the
# parent flow uses a feature that requires it
(
flow_features_require_result_persistence(flow)
or flow_features_require_child_result_persistence(ctx.flow)
or get_default_persist_setting()
)
),
cache_result_in_memory=flow.cache_result_in_memory,
client=client,
)
else:
# This is a root flow run
# Pass the flow settings up to the default which will replace nulls with
# our default options
return await cls.default_factory(
client=client,
result_storage=flow.result_storage,
result_serializer=flow.result_serializer,
persist_result=(
flow.persist_result
if flow.persist_result is not None
else
# !! Flows persist their result by default if uses a feature that
# requires it
(
flow_features_require_result_persistence(flow)
or get_default_persist_setting()
)
),
cache_result_in_memory=flow.cache_result_in_memory,
)
@classmethod
@inject_client
async def from_task(
cls: Type[Self], task: "Task", client: "OrionClient" = None
) -> Self:
"""
Create a new result factory for a task.
"""
from prefect.context import FlowRunContext
ctx = FlowRunContext.get()
if not ctx:
raise MissingContextError(
"A flow run context is required to create a result factory for a task."
)
result_storage = task.result_storage or ctx.result_factory.storage_block
result_serializer = task.result_serializer or ctx.result_factory.serializer
persist_result = (
task.persist_result
if task.persist_result is not None
else
# !! Tasks persist their result by default if their parent flow uses a
# feature that requires it or the task uses a feature that requires it
(
flow_features_require_child_result_persistence(ctx.flow)
or task_features_require_result_persistence(task)
or get_default_persist_setting()
)
)
cache_result_in_memory = task.cache_result_in_memory
return await cls.from_settings(
result_storage=result_storage,
result_serializer=result_serializer,
persist_result=persist_result,
cache_result_in_memory=cache_result_in_memory,
client=client,
)
@classmethod
@inject_client
async def from_settings(
cls: Type[Self],
result_storage: ResultStorage,
result_serializer: ResultSerializer,
persist_result: bool,
cache_result_in_memory: bool,
client: "OrionClient",
) -> Self:
storage_block_id, storage_block = await cls.resolve_storage_block(
result_storage, client=client
)
serializer = cls.resolve_serializer(result_serializer)
return cls(
storage_block=storage_block,
storage_block_id=storage_block_id,
serializer=serializer,
persist_result=persist_result,
cache_result_in_memory=cache_result_in_memory,
)
@staticmethod
async def resolve_storage_block(
    result_storage: ResultStorage, client: "OrionClient"
) -> Tuple[uuid.UUID, WritableFileSystem]:
    """
    Resolve one of the valid `ResultStorage` input types into a saved block
    document id and an instance of the block.
    """
    if isinstance(result_storage, Block):
        storage_block = result_storage
        storage_block_id = (
            # Avoid saving the block if it already has an identifier assigned
            storage_block._block_document_id
            # TODO: Overwrite is true to avoid issues where the save collides with
            # a previously saved document with a matching hash
            or await storage_block._save(
                is_anonymous=True, overwrite=True, client=client
            )
        )
    elif isinstance(result_storage, str):
        # A string is treated as a block slug and loaded from the API
        storage_block = await Block.load(result_storage, client=client)
        storage_block_id = storage_block._block_document_id
        assert storage_block_id is not None, "Loaded storage blocks must have ids"
    else:
        raise TypeError(
            "Result storage must be one of the following types: 'UUID', 'Block', "
            f"'str'. Got unsupported type {type(result_storage).__name__!r}."
        )

    return storage_block_id, storage_block
@staticmethod
def resolve_serializer(serializer: ResultSerializer) -> Serializer:
    """
    Resolve one of the valid `ResultSerializer` input types into a serializer
    instance.
    """
    if isinstance(serializer, Serializer):
        # Already an instance; use it as-is.
        return serializer

    if isinstance(serializer, str):
        # A string names a registered serializer type.
        return Serializer(type=serializer)

    raise TypeError(
        "Result serializer must be one of the following types: 'Serializer', "
        f"'str'. Got unsupported type {type(serializer).__name__!r}."
    )
@sync_compatible
async def create_result(self, obj: R) -> Union[R, "BaseResult[R]"]:
    """
    Create a result type for the given object.

    If persistence is disabled, the object is returned unaltered.
    Literal types are converted into `LiteralResult`.
    Other types are serialized, persisted to storage, and a reference is returned.
    """
    if obj is None:
        # Always write nulls as result types to distinguish from unpersisted results
        return await LiteralResult.create(None)

    if not self.persist_result:
        # Persistence is disabled: hand back the raw object when in-memory
        # caching is on; otherwise the result is simply unavailable downstream.
        return obj if self.cache_result_in_memory else None

    if type(obj) in LITERAL_TYPES:
        return await LiteralResult.create(obj)

    return await PersistedResult.create(
        obj,
        storage_block=self.storage_block,
        storage_block_id=self.storage_block_id,
        serializer=self.serializer,
        cache_object=self.cache_result_in_memory,
    )
@add_type_dispatch
class BaseResult(pydantic.BaseModel, abc.ABC, Generic[R]):
    """Abstract base for all result types; dispatched on the `type` field."""

    # Discriminator used by `add_type_dispatch` to resolve the concrete class
    type: str

    # In-memory cache of the user's object; the `NotSet` sentinel marks "empty"
    _cache: Any = pydantic.PrivateAttr(NotSet)

    def _cache_object(self, obj: Any) -> None:
        # Store the object so `get()` can return it without deserialization
        self._cache = obj

    def has_cached_object(self) -> bool:
        return self._cache is not NotSet

    @abc.abstractmethod
    @sync_compatible
    async def get(self) -> R:
        ...

    @abc.abstractclassmethod
    @sync_compatible
    async def create(
        cls: "Type[BaseResult[R]]",
        obj: R,
        **kwargs: Any,
    ) -> "BaseResult[R]":
        ...

    class Config:
        # Reject unknown fields so typos fail loudly
        extra = "forbid"
class LiteralResult(BaseResult):
    """
    Result type for literal values like `None`, `True`, `False`.

    These values are stored inline and JSON serialized when sent to the Prefect API.
    They are not persisted to external result storage.
    """

    type = "literal"
    value: Any

    def has_cached_object(self) -> bool:
        # Literal values live inline on the model, so the "cache" is never empty.
        return True

    @sync_compatible
    async def get(self) -> R:
        # No retrieval step needed; just hand back the inline value.
        return self.value

    @classmethod
    @sync_compatible
    async def create(
        cls: "Type[LiteralResult]",
        obj: R,
    ) -> "LiteralResult[R]":
        if type(obj) in LITERAL_TYPES:
            return cls(value=obj)

        raise TypeError(
            f"Unsupported type {type(obj).__name__!r} for result literal. "
            f"Expected one of: {', '.join(type_.__name__ for type_ in LITERAL_TYPES)}"
        )
class PersistedResult(BaseResult):
    """
    Result type which stores a reference to a persisted result.

    When created, the user's object is serialized and stored. The format for the content
    is defined by `PersistedResultBlob`. This reference contains metadata necessary for retrieval
    of the object, such as a reference to the storage block and the key where the
    content was written.
    """

    type = "reference"

    # Name of the serializer used when the blob was written (e.g. "pickle")
    serializer_type: str
    # Block document id of the storage block holding the blob
    storage_block_id: uuid.UUID
    # Key within the storage block where the blob was written
    storage_key: str

    # Whether `get()` should cache the deserialized object on this instance
    _should_cache_object: bool = pydantic.PrivateAttr(default=True)

    @sync_compatible
    @inject_client
    async def get(self, client: "OrionClient") -> R:
        """
        Retrieve the data and deserialize it into the original object.
        """
        # Serve from the in-memory cache when available to skip the round trip
        if self.has_cached_object():
            return self._cache

        blob = await self._read_blob(client=client)
        obj = blob.serializer.loads(blob.data)

        if self._should_cache_object:
            self._cache_object(obj)

        return obj

    @inject_client
    async def _read_blob(self, client: "OrionClient") -> "PersistedResultBlob":
        # Re-hydrate the storage block from the API, then read the raw content
        block_document = await client.read_block_document(self.storage_block_id)
        storage_block: ReadableFileSystem = Block._from_block_document(block_document)
        content = await storage_block.read_path(self.storage_key)
        blob = PersistedResultBlob.parse_raw(content)
        return blob

    @classmethod
    @sync_compatible
    async def create(
        cls: "Type[PersistedResult]",
        obj: R,
        storage_block: WritableFileSystem,
        storage_block_id: uuid.UUID,
        serializer: Serializer,
        cache_object: bool = True,
    ) -> "PersistedResult[R]":
        """
        Create a new result reference from a user's object.

        The object will be serialized and written to the storage block under a unique
        key. It will then be cached on the returned result.
        """
        data = serializer.dumps(obj)
        blob = PersistedResultBlob(serializer=serializer, data=data)

        # A random hex key keeps writes from colliding with prior results
        key = uuid.uuid4().hex
        await storage_block.write_path(key, content=blob.to_bytes())

        result = cls(
            serializer_type=serializer.type,
            storage_block_id=storage_block_id,
            storage_key=key,
        )

        if cache_object:
            # Attach the object to the result so it's available without deserialization
            result._cache_object(obj)

        # `object.__setattr__` bypasses pydantic immutability to set the private attr
        object.__setattr__(result, "_should_cache_object", cache_object)

        return result
class PersistedResultBlob(pydantic.BaseModel):
    """
    The format of the content stored by a persisted result.

    Typically, this is written to a file as bytes.
    """

    serializer: Serializer
    data: bytes
    # Prefect version that wrote the blob, recorded for compatibility checks
    prefect_version: str = pydantic.Field(default=prefect.__version__)

    def to_bytes(self) -> bytes:
        # JSON-encode the model, then encode to bytes for file storage
        return self.json().encode()
| PatrickVieira1/dataengineering-zoomcamp | week_2_workflow_orchestration/venv/lib/python3.9/site-packages/prefect/results.py | results.py | py | 15,848 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "prefect.filesystems.WritableFileSystem",
"line_number": 29,
"usage_type": "name"
},
{
"api_name... |
40420432354 | from pathlib import Path
import re

from numpy.lib.twodim_base import diag
from numpy.ma.core import count
import parse
import numpy as np
# Directory containing this puzzle's input file, resolved at import time.
data_folder = Path(".").resolve()
def parse_data(data):
    """Parse puzzle input into an (n, 4) int array of x1, y1, x2, y2 rows.

    Uses a stdlib regex instead of the third-party `parse` package, and
    skips blank lines (e.g. a trailing newline), where the original raised
    AttributeError because `parse(...)` returned None.
    """
    pattern = re.compile(r"(\d+),(\d+) -> (\d+),(\d+)")
    lines = []
    for line in data.split("\n"):
        match = pattern.fullmatch(line.strip())
        if match:  # skip blank/malformed lines instead of crashing
            lines.append([int(d) for d in match.groups()])
    return np.array(lines, dtype=int)
def count_overlap_points(lines, include_diag):
    """Count grid points covered by at least two of the given line segments.

    Args:
        lines: (n, 4) int array of segments as x1, y1, x2, y2 columns.
        include_diag: when True, 45-degree diagonal segments are also drawn;
            otherwise only horizontal and vertical segments are counted.

    Returns:
        Number of points covered by two or more segments.
    """
    # The grid is indexed as grid[y, x] below, so the first axis must span the
    # y coordinates (columns 1 and 3) and the second the x coordinates
    # (columns 0 and 2). The original allocated the axes swapped, which only
    # happened to work for square inputs.
    grid = np.zeros(
        (np.max(lines[:, [1, 3]]) + 1, np.max(lines[:, [0, 2]]) + 1), dtype=int
    )

    horizontal = np.equal(lines[:, 1], lines[:, 3])
    for row in lines[horizontal]:
        x_lim = np.sort(row[[0, 2]])
        grid[row[1], x_lim[0] : x_lim[1] + 1] += 1

    vertical = np.equal(lines[:, 0], lines[:, 2])
    for row in lines[vertical]:
        y_lim = np.sort(row[[1, 3]])
        grid[y_lim[0] : y_lim[1] + 1, row[0]] += 1

    if include_diag:
        # Everything neither horizontal nor vertical is a 45-degree diagonal
        # (per the puzzle statement), walked point by point from the smaller x.
        for row in lines[~(horizontal | vertical)]:
            ind = np.argsort(row[[0, 2]])
            x = row[[0, 2]][ind]
            y = row[[1, 3]][ind]
            slope = 1 if y[1] > y[0] else -1
            for i in range(1 + x[1] - x[0]):
                grid[y[0] + i * slope, x[0] + i] += 1

    return np.sum(grid >= 2)
def main():
    """Solve both parts of AoC 2021 day 5 using input.txt in this folder."""
    data = data_folder.joinpath("input.txt").read_text()
    data = parse_data(data)

    print("Part 1")
    n_overlap = count_overlap_points(data, False)
    print(f"At least two lines overlap in {n_overlap} points when not including diagonal lines")
    print()

    print("Part 2")
    n_overlap = count_overlap_points(data, True)
    print(f"At least two lines overlap in {n_overlap} points when including diagonal lines")
    print()
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| eirikhoe/advent-of-code | 2021/05/sol.py | sol.py | py | 1,887 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "parse.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number"... |
74540231072 | import os
import glob
import importlib
import re
import shutil
import traceback
import typing
from textwrap import dedent

import sqlalchemy
from nbconvert.exporters import Exporter, NotebookExporter
from nbconvert.exporters.exporter import ResourcesDict
from nbconvert.writers import FilesWriter
from rapidfuzz import fuzz
from traitlets import Bool, List, Dict, Integer, Instance, Type, Any
from traitlets import TraitError
from traitlets import default, validate
from traitlets.config import LoggingConfigurable, Config

from ..coursedir import CourseDirectory
from ..nbgraderformat import SchemaTooOldError, SchemaTooNewError
from ..preprocessors.execute import UnresponsiveKernelError
from ..utils import find_all_files, rmtree, remove
class NbGraderException(Exception):
    """Base exception raised when converting notebooks fails."""
class BaseConverter(LoggingConfigurable):
    """Shared machinery for nbgrader converters (autograde, generate_*, feedback).

    Subclasses define the input/output directories; this class handles
    discovering notebooks, running the nbconvert exporter over them, managing
    file permissions, pre/post hooks, and cleanup on failure.
    """

    # Notebook paths for the assignment currently being processed
    notebooks = List([])
    # Mapping of assignment directory -> list of matched notebook paths
    assignments = Dict({})
    writer = Instance(FilesWriter)
    exporter = Instance(Exporter)
    exporter_class = Type(NotebookExporter, klass=Exporter).tag(config=True)
    # Preprocessors registered on the exporter, in order
    preprocessors = List([])

    force = Bool(False, help="Whether to overwrite existing assignments/submissions").tag(config=True)

    pre_convert_hook = Any(
        None,
        config=True,
        allow_none=True,
        help=dedent("""
        An optional hook function that you can implement to do some
        bootstrapping work before converting.
        This function is called before the notebooks are converted
        and should be used for specific converters such as Autograde,
        GenerateAssignment or GenerateFeedback.
        It will be called as (all arguments are passed as keywords)::
        hook(assignment=assignment, student=student, notebooks=notebooks)
        """)
    )

    post_convert_hook = Any(
        None,
        config=True,
        allow_none=True,
        help=dedent("""
        An optional hook function that you can implement to do some
        work after converting.
        This function is called after the notebooks are converted
        and should be used for specific converters such as Autograde,
        GenerateAssignment or GenerateFeedback.
        It will be called as (all arguments are passed as keywords)::
        hook(assignment=assignment, student=student, notebooks=notebooks)
        """)
    )

    permissions = Integer(
        help=dedent(
            """
            Permissions to set on files output by nbgrader. The default is
            generally read-only (444), with the exception of nbgrader
            generate_assignment and nbgrader generate_feedback, in which case
            the user also has write permission.
            """
        )
    ).tag(config=True)

    @default("permissions")
    def _permissions_default(self) -> int:
        # NOTE: these are *decimal* ints whose digits are later re-parsed as
        # octal in set_permissions via int(str(self.permissions), 8).
        return 664 if self.coursedir.groupshared else 444

    @validate('pre_convert_hook')
    def _validate_pre_convert_hook(self, proposal):
        # Accept either a callable or a dotted "module.function" string.
        value = proposal['value']
        if isinstance(value, str):
            module, function = value.rsplit('.', 1)
            value = getattr(importlib.import_module(module), function)
        if not callable(value):
            raise TraitError("pre_convert_hook must be callable")
        return value

    @validate('post_convert_hook')
    def _validate_post_convert_hook(self, proposal):
        # Same resolution logic as the pre-convert hook.
        value = proposal['value']
        if isinstance(value, str):
            module, function = value.rsplit('.', 1)
            value = getattr(importlib.import_module(module), function)
        if not callable(value):
            raise TraitError("post_convert_hook must be callable")
        return value

    # Course directory layout object; supplied by the caller
    coursedir = Instance(CourseDirectory, allow_none=True)

    def __init__(self, coursedir: CourseDirectory = None, **kwargs: typing.Any) -> None:
        self.coursedir = coursedir
        super(BaseConverter, self).__init__(**kwargs)
        # Inherit the parent app's logfile when available
        if self.parent and hasattr(self.parent, "logfile"):
            self.logfile = self.parent.logfile
        else:
            self.logfile = None

        # Clear nbconvert's default preprocessors; only ours are registered
        c = Config()
        c.Exporter.default_preprocessors = []
        self.update_config(c)

    def start(self) -> None:
        """Run the full conversion: discover notebooks, then convert them."""
        self.init_notebooks()
        self.writer = FilesWriter(parent=self, config=self.config)
        self.exporter = self.exporter_class(parent=self, config=self.config)
        for pp in self.preprocessors:
            self.exporter.register_preprocessor(pp)
        # Work relative to the course root; restore the cwd afterwards
        currdir = os.getcwd()
        os.chdir(self.coursedir.root)
        try:
            self.convert_notebooks()
        finally:
            os.chdir(currdir)

    @default("classes")
    def _classes_default(self):
        # Expose writer/exporter/preprocessor traits as configurables
        classes = super(BaseConverter, self)._classes_default()
        classes.append(FilesWriter)
        classes.append(Exporter)
        for pp in self.preprocessors:
            if len(pp.class_traits(config=True)) > 0:
                classes.append(pp)
        return classes

    @property
    def _input_directory(self):
        # Subclasses must name the coursedir step to read from
        raise NotImplementedError

    @property
    def _output_directory(self):
        # Subclasses must name the coursedir step to write to
        raise NotImplementedError

    def _format_source(self, assignment_id: str, student_id: str, escape: bool = False) -> str:
        # Path (or regex when escape=True) of the source assignment directory
        return self.coursedir.format_path(self._input_directory, student_id, assignment_id, escape=escape)

    def _format_dest(self, assignment_id: str, student_id: str, escape: bool = False) -> str:
        # Path (or regex when escape=True) of the destination assignment directory
        return self.coursedir.format_path(self._output_directory, student_id, assignment_id, escape=escape)

    def init_notebooks(self) -> None:
        """Populate `self.assignments` by globbing for matching notebooks."""
        self.assignments = {}
        self.notebooks = []
        assignment_glob = self._format_source(self.coursedir.assignment_id, self.coursedir.student_id)
        for assignment in glob.glob(assignment_glob):
            notebook_glob = os.path.join(assignment, self.coursedir.notebook_id + ".ipynb")
            found = glob.glob(notebook_glob)
            if len(found) == 0:
                self.log.warning("No notebooks were matched by '%s'", notebook_glob)
                continue
            self.assignments[assignment] = found

        if len(self.assignments) == 0:
            msg = "No notebooks were matched by '%s'" % assignment_glob
            self.log.error(msg)

            # Suggest the closest existing assignment name via fuzzy matching
            assignment_glob2 = self._format_source("*", self.coursedir.student_id)
            found = glob.glob(assignment_glob2)
            if found:
                scores = sorted([(fuzz.ratio(assignment_glob, x), x) for x in found])
                self.log.error("Did you mean: %s", scores[-1][1])

            raise NbGraderException(msg)

    def init_single_notebook_resources(self, notebook_filename: str) -> typing.Dict[str, typing.Any]:
        """Build the nbconvert resources dict for one notebook.

        The student/assignment/notebook ids are recovered by matching the
        filename against the source path pattern.
        """
        regexp = re.escape(os.path.sep).join([
            self._format_source("(?P<assignment_id>.*)", "(?P<student_id>.*)", escape=True),
            "(?P<notebook_id>.*).ipynb"
        ])

        m = re.match(regexp, notebook_filename)
        if m is None:
            msg = "Could not match '%s' with regexp '%s'" % (notebook_filename, regexp)
            self.log.error(msg)
            raise NbGraderException(msg)

        gd = m.groupdict()
        self.log.debug("Student: %s", gd['student_id'])
        self.log.debug("Assignment: %s", gd['assignment_id'])
        self.log.debug("Notebook: %s", gd['notebook_id'])

        resources = {}
        resources['unique_key'] = gd['notebook_id']
        resources['output_files_dir'] = '%s_files' % gd['notebook_id']

        resources['nbgrader'] = {}
        resources['nbgrader']['student'] = gd['student_id']
        resources['nbgrader']['assignment'] = gd['assignment_id']
        resources['nbgrader']['notebook'] = gd['notebook_id']
        resources['nbgrader']['db_url'] = self.coursedir.db_url

        return resources

    def write_single_notebook(self, output: str, resources: ResourcesDict) -> None:
        """Write one converted notebook to the destination directory."""
        # configure the writer build directory
        self.writer.build_directory = self._format_dest(
            resources['nbgrader']['assignment'], resources['nbgrader']['student'])

        # write out the results
        self.writer.write(output, resources, notebook_name=resources['unique_key'])

    def init_destination(self, assignment_id: str, student_id: str) -> bool:
        """Initialize the destination for an assignment. Returns whether the
        assignment should actually be processed or not (i.e. whether the
        initialization was successful).
        """
        if self.coursedir.student_id_exclude:
            exclude_ids = self.coursedir.student_id_exclude.split(',')
            if student_id in exclude_ids:
                return False

        dest = os.path.normpath(self._format_dest(assignment_id, student_id))

        # the destination doesn't exist, so we haven't processed it
        if self.coursedir.notebook_id == "*":
            if not os.path.exists(dest):
                return True
        else:
            # if any of the notebooks don't exist, then we want to process them
            for notebook in self.notebooks:
                filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                path = os.path.join(dest, filename)
                if not os.path.exists(path):
                    return True

        # if we have specified --force, then always remove existing stuff
        if self.force:
            if self.coursedir.notebook_id == "*":
                self.log.warning("Removing existing assignment: {}".format(dest))
                rmtree(dest)
            else:
                for notebook in self.notebooks:
                    filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                    path = os.path.join(dest, filename)
                    if os.path.exists(path):
                        self.log.warning("Removing existing notebook: {}".format(path))
                        remove(path)
            return True

        src = self._format_source(assignment_id, student_id)
        new_timestamp = self.coursedir.get_existing_timestamp(src)
        old_timestamp = self.coursedir.get_existing_timestamp(dest)

        # if --force hasn't been specified, but the source assignment is newer,
        # then we want to overwrite it
        if new_timestamp is not None and old_timestamp is not None and new_timestamp > old_timestamp:
            if self.coursedir.notebook_id == "*":
                self.log.warning("Updating existing assignment: {}".format(dest))
                rmtree(dest)
            else:
                for notebook in self.notebooks:
                    filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                    path = os.path.join(dest, filename)
                    if os.path.exists(path):
                        self.log.warning("Updating existing notebook: {}".format(path))
                        remove(path)
            return True

        # otherwise, we should skip the assignment
        self.log.info("Skipping existing assignment: {}".format(dest))
        return False

    def init_assignment(self, assignment_id: str, student_id: str) -> None:
        """Initializes resources/dependencies/etc. that are common to all
        notebooks in an assignment.
        """
        source = self._format_source(assignment_id, student_id)
        dest = self._format_dest(assignment_id, student_id)

        # detect other files in the source directory
        for filename in find_all_files(source, self.coursedir.ignore + ["*.ipynb"]):
            # Make sure folder exists.
            path = os.path.join(dest, os.path.relpath(filename, source))
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            if os.path.exists(path):
                remove(path)

            # copy the supplementary (non-notebook) file into the destination
            self.log.info("Copying %s -> %s", filename, path)
            shutil.copy(filename, path)

    def set_permissions(self, assignment_id: str, student_id: str) -> None:
        """Apply the configured permissions to all output files."""
        self.log.info("Setting destination file permissions to %s", self.permissions)
        dest = os.path.normpath(self._format_dest(assignment_id, student_id))
        # `self.permissions` is a decimal int whose digits are octal (e.g. 444)
        permissions = int(str(self.permissions), 8)
        for dirname, _, filenames in os.walk(dest):
            for filename in filenames:
                os.chmod(os.path.join(dirname, filename), permissions)
            # If groupshared, set dir permissions - see comment below.
            st_mode = os.stat(dirname).st_mode
            if self.coursedir.groupshared and st_mode & 0o2770 != 0o2770:
                try:
                    os.chmod(dirname, (st_mode|0o2770) & 0o2777)
                except PermissionError:
                    self.log.warning("Could not update permissions of %s to make it groupshared", dirname)
        # If groupshared, set write permissions on directories. Directories
        # are created within ipython_genutils.path.ensure_dir_exists via
        # nbconvert.writer, (unless there are supplementary files) with a
        # default mode of 755 and there is no way to pass the mode arguments
        # all the way to there! So we have to walk and fix.
        if self.coursedir.groupshared:
            # Root may be created in this step, and is not included above.
            rootdir = self.coursedir.format_path(self._output_directory, '.', '.')
            # Add 2770 to existing dir permissions (don't unconditionally override)
            st_mode = os.stat(rootdir).st_mode
            if st_mode & 0o2770 != 0o2770:
                try:
                    os.chmod(rootdir, (st_mode|0o2770) & 0o2777)
                except PermissionError:
                    self.log.warning("Could not update permissions of %s to make it groupshared", rootdir)

    def convert_single_notebook(self, notebook_filename: str) -> None:
        """
        Convert a single notebook.

        Performs the following steps:
        1. Initialize notebook resources
        2. Export the notebook to a particular format
        3. Write the exported notebook to file
        """
        self.log.info("Converting notebook %s", notebook_filename)
        resources = self.init_single_notebook_resources(notebook_filename)
        output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
        self.write_single_notebook(output, resources)

    def convert_notebooks(self) -> None:
        """Convert every discovered assignment, cleaning up failed outputs.

        Known error types get targeted messages; unexpected errors are
        collected and reported together at the end.
        """
        errors = []

        def _handle_failure(gd: typing.Dict[str, str]) -> None:
            # Remove partial output so a failed conversion is retried next run
            dest = os.path.normpath(self._format_dest(gd['assignment_id'], gd['student_id']))
            if self.coursedir.notebook_id == "*":
                if os.path.exists(dest):
                    self.log.warning("Removing failed assignment: {}".format(dest))
                    rmtree(dest)
            else:
                for notebook in self.notebooks:
                    filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                    path = os.path.join(dest, filename)
                    if os.path.exists(path):
                        self.log.warning("Removing failed notebook: {}".format(path))
                        remove(path)

        for assignment in sorted(self.assignments.keys()):
            # initialize the list of notebooks and the exporter
            self.notebooks = sorted(self.assignments[assignment])

            # parse out the assignment and student ids
            regexp = self._format_source("(?P<assignment_id>.*)", "(?P<student_id>.*)", escape=True)
            m = re.match(regexp, assignment)
            if m is None:
                msg = "Could not match '%s' with regexp '%s'" % (assignment, regexp)
                self.log.error(msg)
                raise NbGraderException(msg)
            gd = m.groupdict()

            try:
                # determine whether we actually even want to process this submission
                should_process = self.init_destination(gd['assignment_id'], gd['student_id'])
                if not should_process:
                    continue

                self.run_pre_convert_hook()

                # initialize the destination
                self.init_assignment(gd['assignment_id'], gd['student_id'])

                # convert all the notebooks
                for notebook_filename in self.notebooks:
                    self.convert_single_notebook(notebook_filename)

                # set assignment permissions
                self.set_permissions(gd['assignment_id'], gd['student_id'])
                self.run_post_convert_hook()

            except UnresponsiveKernelError:
                self.log.error(
                    "While processing assignment %s, the kernel became "
                    "unresponsive and we could not interrupt it. This probably "
                    "means that the students' code has an infinite loop that "
                    "consumes a lot of memory or something similar. nbgrader "
                    "doesn't know how to deal with this problem, so you will "
                    "have to manually edit the students' code (for example, to "
                    "just throw an error rather than enter an infinite loop). ",
                    assignment)
                errors.append((gd['assignment_id'], gd['student_id']))
                _handle_failure(gd)

            except sqlalchemy.exc.OperationalError:
                _handle_failure(gd)
                self.log.error(traceback.format_exc())
                msg = (
                    "There was an error accessing the nbgrader database. This "
                    "may occur if you recently upgraded nbgrader. To resolve "
                    "the issue, first BACK UP your database and then run the "
                    "command `nbgrader db upgrade`."
                )
                self.log.error(msg)
                raise NbGraderException(msg)

            except SchemaTooOldError:
                _handle_failure(gd)
                msg = (
                    "One or more notebooks in the assignment use an old version \n"
                    "of the nbgrader metadata format. Please **back up your class files \n"
                    "directory** and then update the metadata using:\n\nnbgrader update .\n"
                )
                self.log.error(msg)
                raise NbGraderException(msg)

            except SchemaTooNewError:
                _handle_failure(gd)
                msg = (
                    "One or more notebooks in the assignment use an newer version \n"
                    "of the nbgrader metadata format. Please update your version of \n"
                    "nbgrader to the latest version to be able to use this notebook.\n"
                )
                self.log.error(msg)
                raise NbGraderException(msg)

            except KeyboardInterrupt:
                _handle_failure(gd)
                self.log.error("Canceled")
                raise

            except Exception:
                self.log.error("There was an error processing assignment: %s", assignment)
                self.log.error(traceback.format_exc())
                errors.append((gd['assignment_id'], gd['student_id']))
                _handle_failure(gd)

        if len(errors) > 0:
            for assignment_id, student_id in errors:
                self.log.error(
                    "There was an error processing assignment '{}' for student '{}'".format(
                        assignment_id, student_id))

            if self.logfile:
                msg = (
                    "Please see the error log ({}) for details on the specific "
                    "errors on the above failures.".format(self.logfile))
            else:
                msg = (
                    "Please see the the above traceback for details on the specific "
                    "errors on the above failures.")

            self.log.error(msg)
            raise NbGraderException(msg)

    def run_pre_convert_hook(self):
        """Call the configured pre-convert hook, if any; failures are logged
        but never abort the conversion."""
        if self.pre_convert_hook:
            self.log.info('Running pre-convert hook')
            try:
                self.pre_convert_hook(
                    assignment=self.coursedir.assignment_id,
                    student=self.coursedir.student_id,
                    notebooks=self.notebooks)
            except Exception:
                self.log.info('Pre-convert hook failed', exc_info=True)

    def run_post_convert_hook(self):
        """Call the configured post-convert hook, if any; failures are logged
        but never abort the conversion."""
        if self.post_convert_hook:
            self.log.info('Running post-convert hook')
            try:
                self.post_convert_hook(
                    assignment=self.coursedir.assignment_id,
                    student=self.coursedir.student_id,
                    notebooks=self.notebooks)
            except Exception:
                self.log.info('Post-convert hook failed', exc_info=True)
{
"api_name": "traitlets.config.LoggingConfigurable",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "traitlets.List",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "traitlets.Dict",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tr... |
27960984590 | from polygon import arc
import turtle
# Set up the turtle and flower parameters.
bob = turtle.Turtle()
# NOTE(review): turtle.speed() is documented for values 0-10; 100 is outside
# that range — confirm the intended effect (out-of-range maps to "fastest"/0).
turtle.speed(speed=100)
turtle.delay(0)
petals = 16
angle = 30   # arc angle (degrees) for each petal side
radius = 10000/ angle
def draw_petal(t, radius, angle):
    """Draw one leaf-shaped petal from two mirrored circular arcs."""
    for _ in range(2):
        arc(t, radius, angle)
        t.lt(180 - angle)
# Draw all petals, rotating evenly through a full turn between petals.
for i in range(petals):
    draw_petal(bob, radius, angle)
    bob.lt(360 / petals)

# Keep the window open until closed by the user.
turtle.mainloop()
| MJC-code/thinkpython | Chapter04/Ex4_2.py | Ex4_2.py | py | 382 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "turtle.Turtle",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "turtle.speed",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "turtle.delay",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "polygon.arc",
"line_number":... |
72878881634 | from tornado.web import RequestHandler
from tornado.escape import json_decode
import json
class BaseHandler(RequestHandler):
    """Common request handler: parses JSON bodies and emits JSON errors."""

    def prepare(self):
        """Decode a JSON request body into ``self.data`` before dispatch.

        Responds with a 400 error payload when the body is not valid JSON.
        """
        if self.request.body:
            try:
                self.data = json_decode(self.request.body)
            # BUG FIX: the original ``except TypeError or json.JSONDecodeError``
            # evaluated to ``except TypeError`` only (``or`` short-circuits on
            # the first truthy class), so malformed JSON was never caught.
            # Catch both exception types with a tuple instead.
            except (TypeError, json.JSONDecodeError) as e:
                self.write_error(400, error_msg=e)

    def write_error(self, status_code, **kwargs):
        """
        Override RequestHandler.write_error to produce a custom error response.
        For more information, refers to
        https://www.tornadoweb.org/en/stable/guide/structure.html?highlight=write_error
        """
        self.write({"error_code": status_code, "error_msg": kwargs.get("error_msg", "")})
        self.finish()
| zshuangyan/search_bookmark | base_handler.py | base_handler.py | py | 761 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tornado.web.RequestHandler",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "tornado.escape.json_decode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 10,
"usage_type": "attribute"
}
] |
32474589299 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: make MeetupRequest.start_time nullable.

    dependencies = [
        ('booking', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='meetuprequest',
            name='start_time',
            field=models.TimeField(null=True),
        ),
    ]
| jkol36/glidewithus | glidewithus/booking/migrations/0002_auto_20141106_0150.py | 0002_auto_20141106_0150.py | py | 398 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
6360755344 | from scoringengine.scoringengine import ScoringEngine, ServiceReport
import requests
import requests.exceptions
import os
# Name under which this checker is registered with the scoring engine.
SERVICE_NAME = 'http'

ScoringEngine.register_service(SERVICE_NAME)

# Load the canonical page content each team's web server is expected to serve.
with open(os.path.join(os.path.dirname(__file__), ('http.sample'))) as f:
    sample_response = f.read()
@ScoringEngine.scheduler.interval_schedule(seconds=10)
def http_all():
    """Scheduled entry point: score the HTTP service for every team."""
    ScoringEngine.logger.info('Scoring http')
    for current_team in ScoringEngine.teams:
        http(current_team)
def http(team):
    """Check one team's HTTP service and report its up/down status."""
    team_name = team['name']
    host = team['services'][SERVICE_NAME]['host']
    try:
        resp = requests.get('http://' + host)
        # The served page must match the canonical sample byte-for-byte.
        page_matches = (resp.text == sample_response)
        if page_matches:
            report(team_name, True)
        else:
            report(team_name, False, 'invalid response')
    except requests.exceptions.RequestException:
        report(team_name, False, 'HTTP error')
    except Exception:
        report(team_name, False, 'error')
def report(team, up, status=''):
    """Forward a single service-check outcome to the scoring engine."""
    outcome = ServiceReport(team=team, service=SERVICE_NAME, up=up, status=status)
    ScoringEngine.report(outcome)
| bburky/scoringengine | services/http.py | http.py | py | 1,124 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scoringengine.scoringengine.ScoringEngine.register_service",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scoringengine.scoringengine.ScoringEngine",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usag... |
28049663211 | """
This script summarized DE results using mash model.
"""
import pandas as pd
import session_info
from pyhere import here
from gtfparse import read_gtf
from functools import lru_cache
@lru_cache()
def get_annotation(feature):
    """Load caudate feature annotation (position + gene IDs), lowercased columns.

    `feature` is one of: "genes", "transcripts", "exons", "junctions".
    """
    paths = {
        "genes": here("input/text_files_counts/_m/caudate/gene_annotation.tsv"),
        "transcripts": here("input/text_files_counts/_m/caudate/tx_annotation.tsv"),
        "exons": here("input/text_files_counts/_m/caudate/exon_annotation.tsv"),
        "junctions": here("input/text_files_counts/_m/caudate/jxn_annotation.tsv"),
    }
    annot = pd.read_csv(paths[feature], sep='\t')
    keep_cols = ["names", "seqnames", "start", "end", "Symbol", "gencodeID"]
    return annot.loc[:, keep_cols].rename(columns=str.lower)
@lru_cache()
def get_gtf():
    """Parse the GENCODE v25 GTF into a DataFrame (cached; parsing is slow)."""
    # NOTE(review): hard-coded cluster path — assumes the JHPCE filesystem layout.
    gtf_file = "/dcl02/lieber/apaquola/genome/human/gencode25/gtf.CHR/"+\
        "_m/gencode.v25.annotation.gtf"
    return read_gtf(gtf_file)
def gene_annotation():
    """Return transcript-level GTF records with gene identity and position."""
    gtf = get_gtf()
    tx_records = gtf[gtf["feature"] == "transcript"]
    return tx_records[["transcript_id", "gene_id", "gene_name", "gene_type",
                       "seqname", "start", "end", "strand"]]
@lru_cache()
def get_mash_degs(feature, tissue, fdr):
    """Load mash LFSR values across the 10 permutations, filtered at `fdr`.

    Returns columns: feature_id, lfsr, permutation. Accumulates per-permutation
    frames in a list and concatenates once — repeated `pd.concat` in a loop is
    quadratic (and starting from an empty frame can disturb dtypes).
    """
    if tissue == "Dentate Gyrus":
        new_tissue = "dentateGyrus"
    else:
        new_tissue = tissue.lower()
    frames = []
    for perm in range(1, 11):
        fname = f"../../_m/{feature}/mash_lfsr_perm_{perm:02d}.txt"
        df0 = pd.read_csv(fname, sep='\t').loc[:, ["feature_id", new_tissue]]
        df0["permutation"] = perm
        frames.append(df0)
    df = pd.concat(frames, axis=0)
    return df[(df[new_tissue] < fdr)].rename(columns={new_tissue: "lfsr"})
@lru_cache()
def get_mash_es(feature, tissue):
    """Load mash posterior-mean effect sizes across the 10 permutations.

    Returns columns: feature_id, posterior_mean, permutation. Uses list
    accumulation + a single `pd.concat` instead of concatenating inside the
    loop (quadratic, and the empty seed frame can disturb dtypes).
    """
    if tissue == "Dentate Gyrus":
        new_tissue = "dentateGyrus"
    else:
        new_tissue = tissue.lower()
    frames = []
    for perm in range(1, 11):
        fname = f"../../_m/{feature}/mash_effectsize_perm_{perm:02d}.txt"
        df0 = pd.read_csv(fname, sep='\t').loc[:, ["feature_id", new_tissue]]
        df0["permutation"] = perm
        frames.append(df0)
    df = pd.concat(frames, axis=0)
    return df.rename(columns={new_tissue: "posterior_mean"})
@lru_cache()
def median_values(feature, tissue):
    """Median lfsr and posterior mean per feature across the 10 permutations."""
    merged = get_mash_degs(feature, tissue, 1)\
        .merge(get_mash_es(feature, tissue), on=["feature_id", "permutation"])
    medians = merged.groupby(["feature_id"]).median()
    return medians.drop(["permutation"], axis=1).reset_index()
@lru_cache()
def filter_fdr(feature, tissue, fdr):
    """Keep features whose median lfsr falls below the given threshold."""
    med = median_values(feature, tissue)
    significant = med["lfsr"] < fdr
    return med[significant].copy()
@lru_cache()
def annotate_degs(feature, tissue, fdr):
    """Attach genomic annotation to the FDR-filtered features."""
    sig = filter_fdr(feature, tissue, fdr)
    annotated = sig.merge(get_annotation(feature),
                          left_on="feature_id", right_on="names")
    return annotated.drop(["names"], axis=1)
@lru_cache()
def extract_features(tissue, fdr):
    """Return DE (genes, transcripts, exons, junctions) for a tissue at `fdr`,
    each annotated with gene_id and gene_type from the GTF."""
    # Gene annotation
    gtf_annot = gene_annotation()
    annot_gene = gtf_annot.loc[:, ["gene_id", "gene_type"]].drop_duplicates()
    annot_tx = gtf_annot.loc[:, ["transcript_id", "gene_id", "gene_type"]]
    # Extract DE from mash model
    genes = annotate_degs("genes", tissue, fdr)\
        .merge(annot_gene, left_on="gencodeid", right_on="gene_id", how="left")\
        .drop("gencodeid", axis=1)
    # Transcripts merge on their own id to recover the parent gene
    trans = annotate_degs("transcripts", tissue, fdr)\
        .merge(annot_tx, left_on="feature_id", right_on="transcript_id", how="left")\
        .drop(["gencodeid", "transcript_id"], axis=1)
    exons = annotate_degs("exons", tissue, fdr)\
        .merge(annot_gene, left_on="gencodeid", right_on="gene_id", how="left")\
        .drop("gencodeid", axis=1)
    juncs = annotate_degs("junctions", tissue, fdr)\
        .merge(annot_gene, left_on="gencodeid", right_on="gene_id", how="left")\
        .drop("gencodeid", axis=1)
    return genes, trans, exons, juncs
def print_summary(tissue, fdr=0.05):
    """Write per-tissue DE counts to the log (Caudate truncates, rest append)."""
    genes, trans, exons, juncs = extract_features(tissue, fdr)
    # Caudate is processed first, so it starts a fresh log file
    w_mode = "w" if tissue == "Caudate" else "a"
    with open("summarize_results.log", mode=w_mode) as f:
        print("Significant DE (lfsr < 0.05) in %s" % tissue, file=f)
        for variable in ["feature_id", "gene_id"]:
            print(variable, file=f)
            counts = tuple(len(set(df[variable]))
                           for df in (genes, trans, exons, juncs))
            print("\nGene:\t\t%d\nTranscript:\t%d\nExon:\t\t%d\nJunction:\t%d\n" %
                  counts, file=f)
def get_DEGs_result_by_tissue(tissue, fdr=0.05):
    """Concatenate the four feature-type DE tables for one brain region.

    Adds an ordered categorical 'feature_type' column and a 'region' column.
    """
    genes, trans, exons, juncs = extract_features(tissue, fdr)
    # extract_features is lru_cached: label copies via .assign instead of
    # mutating the cached DataFrames in place (the original wrote new
    # columns directly into the cached objects).
    labeled = [frame.assign(feature_type=label)
               for frame, label in zip(
                   (genes, trans, exons, juncs),
                   ("Gene", "Transcript", "Exon", "Junction"))]
    df = pd.concat(labeled)
    df["feature_type"] = df.feature_type\
                           .astype("category")\
                           .cat.reorder_categories(
                               ["Gene", "Transcript", "Exon", "Junction"])
    df["region"] = tissue
    return df
def main():
    """Summarize DE results across the four brain regions and write outputs."""
    sig_frames = []
    all_frames = []
    for tissue in ["Caudate", "Dentate Gyrus", "DLPFC", "Hippocampus"]:
        print_summary(tissue)
        sig_frames.append(get_DEGs_result_by_tissue(tissue))
        all_frames.append(get_DEGs_result_by_tissue(tissue, 1))
    df1 = pd.concat(sig_frames)
    df2 = pd.concat(all_frames)
    # Summary
    with open("effect_sizes.log", mode="w") as f:
        print("feature_id size:", file=f)
        print(df1.loc[:, ["region", "feature_type", "posterior_mean"]]
              .groupby(["region", "feature_type"]).describe().to_string(),
              file=f)
    print("\nSummary:")
    gene = df1[(df1["feature_type"] == "Gene")].drop_duplicates(subset="gene_id")
    print(gene.shape)
    print(gene.groupby("gene_type").size())
    # Output
    cols = ["region", "feature_id", "gene_id", "symbol", "seqnames", "start",
            "end", "lfsr", "posterior_mean", "feature_type"]
    sort_cols = ["region", "feature_type", "lfsr", "posterior_mean"]
    df1.sort_values(sort_cols).loc[:, cols]\
       .to_csv("BrainSeq_ancestry_binary_4features_4regions.txt.gz",
               sep='\t', index=False)
    df2.sort_values(sort_cols).loc[:, cols]\
       .to_csv("BrainSeq_ancestry_binary_4features_4regions_allFeatures.txt.gz",
               sep='\t', index=False)
    # Session information
    session_info.show()


if __name__ == "__main__":
    main()
| LieberInstitute/aanri_phase1 | differential_analysis/permutation_environmental/tissue_comparison/summary_table/_h/summarize_results.py | summarize_results.py | py | 6,514 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyhere.here",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyhere.here",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyhere.here",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyhere.here",
"line_number": ... |
35075300303 | from aiogram import Bot, Dispatcher, executor, types
bot = Bot('')
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def start(message: types.Message):
    # message.answer replies in the same chat (equivalent to
    # bot.send_message(message.chat.id, ...)).
    await message.answer('Hello, how are you?')
@dp.message_handler(content_types=['photo'])
async def start(message: types.Message):  # NOTE(review): name shadows the /start handler above; aiogram registers via the decorator, so dispatch still works
    await message.answer('Cool photo')
@dp.message_handler(content_types=['video'])  # bug fix: was 'vodeo', so this handler never matched any message
async def start(message: types.Message):
    """Acknowledge any video sent to the bot."""
    await message.answer('Cool video')
#@dp.message_handler(content_types=['text'])
#async def start(message: types.Message):
# #await message.answer('Рад знакомству с тобой. Расскажи о себе')
# await message.reply('Рад знакомству с тобой. Расскажи о себе.')
# file = open('/photo.jpg', 'rb')
# await message.answer_photo(file)
@dp.message_handler(commands=['inline'])
async def info(message: types.Message):
    # Inline keyboard: a URL button plus a callback button
    keyboard = types.InlineKeyboardMarkup()
    keyboard.add(types.InlineKeyboardButton('Site', url=''))
    keyboard.add(types.InlineKeyboardButton('Hello', callback_data='hello'))
    await message.reply('Hello', reply_markup=keyboard)
@dp.callback_query_handler()
async def callback(call):
    # Echo the callback payload back into the originating chat
    await call.message.answer(call.data)
@dp.message_handler(commands=['reply'])
async def reply(message: types.Message):
    # One-time reply keyboard with two text buttons
    keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True)
    for caption in ('Site', 'Website'):
        keyboard.add(types.KeyboardButton(caption))
    await message.answer('Hello', reply_markup=keyboard)
executor.start_polling(dp)
| AntoshkaNaumov/bot_project | aiogram_bot.py | aiogram_bot.py | py | 1,631 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aiogram.Bot",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "aiogram.Dispatcher",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "aiogram.types.Message",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types"... |
10501114655 | from packets.Messages import Message
from utils.reader import Reader
from utils.writer import Writer
import json
class LeaderboardMessage(Message):
    """Server packet 24403: leaderboard data pushed to the client."""
    def __init__(self, dataPlayers):
        super().__init__()
        self.id = 24403  # packet id
        self.dataPlayers = dataPlayers
    def decode(self, buffer):
        # Re-initialise the Reader mixin over the incoming buffer
        Reader.__init__(self, buffer)
    def encode(self):
        # NOTE(review): the payload below is mostly hard-coded placeholders;
        # self.dataPlayers is currently unused (the loop is commented out).
        self.writeVint(0)
        self.writeScID(0,0)
        self.writeString()
        self.writeVint(0)
        #for x in self.dataPlayers:
        #    d = json.loads(x[1])
        self.writeVint(-64)
        self.writeVint(-64)
        self.writeVint(1)
        self.writeVint(0)
        self.writeString("RU") # country
| rostokdev/retrobrawl-py-old | cheese/packets/Messages/Server/LeaderboardMessage.py | LeaderboardMessage.py | py | 628 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "packets.Messages.Message",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.reader.Reader.__init__",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.reader.Reader",
"line_number": 13,
"usage_type": "name"
}
] |
22942055132 | import logging
from typing import Callable, Union
import numpy as np
import torch
from monai.apps.deepgrow.transforms import AddRandomGuidanced, FindDiscrepancyRegionsd
from monai.handlers import MeanDice, from_engine
from monai.handlers.ignite_metric import IgniteMetric
from monai.inferers import SimpleInferer
from monai.losses import DiceLoss
from monai.metrics import MeanIoU
from monai.transforms import (
Activationsd,
AsDiscreted,
EnsureChannelFirstd,
EnsureTyped,
LoadImaged,
RandRotated,
RandZoomd,
Resized,
ScaleIntensityRangeD,
SelectItemsd,
ToNumpyd,
TorchVisiond,
ToTensord,
)
from monai.utils import MetricReduction
from monailabel.deepedit.handlers import TensorBoard2DImageHandler
from monailabel.deepedit.interaction import Interaction
from monailabel.deepedit.transforms import AddGuidanceSignald, AddInitialSeedPointd
from monailabel.tasks.train.basic_train import BasicTrainTask, Context
from monailabel.transform.pre import NormalizeLabeld
logger = logging.getLogger(__name__)
class DeepEdit(BasicTrainTask):
    """MONAI Label training task for the endoscopy DeepEdit model.

    Training alternates (with probability 0.5) between fully automatic
    segmentation and click-guided, DeepGrow-style interactive iterations.
    """
    def __init__(
        self,
        model_dir,
        network,
        dimension,
        roi_size,
        max_train_interactions,
        max_val_interactions,
        description="Train DeepEdit Model for Endoscopy",
        **kwargs,
    ):
        self._network = network
        self.dimension = dimension
        self.roi_size = roi_size
        self.max_train_interactions = max_train_interactions
        self.max_val_interactions = max_val_interactions
        super().__init__(model_dir, description, **kwargs)
    def network(self, context: Context):
        # Architecture is supplied by the caller at construction time
        return self._network
    def optimizer(self, context: Context):
        return torch.optim.Adam(context.network.parameters(), lr=0.0001)
    def loss_function(self, context: Context):
        return DiceLoss(sigmoid=True, squared_pred=True)
    def get_click_transforms(self, context: Context):
        # Simulated-click pipeline: compare prediction with ground truth and
        # drop a new guidance point in a randomly chosen discrepancy region.
        return [
            Activationsd(keys="pred", sigmoid=True),
            ToNumpyd(keys=("image", "label", "pred")),
            FindDiscrepancyRegionsd(label="label", pred="pred", discrepancy="discrepancy"),
            AddRandomGuidanced(guidance="guidance", discrepancy="discrepancy", probability="probability"),
            AddGuidanceSignald(keys="image", guidance="guidance"),
            ToTensord(keys=("image", "label")),
        ]
    def train_pre_transforms(self, context: Context):
        # Load, resize, augment, then seed the first guidance point so the
        # guidance channel exists before any simulated clicks are added.
        return [
            LoadImaged(keys=("image", "label"), dtype=np.uint8, image_only=False),
            EnsureChannelFirstd(keys="image"),
            EnsureChannelFirstd(keys="label", channel_dim="no_channel"),
            Resized(keys=("image", "label"), spatial_size=self.roi_size, mode=("area", "nearest")),
            ToTensord(keys="image"),
            TorchVisiond(
                keys="image", name="ColorJitter", brightness=64.0 / 255.0, contrast=0.75, saturation=0.25, hue=0.04
            ),
            RandRotated(keys=("image", "label"), range_x=np.pi, prob=0.5, mode=("bilinear", "nearest")),
            RandZoomd(keys=("image", "label"), min_zoom=0.8, max_zoom=1.2, prob=0.5, mode=("bilinear", "nearest")),
            ScaleIntensityRangeD(keys="image", a_min=0.0, a_max=255.0, b_min=-1.0, b_max=1.0),
            NormalizeLabeld(keys="label", value=1),
            AddInitialSeedPointd(keys="guidance", label="label", connected_regions=5),
            AddGuidanceSignald(keys="image", guidance="guidance"),
            SelectItemsd(keys=("image", "label", "guidance")),
            EnsureTyped(keys=("image", "label")),
        ]
    def train_post_transforms(self, context: Context):
        # Sigmoid + 0.5 threshold turns logits into a binary mask
        return [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold=0.5),
        ]
    def val_inferer(self, context: Context):
        return SimpleInferer()
    def val_key_metric(self, context):
        # Track both mean IoU and mean Dice during validation
        return {
            "val_mean_iou": MeanIoUMetric(output_transform=from_engine(["pred", "label"])),
            self.VAL_METRIC_MEAN_DICE: MeanDice(output_transform=from_engine(["pred", "label"])),
        }
    def train_handlers(self, context: Context):
        handlers = super().train_handlers(context)
        # Only rank 0 writes TensorBoard images in distributed training
        if context.local_rank == 0:
            handlers.append(TensorBoard2DImageHandler(log_dir=context.events_dir, batch_limit=0))
        return handlers
    def val_handlers(self, context: Context):
        handlers = super().val_handlers(context)
        if context.local_rank == 0 and handlers:
            handlers.append(TensorBoard2DImageHandler(log_dir=context.events_dir, batch_limit=0, tag_prefix="val-"))
        return handlers
    def train_iteration_update(self, context: Context):
        # Half of the training iterations run the interactive click loop
        return Interaction(
            deepgrow_probability=0.5,
            transforms=self.get_click_transforms(context),
            max_interactions=self.max_train_interactions,
            train=True,
        )
    def val_iteration_update(self, context: Context):
        return Interaction(
            deepgrow_probability=0.5,
            transforms=self.get_click_transforms(context),
            max_interactions=self.max_val_interactions,
            train=False,
        )
class MeanIoUMetric(IgniteMetric):
    """Ignite metric handler wrapping MONAI's MeanIoU (intersection over union)."""
    def __init__(
        self,
        include_background: bool = True,
        reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        metric_fn = MeanIoU(include_background=include_background, reduction=reduction)
        super().__init__(metric_fn=metric_fn, output_transform=output_transform, save_details=save_details)
| Project-MONAI/MONAILabel | sample-apps/endoscopy/lib/trainers/deepedit.py | deepedit.py | py | 5,716 | python | en | code | 472 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "monailabel.tasks.train.basic_train.BasicTrainTask",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "monailabel.tasks.train.basic_train.Context",
"line_number": 58,
"usag... |
33893292705 | from rest_framework import serializers
from rest_framework.reverse import reverse
from api.serializers import UserPublicSerializer
from .models import Product
from .validators import validate_title
class ProductInlineSerializer(serializers.Serializer):
    """Compact product representation: hyperlink to the detail view plus title."""
    url = serializers.HyperlinkedIdentityField(
        view_name="product-detail",
        lookup_field="pk",
        read_only=True
    )
    title = serializers.CharField(read_only=True)
class ProductSerializer(serializers.ModelSerializer):
    """Full product representation with owner info and an absolute edit URL."""
    owner = UserPublicSerializer(source="user", read_only=True)
    # my_user_data = serializers.SerializerMethodField(read_only=True)
    # my_discount = serializers.SerializerMethodField(read_only=True)
    edit_url = serializers.SerializerMethodField(read_only=True)
    # only works on ModelSerializer:
    url = serializers.HyperlinkedIdentityField(
        view_name="product-detail", lookup_field="pk"
    )
    title = serializers.CharField(validators=[validate_title])
    class Meta:
        model = Product
        fields = [
            "owner", # user_id; comment out in production ;)
            "url",
            "edit_url",
            "pk",
            "title",
            "content",
            "price",
            "sale_price",
            # "my_discount",
            # "my_user_data",
        ]
    def get_my_user_data(self, obj):
        # Only used if the commented-out my_user_data field is re-enabled
        return {"username": obj.user.username}
    def get_edit_url(self, obj):
        # return f"/api/v2/products/{obj.pk}/"
        # Build an absolute URL; requires the request in the serializer context
        request = self.context.get("request")
        if request is None:
            return None
        return reverse("product-edit", kwargs={"pk": obj.pk}, request=request)
# When creating a product, the URLs are generated automatically
| AlizeeBoc/DRF-API | backend/products/serializers.py | serializers.py | py | 1,723 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.HyperlinkedIdentityField",
"line_number": 9,
... |
21728937931 | import requests
import json
import datetime
def format_date(date_str):
    """Convert an ISO-format date/datetime string to 'YYYY.MM.DD'."""
    # NOTE(review): currently unused by the script below.
    parsed = datetime.datetime.fromisoformat(date_str)
    return f"{parsed:%Y.%m.%d}"
devices = {
    "device1": "iPhone",
    "device2": "iPad",
    "device3": "Mac"
}

# Marketing-name prefix applied to firmware versions from the type=1 query
_OS_PREFIX = {"iPhone": "iOS ", "iPad": "iPadOS ", "Mac": "MacOS "}


def _size_gb(num_bytes):
    """Format a byte count as gigabytes with two decimals, e.g. '5.21GB'."""
    return f"{round(num_bytes / 1073741824, 2):.2f}GB"


def _filtered_firmware(firmware, version_prefix=""):
    """Extract the subset of firmware fields we keep, formatting the size."""
    version = firmware.get("version")
    if version_prefix and version is not None:
        version = version_prefix + version
    return {
        "id": firmware.get("id"),
        "version": version,
        "build_id": firmware.get("build_id"),
        "size": _size_gb(firmware.get("size")),
        "url": firmware.get("url"),
        "created_at": firmware.get("created_at"),
        "type": firmware.get("type"),
        "signing": firmware.get("signing"),
    }


def _fetch_firmwares(identifier, fw_type):
    """Fetch the firmware listing for one device identifier and type."""
    url = f"https://betahub.cn/api/apple/firmwares/{identifier}?type={fw_type}"
    return requests.get(url).json()


# For every device model, merge type=1 (release) and type=2 firmware lists
# and dump the result to <identifier>.json. The original duplicated the
# dict-building for both types, kept an unused all_data dict, and guarded
# the type-2 fetch with `if url2:` which was always true.
for device_key, device_name in devices.items():
    device_url = f"https://betahub.cn/api/apple/devices/{device_name}"
    devices_data = requests.get(device_url).json()
    for device in devices_data:
        identifier = device.get("identifier")
        data1 = _fetch_firmwares(identifier, 1)
        merged = {
            "id": data1.get("id"),
            "name": data1.get("name"),
            "identifier": data1.get("identifier"),
            "release_date": data1.get("release_date"),
            "firmwares": [
                _filtered_firmware(fw, _OS_PREFIX.get(device_name, ""))
                for fw in data1.get("firmwares", [])
            ],
        }
        data2 = _fetch_firmwares(identifier, 2)
        # Type-2 firmwares keep their bare version string (as before)
        merged["firmwares"].extend(
            _filtered_firmware(fw) for fw in data2.get("firmwares", []))
        with open(f"{identifier}.json", "w", encoding="utf-8") as json_file:
            json.dump(merged, json_file, ensure_ascii=False, indent=4)
| y0123456789/ipsw | ipsw1.py | ipsw1.py | py | 3,288 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.fromisoformat",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "req... |
5587030547 | #!/usr/bin/env python3
import argparse
import collections
import pdb
def dump_map(map, field):
    """Debug helper: print the image rows followed by a separator."""
    print(f"(field: {field})")
    for line in map:
        print(line)
    print("\n%%%%%%\n")
def count_lit(map):
    """Count lit ('#') pixels across all rows of the image."""
    # str.count per row replaces the original Counter-per-row accumulation,
    # which built and merged a throwaway Counter for every row.
    return sum(row.count("#") for row in map)
def translate(alg, s):
    """Look up a 9-pixel neighbourhood string in the enhancement algorithm."""
    # '.'/'#' map to binary digits; the 9-bit value indexes into alg
    index = int(s.replace(".", "0").replace("#", "1"), base=2)
    return alg[index]
def map_get(map, x, y, field="."):
    """Pixel at (x, y) = (column, row); outside the image returns *field*.

    Bug fix: bounds were previously checked against swapped dimensions
    (x against the row count, y against the column count), which returned
    wrong values for non-square images.
    """
    height, width = len(map), len(map[0])
    if 0 <= x < width and 0 <= y < height:
        return map[y][x]
    return field
def enhance(alg, map, field):
    """Apply one image-enhancement step; returns (new_map, new_field).

    The infinite background may flip each step: all-dark neighbourhoods map
    through alg[0], all-lit ones through alg[511].
    """
    if field == "." and alg[0] == "#":
        new_field = "#"
    elif field == "#" and alg[511] == ".":
        new_field = "."
    else:
        new_field = field
    rows, cols = len(map), len(map[0])
    new_map = []
    # The output grows by one pixel on every side
    for y in range(-1, rows + 1):
        chars = []
        for x in range(-1, cols + 1):
            neighbourhood = "".join(
                map_get(map, x + dx, y + dy, field=field)
                for dy in (-1, 0, 1)
                for dx in (-1, 0, 1))
            chars.append(translate(alg, neighbourhood))
        new_map.append("".join(chars))
    return new_map, new_field
def part1(alg, map, steps=2):
    """Run *steps* enhancement rounds (debug-printing each) and count lit pixels."""
    state = (map, ".")
    for _ in range(steps):
        state = enhance(alg, state[0], state[1])
        dump_map(*state)
    return count_lit(state[0])
def part2(alg, map):
    """Part 2 is simply part 1 with 50 enhancement rounds."""
    return part1(alg, map, steps=50)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("part", type=int, help="Which part to run.")
parser.add_argument("input_file", type=str)
args = parser.parse_args()
alg = None
map = []
with open(args.input_file) as fh:
for line in fh.readlines():
line = line.strip()
if alg is None:
alg = line
elif len(line) != 0:
map.append(line)
if args.part == 1:
print(part1(alg, map))
elif args.part == 2:
print(part2(alg, map))
elif args.part == 3:
pdb.set_trace()
else:
print("illegal part")
| vmizener/adventofcode | 2021/20/main.py | main.py | py | 2,107 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pdb.s... |
42646033615 | import pandas as pd
import numpy as np
import ajf_plts
import code
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from BeamModel import Beam
from Simulation import perform_static_sim
from scipy.stats import wasserstein_distance
from numba import njit
from numba import objmode
# Fixed temperature T = 25 degrees C
# Fixed velocity, v = 15 m/s
SPACING_LABELS = ["S0", "S1", "S2", "S3", "S4"]
WEIGHT_LABELS = ["W1", "W2", "W3", "W4", "W5"]
wim_df = pd.read_feather("./wim_data.feather")
# Filter to leave only 5 axle vehicles
wim_df = wim_df.loc[wim_df.Axles.isin([5])]
# Round to 1 dp
wim_df = wim_df.round(1)
# Filter to leave only the most common axle plan
ax_plan = wim_df.groupby(SPACING_LABELS).agg("count").W1.idxmax()
wim_df = wim_df.loc[(wim_df[SPACING_LABELS] == ax_plan).all(axis=1)]
WEIGHTS = wim_df[WEIGHT_LABELS].values
SPACINGS = wim_df[SPACING_LABELS].values
N_WIM = WEIGHTS.shape[0]
N_VEHICLES = 1000
DELTA = 0.1
ITERATIONS = 100
# delta1_vals = [0.0, 0.0, 0.0, 0.0, 0.0, 0.1]
# xd1_vals = [0.0, 0.0, 0.0, 0.0, 0.0, 11]
# delta2_vals = [0.0, 0.1, 0.2, 0.1, 0.2, 0.2]
# xd2_vals = [0.0, 17, 17, 11, 11, 11]
delta1_vals = [0.1]
xd1_vals = [17]
delta2_vals = [0.2]
xd2_vals = [17]
@njit(cache=True, parallel=False, fastmath=True)
def get_wim_sample(n):
    """Draw *n* WIM records (axle weights, spacings) with replacement."""
    # NOTE(review): appears unused; process_one_beam re-implements this sampling.
    all_idxs = np.arange(N_WIM)
    idx = np.random.choice(all_idxs, size=n, replace=True)
    w = WEIGHTS[idx, :]
    s = SPACINGS[idx, :]
    return w, s
@njit(cache=True, parallel=False, fastmath=True)
def process_one_beam(Kg, n, WEIGHTS, SPACINGS):
    """Simulate *n* random WIM vehicle crossings over one beam.

    Kg: global stiffness matrix of the beam.
    Returns (results_L, results_R): per-vehicle max absolute displacement
    at the first and last measured locations.
    """
    # Size the sampling pool from the arrays actually passed in rather than
    # the module-level N_WIM constant (the two could disagree if the
    # function is ever called with a different WIM sample).
    all_idxs = np.arange(WEIGHTS.shape[0])
    idx = np.random.choice(all_idxs, size=n, replace=True)
    weights_arr = WEIGHTS[idx, :]
    spacings_arr = SPACINGS[idx, :]
    results_L = np.zeros(len(weights_arr))
    results_R = np.zeros(len(weights_arr))
    for row in range(len(weights_arr)):
        steps, disp = perform_static_sim(Kg, weights_arr[row, :], spacings_arr[row, :])
        results_L[row] = np.max(np.abs(disp[1]))
        results_R[row] = np.max(np.abs(disp[-1]))
    return results_L, results_R
@njit(cache=True, parallel=False, fastmath=True)
def process_one_iter(Kg1, Kg2, n, WEIGHTS, SPACINGS):
    """Earth mover's distance between the two beams' response distributions."""
    L1, R1 = process_one_beam(Kg1, n, WEIGHTS, SPACINGS)
    L2, R2 = process_one_beam(Kg2, n, WEIGHTS, SPACINGS)
    # scipy's wasserstein_distance is not numba-compiled, so call it in
    # object mode with declared return dtypes.
    with objmode(emd_l="float64", emd_r="float64"):
        emd_l = wasserstein_distance(L1, L2)
        emd_r = wasserstein_distance(R1, R2)
    return emd_l, emd_r
@njit(cache=True, parallel=False, fastmath=True)
def process_all_iters(n_iters, Kg1, Kg2, n_vehicles, WEIGHTS, SPACINGS):
    """Repeat the EMD comparison *n_iters* times; returns (emd_l, emd_r) arrays."""
    emd_l = np.zeros(n_iters)
    emd_r = np.zeros(n_iters)
    for it in range(n_iters):
        left, right = process_one_iter(Kg1, Kg2, n_vehicles, WEIGHTS, SPACINGS)
        emd_l[it] = left
        emd_r[it] = right
    return emd_l, emd_r
# Run these as a static simulation for healthy and damaged cases
beam_1 = Beam()
beam_2 = Beam()
emd_L = np.zeros((len(delta1_vals), ITERATIONS))
emd_R = np.zeros((len(delta1_vals), ITERATIONS))
# For each damage scenario pair, damage both beams and compare their
# Monte-Carlo response distributions via the earth mover's distance.
for i, (d1, xd1, d2, xd2) in enumerate(
    zip(delta1_vals, xd1_vals, delta2_vals, xd2_vals)
):
    print("i=", i)
    beam_1.reset_damage()
    beam_1.inflict_damage_at_x(xd1, d1)
    print("updated beam 1")
    beam_2.reset_damage()
    beam_2.inflict_damage_at_x(xd2, d2)
    print("updated beam 2")
    emd_L[i, :], emd_R[i, :] = process_all_iters(
        ITERATIONS, beam_1.Kg, beam_2.Kg, N_VEHICLES, WEIGHTS, SPACINGS
    )
# Mean and spread of the EMD over the Monte-Carlo iterations
print(emd_L.mean(axis=1))
print(emd_L.std(axis=1))
print(emd_R.mean(axis=1))
print(emd_R.std(axis=1))
| alanjferguson/thesis-figure-code | chapter_3/tab02_gen_data.py | tab02_gen_data.py | py | 3,551 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_feather",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
41571308589 | import pygame
from decoration import Sky
class GameOver:
    """Game-over screen: sky backdrop, final coin count, space to restart."""
    def __init__(self, surface, coin_data, reset_overworld):
        self.display_surface = surface
        self.coin_data = coin_data
        self.reset_overworld = reset_overworld
        self.sky = Sky(8, 'overworld')
        self.font = pygame.font.Font('graphics/ui/ARCADEPI.TTF', 25)
        self.font_game_over = pygame.font.Font('graphics/ui/ARCADEPI.TTF', 50)

    def input(self):
        # Restart the game when the player presses space
        if pygame.key.get_pressed()[pygame.K_SPACE]:
            self.reset_overworld()

    def display_game_over(self):
        title = self.font_game_over.render('GAME OVER', False, '#33323d')
        self.display_surface.blit(title, title.get_rect(center=(600, 400)))
        coins = self.font.render('Coins: {}'.format(self.coin_data), False, '#33323d')
        self.display_surface.blit(coins, coins.get_rect(center=(600, 450)))

    def run(self):
        self.sky.draw(self.display_surface)
        self.display_game_over()
        self.input()
{
"api_name": "decoration.Sky",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.font.Font",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.font.Font",
... |
40585445114 | import sys
sys.path.insert(0, "../..")
import ply.lex as lex
# Reserved words
reserved = (
'RENDERER', 'INTEGRATOR', 'TRANSFORM', 'SAMPLER', 'FILTER', 'FILM', 'CAMERA',
'WORLDBEGIN', 'WORLDEND', 'ATTRIBUTEBEGIN', 'ATTRIBUTEEND', 'TRANSFORMBEGIN', 'TRANSFORMEND',
'MAKENAMEDMATERIAL', 'NAMEDMATERIAL', 'MATERIAL', 'SHAPE', 'TEXTURE', 'AREALIGHTSOURCE', 'LIGHTSOURCE',
'INTEGER', 'BOOL', 'STRING', 'FLOAT', 'COLOR', 'POINT', 'NORMAL', 'TEX',
'TRUE', 'FALSE',
'INCLUDE'
)
tokens = reserved + (
'ID', 'SCONST', 'ICONST', 'FCONST',
'LBRACKET', 'RBRACKET', 'QUOTE'
)
# Completely ignored characters
t_ignore = ' \t\x0c'
# Newlines
def t_NEWLINE(t):
    r'\n+'
    # PLY uses the docstring above as the token regex -- do not edit it.
    # Keep the line counter accurate across one-or-more consumed newlines.
    t.lexer.lineno += t.value.count("\n")
# Delimeters
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_QUOTE = r'\"'
# Identifiers and reserved words
reserved_map = {
'Renderer': 'RENDERER', 'SurfaceIntegrator': 'INTEGRATOR', 'Transform': 'TRANSFORM', 'Sampler': 'SAMPLER', 'PixelFilter': 'FILTER', 'Film': 'FILM', 'Camera': 'CAMERA',
'WorldBegin': 'WORLDBEGIN', 'WorldEnd': 'WORLDEND', 'AttributeBegin': 'ATTRIBUTEBEGIN', 'AttributeEnd': 'ATTRIBUTEEND',
'TransformBegin': 'TRANSFORMBEGIN', 'TransformEnd': 'TRANSFORMEND',
'MakeNamedMaterial': 'MAKENAMEDMATERIAL', 'NamedMaterial': 'NAMEDMATERIAL', 'Material': 'MATERIAL', 'Shape': 'SHAPE', 'Texture': 'TEXTURE',
'AreaLightSource': 'AREALIGHTSOURCE', 'LightSource': 'LIGHTSOURCE',
'integer': 'INTEGER', 'bool': 'BOOL', 'string': 'STRING', 'float': 'FLOAT', 'color': 'COLOR', 'point': 'POINT', 'normal': 'NORMAL',
'texture': 'TEX', 'true': 'TRUE', 'false': 'FALSE',
'Include': 'INCLUDE'
}
# def t_SCONST(t):
# r'[A-Za-z_][\w_|\.|/|-]*'
# t.type = reserved_map.get(t.value, "SCONST")
# return t
def t_ID(t):
    r'[A-Za-z_][\w_|\.|/|-]*'
    # The docstring is the PLY token regex. Reclassify identifiers that are
    # reserved words (e.g. 'Shape' -> SHAPE); everything else stays an ID.
    t.type = reserved_map.get(t.value, "ID")
    return t
# String literal
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
# Integer literal
t_ICONST = r'[+|-]?\d+'
# Floating literal
t_FCONST = r'[+|-]?((\d+)(\.\d+)(e(\+|-)?(\d+))? | [+|-]?(\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# def t_SCONST(t):
# r'[.]+'
# t.type = reserved_map.get(t.value, "SCONST")
# return t
#t_SCONST = r'[.]+'
# Comments
def t_comment(t):
    r'/\*(.|\n)*?\*/'
    # C-style block comments are discarded (no return), but newlines inside
    # them must still advance the line counter.
    t.lexer.lineno += t.value.count('\n')
def t_preprocessor(t):
    r'\#(.)*?\n'
    # '#' starts a line comment; swallow the line and count its newline.
    t.lexer.lineno += 1
def t_error(t):
    # PLY error hook: report the offending character and resynchronise by
    # skipping one character.
    print("Illegal character %s" % repr(t.value[0]))
    t.lexer.skip(1)
lexer = lex.lex()
if __name__ == "__main__":
lex.runmain(lexer)
| lahagemann/pbr_scene_converter | src/core/LuxLex.py | LuxLex.py | py | 2,530 | python | en | code | 18 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "ply.lex.lex",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "ply.lex",
"line_number": ... |
75271721632 | __author__ = 'eric'
import cherrypy
import threading
import re
import os
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import *
cherrypy.config.update({'log.screen': False})
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
class CommandWebSocket(WebSocket):
    """WebSocket that runs each incoming message as a server command.

    The `main` attribute is attached by WSServer before any socket is opened.
    """
    def received_message(self, message):
        # assumes message.data is text the command runner accepts -- TODO confirm
        response = self.main.run_server_command(message.data)
        # Convert plain text to simple HTML for display in the browser
        response = re.sub("\n", "<br />", response)
        response = re.sub("\t", " ", response)
        self.send(response, message.is_binary)
class WSServer:
    """Embedded CherryPy server (on a daemon thread) serving the command
    console page at '/' and a websocket endpoint at '/ws'."""
    def __init__(self, main, port):
        self.main = main
        self.port = port
        self.CommandWebSocket = CommandWebSocket
        # Back-reference so each socket can execute commands. NOTE(review):
        # this sets a CLASS attribute, shared by every socket instance.
        self.CommandWebSocket.main = self.main
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True  # Daemonize thread
        thread.start()
    def run(self):
        cherrypy.config.update({'server.socket_port': self.port})
        WebSocketPlugin(cherrypy.engine).subscribe()
        cherrypy.tools.websocket = WebSocketTool()
        class Root(object):
            @cherrypy.expose
            def index(self):
                # Serve the static console page relative to this file
                script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
                rel_path = "../web/command.html"
                abs_file_path = os.path.join(script_dir, rel_path)
                commandMarkup = open(abs_file_path).read()
                return commandMarkup
            @cherrypy.expose
            def ws(self):
                # you can access the class instance through
                # cherrypy.request.ws_handler; the actual upgrade is done by
                # the websocket tool configured below.
                handler = cherrypy.request.ws_handler
        cherrypy.quickstart(Root(), '/', config={'/ws':
                                                     {
                                                         'tools.websocket.on': True,
                                                         'tools.websocket.handler_cls': self.CommandWebSocket
                                                     },
                                                 'global': {
                                                     'log.screen': False,
                                                     'log.error_file': '',
                                                     'log.access_file': ''
                                                 }})
| eburlingame/lightingserver | main/server.py | server.py | py | 2,376 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "cherrypy.config.update",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cherrypy.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cherrypy.config.update",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cher... |
4848604123 | #!/usr/bin/env python3
"""
Launches a registered algorithm on the given set of media
"""
import argparse
import logging
import time
import tator
logging.basicConfig(
filename='launch_algorithm.log',
filemode='w',
format='%(asctime)s %(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO)
logger = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
    """Parse the provided script arguments.

    Returns:
        Parsed arguments in a namespace object
    """
    parser = argparse.ArgumentParser(
        description="Launches the registered algorithm workflow (via name) on the given set of media")
    # tator adds its own --host/--token arguments to the parser
    parser = tator.get_parser(parser=parser)
    parser.add_argument("--project", type=int, required=True,
                        help="Unique project id")
    parser.add_argument("--algorithm", type=str, required=True,
                        help="Name of registered algorithm to launch")
    parser.add_argument("--media", type=int, required=True, nargs="+",
                        help="Media IDs to process")
    parsed = parser.parse_args()
    logger.info(parsed)
    return parsed
def launch_algorithm(
        host: str,
        token: str,
        project: int,
        algorithm_name: str,
        media_ids: list) -> None:
    """ Launches the registered algorithm on the given set of media

    Args:
        host (str): Tator server url
        token (str): User access token to tator server
        project (int): Unique identifier of project that contains the algorithm/media
        algorithm_name (str): Name of algorithm to launch
        media_ids (list of ints): List of media IDs for the algorithm to process

    Postconditions:
        Algorithm launched and monitored until every job succeeds.

    Raises:
        ValueError: if any launched job reports 'Failed'.
    """
    # Get the interface to tator
    tator_api = tator.get_api(host=host, token=token)

    # Launch the algorithm
    spec = tator.models.AlgorithmLaunchSpec(
        algorithm_name=algorithm_name,
        media_ids=media_ids)
    logger.info("Algorithm Spec")
    logger.info(f"{spec}")
    response = tator_api.algorithm_launch(
        project=project,
        algorithm_launch_spec=spec)
    print('Algorithm launch response logged.')
    logger.info('Algorithm launch response')
    logger.info(response)

    # Monitor progress, wait until complete.
    while True:
        jobs = tator_api.get_job_list(project, gid=response.gid)
        all_done = True
        for job in jobs:
            if job.status != 'Succeeded':
                all_done = False
            if job.status == 'Failed':
                raise ValueError("Algorithm job failed!")
        # Bug fix: all_done was computed but never checked, so the loop
        # polled forever even after every job succeeded.
        if all_done:
            break
        time.sleep(10)
def main() -> None:
    """Script entry point: parse arguments and launch the algorithm."""
    args = parse_args()
    launch_algorithm(host=args.host, token=args.token, project=args.project,
                     algorithm_name=args.algorithm, media_ids=args.media)
    print('[FINISHED] launch_algorithm.py')


if __name__ == "__main__":
    main()
| cvisionai/tator-py | examples/launch_algorithm.py | launch_algorithm.py | py | 2,972 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.Argu... |
19163726958 | import pygame
import random
pasta = "..\\assets\\"
class parede():
    """A pair of pipes (top + bottom) scrolling left across the screen."""
    distancia = 200
    speed = 4
    canos = [pygame.sprite.Sprite(), pygame.sprite.Sprite()]
    points = 0
    reinitX = 3000
    def __init__(self):
        self.canos[0].image = pygame.image.load(pasta + "canos_a.png")
        self.canos[0].rect = self.canos[0].image.get_rect()
        self.canos[1].image = pygame.image.load(pasta + "canos_b.png")
        self.canos[1].rect = self.canos[1].image.get_rect()
        # Random vertical offset for the top pipe
        self.canos[0].rect.top = random.randint(-350, -200)
        self.reinitX = pygame.display.get_window_size()[0]
        self.original()
    def atualize(self):
        # Keep the bottom pipe aligned under the top one with a fixed gap
        self.canos[1].rect.centerx = self.canos[0].rect.centerx
        self.canos[1].rect.top = (self.canos[0].rect.bottom + 200)
    def walk(self):
        self.canos[0].rect.right -= self.speed
        if self.canos[0].rect.right < 0:
            self.canos[0].rect.top = random.randint(-314, -100)
            self.original()
        # Bug fix: previously a point was added on EVERY frame after the pipe
        # crossed the screen centre; each pipe pass now scores exactly once.
        if (not self.passed and
                round(self.canos[0].rect.right) <
                round(pygame.display.get_window_size()[0] / 2)):
            self.points += 1
            self.passed = True
        self.atualize()
    def original(self):
        # Move the pipe back to the right edge and allow scoring again
        self.passed = False
        self.canos[0].rect.left = self.reinitX
        self.atualize()
class player(pygame.sprite.Sprite):
    """The bird sprite controlled by the player (space / K key or mouse click to flap)."""
    jumpForce = 0.0  # upward impulse applied on a flap (expected to be set externally)
    momentum = 0.0   # current vertical velocity; positive values move the sprite down
    gravidade = 0.0  # gravity acceleration (expected to be set externally)
    fps = 1          # frames per second used to scale per-frame physics (set externally)
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(pasta+"ave.png")  # which image to use for the bird
        self.rect = self.image.get_rect()
    def jump(self):
        # NOTE(review): this drains the whole pygame event queue -- any other
        # code polling events in the same frame will miss them.
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN :
                if event.key == pygame.K_SPACE or event.key == pygame.K_k:
                    self.momentum = -self.jumpForce/self.fps
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.momentum = -self.jumpForce/self.fps
        # Apply gravity, then integrate velocity into vertical position.
        self.momentum += self.gravidade/self.fps
        self.rect.y += self.momentum
class button(pygame.sprite.Sprite):
    """A static UI sprite whose image is loaded from the assets folder."""
    def __init__(self, a: str):
        super().__init__()
        # Load the requested asset and derive the clickable rect from it.
        self.image = pygame.image.load(pasta+a)
        self.rect = self.image.get_rect()
| Pseudo-nimo/Jorge | código/parede.py | parede.py | py | 2,276 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.sprite.Sprite",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.image"... |
30522792727 | """
Title: Find closest number
Problem:
Given an array of sorted integers. We need to find the closest value to
the given number. Array may contain duplicate values and negative numbers.
Execution: python find_closest_number.py
"""
import unittest
from typing import List, Optional
def find_closest_num(arr: List[int], target: int) -> Optional[int]:
    """Return the element of sorted ``arr`` whose value is closest to ``target``.

    Runs a binary search while tracking the element with the smallest
    absolute difference seen so far, so it is O(log n). On a tie the
    element examined first during the search wins.

    Args:
        arr: Integers sorted ascending (may contain duplicates/negatives).
        target: The value to approximate.

    Returns:
        The closest element, or None when ``arr`` is empty.
    """
    # Edge cases for an empty list or a list with only one element.
    if not arr:
        return None
    if len(arr) == 1:
        return arr[0]

    min_diff = float("inf")
    closest_num = None
    low, high = 0, len(arr) - 1

    while low <= high:
        mid = (low + high) // 2

        # If the element itself is the target, it is trivially the closest.
        if arr[mid] == target:
            return arr[mid]

        # Bug fix: the original compared only arr[mid]'s *neighbours*, which
        # crashed comparing None when mid sat on a list boundary and never
        # considered arr[mid] itself (e.g. [1, 3] with target 0). Binary
        # search visits both elements adjacent to the insertion point, so
        # comparing each mid is sufficient to find the closest value.
        diff = abs(arr[mid] - target)
        if diff < min_diff:
            min_diff = diff
            closest_num = arr[mid]

        # Move the mid-point appropriately as is done via binary search.
        if arr[mid] < target:
            low = mid + 1
        else:
            high = mid - 1

    return closest_num
class TestFindClosestNumber(unittest.TestCase):
    """Unit tests for find_closest_num."""

    def test_exact_match(self):
        # Target present in the list must be returned unchanged.
        self.assertEqual(find_closest_num([1, 2, 4, 5, 6, 6, 8, 9], 4), 4)

    def test_empty_list(self):
        # Empty input has no closest element.
        self.assertIsNone(find_closest_num([], 7))

    def test_single_element(self):
        # A one-element list is always its own closest value.
        self.assertEqual(find_closest_num([3], 100), 3)
if __name__ == "__main__":
    # Run the unit tests when this file is executed directly.
    unittest.main()
| samgh/6-Weeks-to-Interview-Ready | quickstart_guides/sorting_searching/python/find_closest_number.py | find_closest_number.py | py | 1,927 | python | en | code | 104 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
... |
22716966869 | # importing modules from flask library
from flask import Flask , render_template
# creating instance of class Flask, by providing __name__ keyword as argument
app = Flask(__name__)  # __name__ lets Flask locate the templates/ folder used by render_template
# write the routes using decorator functions
# default route or 'URL'
@app.route("/me")
def home():
    """Render the profile page for me."""
    return render_template('index.html' , name = "TANIA SHAIKH" , age = "14")
# define the route to father webpage
@app.route("/pup")
def homep():
    """Render the profile page for father."""
    return render_template('index.html' , name = "ASHIF SHAIKH" , age = "43")
# define the route to mother webpage
@app.route("/mum")
def homem():
    """Render the profile page for mother."""
    return render_template('index.html' , name = "RIZWANA SHAIKH" , age = "35")
# define the route to friends webpage
@app.route("/friend")
def homef():
    """Render the profile page for a friend."""
    return render_template('index.html' , name = "VOICE BOX" , age = "14")
# add other routes, if you want
# run the file
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger and reloader;
    # fine for local development, do not deploy with it enabled.
    app.run(debug=True)
| 92009/family-info-web-pge | family tree/app.py | app.py | py | 1,232 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.render_t... |
13043600258 | """Sensor Graph main object."""
from collections import deque
import logging
import struct
from pkg_resources import iter_entry_points
from toposort import toposort_flatten
from iotile.core.exceptions import ArgumentError
from iotile.core.hw.reports import IOTileReading
from iotile.core.utilities.hash_algorithms import KNOWN_HASH_ALGORITHMS
from .node_descriptor import parse_node_descriptor
from .slot import SlotIdentifier
from .stream import DataStream
from .known_constants import config_fast_tick_secs, config_tick1_secs, config_tick2_secs, known_metadata
from .exceptions import NodeConnectionError, ProcessingFunctionError, ResourceUsageError
class SensorGraph:
    """A graph based data processing engine.

    Nodes are added with add_node() using the sensor-graph DSL, inputs are
    pushed through process_input(), and outputs leave the device via the
    streamers registered with add_streamer().

    Args:
        sensor_log (SensorLog): The sensor log where we should store our
            data
        model (DeviceModel): The device model to use for limiting our
            available resources
        enforce_limits (bool): Enforce the sensor graph size limits imposed
            by the chosen device model.  This can be useful for getting early
            failures on sensor graphs that cannot work on a given device model.
            Defaults to False.
    """
def __init__(self, sensor_log, model=None, enforce_limits=False):
    self.roots = []               # nodes fed directly by external inputs
    self.nodes = []               # every node, in insertion order
    self.streamers = []           # streamer objects; position == streamer.index
    self.constant_database = {}   # stream -> constant value (see add_constant)
    self.metadata_database = {}   # metadata name -> value (see add_metadata)
    self.config_database = {}     # slot -> {config_id: (type, value)}
    self.checksums = None         # populated by add_checksum()
    self.sensor_log = sensor_log
    self.model = model
    self._manually_triggered_streamers = set()  # indices marked via mark_streamer()
    self._logger = logging.getLogger(__name__)
    if enforce_limits:
        if model is None:
            raise ArgumentError("You must pass a device model if you set enforce_limits=True")
        self._max_nodes = model.get(u'max_nodes')
        self._max_streamers = model.get(u'max_streamers')
    else:
        # No limits enforced: add_node/add_streamer skip their size checks.
        self._max_nodes = None
        self._max_streamers = None
def clear(self):
"""Clear all nodes from this sensor_graph.
This function is equivalent to just creating a new SensorGraph() object
from scratch. It does not clear any data from the SensorLog, however.
"""
self.roots = []
self.nodes = []
self.streamers = []
self.constant_database = {}
self.metadata_database = {}
self.config_database = {}
def add_node(self, node_descriptor):
    """Add a node to the sensor graph based on the description given.

    The node_descriptor must follow the sensor graph DSL and describe
    a node whose input nodes already exist.

    Args:
        node_descriptor (str): A description of the node to be added
            including its inputs, triggering conditions, processing function
            and output stream.

    Raises:
        ResourceUsageError: If the device model's node limit is exceeded.
        NodeConnectionError: If a buffered input refers to a node that has
            not been created yet.
        ProcessingFunctionError: If the node's processing function is not
            registered under the ``iotile.sg_processor`` entry point.
    """
    if self._max_nodes is not None and len(self.nodes) >= self._max_nodes:
        raise ResourceUsageError("Maximum number of nodes exceeded", max_nodes=self._max_nodes)
    node, inputs, processor = parse_node_descriptor(node_descriptor, self.model)
    in_root = False
    for i, input_data in enumerate(inputs):
        selector, trigger = input_data
        walker = self.sensor_log.create_walker(selector)
        # Constant walkers begin life initialized to 0 so they always read correctly
        if walker.selector.inexhaustible:
            walker.reading = IOTileReading(0xFFFFFFFF, walker.selector.as_stream(), 0)
        node.connect_input(i, walker, trigger)
        if selector.input and not in_root:
            self.roots.append(node)
            in_root = True  # Make sure we only add to root list once
        else:
            # Non-root inputs must be fed by an existing node's output stream.
            found = False
            for other in self.nodes:
                if selector.matches(other.stream):
                    other.connect_output(node)
                    found = True
            if not found and selector.buffered:
                raise NodeConnectionError("Node has input that refers to another node that has not been created yet", node_descriptor=node_descriptor, input_selector=str(selector), input_index=i)
    # Also make sure we add this node's output to any other existing node's inputs
    # this is important for constant nodes that may be written from multiple places
    # FIXME: Make sure when we emit nodes, they are topologically sorted
    for other_node in self.nodes:
        for selector, trigger in other_node.inputs:
            if selector.matches(node.stream):
                node.connect_output(other_node)
    # Find and load the processing function for this node
    func = self.find_processing_function(processor)
    if func is None:
        raise ProcessingFunctionError("Could not find processing function in installed packages", func_name=processor)
    node.set_func(processor, func)
    self.nodes.append(node)
def add_config(self, slot, config_id, config_type, value):
"""Add a config variable assignment to this sensor graph.
Args:
slot (SlotIdentifier): The slot identifier that this config
variable is assigned to.
config_id (int): The 16-bit id of this config_id
config_type (str): The type of the config variable, currently
supported are fixed width integer types, strings and binary
blobs.
value (str|int|bytes): The value to assign to the config variable.
"""
if slot not in self.config_database:
self.config_database[slot] = {}
self.config_database[slot][config_id] = (config_type, value)
def add_streamer(self, streamer):
"""Add a streamer to this sensor graph.
Args:
streamer (DataStreamer): The streamer we want to add
"""
if self._max_streamers is not None and len(self.streamers) >= self._max_streamers:
raise ResourceUsageError("Maximum number of streamers exceeded", max_streamers=self._max_streamers)
streamer.link_to_storage(self.sensor_log)
streamer.index = len(self.streamers)
self.streamers.append(streamer)
def add_constant(self, stream, value):
"""Store a constant value for use in this sensor graph.
Constant assignments occur after all sensor graph nodes have been
allocated since they must be propogated to all appropriate virtual
stream walkers.
Args:
stream (DataStream): The constant stream to assign the value to
value (int): The value to assign.
"""
if stream in self.constant_database:
raise ArgumentError("Attempted to set the same constant twice", stream=stream, old_value=self.constant_database[stream], new_value=value)
self.constant_database[stream] = value
def add_metadata(self, name, value):
"""Attach a piece of metadata to this sensorgraph.
Metadata is not used during the simulation of a sensorgraph but allows
it to convey additional context that may be used during code
generation. For example, associating an `app_tag` with a sensorgraph
allows the snippet code generator to set that app_tag on a device when
programming the sensorgraph.
Arg:
name (str): The name of the metadata that we wish to associate with this
sensorgraph.
value (object): The value we wish to store.
"""
if name in self.metadata_database:
raise ArgumentError("Attempted to set the same metadata value twice", name=name, old_value=self.metadata_database[name], new_value=value)
self.metadata_database[name] = value
def initialize_remaining_constants(self, value=0):
    """Ensure that all constant streams referenced in the sensor graph have a value.

    Constant streams that are automatically created by the compiler are initialized
    as part of the compilation process but it's possible that the user references
    other constant streams but never assigns them an explicit initial value.  This
    function will initialize them all to a default value (0 if not passed) and
    return the streams that were so initialized.

    Args:
        value (int): Optional value to use to initialize all uninitialized constants.
            Defaults to 0 if not passed.

    Returns:
        list(DataStream): A list of all of the constant streams that were not previously
            initialized and were initialized to the given value in this function.
    """
    remaining = []
    for node, _inputs, _outputs in self.iterate_bfs():
        # A node can reference constants both as inputs and as its output stream.
        streams = node.input_streams() + [node.stream]
        for stream in streams:
            if stream.stream_type is not DataStream.ConstantType:
                continue
            if stream not in self.constant_database:
                self.add_constant(stream, value)
                remaining.append(stream)
    return remaining
def load_constants(self):
    """Load all constants into their respective streams.

    All previous calls to add_constant stored a constant value that
    should be associated with virtual stream walkers.  This function
    actually calls push_stream in order to push all of the constant
    values to their walkers.
    """
    for stream, value in self.constant_database.items():
        # raw_time 0: constants are timeless; stream.encode() is presumably
        # the packed stream id used as the reading's stream field -- confirm.
        self.sensor_log.push(stream, IOTileReading(0, stream.encode(), value))
def get_config(self, slot, config_id):
"""Get a config variable assignment previously set on this sensor graph.
Args:
slot (SlotIdentifier): The slot that we are setting this config variable
on.
config_id (int): The 16-bit config variable identifier.
Returns:
(str, str|int): Returns a tuple with the type of the config variable and
the value that is being set.
Raises:
ArgumentError: If the config variable is not currently set on the specified
slot.
"""
if slot not in self.config_database:
raise ArgumentError("No config variables have been set on specified slot", slot=slot)
if config_id not in self.config_database[slot]:
raise ArgumentError("Config variable has not been set on specified slot", slot=slot, config_id=config_id)
return self.config_database[slot][config_id]
def is_output(self, stream):
"""Check if a stream is a sensor graph output.
Return:
bool
"""
for streamer in self.streamers:
if streamer.selector.matches(stream):
return True
return False
def get_tick(self, name):
    """Check the config variables to see if there is a configurable tick.

    Sensor Graph has a built-in 10 second tick that is sent every 10
    seconds to allow for triggering timed events.  There is a second
    'user' tick that is generated internally by the sensorgraph compiler
    and used for fast operations and finally there are several field
    configurable ticks that can be used for setting up configurable
    timers.

    This is done by setting a config variable on the controller with the
    desired tick interval, which is then interpreted by this function.

    The appropriate config_id to use is listed in `known_constants.py`

    Args:
        name (str): One of 'fast', 'user1' or 'user2'.

    Returns:
        int: 0 if the tick is disabled, otherwise the number of seconds
        between each tick

    Raises:
        ArgumentError: If ``name`` is not a known tick name.
    """
    # Map the friendly tick name onto its controller config variable id.
    name_map = {
        'fast': config_fast_tick_secs,
        'user1': config_tick1_secs,
        'user2': config_tick2_secs
    }
    config = name_map.get(name)
    if config is None:
        raise ArgumentError("Unknown tick requested", name=name)
    slot = SlotIdentifier.FromString('controller')
    try:
        var = self.get_config(slot, config)
        return var[1]  # (type, value) tuple; only the value is wanted
    except ArgumentError:
        # Config variable never set on the controller -> tick disabled.
        return 0
def _parse_configs_ignorelist(self):
    """Parse the configs-to-ignore metadata string into a list of tuples.

    Reads the ``hash_configs_ignorelist`` metadata entry (a stringified
    dict mapping slot strings to lists of config ids) and also records it
    as a config variable so a programmer can later recover which entries
    were excluded from the checksum.

    Returns:
        list(tuple): A list of config variable entries that are formatted
            as tuples (slot, config_id).
    """
    import ast

    configs_ignorelist = []
    slot = SlotIdentifier.FromString('controller')
    configs_ignorelist_string = self.metadata_database['hash_configs_ignorelist']
    configs_ignorelist_address = self.metadata_database['hash_configs_ignorelist_address']
    configs_ignorelist_type, _ = self.get_config(slot, configs_ignorelist_address)
    self.add_config(slot, configs_ignorelist_address, configs_ignorelist_type, configs_ignorelist_string)
    # Wrap in braces when the meta string was written without them.
    if configs_ignorelist_string[0] != '{' and configs_ignorelist_string[-1] != '}':
        configs_ignorelist_string = '{' + configs_ignorelist_string + '}'
    # Security fix: ast.literal_eval only accepts Python literals, unlike
    # eval() which would execute arbitrary code embedded in the metadata.
    configs_ignorelist_dict = ast.literal_eval(configs_ignorelist_string)
    for target, config_ids in configs_ignorelist_dict.items():
        slot = SlotIdentifier.FromString(target)
        for config_id in config_ids:
            configs_ignorelist.append((slot, config_id))
    return configs_ignorelist
def add_checksum(self, force_checksum=False):
    """Check metadata if sensorgraph's checksum needs to be added.

    The sensorgraph must contain all the meta tags required to calculate the
    checksum.

    hash_address: Address where calculated checksum will be flashed.
    hash_algorihm: Algorithm to use to calculate checksums, only "sha256"
        and "crc32_0x104C11DB7" are currently supported.
    hash_algorithm_address: Address where the algorithm specifier is
        flashed.  This is used so the programmer knows which algorithm to use
        to debug the device.
    hash_configs_ignorelist: Dict of config variables to exclude during the
        calculation of checksum.  This MUST contain the 'hash_address'.
        The ignore list must be formatted like so...
        meta hash_configs_ignorelist = "'controller':[0xcafe,0xdead],'slot 1':[0xbabe]";
        This entry should be formatted as a stringified dictionary.
    hash_configs_ignorelist_address: Address where the ignore list is
        flashed.  This is used so the programmer knows which config variables
        to exclude in the calculation of the device's checksums.

    Args:
        force_checksum (bool): Compute checksums with sha256 and no ignore
            list even when the hash_* metadata entries are absent; the
            result is stored only on self.checksums, not as a config var.
    """
    if force_checksum:
        pass
    elif 'hash_address' not in self.metadata_database or\
        'hash_algorithm' not in self.metadata_database or\
        'hash_algorithm_address' not in self.metadata_database or\
        'hash_configs_ignorelist' not in self.metadata_database or\
        'hash_configs_ignorelist_address' not in self.metadata_database:
        # Not all hash metadata present: checksumming was not requested.
        return
    slot = SlotIdentifier.FromString('controller')
    if not force_checksum:
        hash_address = self.metadata_database['hash_address']
        algorithm = self.metadata_database['hash_algorithm']
        algorithm_address = self.metadata_database['hash_algorithm_address']
        # Record the chosen algorithm as a config variable for the programmer.
        algorithm_config_type, _ = self.get_config(slot, algorithm_address)
        self.add_config(slot, algorithm_address, algorithm_config_type, algorithm)
        configs_ignorelist = self._parse_configs_ignorelist()
    else:
        algorithm = "sha256"
        configs_ignorelist = []
    hash_algorithm = KNOWN_HASH_ALGORITHMS[algorithm]
    # Hash each section of the compiled graph independently ...
    nodes_checksum = hash_algorithm.calculate(hash_algorithm.algorithm,
                                              self.get_nodes_binary())
    self._logger.debug("nodes_checksum: %s", nodes_checksum)
    streamers_checksum = hash_algorithm.calculate(hash_algorithm.algorithm,
                                                  self.get_streamers_binary())
    self._logger.debug("streamers_checksum: %s", streamers_checksum)
    configs_checksum = hash_algorithm.calculate(hash_algorithm.algorithm,
                                                self.get_config_database_binary(ignore_configs=configs_ignorelist))
    self._logger.debug("configs_checksum: %s", configs_checksum)
    constants_checksum = hash_algorithm.calculate(hash_algorithm.algorithm,
                                                  self.get_constant_database_binary())
    self._logger.debug("constants_checksum: %s", constants_checksum)
    metadata_checksum = hash_algorithm.calculate(hash_algorithm.algorithm,
                                                 self.get_metadata_database_binary())
    self._logger.debug("metadata_checksum: %s", metadata_checksum)
    # ... then hash the concatenation of the section checksums.
    # The order of building the following string is important for other
    # applications to calculate the proper checksum
    combined_checksum_string = nodes_checksum + streamers_checksum +\
        configs_checksum + constants_checksum +\
        metadata_checksum
    combined_checksum_bytes = bytes(combined_checksum_string, 'utf-8')
    device_checksum = hash_algorithm.calculate(hash_algorithm.algorithm,
                                               combined_checksum_bytes)
    self._logger.debug("device_checksum: %s", device_checksum)
    self.checksums = {
        "nodes": nodes_checksum,
        "streamers": streamers_checksum,
        "configs": configs_checksum,
        "constants": constants_checksum,
        "metadata": metadata_checksum,
        "device": device_checksum
    }
    if not force_checksum:
        # Store the final checksum so it gets flashed at hash_address.
        hash_config_type, _ = self.get_config(slot, hash_address)
        self.add_config(slot, hash_address, hash_config_type, device_checksum)
def process_input(self, stream, value, rpc_executor):
    """Process an input through this sensor graph.

    The tick information in value should be correct and is transfered
    to all results produced by nodes acting on this tick.

    Args:
        stream (DataStream): The stream the input is part of
        value (IOTileReading): The value to process
        rpc_executor (RPCExecutor): An object capable of executing RPCs
            in case we need to do that.
    """
    self.sensor_log.push(stream, value)
    # FIXME: This should be specified in our device model
    if stream.important:
        associated_output = stream.associated_stream()
        self.sensor_log.push(associated_output, value)
    to_check = deque([x for x in self.roots])
    while len(to_check) > 0:
        node = to_check.popleft()
        if node.triggered():
            # Bug fix: `results` was previously unbound when node.process()
            # raised, turning the logged failure into a NameError below.
            results = []
            try:
                results = node.process(rpc_executor, self.mark_streamer)
                for result in results:
                    result.raw_time = value.raw_time
                    self.sensor_log.push(node.stream, result)
            except Exception:
                # Bug fix: a bare except here also swallowed SystemExit and
                # KeyboardInterrupt; only trap real processing errors.
                self._logger.exception("Unhandled exception in graph node processing function for node %s", str(node))
            # If we generated any outputs, notify our downstream nodes
            # so that they are also checked to see if they should run.
            if len(results) > 0:
                to_check.extend(node.outputs)
def mark_streamer(self, index):
"""Manually mark a streamer that should trigger.
The next time check_streamers is called, the given streamer will be
manually marked that it should trigger, which will cause it to trigger
unless it has no data.
Args:
index (int): The index of the streamer that we should mark as
manually triggered.
Raises:
ArgumentError: If the streamer index is invalid.
"""
self._logger.debug("Marking streamer %d manually", index)
if index >= len(self.streamers):
raise ArgumentError("Invalid streamer index", index=index, num_streamers=len(self.streamers))
self._manually_triggered_streamers.add(index)
def check_streamers(self, blacklist=None):
    """Check if any streamers are ready to produce a report.

    You can limit what streamers are checked by passing a set-like
    object into blacklist.

    This method is the primary way to see when you should poll a given
    streamer for its next report.

    Note, this function is not idempotent.  If a streamer is marked as
    manual and it is triggered from a node rule inside the sensor_graph,
    that trigger will only last as long as the next call to
    check_streamers() so you need to explicitly build a report on all
    ready streamers before calling check_streamers again.

    Args:
        blacklist (set): Optional set of streamer indices that should
            not be checked right now.

    Returns:
        list of DataStreamer: A list of the ready streamers.
    """
    ready = []
    selected = set()
    for i, streamer in enumerate(self.streamers):
        if blacklist is not None and i in blacklist:
            continue
        if i in selected:
            continue
        # Any manual mark is consumed here whether or not the streamer fires.
        marked = False
        if i in self._manually_triggered_streamers:
            marked = True
            self._manually_triggered_streamers.remove(i)
        if streamer.triggered(marked):
            self._logger.debug("Streamer %d triggered, manual=%s", i, marked)
            ready.append(streamer)
            selected.add(i)
            # Handle streamers triggered with another
            # NOTE(review): enumerate(self.streamers[i:]) yields j relative
            # to the slice (j == 0 is streamer i itself), while `selected`
            # holds absolute indices; the `j not in selected` check and
            # `selected.add(j)` below mix the two schemes -- confirm the
            # intended indexing against device behavior.
            for j, streamer2 in enumerate(self.streamers[i:]):
                if streamer2.with_other == i and j not in selected and streamer2.triggered(True):
                    self._logger.debug("Streamer %d triggered due to with-other on %d", j, i)
                    ready.append(streamer2)
                    selected.add(j)
    return ready
def iterate_bfs(self):
    """Generator that yields node, [inputs], [outputs] in breadth first order.

    This generator will iterate over all nodes in the sensor graph, yielding
    a 3 tuple for each node with a list of all of the nodes connected to its
    inputs and all of the nodes connected to its output.

    Returns:
        (SGNode, list(SGNode), list(SGNode)): A tuple for each node in the graph
    """
    working_set = deque(self.roots)
    seen = []
    while len(working_set) > 0:
        curr = working_set.popleft()
        # Now build input and output node lists for this node.  Inputs are
        # found by matching each input walker against previously seen nodes.
        inputs = []
        for walker, _ in curr.inputs:
            for other in seen:
                if walker.matches(other.stream) and other not in inputs:
                    inputs.append(other)
        outputs = [x for x in curr.outputs]
        yield curr, inputs, outputs
        # NOTE(review): outputs are enqueued without a visited check, so a
        # node reachable from several parents appears to be yielded once per
        # parent -- confirm whether callers rely on deduplicated traversal.
        working_set.extend(curr.outputs)
        seen.append(curr)
def sort_nodes(self):
    """Topologically sort all of our nodes.

    Topologically sorting our nodes makes nodes that are inputs to other
    nodes come first in the list of nodes.  This is important to do before
    programming a sensorgraph into an embedded device whose engine assumes
    a topologically sorted graph.

    The sorting is done in place on self.nodes

    Raises:
        NodeConnectionError: If a root node does not end up at the front of
            the sorted node list.
    """
    # Map each node's identity to its index so the dependency graph can be
    # expressed with integers for toposort.
    node_map = {id(node): i for i, node in enumerate(self.nodes)}
    node_deps = {}
    for node, inputs, _outputs in self.iterate_bfs():
        node_index = node_map[id(node)]
        deps = {node_map[id(x)] for x in inputs}
        node_deps[node_index] = deps
    # Now that we have our dependency tree properly built, topologically
    # sort the nodes and reorder them.
    node_order = toposort_flatten(node_deps)
    self.nodes = [self.nodes[x] for x in node_order]
    # Check root nodes all topographically sorted to the beginning
    for root in self.roots:
        if root not in self.nodes[0:len(self.roots)]:
            raise NodeConnectionError("Inputs not sorted in the beginning", node=str(root), node_position=self.nodes.index(root))
@classmethod
def find_processing_function(cls, name):
    """Look up a processing function registered under ``name``.

    Searches the installed ``iotile.sg_processor`` entry points via
    pkg_resources and loads the first match found.

    Args:
        name (str): The name of the function we're looking for.

    Returns:
        callable: The processing function, or None if none is registered.
    """
    for entry_point in iter_entry_points(u'iotile.sg_processor', name):
        return entry_point.load()
    return None
def dump_roots(self):
"""Dump all the root nodes in this sensor graph as a list of strings."""
return [str(x) for x in self.roots]
def dump_nodes(self):
"""Dump all of the nodes in this sensor graph as a list of strings."""
return [str(x) for x in self.nodes]
def dump_streamers(self):
"""Dump all of the streamers in this sensor graph as a list of strings."""
return [str(streamer) for streamer in self.streamers]
def dump_constant_database(self):
"""Dump all of the constants in this sensor graph as a list of strings."""
constant_dump = []
for stream, value in sorted(self.constant_database.items(), key=lambda x: x[0].encode()):
constant_dump.append("'{}' {}".format(stream, value))
return constant_dump
def dump_metadata_database(self):
"""Dump all of the metadata in this sensor graph as a list of strings."""
metadata_dump = []
for metadata, value in sorted(self.metadata_database.items(), key=lambda x: x[0].encode()):
metadata_dump.append("{}: {}".format(metadata, value))
return metadata_dump
def dump_config_database(self, dump_config_type=True, ignore_configs=()):
    """Dump all of the config variables in this sensor graph as a list of strings.

    Args:
        dump_config_type (bool): Include the declared type of each config
            variable in the formatted line.
        ignore_configs (iterable of (SlotIdentifier, int)): Config variables
            to omit from the dump (used during checksum calculation).  The
            default was changed from a shared mutable ``[]`` to an immutable
            ``()``; the argument is only iterated, so behavior is identical.

    Returns:
        list(str): One formatted line per (slot, config variable) pair.
    """
    config_dump = []
    for slot, conf_vars in sorted(self.config_database.items(), key=lambda x: x[0].encode()):
        for conf_var, conf_def in sorted(conf_vars.items(), key=lambda x: x[0]):
            # Skip ignored entries before converting the value so an
            # unconvertible-but-ignored entry cannot raise.
            if _is_ignored_config(ignore_configs, slot, conf_var):
                self._logger.debug("Ignoring '%s:%s' in checksum calculation",
                                   slot, conf_var)
                continue
            conf_type, conf_val = conf_def
            conf_val_bytes = _convert_to_bytearray(conf_type, conf_val)
            if dump_config_type:
                config_dump.append("'{}' {} {} {}".format(slot, conf_var,
                                                          conf_type,
                                                          conf_val_bytes))
            else:
                config_dump.append("'{}' {} {}".format(slot, conf_var,
                                                       conf_val_bytes))
    return config_dump
def get_nodes_binary(self):
    """Return the concatenated binary descriptors of all graph nodes."""
    from .node_descriptor import create_binary_descriptor
    blob = bytearray()
    for node_string in self.dump_nodes():
        blob += create_binary_descriptor(node_string)
    return blob
def get_streamers_binary(self):
    """Return the concatenated binary descriptors of all streamers."""
    from .streamer_descriptor import parse_string_descriptor, create_binary_descriptor
    blob = bytearray()
    for streamer_string in self.dump_streamers():
        blob += create_binary_descriptor(parse_string_descriptor(streamer_string))
    return blob
def get_constant_database_binary(self):
"""Returns the binary representation of all the constant streams"""
binary_representation = bytearray()
for constant in self.dump_constant_database():
binary_representation += bytes(constant, 'utf-8')
return binary_representation
def get_metadata_database_binary(self):
    """Return the UTF-8 concatenation of the KNOWN metadata entries.

    Only entries whose name appears in ``known_metadata`` participate in
    the checksum; user-defined metadata is skipped.
    """
    binary_representation = bytearray()
    for metadata in self.dump_metadata_database():
        # Bug fix: maxsplit=1 so values that themselves contain ': ' no
        # longer make the two-value unpacking raise ValueError.
        entry, _ = metadata.split(': ', 1)
        if entry in known_metadata:
            binary_representation += bytes(metadata, 'utf-8')
    return binary_representation
def get_config_database_binary(self, ignore_configs=()):
    """Return the UTF-8 concatenation of the dumped config database.

    Args:
        ignore_configs (iterable of (SlotIdentifier, int)): Config variables
            to exclude from the dump.  The default was changed from a shared
            mutable ``[]`` to an immutable ``()``; the argument is only
            read, so behavior is identical.
    """
    binary_representation = bytearray()
    for config in self.dump_config_database(dump_config_type=False,
                                            ignore_configs=ignore_configs):
        binary_representation += bytes(config, 'utf-8')
    return binary_representation
def _is_ignored_config(ignore_configs, slot, config_var):
"""Checks if an entry is in the list of variables to ignore"""
for ignore_config in ignore_configs:
if slot == ignore_config[0] and config_var == ignore_config[1]:
return True
return False
def _convert_to_bytearray(type_name, value):
"""Convert a typed value to a binary array"""
int_types = {'uint8_t': 'B', 'int8_t': 'b', 'uint16_t': 'H', 'int16_t': 'h', 'uint32_t': 'L', 'int32_t': 'l'}
type_name = type_name.lower()
is_array = False
if type_name[-2:] == '[]':
if value[0] != '[' or value[-1] != ']':
raise ArgumentError("Array value improperly formated, must be a stringified list")
is_array = True
type_name = type_name[:-2]
if type_name not in int_types and type_name not in ['string', 'binary']:
raise ArgumentError('Type must be a known integer type, integer type array, string', known_integers=int_types.keys(), actual_type=type_name)
if type_name == 'string':
#value should be passed as a string
bytevalue = bytearray(value, 'utf-8')
elif type_name == 'binary':
bytevalue = bytearray(value)
elif is_array:
value = [int(n,0) for n in value[1:-1].split(',')]
bytevalue = bytearray(struct.pack("<%s" % (int_types[type_name]*len(value)), *value))
else:
bytevalue = bytearray(struct.pack("<%s" % int_types[type_name], value))
return bytevalue | iotile/coretools | iotilesensorgraph/iotile/sg/graph.py | graph.py | py | 31,087 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "iotile.core.exceptions.ArgumentError",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "exceptions.ResourceUsageError",
"line_number": 87,
"usage_type": "call"
},
{
... |
5573233633 | import os
import sys
import logging
from flask import Flask
# pylint: disable=no-member
# Get configuration from environment
DATABASE_URI = os.getenv('DATABASE_URI', 'postgres://soqerjpq:YZCacYhoNGHPtbX0zixiq7Lu81MrRJ1U@salt.db.elephantsql.com:5432/soqerjpq')
SECRET_KEY = os.getenv('SECRET_KEY', ' f869ba13-9684-40ce-a41e-eb5e2c72a289')
# Create Flask application
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = SECRET_KEY
# Import the routes After the Flask app is created
from service import service, models
# Set up logging for production
service.initialize_logging()
app.logger.info(70 * '*')
app.logger.info(' R E C O M M E N D A T I O N S E R V I C E R U N N I N G '.center(70, '*'))
app.logger.info(70 * '*')
service.init_db() # make our sqlalchemy tables
app.logger.info('Service inititalized!')
| nyudevops-recommendation/recommendations | service/__init__.py | __init__.py | py | 928 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "service.service.initialize_logging",
... |
35390527776 | from fastapi import FastAPI, Request
from src.api.architectures import RecommenderNet
from src.api.preprocessing import load_data, get_place_encodings
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import os
import joblib
import numpy as np
import pandas as pd
app = FastAPI()
User_Id = 10000
Place_Id = 10000
Age = 10000
Location_Encoded = 10000
embedding_size = 100
def load_model():
try:
model = RecommenderNet(User_Id, Place_Id, Age, Location_Encoded, embedding_size)
dummy_input = tf.constant([[0, 0, 0, 0]], dtype=tf.int32)
_ = model(dummy_input)
model.load_weights('models/recommender_model_weights.h5')
return model # Return the loaded model, not a response dictionary
except Exception as e:
response = {
"status": 204,
"message": str(e)
}
return response
def load_encoder():
try:
ohe_location = joblib.load('models/encoder.pkl')
return ohe_location
except Exception as e:
response = {
"status": 204,
"message": str(e)
}
return response
def get_top_recommendations(model, user_id, age_encoded, location_encoded, place_to_place_encoded, place_encoded_to_place, user_to_user_encoded, place_df, num_recommendations=10):
place_visited_by_user = place_df[place_df['User_Id'] == user_id]['Place_Id']
place_not_visited = place_df[~place_df['Place_Id'].isin(place_visited_by_user)]['Place_Id']
# Convert to a list of not visited place IDs
place_not_visited = list(
set(place_not_visited)
.intersection(set(place_to_place_encoded.keys()))
)
# Encode the not visited places and user
place_not_visited = [[place_to_place_encoded.get(x)] for x in place_not_visited]
user_encoder = user_to_user_encoded.get(user_id)
user_place_array = np.hstack(
([[user_encoder, age_encoded, location_encoded]] * len(place_not_visited), place_not_visited)
)
ratings = model.predict(user_place_array).flatten()
top_ratings_indices = ratings.argsort()[-num_recommendations:][::-1]
recommended_place_ids = [
place_encoded_to_place.get(place_not_visited[x][0]) for x in top_ratings_indices
]
top_place_user = (
place_df[place_df['Place_Id'].isin(place_visited_by_user)]
.sort_values(by='Place_Ratings', ascending=False)
.head(5)
.Place_Id.values
)
top_places = place_df[place_df['Place_Id'].isin(top_place_user)].drop_duplicates(subset=['Place_Id'])
top_recommendations = place_df[place_df['Place_Id'].isin(recommended_place_ids)].drop_duplicates(subset=['Place_Id'])
user_recommendations = {
"top_places_user": [
f"{row.Place_Name} : {row.City}" for row in top_places.itertuples()
],
"recommended_places": [
f"{row.Place_Name} : {row.City}" for row in top_recommendations.itertuples()
]
}
return user_recommendations
@app.post("/collaborative")
async def recommendation(data: Request):
data = await data.json()
user = data['user_id']
user = int(user)
model = load_model()
encoder = load_encoder()
df = load_data()
place_to_place_encoded, place_encoded_to_place, user_to_user_encoded = get_place_encodings(df)
place_df = df.copy()
user_data = df[df['User_Id'] == user]
if not user_data.empty:
age = int(user_data.iloc[0]['Age'])
Location = user_data.iloc[0]['Location']
location_encoded = encoder.transform([Location])
location_encoded = location_encoded[0]
else:
response = {
"status": 204,
"message": "User not found"
}
return response
try:
recommendations = get_top_recommendations(model, user, age, location_encoded, place_to_place_encoded, place_encoded_to_place, user_to_user_encoded, place_df)
response = {
"status": 200,
"input": [user, age, Location],
"recommendations": recommendations
}
except Exception as e:
response = {
"status": 500,
"message": str(e)
}
return response | Tamirgading/Pacmann-Recommendation-System | src/api/collaborative.py | collaborative.py | py | 4,243 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "src.api.architectures.RecommenderNet",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 25,
"usage_type": "call"
},
{
"api_name... |
32013676632 | from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torchvision
from torchvision import transforms, datasets, models
import os
import cv2
from model.residual_attention_network import ResidualAttentionModel
# Image Preprocessing
transform = transforms.Compose([
transforms.Scale(224),
transforms.ToTensor()])
# CIFAR-10 Dataset
train_dataset = datasets.CIFAR10(root='./data/',
train=True,
transform=transform,
download=True)
test_dataset = datasets.CIFAR10(root='./data/',
train=False,
transform=transform)
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=20,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=20,
shuffle=False)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
model = ResidualAttentionModel().cuda()
print(model)
lr = 0.001
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Training
for epoch in range(100):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images.cuda())
# print(images.data)
labels = Variable(labels.cuda())
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
print("hello")
if (i+1) % 100 == 0:
print ("Epoch [%d/%d], Iter [%d/%d] Loss: %.4f" %(epoch+1, 80, i+1, 500, loss.data[0]))
# Decaying Learning Rate
if (epoch+1) % 20 == 0:
lr /= 3
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Save the Model
torch.save(model.state_dict(), 'model.pkl')
# Test
correct = 0
total = 0
#
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
for images, labels in test_loader:
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.data).sum()
#
c = (predicted == labels.data).squeeze()
for i in range(4):
label = labels.data[i]
class_correct[label] += c[i]
class_total[label] += 1
print('Accuracy of the model on the test images: %d %%' % (100 * correct / total))
# class_correct = list(0. for i in range(10))
# class_total = list(0. for i in range(10))
# for data in testloader:
# images, labels = data
# outputs = model(Variable(images.cuda()))
# _, predicted = torch.max(outputs.data, 1)
# c = (predicted == labels).squeeze()
# for i in range(4):
# label = labels[i]
# class_correct[label] += c[i]
# class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
| liudaizong/Residual-Attention-Network | train.py | train.py | py | 3,426 | python | en | code | 37 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Scale",
"line_number": 16,
"usage_type": "call"
},
{
... |
33350375465 | import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def weights_init_conv(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class Generator(nn.Module):
def forward(self, *input):
return input
class MLPGenerator(Generator):
def __init__(self, nz, nx, nhidden, nhiddenlayer, negative_slope=1e-2):
super(MLPGenerator, self).__init__()
self.net = nn.Sequential()
i = 0
self.net.add_module('linear_%d' % i, nn.Linear(nz, nhidden))
self.net.add_module('act_%d' % i, nn.LeakyReLU(inplace=True, negative_slope=negative_slope))
for i in range(1, nhiddenlayer):
self.net.add_module('linear_%d' % i, nn.Linear(nhidden, nhidden))
self.net.add_module('act_%d' % i, nn.LeakyReLU(inplace=True, negative_slope=negative_slope))
self.net.add_module('linear_%d' % (i + 1), nn.Linear(nhidden, nx))
# self.net.apply(weights_init)
def forward(self, inputs):
return self.net(inputs)
class DCGenerator(Generator):
def __init__(self, nz, ngf, img_size, nc):
super(DCGenerator, self).__init__()
self.nz = nz
self.ngf = ngf
self.img_size = img_size
self.nc = nc
self.net = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
self.apply(weights_init_conv)
def forward(self, inputs):
return self.net(inputs)
| htt210/GAN-GenAndMetric | Generators.py | Generators.py | py | 2,673 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
32552634273 | # requests 모듈
import requests
from bs4 import BeautifulSoup
'''
url = "http://www.python.org"
response = requests.get(url)
print(response)
print(response.status_code)
html = response.text
#print(html)
url2 = "http://www.python.org/3"
response = requests.get(url2)
print(response)
print(response.status_code)
urls = ["http://www.python.org/", "http://www.daum.net/"]
filename = "robots.txt"
# print(urls[0] + filename)
for url in urls:
url_path = url + filename
print(url_path)
response = requests.get(url_path)
print(response)
'''
# html 태그 만들어 연습하기
"""
html_str = '''
<html>
<head>
</head>
<body>
<ul class='item'>
<li>인공지능</li>
<li>Big Data</li>
<li>로봇</li>
</ul>
</body>
</html>
'''
soup = BeautifulSoup(html_str, "html.parser")
first_ul = soup.find('ul', attrs={'class':'item'})
# print(first_ul)
print(first_ul.text)
# 리스트에 저장
all_li = first_ul.findAll('li')
print(all_li)
# print(all_li[1].text)
for li in all_li:
print(li.text)
"""
# Naver에서 필요한 정보 추출
"""
url = "http://www.naver.com"
response = requests.get(url)
html = BeautifulSoup(response.text, 'html.parser')
# print(html.head)
print(html.title)
print(html.title.name)
print(html.title.text)
find_div = html.find('div', attrs={'class':'service_area'})
#print(find_div)
first_a = find_div.find('a')
print(first_a.text)
all_a = find_div.findAll('a')
# print(all_a[0])
print(all_a[1].text)
# naver 메뉴 가져오기
menu_ul = html.find('ul', attrs={'class':'list_nav type_fix'})
menu_lis = menu_ul.findAll('li')
# print(menu_lis)
menu_all_a = menu_ul.findAll('a')
# for menu_a in menu_all_a:
# print(menu_a)
print(menu_all_a[1].text)
"""
# Naver 증권 > 시장지표 > 환전 고시 환율
url = "https://finance.naver.com/marketindex/"
response = requests.get(url)
html = BeautifulSoup(response.text, 'html.parser')
# find(), findAll() 사용
"""
ul = html.find('ul', attrs={'class':'data_lst'})
#print(ul)
# 첫번째 미국만 추출하기
first_li = ul.find('li')
# print(li)
exchange = first_li.find('span', attrs={'class':'blind'})
print(exchange)
value = first_li.find('span', attrs={'class':'value'})
print(value)
print(exchange.text, ':', value.text)
# 전체 횐율 추출하기
all_li = ul.findAll('li')
# print(all_li)
for li in all_li:
exchange = li.find('span', attrs={'class':'blind'})
value = li.find('span', attrs={'class':'value'})
# print(exchange.text, ':', value.text)
print(exchange.text.split(' ')[-1], ':', value.text)
"""
'''
# select(), select_one() 사용
ul = html.select_one('ul.data_lst')
# print(ul)
first_li = ul.select_one('li.on') # USD만 검색
# print(first_ul)
exchange = first_li.select_one('span.blind')
print(exchange.string)
value = first_li.select_one('span.value')
print(value.string)
print(exchange.string, ':', value.string)
all_li = ul.select('ul.data_lst li')
# print(all_li)
for li in all_li:
exchange = li.select_one('span.blind')
value = li.select_one('span.value')
# print(exchange.string, ':', value.string)
print(exchange.string.split(' ')[-1], ':', value.string)
'''
# 주식 정보
# 주식 1 종목
"""
def getcontent():
url = 'https://finance.naver.com/item/main.naver?code=086520'
response = requests.get(url)
content = BeautifulSoup(response.text, 'html.parser')
return content
content = getcontent()
today = content.find('div', attrs={'class':'today'})
# print(today)
price = today.find('span', attrs={'class':'blind'})
print(price.text)
print(f'에코프로 주가 : {price.text}원')
# 주식 여러 종목
def getcontent(item_code):
url = 'https://finance.naver.com/item/main.naver?code=' + item_code
response = requests.get(url)
content = BeautifulSoup(response.text, 'html.parser')
return content
def getprice(item_code):
content = getcontent(item_code)
today = content.find('div', attrs={'class': 'today'})
price = today.find('span', attrs={'class': 'blind'})
return price
에코프로 = getprice('086520')
네이버 = getprice('035420')
NCSOFT = getprice('036570')
print(f'에코프로 주가 : {에코프로.text}원')
print(f'에코프로 주가 : {네이버.text}원')
print(f'에코프로 주가 : {NCSOFT.text}원')
"""
# 사진 가져오기(서울 지하철- 위키디피아)
url = 'https://en.wikipedia.org/wiki/Seoul_Metropolitan_Subway'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
# print(html.head)
print(soup.title)
print(soup.title.name)
print(soup.title.string)
# 지하철 사진 경로
target_img = soup.find('img',
attrs={'alt':'Seoul Metro 2000 series train on Line 2'})
print(target_img)
# 소스 사진 읽기
target_img_src = target_img.get('src')
print("이미지 경로:", target_img_src)
target_img_response = requests.get('http:' + target_img_src)
print(target_img_response)
# 바이너리 파일 모드로 쓰기
with open('./output/train.jpg', 'wb') as f:
f.write(target_img_response.content) # 이미지: content
print("이미지 파일로 저장했습니다.")
| kiyongee2/green_pyworks | pylearning/웹 스크래핑.py | 웹 스크래핑.py | py | 5,186 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
33685054332 | from typing import List
from time import sleep
from models.cliente import Cliente
from models.conta import Conta
from utils.helper import verifica_tipo, validar_cpf, validar_nome, validar_email
contas: List[Conta] = []
def main() -> None:
menu()
def menu() -> None:
print('==========================================')
print('================= ATM ====================')
print("=========== Wellington's Bank ============")
print('==========================================')
print('Selecione uma opção no menu: ')
print('1 - Criar conta')
print('2 - Efetuar saque')
print('3 - Efetuar depósito')
print('4 - Efetuar transferência')
print('5 - Listar contas')
print('6 - Sair do sistema')
print('==========================================')
while True:
opcao: str = input('Digite a opção desejada: ')
if verifica_tipo(opcao):
if opcao == '1':
criar_conta()
elif opcao == '2':
efetuar_saque()
elif opcao == '3':
efetuar_deposito()
elif opcao == '4':
efetuar_transferencia()
elif opcao == '5':
listar_contas()
elif opcao == '6':
print('\nVolte sempre!')
sleep(2)
exit(0)
else:
print('Opção inválida.')
sleep(2)
menu()
else:
print('A opção desejada deve ser um número de 1 a 6. Tente novamente.')
sleep(1)
continue
def criar_conta() -> None:
print('--------------------------------')
print('| Informe os dados do cliente |')
print('--------------------------------')
print("\n(Para retornar ao menu principal, digite '0' seguido de 'Enter')\n")
sleep(1)
while True:
nome: str = input('Nome do cliente: ')
if nome == '0':
menu()
else:
if validar_nome(nome):
break
else:
print('Nome inválido. Por favor, digite novamente.')
sleep(1)
continue
while True:
email: str = input('E-mail do cliente: ')
if email == '0':
menu()
else:
if validar_email(email):
break
else:
print('E-mail inválido. Por favor, digite novamente.')
sleep(1)
continue
while True:
cpf: str = input('CPF do cliente: ')
if cpf == '0':
menu()
else:
if validar_cpf(cpf):
break
else:
print("CPF inválido. Por favor, digite no seguinte formato: '123.456.789-09'.")
sleep(2)
continue
while True:
data_nascimento: str = input('Data de nascimento do cliente: ')
if data_nascimento == '0':
menu()
else:
try:
cliente: Cliente = Cliente(nome, email, cpf, data_nascimento)
conta: Conta = Conta(cliente)
contas.append(conta)
break
except ValueError:
print('Por favor, digite no seguinte formato: dd/mm/aaaa')
sleep(2)
continue
print('')
print('Conta criada com sucesso!')
print('')
print('Dados da conta: ')
print('-----------------')
print(conta)
sleep(3)
menu()
def efetuar_saque() -> None:
if len(contas) > 0:
print("\n(Para retornar ao menu principal, digite '0' seguido de 'Enter')\n")
sleep(1)
while True:
try:
numero: int = int(input('Informe o número da sua conta: '))
break
except ValueError:
print('Por favor, digite somente números. Tente novamente.')
sleep(1)
continue
if numero == 0:
menu()
else:
conta: Conta = buscar_conta_por_numero(numero)
if conta:
valor: float = float(input('Informe o valor do saque: '))
conta.sacar(valor)
else:
print(f'Não foi encontrada a conta com número {numero}')
else:
print('\nAinda não existem contas cadastradas.\n')
sleep(3)
menu()
def efetuar_deposito() -> None:
if len(contas) > 0:
print("\n(Para retornar ao menu principal, digite '0' seguido de 'Enter')\n")
sleep(1)
numero: int = int(input('Informe o número da sua conta: '))
if numero == '0':
menu()
else:
conta: Conta = buscar_conta_por_numero(numero)
if conta:
valor: float = float(input('Informe o valor do depósito: '))
conta.depositar(valor)
else:
print(f'Não foi encontrada uma conta com número {numero}')
else:
print('\nAinda não existem contas cadastradas.\n')
sleep(3)
menu()
def efetuar_transferencia() -> None:
if len(contas) > 0:
print("\n(Para retornar ao menu principal, digite '0' seguido de 'Enter')\n")
sleep(1)
numero_o: int = int(input('Informe o número da sua conta: '))
if numero_o == '0':
menu()
else:
conta_o: Conta = buscar_conta_por_numero(numero_o)
if conta_o:
numero_d: int = int(input('Informe o número da conta destino: '))
conta_d: Conta = buscar_conta_por_numero(numero_d)
if conta_d:
valor: float = float(input('Informe o valor da transferência: '))
conta_o.transferir(conta_d, valor)
else:
print(f'A conta destino com número {numero_d} não foi encontrada.')
else:
print(f'A sua conta com número {numero_o} não foi encontrada.')
else:
print('\nAinda não existem contas cadastradas.\n')
sleep(3)
menu()
def listar_contas() -> None:
if len(contas) > 0:
print('')
print('--------------------')
print('Listagem de contas')
print('--------------------')
for conta in contas:
print(conta)
print('--------------------')
sleep(2)
print('')
else:
print('\nNão existem contas cadastradas.\n')
sleep(2)
menu()
def buscar_conta_por_numero(numero: int) -> Conta:
c: Conta = None
if len(contas) > 0:
for conta in contas:
if conta.numero == numero:
c = conta
return c
if __name__ == '__main__':
main()
| Wellington8962/Projetos_Python | BancoPy/banco.py | banco.py | py | 6,754 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.conta.Conta",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "utils.helper.verifica_tipo",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
30880145691 | import numpy as np
import cv2
import requests
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cv2.imwrite('pic.png',frame)
url = "http://localhost:8000/meter/piupload/"
fin = open('pic.png', 'rb')
files = {'file': fin}
try:
r = requests.post(url, files=files)
print(r.text)
finally:
fin.close() | SothanaV/smartmeter | pi/capup.py | capup.py | py | 305 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 13,
"usage_type": "call"
}
] |
31805780073 | import pickle
import time
from dnslib import A, NS, QTYPE, RR
qtype_to_int = {1: (QTYPE.A, A),
2: (QTYPE.NS, NS)}
class Cache:
TIME_CACHE_CLEANED = time.time()
def __init__(self):
self.cache = {}
for record_type in qtype_to_int.keys():
self.cache[record_type] = {}
def get_record(self, parsed_packet):
record_name = str(parsed_packet.q.qname)
q_type = parsed_packet.q.qtype
if q_type not in self.cache or record_name not in self.cache[q_type]:
return
reply = parsed_packet.reply()
for record in self.cache[q_type][record_name][0]:
reply.add_answer(self.get_pr_record(q_type, record_name, record[0], record[2]))
if len(self.cache[q_type][record_name][1]) > 0:
for record in self.cache[q_type][record_name][1]:
reply.add_auth(self.get_pr_record(q_type, record_name, record[0], record[2]))
if len(self.cache[q_type][record_name][2]) > 0:
for record in self.cache[q_type][record_name][2]:
reply.add_ar(self.get_pr_record(q_type, record_name, record[0], record[2]))
return reply.pack()
def get_pr_record(self, q_type, body, rec, time_tl):
return RR(body, qtype_to_int[q_type][0], rdata=qtype_to_int[q_type][1](rec), ttl=time_tl)
def add_rr_records(self, records):
q_type = records[0].rtype
q_name = str(records[0].rname)
self.cache[q_type][q_name] = [[], [], []]
res_list = []
for record in records:
print(record.ttl)
res_list.append((str(record.rdata), time.time(), record.ttl))
self.cache[q_type][q_name][0] = res_list
def add_auth_records(self, records):
if len(records) == 0:
return
res_list = []
for record in records:
print(record.ttl)
res_list.append((str(record.rdata), time.time(), record.ttl))
self.cache[records[0].rtype][str(records[0].rname)][1] = res_list
def add_ar_records(self, records):
if len(records) == 0:
return
res_list = []
for record in records:
print(record.ttl)
res_list.append((str(record.rdata), time.time(), record.ttl))
self.cache[records[0].rtype][str(records[0].rname)][2] = res_list
def remove_expired_records(self):
q_types = self.cache.keys()
for q_type in q_types:
q_names = self.cache[q_type].keys()
list_to_del = []
for q_name in q_names:
for record_part in range(0, 3):
len_of_part = len(self.cache[q_type][q_name][record_part])
if len_of_part == 0:
continue
for res_rec in self.cache[q_type][q_name][record_part]:
time_record_created = res_rec[1]
ttl = res_rec[2]
if time.time() - time_record_created > ttl:
list_to_del.append(q_name)
break
for q_name in list_to_del:
del self.cache[q_type][q_name]
self.TIME_CACHE_CLEANED = time.time()
def save_cache(self, cache_file_name):
with open(cache_file_name, 'wb+') as dump:
pickle.dump(self, dump)
@staticmethod
def load_cache(cache_file_name):
try:
with open(cache_file_name, 'rb') as dump:
cache = pickle.load(dump)
print('Cache loaded')
return cache
except FileNotFoundError:
print('Cache created')
return Cache()
except EOFError:
print('Cache is empty')
return Cache()
| Eepakura/task_2-dns_server | cache.py | cache.py | py | 3,777 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dnslib.QTYPE.A",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "dnslib.QTYPE",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "dnslib.A",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "dnslib.QTYPE.NS",
"line_nu... |
27188793196 | from Quote import Quote
from csv import reader
from pyfiglet import Figlet
from termcolor import colored
from random import randint
end = 1
book_of_quote = []
with open("qoute.csv" , newline='') as plik:
csvv = reader(plik)
csvv.__next__()
for q in csvv:
book_of_quote.append(Quote(q[0],q[1],q[2]))
hello= Figlet()
print(colored(hello.renderText("zgaduj zgadula!"),"green"))
#print(len(book_of_quote))
while(end):
cytat = book_of_quote.pop(randint(0, len(book_of_quote)-1))
print(cytat.cytat)
gues = input("who said this? \n")
if gues != cytat.tworca:
cytat.daj_pierwsza_podpowiedz()
gues = input("who said this? \n")
if gues != cytat.tworca:
cytat.daj_druga_podpowiedz()
gues = input("who said this? \n")
if gues != cytat.tworca:
cytat.daj_trzecia_podpowiedz()
gues = input("who said this? \n")
if gues != cytat.tworca:
if input(f"Unfortunetly No, this is quote of {cytat.tworca} do you wanna play again? y/n \n ") == "n": end = 0
else:
if input("Bravo! do you wanna play again? y/n \n") == "n": end = 0
else:
if input("Bravo! do you wanna play again? y/n \n") == "n": end = 0
else:
if input("Bravo! do you wanna play again? y/n \n") == "n": end = 0
else:
if input("Bravo! do you wanna play again? y/n \n") == "n": end=0
| Swiatomil/scrap-qoute-game | game.py | game.py | py | 1,490 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "Quote.Quote",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyfiglet.Figlet",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_... |
15219909785 | import os
from pathlib import Path
import coloredlogs, logging
class FileObserver:
"""Searches in the time interval of search_interval for files in the Input folder and saves the filename, filetype
and tablename in a list of list in the object atribute input_files.
For instance: [{filename: 'file1', filetype: 'json', tablename:'kunden'},
{filename: 'file2', filetype: 'csv', tablename:'produkt'}]
"""
def __init__(self) -> None:
self.data_path = Path(__file__).parent.parent / "Input"
self.input_files = []
coloredlogs.install()
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
def get_files(self) -> list:
"""calls the search function and returns the
Returns:
list: the table the content of the file should go, or none
"""
# delete files in list von previous searches
self.input_files.clear()
self.search_files()
self.input_files = sorted(self.input_files, key=lambda d: d['table_name'], reverse=True)
return self.input_files
def search_files(self):
"""Searching for files in data_path und calles the set_input_file function to update filename and filetype
"""
logging.info('Start file searching process..')
for filename in os.listdir(self.data_path):
logging.info('Adding file to list: %s', filename)
tablename = self.get_table_name(filename)
if not tablename:
# skip the file if tablename is None
logging.warning('Could not identify talbe name, skipping file: %s.', filename)
continue
# split filname in name and extension
root, ext = os.path.splitext(filename)
# call add_input_file function to set depending on the filetype
if root.startswith('application') and ext == '.csv':
self.add_input_file(filename,"csv", tablename)
elif root.startswith('application') and ext == '.json':
self.add_input_file(filename,"json", tablename)
else:
logging.warning('Filename or type incorrect, skipping file: %s.', filename)
if not os.listdir(self.data_path):
# Info that there are no files in the folder
logging.info("The input folder is empty.")
def add_input_file(self, filename: str, filetype: str, table_name: str):
"""Set the object input_file variable to the filename filetype and table_name as a list in a list.
Args:
filename (str): The filename with filetypeextension.
filetype (str): The filetype of the file.
filetype (str): The table_name where the data is spossed to go.
"""
if filename:
self.input_files.append({"filename": filename,
"filetype": filetype,
"table_name": table_name})
def get_table_name(self, filename):
"""Checking for strings in the filename and returns the table the content sould go
Args:
filename (str): filename with filetype extension
Returns:
str: the table the content of the file should go, or none
"""
if "produkt" in filename:
return "produkt"
elif "kunde" in filename:
return "kunde"
elif "application2" in filename:
return "auftrag"
else:
return None
if __name__ == '__main__':
observer = FileObserver()
input_files = observer.get_files()
print(input_files)
for file in input_files:
print(f'Filename: {file["filename"]} | Filetype {file["filetype"]} | Tablename: {file["table_name"]}') | kteppris/dwh | scripts/file_observer.py | file_observer.py | py | 3,847 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "coloredlogs.install",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
... |
14905298243 | import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""A class to manage bullets fired from the ship.
Attributes
----------
rect: pygame.Rect
Rectangular coordinates of the bullet
Methods
-------
update()
Move the bullet up the screen
draw_bullet()
Draw the bullet to the screen
"""
def __init__(self, ai_settings, screen, ship):
"""Create a bullet object at the ship's current location.
Parameters
----------
ai_settings: Settings
All settings for alien invasion
screen: pygame.Surface
The screen on which the bullet will be drawn
ship: Ship
The ship that fires the bullet
"""
super().__init__()
self._screen = screen
# Create a bullet rect at (0, 0) and set correct position.
self.rect = pygame.Rect(0, 0, ai_settings.bullet_width,
ai_settings.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.bottom = ship.rect.top
# Store the bullet's position as a decimal value.
self._y = float(self.rect.y)
self._color = ai_settings.bullet_color
self._speed_factor = ai_settings.bullet_speed_factor
def update(self):
"""Move the bullet up the screen."""
# Update the decimal position of the bullet.
self._y -= self._speed_factor
self.rect.y = self._y
def draw_bullet(self):
"""Draw the bullet to the screen."""
pygame.draw.rect(self._screen, self._color, self.rect)
| DeepWalter/python-projects | alien_invasion/bullet.py | bullet.py | py | 1,622 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.sprite.Sprite",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.Rect",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"l... |
15973370719 | from dataclasses import dataclass
import glob
import json
from multiprocessing import Pool, cpu_count
import os
from config_manager.config import Config
import commits
import variability
class MetricsConfig(Config):
repos_folder: str
output_json: str
@dataclass
class RepoMetrics:
variability: variability.Dependencies
commit_count: list[int]
def is_repo(path: str) -> bool:
return os.path.exists(
os.path.join(path, ".git")
)
def calculate_repo_metrics(
folder: str,
java_extensions: tuple[str] = (".java", )
) -> dict[tuple[str], RepoMetrics]:
"""
Arguments:
folder: str -> path to repo folder
Returns:
mapping [class_path, metrics] -> metrics for each class
"""
java_files = list(filter(
lambda name: any(name.endswith(ext) for ext in java_extensions),
glob.glob(os.path.join(folder, "**"), recursive=True)
))
variabilities = variability.get_variability(folder, java_files)
changes_per_commit = commits.get_changes_count(folder, java_files)
common_modules = set(variabilities.keys()).intersection(
set(changes_per_commit.keys())
)
return {
module: RepoMetrics(
variability=variabilities[module],
commit_count=changes_per_commit[module]
)
for module in common_modules
}
def dump_metrics(path: str, repo_data: dict[str, dict[tuple[str], RepoMetrics]]):
with open(path, "w+") as f:
decoded = {
repo_name:
{
"/".join(module): {
"depends_on": metrics.variability.depends_on,
"dependency_for": metrics.variability.dependency_for,
"dependency_score": metrics.variability.score,
"commit_count": metrics.commit_count
}
for module, metrics in repo_metrics.items()
}
for repo_name, repo_metrics in repo_data.items()
}
f.write(json.dumps(decoded))
def main(config: MetricsConfig):
    """Scan config.repos_folder for git repositories, compute metrics in parallel, dump JSON."""
    # keep only subdirectories that look like git repositories
    repos = list(filter(
        is_repo,
        map(
            lambda name: os.path.join(config.repos_folder, name),
            os.listdir(config.repos_folder)
        )
    ))
    print(f"{len(repos)} repositories")

    repo_data = dict()
    # pool.map preserves input order, so zip pairs each path with its metrics
    with Pool(cpu_count()) as pool:
        for repo_path, metrics in zip(repos, pool.map(calculate_repo_metrics, repos)):
            # strip the base folder so keys are repo names, not absolute paths
            repo_data[repo_path.removeprefix(config.repos_folder)] = metrics

    dump_metrics(config.output_json, repo_data)
if __name__ == "__main__":
main(MetricsConfig().parse_arguments("Metrics calculator"))
| evjeny/code_main_sequence_analysis | calculate_metrics.py | calculate_metrics.py | py | 2,663 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config_manager.config.Config",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "variability.Dependencies",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 18,
"usage_type": "name"
},
{
"... |
72995391073 | import pandas as pd
import numpy as np
import sys
sys.path.append(
'/groups/umcg-lifelines/tmp01/projects/ov20_0554/umcg-aewijk/covid19-qol-modelling/src/python')
from config import get_config
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import warnings
warnings.filterwarnings('ignore')
def add_weather_QOL(data_QOL_path, question_15_or_more_path):
    """
    Merge KNMI weather data with the quality-of-life questionnaire data.
    Weather data comes from https://www.knmi.nl/nederland-nu/klimatologie/daggegevens
    Return
        final_dataframe: daily mean QOL left-joined with weather columns
        total_df: same join but outer, so weather-only days are kept
        df_participants: per-participant rows left-joined with weather columns
    """
    # Read file
    knmi_data = pd.read_excel(
        f'{data_QOL_path}Weather_data.xlsx')
    knmi_data.drop('Unnamed: 0', axis=1, inplace=True)
    # empty / whitespace-only cells become NaN
    knmi_data = knmi_data.replace(r'^\s*$', np.NaN, regex=True)
    knmi_data['date'] = pd.to_datetime(knmi_data['date'])
    # How the students did earlier with the average of the temperature
    avg = (knmi_data['Minimum Temperature (\u00B0C)'] + knmi_data['Maximum Temperature (\u00B0C)']) / 2
    knmi_data.insert(3, "avg_temp", avg)
    # adding 1..21 day rolling mean weather avg
    for days in range(1, 22):
        knmi_data[f'{days}day_rolling_avg_temp'] = knmi_data.rolling(days, min_periods=1)['avg_temp'].mean()
        knmi_data[f'{days}_max_temp'] = knmi_data.rolling(days, min_periods=1)['Maximum Temperature (\u00B0C)'].mean()
    # read_file
    df = pd.read_csv(f'{question_15_or_more_path}num_quest_1_filter.tsv.gz', sep='\t',
                     encoding='utf-8', compression='gzip')  # num_quest_1_filter, QOL_data_VL29
    # Select columns
    df = df[['project_pseudo_id', 'responsedate', 'qualityoflife']]
    # Groupby responsedate: count of responses per date
    df['size_responsedate'] = df.groupby(['responsedate'])[["responsedate"]].transform('size')
    # df = df[(df['size_participants'] >= 15) & (df['size_responsedate'] >= 50)]
    # keep only dates with at least 50 responses
    df = df[df['size_responsedate'] >= 50]
    df['responsedate'] = pd.to_datetime(df['responsedate'])
    df.rename({'responsedate': 'date'}, axis=1, inplace=True)
    # Merge df and knmi_data
    df_participants = pd.merge(df, knmi_data, how='left', on=['date'])
    # Groupby date: mean qualityoflife and size_responsedate per day
    df_new = df.groupby(['date'])[["qualityoflife", 'size_responsedate']].mean().reset_index()
    df_new.columns = ['date', 'qualityoflife', 'size_responsedate']
    # Merge files
    final_dataframe = pd.merge(df_new, knmi_data, how="left", on=["date"])
    total_df = pd.merge(df_new, knmi_data, how="outer", on=["date"])
    # Drop columns
    final_dataframe = final_dataframe.drop(['station'], axis=1)
    # Split date into Year / Month / Day columns
    final_dataframe['date'] = pd.to_datetime(final_dataframe['date'])
    final_dataframe['Year'] = final_dataframe['date'].dt.year
    final_dataframe['Month'] = final_dataframe['date'].dt.month
    final_dataframe['Day'] = final_dataframe['date'].dt.day
    return final_dataframe, total_df, df_participants
def add_hospitalization(final_dataframe, total_df, df_participants, data_QOL_path):
    """
    Add RIVM daily COVID-19 hospitalization counts to all three dataframes.
    data comes from # https://data.rivm.nl/meta/srv/dut/catalog.search#/metadata/4f4ad069-8f24-4fe8-b2a7-533ef27a899f
    Return
        final_dataframe: with a 'daily_hospitalization' column (left join on date)
        total_df: same column, outer join so all dates survive
        df_participants: same column, left join
    """
    # Read files (current file plus the archived file up to 03-10-2021)
    hospitalization_1 = pd.read_csv(f'{data_QOL_path}COVID-19_ziekenhuisopnames.csv', sep=';', encoding='utf-8')
    hospitalization_2 = pd.read_csv(f'{data_QOL_path}COVID-19_ziekenhuisopnames_tm_03102021.csv', sep=';',
                                    encoding='utf-8')
    # Concat files
    hospitalization = pd.concat([hospitalization_1, hospitalization_2])
    hospitalization['Date_of_statistics'] = pd.to_datetime(hospitalization['Date_of_statistics'])
    # sum admissions over all reporting rows per day
    hospitalization_grouped = hospitalization.groupby(['Date_of_statistics']).sum().reset_index()
    # Rename columns
    hospitalization_grouped.rename(
        columns={'Date_of_statistics': 'date', 'Hospital_admission_notification': 'daily_hospitalization'},
        inplace=True)
    hospitalization_grouped = hospitalization_grouped[['date', 'daily_hospitalization']]
    # Merge files
    final_dataframe = pd.merge(final_dataframe, hospitalization_grouped, how="left", on=["date"])
    df_participants = pd.merge(df_participants, hospitalization_grouped, how="left", on=["date"])
    total_df = pd.merge(total_df, hospitalization_grouped, how="outer", on=["date"])
    return final_dataframe, total_df, df_participants
def add_stringency_index(final_dataframe, total_df, df_participants, data_QOL_path):
    """
    Merge the Oxford COVID stringency index (Our World in Data) into the dataframes.
    data comes from #https://ourworldindata.org/covid-stringency-index#learn-more-about-the-data-source-the-oxford-coronavirus-government-response-tracker
    Return
        final_dataframe: left-joined with the OWID columns on date
        total_df: outer-joined with the OWID columns on date
        df_participants: left-joined with the OWID columns on date
    """
    owid = pd.read_csv(
        f'{data_QOL_path}owid-covid-data.csv', sep=';',
        encoding='utf-8')
    # Keep only the rows for the Netherlands
    owid = owid[owid['location'] == 'Netherlands']
    owid['date'] = pd.to_datetime(owid['date'], dayfirst=True)
    # Avoid clashing with the RIVM hospitalization column name
    owid.rename(columns={'hosp_patients': 'daily_hospitalization_2'}, inplace=True)
    # Merge the OWID columns into each dataframe
    merged_final = pd.merge(final_dataframe, owid, how="left", on=["date"])
    merged_participants = pd.merge(df_participants, owid, how="left", on=["date"])
    merged_total = pd.merge(total_df, owid, how="outer", on=["date"])
    return merged_final, merged_total, merged_participants
def sunrise_sunset(final_dataframe, total_df, df_participants, data_QOL_path):
    """
    Add sunrise - sunset (daylight hours) data to all three dataframes.
    data comes from #https://www.msimons.nl/tools/daglicht-tabel/index.php?year=2020&location=Groningen
    Return
        final_dataframe: with daylight-hour columns (left join on date)
        total_df: same columns, outer join
        df_participants: same columns, left join
    """
    # Read data
    daylight_hours = pd.read_csv(
        f'{data_QOL_path}sunrise_sunset.csv', sep=';',
        encoding='utf-8')
    daylight_hours['date'] = pd.to_datetime(daylight_hours['date'], dayfirst=True)
    daylight_hours.dropna(inplace=True)
    # normalize clock times to HH:MM:SS, then convert to fractional hours
    daylight_hours['sunrise'] = daylight_hours['sunrise'].apply(lambda x: pd.to_datetime(x).strftime('%H:%M:%S'))
    daylight_hours['sunset'] = daylight_hours['sunset'].apply(lambda x: pd.to_datetime(x).strftime('%H:%M:%S'))
    daylight_hours['sunrise'] = pd.to_timedelta(daylight_hours['sunrise'].astype(str)).dt.total_seconds() / 3600
    daylight_hours['sunset'] = pd.to_timedelta(daylight_hours['sunset'].astype(str)).dt.total_seconds() / 3600
    # daylight_hours = sunset - sunrise
    daylight_hours['daylight_hours'] = (daylight_hours['sunset'] - daylight_hours['sunrise'])
    # 1..19 day shifted (lagged) daylight hours
    for i in range(1, 20, 1):
        daylight_hours[f'{i}day_daylight_hours'] = daylight_hours['daylight_hours'].shift(i)
        # daylight_hours[f'{i}day_daylight_hours'] = daylight_hours.rolling(i, min_periods=1)['daylight_hours'].mean()
    daylight_hours = daylight_hours.drop(['sunset', 'sunrise'], axis=1)
    # Merge files
    df_participants = pd.merge(df_participants, daylight_hours, how="left", on=["date"])
    final_dataframe = pd.merge(final_dataframe, daylight_hours, how="left", on=["date"])
    total_df = pd.merge(total_df, daylight_hours, how="outer", on=["date"])
    return final_dataframe, total_df, df_participants
def add_other_cat(final_dataframe, data_QOL_path):
    """
    Merge financial / news-sentiment columns into final_dataframe by date.
    Return
        final_dataframe: left-joined with the financial data on date
    """
    # note: source file name really is spelled 'finacial_data.xlsx'
    financial = pd.read_excel(
        f'{data_QOL_path}finacial_data.xlsx')
    merged = pd.merge(final_dataframe, financial, how='left', on=['date'])
    return merged
| molgenis/covid19-qol-modelling | src/python/make_dataframe_for_correlation.py | make_dataframe_for_correlation.py | py | 7,731 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotl... |
25452018907 | import pygame, sys
from grid import Grid
from ai import AI
#Get the number of rows and cols from the user.
rows = cols = int(input('Enter the number of rows and cols: '))

# 600x600 pixel window for the maze
screen = pygame.display.set_mode((600,600))
pygame.display.set_caption('Maze Solver')

#Calculating res of each cell
res = screen.get_height()//rows  # pixel size of one square cell
def main():
    """Run the maze demo: generate a maze, then let the AI search and draw its path."""
    #Initializing the maze.
    grid = Grid(rows, cols, res)
    grid.generate()

    #Initialize the ai with start (top-left cell) and goal (bottom-right cell)
    ai = AI(grid.cells[0], grid.cells[(rows-1)*cols+(cols-1)], grid)

    while True:
        for event in pygame.event.get():
            #Quiting if asked to (window close or Escape key)
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()

        #Ai searches for path (one step per frame)
        ai.search()

        #Renders maze and ai's path from start
        grid.render(screen)
        ai.render_path(screen)

        pygame.display.update()
main()
| pratripat/Maze-solving-AI | visualizer.py | visualizer.py | py | 1,057 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "p... |
40003845498 | from ortools.linear_solver import pywraplp
from collections import namedtuple
Item = namedtuple("Item", ['index', 'value', 'weight'])
DEBUG = 0
def solve_it(input_data):
    """Parse a raw knapsack-with-conflicts instance and delegate to the solver.

    The first line holds item_count, capacity and conflict_count; the next
    item_count lines hold "value weight" pairs; the remaining conflict_count
    lines hold conflicting item-index pairs.
    """
    lines = input_data.split('\n')

    header = lines[0].split()
    item_count = int(header[0])
    capacity = int(header[1])
    conflict_count = int(header[2])

    # items are 0-indexed even though they start on line 1
    items = [
        Item(idx, int(tokens[0]), int(tokens[1]))
        for idx, tokens in enumerate(
            line.split() for line in lines[1:item_count + 1]
        )
    ]

    conflicts = [
        (int(tokens[0]), int(tokens[1]))
        for tokens in (
            line.split()
            for line in lines[item_count + 1:item_count + conflict_count + 1]
        )
    ]

    return knapsackNaive(item_count, items, capacity, conflict_count, conflicts)
def knapsackNaive(num_items, items, capacity, num_conflicts, conflicts):
    """Solve the knapsack-with-conflicts instance as a 0/1 integer program (CBC).

    Returns the solution in the course's output format: the objective value on
    the first line, then the 0/1 selection vector on the second.
    """
    if DEBUG >= 1:
        print(f"numero de itens = {num_items}")
        print(f"capacidade da mochila = {capacity}")
        print(f"numero de conflitos = {num_conflicts}")

    if DEBUG >= 2:
        print("Itens na ordem em que foram lidos")
        for item in items:
            print(item)
        print()

    if DEBUG >= 2:
        print("Conflitos na ordem em que foram lidos")
        for conflict in conflicts:
            print(conflict)
        print()

    # Modify this code to run your optimization algorithm
    solution = [0]*num_items
    solution_value = 0
    solution_weight = 0

    # Using OR-Tools
    solver = pywraplp.Solver('SolveIntegerProblem', pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)

    # one binary decision variable per item
    x = []
    for j in range(0, num_items):
        x.append(solver.IntVar(0, 1, 'x[%d]' % j))

    # Capacity restrictions
    solver.Add(solver.Sum([items[i].weight*x[i] for i in range(num_items)]) <= capacity)

    # Conflict restrictions: conflicting items cannot both be selected
    for i in range(num_conflicts):
        solver.Add((x[conflicts[i][0]]+x[conflicts[i][1]]) <= 1)

    # Objective -> maximize total value in the bag
    solver.Maximize(solver.Sum([items[i].value*x[i] for i in range(num_items)]))

    # solution status (not used in the moment)
    result_status = solver.Solve()

    # formating solution output
    solution_value = int(solver.Objective().Value())
    for i in range(len(x)):
        solution[i] = int(x[i].solution_value())

    # prepare the solution in the specified output format
    output_data = str(solution_value) + '\n'
    output_data += ' '.join(map(str, solution))

    return output_data
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        output_data = solve_it(input_data)
        print(output_data)
        # persist the solution next to the input file as "<input>.sol"
        solution_file = open(file_location + ".sol", "w")
        solution_file.write(output_data)
        solution_file.close()
    else:
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')
| thiagoaraujocampos/Knapsack-Problem | solver.py | solver.py | py | 3,161 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "ortools.linear_solver.pywraplp.Solver",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "ortools.linear_solver.pywraplp",
"line_number": 54,
"usage_type": "name"
},... |
28567878265 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Task controller.
The controller stores:
- lists of tasks (ProcessorController.model),
- their associated processors and cache (ProcessorController.data).
It can add/delete/update tasks, emitting the corresponding events
"""
from typing import Union, Type, Dict, Optional, List, TYPE_CHECKING
from . import Task, TaskIsUniqueError
if TYPE_CHECKING:
from taskcontrol.processor import Cache, Processor # noqa
appendtask = type('appendtask', (), {}) # pylint: disable=invalid-name
class TaskCacheList:
    """Holds the list of tasks (model) together with their processor cache (data)."""
    # tasks, in pipeline order
    model: List[Task]
    # processor cache; NOTE(review): never assigned in __init__ — presumably
    # set externally or by a subclass before the other methods run, confirm.
    data: 'Cache'
    # whether runs should operate on copies — TODO confirm exact semantics
    copy: bool
    __slots__ = ('model', 'data', 'copy')
    def __init__(self, copy = True):
        self.model: List[Task] = []
        self.copy = copy

    def __contains__(self, itm):
        # membership accepts the same lookups as task(): instance, index or type
        return self.task(itm) in self.model

    def task(self, task:Union[Type[Task],int], noemission = False) -> Optional[Task]:
        """Return a task, looked up by instance, index or task type.

        Raises KeyError when *noemission* is set and nothing matches.
        """
        if isinstance(task, Task):
            return task

        if isinstance(task, int):
            return self.model[task]

        tsk = None
        if isinstance(task, type):
            # first task in the list that is an instance of the requested type
            try:
                tsk = next((i for i in self.model if isinstance(i, task)), None)
            except StopIteration:
                pass

        if tsk is None and noemission:
            raise KeyError("Missing task")
        return tsk

    def add(self, task, proctype, index = appendtask):
        """Add *task* to the list, wrapping it in a *proctype* processor.

        *index* defaults to the appendtask sentinel, meaning append at the end.
        """
        # refuse tasks that must be unique and already exist in the model
        TaskIsUniqueError.verify(task, self.model)
        proc = proctype(task)

        if index is appendtask:
            self.model.append(task)
            self.data.append(proc)
            return []

        self.model.insert(index, task)
        return self.data .insert(index, proc)

    def remove(self, task):
        "removes a task from the list"
        tsk = self.task(task)

        if tsk in self.model:
            ind = self.model.index(tsk)
            self.model.pop(ind)
            return self.data.remove(ind)
        return None

    def update(self, tsk):
        "clears cached data starting at *tsk*"
        return self.data.delcache(tsk)

    def cleancopy(self) -> 'TaskCacheList':
        "returns a cache with only the processors"
        cpy = self.__class__(copy = self.copy)
        # the model list is shared; only the cache is copied clean
        cpy.model = self.model
        cpy.data  = self.data.cleancopy()
        return cpy

    def clear(self):
        "clears all cached data"
        self.data.delcache()

    def keepupto(self, tsk:Task = None, included = True) -> 'TaskCacheList':
        """Return a new TaskCacheList truncated at *tsk*.

        When *tsk* is None the whole list is kept; *included* controls whether
        *tsk* itself is part of the result.
        """
        ind          = None if tsk is None else self.data.index(tsk)
        other        = type(self)(copy = self.copy)
        other.model  = self.model[:None if ind is None else ind+(1 if included else 0)]
        other.data   = self.data.keepupto(ind, included)
        return other

    def run(self, tsk:Task = None, copy = None, pool = None):
        """
        Iterates through the list up to and including *tsk*.
        Iterates through all if *tsk* is None
        """
        raise NotImplementedError("implemented in the controller module")

    @classmethod
    def create(cls, *models: Task, processors = None) -> 'TaskCacheList':
        """
        Creates a ProcessorController containing a list of task-processor pairs.
        Parameters:
        -----------
        models: Tuple[Task]
            a sequence of tasks
        processors: Dict[Type[Task], Processor], Iterable[Type[Processor]] or None
            this argument allows defining which processors to use for implementing
            the provided tasks
        """
        raise NotImplementedError("implemented in the controller module")

    @classmethod
    def register(
            cls,
            processor = None,
            cache     = None,
            force     = False,
    ) -> Dict[Type[Task], Type['Processor']]:
        "registers a task processor"
        raise NotImplementedError("implemented in the controller module")
| depixusgenome/trackanalysis | src/taskmodel/processors.py | processors.py | py | 4,081 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line... |
7499822304 | import numpy as np
import sympy
from sympy.abc import alpha
from funx import fun,gfun
from armijo import armijo
def line_search(xk,dk): # exact line search
    """Exact line search: symbolically solve grad f(xk + alpha*dk) . dk = 0
    for the step size; falls back to a fixed step of 0.01 when that fails."""
    alphak = None
    eq = np.dot(gfun(xk+alpha*dk).T,dk)[0]
    alpha0 = sympy.solve(eq,[alpha])
    if len(alpha0) > 1:
        # several candidate roots: keep the first one that decreases f
        for a in alpha0:
            a = a[0]
            try:
                if fun(xk + float(a) * dk) < fun(xk):
                    alphak = float(a)
                    break
            except TypeError:
                # complex / non-numeric root cannot be cast to float — skip it
                continue
    else:
        # NOTE(review): indexing the solve() result with the symbol `alpha`
        # assumes sympy returned a dict here — confirm; a plain list result
        # would raise TypeError.
        alphak = float(alpha0[alpha])
    if alphak is None: # exact search failed: fall back to a fixed step
        alphak = 0.01
        print('精准线搜索失效,使用固定步长法')
    return alphak
def Min(x0,cho=1,eps=10**(-5)): # core routine: gradient descent with a selectable step rule
    """Gradient descent starting at x0.

    cho selects the step-size rule: 1 = fixed step, 2 = exact line search,
    3 = inexact (Armijo) line search. Stops when the gradient norm drops
    below eps. Returns (approximate minimizer, iteration count).
    """
    xk = x0
    count = 0
    while np.linalg.norm(gfun(xk)) > eps: # norm of the gradient
        count += 1
        dk = -gfun(xk)  # steepest-descent direction
        if cho == 2:
            alpha = line_search(xk,dk) # exact line-search step size
        elif cho == 3:
            alpha = armijo(xk,dk) # inexact (Armijo) step size
        else:
            alpha = 0.01
        xk = xk+alpha*dk
        # print('x_(k+1)点:',x_k1.T, ' 函数值:',fun(x_k1))
    return xk,count
if __name__ == '__main__':
    # compare the three step-size rules from the same starting point
    x0 = np.array([[0],[0]])
    x1 = Min(x0,1) # fixed step size
    x2 = Min(x0,2) # exact line search
    x = Min(x0,3) # inexact (Armijo) line search
    print('固定步长法,近似极小值点',x1[0].T,'函数值:',fun(x1[0]),'迭代次数:',x1[1])
    print('精确线搜索,近似极小值点',x2[0].T,'函数值:',fun(x2[0]),'迭代次数:',x2[1])
    print('非精确线搜索,近似极小值点',x[0].T,'函数值:',fun(x[0]),'迭代次数:',x[1])
| LMC20020909/HUST-SSE-Curriculum-Design | 数学建模与最优化/最优化方法-源代码/梯度下降法.py | 梯度下降法.py | py | 1,730 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.dot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "funx.gfun",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sympy.abc.alpha",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sympy.solve",
"line_number": 10,... |
38425972289 | #!/usr/bin/env python3
# coding: utf-8
# add to crontab
import json, time
import time, datetime
import yaml
import argparse
import os
from influxdb import InfluxDBClient
MEASUREMENT_OUT = 'holtwinters'
MEASUREMENTS= [
{
'measurement_in': 'elasticsearch_jvm',
'value_in': 'mem_heap_used_percent',
'measurement_out': MEASUREMENT_OUT,
'value_out': 'es_jvm_heap_used_percent',
'group': [
'node_name'
]
},
{
'measurement_in': 'mem',
'value_in': 'used_percent',
'measurement_out': MEASUREMENT_OUT,
'value_out': 'mem_used_percent',
'group': [
'host'
]
},
{
'measurement_in': 'disk',
'value_in': 'used_percent',
'measurement_out': MEASUREMENT_OUT,
'value_out': 'disk_used_percent',
'group': [
'host',
'path'
]
},
{
'measurement_in': 'system',
'value_in': 'load1',
'measurement_out': MEASUREMENT_OUT,
'value_out': 'sys_load1',
'group':[
'host'
]
}
]
def parse_arguments():
    """Build and evaluate the command line interface for this script."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-c', '--config', default='config.yml')
    cli.add_argument('-p', '--predictions', default=1, type=int)
    cli.add_argument('-t', '--time', default=86400, type=int)
    cli.add_argument('-b', '--bucket', default='1h')
    cli.add_argument('--test', action='store_true', default=False)
    cli.add_argument('-s', '--seasons', default=0)
    return cli.parse_args()
def parseConfig(file):
    """Load the YAML configuration at *file* and return the parsed structure.

    Uses yaml.safe_load: calling yaml.load without an explicit Loader is
    deprecated since PyYAML 5.1 (and a TypeError in PyYAML 6), and the full
    loader can construct arbitrary Python objects from untrusted input.
    """
    with open(file, 'r') as ymlfile:
        return yaml.safe_load(ymlfile)
def getPeriod(interval=60):
    """Return ('then', 'now') timestamp strings spanning the last *interval* seconds,
    formatted as '%Y-%m-%d %H:%M:%S'."""
    timeformat = '%Y-%m-%d %H:%M:%S'
    end_epoch = time.time()
    start_epoch = end_epoch - interval
    stop = datetime.datetime.fromtimestamp(end_epoch).strftime(timeformat)
    start = datetime.datetime.fromtimestamp(start_epoch).strftime(timeformat)
    return start, stop
def holtwintersQuery(value, predictions, seasons, measurement, start, stop, bucket, group):
    """Build the InfluxQL holt_winters forecast query for one measurement."""
    tags = ", ".join(group)
    template = """select holt_winters(last("%s"), %s, %s) from %s
    WHERE time >= \'%s\'
    AND time <= \'%s\'
    GROUP BY time(%s), %s """
    return template % (value, predictions, seasons, measurement, start, stop, bucket, tags)
def connect(config):
    """Build an InfluxDBClient from a config mapping (host/port/database/credentials/ssl)."""
    return InfluxDBClient(
        host=config['host'],
        port=config['port'],
        database=config['database'],
        username=config['username'],
        password=config['password'],
        ssl=config['ssl'],
        verify_ssl=config['verify_ssl']
    )
def main():
    """Read config, run Holt-Winters forecast queries, write the predictions back."""
    args = parse_arguments()
    cfg = parseConfig(args.config)
    period = args.time
    db_in = connect(cfg['input'])
    # the output DB falls back to the input DB when not configured separately
    if 'output' in cfg:
        db_out = connect(cfg['output'])
    else:
        db_out = db_in
    then, now = getPeriod(period)
    bulk = []
    for query in MEASUREMENTS:
        q = holtwintersQuery(
            value=query['value_in'],
            predictions=args.predictions,
            seasons=args.seasons,
            measurement=query['measurement_in'],
            start=then,
            stop=now,
            bucket=args.bucket,
            group=query['group']
        )
        results = db_in.query(q)
        # NOTE(review): _get_series() is a private influxdb-python API —
        # confirm it still exists in the pinned client version.
        for res in results._get_series():
            for prediction in res['values']:
                # NOTE(review): this local name shadows the json module inside the loop
                json = {
                    'time': prediction[0],
                    'measurement': query['measurement_out'],
                    'tags': res['tags'],
                    'fields': {
                        query['value_out']: prediction[1]
                    }
                }
                bulk.append(json)
    # --test prints the points instead of writing them to InfluxDB
    if args.test == False:
        try:
            db_out.write_points(bulk, time_precision='s')
            print("Sent", (str(len(bulk))), "items to influxdb")
        except Exception as e:
            print("FAIL:", e)
    else:
        print(bulk)
if __name__ == "__main__":
main()
| markuskont/influx-holtwinters | main.py | main.py | py | 4,227 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtim... |
15074079513 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 09:00:01 2023
@author: Cathe
"""
import matplotlib.pyplot as plt
import os
import numpy as np
import scipy.io
import re
from matplotlib import rc
from scipy.stats import pearsonr
def is_float(string):
    """Return True when *string* parses as a float, False otherwise."""
    try:
        float(string)
    except ValueError:
        return False
    return True
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 30}
aacode = {'A': 'ALA', 'R': 'ARG', 'N': 'ASN', 'D': 'ASP', 'C': 'CYS', 'Q': 'GLN', 'E': 'GLU', 'G': 'GLY', 'H': 'HIS', 'I': 'ILE', 'L': 'LEU', 'K': 'LYS', 'M': 'MET', 'F': 'PHE', 'P': 'PRO', 'S': 'SER', 'T': 'THR', 'W': 'TRP', 'Y': 'TYR', 'V': 'VAL'}
with open('/scratch/qz886/Clustercenters_15res/Pnear_list.txt') as f:
lines = f.readlines()
candidates = []
scores = []
seqs = []
for l in lines:
if l != "\n":
candidates.append(l.split()[0])
scores.append(float(l.split()[1]))
seq = l.split()[2]
res1 = seq.split('[')[1].split(':')[0]
resn = seq.split('[')[-1].split(':')[0]
if res1 == 'HIS_D':
res1 = 'DHIS'
if resn == 'HIS_D':
resn = 'DHIS'
residues = [res1]
lpos = [m.start() for m in re.finditer('\[', seq)]
rpos = [m.start() for m in re.finditer('\]', seq)]
seq = seq[rpos[0]+1:lpos[-1]-1]
p = 0
while p < len(seq):
if p == len(seq)-1:
residues.append(aacode[seq[p]])
p += 1
else:
if seq[p+1] != '[':
residues.append(aacode[seq[p]])
p += 1
else:
if seq[p+2:p+6] == 'HIS_':
residues.append('DHIS')
p += 8
else:
residues.append(seq[p+2:p+6])
p += 7
residues.append(resn)
s = residues[0]
for r in range(1,len(residues)):
s = s+'-'+residues[r]
seqs.append(s)
lamda = 1.5
kbT = 0.62
Pnear = []
with open('/scratch/qz886/Clustercenters_15res/Pnear_Candidates/Pnear_Cartesian_15res_1.out', 'r') as f:
plines = f.readlines()
with open('/scratch/qz886/Clustercenters_15res/Pnear_Candidates/Pnear_Cartesian_15res_2.out', 'r') as f:
plines2 = f.readlines()
with open('/scratch/qz886/Clustercenters_15res/Pnear_Candidates/Pnear_Cartesian_15res_3.out', 'r') as f:
plines3 = f.readlines()
plines += plines2
plines += plines3
l = 0
while l < len(plines):
if len(plines[l].split()) > 0:
if plines[l].split()[0] == "MPI_worker_node":
l += 1
Energy = []
RMSD = []
while plines[l].split()[0] != "End":
Energy.append(float(plines[l].split()[3]))
RMSD.append(float(plines[l].split()[2]))
l += 1
p = sum(np.exp(-np.array(RMSD)**2 / lamda**2) * np.exp(-np.array(Energy) / kbT)) / sum(np.exp(-np.array(Energy) / kbT))
Pnear.append(p)
else:
l += 1
else:
l += 1
PnearM = []
for i in range(len(candidates)):
path = 'After_GA_'+candidates[i]+'_15res.mat'
if os.path.exists(path):
mat = scipy.io.loadmat(path)
Energy = mat["candScores"][0]
RMSD = mat["candRMSD"][0]
p = sum(np.exp(-np.array(RMSD)**2 / lamda**2) * np.exp(-np.array(Energy) / kbT)) / sum(np.exp(-np.array(Energy) / kbT))
PnearM.append(p)
with open('Pnear_values_15res.txt', 'w') as f:
f.write('Name\t\t\t\tEnergy\t\tRosetta\t\tGA\t\tSequence\n')
for i in range(len(PnearM)):
if Pnear[i] > 0.9:
f.write(candidates[i].split('_')[0]+f'\t\t{scores[i]:.3f}\t\t{Pnear[i]:.3f}\t\t{PnearM[i]:.3f}\t\t'+seqs[i]+'\n')
f.write('\nName\t\t\t\tEnergy\t\tRosetta\t\tGA\t\tSequence\n')
for i in range(len(PnearM)):
if PnearM[i] > 0.9:
f.write(candidates[i].split('_')[0]+f'\t\t{scores[i]:.3f}\t\t{Pnear[i]:.3f}\t\t{PnearM[i]:.3f}\t\t'+seqs[i]+'\n')
f.write('\nName\t\t\t\tEnergy\t\tRosetta\t\tGA\t\tSequence\n')
for i in range(len(PnearM)):
if abs(Pnear[i]-PnearM[i]) > 0.4:
f.write(candidates[i].split('_')[0]+f'\t\t{scores[i]:.3f}\t\t{Pnear[i]:.3f}\t\t{PnearM[i]:.3f}\t\t'+seqs[i]+'\n')
| qiyaozhu/CyclicPeptide | Clustercenters_15res/Pnear_Modified/Pnear_15res.py | Pnear_15res.py | py | 4,511 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.finditer",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "re.finditer",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 1... |
36926953733 | # coding: utf-8
import json
import re
def sort_dict(dict):
    """Rank vote counts in descending order.

    Returns a list of single-entry {name: votes} dicts sorted from most to
    least votes, followed by a {'总点赞数': total} entry holding the sum of
    all votes. Ties keep their original insertion order (sorted() is stable),
    matching the old repeated-selection loop. Unlike the previous O(n^2)
    implementation this runs in O(n log n) and does not destructively empty
    the input mapping.
    """
    # values may arrive as strings, so normalize with int() before comparing
    ranked = sorted(dict.items(), key=lambda kv: int(kv[1]), reverse=True)
    result = [{name: int(votes)} for name, votes in ranked]
    result.append({"总点赞数": sum(int(votes) for _, votes in ranked)})
    return result
# NOTE(review): list_sort is never used below — confirm it can be removed
list_sort=[]
# read the raw voters dict (Python repr-style text) and rank the votes
with open ('votersdict.txt','r',encoding='utf-8') as f:
    dict_str=f.read()
    # normalize single quotes to double quotes so json.loads accepts the text
    dict_str=re.sub('[\'|\"]','\"',dict_str)
    dict_dict=json.loads(dict_str)
    dict=sort_dict(dict_dict)
    #print (type(dict_str))
    print (dict)
'''
with open ('votersdict.txt','r',encoding='utf-8') as f:
dict_str=f.read()
dict_str=re.sub('[\'|\"]','\"',dict_str)
dict_dict=json.loads(dict_str)
print (type(dict_str))
print ((dict_dict))
for key,value in dict_dict.items():
if list_sort == []:
list_sort.append({key:value})
#print (list_sort[0][key])
else :
flag=0
for i in len(list_sort):
if flag==0:
if value<=list_sort[i][key]:
flag=i
else:
flag=0
elif (value <= i[key]):
flag = i
print (list_sort)
'''
| herolf/Crawler_herolf | voters_read_sort_611.py | voters_read_sort_611.py | py | 1,666 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 35,
"usage_type": "call"
}
] |
8692692038 | import datetime
import itertools
from .models import Period
# https://www.womenshealth.gov/a-z-topics/menstruation-and-menstrual-cycle
AVERAGE_MENSTRUAL_CYCLE = datetime.timedelta(days=28)
def avg(lst):
    """Arithmetic mean of the values in *lst*.

    Works on any iterable, including one-shot generators (so len() is never
    called). Raises ZeroDivisionError for an empty iterable.
    """
    count = 0
    running_total = 0
    for value in lst:
        running_total += value
        count += 1
    return running_total / count
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0,s1), (s1,s2), (s2,s3), ...

    Implemented with itertools.tee, as in the recipe from the itertools docs:
    https://docs.python.org/3/library/itertools.html?highlight=itertools#module-itertools
    """
    left, right = itertools.tee(iterable)
    next(right, None)
    yield from zip(left, right)
def calculate_next_period(phone_number):
    """Predict the start date of the next period for *phone_number*.

    Uses the average gap between the three most recent recorded periods;
    falls back to the 28-day average menstrual cycle when only one record
    exists. Returns None when there is no history at all.
    """
    # NOTE(review): assumes the Period queryset's default ordering puts the
    # most recent start_date first — confirm against the model's Meta.
    latest_periods = Period.objects.filter(phone_number=phone_number)[:3]

    if len(latest_periods) == 0:
        return None

    if len(latest_periods) == 1:
        average_period_length = AVERAGE_MENSTRUAL_CYCLE
    else:
        # day gaps between consecutive recorded periods (newer minus older)
        deltas = (
            (a - b).days
            for a, b in pairwise([
                period.start_date
                for period in latest_periods
            ])
        )
        average_period_length = datetime.timedelta(days=round(avg(deltas)))

    return latest_periods[0].start_date + average_period_length
| rockymeza/ymrj | periods/utils.py | utils.py | py | 1,161 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.timedelta",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "itertools.tee",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "models.Period.objects.filter",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.P... |
71509366755 | import sys
import os
import glob
import csv
import logging
from collections import OrderedDict
from util import *
def trade_dates(file):
    """Return the distinct trade dates covered by a trades csv file.

    Dates are returned in first-seen order. A set tracks membership so the
    scan stays O(n) per file — the previous ``date in dates`` list test was
    O(n) per row.
    """
    dates = []
    seen = set()
    # Open the trades file and get each date (Python 2 idiom: csv in binary mode)
    with open(file, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        for line in reader:
            # dates are stored as floats in the csv; normalize to int
            date = int(float(line['date']))
            if date not in seen:
                seen.add(date)
                dates.append(date)
    return dates
def order_quotes(dir):
    """Scan every quotes csv in *dir* and index its (date, symbol) coverage.

    Returns an OrderedDict mapping file path -> {date -> {symbol -> first
    line number at which that symbol appears for that date}}.
    NOTE(review): files are visited in glob order, which is filesystem
    dependent — the earliest-to-latest ordering implied by the original
    docstring is not enforced here; confirm callers sort if they need it.
    """
    files = OrderedDict({})
    for file in glob.glob(os.path.join(os.path.normpath(dir), '*.csv')):
        # Python 2 idiom: csv files opened in binary mode
        with open(file, 'rb') as csvfile:
            files[file] = {}
            reader = csv.DictReader(csvfile)
            for line in reader:
                date = int(float(line['date']))
                # record only the first line number per (date, symbol)
                if not date in files[file]:
                    files[file][date] = {line['SYMBOL']: reader.line_num}
                elif not line['SYMBOL'] in files[file][date]:
                    files[file][date][line['SYMBOL']] = reader.line_num
    return files
def write_taq(taq_output, file):
    """Writes the data in the TAQ buffer to file, appending the TAQ buffer data
    to any existing entries. The header row is written only when the file is new.
    """
    # Write the headers of the file if the file does not exist
    if not os.path.isfile(file):
        with open(file, 'wb') as csvfile:
            writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=taq_output[0].keys())
            writer.writeheader()
    # append mode so repeated calls accumulate rows
    with open(file, 'ab') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=taq_output[0].keys())
        writer.writerows(taq_output)
def calculate_NBBO(exchanges):
    """Calculates the national best bid and best offer (NBBO) across all
    exchanges, carrying along the bid size / offer size of the winning quote.

    Accepts a dictionary mapping exchange code to its BID, OFR, BIDSIZ and
    OFRSIZ, e.g. { 'N': {'BID': 50.0, 'OFR': 48.0, 'BIDSIZ': 10, 'OFRSIZ': 5} }.
    Quotes below 1 USD are treated as data errors and ignored; when no valid
    offer exists the returned OFR is 0.
    """
    # float('inf') replaces the Python-2-only sys.maxint sentinel so the
    # function also runs under Python 3; observable behavior is unchanged.
    NBBO = {'BID': 0, 'OFR': float('inf'), 'BIDSIZ': 0, 'OFRSIZ': 0}

    # Get the best bid and offer
    for exchange in exchanges:
        quote = exchanges[exchange]
        # Ignore any bids or offers less than 1 USD (error in quotes file)
        if quote['BID'] >= NBBO['BID'] and quote['BID'] >= 1.0:
            NBBO['BID'] = quote['BID']
            NBBO['BIDSIZ'] = quote['BIDSIZ']
        if quote['OFR'] <= NBBO['OFR'] and quote['OFR'] >= 1.0:
            NBBO['OFR'] = quote['OFR']
            NBBO['OFRSIZ'] = quote['OFRSIZ']

    # If there is no valid OFR for the current NBBO set it to 0
    if NBBO['OFR'] == float('inf'):
        NBBO['OFR'] = 0

    return NBBO
def add_taq_entry(taq_output, trade, NBBO, exchanges):
    """Build one TAQ row and append it to the *taq_output* buffer.

    The row layout is:
        <All trade columns>, NBB, NBO, NBBSIZ, NBOSIZ, NQBB, NQBO, NQBBSIZ, NQBOSIZ
    The NBB* columns come from *NBBO* (zeros when the NBBO is incomplete)
    and the NQ* columns come from the NASDAQ exchange 'T' (zeros when no
    NASDAQ entry is available).
    """
    entry = OrderedDict({})
    # Copy every trade data column into the row, in a fixed order
    for column in ('time', 'symbol', 'shares', 'buysell', 'price', 'type', 'date'):
        entry[column] = trade[column]
    # NBBO columns: only used when every one of the four fields is present
    if all(k in NBBO for k in ('BID', 'OFR', 'BIDSIZ', 'OFRSIZ')):
        entry['NBB'] = NBBO['BID']
        entry['NBO'] = NBBO['OFR']
        entry['NBBSIZ'] = NBBO['BIDSIZ']
        entry['NBOSIZ'] = NBBO['OFRSIZ']
    else:
        entry['NBB'] = entry['NBO'] = entry['NBBSIZ'] = entry['NBOSIZ'] = 0
    # NASDAQ ('T') BBO columns, zeros when NASDAQ has no quote
    nasdaq = exchanges.get('T')
    if nasdaq is not None:
        entry['NQBB'] = nasdaq['BID']
        entry['NQBO'] = nasdaq['OFR']
        entry['NQBBSIZ'] = nasdaq['BIDSIZ']
        entry['NQBOSIZ'] = nasdaq['OFRSIZ']
    else:
        entry['NQBB'] = entry['NQBO'] = entry['NQBBSIZ'] = entry['NQBOSIZ'] = 0
    # Hand the finished row back via the shared output buffer
    taq_output.append(entry)
# The trades file, quotes directory, and resultant taq file
trades_file = ''
quotes_file = ''
taq_file = ''
log_file = ''
# Process command line arguments: {trades file} {quotes file} {output file}
if len(sys.argv) < 4:
    sys.stderr.write("Invalid arguments given\n")
    sys.stderr.write("Usage: align_quotes.py {trades file} {quotes file} {output file}\n")
    # Fix: a space is required between the program name and the example
    # arguments, otherwise the message prints them fused together.
    sys.stderr.write("Example: " + sys.argv[0] + " trades_08oct2012.csv quotes_08oct2012.csv taq_08oct2012.csv\n")
    sys.exit(1)
if os.path.isfile(sys.argv[1]) and os.path.isfile(sys.argv[2]):
    # Error if output file is directory
    if os.path.isdir(sys.argv[3]):
        sys.stderr.write("The output file cannot be a directory, please specify an output file!\n")
        sys.exit(1)
    # Error if trying to overwrite an existing output file
    if os.path.isfile(sys.argv[3]):
        sys.stderr.write("The output file already exists, please specify a different output file!\n")
        sys.exit(1)
    trades_file, quotes_file, taq_file = sys.argv[1:4]
    # Errors are logged to a file named after the output, prefixed 'errors_'
    log_file = 'errors_' + os.path.basename(taq_file)
else:
    sys.stderr.write("Trades or quotes file does not exist, check arguments and try again!\n")
    sys.exit(1)
# Configure the logging: warnings about mismatched/exhausted quotes go to log_file
logging.basicConfig(format='', filename=log_file, level=logging.DEBUG)
# A buffer of the final taq data to be written to file, which is periodically flushed to disk
taq_output = list()
# For each entry in the trades file, iterate through the quotes files and produce a combined file.
# NOTE(review): both files are assumed to be sorted by symbol (and by time within
# a symbol) — the single forward pass below depends on that ordering; confirm
# against the upstream data preparation.
with open(trades_file, 'rb') as trades_csv:
    trades_reader = csv.DictReader(trades_csv)
    # Open the quotes file and read the first entry
    quotes_csv = open(quotes_file, 'rb')
    quotes_reader = csv.DictReader(quotes_csv)
    quote = quotes_reader.next()
    # Used to simplify processing duplicate trades file entries
    prev_symbol = None
    prev_trade_date = None
    prev_trade_time = None
    # The list of exchanges, each exchange has its own NBBO
    exchanges = {}
    NBBO = {}
    # Calculate current NBBO/NASDAQ BBO in the quotes file for each line in trades file
    for trade in trades_reader:
        symbol = trade['symbol']
        trade_date = float_to_int(trade['date'])
        trade_time = convert_mil(trade['time'])
        # Ignore the trade if it occurs when the markets are closed
        if not market_hours(trade_time):
            continue
        # If the current date and time is the same, use the previous results
        if prev_symbol == symbol and prev_trade_date == trade_date and prev_trade_time == trade_time:
            # Use the data for the current trade, duplicate the previous NBBO calculations
            add_taq_entry(taq_output, trade, NBBO, exchanges)
            continue
        else:
            prev_trade_date = trade_date
            prev_trade_time = trade_time
        # Reset the exchanges and NBBO for the new symbol
        if prev_symbol and (prev_symbol != symbol):
            exchanges.clear()
            NBBO.clear()
        # Set the previous symbol to the current
        prev_symbol = symbol
        # Iterate through the quotes file to the matching symbol entry
        while symbol > quote['SYMBOL']:
            try:
                quote = quotes_reader.next()
            except StopIteration:
                logging.warning(quotes_file + " : Ended before trades file :" + trades_file
                                + " at following trade: " + trade['symbol'] + ", " + trade_time.isoformat())
                break
        # If no matching entry in the quotes file add the most recent results, the NBBx results can
        # be all 0 in the case where there is no matching symbol in the quotes file
        if symbol < quote['SYMBOL']:
            add_taq_entry(taq_output, trade, NBBO, exchanges)
            continue
        # Get the NBBO for each exchange entry in the quotes file
        while (symbol == quote['SYMBOL']):
            try:
                quote_date = float_to_int(quote['date'])
                quote_time = convert_sec(quote['time'])
                # Ignore any quotes that occur when the markets are closed
                if not market_hours(quote_time):
                    quote = quotes_reader.next()
                    continue
                # Get the current NBBO for each exchange in the quotes file using the most recent entry
                if trade_date == quote_date and trade_time >= quote_time:
                    # Parse the latest NBBO bid, ofr, bidsiz, ofrsiz for each exchange
                    # (exchange 'D' quotes are deliberately excluded)
                    if not quote['EX'] in exchanges and quote['EX'] != 'D':
                        exchanges[quote['EX']] = {}
                    if quote['EX'] != 'D':
                        exchanges[quote['EX']]['BID'] = float(quote['BID'])
                        exchanges[quote['EX']]['OFR'] = float(quote['OFR'])
                        exchanges[quote['EX']]['BIDSIZ'] = float(quote['BIDSIZ'])
                        exchanges[quote['EX']]['OFRSIZ'] = float(quote['OFRSIZ'])
                # Error if NO matching entries in the quotes file has been found
                elif not exchanges and (trade_date < quote_date or trade_time < quote_time):
                    logging.warning(trades_file + " : No quotes entry found for the following trade: " + trade['symbol']
                                    + ", " + trade_time.isoformat())
                    break
                # Break when the last matching quote entry for the time has been parsed
                elif exchanges and (symbol != quote['SYMBOL'] or trade_date < quote_date or trade_time < quote_time):
                    break
                # Read the next line of the quotes file
                quote = quotes_reader.next()
            # Error should not have reached the end of quotes file before trades file
            # TODO add support for the case where a single trades file spans across more than one quotes file
            except StopIteration:
                logging.warning(quotes_file + " : Ended before trades file :" + trades_file
                                + " at following trade: " + trade['symbol'] + ", " + trade_time.isoformat())
                break
        # Calculate the current NBBO out of all exchanges, add the results as a new entry in the taq output buffer
        NBBO = calculate_NBBO(exchanges)
        add_taq_entry(taq_output, trade, NBBO, exchanges)
        # If the TAQ buffer is > 250K lines, flush the buffer to disk
        # (idiom fix: len(...) instead of calling __len__ directly)
        if len(taq_output) >= 250000:
            write_taq(taq_output, taq_file)
            # Clear the buffer in memory
            del taq_output[:]
    # Close the open quotes files
    quotes_csv.close()
# Write any remaining content in the TAQ buffer to disk
if taq_output:
    write_taq(taq_output, taq_file)
| gnu-user/finance-research | scripts/align_quotes.py | align_quotes.py | py | 11,709 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "csv.DictReader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.