seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42882353355 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Please refer the tutorial ":ref:`tutorial-parse_parser`".
"""
# pylint: disable=invalid-name, no-self-use
__author__ = "Mu Yang <http://muyang.pro>"
__copyright__ = "2018-2021 CKIP Lab"
__license__ = "GPL-3.0"
import re
from wcwidth import wcswidth
from ply.lex import lex
from ply.yacc import yacc
from .node import (
EhnParseAnchor,
EhnParseAnyPlaceholder,
EhnParseCoindexReference,
EhnParseFunction,
EhnParseFunctionEntity,
EhnParseFunctionFeature,
EhnParseNameEntity,
EhnParseNormalEntity,
EhnParseNormalFeature,
EhnParseNumberEntity,
EhnParseRestrictionPlaceholder,
EhnParseSubject,
EhnParseSubjectReference,
EhnParseTildeReference,
)
################################################################################################################################
# Core
#
# Single-character terminal tokens: token name -> its literal character.
# The mapping is also used to pretty-print expected symbols in error messages.
EHN_TOKENS_CHAR = {
    "QUOTE": '"',
    "EQUAL": "=",
    "COLON": ":",
    "COMMA": ",",
    "SLASH": "/",
    "ULINE": "_",
    "LPAREN": "(",
    "RPAREN": ")",
    "LBRACE": "{",
    "RBRACE": "}",
    "TILDE": "~",
}
# Complete token list handed to PLY: multi-character token kinds first,
# then all single-character tokens declared above.
EHN_TOKENS = ["TEXT", "NUMBER", "COINDEX", "COINDEX0", *EHN_TOKENS_CHAR.keys()]
class EhnSyntaxError(SyntaxError):
    """Syntax error raised while tokenizing or parsing an E-HowNet expression.

    Carries the character offset of the offending symbol in ``pos`` so
    callers can render a caret under the bad character.
    """

    def __init__(self, *args, pos=None):
        # pos: character offset of the error in the original input, or None
        # (e.g. for an unexpected end of input).
        super().__init__(*args)
        self.pos = pos

    def show_pos(self, text):
        """Return a caret line pointing at the error position.

        Parameters
        ----------
        text
            original input text
        """
        # wcswidth accounts for wide (CJK) characters so the caret lines up
        # with the displayed text, not the byte/char offset.
        prefix_width = wcswidth(text[: self.pos])
        return " " * prefix_width + "^"
################################################################################################################################
# Lexer
#
class _EhnLexer:
    """Internal PLY-based tokenizer for E-HowNet expressions."""

    def __init__(self, **kwargs):
        # Build the PLY lexer from the t_* attributes declared on this class.
        self._lexer = lex(module=self, **kwargs)

    # Token names recognized by PLY (shared with the parser).
    tokens = EHN_TOKENS

    # Skip all spaces
    # t_ignore = ' \t\n\r\f\v'

    # Default state tokens — PLY reads each t_NAME raw string as that
    # token's regular expression.
    t_QUOTE = r'"'
    t_EQUAL = r"="
    t_COLON = r":"
    t_COMMA = r","
    t_SLASH = r"/"
    t_ULINE = r"_"
    t_LPAREN = r"\("
    t_RPAREN = r"\)"
    t_LBRACE = r"{"
    t_RBRACE = r"}"
    t_TILDE = r"~"

    # Lexing error hook: abort with a descriptive EhnSyntaxError instead of
    # skipping the bad character. (No docstring on purpose — PLY treats
    # rule-function docstrings as regex patterns.)
    def t_ANY_error(self, t):
        raise EhnSyntaxError(f"Illegal character ‘{t.value[0]}’ at position {t.lexpos}.", pos=t.lexpos)
        # t.lexer.skip(1)

    # Catch-all word token. The raw string below is the PLY token pattern,
    # not documentation — do not edit it as a docstring.
    def t_TEXT(self, t):
        r"[A-Za-z0-9\x80-\U0010FFFF|#+\-.?]+"
        # Reclassify the matched text into a more specific token kind.
        if _isnumber(t.value):
            t.type = "NUMBER"
        elif t.value == "x?":
            t.type = "COINDEX0"
        elif _is_coindex(t.value):
            t.type = "COINDEX"
        else:
            # Plain TEXT must not contain number/coindex-only characters.
            match = re.search(r"[+\-.?]", t.value)
            if match:
                pos = t.lexpos + match.start()
                raise EhnSyntaxError(f"Illegal character ‘{match.group(0)}’ at position {pos}.", pos=pos)
        return t

    # Invoke the lexer
    def __call__(self, data):
        # Feed *data* to the lexer and return an iterator over its tokens.
        self._lexer.input(data)
        return iter(self._lexer)
class EhnLexer(_EhnLexer):
    """E-HowNet Lexer (public wrapper around the internal PLY lexer).

    .. method:: __call__(self, data)

        Run tokenization and return an iterator of tokens.
    """
################################################################################################################################
# Parser
#
class _EhnParser:
def __init__(self, lexer=None, **kwargs):
if lexer is not None:
assert isinstance(lexer, EhnLexer), f"{lexer} is not an EhnLexer!"
self.lexer = lexer
else:
self.lexer = EhnLexer()
self._parser = yacc(module=self, **kwargs)
@property
def _lexer(self):
return self.lexer._lexer # pylint: disable=protected-access
tokens = EHN_TOKENS
# Define the parser
def p_error(self, t):
if t is None:
msg = "Unexpected ending."
pos = None
else:
msg = f"Unexpected symbol ‘{t.value}’ at position {t.lexpos}."
pos = t.lexpos
syms = []
for sym in self._parser.action[self._parser.state].keys():
sym = EHN_TOKENS_CHAR.get(sym, sym)
if sym == "$end":
syms.append("‘ENDING’")
else:
syms.append(f"‘{sym}’")
if len(syms) > 1:
syms[-1] = "or " + syms[-1]
msg += f' Expecting a {", ".join(syms)}.'
raise EhnSyntaxError(msg, pos=pos)
# Object
def p_expr(self, p):
"""expr : entity
| subject"""
p[0] = p[1]
# Subject
def p_subject(self, p):
"""subject : feature
| subject COMMA feature"""
if len(p) == 2:
p[0] = EhnParseSubject(p[1])
else:
p[1].add_feature(p[3])
p[0] = p[1]
# Entity
def p_entity_number(self, p):
"""entity : LBRACE NUMBER RBRACE"""
p[0] = EhnParseNumberEntity(p[2])
def p_entity_name(self, p):
"""entity : LBRACE QUOTE TEXT QUOTE RBRACE"""
p[0] = EhnParseNameEntity(p[3])
def p_entity_normal_open(self, p):
"""entityOpen : LBRACE TEXT"""
p[0] = EhnParseNormalEntity(p[2])
def p_entity_function_open(self, p):
"""entityOpen : LBRACE function"""
p[0] = EhnParseFunctionEntity(p[2])
def p_entity_anchor(self, p):
"""entityAnchor : entityOpen anchor"""
p[1].anchor = p[2]
p[0] = p[1]
def p_entity_feature0(self, p):
"""entityFeature : entityOpen COLON feature
| entityAnchor COLON feature"""
p[1].add_feature(p[3])
p[0] = p[1]
def p_entity_feature(self, p):
"""entityFeature : entityFeature COMMA feature"""
p[1].add_feature(p[3])
p[0] = p[1]
def p_entity_close(self, p):
"""entity : entityOpen RBRACE
| entityAnchor RBRACE
| entityFeature RBRACE"""
p[0] = p[1]
# Reference
def p_reference_coindex(self, p):
"""reference : LBRACE COINDEX RBRACE"""
p[0] = EhnParseCoindexReference(p[2])
def p_reference_subject(self, p):
"""reference : LBRACE COINDEX0 RBRACE"""
p[0] = EhnParseSubjectReference()
def p_reference_tilde(self, p):
"""reference : LBRACE TILDE RBRACE"""
p[0] = EhnParseTildeReference()
# Placeholder
def p_restriction(self, p):
"""restriction : SLASH entity
| SLASH reference"""
p[0] = EhnParseRestrictionPlaceholder(p[2])
def p_restriction_anchor(self, p):
"""restriction : SLASH entity anchor
| SLASH reference anchor"""
p[0] = EhnParseRestrictionPlaceholder(p[2], anchor=p[3])
def p_any(self, p):
"""any : LBRACE RBRACE"""
p[0] = EhnParseAnyPlaceholder()
# Feature
def p_feature(self, p):
"""feature : TEXT EQUAL entity
| TEXT EQUAL reference
| TEXT EQUAL restriction
| TEXT EQUAL any"""
p[0] = EhnParseNormalFeature(p[1], p[3])
def p_function_feature(self, p):
"""feature : function EQUAL entity
| function EQUAL reference
| function EQUAL restriction
| function EQUAL any"""
p[0] = EhnParseFunctionFeature(p[1], p[3])
# Function
def p_function_any(self, p):
"""function : TEXT LPAREN RPAREN"""
p[0] = EhnParseFunction(p[1], EhnParseAnyPlaceholder())
def p_function_restriction(self, p):
"""function : TEXT LPAREN restriction RPAREN"""
p[0] = EhnParseFunction(p[1], p[3])
def p_function_open(self, p):
"""functionOpen : TEXT LPAREN entity
| TEXT LPAREN reference"""
p[0] = EhnParseFunction(p[1], p[3])
def p_function_argument(self, p):
"""functionArgument : functionOpen COMMA entity
| functionOpen COMMA reference
| functionArgument COMMA entity
| functionArgument COMMA reference"""
p[1].add_argument(p[3])
p[0] = p[1]
def p_function_close(self, p):
"""function : functionOpen RPAREN
| functionArgument RPAREN"""
p[0] = p[1]
# Anchor
def p_anchor(self, p):
"""anchor : ULINE COINDEX"""
p[0] = EhnParseAnchor(p[2])
# Invoke the parser
def __call__(self, data: str, *args, debug=False, **kwargs):
if debug:
print(data)
for tok in self.lexer(data):
print(tok)
ret = self._parser.parse(data, lexer=self._lexer, *args, debug=debug, **kwargs)
return ret
class EhnParser(_EhnParser):
"""E-HowNet Parser.
.. method:: __call__(self, data: str)
Run parsing.
"""
################################################################################################################################
# Utility
#
def _isnumber(name):
try:
float(name)
return True
except ValueError:
return False
def _is_coindex(name):
    """Return a match object (truthy) when *name* starts with a co-index token.

    A co-index is ``x`` optionally followed by digits (x, x1, x23, ...).

    NOTE(review): the pattern is anchored only at the start (``match``, not
    ``fullmatch``), so e.g. ``"xabc"`` also matches via its leading ``x`` —
    confirm this is the intended tokenization.
    """
    return _is_coindex.pattern.match(name)
# Compiled once and cached on the function object itself.
_is_coindex.pattern = re.compile(r"x[0-9]*")
| ckiplab/ehownet | ehn/parse/parser.py | parser.py | py | 8,932 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "wcwidth.wcswidth",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "ply.lex.lex",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "ply.yacc.yacc",
"line_num... |
12545797664 | #!/usr/bin/env python
def check_path(fp):
    """Raise FileNotFoundError when *fp* does not exist on the filesystem."""
    import os

    if os.path.exists(fp):
        return
    raise FileNotFoundError("Could not find the file {}".format(fp))
def main(sew_file, orb_file, hdf_file, symmetry, index):
import re
import numpy as np
import h5py
print(' ')
print(' M U L L I K E N A N A L Y S I S')
print(' of ')
print(' Molecular Orbital in terms of Atomic Orbital wt.%')
print(' ')
print('For questions/suggestions contact Gaurab Ganguly')
print(' gaurabganguly1989@gmail.com')
print(' ')
print('Molecular Orbital of interest:')
print('------------------------------')
print('Symmetry label=', symmetry,', Index=', index)
print(' ')
print('Files from Molcas/OpenMolcas Calculation:')
print('-----------------------------------------')
print('Seward file :', sew_file)
print('Orbital file :', orb_file)
print('HDF5 file :', hdf_file)
print(' ')
print(' ')
count = [] # index of basis fn (all irreps combined)
basisfn = [] # total number of basis fn (all irreps combined)
irrep = [] # irreps of the point group
sym_label = [] # indexing the irreps 1,2,3,...
sym_bas = [] # number of basis fn in each irrep
sym_block = [] # elements of AO overlap matrix in each irrep block
coeff = [] # store the MO coefficients of the requested MO in a list
check_path(sew_file)
check_path(orb_file)
check_path(hdf_file)
#Reading basis information from the provided SEWARD file:
with open(sew_file, 'r') as sfile:
for line in sfile:
if re.search(r'Basis Label Type Center', line):
for line in sfile:
if re.search(r'Basis set specifications \:', line):
break
if re.search(r'\W\d', line):
count.append(int(line.split()[0]))
basisfn.append(line.split()[1] + "-" + (line.split()[2]))
if len(count) == 0 and len(basisfn) == 0:
raise ValueError("Could not find basis set table in seward output file {}".format(sew_file))
with open(sew_file, 'r') as sfile:
lines = sfile.readlines()
try:
point_group = [x for x in lines if 'Character Table' in x][0].split()[3]
symmetry_species = [x for x in lines if 'Symmetry species' in x][0]
basis_functions = [x for x in lines if 'Basis functions' in x][-1]
#print("BAS", basis_functions)
except IndexError:
raise IndexError("Could not find 'Character Table', 'Symmetry species', or 'Basis functions' " \
+"search strings in seward output file {}".format(sew_file))
num_of_irreps = len(re.findall(r'\d+', basis_functions))
if num_of_irreps == 0:
raise ValueError("Did not find any Irreps. in seward output file {}".format(sew_file))
for i in range(num_of_irreps):
sym_label.append(i+1)
irrep.append(symmetry_species.split()[i+2])
sym_bas.append(int(basis_functions.split()[i+2]))
sym_block.append(int(basis_functions.split()[i+2])**2)
# Reading orbitals from GssOrb/ScfOrb/RASOrb/PT2Orb/SONOrb or any orbitals file:
search_string = r'\* ORBITAL{:>5d}{:>5d}'
with open(orb_file, 'r') as ofile:
for line in ofile:
if re.search(search_string.format(symmetry, index), line):
for line in ofile:
if re.search(search_string.format(symmetry, index+1), line):
break
if re.search(r'\s', line):
for item in line.strip().split():
coeff.append(float(item))
if len(coeff) == 0 and re.search(search_string.format(symmetry, index), line):
# found the search string
raise ValueError("Did not find orbitals in orbital file {}".format(orb_file))
elif re.search(search_string.format(symmetry, index), line) is not None:
# did not find the search string
raise RuntimeError("Something else went wrong.......Help me PLS. :(")
# Reading AO overlap integrals from the provided '.h5' file:
with h5py.File(hdf_file, 'r') as hdf:
overlap = np.array(hdf.get('AO_OVERLAP_MATRIX'))
print(' POINT GROUP =', point_group)
print('-------------------------------------------------------------')
print('Symm. label Irrep. No. of MOs')
print('-------------------------------------------------------------')
template = ' {:>10d} {:<10s} {:>10d}'
for i in range(num_of_irreps):
print(template.format(i+1, symmetry_species.split()[i+2], int(basis_functions.split()[i+2])))
print('-------------------------------------------------------------')
start_bas = 0
start_block = 0
end_bas = 0
end_block = 0
try:
if symmetry == 1:
end_bas = start_bas + sym_bas[0]
end_block = start_block + sym_block[0]
bas = np.array(basisfn[start_bas:end_bas])
block = np.reshape(overlap[start_block:end_block], (sym_bas[0], sym_bas[0]))
else:
for i in range(symmetry-1):
start_bas += sym_bas[i]
start_block += sym_block[i]
for i in range(symmetry):
end_bas += sym_bas[i]
end_block += sym_block[i]
bas = np.array(basisfn[start_bas:end_bas])
block = np.reshape(overlap[start_block:end_block], (sym_bas[symmetry-1],
sym_bas[symmetry-1]))
# TODO: find out what exception it raises that you would have to deal with.
# having a general Exception is not good to do.
# python has the great ability to handle different error cases separately and you can give the
# user valuable information as to what went wrong when you raise the appropriate error.
except Exception:
print("Error Exit:")
print("Symmetry label", symmetry, "is not possible for", point_group, "point group!")
print("Check the table and re run.")
# Multiplying coeff*overlap*coeff (CSC) to get MO wt%
if symmetry == 0 or index == 0:
# TODO: here a raise ValueError would be more appropriate also it will terminate the program
print("Error Exit:")
print("Symmetry or Index can't be 0!")
print("Check the Symmetry label for Irreps in the table and re run.")
elif symmetry not in sym_label:
pass
elif index > sym_bas[symmetry-1]:
# TODO: here a raise ValueError would be more appropriate also it will terminate the program
raise ValueError("Error Exit: Index", index, "is beyond range for", irrep[symmetry-1], \
"Irrep! Check the table and re run.")
#print("Error Exit:")
#print("Index", index, "is beyond range for", irrep[symmetry-1], "Irrep!")
#print("Check the table and re run.")
elif symmetry in sym_label and index <= sym_bas[symmetry-1]:
print('')
print('Mulliken Analysis of:')
template = "n-th ('n ={:>3}') MO in '{}' Symmetry (symm. label = '{}')."
print(template.format(index, irrep[symmetry - 1], symmetry))
print('All AO function with > 1.0% weight in the MO is printed.')
print('-------------------------------------------------------------')
print('AO-func. wt.% ')
print('-------------------------------------------------------------')
for i in range(len(coeff)):
tmp = []
for j in range(len(coeff)):
tmp.append(coeff[i] * block[i][j] * coeff[j])
if abs(sum(tmp))*100 > 1.0 : # user can change the thresold
print('{:<10s} {:>10.1f}%'.format(bas[i], sum(tmp)*100))
print('-------------------------------------------------------------')
print('')
else:
# TODO: here a raise ValueError would be more appropriate also it will terminate the program
raise ValueError("Error Exit: Symmetry label and Index is not possible! Check and re run.")
#print("Error Exit: Symmetry label and Index is not possible! Check and re run.")
if __name__ == "__main__":
import argparse, pathlib
parser = argparse.ArgumentParser(description="This program calculates AO wt% in a given MO.")
parser.add_argument('sew_file', type=pathlib.Path, metavar='1) file.out',
help="Gateway/Seward output file with print level = 3.")
parser.add_argument('orb_file', type=pathlib.Path, metavar='2) file.SCF/RAS/SONOrb',
help="Orbital file with MO co-efficients.")
parser.add_argument('hdf_file', type=pathlib.Path, metavar='3) file.h5',
help="HDF5 file for AO overlap matrix.")
parser.add_argument('-s', '--symmetry', type=int, metavar='MO_symmetry', required=True,
help="Symmetry/Irrep of the orbital of interest.")
parser.add_argument('-i', '--index', type=int, metavar='MO_Index', required=True,
help="Orbital index in the particular Symmetry/Irrep.")
args = parser.parse_args()
main(args.sew_file, args.orb_file, args.hdf_file, args.symmetry, args.index)
| gaurabganguly1989/molcas_mo2ao_weights | molcas_ao_weights.py | molcas_ao_weights.py | py | 9,516 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 46... |
24347774605 | import requests as req
from lxml import html
from tqdm import tqdm
url = 'http://swf.com.tw/scrap/'
page = req.get(url)
dom = html.fromstring(page.text)
images = dom.xpath('//img/@src')
def download(url):
    """Stream-download *url* into the current directory; return the local filename."""
    # Use the last URL path segment as the output filename.
    filename = url.split('/')[-1]
    # stream=True so the body is fetched chunk by chunk, not all at once.
    r = req.get(url, stream=True)
    with open(filename, 'wb') as f:
        # tqdm wraps the chunk iterator to show a progress bar (1 KiB chunks).
        for data in tqdm(r.iter_content(1024)):
            f.write(data)
    return filename
for img in images:
    # Resolve relative image paths against the page URL.
    if not img.startswith('http'):
        img = url + img
    # HEAD request: check availability and content type without downloading.
    h = req.head(img)
    MIME = h.headers['content-type']
    # Confirm the response is OK and actually an image before downloading.
    if (h.status_code == 200) and ('image' in MIME):
        print('下載檔案網址:' + img)
        filename = download(img)
        print(filename + ' 檔案下載完畢!')
print(filename + ' 檔案下載完畢!') | theoyu13/python3 | python程式設計入門/F9796/ch11/download_img.py | download_img.py | py | 787 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_num... |
29573142583 | import tensorflow as tf
from model import char_rnn
from utils import build_dataset
import numpy as np
start_token = 'B'
end_token = 'E'
model_dir = 'result/poem'
corpus_file = 'data/poems.txt'
lr = 0.0002
def to_word(predict, vocabs):
    """Sample one vocabulary entry from the network's output distribution.

    Parameters
    ----------
    predict : array-like, shape (1, vocab_size)
        Unnormalized next-character weights; row 0 is used.
    vocabs : sequence of str
        Vocabulary, indexed by the sampled position.

    Returns
    -------
    str
        The sampled character; clamped to the last vocabulary entry when
        the sampled index falls outside ``vocabs``.
    """
    # Renormalize out-of-place so the caller's array is not mutated
    # (np.random.choice requires the weights to sum to exactly 1).
    probs = predict[0] / np.sum(predict[0])
    sample = np.random.choice(np.arange(len(probs)), p=probs)
    # Fix: the guard must be >=. Valid indexes are 0..len(vocabs)-1, so the
    # old `sample > len(vocabs)` still allowed an IndexError at
    # sample == len(vocabs).
    if sample >= len(vocabs):
        return vocabs[-1]
    return vocabs[sample]
def gen_poem(begin_word):
    """Generate one poem with a pretrained char-RNN, optionally seeded.

    Parameters
    ----------
    begin_word : str
        First character of the poem; falsy to let the model pick it.

    Returns
    -------
    str
        Generated text, capped at 24 characters or stopping at end_token.
    """
    batch_size = 1
    print('## loading corpus from %s' % model_dir)
    poems_vector, word_int_map, vocabularies = build_dataset(corpus_file)
    # Placeholder for a batch of token ids; shape (1, None) = variable length.
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    end_points = char_rnn(model='lstm', input_data=input_data, output_data=None, vocab_size=len(
        vocabularies), rnn_size=128, num_layers=2, batch_size=64, learning_rate=lr)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # Restore the most recent training checkpoint from model_dir.
        checkpoint = tf.train.latest_checkpoint(model_dir)
        saver.restore(sess, checkpoint)
        # Prime the network with the start token to get the first prediction.
        x = np.array([list(map(word_int_map.get, start_token))])
        [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                         feed_dict={input_data: x})
        if begin_word:
            word = begin_word
        else:
            word = to_word(predict, vocabularies)
        poem_ = ''
        i = 0
        # Sample one character at a time until the end token or length cap.
        while word != end_token:
            poem_ += word
            i += 1
            if i >= 24:
                break
            # Feed the last generated character back in, carrying RNN state.
            x = np.zeros((1, 1))
            x[0, 0] = word_int_map[word]
            [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                             feed_dict={input_data: x, end_points['initial_state']: last_state})
            word = to_word(predict, vocabularies)
    return poem_
def pretty_print_poem(poem_):
    """Print the poem one sentence per line, dropping short fragments."""
    for sentence in poem_.split('。'):
        # Only sentences longer than 10 characters are worth printing
        # (this also filters out the empty trailing split fragment).
        if len(sentence) > 10:
            print(sentence + '。')
if __name__ == '__main__':
begin_char = input('## please input the first character:')
poem = gen_poem(begin_char)
pretty_print_poem(poem_=poem) | yanqiangmiffy/char-rnn-writer | generate_poem.py | generate_poem.py | py | 2,358 | python | en | code | 83 | github-code | 36 | [
{
"api_name": "numpy.sum",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"l... |
13840387121 | from django.contrib import admin
from django.urls import path, include
from . import views
# NOTE(review): Django resolves URLs top-down and stops at the FIRST pattern
# that matches, so every later entry with an identical route string is
# unreachable: the second 'affiche/', the second 'traitement/', the *_joueur
# duplicates of "affiche/<int:id>/" and "traitementupdate/<int:id>", and
# "update/<int:id>" (Update_club). Each pair needs distinct route strings.
# Also "/delete/<int:id>" has a leading slash, which Django warns against.
urlpatterns = [
    path('joueur/',views.joueur, name="Joueur"),
    path('club/',views.club,name="Club"),
    path('player/',views.player,name="Player"),
    path('',views.home, name="Home"),
    path('affiche/',views.affiche_club, name="Affiche_club"),
    path('affiche/',views.affiche_joueur, name="Affiche_joueur"),  # shadowed by the line above
    path("affiche/<int:id>/",views.affiche_club),
    path("affiche/<int:id>/",views.affiche_joueur),  # shadowed
    path("traitement/", views.traitement_club, name="Traitement_club"),
    path("traitement/", views.traitement_joueur, name="Traitement_joueur"),  # shadowed
    path("/delete/<int:id>",views.delete_club, name="Delete_club"),
    path("delete/<int:id>",views.delete_joueur, name="Delete_joueur"),
    path("update/<int:id>",views.update_joueur, name="Update_joueur"),
    path("update/<int:id>",views.update_club, name="Update_club"),  # shadowed
    path("traitementupdate/<int:id>",views.traitementupdate_joueur, name="Traitementupdate_joueur"),
    path("traitementupdate/<int:id>",views.traitementupdate_club, name="Traitementupdate_club"),  # shadowed
    path('admin/', admin.site.urls),
]
| 2bFaycal/projet-django | foot/app/urls.py | urls.py | py | 1,182 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
7043720148 | from keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers, models, optimizers
INPUT_SHAPE_300_300 = (300, 300, 3)
def create_model(input_shape=INPUT_SHAPE_300_300, weights=None):
    """Build and compile an InceptionV3-based 6-class image classifier.

    Parameters
    ----------
    input_shape : tuple
        Input image shape (height, width, channels); defaults to (300, 300, 3).
    weights : str or None
        Optional path to an InceptionV3 weights file. When None, ImageNet
        weights are downloaded instead.

    Returns
    -------
    A compiled Sequential model ending in a 6-way softmax.
    """
    if weights is not None:
        # Build an uninitialized base and load the user-supplied weights.
        inception_base = InceptionV3(
            weights=None, include_top=False, input_shape=input_shape
        )
        inception_base.load_weights(weights)
    else:
        inception_base = InceptionV3(
            weights="imagenet", include_top=False, input_shape=input_shape
        )
    # Freeze the convolutional base; only the new classification head trains.
    inception_base.trainable = False
    model = models.Sequential(
        [
            inception_base,
            layers.GlobalAveragePooling2D(),
            layers.Dropout(0.3),
            layers.Dense(1024, activation="relu"),
            layers.Dropout(0.3),
            layers.Dense(6, activation="softmax"),
        ]
    )
    model.compile(
        loss="categorical_crossentropy",
        # Fix: tensorflow.keras.optimizers exposes the class `Nadam` (there is
        # no lowercase `nadam` attribute, so the old call raised
        # AttributeError), and the `lr` argument was renamed `learning_rate`.
        optimizer=optimizers.Nadam(learning_rate=0.001),
        metrics=["accuracy"],
    )
    return model
| SalmanRafiullah/garbage-classification | models/inception_v3.py | inception_v3.py | py | 1,034 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.applications.inception_v3.InceptionV3",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.applications.inception_v3.InceptionV3",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 19... |
43163584792 | from flask import Flask
from flask_pymongo import PyMongo
from operator import itemgetter
from flask_login import LoginManager
import sys
# Initialize mongo db with app
app = Flask(__name__)
# mongodb_client = PyMongo(app, uri='mongodb://localhost:27017/todo_db')
mongodb_client = PyMongo(app, uri='mongodb://mongo:27017/todo_db')
db = mongodb_client.db
user_collection = db['users']
leaderboard = db['leaderboard']
rooms_collection = db['rooms']
lobby_collection = db['lobbies']
# add user as {'username' : username, 'wins' : '0', 'loss' : '0'}
def add_user(username, password):
    """Create a new user record and seed their leaderboard score at 0."""
    # NOTE(review): the password is stored exactly as given — hashing, if
    # any, must happen in the caller; verify before production use.
    record = {'username': username, 'password': password, 'wins': 0, 'loss': 0, 'draw': 0}
    user_collection.insert_one(record)
    # Leaderboard documents are shaped {username: score}.
    leaderboard.insert_one({username: 0})
def check_for_user(username):
    """Return the user's document, or None when no such username exists."""
    # find_one already yields None on a miss, so its result can be
    # returned directly.
    return user_collection.find_one({'username': username})
def update_password(username, password):
user = user_collection.find_one({'username': username})
wins = user['wins']
losses = user['loss']
draws = user['draw']
new_record = {'$set': {'username': username, 'password': password, 'wins': wins, 'loss': losses, 'draw': draws}}
user_collection.update_one({'username': username}, new_record)
# add a win or loss to the users stats
def update_player_stats(username: str, stat_to_change: str, increment: int) -> None:
    """Adjust one of a user's win/loss/draw counters, then refresh their score.

    Parameters:
        username: name of an existing user (the record must already exist).
        stat_to_change: one of 'wins', 'loss' or 'draw'; any other value
            rewrites the record unchanged.
        increment: amount added to the chosen counter (may be negative).
    """
    record = user_collection.find_one({'username': username})
    wins = record['wins']
    loss = record['loss']
    draws = record['draw']
    if stat_to_change == 'wins':
        wins += increment
    elif stat_to_change == 'loss':
        loss += increment
    elif stat_to_change == 'draw':
        draws += increment
    new_record = {'$set': {'username': username, 'wins': wins, 'loss': loss, 'draw': draws}}
    user_collection.update_one({'username': username}, new_record)
    # Recompute the derived leaderboard score after the stat change.
    update_leaderboard(record['username'])
# change users score to {'username' : username, 'score' : new_score}... or insert if not there
# score will be an integer that ranks the player based on # games played and W/L ratio
def update_leaderboard(username):
    """Recompute and store *username*'s leaderboard score.

    Score formula: games_played * (wins - loss) when the win/loss
    differential is positive, otherwise games_played * 0.5.
    """
    user = user_collection.find_one({'username': username})
    old_record = leaderboard.find({})
    old_score = None
    # Locate the user's current score. Leaderboard docs look like
    # {username: score}; popitem() relies on the username field being the
    # last key of the returned document (after '_id').
    # NOTE(review): confirm this key-ordering assumption against the driver.
    for record in old_record:
        data = record.popitem()
        if data[0] == user['username']:
            old_score = data[1]
    games_played = user['wins'] + user['loss']
    win_loss = user['wins'] - user['loss']
    new_score = 0
    if win_loss > 0:
        new_score = games_played * win_loss
    else:
        new_score = games_played * 0.5
    # Match the old document by its previous score to replace it in place.
    new_record = {'$set': {user['username']: new_score}}
    leaderboard.update_one({user['username']: old_score}, new_record)
# returns a dictionary of form {rank : [score, username]}
def get_leaderboard():
    """Return the leaderboard as ``{rank: [score, username]}`` with rank 1 best."""
    records = leaderboard.find({})
    record_list = []
    # add all the users to a List of List to be sorted by score
    for record in records:
        item = record.popitem()
        username = item[0]
        # NOTE(review): int() truncates fractional scores (the 0.5-multiplier
        # case in update_leaderboard) — verify this is intended.
        score = int(item[1])
        record_list.append([score, username])
    # Sort ascending by score...
    sorted_list = sorted(record_list, key=itemgetter(0))
    return_leaderboard = {}
    # ...then number entries from len(...) down to 1, so the highest score
    # ends up with rank 1.
    rank = len(record_list)
    for user in sorted_list:
        return_leaderboard[rank] = user
        rank -= 1
    return return_leaderboard
def drop(collection):
collection.drop()
def assign_room(username, room):
record = {'username': username, 'room': room}
if get_users_room(username) is not None:
rooms_collection.update_one({'username': username}, {"$set": record})
else:
rooms_collection.insert_one(record)
def get_users_room(username):
return rooms_collection.find_one({'username': username})
def delete_rooms():
rooms_collection.delete_many({})
def create_lobby(lobby, username):
lobby_collection.insert_one({'lobby': lobby, 'user1': username})
def get_lobbies():
lobbies = list(lobby_collection.find({}))
ret_val = []
for lobby in lobbies:
ret_val.append(lobby.get('lobby'))
return ret_val
def get_lobby(username):
return lobby_collection.find_one({'user1': username})
def delete_lobby(lobby):
lobby_collection.delete_one({'lobby': lobby})
def delete_lobbies():
lobby_collection.delete_many({})
| rickyjorgensen2000/cse312 | flaskr/db.py | db.py | py | 4,316 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 96,
"usage_type": "call"
}
] |
11580020473 | import mysql.connector
from tabulate import tabulate
import getpass
def login():
    """Prompt for MySQL credentials, connect, and select or create a database.

    On success, stores the live connection and a buffered cursor in the
    module globals ``connection`` / ``app_cursor`` and enters the main menu.
    On a connection error, reports the problem and re-prompts recursively.
    """
    try:
        global app_cursor
        global connection
        # Fix: read the password with getpass so it is not echoed to the
        # console (the `getpass` module was imported but never used).
        connection = mysql.connector.connect(user=input('Username: '),
                                             password=getpass.getpass('Password: '),
                                             host=input('Host: '))
        app_cursor = connection.cursor(buffered=True)
        app_cursor.execute("show databases")
        app_database = input('Database: ')
        database_selected = False
        # Each fetched row is a 1-tuple containing a database name.
        for row in app_cursor.fetchall():
            if app_database in row:
                # NOTE: identifiers cannot be parameterized in SQL; the name
                # comes from trusted operator input at the console here.
                app_cursor.execute("use %s" % app_database)
                print('\n', app_database, " is now the selected database.", '\n')
                database_selected = True
                break
        if not database_selected:
            # Database does not exist yet: create it, then select it.
            app_cursor.execute("create database %s" % app_database)
            app_cursor.execute("use %s" % app_database)
            print('\n', app_database, " is now the selected database.", '\n')
        table_menu()
    except mysql.connector.errors.ProgrammingError:
        print("\nEnter valid Username and Password!!\n")
        login()
    except mysql.connector.errors.InterfaceError:
        print("\nEnter valid Host name.\n")
        login()
    except mysql.connector.errors.DatabaseError:
        print("\nSomething went wrong try again.\n")
        login()
def table_menu():
# This function gives the user the menu for operation
# this is the main menu there is another menu for performing
# operations on the table and it will be triggered on users demand.
print('''To perform given functions enter the numerical value\nassigned to the function:-\n
1 => Create Table.
2 => Perform Operations on Table.
3 => To check Stock Tables in the selected Database.
4 => Delete table.
5 => Logout and exit.
Note:- To terminate any operation you selected by
mistake enter '?' symbol it will take you back
to the menu.
''')
try:
def table_menu_functions(a):
if a == 1:
# This set of code will be executed when user wants to create table.
# By taking a string input for table name.
# If the table of given name already exists in the selected database,
# the function will be again called with parameter 1
name = str(input("Enter table Name: "))
if name == '?':
table_menu()
else:
try:
app_cursor.execute('''Create table %s(
Id varchar (255) not null primary key,
Name varchar(255) not null,
Category varchar(255) not null,
Price int,
Stock int)''' % name)
print("Table Created successfully.\n")
connection.commit()
table_menu()
except mysql.connector.errors.ProgrammingError:
print("Table of this name already exists")
table_menu_functions(1)
elif a == 4:
# This set of code if for choice 4 that is for is for deleting table from selected database.
# By taking a string input and further asking for confirmation for deleting the table.
# If table not exists in the database then the exception is handled in except block.
name = str(input("Enter table Name: "))
try:
if name == '?':
table_menu()
else:
confirmation = str(input("Are you sure you want to delete the above table (y/n): "))
confirmation.lower()
if confirmation == 'y':
app_cursor.execute("Drop table %s" % name)
print("Table %s is deleted permanently.\n" % name)
connection.commit()
table_menu()
elif confirmation == 'n':
print("Table %s is not deleted\n." % name)
table_menu()
except mysql.connector.errors.ProgrammingError:
print("Table of this name do not exist\n.")
table_menu()
elif a == 5:
# This set of code is choice 5 that is Save and exit application.
# Its saves all the query processed and closes the connection and cursor.
# After that it leave a vague input statement to prevent to sudden close of console window.
import sys
connection.commit()
app_cursor.close()
connection.close()
input("Press any key to exit..")
sys.exit()
elif a == 3:
# This set of code is choice 3 that is to print the list of stock tables in the selected database.
# It print the list in a Table format with the help of Tabulate function of Tabulate module.
app_cursor.execute("Show tables")
data = app_cursor.fetchall()
tables = []
for i in data:
tables.append(i)
print("\n", tabulate(tables, headers=['Names'], tablefmt='psql'), "\n")
table_menu()
elif a == 2:
# This set of code is for performing operations on the table.
# By taking input of the table name on which user wants to perform functions.
# It checks whether the given table name exists in the database or not.
# If exists it triggers the function function_menu(args: Table name).
# If not exists it will ask again for input.
name = str(input("Enter table Name: "))
if name == '?':
table_menu()
else:
app_cursor.execute("show tables")
existance = False
for i in app_cursor:
for j in i:
if j == name:
existance = True
break
else:
continue
if existance is True:
function_menu(name)
else:
print("\nEnter valid table name. This table does not exist in the current database.\n")
choice = int(input("To go back to main menu enter 1 and To re-enter the table name enter 2."
"(1/2)"))
if choice == 1:
table_menu()
elif choice == 2:
table_menu_functions(2)
else:
print("Invalid input directing back to main menu.")
table_menu()
else:
# If users enter anything other than listed in menu then this code will be executed.
# It again asks for the input from the user.
print("Enter Number from The menu only.")
choice = int(input("Your Choice: "))
table_menu_functions(choice)
table_menu_choice = int(input("Your Choice: "))
table_menu_functions(table_menu_choice)
except ValueError:
# If user enter anything other than integer.
print("Enter valid input.")
table_menu()
def function_menu(name):
    """Interactive menu for editing the stock table *name*.

    Choices: print the table, add a product, delete a product, per-product
    updates (delegated to product_update), export to Excel, or go back.
    Relies on the module-level MySQL objects `connection`/`app_cursor` and on
    tabulate() for display; menus "loop" by recursion, matching the rest of
    this script.
    """
    # `headers` is shared with product_update(), which prints the same columns.
    global headers
    headers = ['Id', 'Name', 'Category', 'Price', 'Stock']
    print('''To perform given functions enter the numerical value\nassigned to the function:-\n
1 => To print The Stock Table.
2 => To add a product to stock table.
3 => To delete a product from the stock table.
4 => To Perform operations on a product.
5 => To export data of table to excel file.
6 => To go back to previous menu.
Note:- To terminate any operation you selected by
mistake enter '?' symbol it will take you back
to the menu.''')
    try:
        choice = int(input("Your choice: "))
        if choice == 1:
            # Table names cannot be bound as query parameters; `name` was
            # validated against SHOW TABLES by the caller before reaching here.
            app_cursor.execute("Select * from %s" % name)
            data = []
            for row in app_cursor:
                data.append(row)
            print(tabulate(data, headers=headers, tablefmt='psql'))
            function_menu(name)
        elif choice == 2:  # was a bare `if`; elif for consistency with the other branches
            # Collect the five product fields; '?' aborts back to the table menu.
            while True:
                try:
                    p_id = input("Enter the Product ID: ")
                    if p_id == '?':
                        table_menu()
                        break
                    else:
                        break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_name = input("Enter the Product Name: ")
                    # NOTE(review): unlike the ID prompt, '?' here re-prompts
                    # after table_menu() returns -- confirm intended.
                    if p_name == '?':
                        table_menu()
                    else:
                        break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_category = input("Enter the Product Category: ")
                    if p_category == '?':
                        table_menu()
                    else:
                        break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_price = int(input("Enter the Product Price: "))
                    break
                except ValueError:
                    print("Enter valid input.")
            while True:
                try:
                    p_quantity = int(input("Enter the Product stock: "))
                    break
                except ValueError:
                    print("Enter valid input.")
            # Values are bound as parameters to prevent SQL injection and
            # quoting bugs (the original interpolated user input directly).
            app_cursor.execute(
                "insert into %s values(%%s,%%s,%%s,%%s,%%s)" % name,
                (p_id, p_name, p_category, p_price, p_quantity))
            connection.commit()
            function_menu(name)
        elif choice == 3:
            p_id = input("Enter the Product ID of the product you want to delete: ")
            app_cursor.execute("select * from %s where Id=%%s" % name, (p_id,))
            data = []
            for row in app_cursor:
                data.append(row)
            print(tabulate(data, headers=headers, tablefmt='psql'))
            while True:
                # Typo fix: the prompt was missing the word "delete".
                conf = input("Are you sure you want to delete this product (y/n): ")
                if conf == 'y':
                    app_cursor.execute("delete from %s where Id=%%s" % name, (p_id,))
                    connection.commit()
                    break
                elif conf == 'n':
                    function_menu(name)
                    break
                else:
                    print("Enter valid input.")
            function_menu(name)
        elif choice == 6:
            table_menu()
        elif choice == 4:
            product_update(name)
        elif choice == 5:
            # Local import keeps the xlsxwriter dependency optional unless
            # the export feature is actually used.
            import xlsxwriter
            q = 0
            while q < 1:
                try:
                    filename = input("Enter file name: ")
                    print("File will be saved on the desktop")
                    workbook = xlsxwriter.Workbook("D:\\%s.xlsx" % filename)
                    worksheet = workbook.add_worksheet()
                    worksheet.write(0, 0, "ID")
                    worksheet.write(0, 1, "NAME")
                    worksheet.write(0, 2, "CATEGORY")
                    worksheet.write(0, 3, "PRICE")
                    worksheet.write(0, 4, "STOCK")
                    app_cursor.execute("SELECT * FROM %s" % name)
                    data = app_cursor.fetchall()
                    row = 1
                    coloumn = 0
                    for (a, b, c, d, e) in data:
                        worksheet.write(row, coloumn, a)
                        worksheet.write(row, coloumn + 1, b)
                        worksheet.write(row, coloumn + 2, c)
                        worksheet.write(row, coloumn + 3, d)
                        worksheet.write(row, coloumn + 4, e)
                        row = row + 1
                    workbook.close()
                    print("Data exported successfully to %s at D drive" % filename)
                    break
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt still
                    # works; any export failure is reported as before.
                    print("A file of this name already exists use a different name")
            function_menu(name)
    except ValueError:
        # User typed something non-numeric at the menu prompt.
        print("Enter valid input.")
        function_menu(name)
def product_update(name):
    """Sub-menu for updating a single product in table *name*.

    Choices: 1 stock, 2 name, 3 price, 4 category, 5 back.  Each updater
    first lists rows matching the entered product name, then updates by Id.
    '?' at a prompt returns to this menu.  Uses the module-level MySQL
    `connection`/`app_cursor` and the shared `headers` set by function_menu().
    """
    print('''To perform given functions enter the numerical value\nassigned to the function:-\n
1 => To update stock of product.
2 => To update name of product.
3 => To update price of product.
4 => To change category of product.
5 => To go back to previous menu.
Note:- To terminate any operation you selected by
mistake enter '?' symbol it will take you back
to the menu.''')
    try:
        choice = int(input("Your choice: "))
        if choice == 2:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                # Values are bound as parameters to avoid SQL injection; the
                # table name cannot be parameterized and was validated upstream.
                app_cursor.execute("select * from %s where Name=%%s" % name, (name_p,))
                data = []
                for row in app_cursor:
                    data.append(row)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change name: "))
                name_new = str(input("Enter the new name of the product: "))
                if name_new == '?' or id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Name=%%s where Id=%%s" % name, (name_new, id_p))
                    # BUG FIX: commit after the UPDATE; the commit previously
                    # sat in the '?' branch above, so name changes were never
                    # persisted.
                    connection.commit()
                    print("Product name updated successfully.")
                    product_update(name)
        elif choice == 1:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name=%%s" % name, (name_p,))
                data = []
                for row in app_cursor:
                    data.append(row)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change stock: "))
                stock_new = int(input("New stock of the product: "))
                if id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Stock=%%s where Id=%%s" % name, (stock_new, id_p))
                    print("Product Stock updated successfully.")
                    connection.commit()
                    product_update(name)
        elif choice == 5:
            function_menu(name)
        elif choice == 3:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name=%%s" % name, (name_p,))
                data = []
                for row in app_cursor:
                    data.append(row)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change price: "))
                price_new = int(input("New price of the product: "))
                if id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Price=%%s where Id=%%s" % name, (price_new, id_p))
                    print("Product Price updated successfully.")
                    connection.commit()
                    product_update(name)
        elif choice == 4:
            name_p = str(input("Enter the name of the product: "))
            if name_p == '?':
                product_update(name)
            else:
                app_cursor.execute("select * from %s where Name=%%s" % name, (name_p,))
                data = []
                for row in app_cursor:
                    data.append(row)
                print(tabulate(data, headers=headers, tablefmt='psql'))
                id_p = str(input("Enter the product id of product you want to change category: "))
                category_new = str(input("New Category of the product: "))
                if id_p == '?':
                    product_update(name)
                else:
                    app_cursor.execute("update %s set Category=%%s where Id=%%s" % name, (category_new, id_p))
                    print("Product Category updated successfully.")
                    connection.commit()
                    product_update(name)
    except ValueError:
        # Non-numeric input at one of the int() prompts.
        print("Enter valid input.")
        product_update(name)
# Entry point: login() is presumably defined earlier in this file (outside
# this excerpt) and drives the DB connection and the menus above -- confirm.
login()
| manavmittal05/InventoryManagement | ManavMittal_2021538_Master Stock-1.py | ManavMittal_2021538_Master Stock-1.py | py | 18,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 17,
"usage_type": "name"
},
{
"... |
39277796402 | from PIL import Image, ImageSequence
from generators import Generator
class OverlayGenerator(Generator):
    """Generator that composites an animated overlay GIF on top of an emoji."""

    # Overlay GIFs that may be used, mapped to the name fragment prefixed to
    # the generated emoji's name.
    ALLOWED_OVERLAYS = {
        "fire.gif": "burning",
        "fire2.gif": "burning",
        "sparkle.gif": "sparkling",
        "sparkle2.gif": "sparkling",
        "loving.gif": "loving",
    }

    def __init__(self):
        super().__init__('overlay', defaults={
            "overlay": "fire.gif"
        })

    def generate(self, original_name, input_path, output_dir, options):
        """Render each overlay frame over the emoji and write an animated GIF.

        Raises ValueError when options["overlay"] is not in ALLOWED_OVERLAYS.
        Returns (result of write_gif, '<overlay_name>_<original_name>').
        """
        options = {**self.defaults, **options}
        overlay_file = options["overlay"]
        if overlay_file not in OverlayGenerator.ALLOWED_OVERLAYS:
            raise ValueError("Unknown overlay " + overlay_file)
        overlay_name = OverlayGenerator.ALLOWED_OVERLAYS[overlay_file]
        overlay = Image.open(f"resources/{overlay_file}")
        emoji = self.load_image(input_path)
        # load_image returns a frame sequence; only the first frame is used,
        # i.e. the emoji is treated as static beneath the animated overlay.
        emoji = emoji[0]
        emoji_name = Generator.get_emoji_name_from_file(original_name)
        frames = []
        emoji_w, emoji_h = emoji.size
        palette = None
        for i, overlay_frame in enumerate(ImageSequence.Iterator(overlay)):
            canvas = Image.new("RGBA", emoji.size, (255, 255, 255))
            # GIF frames can carry local palettes; reuse the first frame's
            # palette so colors stay consistent across all frames.
            if palette is None:
                palette = overlay_frame.getpalette()
            else:
                overlay_frame.putpalette(palette)
            # overlay_frame.save(f'../output/{overlay_name}.{i:02}.gif', 'GIF')
            # cropped_frame = fire_frame.crop((0, 0, emoji_w, emoji_h))
            overlay_frame.thumbnail(canvas.size)
            overlay_frame = overlay_frame.convert('RGBA')
            canvas.paste(emoji, (0, 0), mask=emoji)
            offset = ((canvas.width - overlay_frame.width) // 2, (canvas.height - overlay_frame.height) // 2)
            if overlay_name == 'burning':
                # Pin flames to the bottom edge (5px overlap) instead of centering.
                offset = (0, emoji_h - overlay_frame.height + 5)
            canvas.paste(overlay_frame, offset, mask=overlay_frame)
            frames.append(canvas)
        return self.write_gif(frames, output_dir, emoji_name + ".gif", options), f'{overlay_name}_{original_name}'
| grdaneault/emojigen | api/generators/overlay.py | overlay.py | py | 2,116 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "generators.Generator",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "generators.Generator.get... |
15826968262 | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import *
from past.utils import old_div
from builtins import object
import pykov as pk
import emission.net.ext_service.otp.otp as otp
import emission.net.ext_service.geocoder.nominatim as geo
import emission.core.wrapper.trip_old as to
import emission.core.get_database as edb
import datetime
import random
import math
import urllib.request, urllib.error, urllib.parse
import json
import heapq
import time
import requests
import random
import os
# Geographic anchor used when randomizing points around campus.
CENTER_OF_CAMPUS = to.Coordinate(37.871790, -122.260005)
RANDOM_RADIUS = .3 # 300 meters around center of campus; for randomization
N_TOP_TRIPS = 3 # Number of top trips we return for the user to look at
class UserBase(object):
    """Registry of all users plus shared state (crowd areas, caches).

    Keeps a geocoding cache so repeated lookups skip extra service calls,
    and remembers the last query info so unchanged requests can be detected.
    """

    def __init__(self):
        self.users = []
        self.crowd_areas = {}
        self.last_info = {}  ## so we only call google maps if we change things
        self.old_trips = None
        self.geocode_cache = {}  # fewer calls to google maps

    def add_user(self, user):
        """Register a UserModel with this base."""
        self.users.append(user)

    def add_crowd(self, area):
        """Track a crowd Area, keyed by its name."""
        self.crowd_areas[area.name] = area

    def get_crowd_info(self, area_name):
        """Look up a previously added crowd Area by name."""
        return self.crowd_areas[area_name]

    def geocode_with_cache(self, place):
        """Geocode *place*, memoizing results (prints the coordinate, as before)."""
        coder = geo.Geocoder()
        if place not in self.geocode_cache:
            self.geocode_cache[place] = coder.geocode(place)
        result = self.geocode_cache[place]
        print(result)
        return result
# Module-level singleton shared by every UserModel instance.
the_base = UserBase()
class CampusTrip(object):
    """A scored candidate route across campus.

    Holds the four utility components (time, sweat, beauty, social), their
    sum, the trip duration in minutes, and the route's (lat, lng) points.
    """

    def __init__(self, score_list, time_duration, points, source):
        self.time = score_list[0]
        self.sweat = score_list[1]
        self.beauty = score_list[2]
        self.social = score_list[3]
        self.tot_score = sum(score_list)
        self.time_duration = time_duration / 60.0  # seconds -> minutes
        self.points = points
        self.source = source

    def make_points(self):
        """Flatten points to "lat,lng,lat,lng," -- note the trailing comma,
        which the browser-facing format expects."""
        return "".join("%s,%s," % (pt[0], pt[1]) for pt in self.points)

    def make_for_browser(self):
        """Semicolon-separated summary consumed by the web front end."""
        return '%s;%s;%s;%s;%s;%s' % (self.beauty, self.time, self.social, self.sweat, self.time_duration, self.make_points())

    def make_jsn(self):
        """JSON object with the individual scores, duration and raw points."""
        return json.dumps({"time" : self.time, "beauty" : self.beauty, "social" : self.social, "sweat" : self.sweat, "duration" : self.time_duration, "points" : self.points})

    def make_json(self):
        """JSON-encoded browser string (the string itself is the payload)."""
        return json.dumps(self.make_for_browser())

    def __repr__(self):
        return "total score : %f || source : %s || beauty : %f || sweat : %f || time : %f || social : %f" % (self.tot_score, self.source, self.beauty, self.sweat, self.time, self.social)

    def __eq__(self, other):
        # Two trips are equal when they follow exactly the same points.
        return self.make_points() == other.make_points()
class UserModel(object):
    """
    Per-user preference model.

    Holds a (pykov) utility vector over {time, sweat, scenery, social},
    fetches candidate routes from OTP, scores them against the utilities,
    and returns the best N_TOP_TRIPS as CampusTrip objects.
    """

    def __init__(self, has_bike=False):
        self.utilities = pk.Chain()
        self.has_bike = has_bike
        self.user_base = the_base  # shared module-level singleton registry
        self.user_base.add_user(self)
        ## Initialize utilities
        self.utilities["sweat"] = 0
        self.utilities["scenery"] = 0
        self.utilities["social"] = 0
        self.utilities["time"] = 0

    def get_top_choice_places(self, start_place, end_place):
        """Like get_top_choices_lat_lng, but takes human-readable place names."""
        start = self.user_base.geocode_with_cache(start_place)
        end = self.user_base.geocode_with_cache(end_place)
        return self.get_top_choices_lat_lng(start, end)

    def get_all_trips(self, start, end, curr_time=None):
        """Query OTP for candidate trips between two coordinates.

        curr_time defaults to "now".  Routes with BICYCLE when the user has
        a bike, WALK otherwise.
        """
        if curr_time is None:
            curr_time = datetime.datetime.now()
        curr_month = curr_time.month
        curr_year = curr_time.year
        curr_minute = curr_time.minute
        curr_day = curr_time.day
        curr_hour = curr_time.hour
        mode = "WALK"
        if self.has_bike:
            mode = "BICYCLE"
        # BUG FIX: os.environ is a mapping, not a callable -- the original
        # os.environ("OTP_SERVER") raised TypeError.  Also honor the computed
        # `mode` instead of hard-coding "WALK" (mode was previously unused).
        walk_otp = otp.OTP(os.environ["OTP_SERVER"]).route(start, end, mode, write_day(curr_month, curr_day, curr_year), write_time(curr_hour, curr_minute), False)
        return walk_otp.get_all_trips(0, 0, 0)

    def get_top_choices_lat_lng(self, start, end, curr_time=None, tot_trips=None):
        """Score all candidate trips and return the N_TOP_TRIPS best ones.

        Passing tot_trips directly (used by tests) skips the OTP query and
        flags the sweat computation as a test run.
        """
        testing = True
        if tot_trips is None:
            tot_trips = self.get_all_trips(start, end, curr_time)
            testing = False
        scores = []
        times = get_normalized_times(tot_trips)
        beauty = get_normalized_beauty(tot_trips)
        sweat = get_normalized_sweat(tot_trips, testing=testing)
        for i in range(len(times)):
            scores.append(self.get_score_for_trip(tot_trips[i], times[i], beauty[i], sweat[i]))
        return self.get_top_n(scores, N_TOP_TRIPS)

    def get_score_for_trip(self, trip, time, beauty, sweat):
        """Combine normalized factors with the user's utilities into a CampusTrip.

        Time and sweat are costs (negated); beauty and crowding are benefits.
        """
        crowd_score = 0
        lst_of_points = get_route(trip)
        for crowd in self.user_base.crowd_areas.values():
            crowd.update_times(trip.start_time)
            crowd_score += crowd.get_crowd()
        final_time = -(time * self.utilities["time"])
        final_sweat = -sweat * self.utilities["sweat"]
        final_beauty = (self.utilities['scenery'] * beauty)
        final_crowd = (self.utilities['social'] * crowd_score)
        final_score_tuple = (final_time, final_sweat, final_beauty, final_crowd)
        print("final_score_tuple : %s" % str(final_score_tuple))
        return CampusTrip(final_score_tuple, get_time_of_trip(trip), lst_of_points, "source")

    def get_top_n(self, lst_of_trips, n):
        """Return the n highest-scoring trips, best first."""
        return heapq.nlargest(n, lst_of_trips, key=lambda v: v.tot_score)

    def increment_utility(self, which):
        self.utilities[which] += 1

    def increase_utility_by_n(self, which, n):
        self.utilities[which] += n

    def normalize_utilities(self):
        self.utilities.normalize()

    def save_to_db(self):
        # NOTE(review): self.name is never assigned anywhere in this class,
        # so this raises AttributeError -- confirm where `name` should come from.
        db = edb.get_utility_model_db()
        db.insert({"utilities": self.utilities, "name": self.name})

    def delta(self, start, end):
        """
        Returns true if anything has changed and we should call google maps...
        Otherwise no.
        """
        if "start" not in self.user_base.last_info or "end" not in self.user_base.last_info or "utilities" not in self.user_base.last_info:
            # First query ever: nothing cached yet.
            return True
        return not (start == self.user_base.last_info["start"] and end == self.user_base.last_info["end"] and self.utilities == self.user_base.last_info["utilities"])

    def add_to_last(self, start, end):
        """Record the current query so delta() can detect unchanged requests."""
        self.user_base.last_info["utilities"] = self.utilities.copy()
        self.user_base.last_info["start"] = start
        self.user_base.last_info["end"] = end
def normalize_noises(noise_areas):
    """Normalize every area's sound distribution in place; return them as a new list."""
    for area in noise_areas:
        area.normalize_sounds()
    return list(noise_areas)
def get_time_of_trip(trip):
    """Trip duration via the difference's .seconds attribute (timedelta semantics,
    i.e. the seconds component -- not total_seconds)."""
    duration = trip.end_time - trip.start_time
    return duration.seconds
def get_normalized_times(lst_of_trips):
    """Each trip's duration as a fraction of the summed duration (pykov-normalized)."""
    counter = pk.Vector()
    for idx, trip in enumerate(lst_of_trips):
        counter[idx] = get_time_of_trip(trip)
    counter.normalize()
    return [counter[idx] for idx in range(len(lst_of_trips))]
def get_sweat_factor(trip, testing=False):
    """Estimate exertion from the trip's elevation change via a linear fit.

    NOTE(review): get_elevation_change() below is currently a stub that
    returns None, so chng[0] raises TypeError until it is re-implemented.
    """
    chng = get_elevation_change(trip, testing)
    print("chng : %s" % str(chng))
    return 71.112*chng[0] + 148.09
def get_normalized_sweat(lst_of_trips, testing=False):
    """Each trip's sweat factor as a fraction of the total (pykov-normalized)."""
    counter = pk.Vector()
    for idx, trip in enumerate(lst_of_trips):
        factor = get_sweat_factor(trip, testing)
        print("sweat_factor : %s" % factor)
        counter[idx] = factor
    counter.normalize()
    return [counter[idx] for idx in range(len(lst_of_trips))]
def get_normalized_beauty(lst_of_trips):
    """Each trip's beauty score as a fraction of the total (pykov-normalized)."""
    counter = pk.Vector()
    for idx, trip in enumerate(lst_of_trips):
        factor = get_beauty_score_of_trip(trip)
        print("beauty_factor : %s" % factor)
        counter[idx] = factor
    counter.normalize()
    return [counter[idx] for idx in range(len(lst_of_trips))]
class Area(object):
    """Rectangular campus region with optional beauty rating and noise series.

    bounding_box is (top_left, bottom_right) as (lat, lon) pairs; `times`
    collects visit timestamps and is used as a crowding proxy.
    """

    def __init__(self, name, tl, br, beauty=None, time_to_noise=None):
        self.name = name
        self.bounding_box = (tl, br)
        self.beauty = beauty
        self.time_to_noise = time_to_noise  # timestamp -> noise reading
        self.times = set()

    def point_in_area(self, lat, lng):
        """True when (lat, lng) lies inside this area's bounding box."""
        return in_bounding_box(lat, lng, self.bounding_box)

    def add_time(self, time):
        """Record a visit timestamp (set semantics: duplicates collapse)."""
        self.times.add(time)

    def get_crowd(self):
        """Crowding proxy: number of recorded visit times."""
        return len(self.times)

    def update_times(self, time_by):
        # BUG FIX: rebuild the set instead of removing elements while
        # iterating it, which raises "RuntimeError: Set changed size during
        # iteration" in CPython.
        self.times = {t for t in self.times if not t < time_by}

    def update_to_now(self):
        """Drop every visit older than the current wall-clock time."""
        self.update_times(datetime.datetime.now())

    def normalize_sounds(self):
        """Replace time_to_noise with a normalized pykov Vector of the readings."""
        counter = pk.Vector()
        for k, v in self.time_to_noise.items():
            counter[k] = v
        counter.normalize()
        self.time_to_noise = counter

    def __repr__(self):
        return "beauty : %s" % (self.beauty)
def in_bounding_box(lat, lon, bounding_box):
    """True when (lat, lon) lies inside bounding_box = (top_left, bottom_right),
    where top_left carries the max latitude / min longitude."""
    top_left, bottom_right = bounding_box
    return bottom_right[0] <= lat <= top_left[0] and top_left[1] <= lon <= bottom_right[1]
def parse_noise():
    """Load the per-area noise time series and return the four campus Areas
    (sproul, glade, wellmen, leconte) with time_to_noise populated.

    The CSV holds one reading per area per row, sampled every 10 minutes
    starting at 06:00 (the 2040-10-10 date is an arbitrary placeholder).
    """
    sproul_noises, glade_noises, wellmen_noises, leconte_noises = {}, {}, {}, {}
    sproul_tl, sproul_br = (37.870637, -122.259722), (37.868926, -122.259005)
    glade_tl, glade_br = (37.87359, -122.260098), (37.872707, -122.258687)
    wellmen_tl, wellmen_br = (37.873045, -122.263377), (37.872501, -122.261803)
    lecont_tl, lecont_br = (37.873278, -122.256959), (37.872277, -122.25639)
    time = datetime.datetime(2040, 10, 10, 6, 0, 0)
    td = datetime.timedelta(minutes=10)
    # BUG FIX: advance the timestamp by 10 minutes per row; previously `td`
    # was unused and every row overwrote the same single dict key.  The file
    # is also closed deterministically via the context manager.
    with open("emission/user_model_josh/noise_data.csv") as noise_file:
        for line in noise_file:
            parts = line.split(',')
            sproul_noises[time] = float(parts[0])
            glade_noises[time] = float(parts[1])
            wellmen_noises[time] = float(parts[2])
            leconte_noises[time] = float(parts[3])
            time += td
    sproul = Area("sproul", sproul_tl, sproul_br, time_to_noise=sproul_noises)
    glade = Area("glade", glade_tl, glade_br, time_to_noise=glade_noises)
    wellmen = Area("wellmen", wellmen_tl, wellmen_br, time_to_noise=wellmen_noises)
    leconte = Area("leconte", lecont_tl, lecont_br, time_to_noise=leconte_noises)
    return [sproul, glade, wellmen, leconte]
def parse_beauty():
    """Load the beauty-rating CSV and return one Area per row.

    Row layout: name at 0, top-left lat/lng at 1/2, bottom-right lat/lng at
    5/6, integer rating at 9.  The file is closed via a context manager
    (the original leaked the handle).
    """
    beauty_areas = []
    with open("emission/user_model_josh/beauty.csv") as beauty_file:
        for beauty_line in beauty_file:
            beauty_line = beauty_line.split(',')
            name = beauty_line[0]
            tl = (float(beauty_line[1]), float(beauty_line[2]))
            br = (float(beauty_line[5]), float(beauty_line[6]))
            beauty = int(beauty_line[9])
            beauty_areas.append(Area(name, tl, br, beauty=beauty))
    return beauty_areas
def get_noise_score(lat, lng, noises, time):
    """Sum the noise readings of every mapped area containing the point.

    Falls back to .5 (a neutral default) when the point is in no mapped
    area or every contributing reading is zero.  Removed the unused `tot`
    accumulator from the original.
    """
    to_return = 0
    for noise_area in noises:
        if noise_area.point_in_area(lat, lng):
            to_return += get_closest(time, noise_area)
    if to_return > 0:
        return to_return
    return .5  ## if point isnt in any mapped area return the average
def get_closest(time, area):
    """Return the first noise reading whose timestamp is within 10 minutes
    of *time*; 0 when no reading qualifies."""
    window = datetime.timedelta(minutes=10)
    for stamp, reading in area.time_to_noise.items():
        if time - stamp < window:
            return reading
    return 0
def get_beauty_score(lat, lng, beauties):
    """Beauty rating of the first mapped area containing (lat, lng); falls
    back to the mean rating across all areas when no area matches."""
    total = 0
    for zone in beauties:
        total += zone.beauty
        if zone.point_in_area(lat, lng):
            return zone.beauty
    # Point is in no mapped area: return the average rating.
    return float(total) / float(len(beauties))
def get_beauty_score_of_trip(trip):
    """Average per-point beauty across every GPS point of *trip*."""
    areas = parse_beauty()
    total = 0
    count = 0
    for section in trip.sections:
        for point in section.points:
            count += 1
            total += get_beauty_score(point.get_lat(), point.get_lon(), areas)
    return float(total) / float(count)
def get_noise_score_of_trip(trip):
    """Average per-point noise across every GPS point of *trip*.

    BUG FIX: the original referenced uninitialized `tot_points`, returned an
    undefined `beauty_score`, and called get_noise_score() without its
    required `time` argument -- it could never run.
    """
    noises = parse_noise()
    noise_score = 0
    tot_points = 0
    for section in trip.sections:
        for point in section.points:
            tot_points += 1
            # NOTE(review): trip.start_time is used as the reference time for
            # the noise lookup (matching its use elsewhere) -- confirm.
            noise_score += get_noise_score(point.get_lat(), point.get_lon(), noises, trip.start_time)
    return float(noise_score) / float(tot_points)
def get_route_dict(trip):
    """Convert a sequence of (lat, lng) pairs into maps-style dicts."""
    return [{'lat': pt[0], 'lng': pt[1]} for pt in trip]
def get_route(trip):
    """Down-sampled route for *trip*: every other GPS point across all
    sections, bracketed by the exact start and end locations, as (lat, lon)
    tuples."""
    route = [(trip.trip_start_location.get_lat(), trip.trip_start_location.get_lon())]
    for idx, point in enumerate(p for s in trip.sections for p in s.points):
        if idx % 2 == 0:
            route.append((point.get_lat(), point.get_lon()))
    route.append((trip.trip_end_location.get_lat(), trip.trip_end_location.get_lon()))
    return route
def write_day(month, day, year):
    """Format a date as "month-day-year" for the OTP query string."""
    return "{}-{}-{}".format(month, day, year)
def write_time(hour, minute):
    """Format a clock time as "hour:minute" for the OTP query string."""
    return "{}:{}".format(hour, minute)
def get_one_random_point_in_radius(crd, radius):
    """Pick a uniformly distributed random coordinate within *radius* km of *crd*.

    # From https://gis.stackexchange.com/questions/25877/how-to-generate-random-locations-nearby-my-location
    """
    radius_deg = kilometers_to_degrees(radius)
    lon0 = crd.get_lon()
    lat0 = crd.get_lat()
    r = radius_deg * math.sqrt(random.random())
    theta = 2 * math.pi * random.random()
    dx = r * math.cos(theta)
    dy = r * math.sin(theta)
    # NOTE(review): the original divides by cos(lat0) with lat0 in degrees,
    # while math.cos expects radians -- confirm this is intended.
    dx = float(dx) / float(math.cos(lat0))  # To account for Earth curvature stuff
    return to.Coordinate(dy + lat0, dx + lon0)
def kilometers_to_degrees(km):
    """Rough conversion: Earth's ~40000 km circumference spans 360 degrees."""
    return (float(km) / float(40000)) * 360
def str_time_to_datetme(str_time):
    """Parse an "H:M" string into a datetime on the placeholder date
    2040-10-10 (matching the noise-data timestamps)."""
    parts = str_time.split(":")
    return datetime.datetime(2040, 10, 10, int(parts[0]), int(parts[1]), 0)
def make_user_from_jsn(jsn, base):
    """Build a UserModel from a device JSON payload.

    The payload's value field is "start;end;timeMode[;time];bike;t;s;sc;so".
    Returns {"user", "start", "end", "time_info"} where time_info carries
    whether the user wants to leave (vs. arrive) and when.
    """
    raw = jsn["objects"]["Computer"]["streams"]["userData"]["points"][0]["value"]
    fields = raw.split(";")
    start = base.geocode_with_cache(fields[0])
    end = base.geocode_with_cache(fields[1])
    time_info = {}
    print(fields)
    timing_mode = fields[2]
    if timing_mode == "leaveNow":
        time_info["leave"] = True
        time_info["when"] = datetime.datetime.now()
        print("leaveNow")
    elif timing_mode == "leaveAt":
        time_info["leave"] = True
        time_info["when"] = str_time_to_datetme(fields[3])
        print("leaveAt")
    elif timing_mode == "thereBy":
        time_info["leave"] = False
        time_info["when"] = str_time_to_datetme(fields[3])
        print("arriveAt")
    user = UserModel(get_bike_info(fields[4]))
    user.increase_utility_by_n("time", int(fields[5]))
    user.increase_utility_by_n("sweat", int(fields[6]))
    user.increase_utility_by_n("scenery", int(fields[7]))
    user.increase_utility_by_n("social", int(fields[8]))
    user.utilities.normalize()
    print("utilities : %s" % user.utilities)
    return {"user": user, "start": start, "end": end, "time_info": time_info}
def get_bike_info(bike_str):
    """Map the transport-mode field to a has-bike flag: "walk" -> False,
    anything else -> True."""
    return bike_str != "walk"
def get_elevation_change(trip, testing=False):
    # TODO: re-implement using the open elevation API
    # NOTE(review): currently a stub returning None; get_sweat_factor()
    # indexes the result (chng[0]) and will raise TypeError until this is
    # implemented.
    pass
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this module, so running
    # the file as a script raises NameError -- confirm the intended entry point.
    main()
| e-mission/e-mission-server | emission/analysis/modelling/user_model_josh/utility_model.py | utility_model.py | py | 16,233 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "emission.core.wrapper.trip_old.Coordinate",
"line_number": 29,
"usage_type"... |
41643896349 | from _GLOBAL_OPTIONS_ import factionsOptionMenu, addPremiumItems, addRevive, AddNightmareTickets, removeAds, unlockProfiles
from _PROFILE_OPTIONS_ import addItemsMenu, changeUsername, addCash, setFreeSkillReset, setLevel, setBlackStronboxes, addBlackKeys, addAugmentCores, setSupportItems, addMultiplayerStats
from _UTILS_ import mainTitle
from _EDIT_MANUALLY_ import profileManualEdit
from _SET_PROFILE_PATH_ import setProfilePath
from _FIX_INVENTORY_CRASH_ import fixInventory
from os import _exit, path
from win32console import SetConsoleTitle
from string import ascii_letters
from time import sleep
from json import dump, load
from msvcrt import getch, kbhit
from sys import stdout
# Menu labels, shown in order; each entry is selected with the letter key
# at its index (a, b, c, ...) via string.ascii_letters[26 + i].
mainMenuSelection = ['Global', 'Profile', 'Edit manually', 'Settings', 'About', 'Exit']
configMenuSelection = ['Set Profile', 'Set Profile Path Folder', 'Back up Profile.save [WORK IN PROGRESS]', 'Fix Inventory', 'Back']
globalMenuSelection = ['Factions', 'Premium Items', 'Revive Tokens', 'Premium Nightmare Tickets', 'Remove ads (ADS ON MOBILE)', 'Unlock profile (5-6)', 'Back']
profileMenuSelection = ['Add items', 'Change username', 'Add SAS Cash', 'Set free skill reset', 'Set level', 'Add black Strongboxes', 'Add random Strongboxes [PLACE HOLDER (THIS FEATURE IS WORK IN PROGRESS)]', 'Add black keys', 'Add augment cores', 'Add support items', 'Set stats', 'Back']
def about():
    """Show the credits screen and return to the main menu on any key press."""
    mainTitle()
    print('''
Developed by: <\\>#0077 | 0daxelagnia
Special thanks to: BlapertureMesa ( cso-idn-player ) and hemisemidemipresent
Official Github repository: https://github.com/0daxelagnia/SAS4Tool/
Latest version: 2.0.0
Made with <3 for the SAS 4 cheats community!
(Press any key to go back)''')
    # Short grace period so a held key from the previous menu isn't consumed.
    sleep(0.25)
    # Busy-wait until a key is pressed (msvcrt.kbhit); NOTE(review): this
    # polls at full CPU -- a short sleep inside the loop would be kinder.
    while True:
        if kbhit():
            return mainMenu()
def globalMenu():
    """Menu for account-wide tweaks; each action returns to the main menu."""
    SetConsoleTitle('SAS4Tool - Global Menu')
    mainTitle()
    for idx, entry in enumerate(globalMenuSelection):
        print(f'[{ascii_letters[26 + idx]}] - {entry}')
    sleep(0.25)
    # Key -> handler dispatch; b'g' is "Back" and just returns to the main menu.
    actions = {
        b'a': factionsOptionMenu,
        b'b': addPremiumItems,
        b'c': addRevive,
        b'd': AddNightmareTickets,
        b'e': removeAds,
        b'f': unlockProfiles,
    }
    while True:
        if not kbhit():
            continue
        key = getch()
        if key in actions:
            actions[key]()
            return mainMenu()
        if key == b'g':
            return mainMenu()
def profileMenu():
    """Per-profile cheat menu; dispatches each key to its editor function."""
    SetConsoleTitle('SAS4Tool - Profile Menu')
    mainTitle()
    for idx, entry in enumerate(profileMenuSelection):
        print(f'[{ascii_letters[26 + idx]}] - {entry}')
    sleep(0.25)
    # Key -> handler dispatch.  b'g' ("Add random Strongboxes") is a
    # placeholder in the original and deliberately does nothing; like any
    # unhandled key it keeps polling.  b'l' is "Back".
    actions = {
        b'a': addItemsMenu,
        b'b': changeUsername,
        b'c': addCash,
        b'd': setFreeSkillReset,
        b'e': setLevel,
        b'f': setBlackStronboxes,
        b'h': addBlackKeys,
        b'i': addAugmentCores,
        b'j': setSupportItems,
        b'k': addMultiplayerStats,
    }
    while True:
        if not kbhit():
            continue
        key = getch()
        if key in actions:
            actions[key]()
            return mainMenu()
        if key == b'l':
            return mainMenu()
def setProfileConfig(consoleProfileList):
    """Let the user pick one of the six profile slots and persist the choice
    into config.json (both the console label and the internal save key)."""
    SetConsoleTitle('SAS4Tool - Set profile')
    mainTitle()
    print('Select a profile:\n')
    for idx, profile in enumerate(consoleProfileList):
        print(f'[{ascii_letters[26 + idx]}] - {profile}')
    sleep(0.25)
    # Key -> (console label, internal profile key) for the six slots.
    slots = {
        b'a': ('Profile 1', 'Profile0'),
        b'b': ('Profile 2', 'Profile1'),
        b'c': ('Profile 3', 'Profile2'),
        b'd': ('Profile 4', 'Profile3'),
        b'e': ('Profile 5', 'Profile4'),
        b'f': ('Profile 6', 'Profile5'),
    }
    with open('config.json', 'r+') as f:
        # Read the current config, then rewind/truncate so the updated dict
        # fully replaces the old contents.
        data = load(f)
        f.seek(0)
        f.truncate()
        while True:
            if not kbhit():
                continue
            key = getch()
            if key in slots:
                console_name, internal_name = slots[key]
                data['consoleDefaultProfile'] = console_name
                data['defaultProfile'] = internal_name
                dump(data, f)
                break
def configMenu():
    """Settings menu: pick profile slot, set save path, (stub) backup, fix inventory."""
    consoleProfileList = ['Profile 1', 'Profile 2', 'Profile 3', 'Profile 4', 'Profile 5', 'Profile 6']
    SetConsoleTitle('SAS4Tool - Config Menu')
    mainTitle()
    for idx, entry in enumerate(configMenuSelection):
        print(f'[{ascii_letters[26 + idx]}] - {entry}')
    sleep(0.25)
    while True:
        if not kbhit():
            continue
        key = getch()
        if key == b'a':
            setProfileConfig(consoleProfileList)
            return mainMenu()
        if key == b'b':
            setProfilePath()
            return mainMenu()
        if key == b'c':
            # Backup is still work-in-progress; behaves like "Back".
            return mainMenu()
        if key == b'd':
            fixInventory()
            return mainMenu()
        if key == b'e':
            return mainMenu()
def mainMenu():
    """Top-level menu; dispatches to the sub-menus until the user exits."""
    SetConsoleTitle('SAS4Tool - Main Menu')
    mainTitle()
    for idx, entry in enumerate(mainMenuSelection):
        print(f'[{ascii_letters[26 + idx]}] - {entry}')
    sleep(0.25)
    while True:
        if not kbhit():
            continue
        key = getch()
        if key == b'a':
            return globalMenu()
        if key == b'b':
            return profileMenu()
        if key == b'c':
            profileManualEdit()
            return mainMenu()
        if key == b'd':
            configMenu()
            return mainMenu()
        if key == b'e':
            about()
            return mainMenu()
        if key == b'f':
            # exit() raises SystemExit; the bare except catches it and falls
            # back to os._exit so the console closes even from nested menus.
            try:
                stdout.flush()
                exit()
            except:
                stdout.flush()
                _exit(0)
if __name__ == '__main__':
    # Seed config.json with defaults when it is absent or empty, then start
    # the interactive menu loop.
    contents = {'version': '2.0.0', 'developer': '<\\>#0077', 'defaultProfile': 'Profile0', 'consoleDefaultProfile': 'Profile 1', "profileSavePath": ""}
    if not path.exists('config.json') or path.getsize('config.json') == 0:
        with open('config.json', 'w') as f:
            dump(contents, f, indent=4)
    mainMenu()
{
"api_name": "_UTILS_.mainTitle",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "msvcrt.kbhit",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "win32console.SetConsoleTitl... |
37407869802 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import os
import shutil
df = pd.read_csv('../data/speakers_all.csv')
df.drop(columns=['Unnamed: 9', 'Unnamed: 10', 'Unnamed: 11'], inplace=True)
df_full = df[df['file_missing?'] == False]
df_full['sex'] = df_full['sex'].apply(lambda x: 'female' if x == 'famale' else x)
top10languages = df_full['native_language'].value_counts()[:10].index.values
train_set_filenames = []
test_set_filenames = []
for language in top10languages:
X = df_full[df_full['native_language'] == language]['filename'].values
y = [language] * len(X)
X_train, X_test, y_train, y_test = train_test_split(X, y)
train_set_filenames.append(X_train)
test_set_filenames.append(X_test)
train_names = []
for lst in train_set_filenames:
for file in lst:
train_names.append(file)
test_names = []
for lst in test_set_filenames:
for file in lst:
test_names.append(file)
df_full['train_test_none'] = df_full['filename'].apply(lambda x: "train" if x in train_names else "test" if x in test_names else "none")
df_full[df_full['train_test_none'] != 'none'].to_csv("../data/train_test.csv")
current_dir = "../data/recordings/wav_16khz"
for idx, row in df_full.iterrows():
if row['train_test_none'] == 'none':
continue
elif row['train_test_none'] == 'train':
new_path = os.path.join("../data/recordings/train_set", row['native_language'], row['filename'] + ".wav")
elif row['train_test_none'] == 'test':
new_path = os.path.join("../data/recordings/test_set", row['native_language'], row['filename'] + ".wav")
current_path = os.path.join(current_dir, row['filename'] + ".wav")
shutil.copyfile(current_path, new_path) | acaldwell93/Accent-Classification | src/create_train_test_directories.py | create_train_test_directories.py | py | 1,769 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "... |
2888068360 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import logging
from tornado.escape import json_encode
from tornado.escape import json_decode
from tornado.escape import utf8
from .constant import *
logger = logging.getLogger('server.' + __name__)
class JsonStream:
    """Wraps a Tornado IOStream, delivering delimiter-separated JSON messages.

    Reads repeatedly up to bDELIMITER (from .constant), decodes each chunk as
    JSON, and hands the dict to on_read_json(), which subclasses override.
    """

    def __init__(self, stream, address):
        # stream: Tornado IOStream-like object; address: peer address, kept
        # for subclasses/logging.
        self._stream = stream
        self._address = address
        # Kick off the first read; on_read re-registers itself afterwards.
        self._stream.read_until(bDELIMITER, self.on_read)

    async def on_read(self, data):
        """Decode one delimited chunk, dispatch it, and schedule the next read.

        Decoding/dispatch errors are logged and swallowed so a bad message
        does not stop the stream.
        """
        try:
            if data:
                dict_ = json_decode(data)
                await self.on_read_json(dict_)
        except Exception as e:
            logger.error('Error occurs during decoding data from device.\n\
{}'.format(e), exc_info=True)
        self._stream.read_until(bDELIMITER, self.on_read)

    def on_read_json(self, dict_):
        # Subclass hook for each decoded message.  NOTE(review): on_read
        # awaits this call, so overrides should be coroutines; the base
        # version returns None and `await None` would raise TypeError --
        # confirm all subclasses override with `async def`.
        pass

    def send_json(self, dict_):
        """JSON-encode *dict_* and write it (with delimiter) unless the stream is closed."""
        if not self._stream.closed():
            self._stream.write(utf8(json_encode(dict_) + DELIMITER))
| All-less/exotic-server | lib/json_stream.py | json_stream.py | py | 1,023 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tornado.escape.json_decode",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tornado.escape.utf8",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torn... |
30509808806 | #!/home/meichen/anaconda3/bin/python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import glob
import matplotlib.ticker as mticker
from matplotlib.ticker import StrMethodFormatter, NullFormatter
def main():
data = pd.read_csv('pairsfile_rgp_select.csv',skipinitialspace=True)
data_array = np.array(data)
jpath = '/home/meichen/work1/SR_Attn/pair_events'
phase = 'P'
distance = '85'
pairs = {}
masterid = []
numberid = []
for i in np.arange(len(data_array[:,0])):
if data_array[i,0] not in pairs.keys():
masterid.append(data_array[i,0])
numberid.append(data_array[i,19])
pairs.setdefault(data_array[i,0],[]).append(data_array[i,6])
for key in list(pairs.keys()):
index=list(data_array[:,0]).index(key)
os.chdir('{}/master_{}'.format(jpath,key))
num = 0
fig = plt.figure(figsize=[4,2])
ax1 = fig.add_subplot(111)
for value in list(pairs.get(key)):
stn_num = glob.glob('egf_{}/{}/gcarc_{}/all*'.format(value,phase,distance))[0].split('.')[7]
d = np.genfromtxt('{}'.format(glob.glob('egf_{}/{}/gcarc_{}/all*'.format(value,phase,distance))[0]))
d = d[d[:,0]<2.0]
indices = [l for l,x in enumerate(data_array[:,0]) if x == key]
index = list(data_array[l,6] for l in indices).index(value)
fc = data_array[indices[0]+index,17]
a = data_array[indices[0]+index,15]
b = data_array[indices[0]+index,16]
ax1.loglog(d[:,0],d[:,1],'C{}'.format(num),label='{} stn:{}'.format(value,stn_num),lw=0.5,alpha=0.75)
ax1.loglog(d[:,0],func_Boatwright(d[:,0],a,b,fc),linestyle='--',color='grey',lw=1)
ax1.plot(fc,func_Boatwright(fc,a,b,fc),marker='v',markeredgecolor='C{}'.format(num),markerfacecolor='C{}'.format(num),linewidth=2)
num = num + 1
num = num % 9
ax1.set_xlabel('Frequency (Hz)',size=8)
ax1.set_ylabel('Spectral ratios',size=8)
ax1.set_xticks([0.025,0.1,1,2])
ax1.set_xticklabels([0.025,0.1,1,2])
ax1.yaxis.set_major_locator(mticker.LogLocator(subs=(0.3,1.0,)))
ax1.yaxis.set_major_formatter(mticker.ScalarFormatter())
ax1.yaxis.set_minor_formatter(mticker.NullFormatter())
ax1.tick_params(axis='both',which='both',labelsize=6)
print(key,phase,distance)
n = masterid.index(key)
ax1.set_title('# {}'.format(numberid[n]),size=10)
fig.tight_layout()
plt.savefig('/home/meichen/Research/SR_Attn/pair_events/figures/master_{}.pdf'.format(numberid[n]))
plt.close()
def func(x,a,b,c):
return a * (1 + x**2 / b**2)/(1 + x**2 / c**2)
def func_Boatwright(x,a,b,c):
return a * (1 + x**4/ b**4)**0.5 / (1+x**4/c**4)**0.5
main()
| meichenl95/SR_deepfocus | figures/egfs_one.py | egfs_one.py | py | 2,853 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number"... |
26014460878 | from ..h2o import random_forest
from .estimator_base import H2OEstimator
class H2ORandomForestEstimator(H2OEstimator):
def __init__(self,mtries=None,sample_rate=None,build_tree_one_node=None,ntrees=None,
max_depth=None,min_rows=None,nbins=None,nbins_cats=None,
binomial_double_trees=None,balance_classes=None,max_after_balance_size=None,
seed=None,offset_column=None,weights_column=None):
super(H2ORandomForestEstimator, self).__init__()
self.parms = locals()
self.parms = {k:v for k,v in self.parms.iteritems() if k!="self"}
self._estimator_type="regressor"
def fit(self,X,y=None,**params):
if y is not None:
if y.isfactor(): self._estimator_type="classifier"
self.__dict__=random_forest(x=X,y=y,**self.parms).__dict__.copy() | tomasgreif/h2o-3 | h2o-py/h2o/estimators/random_forest.py | random_forest.py | py | 807 | python | en | code | null | github-code | 36 | [
{
"api_name": "estimator_base.H2OEstimator",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "h2o.random_forest",
"line_number": 17,
"usage_type": "call"
}
] |
14112986280 | import os
import allure
import pytest
import logging
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import allure
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
class BasePage:
def __init__(self, driver):
self.driver = driver
@allure.step("Проверка URL: {expected_url}")
def check_url(self, expected_url):
assert self.driver.current_url == expected_url
logging.info(
f"Проверка URL: Ожидаемый URL - {expected_url}, "
f"текущий URL - {self.driver.current_url}"
)
class SbisHomePage(BasePage):
URL = "https://sbis.ru/"
@allure.step("Переход в раздел 'Контакты'.")
def go_to_contacts(self):
header_menu = self.driver.find_element(
By.CLASS_NAME,
"sbisru-Header__menu.ws-flexbox.ws-align-items-center"
)
contacts_link = header_menu.find_element(By.LINK_TEXT, "Контакты")
contacts_link.click()
logging.info("Переход на страницу 'Контакты' выполнен.")
@pytest.fixture
def browser():
download_folder = os.path.join(os.path.dirname(__file__), 'downloads')
chrome_options = Options()
chrome_options.add_experimental_option('prefs', {
'download.default_directory': download_folder,
'download.prompt_for_download': False,
'download.directory_upgrade': True,
'safebrowsing.enabled': False,
'profile.default_content_settings.popups': 0
})
chrome_options.add_argument('--disable-notifications')
chrome_options.add_argument('--disable-infobars')
chrome_options.add_argument('--disable-software-rasterizer')
chrome_options.add_argument('--safebrowsing-disable-download-protection')
chrome_options.add_argument('--disable-web-security')
driver = webdriver.Chrome(options=chrome_options)
yield driver
driver.quit()
def close_cookie_message(driver, class_name):
try:
close_cookie_message = driver.find_element(By.CLASS_NAME, class_name)
if close_cookie_message.is_displayed():
close_cookie_message.click()
logging.info("Закрытие сообщения о куки выполнено.")
except NoSuchElementException:
pass
| nasretdinovs/tensor_autotest | common.py | common.py | py | 2,451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "allure.step",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name... |
21418641611 |
import ismrmrd
import os
import itertools
import logging
import numpy as np
import numpy.fft as fft
import ctypes
import mrdhelper
from datetime import datetime
# Folder for debug output files
debugFolder = "/tmp/share/debug"
def groups(iterable, predicate):
group = []
for item in iterable:
group.append(item)
if predicate(item):
yield group
group = []
def conditionalGroups(iterable, predicateAccept, predicateFinish):
group = []
try:
for item in iterable:
if item is None:
break
if predicateAccept(item):
group.append(item)
if predicateFinish(item):
yield group
group = []
finally:
iterable.send_close()
def process(connection, config, metadata):
logging.info("Config: \n%s", config)
logging.info("Metadata: \n%s", metadata)
# Discard phase correction lines and accumulate lines until "ACQ_LAST_IN_SLICE" is set
for group in conditionalGroups(connection, lambda acq: not acq.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA), lambda acq: acq.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE)):
image = process_group(group, config, metadata)
logging.debug("Sending image to client:\n%s", image)
connection.send_image(image)
def process_group(group, config, metadata):
# Create folder, if necessary
if not os.path.exists(debugFolder):
os.makedirs(debugFolder)
logging.debug("Created folder " + debugFolder + " for debug output files")
# Format data into single [cha RO PE] array
data = [acquisition.data for acquisition in group]
data = np.stack(data, axis=-1)
logging.debug("Raw data is size %s" % (data.shape,))
np.save(debugFolder + "/" + "raw.npy", data)
# Fourier Transform
data = fft.fftshift(data, axes=(1, 2))
data = fft.ifft2(data)
data = fft.ifftshift(data, axes=(1, 2))
# Sum of squares coil combination
data = np.abs(data)
data = np.square(data)
data = np.sum(data, axis=0)
data = np.sqrt(data)
logging.debug("Image data is size %s" % (data.shape,))
np.save(debugFolder + "/" + "img.npy", data)
# Normalize and convert to int16
data *= 32767/data.max()
data = np.around(data)
data = data.astype(np.int16)
# Remove phase oversampling
nRO = np.size(data,0)
data = data[int(nRO/4):int(nRO*3/4),:]
logging.debug("Image without oversampling is size %s" % (data.shape,))
np.save(debugFolder + "/" + "imgCrop.npy", data)
# Format as ISMRMRD image data
image = ismrmrd.Image.from_array(data, acquisition=group[0])
image.image_index = 1
# Set field of view
image.field_of_view = (ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.x),
ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.y),
ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.z))
# Set ISMRMRD Meta Attributes
meta = ismrmrd.Meta({'DataRole': 'Image',
'ImageProcessingHistory': ['FIRE', 'PYTHON'],
'WindowCenter': '16384',
'WindowWidth': '32768'})
# Add image orientation directions to MetaAttributes if not already present
if meta.get('ImageRowDir') is None:
meta['ImageRowDir'] = ["{:.18f}".format(image.getHead().read_dir[0]), "{:.18f}".format(image.getHead().read_dir[1]), "{:.18f}".format(image.getHead().read_dir[2])]
if meta.get('ImageColumnDir') is None:
meta['ImageColumnDir'] = ["{:.18f}".format(image.getHead().phase_dir[0]), "{:.18f}".format(image.getHead().phase_dir[1]), "{:.18f}".format(image.getHead().phase_dir[2])]
xml = meta.serialize()
logging.debug("Image MetaAttributes: %s", xml)
logging.debug("Image data has %d elements", image.data.size)
image.attribute_string = xml
return image
| HMS-CardiacMR/MyoMapNet | InLine_Implementation/Code/simplefft.py | simplefft.py | py | 4,004 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "ismrmrd.ACQ_IS_PHASECORR_DATA",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "ismrmrd.... |
26493496083 |
from django.urls import path
from .import views
urlpatterns = [
path("", views.index, name="storehome"),
path("about/", views.about, name="aboutus"),
path("contact/", views.contact, name="contactus"),
path("Seller/", views.seller, name="sellerid"), #Seller/ is the html file name not the function name
path("search/", views.search, name="searchbar"),
path("productview/<int:getid>", views.prodView, name="productview1"),
path("checkout/", views.checkout, name="checkout"),
path("signup/", views.signup, name="signup"),
path("handlelogin/", views.handlelogin, name="handlelogin"),
path("handlelogout/", views.handlelogout, name="handlelogout")
] | princegupta003005/E-commerce-Website | Anapp/store/urls.py | urls.py | py | 697 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
495049737 | import imp
import importlib
import inspect
import os
import sys
import weakref
from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions.partition import RepositoryPartitionsHandle
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.repository import RepositoryDefinition
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.scheduler import SchedulerHandle
from dagster.utils import load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPHEMERAL_NAME = '<<unnamed>>'
class PartitionLoaderEntrypoint(
namedtuple('_PartitionLoaderEntrypoint', 'module module_name fn_name from_handle')
):
def __new__(cls, module, module_name, fn_name, from_handle=None):
return super(PartitionLoaderEntrypoint, cls).__new__(
cls, module, module_name, fn_name, from_handle
)
def perform_load(self):
# in the decorator case the attribute will be the actual definition
if not hasattr(self.module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found in module {module}.'.format(name=self.fn_name, module=self.module)
)
fn_partitions = getattr(self.module, self.fn_name)
if isinstance(fn_partitions, RepositoryPartitionsHandle):
inst = fn_partitions
elif callable(fn_partitions):
handle = fn_partitions()
if not isinstance(handle, RepositoryPartitionsHandle):
raise DagsterInvariantViolationError(
'{fn_name} is a function but must return a RepositoryPartitionsHandle.'.format(
fn_name=self.fn_name
)
)
inst = handle
else:
raise DagsterInvariantViolationError(
'{fn_name} must be a function that returns a RepositoryPartitionstHandle.'.format(
fn_name=self.fn_name
)
)
return inst
@staticmethod
def from_file_target(python_file, fn_name, from_handle=None):
file_directory = os.path.dirname(python_file)
if file_directory not in sys.path:
sys.path.append(file_directory)
module_name = os.path.splitext(os.path.basename(python_file))[0]
module = imp.load_source(module_name, python_file)
return PartitionLoaderEntrypoint(module, module_name, fn_name, from_handle)
@staticmethod
def from_module_target(module_name, fn_name, from_handle=None):
module = importlib.import_module(module_name)
return PartitionLoaderEntrypoint(module, module_name, fn_name, from_handle)
@staticmethod
def from_yaml(file_path, from_handle=None):
check.str_param(file_path, 'file_path')
config = load_yaml_from_path(file_path)
if not config.get('partitions'):
return None
partitions = check.dict_elem(config, 'partitions')
module_name = check.opt_str_elem(partitions, 'module')
file_name = check.opt_str_elem(partitions, 'file')
fn_name = check.str_elem(partitions, 'fn')
if module_name:
return PartitionLoaderEntrypoint.from_module_target(module_name, fn_name, from_handle)
else:
# rebase file in config off of the path in the config file
file_name = os.path.join(os.path.dirname(os.path.abspath(file_path)), file_name)
return PartitionLoaderEntrypoint.from_file_target(file_name, fn_name, from_handle)
class SchedulerLoaderEntrypoint(
namedtuple('_SchedulerLoaderEntrypoint', 'module module_name fn_name from_handle')
):
def __new__(cls, module, module_name, fn_name, from_handle=None):
return super(SchedulerLoaderEntrypoint, cls).__new__(
cls, module, module_name, fn_name, from_handle
)
def perform_load(self, artifacts_dir):
artifacts_dir = check.str_param(artifacts_dir, 'artifacts_dir')
repository_name = self.from_handle.build_repository_definition().name
# in the decorator case the attribute will be the actual definition
if not hasattr(self.module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found in module {module}.'.format(name=self.fn_name, module=self.module)
)
fn_scheduler = getattr(self.module, self.fn_name)
if callable(fn_scheduler):
scheduler = fn_scheduler(artifacts_dir=artifacts_dir, repository_name=repository_name)
if not isinstance(scheduler, SchedulerHandle):
raise DagsterInvariantViolationError(
'{fn_name} is a function but must return a SchedulerHandle.'.format(
fn_name=self.fn_name
)
)
inst = scheduler
else:
raise DagsterInvariantViolationError(
'{fn_name} must be a function that returns a SchedulerHandle.'.format(
fn_name=self.fn_name
)
)
return inst
@staticmethod
def from_file_target(python_file, fn_name, from_handle=None):
file_directory = os.path.dirname(python_file)
if file_directory not in sys.path:
sys.path.append(file_directory)
module_name = os.path.splitext(os.path.basename(python_file))[0]
module = imp.load_source(module_name, python_file)
return SchedulerLoaderEntrypoint(module, module_name, fn_name, from_handle)
@staticmethod
def from_module_target(module_name, fn_name, from_handle=None):
module = importlib.import_module(module_name)
return SchedulerLoaderEntrypoint(module, module_name, fn_name, from_handle)
@staticmethod
def from_yaml(file_path, from_handle=None):
check.str_param(file_path, 'file_path')
config = load_yaml_from_path(file_path)
if not config.get('scheduler'):
return None
scheduler = check.dict_elem(config, 'scheduler')
module_name = check.opt_str_elem(scheduler, 'module')
file_name = check.opt_str_elem(scheduler, 'file')
fn_name = check.str_elem(scheduler, 'fn')
if module_name:
return SchedulerLoaderEntrypoint.from_module_target(module_name, fn_name, from_handle)
else:
# rebase file in config off of the path in the config file
file_name = os.path.join(os.path.dirname(os.path.abspath(file_path)), file_name)
return SchedulerLoaderEntrypoint.from_file_target(file_name, fn_name, from_handle)
class LoaderEntrypoint(namedtuple('_LoaderEntrypoint', 'module module_name fn_name from_handle')):
def __new__(cls, module, module_name, fn_name, from_handle=None):
return super(LoaderEntrypoint, cls).__new__(cls, module, module_name, fn_name, from_handle)
def perform_load(self):
# in the decorator case the attribute will be the actual definition
if not hasattr(self.module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found in module {module}.'.format(name=self.fn_name, module=self.module)
)
fn_repo_or_pipeline = getattr(self.module, self.fn_name)
# This is the @pipeline case
if isinstance(fn_repo_or_pipeline, PipelineDefinition):
inst = fn_repo_or_pipeline
# This is the define_pipeline() or define_repo() case
elif callable(fn_repo_or_pipeline):
repo_or_pipeline = fn_repo_or_pipeline()
if not isinstance(repo_or_pipeline, (RepositoryDefinition, PipelineDefinition)):
raise DagsterInvariantViolationError(
'{fn_name} is a function but must return a PipelineDefinition '
'or a RepositoryDefinition, or be decorated with @pipeline.'.format(
fn_name=self.fn_name
)
)
inst = repo_or_pipeline
else:
raise DagsterInvariantViolationError(
'{fn_name} must be a function that returns a PipelineDefinition '
'or a RepositoryDefinition, or a function decorated with @pipeline.'.format(
fn_name=self.fn_name
)
)
if self.from_handle:
return ExecutionTargetHandle.cache_handle(inst, self.from_handle)
return inst
@staticmethod
def from_file_target(python_file, fn_name, from_handle=None):
file_directory = os.path.dirname(python_file)
if file_directory not in sys.path:
sys.path.append(file_directory)
module_name = os.path.splitext(os.path.basename(python_file))[0]
module = imp.load_source(module_name, python_file)
return LoaderEntrypoint(module, module_name, fn_name, from_handle)
@staticmethod
def from_module_target(module_name, fn_name, from_handle=None):
module = importlib.import_module(module_name)
return LoaderEntrypoint(module, module_name, fn_name, from_handle)
@staticmethod
def from_yaml(file_path, from_handle=None):
check.str_param(file_path, 'file_path')
config = load_yaml_from_path(file_path)
repository_config = check.dict_elem(config, 'repository')
module_name = check.opt_str_elem(repository_config, 'module')
file_name = check.opt_str_elem(repository_config, 'file')
fn_name = check.str_elem(repository_config, 'fn')
if module_name:
return LoaderEntrypoint.from_module_target(module_name, fn_name, from_handle)
else:
# rebase file in config off of the path in the config file
file_name = os.path.join(os.path.dirname(os.path.abspath(file_path)), file_name)
return LoaderEntrypoint.from_file_target(file_name, fn_name, from_handle)
class ExecutionTargetHandleCacheEntry(
namedtuple('_ExecutionTargetHandleCacheEntry', 'handle solid_subset')
):
def __new__(cls, handle, solid_subset=None):
check.inst_param(handle, 'handle', ExecutionTargetHandle)
check.opt_list_param(solid_subset, 'solid_subset', of_type=str)
return super(ExecutionTargetHandleCacheEntry, cls).__new__(cls, handle, solid_subset)
class ExecutionTargetHandle(object):
'''ExecutionTargetHandle represents an immutable, serializable reference to a Dagster
RepositoryDefinition or PipelineDefinition, to support dynamically loading these in various
contexts (e.g. across process boundaries).
This class must remain pickle-serializable to ensure multiprocessing compatibility, and is the
one of the primary reasons that we pass this around vs. an instantiated
RepositoryDefinition/PipelineDefinition object.
### Creation
ExecutionTargetHandles can be created via the staticmethod constructors below.
- for_repo_fn
- for_repo_yaml
- for_repo_python_file
- for_repo_module
- for_pipeline_fn
- for_pipeline_python_file
- for_pipeline_module
Also, the following constructors are provided to support construction from CLI tools in
dagster.cli.load_handle:
- handle_for_repo_cli_args
- handle_for_pipeline_cli_args
Since an ExecutionTargetHandle can reference either a RepositoryDefinition or a fully-qualified
pipeline, it provides a property `is_resolved_to_pipeline` which identifies whether it is fully-
qualified to a pipeline reference.
For repository-based handles, you can use the `with_pipeline_name(pipeline_name)` method on a
repository handle to construct and return a new fully-qualified pipeline handle.
### Usage
Handle objects support the following methods to construct `*Definition` objects:
- handle.build_repository_definition() => RepositoryDefinition
- handle.build_pipeline_definition() => PipelineDefinition
These are intended to support reconstructing definitions from their serialized representations
provided by this object wherever needed during execution.
The first is supported on all handles; the second requires a fully-qualified pipeline handle.
For more advanced usage, you can also construct an entrypoint object yourself with:
- handle.entrypoint() => LoaderEntrypoint
This should not be necessary in common usage.
'''
__cache__ = weakref.WeakKeyDictionary()
'''The cache is used to cache handles used to create PipelineDefinition and
RepositoryDefinition objects, so the handles can be passed across serialization boundaries (as
for dagstermill) by solid compute logic.'''
@classmethod
def get_handle(cls, repo_or_pipeline):
'''Get the handle and, optionally, solid subset used to construct a repo or (sub-)pipeline.
Returns: Union[ExecutionTargetHandleCacheEntry, (None, None)]
'''
check.inst_param(
repo_or_pipeline, 'repo_or_pipeline', (RepositoryDefinition, PipelineDefinition)
)
return cls.__cache__.get(repo_or_pipeline) or (None, None)
@classmethod
def cache_handle(cls, repo_or_pipeline_def, handle=None, solid_names=None):
'''Record a pipeline or repository in the cache.
Args:
repo_or_pipeline_def (Union[RepositoryDefinition, PipelineDefinition]): The repo or
pipeline definition for which to cache the handle.
Kwargs:
handle (ExecutionTargetHandle): The handle to cache.
solid_names (Optional[List[str]]): The solid names constituting the constructed
sub-pipeline, if any; arg should be as for
dagster.core.definitions.pipeline.build_sub_pipeline.
'''
check.inst_param(
repo_or_pipeline_def, 'repo_or_pipeline_def', (RepositoryDefinition, PipelineDefinition)
)
check.inst_param(handle, 'handle', ExecutionTargetHandle)
check.opt_list_param(solid_names, 'solid_names', of_type=str)
cls.__cache__[repo_or_pipeline_def] = ExecutionTargetHandleCacheEntry(handle, solid_names)
return repo_or_pipeline_def
@staticmethod
def for_pipeline_fn(fn):
'''This builder is a bit magical, but it inspects its caller to determine how to build a
ExecutionTargetHandle object via python_file and fn_name.
This will work since fn_name is ensured to be in scope in the python_file caller's scope.
'''
check.callable_param(fn, 'fn')
return ExecutionTargetHandle.for_pipeline_python_file(
python_file=_get_python_file_from_previous_stack_frame(), fn_name=fn.__name__
)
@staticmethod
def for_repo_fn(fn):
'''This builder is a bit magical, but it inspects its caller to determine how to build a
ExecutionTargetHandle object via python_file and fn_name.
This will work since fn_name is ensured to be in scope in the python_file caller's scope.
'''
check.callable_param(fn, 'fn')
return ExecutionTargetHandle.for_repo_python_file(
python_file=_get_python_file_from_previous_stack_frame(), fn_name=fn.__name__
)
@staticmethod
def for_repo_yaml(repository_yaml):
'''Builds an ExecutionTargetHandle for a repository.yml file.
'''
return ExecutionTargetHandle(
_ExecutionTargetHandleData(repository_yaml=os.path.abspath(repository_yaml)),
_ExecutionTargetMode.REPOSITORY,
)
@staticmethod
def for_repo_python_file(python_file, fn_name):
'''Builds an ExecutionTargetHandle for a repository python file and function which is
expected to return a RepositoryDefinition instance.
'''
return ExecutionTargetHandle(
_ExecutionTargetHandleData(python_file=python_file, fn_name=fn_name),
_ExecutionTargetMode.REPOSITORY,
)
@staticmethod
def for_repo_module(module_name, fn_name):
'''Builds an ExecutionTargetHandle for a repository module and function which is expected
to return a RepositoryDefinition instance.
'''
return ExecutionTargetHandle(
_ExecutionTargetHandleData(module_name=module_name, fn_name=fn_name),
_ExecutionTargetMode.REPOSITORY,
)
@staticmethod
def for_pipeline_python_file(python_file, fn_name):
'''Builds an ExecutionTargetHandle for a pipeline python file and function which is expected
to return a PipelineDefinition instance.
'''
return ExecutionTargetHandle(
_ExecutionTargetHandleData(python_file=python_file, fn_name=fn_name),
_ExecutionTargetMode.PIPELINE,
is_resolved_to_pipeline=True,
)
@staticmethod
def for_pipeline_module(module_name, fn_name):
'''Builds an ExecutionTargetHandle for a pipeline python module and function which is
expected to return a PipelineDefinition instance.
'''
return ExecutionTargetHandle(
_ExecutionTargetHandleData(module_name=module_name, fn_name=fn_name),
_ExecutionTargetMode.PIPELINE,
is_resolved_to_pipeline=True,
)
@staticmethod
def from_dict(handle_dict):
return ExecutionTargetHandle(
data=_ExecutionTargetHandleData(**handle_dict['data']),
mode=getattr(_ExecutionTargetMode, handle_dict['mode']),
is_resolved_to_pipeline=handle_dict['is_resolved_to_pipeline'],
)
def to_dict(self):
return {
'data': self.data._asdict(),
'mode': self.mode.name,
'is_resolved_to_pipeline': self.is_resolved_to_pipeline,
}
def with_pipeline_name(self, pipeline_name):
'''Returns a new ExecutionTargetHandle that references the pipeline "pipeline_name" within
the repository.
'''
if self.is_resolved_to_pipeline and self.data.pipeline_name == pipeline_name:
return self
check.invariant(
not (self.is_resolved_to_pipeline and self.data.pipeline_name is not None),
'''ExecutionTargetHandle already references a pipeline named {pipeline_name}, cannot
change to {new_pipeline_name}.'''.format(
pipeline_name=self.data.pipeline_name, new_pipeline_name=pipeline_name
),
)
data = self.data._replace(pipeline_name=pipeline_name)
return ExecutionTargetHandle(
data, mode=_ExecutionTargetMode.PIPELINE, is_resolved_to_pipeline=True
)
def build_scheduler_handle(self, artifacts_dir):
# Cannot create a scheduler handle if the target mode is not a repository
if self.mode != _ExecutionTargetMode.REPOSITORY:
return None
entrypoint = self.scheduler_handle_entrypoint
# entrypoint will be None if the repository yaml file does not define a scheduler entrypoint
if not entrypoint:
return None
return self.scheduler_handle_entrypoint.perform_load(artifacts_dir)
def build_partitions_handle(self):
if self.mode != _ExecutionTargetMode.REPOSITORY:
return None
entrypoint = self.partition_handle_entrypoint
if not entrypoint:
return None
return self.partition_handle_entrypoint.perform_load()
def build_repository_definition(self):
'''Rehydrates a RepositoryDefinition from an ExecutionTargetHandle object.
If this ExecutionTargetHandle points to a pipeline, we create an ephemeral repository to
wrap the pipeline and return it.
'''
obj = self.entrypoint.perform_load()
if self.mode == _ExecutionTargetMode.REPOSITORY:
# User passed in a function that returns a pipeline definition, not a repository. See:
# https://github.com/dagster-io/dagster/issues/1439
if isinstance(obj, PipelineDefinition):
return ExecutionTargetHandle.cache_handle(
RepositoryDefinition(name=EPHEMERAL_NAME, pipeline_defs=[obj]),
*ExecutionTargetHandle.get_handle(obj)
)
return ExecutionTargetHandle.cache_handle(check.inst(obj, RepositoryDefinition), self)
elif self.mode == _ExecutionTargetMode.PIPELINE:
# This handle may have originally targeted a repository and then been qualified with
# with_pipeline_name()
if isinstance(obj, RepositoryDefinition):
return ExecutionTargetHandle.cache_handle(
obj, *ExecutionTargetHandle.get_handle(obj)
)
return ExecutionTargetHandle.cache_handle(
RepositoryDefinition(name=EPHEMERAL_NAME, pipeline_defs=[obj]),
*ExecutionTargetHandle.get_handle(obj)
)
else:
check.failed('Unhandled mode {mode}'.format(mode=self.mode))
def build_pipeline_definition(self):
'''Rehydrates a PipelineDefinition from an ExecutionTargetHandle object.
'''
if self.mode == _ExecutionTargetMode.REPOSITORY:
raise DagsterInvariantViolationError(
'Cannot construct a pipeline from a repository-based ExecutionTargetHandle without'
' a pipeline name. Use with_pipeline_name() to construct a pipeline'
' ExecutionTargetHandle.'
)
elif self.mode == _ExecutionTargetMode.PIPELINE:
obj = self.entrypoint.perform_load()
if isinstance(obj, PipelineDefinition):
return ExecutionTargetHandle.cache_handle(obj, self)
else:
return ExecutionTargetHandle.cache_handle(
obj.get_pipeline(self.data.pipeline_name), self
)
else:
check.failed('Unhandled mode {mode}'.format(mode=self.mode))
@property
def partition_handle_entrypoint(self):
return self.data.get_partition_entrypoint(from_handle=self)
@property
def scheduler_handle_entrypoint(self):
return self.data.get_scheduler_entrypoint(from_handle=self)
@property
def entrypoint(self):
if self.mode == _ExecutionTargetMode.REPOSITORY:
return self.data.get_repository_entrypoint(from_handle=self)
elif self.mode == _ExecutionTargetMode.PIPELINE:
return self.data.get_pipeline_entrypoint(from_handle=self)
else:
check.failed('Unhandled mode {mode}'.format(mode=self.mode))
def __init__(self, data, mode, is_resolved_to_pipeline=False):
'''Not intended to be invoked directly. Use one of the factory functions above.
'''
self.data = check.inst_param(data, 'data', _ExecutionTargetHandleData)
self.mode = check.inst_param(mode, 'mode', _ExecutionTargetMode)
# By default, this only resolves to a repository
self.is_resolved_to_pipeline = is_resolved_to_pipeline
def _get_python_file_from_previous_stack_frame():
'''inspect.stack() lets us introspect the call stack; inspect.stack()[1] is the previous
stack frame.
In Python < 3.5, this is just a tuple, of which the python file of the previous frame is the 1st
element.
In Python 3.5+, this is a FrameInfo namedtuple instance; the python file of the previous frame
remains the 1st element.
'''
# Since this is now a function in this file, we need to go back two hops to find the
# callsite file.
previous_stack_frame = inspect.stack()[2]
# See: https://docs.python.org/3/library/inspect.html
if sys.version_info.major == 3 and sys.version_info.minor >= 5:
check.inst(previous_stack_frame, inspect.FrameInfo)
else:
check.inst(previous_stack_frame, tuple)
python_file = previous_stack_frame[1]
return os.path.abspath(python_file)
class _ExecutionTargetMode(Enum):
    """Whether a handle points at a single pipeline or at a whole repository."""

    PIPELINE = 1
    REPOSITORY = 2
class _ExecutionTargetHandleData(
    namedtuple(
        '_ExecutionTargetHandleData',
        'repository_yaml module_name python_file fn_name pipeline_name',
    )
):
    """Serializable record of where a pipeline or repository definition lives.

    All fields are optional strings; which combination is usable is only
    validated lazily by the get_*_entrypoint methods below, which try the
    supported combinations in a fixed order and raise when none applies.
    """

    def __new__(
        cls,
        repository_yaml=None,
        module_name=None,
        python_file=None,
        fn_name=None,
        pipeline_name=None,
    ):
        return super(_ExecutionTargetHandleData, cls).__new__(
            cls,
            repository_yaml=check.opt_str_param(repository_yaml, 'repository_yaml'),
            module_name=check.opt_str_param(module_name, 'module_name'),
            python_file=check.opt_str_param(python_file, 'python_file'),
            fn_name=check.opt_str_param(fn_name, 'fn_name'),
            pipeline_name=check.opt_str_param(pipeline_name, 'pipeline_name'),
        )

    def get_partition_entrypoint(self, from_handle=None):
        """Entrypoint for partition sets; only supported via a repository yaml.

        Implicitly returns None when no repository_yaml is set.
        """
        if self.repository_yaml:
            return PartitionLoaderEntrypoint.from_yaml(
                self.repository_yaml, from_handle=from_handle
            )

    def get_scheduler_entrypoint(self, from_handle=None):
        """Entrypoint for schedulers; only supported via a repository yaml.

        Implicitly returns None when no repository_yaml is set.
        """
        if self.repository_yaml:
            return SchedulerLoaderEntrypoint.from_yaml(
                self.repository_yaml, from_handle=from_handle
            )

    def get_repository_entrypoint(self, from_handle=None):
        """Resolve a repository loader entrypoint.

        Precedence: repository yaml, then module + fn_name, then
        python file + fn_name. Raises DagsterInvariantViolationError when
        none of these combinations is present.
        """
        if self.repository_yaml:
            return LoaderEntrypoint.from_yaml(self.repository_yaml, from_handle=from_handle)
        elif self.module_name and self.fn_name:
            return LoaderEntrypoint.from_module_target(
                module_name=self.module_name, fn_name=self.fn_name, from_handle=from_handle
            )
        elif self.python_file and self.fn_name:
            return LoaderEntrypoint.from_file_target(
                python_file=self.python_file, fn_name=self.fn_name, from_handle=from_handle
            )
        else:
            raise DagsterInvariantViolationError(
                (
                    'You have attempted to load a repository with an invalid '
                    'combination of properties. repository_yaml {repository_yaml} '
                    'module_name {module_name} python_file {python_file} '
                    'fn_name {fn_name}.'
                ).format(
                    repository_yaml=self.repository_yaml,
                    module_name=self.module_name,
                    fn_name=self.fn_name,
                    python_file=self.python_file,
                )
            )

    def get_pipeline_entrypoint(self, from_handle=None):
        """Resolve a pipeline loader entrypoint.

        Precedence differs from get_repository_entrypoint: python file +
        fn_name first, then module + fn_name, then falling back to the
        repository entrypoint when only a pipeline_name is given. Raises
        DagsterInvariantViolationError when nothing matches.
        """
        if self.python_file and self.fn_name:
            return LoaderEntrypoint.from_file_target(
                python_file=self.python_file, fn_name=self.fn_name, from_handle=from_handle
            )
        elif self.module_name and self.fn_name:
            return LoaderEntrypoint.from_module_target(
                module_name=self.module_name, fn_name=self.fn_name, from_handle=from_handle
            )
        elif self.pipeline_name:
            return self.get_repository_entrypoint(from_handle=from_handle)
        raise DagsterInvariantViolationError(
            (
                'You have attempted to directly load a pipeline with an invalid '
                'combination of properties module_name {module_name} python_file '
                '{python_file} fn_name {fn_name}.'
            ).format(
                module_name=self.module_name, fn_name=self.fn_name, python_file=self.python_file
            )
        )

    def _asdict(self):
        """Dict form of the namedtuple with path fields normalized to Posix."""
        ddict = super(_ExecutionTargetHandleData, self)._asdict()
        # Normalize to Posix paths
        for key in ['repository_yaml', 'python_file']:
            if ddict[key]:
                ddict[key] = Path(ddict[key]).as_posix()
        return ddict
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster/core/definitions/handle.py | handle.py | py | 28,007 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dagster.core.errors.DagsterInvariantViolationError",
"line_number": 38,
"usage_type": "call"... |
74060670184 | import re
from hashlib import sha256
from unittest import mock
import pytest
from aiohttp import web
from sqlalchemy import and_, select
from server.config import config
from server.db.models import ban, friends_and_foes
from server.exceptions import BanError, ClientError
from server.game_service import GameService
from server.gameconnection import GameConnection
from server.games import CustomGame, Game, GameState, InitMode, VisibilityState
from server.geoip_service import GeoIpService
from server.ice_servers.nts import TwilioNTS
from server.ladder_service import LadderService
from server.lobbyconnection import LobbyConnection
from server.matchmaker import Search
from server.oauth_service import OAuthService
from server.party_service import PartyService
from server.player_service import PlayerService
from server.players import PlayerState
from server.protocol import DisconnectedError, QDataStreamProtocol
from server.rating import InclusiveRange, RatingType
from server.team_matchmaker import PlayerParty
from server.types import Address
@pytest.fixture()
def test_game_info():
    """Baseline payload for game_host/game_join messages."""
    return dict(
        title="Test game",
        visibility=VisibilityState.PUBLIC.value,
        mod="faf",
        mapname="scmp_007",
        password=None,
        lobby_rating=1,
        options=[],
    )
@pytest.fixture()
def test_game_info_invalid():
    """Game payload whose title contains a non-ASCII byte and must be rejected."""
    return dict(
        title="Title with non ASCI char \xc3",
        visibility=VisibilityState.PUBLIC.value,
        mod="faf",
        mapname="scmp_007",
        password=None,
        lobby_rating=1,
        options=[],
    )
@pytest.fixture
def mock_player(player_factory):
    """A plain player (id 42) with no lobby connection attached."""
    return player_factory("Dummy", player_id=42, lobby_connection_spec=None)
@pytest.fixture
def mock_nts_client():
    """Autospecced Twilio NTS client mock."""
    return mock.create_autospec(TwilioNTS)
@pytest.fixture
def mock_players():
    """Autospecced PlayerService mock."""
    return mock.create_autospec(PlayerService)
@pytest.fixture
def mock_games():
    """Autospecced GameService mock."""
    return mock.create_autospec(GameService)
@pytest.fixture
def mock_protocol():
    """Autospec of a concrete QDataStreamProtocol instance (spec needs an instance)."""
    return mock.create_autospec(QDataStreamProtocol(mock.Mock(), mock.Mock()))
@pytest.fixture
def mock_geoip():
    """Autospecced GeoIpService mock."""
    return mock.create_autospec(GeoIpService)
@pytest.fixture
def lobbyconnection(
    event_loop,
    database,
    mock_protocol,
    mock_games,
    mock_players,
    mock_player,
    mock_geoip,
    mock_nts_client,
    rating_service
):
    """A LobbyConnection wired to mocked services and pre-authenticated."""
    lc = LobbyConnection(
        database=database,
        geoip=mock_geoip,
        game_service=mock_games,
        players=mock_players,
        nts_client=mock_nts_client,
        ladder_service=mock.create_autospec(LadderService),
        party_service=mock.create_autospec(PartyService),
        oauth_service=mock.create_autospec(OAuthService),
        rating_service=rating_service
    )

    lc.player = mock_player
    lc.protocol = mock_protocol
    lc.player_service.fetch_player_data = mock.AsyncMock()
    lc.peer_address = Address("127.0.0.1", 1234)
    # Skip the login handshake so tests can issue authenticated commands directly
    lc._authenticated = True
    return lc
@pytest.fixture
def policy_server(event_loop):
    """Run a local aiohttp policy server that echoes back the posted uid_hash."""
    host = "localhost"
    port = 6080

    app = web.Application()
    routes = web.RouteTableDef()

    @routes.post("/verify")
    async def token(request):
        data = await request.json()
        # The fake verdict is simply the uid_hash the client sent
        return web.json_response({"result": data.get("uid_hash")})

    app.add_routes(routes)

    runner = web.AppRunner(app)

    async def start_app():
        await runner.setup()
        site = web.TCPSite(runner, host, port)
        await site.start()

    event_loop.run_until_complete(start_app())

    yield (host, port)

    # Teardown: stop the server once the test is done
    event_loop.run_until_complete(runner.cleanup())
async def test_unauthenticated_calls_abort(lobbyconnection, test_game_info):
    """Commands sent before authentication must abort the connection."""
    lobbyconnection._authenticated = False
    lobbyconnection.abort = mock.AsyncMock()

    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info
    })

    lobbyconnection.abort.assert_called_once_with(
        "Message invalid for unauthenticated connection: game_host"
    )
async def test_bad_command_calls_abort(lobbyconnection):
    """Unknown commands reply with 'invalid' and abort the connection."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.abort = mock.AsyncMock()

    await lobbyconnection.on_message_received({
        "command": "this_isnt_real"
    })

    lobbyconnection.send.assert_called_once_with({"command": "invalid"})
    lobbyconnection.abort.assert_called_once_with("Error processing command")
async def test_command_pong_does_nothing(lobbyconnection):
    """A 'pong' message is accepted silently: nothing is sent back."""
    lobbyconnection.send = mock.AsyncMock()

    message = {"command": "pong"}
    await lobbyconnection.on_message_received(message)

    lobbyconnection.send.assert_not_called()
async def test_command_create_account_returns_error(lobbyconnection):
    """Direct account creation is unsupported and answered with an error notice."""
    lobbyconnection.send = mock.AsyncMock()

    await lobbyconnection.on_message_received({
        "command": "create_account"
    })

    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "error",
        "text": ("FAF no longer supports direct registration. "
                 "Please use the website to register.")
    })
async def test_double_login(lobbyconnection, mock_players, player_factory):
    """Logging in while already signed in elsewhere kicks the old session."""
    lobbyconnection.check_policy_conformity = mock.AsyncMock(return_value=True)
    old_player = player_factory(lobby_connection_spec="auto")
    old_player.lobby_connection.player = old_player
    mock_players.get_player.return_value = old_player

    await lobbyconnection.on_message_received({
        "command": "hello",
        "login": "test",
        "password": sha256(b"test_password").hexdigest(),
        "unique_id": "blah"
    })

    old_player.lobby_connection.write_warning.assert_called_with(
        "You have been signed out because you signed in elsewhere.",
        fatal=True,
        style="kick"
    )
    # This should only be reset in abort, which is mocked for this test
    assert old_player.lobby_connection.player is not None
async def test_double_login_disconnected(lobbyconnection, mock_players, player_factory):
    """A dead old connection during double login must not crash the new login."""
    lobbyconnection.abort = mock.AsyncMock()
    lobbyconnection.check_policy_conformity = mock.AsyncMock(return_value=True)
    old_player = player_factory(lobby_connection_spec="auto")
    mock_players.get_player.return_value = old_player

    # NOTE(review): the error is injected on send_warning while test_double_login
    # asserts on write_warning — confirm which method the server actually calls here.
    old_player.lobby_connection.send_warning.side_effect = DisconnectedError("Test disconnect")

    # Should not raise
    await lobbyconnection.on_message_received({
        "command": "hello",
        "login": "test",
        "password": sha256(b"test_password").hexdigest(),
        "unique_id": "blah"
    })

    lobbyconnection.abort.assert_not_called()
async def test_command_game_host_creates_game(
    lobbyconnection, mock_games, test_game_info, players
):
    """game_host forwards the expected arguments to GameService.create_game."""
    players.hosting.state = PlayerState.IDLE
    lobbyconnection.player = players.hosting
    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info
    })
    expected_call = {
        "game_mode": "faf",
        "game_class": CustomGame,
        "name": test_game_info["title"],
        "host": players.hosting,
        "visibility": VisibilityState.PUBLIC,
        "password": test_game_info["password"],
        "mapname": test_game_info["mapname"],
        "rating_type": RatingType.GLOBAL,
        "displayed_rating_range": InclusiveRange(None, None),
        "enforce_rating_range": False
    }
    mock_games.create_game.assert_called_with(**expected_call)
async def test_launch_game(lobbyconnection, game, player_factory):
    """launch_game aborts the old game connection and wires up a new one."""
    old_game_conn = mock.create_autospec(GameConnection)

    lobbyconnection.player = player_factory()
    lobbyconnection.game_connection = old_game_conn
    lobbyconnection.send = mock.AsyncMock()
    await lobbyconnection.launch_game(game)

    # Verify all side effects of launch_game here
    old_game_conn.abort.assert_called_with("Player launched a new game")
    assert lobbyconnection.game_connection is not None
    assert lobbyconnection.game_connection.game == game
    assert lobbyconnection.player.game == game
    assert lobbyconnection.player.game_connection == lobbyconnection.game_connection
    assert lobbyconnection.game_connection.player == lobbyconnection.player
    assert lobbyconnection.player.state == PlayerState.IDLE
    lobbyconnection.send.assert_called_once()
async def test_command_game_host_creates_correct_game(
        lobbyconnection, game_service, test_game_info, players):
    """Hosting with the real game service launches a CustomGame instance."""
    lobbyconnection.player = players.hosting
    players.hosting.state = PlayerState.IDLE
    lobbyconnection.game_service = game_service
    lobbyconnection.launch_game = mock.AsyncMock()

    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info
    })
    args_list = lobbyconnection.launch_game.call_args_list
    assert len(args_list) == 1
    args, kwargs = args_list[0]
    assert isinstance(args[0], CustomGame)
async def test_command_game_join_calls_join_game(
    mocker,
    database,
    lobbyconnection,
    game_service,
    test_game_info,
    players,
    game_stats_service
):
    """Joining an open lobby replies with the game_launch payload."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = Game(42, database, game_service, game_stats_service)
    game.state = GameState.LOBBY
    game.password = None
    game.game_mode = "faf"
    game.id = 42
    game.name = "Test Game Name"
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = 42

    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    expected_reply = {
        "command": "game_launch",
        "args": ["/numgames", players.hosting.game_count[RatingType.GLOBAL]],
        "uid": 42,
        "mod": "faf",
        "name": "Test Game Name",
        "init_mode": InitMode.NORMAL_LOBBY.value,
        "game_type": "custom",
        "rating_type": "global",
    }
    lobbyconnection.send.assert_called_with(expected_reply)
async def test_command_game_join_uid_as_str(
    mocker,
    database,
    lobbyconnection,
    game_service,
    test_game_info,
    players,
    game_stats_service
):
    """A string uid in game_join is coerced to int and handled normally."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = Game(42, database, game_service, game_stats_service)
    game.state = GameState.LOBBY
    game.password = None
    game.game_mode = "faf"
    game.id = 42
    game.name = "Test Game Name"
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = "42"  # Pass in uid as string

    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    expected_reply = {
        "command": "game_launch",
        "args": ["/numgames", players.hosting.game_count[RatingType.GLOBAL]],
        "mod": "faf",
        "uid": 42,
        "name": "Test Game Name",
        "init_mode": InitMode.NORMAL_LOBBY.value,
        "game_type": "custom",
        "rating_type": "global",
    }
    lobbyconnection.send.assert_called_with(expected_reply)
async def test_command_game_join_without_password(
    lobbyconnection,
    database,
    game_service,
    test_game_info,
    players,
    game_stats_service
):
    """Joining a password-protected game without a password is rejected."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = mock.create_autospec(Game)
    game.state = GameState.LOBBY
    game.init_mode = InitMode.NORMAL_LOBBY
    game.password = "password"
    game.game_mode = "faf"
    game.id = 42
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = 42
    del test_game_info["password"]

    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "info",
        "text": "Bad password (it's case sensitive)."
    })
async def test_command_game_join_game_not_found(
    lobbyconnection,
    game_service,
    test_game_info,
    players
):
    """Joining a non-existent game replies that the host has left."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    lobbyconnection.player = players.joining
    players.joining.state = PlayerState.IDLE
    test_game_info["uid"] = 42

    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "info",
        "text": "The host has left the game."
    })
async def test_command_game_join_game_bad_init_mode(
    lobbyconnection,
    game_service,
    test_game_info,
    players
):
    """Auto-lobby (matchmaker) games cannot be joined via game_join."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.game_service = game_service
    game = mock.create_autospec(Game)
    game.state = GameState.LOBBY
    game.init_mode = InitMode.AUTO_LOBBY
    game.id = 42
    game.host = players.hosting
    game_service._games[42] = game
    lobbyconnection.player = players.joining
    lobbyconnection.player.state = PlayerState.IDLE
    test_game_info["uid"] = 42

    await lobbyconnection.on_message_received({
        "command": "game_join",
        **test_game_info
    })
    lobbyconnection.send.assert_called_once_with({
        "command": "notice",
        "style": "error",
        "text": "The game cannot be joined in this way."
    })
async def test_command_game_host_calls_host_game_invalid_title(
    lobbyconnection, mock_games, test_game_info_invalid
):
    """Non-ASCII titles are rejected before any game is created."""
    lobbyconnection.send = mock.AsyncMock()
    mock_games.create_game = mock.Mock()
    await lobbyconnection.on_message_received({
        "command": "game_host",
        **test_game_info_invalid
    })
    assert mock_games.create_game.mock_calls == []
    lobbyconnection.send.assert_called_once_with(
        dict(command="notice", style="error", text="Title must contain only ascii characters."))
async def test_abort(mocker, lobbyconnection):
    """Aborting the connection closes the underlying protocol."""
    close_mock = mock.AsyncMock()
    lobbyconnection.protocol.close = close_mock

    await lobbyconnection.abort()

    close_mock.assert_any_call()
async def test_send_game_list(mocker, database, lobbyconnection, game_stats_service):
    """send_game_list broadcasts game_info for every open game."""
    games = mocker.patch.object(lobbyconnection, "game_service")  # type: GameService
    game1, game2 = mock.create_autospec(Game(42, database, mock.Mock(), game_stats_service)), \
        mock.create_autospec(Game(22, database, mock.Mock(), game_stats_service))
    games.open_games = [game1, game2]
    lobbyconnection.send = mock.AsyncMock()

    await lobbyconnection.send_game_list()

    lobbyconnection.send.assert_any_call({
        "command": "game_info",
        "games": [game1.to_dict(), game2.to_dict()]
    })
async def test_coop_list(mocker, lobbyconnection):
    """coop_list emits one coop_info message per coop map in the database."""
    await lobbyconnection.command_coop_list({})

    args = lobbyconnection.protocol.write_message.call_args_list
    assert len(args) == 5
    coop_maps = [arg[0][0] for arg in args]
    # uid is assigned by the database and not asserted here
    for info in coop_maps:
        del info["uid"]
    assert coop_maps == [
        {
            "command": "coop_info",
            "name": "FA Campaign map",
            "description": "A map from the FA campaign",
            "filename": "maps/scmp_coop_123.v0002.zip",
            "featured_mod": "coop",
            "type": "FA Campaign"
        },
        {
            "command": "coop_info",
            "name": "Aeon Campaign map",
            "description": "A map from the Aeon campaign",
            "filename": "maps/scmp_coop_124.v0000.zip",
            "featured_mod": "coop",
            "type": "Aeon Vanilla Campaign"
        },
        {
            "command": "coop_info",
            "name": "Cybran Campaign map",
            "description": "A map from the Cybran campaign",
            "filename": "maps/scmp_coop_125.v0001.zip",
            "featured_mod": "coop",
            "type": "Cybran Vanilla Campaign"
        },
        {
            "command": "coop_info",
            "name": "UEF Campaign map",
            "description": "A map from the UEF campaign",
            "filename": "maps/scmp_coop_126.v0099.zip",
            "featured_mod": "coop",
            "type": "UEF Vanilla Campaign"
        },
        {
            "command": "coop_info",
            "name": "Prothyon - 16",
            "description": "Prothyon - 16 is a secret UEF facility...",
            "filename": "maps/prothyon16.v0005.zip",
            "featured_mod": "coop",
            "type": "Custom Missions"
        }
    ]
async def test_command_admin_closelobby(mocker, lobbyconnection, player_factory):
    """The closelobby admin action kicks the targeted player's connection."""
    player = lobbyconnection.player
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__getitem__.side_effect = data.__getitem__

    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "closelobby",
        "user_id": 55
    })

    tuna.lobby_connection.kick.assert_any_call()
async def test_command_admin_closeFA(lobbyconnection, player_factory):
    """The closeFA admin action sends a kill notice to the targeted player."""
    player = lobbyconnection.player
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__getitem__.side_effect = data.__getitem__

    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "closeFA",
        "user_id": tuna.id
    })

    tuna.lobby_connection.write.assert_any_call({
        "command": "notice",
        "style": "kill",
    })
async def test_game_subscription(lobbyconnection: LobbyConnection):
    """Messages with target 'game' are forwarded to the game connection."""
    game = mock.Mock()
    game.handle_action = mock.AsyncMock()
    lobbyconnection.game_connection = game

    await lobbyconnection.on_message_received({
        "command": "test",
        "args": ["foo", 42],
        "target": "game"
    })

    game.handle_action.assert_called_with("test", ["foo", 42])
async def test_command_avatar_list(mocker, lobbyconnection: LobbyConnection):
    """list_avatar returns the avatars owned by the player from the database."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.player.id = 2  # Dostya test user

    await lobbyconnection.on_message_received({
        "command": "avatar",
        "action": "list_avatar"
    })

    lobbyconnection.send.assert_any_call({
        "command": "avatar",
        "avatarlist": [{"url": "https://content.faforever.com/faf/avatars/qai2.png", "tooltip": "QAI"}, {"url": "https://content.faforever.com/faf/avatars/UEF.png", "tooltip": "UEF"}]
    })
async def test_command_avatar_select(mocker, database, lobbyconnection: LobbyConnection):
    """Selecting an avatar persists the selection in the avatars table."""
    lobbyconnection.player.id = 2  # Dostya test user

    await lobbyconnection.on_message_received({
        "command": "avatar",
        "action": "select",
        "avatar": "https://content.faforever.com/faf/avatars/qai2.png"
    })

    async with database.acquire() as conn:
        result = await conn.execute("SELECT selected from avatars where idUser=2")
        row = result.fetchone()
        assert row.selected == 1
async def get_friends(player_id, database):
    """Return the subject ids the given player has marked as FRIEND."""
    query = select(friends_and_foes.c.subject_id).where(
        and_(
            friends_and_foes.c.user_id == player_id,
            friends_and_foes.c.status == "FRIEND"
        )
    )
    async with database.acquire() as conn:
        rows = await conn.execute(query)
        return [row.subject_id for row in rows]
async def test_command_social_add_friend(lobbyconnection, database):
    """social_add persists the friendship and updates the in-memory set."""
    lobbyconnection.player.id = 1
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == []
    assert lobbyconnection.player.friends == set()

    await lobbyconnection.on_message_received({
        "command": "social_add",
        "friend": 2
    })

    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == [2]
    assert lobbyconnection.player.friends == {2}
async def test_command_social_remove_friend(lobbyconnection, database):
    """social_remove deletes the friendship and is idempotent."""
    lobbyconnection.player.id = 2
    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == [1]
    lobbyconnection.player.friends = {1}

    await lobbyconnection.on_message_received({
        "command": "social_remove",
        "friend": 1
    })

    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == []
    assert lobbyconnection.player.friends == set()

    # Removing twice does nothing
    await lobbyconnection.on_message_received({
        "command": "social_remove",
        "friend": 1
    })

    friends = await get_friends(lobbyconnection.player.id, database)
    assert friends == []
    assert lobbyconnection.player.friends == set()
async def test_command_ice_servers(
    lobbyconnection: LobbyConnection,
    mock_nts_client
):
    """ice_servers combines coturn tokens with Twilio NTS tokens."""
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.coturn_generator.server_tokens = mock.Mock(
        return_value=["coturn_tokens"]
    )
    mock_nts_client.server_tokens.return_value = ["twilio_tokens"]

    await lobbyconnection.on_message_received({"command": "ice_servers"})

    mock_nts_client.server_tokens.assert_called_once()
    lobbyconnection.send.assert_called_once_with({
        "command": "ice_servers",
        "ice_servers": ["coturn_tokens", "twilio_tokens"],
        "ttl": config.TWILIO_TTL
    })
async def test_broadcast(lobbyconnection: LobbyConnection, player_factory):
    """The broadcast admin action sends a warning to every connected player."""
    player = lobbyconnection.player
    player.lobby_connection = lobbyconnection
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__iter__.side_effect = data.values().__iter__
    lobbyconnection.write_warning = mock.Mock()

    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "broadcast",
        "message": "This is a test message"
    })

    player.lobby_connection.write_warning.assert_called_with("This is a test message")
    tuna.lobby_connection.write_warning.assert_called_with("This is a test message")
async def test_broadcast_during_disconnect(lobbyconnection: LobbyConnection, player_factory):
    """Broadcast must skip players whose connection object is already gone."""
    player = lobbyconnection.player
    player.lobby_connection = lobbyconnection
    player.id = 1
    # To simulate when a player has been recently disconnected so that they
    # still appear in the player_service list, but their lobby_connection
    # object has already been destroyed
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__iter__.side_effect = data.values().__iter__
    lobbyconnection.write_warning = mock.Mock()

    # This should not leak any exceptions
    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "broadcast",
        "message": "This is a test message"
    })

    player.lobby_connection.write_warning.assert_called_with("This is a test message")
async def test_broadcast_connection_error(lobbyconnection: LobbyConnection, player_factory):
    """A DisconnectedError while broadcasting to one player is swallowed."""
    player = lobbyconnection.player
    player.lobby_connection = lobbyconnection
    player.id = 1
    tuna = player_factory("Tuna", player_id=55, lobby_connection_spec="auto")
    tuna.lobby_connection.write_warning.side_effect = DisconnectedError("Some error")
    data = {
        player.id: player,
        tuna.id: tuna
    }
    lobbyconnection.player_service.__iter__.side_effect = data.values().__iter__
    lobbyconnection.write_warning = mock.Mock()

    # This should not leak any exceptions
    await lobbyconnection.on_message_received({
        "command": "admin",
        "action": "broadcast",
        "message": "This is a test message"
    })

    player.lobby_connection.write_warning.assert_called_with("This is a test message")
async def test_game_connection_not_restored_if_no_such_game_exists(lobbyconnection: LobbyConnection, mocker):
    """restore_game_session with an unknown game id leaves the player idle."""
    del lobbyconnection.player.game_connection
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.player.state = PlayerState.IDLE
    await lobbyconnection.on_message_received({
        "command": "restore_game_session",
        "game_id": 123
    })

    assert not lobbyconnection.player.game_connection
    assert lobbyconnection.player.state == PlayerState.IDLE

    lobbyconnection.send.assert_any_call({
        "command": "notice",
        "style": "info",
        "text": "The game you were connected to does no longer exist"
    })
@pytest.mark.parametrize("game_state", [GameState.INITIALIZING, GameState.ENDED])
async def test_game_connection_not_restored_if_game_state_prohibits(
    lobbyconnection: LobbyConnection,
    game_service: GameService,
    game_stats_service,
    game_state,
    mocker,
    database
):
    """Sessions are not restored for games that have not started or have ended."""
    del lobbyconnection.player.game_connection
    lobbyconnection.send = mock.AsyncMock()
    lobbyconnection.player.state = PlayerState.IDLE
    lobbyconnection.game_service = game_service
    game = mock.create_autospec(Game(42, database, game_service, game_stats_service))
    game.state = game_state
    game.password = None
    game.game_mode = "faf"
    game.id = 42
    game_service._games[42] = game

    await lobbyconnection.on_message_received({
        "command": "restore_game_session",
        "game_id": 42
    })

    assert not lobbyconnection.game_connection
    assert lobbyconnection.player.state == PlayerState.IDLE

    lobbyconnection.send.assert_any_call({
        "command": "notice",
        "style": "info",
        "text": "The game you were connected to is no longer available"
    })
@pytest.mark.parametrize("game_state", [GameState.LIVE, GameState.LOBBY])
async def test_game_connection_restored_if_game_exists(
    lobbyconnection: LobbyConnection,
    game_service: GameService,
    game_stats_service,
    game_state,
    database
):
    """Sessions are restored for live or lobby games, marking the player PLAYING."""
    del lobbyconnection.player.game_connection
    lobbyconnection.player.state = PlayerState.IDLE
    lobbyconnection.game_service = game_service
    game = mock.create_autospec(Game(42, database, game_service, game_stats_service))
    game.state = game_state
    game.password = None
    game.game_mode = "faf"
    game.id = 42
    game_service._games[42] = game

    await lobbyconnection.on_message_received({
        "command": "restore_game_session",
        "game_id": 42
    })

    assert lobbyconnection.game_connection
    assert lobbyconnection.player.state is PlayerState.PLAYING
    assert lobbyconnection.player.game is game
async def test_command_invite_to_party(lobbyconnection, mock_player):
    """invite_to_party is delegated to the party service."""
    lobbyconnection.player = mock_player
    lobbyconnection.player.id = 2
    lobbyconnection._authenticated = True

    await lobbyconnection.on_message_received({
        "command": "invite_to_party",
        "recipient_id": 1
    })

    lobbyconnection.party_service.invite_player_to_party.assert_called_once()
async def test_command_accept_party_invite(lobbyconnection, mock_player):
    """accept_party_invite is delegated to the party service."""
    lobbyconnection.player = mock_player
    lobbyconnection.player.id = 2
    lobbyconnection._authenticated = True

    await lobbyconnection.on_message_received({
        "command": "accept_party_invite",
        "sender_id": 1
    })

    lobbyconnection.party_service.accept_invite.assert_called_once()
async def test_command_kick_player_from_party(lobbyconnection, mock_player):
    """kick_player_from_party is delegated to the party service."""
    lobbyconnection.player = mock_player
    lobbyconnection.player.id = 2
    lobbyconnection._authenticated = True

    await lobbyconnection.on_message_received({
        "command": "kick_player_from_party",
        "kicked_player_id": 1
    })

    lobbyconnection.party_service.kick_player_from_party.assert_called_once()
async def test_command_leave_party(lobbyconnection, mock_player):
    """leave_party is delegated to the party service."""
    lobbyconnection.player = mock_player
    lobbyconnection.player.id = 2
    lobbyconnection._authenticated = True

    await lobbyconnection.on_message_received({
        "command": "leave_party"
    })

    lobbyconnection.party_service.leave_party.assert_called_once()
async def test_command_game_matchmaking(lobbyconnection):
    """A matchmaking 'stop' cancels the player's ladder1v1 search."""
    lobbyconnection.player.id = 1

    await lobbyconnection.on_message_received({
        "command": "game_matchmaking",
        "state": "stop"
    })

    lobbyconnection.ladder_service.cancel_search.assert_called_with(
        lobbyconnection.player,
        "ladder1v1"
    )
async def test_command_game_matchmaking_not_party_owner(
    lobbyconnection,
    mock_player,
    player_factory
):
    """Only the party owner may start a search, but any member may stop one."""
    party_owner = player_factory(player_id=2, lobby_connection_spec="auto")
    party = PlayerParty(party_owner)
    party.add_player(mock_player)
    lobbyconnection.player.id = 1
    lobbyconnection.party_service.get_party.return_value = party

    await lobbyconnection.on_message_received({
        "command": "game_matchmaking",
        "state": "start",
        "faction": "seraphim"
    })

    lobbyconnection.ladder_service.start_search.assert_not_called()

    await lobbyconnection.on_message_received({
        "command": "game_matchmaking",
        "state": "stop"
    })

    lobbyconnection.ladder_service.cancel_search.assert_called_once()
async def test_command_match_ready(lobbyconnection):
    """A 'match_ready' message must be accepted without raising."""
    message = {"command": "match_ready"}
    await lobbyconnection.on_message_received(message)
async def test_command_matchmaker_info(
    lobbyconnection,
    ladder_service,
    queue_factory,
    player_factory,
    mocker
):
    """matchmaker_info reports pop time, player count and rating boundaries."""
    queue = queue_factory("test", rating_type=RatingType.LADDER_1V1)
    queue.timer.next_queue_pop = 1_562_000_000
    queue.push(Search([
        player_factory(player_id=1, ladder_rating=(2000, 100), ladder_games=200),
    ]))
    queue.push(Search([
        player_factory(player_id=2, ladder_rating=(500, 120), ladder_games=100),
        player_factory(player_id=3, ladder_rating=(1500, 500), ladder_games=0),
    ]))
    queue.push(Search([
        player_factory(player_id=4, ladder_rating=(1000, 100), ladder_games=500),
        player_factory(player_id=5, ladder_rating=(1300, 100), ladder_games=200),
        player_factory(player_id=6, ladder_rating=(2000, 100), ladder_games=1000),
    ]))
    # Freeze time to one second before the queue pops
    mocker.patch(
        "server.matchmaker.matchmaker_queue.time.time",
        return_value=queue.timer.next_queue_pop - 1,
    )
    lobbyconnection.ladder_service.queues = {
        "test": queue
    }

    lobbyconnection.send = mock.AsyncMock()
    await lobbyconnection.on_message_received({
        "command": "matchmaker_info"
    })
    lobbyconnection.send.assert_called_with({
        "command": "matchmaker_info",
        "queues": [
            {
                "queue_name": "test",
                "queue_pop_time": "2019-07-01T16:53:20+00:00",
                "queue_pop_time_delta": 1.0,
                "team_size": 1,
                "num_players": 6,
                "boundary_80s": [(1800, 2200), (300, 700), (800, 1200)],
                "boundary_75s": [(1900, 2100), (400, 600), (900, 1100)]
            }
        ]
    })
async def test_connection_lost(lobbyconnection):
    """Losing the lobby connection propagates to the game connection."""
    game_connection = mock.create_autospec(GameConnection)
    lobbyconnection.game_connection = game_connection

    await lobbyconnection.on_connection_lost()

    game_connection.on_connection_lost.assert_called_once()
async def test_connection_lost_send(lobbyconnection, mock_protocol):
    """Nothing is written to the protocol after the connection was lost."""
    await lobbyconnection.on_connection_lost()

    await lobbyconnection.send({"command": "Some Message"})

    mock_protocol.send_message.assert_not_called()
    mock_protocol.send_messages.assert_not_called()
    mock_protocol.send_raw.assert_not_called()
async def test_check_policy_conformity(lobbyconnection, policy_server):
    """An 'honest' verdict from the policy server passes the check."""
    host, port = policy_server
    config.FAF_POLICY_SERVER_BASE_URL = f"http://{host}:{port}"

    honest = await lobbyconnection.check_policy_conformity(1, "honest", session=100)
    assert honest is True
async def test_check_policy_conformity_fraudulent(lobbyconnection, policy_server, database):
    """A 'fraudulent' verdict fails the check, aborts, and auto-bans the player."""
    host, port = policy_server
    config.FAF_POLICY_SERVER_BASE_URL = f"http://{host}:{port}"

    # 42 is not a valid player ID which should cause a SQL constraint error
    lobbyconnection.abort = mock.AsyncMock()
    with pytest.raises(ClientError):
        await lobbyconnection.check_policy_conformity(42, "fraudulent", session=100)

    lobbyconnection.abort = mock.AsyncMock()
    player_id = 200
    honest = await lobbyconnection.check_policy_conformity(player_id, "fraudulent", session=100)
    assert honest is False
    lobbyconnection.abort.assert_called_once()

    # Check that the user has a ban entry in the database
    async with database.acquire() as conn:
        result = await conn.execute(select(ban.c.reason).where(
            ban.c.player_id == player_id
        ))
        rows = result.fetchall()
        assert rows is not None
        assert rows[-1].reason == "Auto-banned because of fraudulent login attempt"
async def test_check_policy_conformity_fatal(lobbyconnection, policy_server):
    """Each fatal policy verdict aborts the connection and reports False."""
    policy_host, policy_port = policy_server
    config.FAF_POLICY_SERVER_BASE_URL = f"http://{policy_host}:{policy_port}"
    for verdict in ("already_associated", "fraudulent"):
        # Fresh mock per verdict so each iteration checks exactly one abort.
        lobbyconnection.abort = mock.AsyncMock()
        conforms = await lobbyconnection.check_policy_conformity(
            1, verdict, session=100
        )
        assert conforms is False
        lobbyconnection.abort.assert_called_once()
async def test_abort_connection_if_banned(
    lobbyconnection: LobbyConnection,
    mock_nts_client
):
    """Only users with a currently active ban may be rejected at login."""
    # test user that has never been banned
    lobbyconnection.player.id = 1
    await lobbyconnection.abort_connection_if_banned()
    # test user whose ban has been revoked
    lobbyconnection.player.id = 201
    await lobbyconnection.abort_connection_if_banned()
    # test user whose ban has expired
    lobbyconnection.player.id = 202
    await lobbyconnection.abort_connection_if_banned()
    # test user who is permabanned
    lobbyconnection.player.id = 203
    with pytest.raises(BanError) as banned_error:
        await lobbyconnection.abort_connection_if_banned()
    assert banned_error.value.message() == \
        "You are banned from FAF forever. <br>Reason: <br>Test permanent ban"
    # test user who is banned for another 46 hours
    lobbyconnection.player.id = 204
    with pytest.raises(BanError) as banned_error:
        await lobbyconnection.abort_connection_if_banned()
    # The remaining time is computed live, so only pin "1 day and ~21-22 hours".
    assert re.match(
        r"You are banned from FAF for 1 day and 2[12]\.[0-9]+ hours. <br>"
        "Reason: <br>Test ongoing ban with 46 hours left",
        banned_error.value.message()
    )
| FAForever/server | tests/unit_tests/test_lobbyconnection.py | test_lobbyconnection.py | py | 34,970 | python | en | code | 64 | github-code | 36 | [
{
"api_name": "server.games.VisibilityState.PUBLIC",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "server.games.VisibilityState",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 30,
"usage_type": "call"
},
{
... |
31347480186 | from PIL import Image
from gcbmanimation.util.tempfile import TempFileManager
# Disable Pillow's decompression-bomb safeguard: rendered animation frames can
# legitimately exceed the default pixel limit.
Image.MAX_IMAGE_PIXELS = None
class Frame:
    '''
    Represents a presentation-format image that can be included in an animation.
    A frame usually applies to a particular year and points to an image file on disk.

    Arguments:
    'year' -- the year this Frame applies to.
    'path' -- the path to the image file this Frame represents.
    'scale' -- the scale (metres per pixel) of the image, or None if unknown.
    '''

    def __init__(self, year, path, scale=None):
        self._year = year
        self._path = path
        self._scale = scale

    @property
    def year(self):
        '''The year this Frame applies to.'''
        return self._year

    @property
    def path(self):
        '''The path to the Frame's image file.'''
        return self._path

    @property
    def scale(self):
        '''
        The scale (in metres per pixel) of the image, where None means
        unknown or not applicable.
        '''
        return self._scale

    @property
    def size(self):
        '''The width and height of the image.'''
        return Image.open(self._path).size

    def composite(self, frame, send_to_bottom=False):
        '''
        Combines another RGBA Frame with this one using their alpha channels.

        Arguments:
        'frame' -- the frame to combine with this one.
        'send_to_bottom' -- use the other frame as the background instead of
            this one.

        Returns the merged image as a new Frame with the same year as this one.
        '''
        out_path = TempFileManager.mktmp(suffix=".png")
        this_image = Image.open(self._path)
        other_image = Image.open(frame.path)
        # alpha_composite requires both inputs to be RGBA, as documented above.
        if send_to_bottom:
            Image.alpha_composite(other_image, this_image).save(out_path)
        else:
            Image.alpha_composite(this_image, other_image).save(out_path)

        return Frame(self._year, out_path, self._scale)

    def merge_horizontal(self, *frames):
        '''
        Merges one or more Frames horizontally with this one.

        Arguments:
        'frames' -- one or more Frames to merge horizontally.

        Returns the merged image as a new Frame with the same year as this one.
        '''
        images = [Image.open(self._path)] + [Image.open(frame.path) for frame in frames]
        widths, heights = zip(*(image.size for image in images))
        total_width = sum(widths)
        max_height = max(heights)
        # White, fully opaque canvas; shorter images are top-aligned.
        merged_image = Image.new("RGBA", (total_width, max_height), color=(255, 255, 255, 255))
        x_offset = 0
        for image in images:
            merged_image.paste(image, (x_offset, 0))
            x_offset += image.size[0]

        out_path = TempFileManager.mktmp(suffix=".png")
        merged_image.save(out_path)
        # Scale is dropped: a side-by-side strip has no single resolution.
        return Frame(self._year, out_path, scale=None)

    def resize(self, max_width, max_height):
        '''
        Resizes the image as closely as possible to the specified width and height
        while preserving the aspect ratio.

        Arguments:
        'max_width' -- the new maximum width.
        'max_height' -- the new maximum height.

        Returns the resized image as a new Frame with the same year as this one
        and updated scale reflecting the new pixel size in metres.
        '''
        original_width, original_height = self.size
        aspect_ratio = original_width / original_height
        if aspect_ratio > 1:
            # Landscape: width is the binding constraint first.
            new_width = max_width
            new_height = int(new_width / aspect_ratio)
            if new_height > max_height:
                new_height = max_height
                new_width = int(new_height * aspect_ratio)
        else:
            # Portrait or square: height is the binding constraint first.
            new_height = max_height
            new_width = int(new_height * aspect_ratio)
            if new_width > max_width:
                new_width = max_width
                new_height = int(new_width / aspect_ratio)

        out_path = TempFileManager.mktmp(suffix=".png")
        # Image.LANCZOS replaces the deprecated Image.ANTIALIAS alias (removed
        # in Pillow 10); ANTIALIAS has been the same LANCZOS filter since 2.7.
        Image.open(self.path).resize((new_width, new_height), Image.LANCZOS).save(out_path)
        new_scale = self._scale * (original_width / new_width) if self._scale else None

        return Frame(self._year, out_path, new_scale)
| moja-global/GCBM.Animation | gcbmanimation/animator/frame.py | frame.py | py | 4,162 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.MAX_IMAGE_PIXELS",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
... |
19865653500 | import streamlit as st
import base64
import os
from nbanalyzer import *
from PIL import Image
import time
# Directory the app is launched from; used to locate bundled assets.
script_directory = os.getcwd()

# Accent colour applied to Streamlit progress bars via injected CSS.
PROGRESS_BAR_CUSTOM_COLOR = '#f63366'


def ordinal(n):
    """Return *n* with its English ordinal suffix (1 -> '1st', 12 -> '12th').

    The string "tsnrhtdd" packs the four suffixes th/st/nd/rd interleaved;
    the index expression yields 1, 2 or 3 only when n ends in 1, 2 or 3
    outside the teens, and 0 (-> 'th') otherwise.
    """
    return "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
def load_data(year: int, stat_type: str):
    """Fetch player stats for *year*, dispatching on the requested table.

    'advanced_box_score' comes from the advanced-metrics scraper; every
    other table is fetched via get_players_data, with header row 1 only
    for the play-by-play layout.
    """
    if stat_type == 'advanced_box_score':
        return get_advanced_metrics(year)
    header_row = 1 if stat_type == 'play-by-play' else 0
    return get_players_data(year, stat_type, header_row)
def filedownload(df):
    """Build an HTML anchor that downloads *df* as a base64-embedded CSV."""
    csv_text = df.to_csv(index=False)
    encoded = base64.b64encode(csv_text.encode()).decode()
    return (
        f'<a href="data:file/csv;base64,{encoded}" '
        'download="playerstats.csv">Download CSV File</a>'
    )
# Human-readable labels for the stat-table identifiers used by the scraper.
_STAT_TYPE_LABELS = {
    'per_game': 'Per Game',
    'totals': 'Total',
    'per_minute': 'Per 36 Minutes',
    'advanced': 'Advanced',
    'per_poss': 'Per 100 Possessions',
    'play-by-play': 'Play-by-Play',
    'advanced_box_score': 'Advanced Box Score',
}


def translate_stat_type(stat_type):
    """Translate an internal stat-type key into its display label.

    Returns the string 'None' (not the None object — kept for backward
    compatibility with existing callers) when the key is unknown.
    """
    return _STAT_TYPE_LABELS.get(stat_type, 'None')
def main():
    """Render the NBA Stats Explorer Streamlit app.

    Streamlit re-executes this function top-to-bottom on every user
    interaction; the sidebar widgets drive which season and stat table
    is scraped and displayed.
    """
    st.set_option('deprecation.showPyplotGlobalUse', False)
    # Page chrome: favicon from the app directory, title, and NBA logo banner.
    icon = Image.open(os.path.join(script_directory, 'favicon.ico'))
    st.set_page_config('NBA Stats Explorer', icon)
    st.markdown('<img src=\"https://cdn.nba.com/logos/nba/nba-logoman-75-word_white.svg\" alt=\"NBA logo\" style=\"width:150px\"> ' ,
                unsafe_allow_html=True)
    st.title('NBA Stats Explorer')
    st.markdown("""
    This app performs simple webscraping of NBA player stats data!
    * **Python libraries:** base64, matplotlib, pandas, plotly, streamlit
    * **Data source:** [Basketball-reference.com](https://www.basketball-reference.com/).
    """)
    st.sidebar.header('User Input Features')
    selected_year = st.sidebar.selectbox('Year', list(reversed(range(1977,2023))))
    selected_stat = st.sidebar.selectbox('Player Stats', STAT_TYPES, format_func=translate_stat_type)
    playerstats = load_data(selected_year, selected_stat)
    # Sidebar - Team selection
    sorted_unique_team = sorted(playerstats.Tm.unique())
    selected_team = st.sidebar.multiselect('Team', sorted_unique_team, sorted_unique_team)
    # Sidebar - Position selection
    unique_pos = ['C','PF','SF','PG','SG']
    selected_pos = st.sidebar.multiselect('Position', unique_pos, unique_pos)
    # Filtering data
    df_selected_team = playerstats[(playerstats.Tm.isin(selected_team)) & (playerstats.Pos.isin(selected_pos))]
    st.header('Displaying Players\' ' + translate_stat_type(selected_stat) + ' Stats of Selected Team(s)')
    st.write('Data Dimension: ' + str(df_selected_team.shape[0]) + ' rows and ' + str(df_selected_team.shape[1]) + ' columns.')
    st.dataframe(df_selected_team)
    st.markdown(filedownload(df_selected_team), unsafe_allow_html=True)
    # MVP voting is only published for completed seasons; for 2022 a
    # hand-picked list of stars is used instead.
    if selected_year < 2022:
        best_players = get_mvp_voting(selected_year, 5)
    else:
        best_players = ['Nikola Jokić', 'Joel Embiid', 'Chris Paul', 'Stephen Curry', 'Kevin Durant', 'Giannis Antetokounmpo',
                        'Ja Morant', 'Luka Dončić', 'Devin Booker', 'DeMar DeRozan', 'Jimmy Butler']
    with st.spinner('Loading season summary...'):
        st.header(f'{selected_year} Season Summary')
        st.write(f"""
        The {selected_year} season was the {ordinal(selected_year - 1946)} of the [National Basketball Association](https://en.wikipedia.org/wiki/National_Basketball_Association).
        As usual, we have to analyze its vast data and explore player performances to decide which players performed the best!
        """)
    if selected_year < 2022:
        with st.expander(f'{selected_year} NBA MVP'):
            st.write(f"""
            ### MVP
            This season's MVP was **{best_players[0]}** who won the prize against the likes of {best_players[1]}, {best_players[2]}
            and {best_players[3]}.
            """)
    with st.expander(f'Intercorrelation Matrix Heatmap - {selected_year}'):
        st.markdown("""
        ### Intercorrelation Matrix Heatmap
        The matrix is calculated from a cross-tabulation and shows how statistically similar all pairs of variables are in their
        distributions across the various samples. The table below shows the intercorrelations between per game player stats.
        """)
        with st.spinner('Loading heatmap...'):
            draw_intercorrelation_heatmap(selected_year)
            st.pyplot()
    with st.expander(f'Scoring - {selected_year}'):
        st.markdown("""
        ### Points per 75 possessions x TS% Scatter Plot
        The scatter plot is used to analyze the relation between \"inflation adjusted\" scoring and efficiency from players across the league.
        """)
        with st.spinner('Loading scatter plot'):
            st.write(gen_scoring_efficiency_plot(selected_year, best_players))
    # The three-point line was introduced in the 1979-80 season.
    if selected_year >= 1980:
        with st.expander(f'Shooting - {selected_year}'):
            st.markdown("""
            ### 3-Point Attempts x 3P% Scatter Plot
            The scatter plot is used to analyze the relation between 3-Point Field Goal attempts per 100 possessions and 3-Point Field Goal
            Percentage from players across the league as well as observe the evolution of shooting along the decades.
            """)
            with st.spinner('Loading scatter plot'):
                st.write(gen_shooting_efficiency_plot(selected_year))
    with st.expander(f'Playmaking - {selected_year}'):
        st.markdown("""
        ### Offensive Load x Box Creation Scatter Plot
        The scatter plot is used to analyze the relation between a per 100 estimate of the number of true shots created for teammates and
        the percentage of possessions a player is directly or indirectly involved in a true shooting attempt, or commits a turnover.
        """)
        with st.spinner('Loading scatter plot'):
            st.write(gen_playmaking_plot(selected_year))
    with st.expander('Player Finder'):
        st.markdown("""
        ### Player Finder
        Player Finder is a tool to explore the database and see how specific players are performing relative to the league in 5 major categories
        **Scoring, Efficiency, Shooting, Creation and Load**. Try it out and see how your favorite NBA star is doing :triumph::basketball:.
        """)
        # Re-colour Streamlit's progress bar via injected CSS.
        st.markdown(f"""
        <style>
        .st-g3 {{
            background-color: {PROGRESS_BAR_CUSTOM_COLOR};
        }}
        </style>
        """, unsafe_allow_html=True)
        advanced_box_score = get_advanced_metrics(selected_year)
        selected_player = st.selectbox('Player Name', advanced_box_score['Player'])
        showed_name = False
        if selected_player != '':
            with st.spinner('Loading player summary'):
                # Skip the identifying columns; show one percentile bar per metric.
                for stat in ADVANCED_BOX_SCORE_COLS[3:]:
                    result = get_player_percentile_from_advanced_stat(advanced_box_score, selected_player, stat)
                    if result.empty:
                        break
                    if not showed_name:
                        # Print the player's header once, before the first metric.
                        player_name = result.iloc[0]['Player']
                        st.markdown(f'#### {player_name} {selected_year} Summary')
                        showed_name = True
                    player_stat = int(result.iloc[0][stat] * 100)
                    st.markdown(f'{stat} - {ordinal(player_stat)} Percentile')
                    st.progress(player_stat)
    # Plus/minus data is only tracked from the 1996-97 season onward.
    if selected_year >= 1997:
        with st.expander(f'Impact - {selected_year}'):
            st.markdown("""
            ### Impact metrics
            Impact metrics are used to measure a player's impact on the success of a given team. In this selection:
            * **On-Off**: Average difference between the Plus/Minus when player is on the court vs. off the court.
            * **OnCourt**: Plus/Minus Per 100 Possessions (On Court only).
            * **BPM**: A box score estimate of the points per 100 possessions a player contributed above a league-average player, translated to an average team.
            """)
            st.write(gen_on_off_plot(selected_year, best_players))
if __name__ == '__main__':
    # Streamlit executes this module as a script on every rerun.
    main()
| tta13/NBA-Stats-Explorer | nba_app.py | nba_app.py | py | 9,140 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "streamlit.set_option",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"... |
70786129065 | from Bio.Seq import Seq
import os
# Column names of the BLAST tabular (outfmt 6) report, in order.
_BLAST_FIELDS = (
    "qseqid", "sseqid", "pident", "length", "mismatch", "gapopen",
    "qstart", "qend", "sstart", "send", "evalue", "bitscore",
)


def parse_query_results(path):
    """Parse a BLAST tabular (outfmt 6) query-results file.

    :param path: The path of the file that contains the query results
    :return: A list of dictionaries, one per hit, keyed by the standard
             outfmt-6 column names (all values kept as strings)

    Fixes over the previous implementation: the file handle is closed via a
    context manager, blank lines are skipped instead of crashing, and the
    newline is stripped with rstrip("\\n") — the old ``[:-1]`` chopped the
    last character of the bitscore when the final line had no newline.
    """
    results = []
    with open(path, "r") as blast_file:
        for line in blast_file:
            line = line.rstrip("\n")
            if not line:
                continue
            values = line.split("\t")
            # zip truncates to 12 fields, matching the old fixed indexing.
            results.append(dict(zip(_BLAST_FIELDS, values)))
    return results
def get_pairs(query_results):
    """Collect (miRNA, host) identifier pairs for every 100%-identity hit.

    Host headers containing a ';' are shortened to the part before the
    semicolon plus the character at the first '|'.

    :param query_results: list of parsed BLAST-hit dictionaries
    :return: list of (miRNA, host) tuples

    NOTE(review): ``host_id[pipe_at]`` is the single '|' character, so the
    suffix after the pipe is dropped; this quirk is preserved as-is from
    the original implementation — confirm intent.
    """
    pairs = []
    for hit in query_results:
        if float(hit["pident"]) != 100:
            continue
        mirna_id = hit["qseqid"]
        host_id = hit["sseqid"]
        try:
            semicolon_at = host_id.index(";")
            pipe_at = host_id.index("|")
            host_id = "%s%s" % (host_id[:semicolon_at], host_id[pipe_at])
        except ValueError:
            # No ';' (or no '|') in the header: keep it unchanged.
            pass
        pairs.append((mirna_id, host_id))
    return pairs
def get_3_tag_UTR(host_header,file_name):
    """
    This function will return the sequence that the given header describes
    (looked up in Resources/<file_name>.fasta next to this module).
    :param host_header: The given host header (matched as a substring of
                        a '>'-prefixed FASTA header line)
    :param file_name: base name of the FASTA file, without extension
    :return: The sequence string, or None if the header is never found
    """
    # NOTE(review): Windows-style path separators; the file handle is never
    # closed explicitly.
    mRNA_path_file = r"%s\Resources\%s.fasta" %(os.path.dirname(os.path.abspath(__file__)),file_name)
    mRNA_file = open(mRNA_path_file,"r")
    string_to_find = ">%s" % host_header
    start_sequence_assembly = False   # True while inside the wanted record
    stop_search = False               # next header seen; sequence complete
    sequence = ""
    for line in mRNA_file:
        if stop_search:
            return sequence
        if start_sequence_assembly:
            if line[0] == ">":
                stop_search = True
            else:
                # Append the line without its trailing newline.
                sequence = "%s%s" % (sequence,line[:-1])
        else:
            if string_to_find in line:
                start_sequence_assembly = True
    # Record was the last one in the file: return what was assembled.
    if sequence!= "":
        return sequence
    return None # Should not get here
def get_cbr_miRNA_That_Met_threshold(query_results_cbr):
    """Annotate each C.briggsae hit in-place with an 'is_conserved' flag.

    A hit is conserved exactly when its percent identity equals 100.

    :param query_results_cbr: list of parsed BLAST-hit dictionaries
    :return: None (the dictionaries are mutated in-place)
    """
    for hit in query_results_cbr:
        hit['is_conserved'] = float(hit['pident']) == 100
def get_miRNA_and_host_sequences(pairs,miRNA_flie_name,mRNA_file_name):
    """
    This function will find the sequences of all the host mRNAs and miRNAs
    and will pair them with their headers.
    :param pairs: [(miRNA, mRNA)] header pairs
    :param miRNA_flie_name: FASTA base name for miRNA sequences (sic: 'flie')
    :param mRNA_file_name: FASTA base name for mRNA sequences
    :return: [((miRNA, sequence), (mRNA, sequence))]
    """
    new_pairs = []
    leng = len(pairs)
    count = 1
    for pair in pairs:
        # NOTE(review): prints "total/current" — the arguments look swapped
        # for a progress display; confirm the intended format.
        print("%d/%d" % (leng,count))
        count+=1
        host_header = pair[1]
        host_sequence = get_3_tag_UTR(host_header,mRNA_file_name)
        miRNA_header = pair[0]
        miRNA_sequence = get_3_tag_UTR(miRNA_header, miRNA_flie_name)
        new_pairs.append(((miRNA_header,miRNA_sequence),(host_header,host_sequence)))
    return new_pairs
def get_seed(sequence):
    """Return the miRNA seed region: nucleotides 2-8 (0-based slice [1:8]).

    :param sequence: the full miRNA sequence
    :return: the 7-character seed (shorter if the input is shorter)
    """
    seed_start, seed_end = 1, 8
    return sequence[seed_start:seed_end]
def is_seed_criteria_met(miRNA, mRNA):
    """Check whether the mRNA contains the reverse complement of the miRNA seed.

    :param miRNA: the mature miRNA sequence (RNA alphabet, 'U' bases)
    :param mRNA: the candidate target 3' UTR (DNA alphabet)
    :return: True/False, or the string "unknown" when the mRNA sequence is
             unavailable (kept for backward compatibility with callers)
    """
    if mRNA == "Sequence unavailable":
        return "unknown"
    # Convert the RNA seed to DNA before reverse-complementing.
    seed_dna = get_seed(miRNA).replace("U", "T")
    target_site = Seq(seed_dna).reverse_complement()
    # str() is the public accessor for a Seq's sequence; the previous code
    # read the private Seq._data attribute, which is not part of the API.
    return str(target_site) in mRNA
def is_seed_criteria_met_in_pairs(pairs):
    """Evaluate the seed criterion for every ((miRNA), (host)) pair.

    :param pairs: [((miRNA_name, miRNA_seq), (host_name, host_seq))]
    :return: list of dictionaries with names, sequences and the 'is_target'
             verdict from is_seed_criteria_met
    """
    evaluated = []
    for (mirna_name, mirna_seq), (host_name, host_seq) in pairs:
        evaluated.append({
            "miRNA_name": mirna_name,
            "miRNA_seq": mirna_seq,
            "Host_name": host_name,
            "Host_seq": host_seq,
            "is_target": is_seed_criteria_met(mirna_seq, host_seq),
        })
    return evaluated
def parse_results_to_csv(query_results):
    """Write *query_results* to 'results.csv' in the current directory.

    The first dictionary's keys form the header row; every row (header
    included) keeps the original trailing-comma format, so the output is
    byte-identical to the previous implementation. The file is now opened
    once instead of being reopened in append mode for every single row.

    :param query_results: non-empty list of dictionaries to save
    :return: None

    Note: values are not CSV-escaped; keys and values must not contain
    commas or newlines.
    """
    with open('results.csv', 'w') as out_file:
        header = "".join("%s," % key for key in query_results[0].keys())
        out_file.write("%s\n" % header)
        for row in query_results:
            # Each row uses its own key order, matching the old behaviour.
            line = "".join("%s," % row[key] for key in row.keys())
            out_file.write("%s\n" % line)
def get_all_cell_dictionaries(path_cel,path_cel_pre):
    """
    This function will return all the C.elegans miRNA's, joining mature and
    precursor records whose names share the same prefix before the last '_'.
    :param path_cel: The path to the C.elegans mature miRNA FASTA file
    :param path_cel_pre: The path to the C.elegans pre-miRNA FASTA file
    :return: A list of dictionaries, one per matching (mature, pre) pair
    """
    # Both inputs are assumed to be strictly alternating FASTA files —
    # one header line followed by exactly one sequence line. TODO confirm:
    # multi-line sequences would be mis-parsed by the odd/even toggle below.
    # NOTE(review): neither file handle is closed explicitly.
    cel_file = open(path_cel,'r')
    all_cell_dictionary = []
    odd = True          # True while the next line is expected to be a header
    name = None
    seq = None
    for line in cel_file:
        if odd:
            name = line[1:-1]   # strip the leading '>' and trailing newline
            odd = False
        else:
            cell_dictionary = {}
            seq = line[:-1]
            odd = True
            cell_dictionary['C.elegans mature name'] = name
            cell_dictionary['C.elegans mature sequence'] = seq
            all_cell_dictionary.append(cell_dictionary)
    # Reset the toggle and parse the pre-miRNA file the same way.
    odd = True
    name = None
    seq = None
    cel_file_pre = open(path_cel_pre, 'r')
    all_cell_pre_dictionary = []
    for line in cel_file_pre:
        if odd:
            name = line[1:-1]
            odd = False
        else:
            cell_dictionary = {}
            seq = line[:-1]
            odd = True
            cell_dictionary['C.elegans pre-miRNA name'] = name
            cell_dictionary['C.elegans pre-mRNA sequence'] = seq
            all_cell_pre_dictionary.append(cell_dictionary)
    # Join mature and precursor records on the name prefix before the last
    # '_' (O(n*m) scan over both lists).
    combined = []
    for i in range(len(all_cell_dictionary)):
        for j in range(len(all_cell_pre_dictionary)):
            pre_name = all_cell_pre_dictionary[j]['C.elegans pre-miRNA name']
            pre_seq = all_cell_pre_dictionary[j]['C.elegans pre-mRNA sequence']
            cel_name = all_cell_dictionary[i]['C.elegans mature name']
            cel_seq = all_cell_dictionary[i]['C.elegans mature sequence']
            if pre_name[:pre_name.rindex('_')] == cel_name[:cel_name.rindex('_')]:
                dict = {}   # NOTE(review): local name shadows the builtin 'dict'
                dict['C.elegans pre-miRNA name'] = pre_name
                dict['C.elegans pre-miRNA sequence'] = pre_seq
                dict['C.elegans mature name'] = cel_name
                dict['C.elegans mature sequence'] = cel_seq
                combined.append(dict)
    return combined
def add_host_data(all_cell_dictionary, final_pairs_mRNA):
    """Attach host-gene name and targeting verdict to each miRNA record.

    Mutates *all_cell_dictionary* in-place. Records without a matching
    entry in *final_pairs_mRNA* get "-" for both fields; when several
    entries match, the last one wins (same as before).

    :param all_cell_dictionary: list of miRNA record dictionaries
    :param final_pairs_mRNA: list of dictionaries from is_seed_criteria_met_in_pairs
    :return: None
    """
    verdict_text = {True: 'yes', False: 'no'}
    for record in all_cell_dictionary:
        mature_name = record['C.elegans mature name']
        matched = False
        for pair_info in final_pairs_mRNA:
            if pair_info["miRNA_name"] != mature_name:
                continue
            matched = True
            record["Host gene name"] = pair_info["Host_name"]
            # Anything other than True/False (e.g. "unknown") maps to 'unknown'.
            record["Targets the host gene"] = verdict_text.get(
                pair_info["is_target"], 'unknown')
        if not matched:
            record["Host gene name"] = "-"
            record["Targets the host gene"] = '-'
def add_cbr_data(all_cell_dictionary, query_results_cbr):
    """Attach C.briggsae conservation info to each miRNA record in-place.

    Matching hits store the C.briggsae name when conserved and False when
    not; records with no hit at all get "-". When several hits match, the
    last one wins (same as before).

    :param all_cell_dictionary: list of miRNA record dictionaries
    :param query_results_cbr: BLAST hits annotated with 'is_conserved'
    :return: None
    """
    for record in all_cell_dictionary:
        mature_name = record['C.elegans mature name']
        found_hit = False
        for hit in query_results_cbr:
            if hit["qseqid"] != mature_name:
                continue
            found_hit = True
            if hit["is_conserved"]:
                record["Conserved in C.briggsae"] = hit["sseqid"]
            else:
                record["Conserved in C.briggsae"] = False
        if not found_hit:
            record["Conserved in C.briggsae"] = "-"
if __name__ == "__main__":
    # All inputs are FASTA/BLAST files under the Resources directory next to
    # this script (Windows-style path separators, as elsewhere in the module).
    path_mRNA = "%s\%s" %(os.path.dirname(os.path.abspath(__file__)),r"Resources\res_blastn_compact_mRNA.fasta")
    path_cbr = "%s\%s" %(os.path.dirname(os.path.abspath(__file__)),r"Resources\res_blastn_compact_cbr.fasta")
    path_cel = "%s\%s" %(os.path.dirname(os.path.abspath(__file__)),r"Resources\cel.fasta")
    path_cel_pre = "%s\%s" %(os.path.dirname(os.path.abspath(__file__)),r"Resources\cel-pre.fasta")
    print("parsing query result")
    query_results_mRNA = parse_query_results(path_mRNA)
    query_results_cbr = parse_query_results(path_cbr)
    print("get pairs")
    pairs_mRNA = get_pairs(query_results_mRNA)
    # Flags each C.briggsae hit with 'is_conserved' (mutates in-place).
    get_cbr_miRNA_That_Met_threshold(query_results_cbr)
    print("get host")
    new_pairs_mRNA = get_miRNA_and_host_sequences(pairs_mRNA,"cel","mRNA")
    print("Updating seed criteria")
    final_pairs_mRNA = is_seed_criteria_met_in_pairs(new_pairs_mRNA)
    print("Gathering data")
    all_cell_dictionary = get_all_cell_dictionaries(path_cel,path_cel_pre)
    add_host_data(all_cell_dictionary,final_pairs_mRNA)
    add_cbr_data(all_cell_dictionary,query_results_cbr)
    # Final report lands in results.csv in the working directory.
    parse_results_to_csv(all_cell_dictionary)
{
"api_name": "os.path.dirname",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "Bio.Seq.Seq",
"line_... |
38930132887 | """
from pytube import Playlist
import pytube
itemlist = {}
playlist = Playlist("")
for item in playlist:
j = pytube.YouTube(item).title.title()
oi = pytube.YouTube(item).metadata.metadata
print(oi)
print(j)
itemlist[j] = [item, oi]
print(itemlist)
from mutagen.easyid3 import EasyID3
audio = EasyID3("example.mp3")
audio['title'] = u"Example Title"
audio['artist'] = u"Me"
audio['album'] = u"My album"
audio['composer'] = u"" # clear
audio.save()
"""
import re
import os
from mutagen.easyid3 import EasyID3

# Folder whose MP3 files get retagged; hard-coded to a local machine path.
universal_folder = r"D:\Pycharm\PycharmProjects\AHHHHHHHH\DM"
nld = []  # NOTE(review): never used
os.chdir(universal_folder)
# Stamp every file in the folder with the same album/artist ID3 tags, using
# the file name (minus ".mp3") as the title.
# NOTE(review): assumes every directory entry is an MP3 with an ID3 header;
# a stray non-MP3 file would make EasyID3() raise.
for song in os.listdir(universal_folder):
    audio = EasyID3(song)
    audio['title'] = song.replace(".mp3", "")
    audio['artist'] = u"Panic! At The Disco"
    audio['album'] = u"A Fever You Can't Sweat Out"
    audio['composer'] = u""  # clear any existing composer tag
    audio.save()
    print(song)
| Suave101/pytube-AHHHHHH | Tags.py | Tags.py | py | 903 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "mutagen.easyid3.EasyID3",
"line_number": 33,
"usage_type": "call"
}
] |
12151884978 | import torch # noqa
from model import softmax_classifier
from model import softmax_classifier_backward
from model import cross_entropy
from utils import Metric, accuracy # noqa
# Public API of this module. The previous list named 'test_epoch' twice and
# omitted 'train_epoch', so `from train import *` never exported it.
__all__ = ['create_model', 'train_epoch', 'test_epoch', 'train_loop']
#################################################
# create_model
#################################################
def create_model():
    """Creates a Softmax Classifier model `(w, b)` for 10-class, 28x28 input.

    Both tensors are drawn uniformly from (-sqrt(k), sqrt(k)) with
    k = 1/in_dim, mirroring torch.nn.Linear's default initialisation.

    Returns:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
    """
    # BEGIN SOLUTION
    num_classes, in_dim = 10, 28 * 28
    bound = (1 / in_dim) ** 0.5
    # Draw U[0, 1) samples, then rescale them onto (-bound, bound).
    w = torch.rand(num_classes, in_dim) * (2 * bound) - bound
    b = torch.rand(num_classes) * (2 * bound) - bound
    # END SOLUTION
    return w, b
#################################################
# train_epoch
#################################################
def train_epoch(w, b, lr, loader):
    """Trains over an epoch, and returns the loss and accuracy over the epoch.

    Note: The accuracy and loss are average over the epoch. That's different from
    running the classifier over the data again at the end of the epoch, as the
    weights changed over the iterations. However, it's a common practice, since
    iterating over the training set (again) is time and resource exhustive.

    Args:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
        lr (float): The learning rate.
        loader (torch.utils.data.DataLoader): A data loader. An iterator over the dataset.

    Returns:
        loss_metric (Metric): The loss metric over the epoch.
        acc_metric (Metric): The accuracy metric over the epoch.
    """
    device = w.device
    loss_metric = Metric()
    acc_metric = Metric()
    for x, y in loader:
        x, y = x.to(device=device), y.to(device=device)
        # BEGIN SOLUTION
        # NOTE: In your solution you MUST keep the loss in a tensor called `loss`
        # NOTE: In your solution you MUST keep the acurracy in a tensor called `acc`
        num_classes, in_dim = w.shape
        batch_size = x.shape[0]
        # Reshape the input x into (batch_size, in_dim) rows.
        x = x.reshape(batch_size, in_dim)
        # Run the model to get a prediction
        pred = softmax_classifier(x, w, b)
        # Compute the cross-entropy loss and batch accuracy.
        loss = cross_entropy(pred, y)
        acc = accuracy(pred, y)
        # Compute the gradients of the weights — presumably fills w.grad and
        # b.grad manually (no autograd graph); TODO confirm against model.py.
        softmax_classifier_backward(x, w, b, pred, y)
        # Update the weights: plain in-place SGD step.
        w -= lr * w.grad
        b -= lr * b.grad
        # END SOLUTION
        loss_metric.update(loss.item(), x.size(0))
        acc_metric.update(acc.item(), x.size(0))
    return loss_metric, acc_metric
#################################################
# test_epoch
#################################################
def test_epoch(w, b, loader):
    """Evaluate the classifier over *loader* without updating the weights.

    Args:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
        loader (torch.utils.data.DataLoader): A data loader. An iterator over the dataset.

    Returns:
        loss_metric (Metric): The loss metric over the epoch.
        acc_metric (Metric): The accuracy metric over the epoch.
    """
    device = w.device
    loss_metric, acc_metric = Metric(), Metric()
    in_dim = w.shape[1]
    for inputs, targets in loader:
        inputs = inputs.to(device=device)
        targets = targets.to(device=device)
        # BEGIN SOLUTION
        # NOTE: In your solution you MUST keep the loss in a tensor called `loss`
        # NOTE: In your solution you MUST keep the acurracy in a tensor called `acc`
        # Flatten images into (batch, in_dim) rows before the linear model.
        flat = inputs.reshape(inputs.shape[0], in_dim)
        pred = softmax_classifier(flat, w, b)
        loss = cross_entropy(pred, targets)
        acc = accuracy(pred, targets)
        # END SOLUTION
        batch_count = flat.size(0)
        loss_metric.update(loss.item(), batch_count)
        acc_metric.update(acc.item(), batch_count)
    return loss_metric, acc_metric
#################################################
# PROVIDED: train
#################################################
def train_loop(w, b, lr, train_loader, test_loader, epochs, test_every=1):
    """Trains the Softmax Classifier model and report the progress.

    Args:
        w (torch.Tensor): The weight tensor, has shape `(num_classes, in_dim)`.
        b (torch.Tensor): The bias tensor, has shape `(num_classes,)`.
        lr (float): The learning rate.
        train_loader (torch.utils.data.DataLoader): The training set data loader.
        test_loader (torch.utils.data.DataLoader): The test set data loader.
        epochs (int): Number of training epochs.
        test_every (int): How frequently to report progress on test data.
    """
    for epoch in range(1, epochs + 1):
        # One pass of SGD over the training data (updates w and b in-place).
        train_loss, train_acc = train_epoch(w, b, lr, train_loader)
        print('Train', f'Epoch: {epoch:03d} / {epochs:03d}',
              f'Loss: {train_loss.avg:7.4g}',
              f'Accuracy: {train_acc.avg:.3f}',
              sep='  ')
        if epoch % test_every == 0:
            # Periodic evaluation on held-out data; weights are not modified.
            test_loss, test_acc = test_epoch(w, b, test_loader)
            print(' Test', f'Epoch: {epoch:03d} / {epochs:03d}',
                  f'Loss: {test_loss.avg:7.4g}',
                  f'Accuracy: {test_acc.avg:.3f}',
                  sep='  ')
| antebi-itai/Weizmann | DL for CV/HW1/Solution/Code/train.py | train.py | py | 5,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.rand",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "utils.Metric",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "utils.Metric",
"line_number": ... |
28890245799 | """Score network module."""
import torch
import copy
import math
from torch import nn
from torch.nn import functional as F
from openfold.utils.rigid_utils import Rigid, Rotation
from data import utils as du
from data import all_atom
from model import ipa_pytorch
from motif_scaffolding import twisting
import functools as fn
Tensor = torch.Tensor
def get_index_embedding(indices, embed_size, max_len=2056):
    """Creates sine / cosine positional embeddings from prespecified indices.

    Args:
        indices: offsets of size [..., N_edges] of type integer
        embed_size: dimension of the embeddings to create
        max_len: maximum length used in the frequency schedule.

    Returns:
        positional embedding of shape [..., N_edges, embed_size]
        (sin features followed by cos features).
    """
    freq_idx = torch.arange(embed_size // 2).to(indices.device)
    # Phase: index * pi / max_len^(2k / embed_size), one column per frequency.
    phase = indices[..., None] * math.pi / (max_len ** (2 * freq_idx[None] / embed_size))
    sin_part = torch.sin(phase).to(indices.device)
    cos_part = torch.cos(phase).to(indices.device)
    return torch.cat([sin_part, cos_part], axis=-1)
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
    """Sinusoidal embeddings for diffusion timesteps scaled from [0, 1].

    Adapted from
    https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py

    Args:
        timesteps: 1-D tensor of sampled times in [0, 1].
        embedding_dim: output feature dimension (odd dims are zero-padded).
        max_positions: scale factor mapping times onto integer-like positions.

    Returns:
        Tensor of shape [len(timesteps), embedding_dim].
    """
    assert len(timesteps.shape) == 1
    positions = timesteps * max_positions
    half_dim = embedding_dim // 2
    # Geometric frequency ladder: exp(-log(max_positions) * k / (half_dim - 1)).
    log_scale = math.log(max_positions) / (half_dim - 1)
    freqs = torch.exp(
        torch.arange(half_dim, dtype=torch.float32, device=positions.device) * -log_scale
    )
    angles = positions.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:
        # Zero-pad the last column so the output width matches embedding_dim.
        emb = F.pad(emb, (0, 1), mode='constant')
    assert emb.shape == (timesteps.shape[0], embedding_dim)
    return emb
class Embedder(nn.Module):
    """Embeds per-residue (node) and pairwise (edge) input features.

    Node features combine a timestep embedding, the fixed-residue mask bit
    and a positional index embedding.  Edge features combine the
    cross-concatenated node features, relative sequence-offset embeddings
    and, optionally, a self-conditioning Ca-distance distogram.
    """

    def __init__(self, model_conf):
        super(Embedder, self).__init__()
        self._model_conf = model_conf
        self._embed_conf = model_conf.embed

        # Time step embedding
        index_embed_size = self._embed_conf.index_embed_size
        t_embed_size = index_embed_size
        node_embed_dims = t_embed_size + 1   # +1 for the fixed-residue mask bit
        edge_in = (t_embed_size + 1) * 2     # cross-concat doubles the node dims

        # Sequence index embedding
        node_embed_dims += index_embed_size
        edge_in += index_embed_size

        node_embed_size = self._model_conf.node_embed_size
        self.node_embedder = nn.Sequential(
            nn.Linear(node_embed_dims, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.LayerNorm(node_embed_size),
        )

        if self._embed_conf.embed_self_conditioning:
            edge_in += self._embed_conf.num_bins
        edge_embed_size = self._model_conf.edge_embed_size
        self.edge_embedder = nn.Sequential(
            nn.Linear(edge_in, edge_embed_size),
            nn.ReLU(),
            nn.Linear(edge_embed_size, edge_embed_size),
            nn.ReLU(),
            nn.Linear(edge_embed_size, edge_embed_size),
            nn.LayerNorm(edge_embed_size),
        )

        self.timestep_embedder = fn.partial(
            get_timestep_embedding,
            embedding_dim=self._embed_conf.index_embed_size
        )
        self.index_embedder = fn.partial(
            get_index_embedding,
            embed_size=self._embed_conf.index_embed_size
        )

    def _cross_concat(self, feats_1d, num_batch, num_res):
        """Concatenate every ordered pair of per-residue features -> [B, N*N, 2*D]."""
        return torch.cat([
            torch.tile(feats_1d[:, :, None, :], (1, 1, num_res, 1)),
            torch.tile(feats_1d[:, None, :, :], (1, num_res, 1, 1)),
        ], dim=-1).float().reshape([num_batch, num_res**2, -1])

    def forward(
            self,
            *,
            seq_idx,
            t,
            fixed_mask,
            self_conditioning_ca,
        ):
        """Embeds a set of inputs.

        Args:
            seq_idx: [..., N] Positional sequence index for each residue.
            t: Sampled t in [0, 1].
            fixed_mask: mask of fixed (motif) residues.
            self_conditioning_ca: [..., N, 3] Ca positions of self-conditioning
                input.

        Returns:
            node_embed: [B, N, D_node]
            edge_embed: [B, N, N, D_edge]
        """
        num_batch, num_res = seq_idx.shape
        node_feats = []
        # Timestep embedding, broadcast to every residue, with the fixed-mask
        # bit appended so the network can distinguish motif residues.
        fixed_mask = fixed_mask[..., None]
        prot_t_embed = torch.tile(
            self.timestep_embedder(t)[:, None, :], (1, num_res, 1))
        prot_t_embed = torch.cat([prot_t_embed, fixed_mask], dim=-1)
        node_feats = [prot_t_embed]
        pair_feats = [self._cross_concat(prot_t_embed, num_batch, num_res)]
        # Positional index features.
        node_feats.append(self.index_embedder(seq_idx))
        rel_seq_offset = seq_idx[:, :, None] - seq_idx[:, None, :]
        rel_seq_offset = rel_seq_offset.reshape([num_batch, num_res**2])
        pair_feats.append(self.index_embedder(rel_seq_offset))
        # Self-conditioning distogram.
        if self._embed_conf.embed_self_conditioning:
            sc_dgram = du.calc_distogram(
                self_conditioning_ca,
                self._embed_conf.min_bin,
                self._embed_conf.max_bin,
                self._embed_conf.num_bins,
            )
            pair_feats.append(sc_dgram.reshape([num_batch, num_res**2, -1]))
        node_embed = self.node_embedder(torch.cat(node_feats, dim=-1).float())
        edge_embed = self.edge_embedder(torch.cat(pair_feats, dim=-1).float())
        edge_embed = edge_embed.reshape([num_batch, num_res, num_res, -1])
        if torch.any(node_embed.isnan()):
            # Previously this dropped into an interactive debugger
            # (`import ipdb; ipdb.set_trace()`), which hangs non-interactive
            # runs and raises ImportError when ipdb is absent.  Keep the
            # diagnostic but let execution continue.
            print("node_embed is somewhere nan in Embedder")
        return node_embed, edge_embed
class ScoreNetwork(nn.Module):
    """SE(3) diffusion score network: embeds inputs, runs the IPA score
    model, and optionally applies twisting (motif conditioning) corrections
    to the predicted rotation/translation scores."""
    def __init__(self, model_conf, diffuser):
        super(ScoreNetwork, self).__init__()
        self._model_conf = model_conf
        # Produces the initial node/edge embeddings consumed by the IPA model.
        self.embedding_layer = Embedder(model_conf)
        self.diffuser = diffuser
        self.score_model = ipa_pytorch.IpaScore(model_conf, diffuser)
    def _apply_mask(self, aatype_diff, aatype_0, diff_mask):
        # Blend: take aatype_diff where diff_mask==1, aatype_0 where it is 0.
        return diff_mask * aatype_diff + (1 - diff_mask) * aatype_0
    # NOTE(review): the parameter `F` shadows the module-level
    # `torch.nn.functional as F` inside this method; it is only forwarded to
    # twisting.grad_log_lik_approx.
    def forward(self, input_feats, F=None,
                use_twisting=False, twist_scale=1.,
                twist_potential_rot=True,
                twist_potential_trans=True,
                twist_update_rot=True,
                twist_update_trans=True,
                ):
        """Forward computes the reverse diffusion conditionals p(X^t|X^{t+1})
        for each item in the batch

        Args:
            X: the noised samples from the noising process, of shape [Batch, N, D].
                Where the T time steps are t=1,...,T (i.e. not including the un-noised X^0)

        Returns:
            model_out: dictionary of model outputs.
        """
        # Frames as [batch, res, 7] tensors.
        bb_mask = input_feats['res_mask'].type(torch.float32)  # [B, N]
        fixed_mask = input_feats['fixed_mask'].type(torch.float32)
        edge_mask = bb_mask[..., None] * bb_mask[..., None, :]
        # Initial embeddings of positonal and relative indices.
        init_node_embed, init_edge_embed = self.embedding_layer(
            seq_idx=input_feats['seq_idx'],
            t=input_feats['t'],
            fixed_mask=fixed_mask,
            self_conditioning_ca=input_feats['sc_ca_t'],
        )
        # Zero out embeddings of padding residues / edges.
        edge_embed = init_edge_embed * edge_mask[..., None]
        node_embed = init_node_embed * bb_mask[..., None]
        if torch.any(node_embed.isnan()):
            # NOTE(review): interactive debugger trap left in production code;
            # it hangs non-interactive runs and requires ipdb to be installed.
            print("node_embed is somewhere nan")
            import ipdb; ipdb.set_trace()
        # If input_feats has conditioning information, update input rigids to track gradients
        # NOTE(review): if use_twisting is True but 'rigids_motif' is absent,
        # Log_delta_R/delta_x below would be undefined -- presumably callers
        # always supply 'rigids_motif' when twisting; confirm.
        if use_twisting and "rigids_motif" in input_feats:
            # Log that we are using conditioning
            Log_delta_R, delta_x = twisting.perturbations_for_grad(input_feats, self.diffuser)
        # Run main network
        model_out = self.score_model(node_embed, edge_embed, input_feats)
        # Psi angle prediction
        gt_psi = input_feats['torsion_angles_sin_cos'][..., 2, :]
        psi_pred = self._apply_mask(
            model_out['psi'], gt_psi, 1 - fixed_mask[..., None])
        pred_out = {'psi_pred': psi_pred}
        pred_out['rot_score'] = model_out['rot_score']
        pred_out['trans_score'] = model_out['trans_score']
        final_rigids = Rigid(Rotation(model_out['R_final']), model_out['trans_final'])
        model_out['final_rigids'] = final_rigids
        rigids_pred = model_out['final_rigids']
        pred_out['rigids'] = rigids_pred.to_tensor_7()
        # If input_feats has conditioning information, compute conditional score
        if use_twisting:
            grad_R_log_p_motif, grad_x_log_p_motif, max_log_p_idx, twist_log_p = twisting.grad_log_lik_approx(
                R_t=input_feats['R_t'],
                R_pred=model_out['R_final'],
                trans_pred=model_out['trans_final'],
                motif_tensor_7=input_feats['rigids_motif'],
                Log_delta_R=Log_delta_R, delta_x=delta_x,
                se3_diffuser=self.diffuser,
                t=input_feats['t'],
                F=F,
                twist_scale=twist_scale,
                twist_potential_rot=twist_potential_rot,
                twist_potential_trans=twist_potential_trans,
            )
            pred_out['max_log_p_idx'] = max_log_p_idx
            pred_out['twist_log_p'] = twist_log_p
            verbose = False
            if verbose:
                # Log the mean norms of the two gradients
                grad_R_log_p_motif_norm = torch.norm(grad_R_log_p_motif, dim=[-2, -1]).mean()
                grad_x_log_p_motif_norm = torch.norm(grad_x_log_p_motif, dim=[-1]).mean()
                print("input_feats[t]: ", input_feats['t'])
                print("grad_R_log_p_motif_norm: ", grad_R_log_p_motif_norm)
                print("grad_x_log_p_motif_norm: ", grad_x_log_p_motif_norm)
                # Log the means of the unconditioanal gradients
                grad_R_uncond = pred_out['rot_score']
                grad_x_uncond = pred_out['trans_score']
                grad_R_uncond_norm = torch.norm(grad_R_uncond, dim=[-2, -1]).mean()
                grad_x_uncond_norm = torch.norm(grad_x_uncond, dim=[-1]).mean()
                print("grad_R_uncond_norm: ", grad_R_uncond_norm)
                print("grad_x_uncond_norm: ", grad_x_uncond_norm)
            # scale grad_R_log_p_motif such that each 3x3 matrix can have Frobenius norm at most 1000
            if sum(torch.isnan(grad_R_log_p_motif).flatten()) > 0:
                num_nans = sum(torch.isnan(grad_R_log_p_motif).flatten())
                print("grad_R_log_p_motif has ", num_nans, " nans")
                # set the nans to 0
                # first find indices corresponding to nans
                nan_indices = torch.where(torch.isnan(grad_R_log_p_motif[0]).sum(dim=[-2,-1]))[0]
                # set rotation matrices to zero if they have nans
                grad_R_log_p_motif[0, nan_indices] = 0.
            # Consider doing something similar for translations? (i.e. for scaling)
            # TODO: Do ablation to check if this matters! (i.e. if we don't scale the gradients)
            max_norm = 1e3
            norms = torch.norm(grad_R_log_p_motif, dim=[-2, -1], keepdim=True) # keep the last dimensions
            if sum(norms.flatten() > max_norm) > 0:
                print("norms of grad_R_log_p_motif are ", norms.shape, norms.flatten())
            grad_R_scaling = max_norm / (max_norm + norms)
            grad_R_log_p_motif = grad_R_scaling*grad_R_log_p_motif
            # NOTE(review): this second check reuses the *rotation* norms but
            # prints a message about the translation gradient -- looks like a
            # copy-paste remnant of the commented-out translation scaling
            # below; confirm intent.
            if sum(norms.flatten() > max_norm) > 0:
                print("norms of grad_trans_log_p_motif are ", norms.shape, norms.flatten())
            #norms = torch.norm(grad_x_log_p_motif, dim=[-1], keepdim=True) # keep the last dimensions
            #if sum(norms.flatten() > max_norm) > 0:
            #    print("norms of grad_trans_log_p_motif are ", norms.shape, norms.flatten())
            #grad_x_scaling = max_norm / (max_norm + norms)
            #grad_x_log_p_motif = grad_x_scaling*grad_x_log_p_motif
            if twist_update_rot:
                # Add the conditional correction to the rotation score,
                # keeping a detached copy of the unconditional score.
                pred_out['rot_score_uncond'] = pred_out['rot_score'].detach().clone()
                pred_out['rot_score'] = pred_out['rot_score'] + grad_R_log_p_motif
            if twist_update_trans:
                pred_out['trans_score_uncond'] = pred_out['trans_score'].detach().clone()
                pred_out['trans_score'] = pred_out['trans_score'] + grad_x_log_p_motif
            # Shift the predicted translations along the conditional gradient,
            # scaled by (1 - a_bar_t) / sqrt(a_bar_t) from the R3 diffuser.
            bar_a_t = torch.exp(-self.diffuser._r3_diffuser.marginal_b_t(input_feats['t']))
            factor_on_score_x = (1-bar_a_t)/torch.sqrt(bar_a_t)
            rigids_pred = Rigid.from_tensor_7(pred_out['rigids'])
            pred_out['rigids_uncond'] = pred_out['rigids'].detach().clone()
            x_pred = rigids_pred.get_trans()
            x_pred = x_pred + factor_on_score_x[:, None, None] * self.diffuser._r3_diffuser._unscale(grad_x_log_p_motif)
            rigids_pred._trans = x_pred
            pred_out['rigids'] = rigids_pred.to_tensor_7()
        # Detach all input tensors in place so gradients do not leak across
        # sampling steps (side effect: mutates the caller's input_feats).
        for k, v in input_feats.items():
            # check if a the value is a tensor, and detach if so.
            if isinstance(v, torch.Tensor):
                input_feats[k] = v.detach()
        return pred_out
| blt2114/twisted_diffusion_sampler | protein_exp/model/score_network.py | score_network.py | py | 13,595 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number":... |
34781259647 | import h5py
import numpy as np
# Source HDF5 weight file and the CSV float format.
filename = 'weight_imgnet_ker5_h5/ResNet_18_ker5.h5'
cvsfmt = '%.18e' # covers upto float128
# Open read-only inside a context manager so the HDF5 handle is always
# closed, even if one of the np.savetxt calls raises (the original script
# never closed the file).
with h5py.File(filename, 'r') as h5f:
    # Walk the group hierarchy and dump every dataset as a flat CSV file.
    lv0_keys = list(h5f.keys())
    print("lv0: ", lv0_keys)
    for keys0 in lv0_keys:
        lv1_keys = list(h5f[keys0].keys())
        print("lv1: ", lv1_keys)
        for keys1 in lv1_keys:
            lv2_keys = list(h5f[keys0][keys1].keys())
            print("lv2: ", lv2_keys)
            for keys2 in lv2_keys:
                if keys0 in ('bn', 'conv0', 'fully_connected'):
                    # These groups hold datasets directly at level 2.
                    data = h5f[keys0][keys1][keys2]
                    np.savetxt('weight_imgnet_ker5_h5/w-'+str(keys0)+'-'+str(keys1)+'-'+str(keys2)+'.csv', np.reshape(data, [-1]), fmt=cvsfmt, delimiter=',')
                else:
                    # All other groups have one more nesting level.
                    lv3_keys = list(h5f[keys0][keys1][keys2].keys())
                    for keys3 in lv3_keys:
                        data = h5f[keys0][keys1][keys2][keys3]
                        np.savetxt('weight_imgnet_ker5_h5/w-'+str(keys0)+'-'+str(keys1)+'-'+str(keys2)+'-'+str(keys3)+'.csv', np.reshape(data, [-1]), fmt=cvsfmt, delimiter=',')
| dwkim606/lattigo_conv | imgnet_read_h5.py | imgnet_read_h5.py | py | 1,175 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "h5py.File",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number... |
39056277589 | from matplotlib import pyplot as plt
# Rupture speeds (km/s) and the corresponding variance reductions (%).
vr = [2.6, 2.8, 3.0, 3.1, 3.2, 3.3, 3.4, 3.6]
VR = [48.37, 57.55, 64.92, 69.12, 72.82, 74.12, 73.1, 67.66]
plt.plot(vr, VR, 'k', lw=1.5)
plt.scatter(vr, VR, marker='+', s=90, lw=1.5)
plt.grid()
plt.xlabel('Rupture speed (km/s)')
plt.ylabel('Variance reduction (%)')  # fixed label typo: "reductio" -> "reduction"
plt.show() | Ogweno/mylife | Nepal/plot_rupt_speed.py | plot_rupt_speed.py | py | 290 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matpl... |
12478884070 | import json
import os
import cv2
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
# change IDs to your IDs.
ID1 = '206299463'
ID2 = '312497084'
# Composite identifier used in figure titles and output file names.
ID = "HW3_{0}_{1}".format(ID1, ID2)
# Output directory for saved figures / JSON tracks / videos.
RESULTS = 'results'
os.makedirs(RESULTS, exist_ok=True)
# Directory holding the input frame sequence.
IMAGE_DIR_PATH = "Images"
# SET NUMBER OF PARTICLES
N = 100
# Initial Settings
# Initial particle state: box center, half-extents and velocity in pixels.
s_initial = [297, # x center
             139, # y center
             16, # half width
             43, # half height
             0, # velocity x
             0] # velocity y
# state index constants for readability because I keep forgetting
X_ind = 0
Y_ind = 1
W_ind = 2
H_ind = 3
VX_ind = 4
VY_ind = 5
# Frame dimensions in pixels, used to clamp particle positions.
WIDTH = 576
HEIGHT = 352
# set this to True if you want to generate a video of the tracking process
GENERATE_VIDEO = False
def predict_particles(s_prior: np.ndarray) -> np.ndarray:
    """Apply the motion model to the prior particles and add uniform noise.

    Motion-model assumptions: a human running mostly horizontally at
    ~2-3 m/s, filmed at ~25 fps with a pixel/meter ratio of ~65 (estimated
    from the subject's on-screen height of ~110 px / ~1.7 m).  That bounds
    the per-frame displacement to roughly 6-7 px horizontally, and ~3 px
    vertically for a sudden slope (constant subject height assumed).

    Args:
        s_prior: np.ndarray. The prior state.

    Return:
        The prior state after drift (applying the motion model) and noise.
    """
    prior = s_prior.astype(float)
    drifted = prior.copy()
    # Advance positions by the prior velocities.
    drifted[[X_ind, Y_ind]] += prior[[VX_ind, VY_ind]]
    # Clamp positions so the bounding box cannot drift out of the frame
    # when the tracked object moves toward an edge.
    drifted[[X_ind, Y_ind]] = np.clip(drifted[[X_ind, Y_ind]].T, [0, 0], [WIDTH - 1, HEIGHT - 1]).T
    # Uniform-noise half-widths: position limits follow typical human
    # velocities; velocity limits follow typical human accelerations
    # (abrupt starts/stops show up as velocity jumps).
    x_lim = 7
    y_lim = 3
    vx_lim = 4
    vy_lim = 2
    h_lim = w_lim = 0  # box dimensions stay fixed
    half_widths = np.vstack(np.array([x_lim, y_lim, w_lim, h_lim, vx_lim, vy_lim]))
    drifted += np.random.uniform(-1 * half_widths, half_widths, size=drifted.shape)
    # Keep velocities within reasonable physical bounds.
    drifted[[VX_ind, VY_ind]] = np.clip(
        drifted[[VX_ind, VY_ind]].T,
        [-1.2 * vx_lim, -0.8 * vy_lim],
        [1.2 * vx_lim, 0.8 * vy_lim]).T
    return drifted
def compute_normalized_histogram(image: np.ndarray, state: np.ndarray) -> np.ndarray:
    """Compute a flattened, normalized 16x16x16 color histogram of the
    bounding box described by *state*.

    Args:
        image: np.ndarray. The image we want to crop the rectangle from.
        state: np.ndarray. State candidate (x, y, half-w, half-h, vx, vy).

    Return:
        Flattened histogram of quantized colors, normalized to sum to 1.
    """
    cx, cy, hw, hh, _, _ = state.astype(int)
    # NOTE(review): a negative cy-hh or cx-hw would wrap the slice around --
    # presumably states are clipped to the frame upstream; confirm.
    crop = image[cy - hh:cy + hh, cx - hw:cx + hw]
    hist = cv2.calcHist([crop], [0, 1, 2], None, [16, 16, 16], [0, 256, 0, 256, 0, 256])
    hist /= hist.sum()  # normalize histogram
    return hist.flatten()
def sample_particles(previous_state: np.ndarray, cdf: np.ndarray) -> np.ndarray:
    """Resample particles from *previous_state* via the inverse-CDF method.

    Args:
        previous_state: np.ndarray. previous state, shape: (6, N)
        cdf: np.ndarray. cummulative distribution function: (N, )

    Return:
        s_next: np.ndarray. Sampled particles. shape: (6, N)
    """
    draws = np.random.random(size=cdf.shape)
    # gaps[i, j] = cdf[j] - draws[i]; for each draw we want the first
    # particle whose cumulative weight exceeds it.
    gaps = cdf - np.vstack(draws)
    gaps[gaps <= 0] = np.inf  # exclude particles at or below the draw
    chosen = gaps.argmin(axis=1)
    # Velocities are deliberately NOT recomputed after resampling; the
    # motion model in predict_particles estimates them instead.
    return previous_state[:, chosen]
def bhattacharyya_distance(p: np.ndarray, q: np.ndarray) -> float:
    """Exponentiated Bhattacharyya coefficient of histograms *p* and *q*.

    Despite the name, this returns exp(20 * BC(p, q)) -- a similarity score
    used directly as a particle weight -- not the Bhattacharyya *distance*.

    Args:
        p: np.ndarray. first histogram.
        q: np.ndarray. second histogram.

    Return:
        The similarity weight (larger means more similar).
    """
    coefficient = np.sqrt(p * q).sum()
    return np.exp(20 * coefficient)
def create_image_with_boundingbox(image: np.ndarray,
                                  mean_bbox: tuple,
                                  max_bbox: tuple,
                                  current_bbox: tuple
                                  ) -> np.ndarray:
    """Draw the max (red), mean (green) and current (blue) bounding boxes.

    Used to render a video of the whole tracking process, which helps when
    tuning the tracker; enable it via the GENERATE_VIDEO flag at the top of
    the file.
    """
    annotated = image.copy()
    # Boxes are drawn max -> mean -> current, in BGR colors.
    for bbox, color in ((max_bbox, (0, 0, 255)),
                        (mean_bbox, (0, 255, 0)),
                        (current_bbox, (255, 0, 0))):
        x, y, w, h = [int(round(v)) for v in bbox]
        annotated = cv2.rectangle(annotated, (x - w, y - h), (x + w, y + h), color, 2)
    return annotated
def show_particles(image: np.ndarray, state: np.ndarray, W: np.ndarray, frame_index: int, ID: str,
                   frame_index_to_mean_state: dict, frame_index_to_max_state: dict,
                   ) -> tuple:
    """Plot the frame with the weighted-mean (green) and max-weight (red)
    particle boxes, save the figure under RESULTS, and record both states.

    Return:
        The updated (mean-state, max-state) dictionaries.
    """
    fig, ax = plt.subplots(1)
    rgb = image[:, :, ::-1]  # BGR (OpenCV) -> RGB (matplotlib)
    plt.imshow(rgb)
    plt.title(ID + " - Frame number = " + str(frame_index))
    # Weighted-mean particle box (green).
    mean_state = np.average(state, axis=1, weights=W)
    (x_m, y_m, w_m, h_m, _, _) = mean_state
    ax.add_patch(patches.Rectangle((x_m - w_m, y_m - h_m), 2 * w_m, 2 * h_m,
                                   linewidth=2, edgecolor='g', facecolor='none'))
    # Maximum-weight particle box (red).
    best_state = state[:, np.argmax(W)]
    (x_b, y_b, w_b, h_b, _, _) = best_state
    ax.add_patch(patches.Rectangle((x_b - w_b, y_b - h_b), 2 * w_b, 2 * h_b,
                                   linewidth=2, edgecolor='r', facecolor='none'))
    plt.show(block=False)
    fig.savefig(os.path.join(RESULTS, ID + "-" + str(frame_index) + ".png"))
    frame_index_to_mean_state[frame_index] = [float(v) for v in [x_m, y_m, w_m, h_m]]
    frame_index_to_max_state[frame_index] = [float(v) for v in [x_b, y_b, w_b, h_b]]
    return frame_index_to_mean_state, frame_index_to_max_state
def main():
    """Run the particle-filter tracking loop over the frames in IMAGE_DIR_PATH.

    Seeds N particles around s_initial, computes the reference color
    histogram from the first frame, then for each subsequent frame
    resamples, predicts and reweights the particles; every 10th frame is
    plotted and saved, the mean/max state tracks are written to JSON under
    RESULTS, and (optionally) annotated videos are rendered.
    """
    # Replicate the initial state N times -> (6, N) particle matrix.
    state_at_first_frame = np.matlib.repmat(s_initial, N, 1).T
    S = predict_particles(state_at_first_frame)
    # LOAD FIRST IMAGE
    image = cv2.imread(os.path.join(IMAGE_DIR_PATH, "001.png"))
    # COMPUTE NORMALIZED HISTOGRAM
    # q is the reference appearance model all particles are compared against.
    q = compute_normalized_histogram(image, np.array(s_initial))
    # COMPUTE NORMALIZED WEIGHTS (W) AND PREDICTOR CDFS (C)
    weights = np.array([bhattacharyya_distance(compute_normalized_histogram(image, s), q) for s in S.T])
    weights /= weights.sum()
    # Initialize the variable W with the computed weights
    W = weights
    # COMPUTE CDF
    cdf = np.cumsum(weights)
    images_processed = 1
    # MAIN TRACKING LOOP
    image_name_list = os.listdir(IMAGE_DIR_PATH)
    image_name_list.sort()
    images_paths = [os.path.join(IMAGE_DIR_PATH, image_name) for image_name in image_name_list]
    frame_index_to_avg_state = {}
    frame_index_to_max_state = {}
    if GENERATE_VIDEO:
        # (width, height) expected by VideoWriter -- image.shape is (h, w).
        dimensions = image.shape[:2][::-1]
        slowed_down_vw = cv2.VideoWriter(os.path.join(RESULTS, "slowed_down_video.avi"),
                                         fourcc=cv2.VideoWriter_fourcc(*'XVID'),
                                         fps=10,
                                         frameSize=dimensions,
                                         isColor=True)
        real_time_vw = cv2.VideoWriter(os.path.join(RESULTS, "normal_speed_video.avi"),
                                       fourcc=cv2.VideoWriter_fourcc(*'XVID'),
                                       fps=25,
                                       frameSize=dimensions,
                                       isColor=True)
        # Until the first plotted frame, draw the initial box.
        mean_bbox = s_initial[:4]
        max_bbox = s_initial[:4]
    for image_path in images_paths[1:]:
        S_prev = S
        # LOAD NEW IMAGE FRAME
        current_image = cv2.imread(image_path)
        # SAMPLE THE CURRENT PARTICLE FILTERS
        S_next_tag = sample_particles(S_prev, cdf)
        # PREDICT THE NEXT PARTICLE FILTERS (YOU MAY ADD NOISE)
        S = predict_particles(S_next_tag)
        # COMPUTE NORMALIZED WEIGHTS (W) AND PREDICTOR CDFS (C)
        # YOU NEED TO FILL THIS PART WITH CODE:
        weights = np.array([bhattacharyya_distance(compute_normalized_histogram(current_image, s), q) for s in S.T])
        weights /= weights.sum()
        W = weights
        # COMPUTE CDF
        cdf = np.cumsum(weights)
        # CREATE DETECTOR PLOTS
        images_processed += 1
        if 0 == images_processed%10:
            frame_index_to_avg_state, frame_index_to_max_state = show_particles(
                current_image, S, W, images_processed, ID, frame_index_to_avg_state, frame_index_to_max_state)
            if GENERATE_VIDEO:
                mean_bbox = frame_index_to_avg_state[images_processed]
                max_bbox = frame_index_to_max_state[images_processed]
        if GENERATE_VIDEO:
            # Current weighted-mean box is recomputed every frame (blue).
            current_frame_bbox = np.average(S[[X_ind, Y_ind, W_ind, H_ind]], axis=1, weights=W)
            bounded_frame = create_image_with_boundingbox(current_image, mean_bbox, max_bbox, current_frame_bbox)
            slowed_down_vw.write(bounded_frame)
            real_time_vw.write(bounded_frame)
    if GENERATE_VIDEO:
        slowed_down_vw.release()
        real_time_vw.release()
    # Persist the recorded mean/max state tracks.
    with open(os.path.join(RESULTS, 'frame_index_to_avg_state.json'), 'w') as f:
        json.dump(frame_index_to_avg_state, f, indent=4)
    with open(os.path.join(RESULTS, 'frame_index_to_max_state.json'), 'w') as f:
        json.dump(frame_index_to_max_state, f, indent=4)
# Run the tracker only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| StudentYuval/VP2023 | ex3/particle_filter.py | particle_filter.py | py | 11,564 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.makedirs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.copy",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_numb... |
74551961384 | #!/usr/bin/env python3
# encoding:utf-8
'''
@author: lierl
@file: use_enum.py
@time: 2018/3/24 17:31
'''
__author__ = 'lierl'
from enum import Enum, unique
# Create a Month enumeration from a tuple of member names; values are
# auto-assigned starting at 1.
Month = Enum('Month',('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'))
for name, member in Month.__members__.items():
    print(name, '==>', member, ",", member.value)
@unique  # The @unique decorator guarantees that no two members share the same value.
class Weekday(Enum):
    """Days of the week, numbered 0 (Sunday) through 6 (Saturday)."""
    Sun = 0
    Mon = 1
    Tue = 2
    Wed = 3
    Thu = 4
    Fri = 5
    Sat = 6
# Demonstrate member access by attribute, by name, and by value.
day1 = Weekday.Mon
print(day1)
print(Weekday['Tue'])
print(Weekday.Tue.value)
for name, member in Weekday.__members__.items():
    print(name, "==>", member)
# Model Student's gender attribute as an enumeration to avoid raw strings:
class Student(object):
    """A student whose gender should be a Gender enum member, not a string."""
    def __init__(self, name, gender):
        self.name = name
        self.gender = gender
@unique
class Gender(Enum):
    """Gender constants used by Student."""
    Male = 0
    Female = 1
# Enum members compare by equality, so checking the attribute is safe.
bart = Student('Bart', Gender.Male)
if bart.gender == Gender.Male:
    print('测试通过')
else:
    print("测试失败")
# Enum可以把一组相关常量定义在一个class中,且class不可变,而且成员可以直接比较。 | dream7319/djtest | demo/use_enum.py | use_enum.py | py | 1,187 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "enum.unique",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 44,
... |
33057055793 | from flask import request
from app import app
from app.service import get_value, set_value, add_set, get_set, set_expiry, range_elements, rank
#End point for GET, SET and EXPIRE command
@app.route('/redis/key', methods=['POST', 'GET', 'PATCH'])
def keyDetails():
    """Key endpoint: GET reads a value, POST sets one, PATCH sets an expiry."""
    method = request.method
    if method == "GET":
        return get_value(request.form['key'])
    if method == "POST":
        return set_value(request.form['key'], request.form['value'])
    if method == "PATCH":
        return set_expiry(request.form['key'], request.form['time'])
    return "bad request"
#End point for ZADD command
@app.route('/redis/set', methods=['POST', 'GET'])
def sorted_set():
    """Sorted-set endpoint: POST adds a scored member (ZADD), GET dumps the set."""
    if request.method == 'POST':
        form = request.form
        return add_set(form['key'], form['score'], form['value'])
    if request.method == 'GET':
        return get_set()
    return "bad request"
#End point for ZRANGE command
@app.route('/redis/set/range', methods=['POST', 'GET'])
def range():
    """Range endpoint (ZRANGE): GET returns members between two ranks.

    NOTE(review): this view function shadows the builtin range() at module
    level; the name is kept because Flask uses it as the endpoint name.
    """
    if request.method != 'GET':
        return "bad request"
    form = request.form
    return range_elements(form['key'], form['left'], form['right'])
#End point for ZRANK command
@app.route('/redis/set/rank',methods=['POST', 'GET'])
def find():
if request.method == 'GET':
key = request.form['key']
value = request.form['value']
return rank(key,value)
else:
return "bad request" | shreyans-sureja/Redis-Implementation | app/routes.py | routes.py | py | 1,651 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.method",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.re... |
40857469466 | import requests
import show_route
import serial
class Navi_auto:
    """Thin wrapper around the AMap (Gaode) REST APIs for POI search and
    walking / cycling / driving route planning.

    Coordinates are stored as [longitude, latitude] lists, matching the
    "lon,lat" order the AMap API expects.
    """

    def __init__(self):
        self.key = '1b1779b2176bc8d85a93f9aef22b8a53'
        # Indices into the coordinate lists.
        self.latitude = 1
        self.longitude = 0
        self.start_coordinate = [116.481028, 39.989643]
        self.desti_coordinate = [116.434446, 39.90816]
        # Most recently built request URL (set by the get_*_url methods).
        self.res_url = ""

    def _format_coordinate(self, coordinate):
        """Render a [lon, lat] list as the 'lon,lat' string the API expects.

        Replaces the duplicated str(list).strip('[').strip(']').replace(' ', '')
        idiom that appeared in each get_*_url method.
        """
        return ",".join(str(c) for c in coordinate)

    def get_destination(self, destination, region='320111'):
        """Search POIs matching *destination* in *region*; return {name: location}."""
        url = f"https://restapi.amap.com/v5/place/text?key={self.key}&keywords={destination}&region={region}&city_limit=true&show_fields=children"
        data_dict = requests.get(url).json()
        return {poi['name']: poi['location'] for poi in data_dict["pois"]}

    def get_coordinate(self, start_longitude, start_latitude, desti_longitude, desti_latitude):
        """Set the start and destination coordinates."""
        self.start_coordinate[self.longitude] = start_longitude
        self.start_coordinate[self.latitude] = start_latitude
        self.desti_coordinate[self.longitude] = desti_longitude
        self.desti_coordinate[self.latitude] = desti_latitude

    def get_walking_url(self):
        """Build the walking-directions request URL into self.res_url."""
        start_pos = self._format_coordinate(self.start_coordinate)
        desti_pos = self._format_coordinate(self.desti_coordinate)
        self.res_url = f"https://restapi.amap.com/v3/direction/walking?key={self.key}&origin={start_pos}&destination={desti_pos}"

    def get_bike_url(self):
        """Build the cycling-directions request URL into self.res_url."""
        start_pos = self._format_coordinate(self.start_coordinate)
        desti_pos = self._format_coordinate(self.desti_coordinate)
        self.res_url = f"https://restapi.amap.com/v4/direction/bicycling?key={self.key}&origin={start_pos}&destination={desti_pos}"

    def get_drive_url(self):
        """Build the driving-directions request URL into self.res_url."""
        start_pos = self._format_coordinate(self.start_coordinate)
        desti_pos = self._format_coordinate(self.desti_coordinate)
        self.res_url = f"https://restapi.amap.com/v3/direction/driving?origin={start_pos}&destination={desti_pos}&key={self.key}"

    def make_navi_data(self):
        """Fetch the planned route and push its polyline points to show_route."""
        points = []
        data = requests.get(self.res_url).json()
        try:
            # v3 endpoints nest the paths under "route".
            paths = data["route"]["paths"]
            polyline = paths[0]['steps']  # list
        except Exception:
            # v4 (bicycling) nests them under "data".
            paths = data["data"]["paths"]
            polyline = paths[0]['steps']  # list
        for step in polyline:
            points.extend(step['polyline'].split(';'))
        show_route.gps_lon_lat.clear()
        # API points are "lon,lat"; show_route expects lat then lon.
        for point in points:
            x, y = map(float, point.split(","))
            show_route.gps_lon_lat.append(y)
            show_route.gps_lon_lat.append(x)
        show_route.create_pic_data()
class device:
    """Serial GPS receiver: scans COM ports for a NMEA-emitting device and
    parses $GNRMC sentences into [latitude, longitude] degree pairs."""
    def __init__(self):
        # Most recent [latitude, longitude] fix.
        self.location = []
        self.baud_rate = 115200
        self.port = ''
        self.interface = ''
        self.GPS_Data = ''
        self.create_interface()
    def create_interface(self):
        """Probe COM0..COM99 until a port that looks like the GPS responds.

        NOTE(review): COM7 and COM9 are skipped unconditionally -- presumably
        occupied by other hardware on the author's machine; confirm before
        reusing elsewhere.
        """
        for i in range(0, 100):
            self.port = 'COM'
            self.port = self.port + str(i)
            if i == 7 or i == 9:
                continue
            try:
                self.interface = serial.Serial(self.port, self.baud_rate, timeout=1)
                self.GPS_Data = self.interface.readline().decode('utf-8')
                # Heuristic device check: at least 8 chars and not starting
                # with '2' (assumption about non-GPS devices -- TODO confirm).
                if len(self.GPS_Data) >= 8 and self.GPS_Data[0] != '2':
                    print(self.GPS_Data)
                    print("Successfully find the device!")
                    print("Port:{}".format(self.port))
                    break
                else:
                    print(self.GPS_Data)
                    print("Connected to {},but it is not the device".format(self.port))
            except Exception as e:
                # Port missing or busy; keep scanning the next port.
                print("{} is not the device".format(self.port))
                print("error msg:{}".format(e))
    def get_location(self):
        """Block until a $GNRMC sentence arrives; return [lat, lon] in degrees."""
        while True:
            self.GPS_Data = self.interface.readline().decode('utf-8')
            if self.GPS_Data.startswith('$GNRMC'):
                fields = self.GPS_Data.split(',')
                self.location = []
                # Fields 3 and 5 hold latitude/longitude in NMEA ddmm.mmmm
                # form; DegreeConvert turns them into decimal degrees.
                self.location.append(show_route.DegreeConvert(float(fields[3])))
                self.location.append(show_route.DegreeConvert(float(fields[5])))
                return self.location
if __name__ == '__main__':
    # Manual smoke test: connect to the GPS device and read one fix.
    a = device()
    a.get_location()
| haiboCode233/KivyPlusAR | GPSAPI.py | GPSAPI.py | py | 4,415 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "show_route.gps_lon_lat.clear",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "show_route.gps... |
29500719773 | import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import concat, col, lit, split, to_date, date_format
import os
import time
# Create (or reuse) a SparkSession.
spark = SparkSession.builder.getOrCreate()
# Start the wall-clock timer for the whole ETL run.
start_time = time.time()
# Load the pipeline configuration.
with open('config.json') as f:
    config = json.load(f)
# Validate the configured input: key present, .csv extension, file exists.
if 'input_csv' not in config or not config['input_csv'].endswith('.csv') or not os.path.exists(config['input_csv']):
    print("No CSV file is selected or the file is invalid.")
    # Raise SystemExit directly instead of calling exit() -- exit() is a
    # site-module convenience that is not guaranteed to exist -- and use a
    # non-zero status so callers can detect the failure.
    raise SystemExit(1)
# Load the DataFrame from the CSV file.
df = spark.read.csv(config['input_csv'], header=True, inferSchema=True)
# Round-trip through Parquet so downstream steps read the columnar format.
df.write.mode("overwrite").parquet('temp.parquet')
df = spark.read.parquet('temp.parquet')
# Apply each configured transformation in order.
for transformation in config['transformations']:
    kind = transformation['type']
    if kind == 'date_format':
        # Parse with the input format, then re-render with the output format.
        df = df.withColumn(transformation['column'], to_date(col(transformation['column']), transformation['input_format']))
        df = df.withColumn(transformation['column'], date_format(col(transformation['column']), transformation['output_format']))
    elif kind == 'concat':
        # Join the listed columns, appending the configured separator.
        df = df.withColumn(transformation['output_column'], concat(*[col(c) for c in transformation['columns']], lit(transformation['separator'])))
    elif kind == 'split':
        split_col = split(df[transformation['column']], transformation['separator'])
        for i, output_column in enumerate(transformation['output_columns']):
            df = df.withColumn(output_column, split_col.getItem(i))
    elif kind == 'drop':
        df = df.drop(transformation['column'])
# Report the number of records after transformation.
total_records = df.count()
print("Total records: ", total_records)
# Persist the transformed DataFrame and show a sample for verification.
df.write.mode('overwrite').parquet(config['output_parquet'])
df = spark.read.parquet(config['output_parquet'])
df.show()
# Stop the timer and report elapsed wall-clock time.
end_time = time.time()
elapsed_time = end_time - start_time
print("Time taken: {} seconds".format(elapsed_time))
| hari01008/Extract-Transform-Load-With-Mysql-and-Pyspark | transform.py | transform.py | py | 2,316 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.SparkSession.builder.getOrCreate",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 8,
"usage_type... |
22577730329 | '''
Created on 06.12.2013
@author: hfrieden
Import an Arma 2/Arma 3 unbinarized MDL file
'''
import struct
import bpy
import bmesh
import os.path as path
import ArmaToolbox
import ArmaTools
def getLayerMask(layer):
    """Return a 20-element boolean layer mask with only ``layer`` set.

    The index wraps modulo 20, matching the legacy Blender 2.7x layer
    count.
    """
    mask = [False] * 20
    mask[layer % 20] = True
    return mask
# Datatype reading
def readULong(filePtr):
    """Read a 4-byte native-endian integer from the stream.

    NOTE(review): despite the name this uses struct format 'i' (signed),
    so values >= 2**31 come back negative -- kept for compatibility.
    """
    (value,) = struct.unpack("i", filePtr.read(4))
    return value
def readSignature(filePtr):
    """Return the next 4 bytes of the stream as a raw signature/magic."""
    signature = filePtr.read(4)
    return signature
def readFloat(filePtr):
    """Read a 4-byte native-endian single-precision float."""
    (value,) = struct.unpack("f", filePtr.read(4))
    return value
def readChar(filePtr):
    """Read one byte and return it as a length-1 bytes object (struct 'c')."""
    (ch,) = struct.unpack("c", filePtr.read(1))
    return ch
def readByte(filePtr):
    """Read one signed byte (-128..127, struct format 'b')."""
    (value,) = struct.unpack("b", filePtr.read(1))
    return value
def readString(filePtr):
    """Read a NUL-terminated UTF-8 string from the stream.

    Reads byte by byte until the first NUL. Also stops at end-of-file:
    the previous implementation kept appending the empty read() result
    forever when a truncated file lacked the terminator, hanging the
    importer.
    """
    chunks = []
    while True:
        ch = filePtr.read(1)
        # Empty read means EOF; b'\000' is the terminator -- stop on both.
        if not ch or ch == b'\000':
            break
        chunks.append(ch)
    return b''.join(chunks).decode("utf-8")
def makeLodName(fileName, lodLevel):
    """Build ``"<basename-up-to-first-dot>.<lodLevel>"`` for naming a LOD.

    Note: everything after the first '.' in the basename is discarded,
    not just the extension.
    """
    stem = path.basename(fileName).split(".")[0]
    return "{0}.{1}".format(stem, lodLevel)
def maybeAddEdgeSplit(obj):
    """Enable auto-smoothing on the object's mesh data.

    Despite the name, no Edge Split modifier is added any more; the old
    modifier-based implementation (previously kept here as commented-out
    code, now removed) was replaced by auto-smooth with a 180-degree
    threshold, so only explicitly marked sharp edges stay sharp.
    """
    obj.data.use_auto_smooth = True
    obj.data.auto_smooth_angle = 3.1415927  # ~pi: smooth every angle
def correctedResolution(r):
    """Snap a raw LOD resolution value to the closest known LOD constant.

    Values below 1000 are visual-LOD distances and are returned exactly
    as passed in (no int() truncation). Anything else is matched against
    the table of known special-LOD resolutions; the nearest entry wins,
    and on a tie the entry listed first is kept, matching the original
    linear scan.
    """
    res = int(r)
    if r < 1000:
        return r
    # Known special-LOD resolutions, in the original lookup order
    # (the order matters for tie-breaking).
    knownResolutions = (
        10000000000000,      # Geometry
        3000000000000000,    # Roadway
        8000000000000000,    # View Cargo Geometry
        10000,               # Stencil Shadow
        14000000000000000,   # View Pilot Fire Geometry
        17000000000000000,   # Sub Parts
        12000000000000000,   # View Commander Fire Geometry
        20000000000000000,   # Gunner View shadow volume
        1200,                # View Cargo
        9000000000000000,    # View Cargo Fire Geometry
        15000000000000000,   # View Gunner Geometry
        13000000000000000,   # View Pilot Geometry
        18000000000000000,   # Cargo View shadow volume
        1000000000000000,    # Memory
        1100,                # View Pilot
        21000000000000000,   # Wreckage
        4000000000000000,    # Paths
        40000000000000,      # Geometry PhysX
        7000000000000000,    # Fire Geometry
        10000000000000000,   # View Commander
        6000000000000000,    # View Geometry
        1000,                # View Gunner
        16000000000000000,   # View Gunner Fire Geometry
        20000000000000,      # Geometry Buoyancy
        19000000000000000,   # Pilot View shadow volume
        2000000000000000,    # Land Contact
        11000,               # Shadow Volume
        20000,               # Edit
        5000000000000000,    # Hit Points
        11000000000000000,   # View Commander Geometry
    )
    # min() keeps the first entry on equal distance, exactly like the
    # original explicit error-tracking loop did.
    return min(knownResolutions, key=lambda known: abs(known - res))
def resolutionName(r):
    """Map a raw LOD resolution value to a human-readable LOD name.

    Visual LODs (< 1000) are rendered as their integer distance. Other
    values snap to the closest known resolution (first entry wins on a
    tie); Stencil Shadow and Edit LODs get the offset from the base
    value appended.
    """
    res = int(r)
    if r < 1000:
        return str(res)
    # Known resolutions and their names, in the original lookup order.
    lodNames = {
        1.000e+3: 'View Gunner',
        1.100e+3: 'View Pilot',
        1.200e+3: 'View Cargo',
        1.000e+4: 'Stencil Shadow',
        2.000e+4: 'Edit',
        1.100e+4: 'Shadow Volume',
        1.000e+13: 'Geometry',
        1.000e+15: 'Memory',
        2.000e+15: 'Land Contact',
        3.000e+15: 'Roadway',
        4.000e+15: 'Paths',
        5.000e+15: 'Hit Points',
        6.000e+15: 'View Geometry',
        7.000e+15: 'Fire Geometry',
        8.000e+15: 'View Cargo Geometry',
        9.000e+15: 'View Cargo Fire Geometry',
        1.000e+16: 'View Commander',
        1.100e+16: 'View Commander Geometry',
        1.200e+16: 'View Commander Fire Geometry',
        1.300e+16: 'View Pilot Geometry',
        1.400e+16: 'View Pilot Fire Geometry',
        1.500e+16: 'View Gunner Geometry',
        1.600e+16: 'View Gunner Fire Geometry',
        1.700e+16: 'Sub Parts',
        1.800e+16: 'Cargo View shadow volume',
        1.900e+16: 'Pilot View shadow volume',
        2.000e+16: 'Gunner View shadow volume',
        2.100e+16: 'Wreckage',
        2.000e+13: 'Geometry Buoyancy',
        4.000e+13: 'Geometry PhysX',
    }
    # min() keeps the first key on equal distance, like the original scan.
    nearest = min(lodNames, key=lambda known: abs(known - res))
    name = lodNames.get(nearest, "?")
    # Stencil Shadow / Edit LODs encode an extra offset in the value.
    if nearest == 1.000e+4 or nearest == 2.000e+4:
        name = name + " " + str(r - nearest)
    return name
def decodeWeight(b):
    """Decode a raw P3DM selection byte into a 0.0..1.0 weight.

    0 means unselected, 2 means fully selected; other positive bytes
    scale down from 1.0 and negative bytes scale up from 0.0, each in
    0.01 steps.
    """
    if b == 0:
        return 0.0
    if b == 2:
        return 1.0
    if b > 2:
        return 1.0 - round((b - 2) / 2.55555) * 0.01
    if b < 0:
        return -round(b / 2.55555) * 0.01
    # Only b == 1 reaches this point.
    # NOTE(review): treated as fully selected -- confirm this is correct.
    return 1.0
def loadLOD(context, filePtr, objectName, materialData, layerFlag, lodnr):
    """Read one P3DM LOD from an open MLOD file and build a Blender object.

    Parameters
    ----------
    context : Blender context used to link the new collection/object.
    filePtr : binary file positioned at the start of a P3DM block.
    objectName : base mesh name (replaced by the LOD name once known).
    materialData : dict mapping (texture, rvmat) -> Blender material,
        shared across LODs so materials are reused.
    layerFlag, lodnr : legacy layer-placement arguments; currently unused
        (only referenced by code that has been disabled).

    Returns 0 on success, -1 on a malformed or unsupported header.
    """
    global objectLayers
    meshName = objectName
    weightArray = []
    # Check for P3DM signature
    sig = readSignature(filePtr)
    if sig != b'P3DM':
        return -1
    # Read major and minor version; only 0x1c / 0x100 is supported.
    major = readULong(filePtr)
    minor = readULong(filePtr)
    if major != 0x1c:
        print("Unknown major version {0}".format(major))
        return -1
    if minor != 0x100:
        print("Unknown minor version {0}".format(minor))
        return -1
    numPoints = readULong(filePtr)
    numNormals = readULong(filePtr)
    numFaces = readULong(filePtr)
    print("read lod")
    dummyFlags = readULong(filePtr)  # flags word; read but unused
    # Read the points. Points are XYZ triples followed by a ULONG flags
    # word; Y and Z are swapped to convert to Blender's axis order.
    verts = []
    for i in range(0, numPoints):
        point = struct.unpack("fffi", filePtr.read(16))
        pnt = [point[0], point[2], point[1]]
        verts.append(pnt)
    print("normals (", numNormals, ")...")
    # Normals are read to advance the file position, but are not applied
    # to the mesh (normals are recalculated further below).
    normals = []
    for i in range(0, numNormals):
        normal = struct.unpack("fff", filePtr.read(12))
        nrm = [normal[0], normal[1], normal[2]]
        normals.append(normal)
    faceData = []
    faces = []
    print("faces...")
    # Each face record holds 4 vertex-table entries on disk (unused slots
    # are still present), a flags word, and texture/material name strings.
    for i in range(0, numFaces):
        numSides = readULong(filePtr)
        # Vertex table
        vIdx = []
        nrmIdx = []
        uvs = []
        for n in range(0, 4):
            vtable = struct.unpack("iiff", filePtr.read(16))
            if n < numSides:
                vIdx.append(vtable[0])
                nrmIdx.append(vtable[1])
                uvs.append([vtable[2], vtable[3]])
        faceFlags = readULong(filePtr)
        textureName = readString(filePtr)
        materialName = readString(filePtr)
        faceData.append(
            (numSides, nrmIdx, uvs, faceFlags, textureName, materialName)
        )
        faces.append(vIdx)
        # Create a Blender material for each (texture, rvmat) pair that
        # hasn't been seen yet.
        if len(textureName) > 0 or len(materialName) > 0:
            try:
                materialData[(textureName, materialName)]
            except:
                # Need to create a new material for this
                mat = bpy.data.materials.new(path.basename(textureName) + " :: " + path.basename(materialName))
                mat.armaMatProps.colorString = textureName
                mat.armaMatProps.rvMat = materialName
                # Texture names starting with '#' are custom/procedural.
                if len(textureName) > 0 and textureName[0] == '#':
                    mat.armaMatProps.texType = 'Custom'
                    mat.armaMatProps.colorString = textureName
                else:
                    mat.armaMatProps.texType = 'Texture'
                    mat.armaMatProps.texture = textureName
                    mat.armaMatProps.colorString = ""
                materialData[(textureName, materialName)] = mat
    if readSignature(filePtr) != b'TAGG':
        print("No tagg signature")
        return -1;
    # Create the mesh. Doing it here makes the named selections
    # easier to read.
    mymesh = bpy.data.meshes.new(name=meshName)
    mymesh.from_pydata(verts, [], faces)
    mymesh.update(calc_edges=True)
    obj = bpy.data.objects.new(meshName, mymesh)
    # TODO: Maybe add a "logical Collection" option that collects all
    # geometries, shadows, custom etc. in one collection.
    # Each LOD gets its own collection linked into the scene.
    scn = bpy.context.scene
    coll = bpy.data.collections.new(meshName)
    context.scene.collection.children.link(coll)
    coll.objects.link(obj)
    # Build an edge lookup (sorted vertex pair -> edge index) so sharp
    # edges can be resolved in O(1) per pair later.
    edgeDict = dict()
    for edge in mymesh.edges:
        v1 = edge.vertices[0]
        v2 = edge.vertices[1]
        if (v1 > v2):  # Swap if out of order
            temp = v2
            v2 = v1
            v1 = temp
        edgeDict[(v1, v2)] = edge.index
    print("taggs")
    # Process TAGG blocks until #EndOfFile#. Inactive taggs and unknown
    # system taggs are skipped by seeking past their payload.
    loop = True
    sharpEdges = None
    weight = None
    while loop:
        active = readChar(filePtr)
        tagName = readString(filePtr)
        numBytes = readULong(filePtr)
        if active == b'\000':
            # Inactive tagg: skip its payload.
            if numBytes != 0:
                filePtr.seek(numBytes, 1)
        else:
            if tagName == "#EndOfFile#":
                loop = False
            elif tagName == "#SharpEdges#":
                # Read sharp edges: pairs of vertex indices, applied to
                # the mesh after all taggs have been read.
                sharpEdges = []
                for i in range(0, numBytes, 8):
                    n1 = readULong(filePtr)
                    n2 = readULong(filePtr)
                    sharpEdges.append([n1, n2])
            elif tagName == "#Property#":
                # Named property: two fixed-size 64-byte strings.
                # NOTE(review): "64s" keeps the NUL padding in the decoded
                # strings -- confirm downstream code tolerates that.
                propName = struct.unpack("64s", filePtr.read(64))[0].decode("utf-8")
                propValue = struct.unpack("64s", filePtr.read(64))[0].decode("utf-8")
                item = obj.armaObjProps.namedProps.add()
                item.name = propName;
                item.value = propValue
            elif tagName == "#UVSet#":
                id = readULong(filePtr)
                layerName = "UVSet " + str(id)
                if id == 0:
                    # Name the first layer "UVMap" so it matches Blender's
                    # default UV set name and avoids mix-ups.
                    layerName = "UVMap"
                mymesh.uv_layers.new(name=layerName)
                layer = mymesh.uv_layers[-1]
                index = 0
                # One UV pair per face loop, in face order; V is flipped.
                for faceIdx in range(0, numFaces):
                    n = faceData[faceIdx][0]
                    for x in range(0, n):
                        u = readFloat(filePtr)
                        v = readFloat(filePtr)
                        layer.data[index].uv = [u, 1 - v]
                        index += 1
            elif tagName == "#Mass#":
                # One float per vertex; the total mass is their sum.
                weightArray = []
                weight = 0;
                for idx in range(0, numPoints):
                    f = readFloat(filePtr)
                    weightArray.append(f)
                    weight += f
            elif tagName[0] == '#':
                # System tag we don't read
                filePtr.seek(numBytes, 1)
            else:
                # Named selection: becomes a vertex group. Selections
                # named "proxy:<path>.<index>" are recorded in the proxy
                # array and grouped under "@@armaproxy" instead.
                newVGrp = True
                if len(tagName) > 5:
                    if tagName[:6] == "proxy:":
                        newVGrp = False
                        vgrp = obj.vertex_groups.new(name="@@armaproxy")
                        prp = obj.armaObjProps.proxyArray
                        prx = tagName.split(":")[1]
                        # "path.index" form: split off the proxy index,
                        # defaulting to "1" when absent or empty.
                        if prx.find(".") != -1:
                            a = prx.split(".")
                            prx = a[0]
                            idx = a[-1]
                            if len(idx) == 0:
                                idx = "1"
                        else:
                            idx = "1"
                        n = prp.add()
                        n.name = vgrp.name
                        n.index = int(idx)
                        n.path = "P:" + prx
                        tagName = "@@armyproxy"
                if newVGrp == True:
                    vgrp = obj.vertex_groups.new(name=tagName)
                # Per-vertex selection weights.
                for i in range(0, numPoints):
                    b = readByte(filePtr)
                    w = decodeWeight(b)
                    if (w > 0):
                        vgrp.add([i], float(w), 'REPLACE')
                # Per-face selection bytes are read to advance the file
                # position but are not applied to the mesh.
                for i in range(0, numFaces):
                    b = readByte(filePtr)
                    w = decodeWeight(b)
    # Done with the taggs, only the resolution is left to read.
    resolution = readFloat(filePtr)
    # Rename mesh, object and collection after the resolved LOD name.
    meshName = resolutionName(resolution)
    mymesh.name = meshName
    obj.name = meshName
    coll.name = meshName
    print("materials...")
    indexData = {}
    # Set up material slots per face, appending each material only once.
    for faceIdx in range(0, numFaces):
        fd = faceData[faceIdx]
        textureName = fd[4]
        materialName = fd[5]
        try:
            mat = materialData[(textureName, materialName)]
            # Add the material if it isn't in the mesh yet.
            if mat.name not in mymesh.materials:
                mymesh.materials.append(mat)
                thisMatIndex = len(mymesh.materials) - 1
                indexData[mat] = thisMatIndex
            else:
                thisMatIndex = indexData[mat]
            mymesh.polygons[faceIdx].material_index = thisMatIndex
        except:
            # Faces without a known texture/material keep slot 0.
            pass
    print("sharp edges")
    # Mark sharp edges via the (v1, v2) lookup table built earlier.
    if sharpEdges is not None:
        for sharpEdge in sharpEdges:
            v1 = sharpEdge[0]
            v2 = sharpEdge[1]
            if (v1 > v2):  # Swap if out of order
                temp = v2
                v2 = v1
                v1 = temp
            try:  # Apparently, some models have sharp edges that (no longer) exist.
                idx = edgeDict[(v1, v2)]
                mymesh.edges[idx].use_edge_sharp = True
            except:
                print(f"WARNING: Edge {v1},{v2} does not exist")
    # TODO: mymesh.validate() stays disabled -- it discards faces with the
    # same vertices but different normals, which we don't want.
    print("Normal calculation")
    mymesh.calc_normals()
    for poly in mymesh.polygons:
        poly.use_smooth = True
    print("Add edge split")
    maybeAddEdgeSplit(obj)
    obj.select_set(True)
    # Derive the Arma LOD type: snap the raw resolution to the closest
    # known constant; the remainder becomes the in-type offset.
    hasSet = False
    oldres = resolution
    resolution = correctedResolution(resolution)
    offset = oldres - resolution
    obj.armaObjProps.isArmaObject = True
    if resolution <= 1000:
        # Plain visual LOD: the value is the view distance itself.
        obj.armaObjProps.lodDistance = resolution
        hasSet = True
    else:
        obj.armaObjProps.lodDistance = offset
    print("set LOD type")
    # Set the right LOD type from the toolbox preset table.
    lodPresets = ArmaToolbox.lodPresets
    for n in lodPresets:
        if float(n[0]) == resolution:
            obj.armaObjProps.lod = n[0]
            hasSet = True
    if hasSet == False:
        print("Error: unknown lod %f" % (resolution))
        print("resolution %d" % (correctedResolution(resolution)))
    print("weight")
    if weight is not None:
        obj.armaObjProps.mass = weight
    # Store per-vertex mass in a custom bmesh float layer 'FHQWeights'.
    if len(weightArray) > 0:
        bm = bmesh.new()
        bm.from_mesh(obj.data)
        bm.verts.ensure_lookup_table()
        weight_layer = bm.verts.layers.float.new('FHQWeights')
        weight_layer = bm.verts.layers.float['FHQWeights']
        print(weight_layer)
        for i in range(0, len(weightArray)):
            bm.verts[i][weight_layer] = weightArray[i]
        bm.to_mesh(obj.data)
    obj.select_set(False)
    # Post-processing: mass fix-ups for Geometry / Geometry PhysX LODs,
    # generic post-processing for unknown ('-1.0') LODs.
    if obj.armaObjProps.lod == '1.000e+13' or obj.armaObjProps.lod == '4.000e+13':
        ArmaTools.attemptFixMassLod(obj)
    if obj.armaObjProps.lod == '-1.0':
        ArmaTools.PostProcessLOD(obj)
    print("done reading lod")
    return 0
# Main Import Routine
def importMDL(context, fileName, layerFlag):
    """Import an unbinarized Arma MLOD file, creating one object per LOD.

    Parameters
    ----------
    context : Blender context, passed through to loadLOD.
    fileName : path of the MLOD .p3d file to read.
    layerFlag : legacy layer-placement flag, forwarded to loadLOD.

    Returns 0 on success, -1 on a bad MLOD header, -2 if a LOD failed
    to load.
    """
    global objectLayers
    # First layer active, the remaining 19 off (legacy 2.7x layer mask).
    objectLayers = [True] + [False] * 19
    objName = path.basename(fileName).split(".")[0]
    # This is used to collect combinations of texture and rvmat
    # in order to generate materials shared across LODs.
    materialData = {}
    # 'with' guarantees the handle is closed even on the early error
    # returns below (the original leaked it when the header check failed).
    with open(fileName, "rb") as filePtr:
        # Read the header
        sig = readSignature(filePtr)
        version = readULong(filePtr)
        numLods = readULong(filePtr)
        print("Signature = {0}, version={1}, numLods = {2}".format(sig, version, numLods))
        if version != 257 or sig != b'MLOD':
            return -1
        # Start loading lods
        for i in range(0, numLods):
            if loadLOD(context, filePtr, objName, materialData, layerFlag, i) != 0:
                return -2
    return 0
| AlwarrenSidh/ArmAToolbox | ArmaToolbox/MDLImporter.py | MDLImporter.py | py | 18,846 | python | en | code | 70 | github-code | 36 | [
{
"api_name": "struct.unpack",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_n... |
43008845296 | import pytest
from sqlobject import DatabaseIndex, ForeignKey, IntCol, MultipleJoin, \
SQLObject, StringCol
from sqlobject.dberrors import DatabaseError, IntegrityError, \
OperationalError, ProgrammingError
from sqlobject.tests.dbtest import raises, setupClass, supports
########################################
# Indexes
########################################
class SOIndex1(SQLObject):
    """Table exercising three index flavors: unique single-column,
    composite, and length-limited (prefix) index."""
    name = StringCol(length=100)
    number = IntCol()
    # Unique index over the name column alone.
    nameIndex = DatabaseIndex('name', unique=True)
    # Plain composite index over both columns.
    nameIndex2 = DatabaseIndex(name, number)
    # Prefix index: only the first 3 characters of name are indexed.
    nameIndex3 = DatabaseIndex({'column': name,
                                'length': 3})
class SOIndex2(SQLObject):
    """Table with a functional (SQL expression) index."""
    name = StringCol(length=100)
    # Index over an SQL expression rather than a plain column.
    nameIndex = DatabaseIndex({'expression': 'lower(name)'})
def test_indexes_1():
    """A duplicate value in a unique-indexed column must be rejected."""
    setupClass(SOIndex1)
    for number, name in enumerate('blah blech boring yep yort snort'.split(),
                                  start=1):
        SOIndex1(name=name, number=number)
    mod = SOIndex1._connection.module
    # The concrete exception type raised for a unique-constraint
    # violation depends on the database backend, so accept any of them.
    raises(
        (mod.ProgrammingError, mod.IntegrityError,
         mod.OperationalError, mod.DatabaseError,
         ProgrammingError, IntegrityError, OperationalError, DatabaseError),
        SOIndex1, name='blah', number=0)
def test_indexes_2():
    """Expression indexes are only created on backends that support them."""
    if not supports('expressionIndex'):
        pytest.skip("expressionIndex isn't supported")
    setupClass(SOIndex2)
    # Creating a row proves the table (and its expression index) works.
    SOIndex2(name='')
class PersonIndexGet(SQLObject):
    """Table with a composite unique index used to test index .get()."""
    firstName = StringCol(length=100)
    lastName = StringCol(length=100)
    # alternateID also generates a PersonIndexGet.byAge() lookup.
    age = IntCol(alternateID=True)
    # Composite unique index; DatabaseIndex.get() looks rows up by it.
    nameIndex = DatabaseIndex(firstName, lastName, unique=True)
def test_index_get_1():
    """DatabaseIndex.get() accepts all-positional or all-keyword column
    values and rejects missing rows, mixed, extra or too-few arguments."""
    setupClass(PersonIndexGet, force=True)
    PersonIndexGet(firstName='Eric', lastName='Idle', age=62)
    PersonIndexGet(firstName='Terry', lastName='Gilliam', age=65)
    PersonIndexGet(firstName='John', lastName='Cleese', age=66)
    PersonIndexGet.get(1)
    # Lookup via the composite index, positionally and by keyword.
    PersonIndexGet.nameIndex.get('Terry', 'Gilliam')
    PersonIndexGet.nameIndex.get(firstName='John', lastName='Cleese')
    # No matching row.
    raises(Exception, PersonIndexGet.nameIndex.get,
           firstName='Graham', lastName='Chapman')
    # Mixing positional and keyword arguments is rejected.
    raises(Exception, PersonIndexGet.nameIndex.get,
           'Terry', lastName='Gilliam')
    # Too many arguments.
    raises(Exception, PersonIndexGet.nameIndex.get, 'Terry', 'Gilliam', 65)
    # Too few arguments.
    raises(Exception, PersonIndexGet.nameIndex.get, 'Terry')
class PersonIndexGet2(SQLObject):
    """Parent table for the foreign-key index .get() test."""
    name = StringCol(alternateID=True, length=100)
    age = IntCol()
    addresses = MultipleJoin('AddressIndexGet2')
class AddressIndexGet2(SQLObject):
    """Child table with a unique (person, type) composite index."""
    person = ForeignKey('PersonIndexGet2', notNone=True)
    type = StringCol(notNone=True, length=100)
    street = StringCol(notNone=True)
    # At most one address per (person, type) pair.
    pk = DatabaseIndex(person, type, unique=True)
def test_index_get_2():
    """Index .get() works when one indexed column is a ForeignKey."""
    setupClass([PersonIndexGet2, AddressIndexGet2])
    p = PersonIndexGet2(name='Terry Guilliam', age=64)
    AddressIndexGet2(person=p, type='home', street='Terry Street 234')
    AddressIndexGet2(person=p, type='work', street='Guilliam Street 234')
    # Lookup via a ForeignKey value, positionally and by keyword.
    AddressIndexGet2.pk.get(p, 'work')
    AddressIndexGet2.pk.get(person=p, type='work')
| sqlobject/sqlobject | sqlobject/tests/test_indexes.py | test_indexes.py | py | 3,088 | python | en | code | 140 | github-code | 36 | [
{
"api_name": "sqlobject.SQLObject",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sqlobject.StringCol",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sqlobject.IntCol",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlobject.Da... |
"""Plot monthly average visits against summed precipitation and save the
figure as a PDF next to the input data."""
import csv
import operator
import decimal
import numpy as np
import matplotlib.pyplot as pl
import calendar
import sys
import os.path

# One list per CSV column of interest.
months = []
bints = []
seasons = []
boundarys = []
sumprecips = []
avghumid = []
avgvisits = []

# Open in text mode with newline='' as the csv module requires on
# Python 3 (the original 'rb' mode makes csv.reader fail there), and
# close the file deterministically via 'with'.
with open('../results/MonthlyTurnoverOldCerrado.csv', newline='') as h:
    for row in csv.reader(h):
        months.append(row[0])
        bints.append(row[1])
        seasons.append(row[2])
        boundarys.append(row[3])
        sumprecips.append(row[4])
        avghumid.append(row[5])
        # BUGFIX: this previously appended row[5] again, making
        # avgvisits an exact copy of avghumid.
        # NOTE(review): assumes visits live in column 6 -- confirm
        # against the CSV header row.
        avgvisits.append(row[6])

# Remove the header row from every series.
for series in (months, bints, seasons, boundarys, sumprecips, avghumid,
               avgvisits):
    del series[0]

pl.plot(sumprecips, avgvisits, 'ro')
pl.grid(True)
pl.title('sumprecip-Avgvisits')
plotpath = '../results/' + 'sumprecip-Avgvisits' + '.pdf'
pl.savefig(plotpath)
pl.show() | musikzauberin/buzz | code/OldCode/plotting2.py | plotting2.py | py | 828 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot... |
21609680031 | from contextlib import contextmanager
import sys
import os
import tempfile
from shutil import rmtree
from os import getcwd, chdir
from os.path import join, basename, dirname, isdir, abspath, sep
import unittest
import six
from six.moves import reload_module
from pylint import config, lint
from pylint.lint import PyLinter, Run, preprocess_options, \
ArgumentPreprocessingError
from pylint.utils import MSG_STATE_SCOPE_CONFIG, MSG_STATE_SCOPE_MODULE, MSG_STATE_CONFIDENCE, \
MessagesStore, PyLintASTWalker, MessageDefinition, FileState, \
build_message_def, tokenize_module, UnknownMessage
from pylint.testutils import TestReporter, catch_warnings
from pylint.reporters import text, html
from pylint import checkers
from pylint.checkers.utils import check_messages
from pylint import interfaces
# Name of the environment variable holding the user's home directory:
# %USERPROFILE% on Windows, $HOME everywhere else.
if os.name == 'java':
    # Jython exposes the underlying platform name via os._name.
    if os._name == 'nt':
        HOME = 'USERPROFILE'
    else:
        HOME = 'HOME'
else:
    if sys.platform == 'win32':
        HOME = 'USERPROFILE'
    else:
        HOME = 'HOME'
@contextmanager
def fake_home():
    """Temporarily point $HOME (or %USERPROFILE%) at an empty temp dir.

    Restores the previous value (or removes the variable entirely) and
    deletes the directory on exit; any PYLINTRC set meanwhile is dropped.
    """
    fake_dir = tempfile.mkdtemp('fake-home')
    saved_home = os.environ.get(HOME)
    os.environ[HOME] = fake_dir
    try:
        yield
    finally:
        # Drop any PYLINTRC the code under test may have set.
        os.environ.pop('PYLINTRC', '')
        if saved_home is None:
            del os.environ[HOME]
        else:
            os.environ[HOME] = saved_home
        rmtree(fake_dir, ignore_errors=True)
def remove(file):
    """Delete *file*, silently ignoring OSError (e.g. it doesn't exist)."""
    try:
        os.remove(file)
    except OSError:
        # Best-effort cleanup helper for tests; absence counts as success.
        pass
HERE = abspath(dirname(__file__))
INPUTDIR = join(HERE, 'input')
@contextmanager
def tempdir():
    """Create a temp directory, chdir into it, and clean up afterwards.

    Yields the absolute path of the directory; the previous working
    directory is restored and the directory removed on exit.
    """
    created = tempfile.mkdtemp()
    previous_dir = getcwd()
    chdir(created)
    # Resolve via abspath('.') rather than the mkdtemp result: on macOS
    # the temp dir may be reached through a symlink and tests compare
    # the real path.
    real_path = abspath('.')
    try:
        yield real_path
    finally:
        chdir(previous_dir)
        rmtree(real_path)
def create_files(paths, chroot='.'):
    """Create the directories and empty files listed in *paths*.

    :param paths: relative paths; one ending in a separator ('a/b/') is
        treated as a directory, anything else as an empty file whose
        parent directories are created as needed.
    :param chroot: root directory under which everything is created.
    """
    dir_paths, file_paths = set(), set()
    for rel in paths:
        full = join(chroot, rel)
        if basename(full) == '':
            # Trailing separator: a directory entry.
            dir_paths.add(full)
        else:
            # A file entry; its parent directory must exist too.
            dir_paths.add(dirname(full))
            file_paths.add(full)
    for dir_path in dir_paths:
        if not isdir(dir_path):
            os.makedirs(dir_path)
    for file_path in file_paths:
        open(file_path, 'w').close()
class SysPathFixupTC(unittest.TestCase):
    """Verify that lint.fix_import_path temporarily prepends the package
    roots of the checked modules to sys.path and restores it on exit."""

    def setUp(self):
        # Swap sys.path for an easily recognizable fake list so any
        # unexpected mutation is obvious.
        self.orig = list(sys.path)
        self.fake = [1, 2, 3]
        sys.path[:] = self.fake

    def tearDown(self):
        sys.path[:] = self.orig

    def test_no_args(self):
        # No modules: sys.path must be left alone, inside and after.
        with lint.fix_import_path([]):
            self.assertEqual(sys.path, self.fake)
        self.assertEqual(sys.path, self.fake)

    def test_one_arg(self):
        # Every spelling of the package path (dir, trailing slash,
        # __init__.py, parent) resolves to the same root 'a' being
        # prepended for the duration of the context only.
        with tempdir() as chroot:
            create_files(['a/b/__init__.py'])
            expected = [join(chroot, 'a')] + self.fake
            cases = (
                ['a/b/'],
                ['a/b'],
                ['a/b/__init__.py'],
                ['a/'],
                ['a'],
            )
            self.assertEqual(sys.path, self.fake)
            for case in cases:
                with lint.fix_import_path(case):
                    self.assertEqual(sys.path, expected)
                self.assertEqual(sys.path, self.fake)

    def test_two_similar_args(self):
        # Two packages sharing the root 'a' must add that root only once.
        with tempdir() as chroot:
            create_files(['a/b/__init__.py', 'a/c/__init__.py'])
            expected = [join(chroot, 'a')] + self.fake
            cases = (
                ['a/b', 'a/c'],
                ['a/c/', 'a/b/'],
                ['a/b/__init__.py', 'a/c/__init__.py'],
                ['a', 'a/c/__init__.py'],
            )
            self.assertEqual(sys.path, self.fake)
            for case in cases:
                with lint.fix_import_path(case):
                    self.assertEqual(sys.path, expected)
                self.assertEqual(sys.path, self.fake)

    def test_more_args(self):
        # Distinct roots are added in argument order and deduplicated.
        with tempdir() as chroot:
            create_files(['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'])
            expected = [
                join(chroot, suffix)
                for suffix in [sep.join(('a', 'b')), 'a', sep.join(('a', 'e'))]
            ] + self.fake
            cases = (
                ['a/b/c/__init__.py', 'a/d/__init__.py', 'a/e/f.py'],
                ['a/b/c', 'a', 'a/e'],
                ['a/b/c', 'a', 'a/b/c', 'a/e', 'a'],
            )
            self.assertEqual(sys.path, self.fake)
            for case in cases:
                with lint.fix_import_path(case):
                    self.assertEqual(sys.path, expected)
                self.assertEqual(sys.path, self.fake)
class PyLinterTC(unittest.TestCase):
def setUp(self):
self.linter = PyLinter()
self.linter.disable('I')
self.linter.config.persistent = 0
# register checkers
checkers.initialize(self.linter)
self.linter.set_reporter(TestReporter())
def init_linter(self):
linter = self.linter
linter.open()
linter.set_current_module('toto')
linter.file_state = FileState('toto')
return linter
def test_pylint_visit_method_taken_in_account(self):
class CustomChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = 'custom'
msgs = {'W9999': ('', 'custom', '')}
@check_messages('custom')
def visit_class(self, _):
pass
self.linter.register_checker(CustomChecker(self.linter))
self.linter.open()
out = six.moves.StringIO()
self.linter.set_reporter(text.TextReporter(out))
self.linter.check('abc')
def test_enable_message(self):
linter = self.init_linter()
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('W0102'))
linter.disable('W0101', scope='package')
linter.disable('W0102', scope='module', line=1)
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertFalse(linter.is_message_enabled('W0102', 1))
linter.set_current_module('tutu')
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('W0102'))
linter.enable('W0101', scope='package')
linter.enable('W0102', scope='module', line=1)
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('W0102', 1))
def test_enable_message_category(self):
linter = self.init_linter()
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
linter.disable('W', scope='package')
linter.disable('C', scope='module', line=1)
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
self.assertFalse(linter.is_message_enabled('C0202', line=1))
linter.set_current_module('tutu')
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
linter.enable('W', scope='package')
linter.enable('C', scope='module', line=1)
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('C0202'))
self.assertTrue(linter.is_message_enabled('C0202', line=1))
def test_message_state_scope(self):
class FakeConfig(object):
confidence = ['HIGH']
linter = self.init_linter()
linter.disable('C0202')
self.assertEqual(MSG_STATE_SCOPE_CONFIG,
linter.get_message_state_scope('C0202'))
linter.disable('W0101', scope='module', line=3)
self.assertEqual(MSG_STATE_SCOPE_CONFIG,
linter.get_message_state_scope('C0202'))
self.assertEqual(MSG_STATE_SCOPE_MODULE,
linter.get_message_state_scope('W0101', 3))
linter.enable('W0102', scope='module', line=3)
self.assertEqual(MSG_STATE_SCOPE_MODULE,
linter.get_message_state_scope('W0102', 3))
linter.config = FakeConfig()
self.assertEqual(
MSG_STATE_CONFIDENCE,
linter.get_message_state_scope('this-is-bad',
confidence=interfaces.INFERENCE))
def test_enable_message_block(self):
linter = self.init_linter()
linter.open()
filepath = join(INPUTDIR, 'func_block_disable_msg.py')
linter.set_current_module('func_block_disable_msg')
astroid = linter.get_ast(filepath, 'func_block_disable_msg')
linter.process_tokens(tokenize_module(astroid))
fs = linter.file_state
fs.collect_block_lines(linter.msgs_store, astroid)
# global (module level)
self.assertTrue(linter.is_message_enabled('W0613'))
self.assertTrue(linter.is_message_enabled('E1101'))
# meth1
self.assertTrue(linter.is_message_enabled('W0613', 13))
# meth2
self.assertFalse(linter.is_message_enabled('W0613', 18))
# meth3
self.assertFalse(linter.is_message_enabled('E1101', 24))
self.assertTrue(linter.is_message_enabled('E1101', 26))
# meth4
self.assertFalse(linter.is_message_enabled('E1101', 32))
self.assertTrue(linter.is_message_enabled('E1101', 36))
# meth5
self.assertFalse(linter.is_message_enabled('E1101', 42))
self.assertFalse(linter.is_message_enabled('E1101', 43))
self.assertTrue(linter.is_message_enabled('E1101', 46))
self.assertFalse(linter.is_message_enabled('E1101', 49))
self.assertFalse(linter.is_message_enabled('E1101', 51))
# meth6
self.assertFalse(linter.is_message_enabled('E1101', 57))
self.assertTrue(linter.is_message_enabled('E1101', 61))
self.assertFalse(linter.is_message_enabled('E1101', 64))
self.assertFalse(linter.is_message_enabled('E1101', 66))
self.assertTrue(linter.is_message_enabled('E0602', 57))
self.assertTrue(linter.is_message_enabled('E0602', 61))
self.assertFalse(linter.is_message_enabled('E0602', 62))
self.assertTrue(linter.is_message_enabled('E0602', 64))
self.assertTrue(linter.is_message_enabled('E0602', 66))
# meth7
self.assertFalse(linter.is_message_enabled('E1101', 70))
self.assertTrue(linter.is_message_enabled('E1101', 72))
self.assertTrue(linter.is_message_enabled('E1101', 75))
self.assertTrue(linter.is_message_enabled('E1101', 77))
fs = linter.file_state
self.assertEqual(17, fs._suppression_mapping['W0613', 18])
self.assertEqual(30, fs._suppression_mapping['E1101', 33])
self.assertTrue(('E1101', 46) not in fs._suppression_mapping)
self.assertEqual(1, fs._suppression_mapping['C0302', 18])
self.assertEqual(1, fs._suppression_mapping['C0302', 50])
# This is tricky. While the disable in line 106 is disabling
# both 108 and 110, this is usually not what the user wanted.
# Therefore, we report the closest previous disable comment.
self.assertEqual(106, fs._suppression_mapping['E1101', 108])
self.assertEqual(109, fs._suppression_mapping['E1101', 110])
def test_enable_by_symbol(self):
"""messages can be controlled by symbolic names.
The state is consistent across symbols and numbers.
"""
linter = self.init_linter()
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('unreachable'))
self.assertTrue(linter.is_message_enabled('W0102'))
self.assertTrue(linter.is_message_enabled('dangerous-default-value'))
linter.disable('unreachable', scope='package')
linter.disable('dangerous-default-value', scope='module', line=1)
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertFalse(linter.is_message_enabled('unreachable'))
self.assertFalse(linter.is_message_enabled('W0102', 1))
self.assertFalse(linter.is_message_enabled('dangerous-default-value', 1))
linter.set_current_module('tutu')
self.assertFalse(linter.is_message_enabled('W0101'))
self.assertFalse(linter.is_message_enabled('unreachable'))
self.assertTrue(linter.is_message_enabled('W0102'))
self.assertTrue(linter.is_message_enabled('dangerous-default-value'))
linter.enable('unreachable', scope='package')
linter.enable('dangerous-default-value', scope='module', line=1)
self.assertTrue(linter.is_message_enabled('W0101'))
self.assertTrue(linter.is_message_enabled('unreachable'))
self.assertTrue(linter.is_message_enabled('W0102', 1))
self.assertTrue(linter.is_message_enabled('dangerous-default-value', 1))
def test_lint_ext_module_with_file_output(self):
self.linter.set_reporter(text.TextReporter())
if sys.version_info < (3, 0):
strio = 'StringIO'
else:
strio = 'io'
self.linter.config.files_output = True
pylint_strio = 'pylint_%s.txt' % strio
files = [pylint_strio, 'pylint_global.txt']
for file in files:
self.addCleanup(remove, file)
self.linter.check(strio)
self.linter.generate_reports()
for f in files:
self.assertTrue(os.path.exists(f))
def test_enable_report(self):
self.assertEqual(self.linter.report_is_enabled('RP0001'), True)
self.linter.disable('RP0001')
self.assertEqual(self.linter.report_is_enabled('RP0001'), False)
self.linter.enable('RP0001')
self.assertEqual(self.linter.report_is_enabled('RP0001'), True)
def test_report_output_format_aliased(self):
text.register(self.linter)
self.linter.set_option('output-format', 'text')
self.assertEqual(self.linter.reporter.__class__.__name__, 'TextReporter')
def test_report_output_format_custom(self):
this_module = sys.modules[__name__]
class TestReporter(object):
pass
this_module.TestReporter = TestReporter
class_name = ".".join((this_module.__name__, 'TestReporter'))
self.linter.set_option('output-format', class_name)
self.assertEqual(self.linter.reporter.__class__.__name__, 'TestReporter')
def test_set_option_1(self):
linter = self.linter
linter.set_option('disable', 'C0111,W0234')
self.assertFalse(linter.is_message_enabled('C0111'))
self.assertFalse(linter.is_message_enabled('W0234'))
self.assertTrue(linter.is_message_enabled('W0113'))
self.assertFalse(linter.is_message_enabled('missing-docstring'))
self.assertFalse(linter.is_message_enabled('non-iterator-returned'))
def test_set_option_2(self):
linter = self.linter
linter.set_option('disable', ('C0111', 'W0234') )
self.assertFalse(linter.is_message_enabled('C0111'))
self.assertFalse(linter.is_message_enabled('W0234'))
self.assertTrue(linter.is_message_enabled('W0113'))
self.assertFalse(linter.is_message_enabled('missing-docstring'))
self.assertFalse(linter.is_message_enabled('non-iterator-returned'))
def test_enable_checkers(self):
self.linter.disable('design')
self.assertFalse('design' in [c.name for c in self.linter.prepare_checkers()])
self.linter.enable('design')
self.assertTrue('design' in [c.name for c in self.linter.prepare_checkers()])
def test_errors_only(self):
linter = self.linter
self.linter.error_mode()
checkers = self.linter.prepare_checkers()
checker_names = set(c.name for c in checkers)
should_not = set(('design', 'format', 'metrics',
'miscellaneous', 'similarities'))
self.assertSetEqual(set(), should_not & checker_names)
def test_disable_similar(self):
self.linter.set_option('disable', 'RP0801')
self.linter.set_option('disable', 'R0801')
self.assertFalse('similarities' in [c.name for c in self.linter.prepare_checkers()])
def test_disable_alot(self):
"""check that we disabled a lot of checkers"""
self.linter.set_option('reports', False)
self.linter.set_option('disable', 'R,C,W')
checker_names = [c.name for c in self.linter.prepare_checkers()]
for cname in ('design', 'metrics', 'similarities'):
self.assertFalse(cname in checker_names, cname)
def test_addmessage(self):
self.linter.set_reporter(TestReporter())
self.linter.open()
self.linter.set_current_module('0123')
self.linter.add_message('C0301', line=1, args=(1, 2))
self.linter.add_message('line-too-long', line=2, args=(3, 4))
self.assertEqual(
['C: 1: Line too long (1/2)', 'C: 2: Line too long (3/4)'],
self.linter.reporter.messages)
def test_init_hooks_called_before_load_plugins(self):
self.assertRaises(RuntimeError,
Run, ['--load-plugins', 'unexistant', '--init-hook', 'raise RuntimeError'])
self.assertRaises(RuntimeError,
Run, ['--init-hook', 'raise RuntimeError', '--load-plugins', 'unexistant'])
def test_analyze_explicit_script(self):
self.linter.set_reporter(TestReporter())
self.linter.check(os.path.join(os.path.dirname(__file__), 'data', 'ascript'))
self.assertEqual(
['C: 2: Line too long (175/100)'],
self.linter.reporter.messages)
def test_html_reporter_missing_files(self):
output = six.StringIO()
with catch_warnings():
self.linter.set_reporter(html.HTMLReporter(output))
self.linter.set_option('output-format', 'html')
self.linter.check('troppoptop.py')
self.linter.generate_reports()
value = output.getvalue()
self.assertIn('troppoptop.py', value)
self.assertIn('fatal', value)
def test_python3_checker_disabled(self):
checker_names = [c.name for c in self.linter.prepare_checkers()]
self.assertNotIn('python3', checker_names)
self.linter.set_option('enable', 'python3')
checker_names = [c.name for c in self.linter.prepare_checkers()]
self.assertIn('python3', checker_names)
class ConfigTC(unittest.TestCase):
def setUp(self):
os.environ.pop('PYLINTRC', None)
def test_pylint_home(self):
uhome = os.path.expanduser('~')
if uhome == '~':
expected = '.pylint.d'
else:
expected = os.path.join(uhome, '.pylint.d')
self.assertEqual(config.PYLINT_HOME, expected)
try:
pylintd = join(tempfile.gettempdir(), '.pylint.d')
os.environ['PYLINTHOME'] = pylintd
try:
reload_module(config)
self.assertEqual(config.PYLINT_HOME, pylintd)
finally:
try:
os.remove(pylintd)
except:
pass
finally:
del os.environ['PYLINTHOME']
def test_pylintrc(self):
with fake_home():
try:
self.assertEqual(config.find_pylintrc(), None)
os.environ['PYLINTRC'] = join(tempfile.gettempdir(),
'.pylintrc')
self.assertEqual(config.find_pylintrc(), None)
os.environ['PYLINTRC'] = '.'
self.assertEqual(config.find_pylintrc(), None)
finally:
reload_module(config)
def test_pylintrc_parentdir(self):
with tempdir() as chroot:
create_files(['a/pylintrc', 'a/b/__init__.py', 'a/b/pylintrc',
'a/b/c/__init__.py', 'a/b/c/d/__init__.py',
'a/b/c/d/e/.pylintrc'])
with fake_home():
self.assertEqual(config.find_pylintrc(), None)
results = {'a' : join(chroot, 'a', 'pylintrc'),
'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c/d' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c/d/e' : join(chroot, 'a', 'b', 'c', 'd', 'e', '.pylintrc'),
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
self.assertEqual(config.find_pylintrc(), expected)
def test_pylintrc_parentdir_no_package(self):
with tempdir() as chroot:
with fake_home():
create_files(['a/pylintrc', 'a/b/pylintrc', 'a/b/c/d/__init__.py'])
self.assertEqual(config.find_pylintrc(), None)
results = {'a' : join(chroot, 'a', 'pylintrc'),
'a/b' : join(chroot, 'a', 'b', 'pylintrc'),
'a/b/c' : None,
'a/b/c/d' : None,
}
for basedir, expected in results.items():
os.chdir(join(chroot, basedir))
self.assertEqual(config.find_pylintrc(), expected)
class PreprocessOptionsTC(unittest.TestCase):
def _callback(self, name, value):
self.args.append((name, value))
def test_value_equal(self):
self.args = []
preprocess_options(['--foo', '--bar=baz', '--qu=ux'],
{'foo' : (self._callback, False),
'qu' : (self._callback, True)})
self.assertEqual(
[('foo', None), ('qu', 'ux')], self.args)
def test_value_space(self):
self.args = []
preprocess_options(['--qu', 'ux'],
{'qu' : (self._callback, True)})
self.assertEqual(
[('qu', 'ux')], self.args)
def test_error_missing_expected_value(self):
self.assertRaises(
ArgumentPreprocessingError,
preprocess_options,
['--foo', '--bar', '--qu=ux'],
{'bar' : (None, True)})
self.assertRaises(
ArgumentPreprocessingError,
preprocess_options,
['--foo', '--bar'],
{'bar' : (None, True)})
def test_error_unexpected_value(self):
self.assertRaises(
ArgumentPreprocessingError,
preprocess_options,
['--foo', '--bar=spam', '--qu=ux'],
{'bar' : (None, False)})
class MessagesStoreTC(unittest.TestCase):
def setUp(self):
self.store = MessagesStore()
class Checker(object):
name = 'achecker'
msgs = {
'W1234': ('message', 'msg-symbol', 'msg description.',
{'old_names': [('W0001', 'old-symbol')]}),
'E1234': ('Duplicate keyword argument %r in %s call',
'duplicate-keyword-arg',
'Used when a function call passes the same keyword argument multiple times.',
{'maxversion': (2, 6)}),
}
self.store.register_messages(Checker())
def _compare_messages(self, desc, msg, checkerref=False):
self.assertMultiLineEqual(desc, msg.format_help(checkerref=checkerref))
def test_check_message_id(self):
self.assertIsInstance(self.store.check_message_id('W1234'),
MessageDefinition)
self.assertRaises(UnknownMessage,
self.store.check_message_id, 'YB12')
def test_message_help(self):
msg = self.store.check_message_id('W1234')
self._compare_messages(
''':msg-symbol (W1234): *message*
msg description. This message belongs to the achecker checker.''',
msg, checkerref=True)
self._compare_messages(
''':msg-symbol (W1234): *message*
msg description.''',
msg, checkerref=False)
def test_message_help_minmax(self):
# build the message manually to be python version independant
msg = self.store.check_message_id('E1234')
self._compare_messages(
''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message belongs to the achecker checker. It can't be emitted when using
Python >= 2.6.''',
msg, checkerref=True)
self._compare_messages(
''':duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message can't be emitted when using Python >= 2.6.''',
msg, checkerref=False)
def test_list_messages(self):
sys.stdout = six.StringIO()
try:
self.store.list_messages()
output = sys.stdout.getvalue()
finally:
sys.stdout = sys.__stdout__
# cursory examination of the output: we're mostly testing it completes
self.assertIn(':msg-symbol (W1234): *message*', output)
def test_add_renamed_message(self):
self.store.add_renamed_message('W1234', 'old-bad-name', 'msg-symbol')
self.assertEqual('msg-symbol',
self.store.check_message_id('W1234').symbol)
self.assertEqual('msg-symbol',
self.store.check_message_id('old-bad-name').symbol)
def test_renamed_message_register(self):
self.assertEqual('msg-symbol',
self.store.check_message_id('W0001').symbol)
self.assertEqual('msg-symbol',
self.store.check_message_id('old-symbol').symbol)
if __name__ == '__main__':
unittest.main()
| a0x8o/kafka | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/unittest_lint.py | unittest_lint.py | py | 26,703 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "os.name",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os._name",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.platform",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"... |
32473828762 | from config import bot, chat_id
from time import sleep
from telebot import types
from plugins.error import Error
from plugins.message import shout
import random
from plugins.error import in_chat, check_private
@in_chat()
def say(m):
bot.delete_message(m.chat.id, m.message_id)
try:
if m.chat.type != "private":
markup = types.InlineKeyboardMarkup() #Отвечаем, если выхов был из супер чата
link_bot= types.InlineKeyboardButton(text='Перейти в лс', url='t.me/cat_glav_bot') #Отвечаем, если выхов был из супер чата
markup.add(link_bot) #Отвечаем, если выхов был из супер чата
sent=bot.send_message(m.chat.id, "Команда /say работает только в лс бота", reply_markup = markup) #Отвечаем, если выхов был из супер чата
sleep(10)
bot.delete_message(m.chat.id,sent.message_id)
if m.reply_to_message:
sticker_id = bot.get_file(m.reply_to_message.sticker.file_id)
bot.send_sticker(chat_id, sticker_id.file_id)
sent=bot.send_message(m.chat.id, "Стикер успешно отправлен!")#Отвечаем, если команда пришла не из супер чата
sleep(10)
bot.delete_message(m.chat.id,sent.message_id)
else:
bot.send_message(chat_id, f"_{random.choice(shout)}:_ *'{m.text[5:]}'* 😱 ", parse_mode="Markdown") #Обработать команду и отправить то, что находится с 5 символа и до...
sent=bot.send_message(m.chat.id, "Сообщение успешно отправлено!")#Отвечаем, если команда пришла не из супер чата
sleep(10)
bot.delete_message(m.chat.id,sent.message_id)
except Exception:
Error(m, bot).error()
| evilcatsystem/telegram-bot | plugins/say.py | say.py | py | 2,015 | python | ru | code | 1 | github-code | 36 | [
{
"api_name": "config.bot.delete_message",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "config.bot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "telebot.types.InlineKeyboardMarkup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name"... |
16627412673 | '''
*****************************************************************************************
*
* =================================================
* Pharma Bot Theme (eYRC 2022-23)
* =================================================
*
* This script is intended for implementation of Task 4A
* of Pharma Bot (PB) Theme (eYRC 2022-23).
*
* Filename: task_4a.py
* Created:
* Last Modified: 02/01/2023
* Author: e-Yantra Team
*
* This software is made available on an "AS IS WHERE IS BASIS".
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
*****************************************************************************************
'''
# Team ID: [ 3004 ]
# Author List: [ Aryan Bawankar, Advait Dhamorikar ]
# Filename: task_4a.py
# Functions: [ place_packages, place_traffic_signals, place_start_end_nodes, place_horizontal_barricade, place_vertical_barricade]
#
####################### IMPORT MODULES #######################
## You are not allowed to make any changes in this section. ##
##############################################################
import numpy as np
import cv2
from zmqRemoteApi import RemoteAPIClient
import zmq
import os
import time
##############################################################
################# ADD UTILITY FUNCTIONS HERE #################
##############################################################
def place_packages(medicine_package_details, sim, all_models):
"""
Purpose:
---
This function takes details (colour, shape and shop) of the packages present in
the arena (using "detect_arena_parameters" function from task_1a.py) and places
them on the virtual arena. The packages should be inserted only into the
designated areas in each shop as mentioned in the Task document.
Functions from Regular API References should be used to set the position of the
packages.
Input Arguments:
---
`medicine_package_details` : [ list ]
nested list containing details of the medicine packages present.
Each element of this list will contain
- Shop number as Shop_n
- Color of the package as a string
- Shape of the package as a string
- Centroid co-ordinates of the package
`sim` : [ object ]
ZeroMQ RemoteAPI object
`all_models` : [ list ]
list containing handles of all the models imported into the scene
Returns:
`all_models` : [ list ]
list containing handles of all the models imported into the scene
Example call:
---
all_models = place_packages(medicine_package_details, sim, all_models)
"""
models_directory = os.getcwd()
packages_models_directory = os.path.join(models_directory, "package_models")
arena = sim.getObject('/Arena')
####################### ADD YOUR CODE HERE #########################
flag1 = 0
flag2 = 0
flag3 = 0
flag4 = 0
flag5 = 0
for i in medicine_package_details:
shop = i[0]
if i[2] == "Circle":
shape = "cylinder"
elif i[2] == "Square":
shape = "cube"
elif i[2] == "Triangle":
shape = "cone"
# Setting Coordinate
if shop == "Shop_1":
if flag1 == 1:
x = x + 0.09
else:
x = -0.9 + 0.044
flag1 = 1
elif shop == "Shop_2":
if flag2 == 1:
x = x + 0.09
else:
x = -0.54 + 0.044
flag2 = 1
elif shop == "Shop_3":
if flag3 == 1:
x = x + 0.09
else:
x = -0.18 + 0.044
flag3 = 1
elif shop == "Shop_4":
if flag4 == 1:
x = x + 0.09
else:
x = 0.18 + 0.044
flag4 = 1
elif shop == "Shop_5":
if flag5 == 1:
x = x + 0.09
else:
x = 0.54 + 0.044
flag5 = 1
package = i[1] + "_" + shape
package_ttm = package + ".ttm"
# print(shop, package)
package_ttm = os.path.join(packages_models_directory, package_ttm)
medicine = sim.loadModel(package_ttm)
sim.setObjectParent(medicine, arena, False)
sim.setObjectAlias(medicine, package)
sim.setObjectPosition(medicine, arena, [x, 0.65, 0.015])
all_models.append(medicine)
####################################################################
return all_models
def place_traffic_signals(traffic_signals, sim, all_models):
"""
Purpose:
---
This function takes position of the traffic signals present in
the arena (using "detect_arena_parameters" function from task_1a.py) and places
them on the virtual arena. The signal should be inserted at a particular node.
Functions from Regular API References should be used to set the position of the
signals.
Input Arguments:
---
`traffic_signals` : [ list ]
list containing nodes in which traffic signals are present
`sim` : [ object ]
ZeroMQ RemoteAPI object
`all_models` : [ list ]
list containing handles of all the models imported into the scene
Returns:
`all_models` : [ list ]
list containing handles of all the models imported into the scene
None
Example call:
---
all_models = place_traffic_signals(traffic_signals, sim, all_models)
"""
models_directory = os.getcwd()
traffic_sig_model = os.path.join(models_directory, "signals", "traffic_signal.ttm" )
arena = sim.getObject('/Arena')
####################### ADD YOUR CODE HERE #########################
for i in traffic_signals:
a = i[0]
b = i[1]
x = 0
y = 0
# setting up X Coordinate
if a == 'A':
x = -0.9
elif a == 'B':
x = -0.54
elif a == 'C':
x = -0.18
elif a == 'D':
x = 0.18
elif a == 'E':
x = 0.54
elif a == 'F':
x = 0.9
# setting up Y Coordinate
if b == '1':
y = 0.9
elif b == '2':
y = 0.54
elif b == '3':
y = 0.18
elif b == '4':
y = -0.18
elif b == '5':
y = -0.54
elif b == '6':
y = -0.9
name = "Signal_" + i
position = [x, y, 0.15588]
signal = sim.loadModel(traffic_sig_model)
sim.setObjectParent(signal, arena, False)
sim.setObjectAlias(signal, name)
sim.setObjectPosition(signal, arena, position)
all_models.append(signal)
####################################################################
return all_models
def place_start_end_nodes(start_node, end_node, sim, all_models):
"""
Purpose:
---
This function takes position of start and end nodes present in
the arena and places them on the virtual arena.
The models should be inserted at a particular node.
Functions from Regular API References should be used to set the position of the
start and end nodes.
Input Arguments:
---
`start_node` : [ string ]
`end_node` : [ string ]
`sim` : [ object ]
ZeroMQ RemoteAPI object
`all_models` : [ list ]
list containing handles of all the models imported into the scene
Returns:
`all_models` : [ list ]
list containing handles of all the models imported into the scene
---
None
Example call:
---
all_models = place_start_end_nodes(start_node, end_node, sim, all_models)
"""
models_directory = os.getcwd()
start_node_model = os.path.join(models_directory, "signals", "start_node.ttm" )
end_node_model = os.path.join(models_directory, "signals", "end_node.ttm" )
arena = sim.getObject('/Arena')
####################### ADD YOUR CODE HERE #########################
a = start_node[0]
b = start_node[1]
x = 0
y = 0
# setting up X Coordinate
if a == 'A':
x = -0.9
elif a == 'B':
x = -0.54
elif a == 'C':
x = -0.18
elif a == 'D':
x = 0.18
elif a == 'E':
x = 0.54
elif a == 'F':
x = 0.9
# setting up Y Coordinate
if b == '1':
y = 0.9
elif b == '2':
y = 0.54
elif b == '3':
y = 0.18
elif b == '4':
y = -0.18
elif b == '5':
y = -0.54
elif b == '6':
y = -0.9
name = "Start_Node"
position = [x, y, 0.15588]
start_node = sim.loadModel(start_node_model)
sim.setObjectParent(start_node, arena, False)
sim.setObjectAlias(start_node, name)
sim.setObjectPosition(start_node, arena, position)
all_models.append(start_node)
a = end_node[0]
b = end_node[1]
x = 0
y = 0
# setting up X Coordinate
if a == 'A':
x = -0.9
elif a == 'B':
x = -0.54
elif a == 'C':
x = -0.18
elif a == 'D':
x = 0.18
elif a == 'E':
x = 0.54
elif a == 'F':
x = 0.9
# setting up Y Coordinate
if b == '1':
y = 0.9
elif b == '2':
y = 0.54
elif b == '3':
y = 0.18
elif b == '4':
y = -0.18
elif b == '5':
y = -0.54
elif b == '6':
y = -0.9
name = "End_Node"
position = [x, y, 0.15588]
end_node = sim.loadModel(end_node_model)
sim.setObjectParent(end_node, arena, False)
sim.setObjectAlias(end_node, name)
sim.setObjectPosition(end_node, arena, position)
all_models.append(end_node)
####################################################################
return all_models
def place_horizontal_barricade(horizontal_roads_under_construction, sim, all_models):
"""
Purpose:
---
This function takes the list of missing horizontal roads present in
the arena (using "detect_arena_parameters" function from task_1a.py) and places
horizontal barricades on virtual arena. The barricade should be inserted
between two nodes as shown in Task document.
Functions from Regular API References should be used to set the position of the
horizontal barricades.
Input Arguments:
---
`horizontal_roads_under_construction` : [ list ]
list containing missing horizontal links
`sim` : [ object ]
ZeroMQ RemoteAPI object
`all_models` : [ list ]
list containing handles of all the models imported into the scene
Returns:
`all_models` : [ list ]
list containing handles of all the models imported into the scene
---
None
Example call:
---
all_models = place_horizontal_barricade(horizontal_roads_under_construction, sim, all_models)
"""
models_directory = os.getcwd()
horiz_barricade_model = os.path.join(models_directory, "barricades", "horizontal_barricade.ttm" )
arena = sim.getObject('/Arena')
####################### ADD YOUR CODE HERE #########################
for i in horizontal_roads_under_construction:
nodes = i
A = nodes[0]
# setting up X Coordinate
if A == 'A':
x = -0.9
elif A == 'B':
x = -0.54
elif A == 'C':
x = -0.18
elif A == 'D':
x = 0.18
elif A == 'E':
x = 0.54
elif A == 'F':
x = 0.9
x = x + 0.18
C = nodes[1]
# setting up Y Coordinate
if C == '1':
y = 0.9
elif C == '2':
y = 0.54
elif C == '3':
y = 0.18
elif C == '4':
y = -0.18
elif C == '5':
y = -0.54
elif C == '6':
y = -0.9
positions = [x, y, 0.027]
name = "Horizontal_missing_road_" + A + C + "_" + nodes[3] + nodes[4]
h_barricade = sim.loadModel(horiz_barricade_model)
sim.setObjectParent(h_barricade, arena, False)
sim.setObjectAlias(h_barricade, name)
sim.setObjectPosition(h_barricade, arena, positions)
all_models.append(h_barricade)
####################################################################
return all_models
def place_vertical_barricade(vertical_roads_under_construction, sim, all_models):
"""
Purpose:
---
This function takes the list of missing vertical roads present in
the arena (using "detect_arena_parameters" function from task_1a.py) and places
vertical barricades on virtual arena. The barricade should be inserted
between two nodes as shown in Task document.
Functions from Regular API References should be used to set the position of the
vertical barricades.
Input Arguments:
---
`vertical_roads_under_construction` : [ list ]
list containing missing vertical links
`sim` : [ object ]
ZeroMQ RemoteAPI object
`all_models` : [ list ]
list containing handles of all the models imported into the scene
Returns:
`all_models` : [ list ]
list containing handles of all the models imported into the scene
---
None
Example call:
---
all_models = place_vertical_barricade(vertical_roads_under_construction, sim, all_models)
"""
models_directory = os.getcwd()
vert_barricade_model = os.path.join(models_directory, "barricades", "vertical_barricade.ttm" )
arena = sim.getObject('/Arena')
####################### ADD YOUR CODE HERE #########################
for i in vertical_roads_under_construction:
nodes = i
A = nodes[0]
# setting up X Coordinate
if A == 'A':
x = -0.9
elif A == 'B':
x = -0.54
elif A == 'C':
x = -0.18
elif A == 'D':
x = 0.18
elif A == 'E':
x = 0.54
elif A == 'F':
x = 0.9
C = nodes[1]
# setting up Y Coordinate
if C == '1':
y = 0.9
elif C == '2':
y = 0.54
elif C == '3':
y = 0.18
elif C == '4':
y = -0.18
elif C == '5':
y = -0.54
elif C == '6':
y = -0.9
y = y - 0.18
positions = [x, y, 0.027]
name = "Vertical_missing_road_" + A + C + "_" + nodes[3] + nodes[4]
v_barricade = sim.loadModel(vert_barricade_model)
sim.setObjectParent(v_barricade, arena, False)
sim.setObjectAlias(v_barricade, name)
sim.setObjectPosition(v_barricade, arena, positions)
all_models.append(v_barricade)
####################################################################
return all_models
if __name__ == "__main__":
client = RemoteAPIClient()
sim = client.getObject('sim')
# arena = sim.getObject('/Arena')
aruco_handle = sim.getObject('/aruco_3')
arena = sim.getObject('/Arena')
# sim.setObjectParent(aruco_handle, arena, False)
# sim.setObjectAlias(aruco_handle, "marker")
sim.setObjectPosition(aruco_handle, -1, [0.15, 0.15, 0.15])
sim.setObjectOrientation(aruco_handle, -1, [0, 0, 45])
# path directory of images in test_images folder
img_dir = os.getcwd() + "/test_imgs/"
i = 0
config_img = cv2.imread(img_dir + 'maze_' + str(i) + '.png')
print('\n============================================')
print('\nFor maze_0.png')
# object handles of each model that gets imported to the scene can be stored in this list
# at the end of each test image, all the models will be removed
all_models = []
# import task_1a.py. Make sure that task_1a.py is in same folder as task_4a.py
task_1 = __import__('task_1a')
detected_arena_parameters = task_1.detect_arena_parameters(config_img)
# obtain required arena parameters
medicine_package_details = detected_arena_parameters["medicine_packages"]
traffic_signals = detected_arena_parameters['traffic_signals']
start_node = detected_arena_parameters['start_node']
end_node = detected_arena_parameters['end_node']
horizontal_roads_under_construction = detected_arena_parameters[
'horizontal_roads_under_construction']
vertical_roads_under_construction = detected_arena_parameters[
'vertical_roads_under_construction']
print("[1] Setting up the scene in CoppeliaSim")
all_models = place_packages(medicine_package_details, sim, all_models)
all_models = place_traffic_signals(traffic_signals, sim, all_models)
all_models = place_horizontal_barricade(
horizontal_roads_under_construction, sim, all_models)
all_models = place_vertical_barricade(
vertical_roads_under_construction, sim, all_models)
all_models = place_start_end_nodes(start_node, end_node, sim, all_models)
print("[2] Completed setting up the scene in CoppeliaSim")
# wait for 10 seconds and then remove models
time.sleep(10)
print("[3] Removing models for maze_0.png")
for i in all_models:
sim.removeModel(i)
choice = input(
'\nDo you want to run your script on all test images ? => "y" or "n": ')
if choice == 'y':
for i in range(1, 5):
print('\n============================================')
print('\nFor maze_' + str(i) + '.png')
config_img = cv2.imread(img_dir + 'maze_' + str(i) + '.png')
# object handles of each model that gets imported to the scene can be stored in this list
# at the end of each test image, all the models will be removed
all_models = []
# import task_1a.py. Make sure that task_1a.py is in same folder as task_4a.py
task_1 = __import__('task_1a')
detected_arena_parameters = task_1.detect_arena_parameters(
config_img)
# obtain required arena parameters
medicine_package_details = detected_arena_parameters["medicine_packages"]
traffic_signals = detected_arena_parameters['traffic_signals']
start_node = detected_arena_parameters['start_node']
end_node = detected_arena_parameters['end_node']
horizontal_roads_under_construction = detected_arena_parameters[
'horizontal_roads_under_construction']
vertical_roads_under_construction = detected_arena_parameters[
'vertical_roads_under_construction']
print("[1] Setting up the scene in CoppeliaSim")
place_packages(medicine_package_details, sim, all_models)
place_traffic_signals(traffic_signals, sim, all_models)
place_horizontal_barricade(
horizontal_roads_under_construction, sim, all_models)
place_vertical_barricade(
vertical_roads_under_construction, sim, all_models)
place_start_end_nodes(start_node, end_node, sim, all_models)
print("[2] Completed setting up the scene in CoppeliaSim")
# wait for 10 seconds and then remove models
time.sleep(10)
print("[3] Removing models for maze_" + str(i) + '.png')
for i in all_models:
sim.removeModel(i)
| advait-0/eyrc22_PB_3004 | Task 4/Task 4A/task_4a.py | task_4a.py | py | 20,080 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
36788048136 | from pydantic import BaseSettings, BaseModel
from pathlib import Path
from .languages import WIKIPEDIA_LANGS
import toml
class Settings(BaseSettings):
status: str = "unknown"
logging_chat: int = None
db_path: Path = Path("jdanbot.db")
music_path: Path = Path("media/music")
admin_notes: list[str]
bot_owners: list[int] = [795449748, 0]
class Tokens(BaseModel):
bot_token: str
class Schedule(BaseModel):
delay_seconds: int = 20
katz_bots: bool = False
class Egg(BaseModel):
commands: list[str]
audio: Path
tokens: Tokens
schedule: Schedule = Schedule()
eggs: list[Egg]
with open("settings.toml") as file:
settings_file = toml.loads(file.read())
with open(".secrets.toml") as file:
secrets_file = toml.loads(file.read())
settings = Settings.parse_obj(settings_file | secrets_file)
BASE_DIR = Path(__file__).parent.parent.parent
LOCALES_DIR = BASE_DIR / "locales"
WIKIPEDIA_SHORTCUTS = {
"ru": ["w"],
"en": ["v"],
"uk": ["wua", "wikiua"]
}
WIKI_COMMANDS = []
for lang in WIKIPEDIA_LANGS:
WIKI_COMMANDS.extend([f"wiki{lang}", f"w{lang}"])
for lang in WIKIPEDIA_SHORTCUTS:
WIKI_COMMANDS.extend(WIKIPEDIA_SHORTCUTS[lang])
| jDan735/jdan734-bot | bot/config/config.py | config.py | py | 1,254 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pydantic.BaseSettings",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
... |
17217695574 | from extras import *
from common import Common
from indexer import Indexer
import math
import re
import nltk
import operator
from collections import defaultdict, OrderedDict
from nltk.stem import SnowballStemmer
from nltk.corpus import wordnet
class Query_Expansion:
def __init__(self):
"""
Constructor: Used to initialize all the class variables
"""
self.utility = Utility()
self.frequency_map = defaultdict()
self.synonyms_map = defaultdict()
self.file_handling = FileHandling()
self.common = Common()
self.indexer = Indexer()
def generate_expected_words_for_expansion(self, queries):
stopWords = self.utility.get_stop_list()
stemmer = SnowballStemmer("english")
for i in range (0,len(queries)):
query = queries[i]
listofwords = []
words = query.split()
for word in words:
word = word.lower()
stem = stemmer.stem(word)
expected = self.fetch_expected_words(word,stem)
if expected not in stopWords:
frequency = self.generate_frequency_map(word,expected)
if frequency > 0:
listofwords.append(expected)
self.frequency_map[i+1] = listofwords
return self.frequency_map
def generate_frequency_map(self,word,stem):
occurrences = 0
if stem in self.positional_index and word in self.positional_index:
dict_stem = self.positional_index[stem]
dict_word = self.positional_index[word]
for doc in dict_word:
if doc in dict_stem:
list1 = dict_word[doc]
list2 = dict_stem[doc]
pos1 = 0
for i in range(0, len(list1)):
pos1 = pos1 + list1[i]
pos2 = 0
for j in range(0, len(list2)):
pos2 = pos2 + list2[j]
if abs(pos1 - pos2) <= 12:
occurrences = occurrences + 1
break
return occurrences
def fetch_expected_words(self,word,stem):
if self.utility.check_word_exist(stem):
return stem
else:
return nltk.stem.WordNetLemmatizer().lemmatize(word)
def expand_queries_using_stemming(self, queries):
self.positional_index = self.indexer.read_index(index_type=True)
print('\n' + self.utility.line_break + '\n' +\
'Running Query Expansion using Stemming..')
stem_map = self.generate_expected_words_for_expansion(queries)
updated_query_map = defaultdict(set)
for i in range(len(queries)):
stop_words = self.utility.get_stop_list()
listofwords = stem_map[i+1]
for word in listofwords:
for syn in wordnet.synsets(word):
for l in syn.lemmas():
if str(l.name) not in queries[i] and '_' not in str(l.name) and str(l.name) not in stop_words:
updated_query_map[i+1].add(l.name())
if (len(updated_query_map[i+1])) > 4:
break
if len(updated_query_map[i+1]) > 4:
break
new_queries = []
for i in range (len(queries)):
old_query = queries[i]
new_query = old_query
for word in updated_query_map[i+1]:
new_query = new_query + " "+ str(word)
new_queries.append(new_query)
return new_queries
def create_tf(self,inverted_index):
tf = {}
for term in inverted_index:
c = 0
doc_to_frequency = inverted_index[term]
for doc in doc_to_frequency:
c = c + doc_to_frequency[doc]
tf[term] = c
return self.generatePotentialQuery(tf)
# generating potential query words by evaluating term frequency and removing stop words
def generatePotentialQuery(self,tf):
terms = []
total = 0
for key, value in tf.items():
total = total + value
potentialList = []
for key, value in tf.items():
if key not in self.utility.get_stop_list() and len(key) > 4:
potentialList.append(key)
return potentialList
# calculating dice's co-efficient for different terms
def diceCoff(self,list1, list2, invertedIndex):
associationDict = {}
for i in list1:
if i != "in" and i in invertedIndex:
docList = invertedIndex[i]
sum = 0
for j in list2:
docList2 = invertedIndex[j]
sum = 0
for k in docList2:
if k in docList:
sum = sum + 1
if sum > 10:
associationDict[i + " " + j] = sum * 1.0 / (len(docList) + len(docList2))
sorted_dict = OrderedDict(associationDict)
return sorted_dict
def expand_queries_using_pseduo_relevance(self, queries):
print('\n' + self.utility.line_break + '\n' +\
'Running Query Expansion using Pseduo Relevance..')
docs = self.common.read_top_documents_for_score(top=40)
relevant_docs = []
for record in docs:
relevant_docs.append((record.values()[0]))
self.indexer.create_save_indexer_with_relevant_docs(relevant_docs)
inverted_index = self.indexer.read_simple_index()
potential_list = self.create_tf(inverted_index)
updated_query_list = []
for i in range(len(queries)):
query = queries[i]
query = query.lower()
words_from_query = []
word_array = query.split()
for word in word_array:
word = re.sub(r'\W+', ' ', word)
if word not in self.utility.get_stop_list():
words_from_query.append(word)
updatedQuery = query
suggested_words = self.diceCoff(words_from_query,potential_list,inverted_index).items()
k = 0
for value in suggested_words:
if k > 8:
break
else:
words = value[0].split()
if words[1] not in updatedQuery:
updatedQuery = updatedQuery + ' ' + words[1]
k = k + 1
updated_query_list.append(updatedQuery)
return updated_query_list | ghildiyal-ashutosh/Search_Engine | tasks/query_expansion.py | query_expansion.py | py | 6,739 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "common.Common",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "indexer... |
10210350419 | from sklearn import datasets
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
# Multiclass Classification Datasets
def load_mnist():
    """Load the scikit-learn digits dataset and split it 80/20 (seeded)."""
    digits = datasets.load_digits(as_frame=True)
    X_train, X_test, y_train, y_test = train_test_split(
        digits.data, digits.target, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_forest_covertypes():
    """Load the forest covertype data, truncated to 15000 rows, split 80/20."""
    forest = datasets.fetch_covtype(as_frame=True)
    # Keep only the first 15000 samples (same truncation as before).
    features = forest.data[:15000]
    labels = forest.target[:15000]
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_kepler_exoplanets():
    """Load the Kepler exoplanet CSV, impute missing values, split 80/20.

    The label is 'koi_pdisposition'; identifier and disposition columns are
    excluded from the feature matrix.
    """
    kepler = pd.read_csv('data/kepler_exoplanet.csv')
    dropped = ['rowid', 'kepid', 'kepoi_name', 'kepler_name', 'koi_score',
               'koi_pdisposition', 'koi_disposition', 'koi_teq_err1',
               'koi_teq_err2', 'koi_tce_delivname']
    features = kepler.drop(dropped, axis=1)
    target = kepler['koi_pdisposition']
    # Replace NaNs column-wise with the most frequent value.
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
# Regression Datasets
def load_cali_housing():
    """Fetch the California housing regression data, split 80/20 (seeded)."""
    california = datasets.fetch_california_housing(as_frame=True)
    X_train, X_test, y_train, y_test = train_test_split(
        california.data, california.target, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_melbourne_housing():
    """Load the Melbourne housing CSV; target is 'Price'. Split 80/20.

    String-valued columns are converted to integer category codes before
    imputation (missing categorical entries become -1 at that step).
    """
    melbourne = pd.read_csv('data/melbourne_housing_data.csv')
    features = melbourne.drop(['Address', 'Price', 'SellerG', 'Date'], axis=1)
    target = melbourne['Price']
    # Encode the string-valued columns as integer category codes.
    for column in ['Suburb', 'Type', 'Method', 'CouncilArea', 'Regionname']:
        features[column] = pd.Categorical(features[column]).codes
    # Fill remaining gaps with each column's most frequent value.
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_world_happiness():
    """Load the 2021 World Happiness report; target is the ladder score.

    Returns an 80/20 train/test split (seeded).
    """
    happiness = pd.read_csv('data/world-happiness-report-2021.csv')
    X_columns = ['Regional indicator', 'Logged GDP per capita', 'Social support',
                 'Healthy life expectancy', 'Freedom to make life choices',
                 'Generosity', 'Perceptions of corruption']
    # BUG FIX: the original assigned into `happiness[X_columns]`, a slice of
    # the source frame, which triggers pandas' SettingWithCopyWarning and is
    # not guaranteed to take effect.  Work on an explicit copy instead.
    happiness_X = happiness[X_columns].copy()
    happiness_y = happiness['Ladder score']
    happiness_X['Regional indicator'] = pd.Categorical(
        happiness_X['Regional indicator']).codes
    X_train, X_test, y_train, y_test = train_test_split(
        happiness_X, happiness_y, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
# Binary Classification Datasets
def load_heart_attack():
    """Load the heart-attack CSV; 'output' is the binary label. Split 80/20."""
    heart = pd.read_csv('data/heart-attack.csv')
    X_train, X_test, y_train, y_test = train_test_split(
        heart.drop('output', axis=1), heart['output'],
        test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_stroke():
    """Load the stroke CSV; 'stroke' is the binary label. Split 80/20.

    Categorical columns are integer-encoded, then remaining NaNs are imputed
    with each column's most frequent value.
    """
    stroke = pd.read_csv('data/healthcare-dataset-stroke-data.csv')
    features = stroke.drop(['id', 'stroke'], axis=1)
    target = stroke['stroke']
    # Encode string columns as integer category codes.
    for column in ['gender', 'ever_married', 'work_type',
                   'Residence_type', 'smoking_status']:
        features[column] = pd.Categorical(features[column]).codes
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_telecom():
    """Load the telecom churn CSV; label is 'Churn' as integer category codes."""
    telecom = pd.read_csv('data/telecom_users.csv')
    features = telecom.drop(['Unnamed: 0', 'customerID', 'TotalCharges', 'Churn'],
                            axis=1)
    target = pd.Categorical(telecom['Churn']).codes
    # Every column except these numeric ones is treated as categorical.
    numeric_columns = {'SeniorCitizen', 'tenure', 'MonthlyCharges'}
    for column in features.columns:
        if column not in numeric_columns:
            features[column] = pd.Categorical(features[column]).codes
    # Replace NaNs column-wise with the most frequent value.
    imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
    features = pd.DataFrame(imputer.fit_transform(features),
                            columns=features.columns)
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
def load_multiclass():
    """Return name -> (X_train, X_test, y_train, y_test) for multiclass sets."""
    return {
        'mnist': load_mnist(),
        'forest_covertypes': load_forest_covertypes(),
        'kepler_exoplanets': load_kepler_exoplanets(),
    }
def load_regression():
    """Return name -> (X_train, X_test, y_train, y_test) for regression sets."""
    return {
        'california_housing': load_cali_housing(),
        'melbourne_housing': load_melbourne_housing(),
        'world_happiness': load_world_happiness(),
    }
def load_binary():
    """Return name -> (X_train, X_test, y_train, y_test) for binary sets."""
    return {
        'heart_attack': load_heart_attack(),
        'stroke': load_stroke(),
        'telecom': load_telecom(),
    }
| eccabay/CMA-ES_hyperparameters | load_data.py | load_data.py | py | 6,168 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.load_digits",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 14,
"usage_type": "call"
},
{... |
38516174008 | # pip install uszipcode
import pandas as pd
from uszipcode import SearchEngine
search = SearchEngine(simple_zipcode=False)
def add_coordinates(df):
    '''Attach latitude, longitude and county columns looked up by zip code.

    Input: a pandas dataframe which includes a 'Zip' field representing a
    US zip code.
    Output: the same dataframe (modified in place) with 'lat', 'lng' and
    'county' fields added; the frame is also returned for convenience.
    '''
    # Look each zip code up ONCE instead of once per output column (the
    # original performed three by_zipcode() lookups per row), and drop the
    # pointless per-element pd.Series wrapping.
    zipcodes = df['Zip'].apply(search.by_zipcode)
    df['lat'] = zipcodes.apply(lambda z: z.lat)
    df['lng'] = zipcodes.apply(lambda z: z.lng)
    df['county'] = zipcodes.apply(lambda z: z.county)
    return df
def add_fips(df):
    '''Restrict *df* to Colorado rows and attach county FIPS codes.

    Input: a pandas dataframe which includes a 'Zip' field representing a
    US zip code.
    Output: a new dataframe containing only the rows whose zip code maps to
    a Colorado county, with 'county' and 'fip' columns added.
    '''
    colorado_fips = ['08001', '08003', '08005', '08007', '08009', '08011', '08013', '08014', '08015', '08017', '08019',
                     '08021', '08023', '08025', '08027', '08029', '08031', '08033', '08035', '08037', '08039', '08041',
                     '08043', '08045', '08047', '08049', '08051', '08053', '08055', '08057', '08059', '08061', '08063',
                     '08065', '08067', '08069', '08071', '08073', '08075', '08077', '08079', '08081', '08083', '08085',
                     '08087', '08089', '08091', '08093', '08095', '08097', '08099', '08101', '08103', '08105', '08107',
                     '08109', '08111', '08113', '08115', '08117', '08119', '08121', '08123', '08125']
    colorado_counties = ['Adams County', 'Alamosa County', 'Arapahoe County', 'Archuleta County', 'Baca County', 'Bent County', 'Boulder County', 'Broomfield County',
                         'Chaffee County', 'Cheyenne County', 'Clear Creek County', 'Conejos County', 'Costilla County', 'Crowley County', 'Custer County',
                         'Delta County', 'Denver County', 'Dolores County', 'Douglas County', 'Eagle County', 'Elbert County', 'El Paso County', 'Fremont County',
                         'Garfield County', 'Gilpin County', 'Grand County', 'Gunnison County', 'Hinsdale County', 'Huerfano County', 'Jackson County', 'Jefferson County',
                         'Kiowa County', 'Kit Carson County', 'Lake County', 'La Plata County', 'Larimer County', 'Las Animas County', 'Lincoln County', 'Logan County',
                         'Mesa County', 'Mineral County', 'Moffat County', 'Montezuma County', 'Montrose County', 'Morgan County', 'Otero County', 'Ouray County',
                         'Park County', 'Phillips County', 'Pitkin County', 'Prowers County', 'Pueblo County', 'Rio Blanco County', 'Rio Grande County', 'Routt County',
                         'Saguache County', 'San Juan County', 'San Miguel County', 'Sedgwick County', 'Summit County', 'Teller County', 'Washington County',
                         'Weld County', 'Yuma County']
    counties_fips_dict = dict(zip(colorado_counties, colorado_fips))
    # One lookup per zip; no per-element pd.Series wrapping needed.
    df['county'] = df['Zip'].apply(lambda x: search.by_zipcode(x).county)
    # BUG FIX: .copy() prevents pandas' SettingWithCopyWarning (and possibly
    # silently lost writes) on the 'fip' assignment below.
    col_only_df = df[df['county'].isin(colorado_counties)].copy()
    col_only_df['fip'] = col_only_df['county'].map(counties_fips_dict)
    return col_only_df
| dslachar/analysis_of_crp_data | add_coordinates.py | add_coordinates.py | py | 3,241 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "uszipcode.SearchEngine",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
... |
import pygame
import Snake, Apple
import os
# Center the game window on screen; must be set before pygame creates it.
os.environ['SDL_VIDEO_CENTERED'] = '1'
# Smaller mixer buffer (512) to reduce sound-effect latency.
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.init()
pygame.display.set_caption("Sssssnake")
# 10x10 segments
# Window: a 200x200 playfield below a 20 px score bar (total 200x240).
winwidth = 200
winheight = 240
win = pygame.display.set_mode((winwidth, winheight))
# segment - 20x20px
segsize = 20
# Snake starts at (0, 20), i.e. the top-left of the playfield; drawn segments
# are 2 px smaller than the grid cell so neighbours stay visually distinct.
snake = Snake.Snake(0, 20, segsize, segsize-2, segsize-2)
# The apple receives the snake's segments so it will not spawn on top of them.
apple = Apple.Apple(segsize//2, winwidth, winheight, segsize, snake.segments)
# font
font = pygame.font.SysFont("monospace", 15)
# sounds
eatsound = pygame.mixer.Sound('sounds/eat sound.wav')
losesound = pygame.mixer.Sound('sounds/lose sound.wav')
music = pygame.mixer.music.load('sounds/bg music.mp3')
# Loop the background music indefinitely.
pygame.mixer.music.play(-1)
def lost():
    """Show the game-over screen and wait for the player's choice.

    Blocks on the event queue until the player presses 'y' (restart: a fresh
    snake is created, the score reset and music restarted), presses 'n', or
    closes the window (both of which stop the main loop via ``running``).
    Mutates the module-level ``snake``, ``score`` and ``running`` globals.
    """
    pygame.mixer.music.stop()
    losesound.play()
    global win, running, snake, score
    gameover = font.render("GAME OVER :(", 1, (255, 255, 255))
    playagain = font.render("Play again?", 1, (255, 255, 255))
    yorn = font.render("(y) (n)", 1, (255, 255, 255))
    win.blit(gameover, (winwidth//2 - 55, winheight//2 - 35))
    win.blit(playagain, (winwidth//2 - 52, winheight//2 - 15))
    win.blit(yorn, (winwidth//2 - 40, winheight//2))
    pygame.display.update()
    # Drop events queued during play so stale keypresses are not consumed here.
    pygame.event.clear()
    while True:
        event = pygame.event.wait()
        if event.type == pygame.QUIT:
            running = False
            break
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_y:
                # Restart: fresh snake, re-point the apple's forbidden cells
                # at the new snake's segments, reset score, restart music.
                snake = Snake.Snake(0, 20, segsize, segsize-2, segsize-2)
                apple.forbidden = snake.segments
                score = 0
                pygame.mixer.music.play(-1)
                break
            if event.key == pygame.K_n:
                running = False
                break
# mainloop
running = True
score = 0
while running:
    # Fixed 200 ms tick: one grid step per iteration.
    pygame.time.delay(200)
    # Only the first direction key per tick is honoured; this prevents a
    # double key-press within one tick from reversing the snake onto itself.
    keypressed = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        # Directions are (row, col) deltas; a turn directly opposite to the
        # current direction is rejected.
        if event.type == pygame.KEYDOWN and not keypressed:
            if event.key == pygame.K_UP and snake.direction != (1, 0):
                snake.direction = (-1, 0)
                keypressed = True
            elif event.key == pygame.K_DOWN and snake.direction != (-1, 0):
                snake.direction = (1, 0)
                keypressed = True
            elif event.key == pygame.K_LEFT and snake.direction != (0, 1):
                snake.direction = (0, -1)
                keypressed = True
            elif event.key == pygame.K_RIGHT and snake.direction != (0, -1):
                snake.direction = (0, 1)
                keypressed = True
    # calculating new position
    tempx = snake.x + snake.vel * snake.direction[1]
    tempy = snake.y + snake.vel * snake.direction[0]
    # The playfield spans x in [0, winwidth] and y in [segsize, winheight - segsize]
    # (the top 20 px strip is the score bar).
    if 0 <= tempx <= winwidth - snake.segwidth and segsize <= tempy <= winheight - segsize - snake.segheigth:
        snake.x = tempx
        snake.y = tempy
    else:
        #collision with borders
        lost()
        continue
    snake.move()
    # collision with snake
    if snake.segments.count((snake.x, snake.y)) > 1:
        lost()
        continue
    win.fill((0, 0, 0))
    # collision with apple
    if snake.x <= apple.x <= snake.x + segsize and snake.y <= apple.y <= snake.y + segsize:
        eatsound.play()
        snake.addsegment()
        snake.draw(win)
        apple.setposition()
        score += 1
    else:
        snake.draw(win)
    label = font.render("Score {}".format(score), 1, (255, 255, 255))
    win.blit(label, (winwidth - 70, 0))
    apple.draw(win)
    # Yellow border marks the playable area below the score bar.
    pygame.draw.rect(win, (255, 255, 0), (1, 20, winwidth - 2, winheight - 2*segsize), 1)
    pygame.display.update()
pygame.quit()
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.pre_init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
... |
31458300119 | import pygame
import jumpingHorses.constants as Constants
from .constants import BLACK_PIECE, WHITE_PIECE, MOVE_COLOR, MOVE_RADIUS, SQUARE_SIZE, WIDTH, HEIGHT, LETTER_GAP_SIZE, OUTLINE_SIZE
from .board import Board
from menu.main_menu import MainMenu
import menu.gameState as GameState
class GameMaster:
    """Controller for the jumping-horses game: tracks the board, whose turn it
    is, and the currently selected piece, and mediates player/AI moves."""
    # Initialize the controller and remember the surface to draw on.
    def __init__(self, surface):
        self._init()
        self.surface = surface
    # Redraw everything currently visible: the board plus, if a piece is
    # selected, markers for its legal moves.
    def update(self):
        self.board.draw(self.surface)
        if self.selectedPiece != None:
            self.draw_valid_moves(self.valid_moves)
    # Restart the game from scratch.
    def reset(self):
        self._init()
    # Clear the current state (fresh board, white to move, nothing selected).
    def _init(self):
        self.selectedPiece = None
        self.turn = WHITE_PIECE
        self.board = Board()
        self.valid_moves = {}
    # Handle a click at *pos*: either select a piece of the side to move, or
    # try to move the already-selected piece to the clicked square.
    # Returns True when the click resulted in a selection.
    def select(self, pos):
        if LETTER_GAP_SIZE < pos[0] < WIDTH+LETTER_GAP_SIZE and pos[1] < HEIGHT:
            row, col = self.get_row_col_from_mouse(pos)
            if self.selectedPiece:  # a piece is already selected
                result = self._move(row, col)  # move it if the target is a legal move
                self.selectedPiece = None
                if not result:  # illegal target -> treat the click as a new selection
                    self.selectedPiece = None
                    self.select(pos)
                return True
            # First-time selection, or the previous click was not a legal move.
            piece = self.board.get_piece(row, col)
            if piece != 0 and piece.color == self.turn:
                self.selectedPiece = piece
                self.valid_moves = self.board.get_valid_moves(piece)
                return True
        self.selectedPiece = None
        return False
    # Move the selected piece to (row, col); only succeeds when the target
    # square is empty and among the precomputed valid moves.
    def _move (self, row, col):
        piece = self.board.get_piece(row,col)
        if self.selectedPiece and piece == 0 and (row,col) in self.valid_moves:
            self.board.move(self.selectedPiece, row, col)
            self.change_turn()
        else:
            return False
        return True
    # Switch whose turn it is and drop any stale move markers.
    def change_turn(self):
        self.valid_moves = {}
        if self.turn == BLACK_PIECE:
            self.turn = WHITE_PIECE
        else:
            self.turn = BLACK_PIECE
    # Highlight the selected piece (green ring) and draw a marker on each of
    # its legal destination squares.
    def draw_valid_moves(self, moves):
        pygame.draw.circle(self.surface, (0,255,0), (self.selectedPiece.x, self.selectedPiece.y), self.selectedPiece.radius+OUTLINE_SIZE, 5)
        for move in moves:
            row, col = move
            pygame.draw.circle(self.surface, MOVE_COLOR, (col*SQUARE_SIZE + SQUARE_SIZE//2, row*SQUARE_SIZE + SQUARE_SIZE//2) , MOVE_RADIUS)
    # Convert a mouse position into a board (row, col), accounting for the
    # letter gutter on the left edge.
    def get_row_col_from_mouse(self, pos):
        x, y = pos
        row = y // SQUARE_SIZE
        col = (x - LETTER_GAP_SIZE) // SQUARE_SIZE
        return row, col
    # Apply the board produced by the AI and pass the turn back to the player.
    def ai_move(self, board):
        self.board = board
        self.change_turn()
    # Check for a winner and update the global menu game state accordingly.
    def check_winner(self):
        if self.board.winner() == Constants.starting_player[1]:
            GameState.currentState = GameState.State.win
        elif self.board.winner() == Constants.starting_player[2]:
            GameState.currentState = GameState.State.lost
    def get_board(self):
        #print("get_board called")
        #self.board.print_board()
        return self.board
| perkonss/AI1darbs | jumpingHorses/game_master.py | game_master.py | py | 3,699 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "constants.WHITE_PIECE",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "board.Board",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "constants.LETTER_GAP_SIZE",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "constants... |
37350976457 | """Zero-field splitting.
See::
Spin decontamination for magnetic dipolar coupling calculations:
Application to high-spin molecules and solid-state spin qubits
Timur Biktagirov, Wolf Gero Schmidt, and Uwe Gerstmann
Phys. Rev. Research 2, 022024(R) – Published 30 April 2020
"""
from math import pi
from typing import List, Tuple, Dict
import numpy as np
from ase.units import Bohr, Ha, _c, _e, _hplanck
from my_gpaw.calculator import GPAW
from my_gpaw.grid_descriptor import GridDescriptor
from my_gpaw.typing import Array1D, Array2D, Array4D
from my_gpaw.hyperfine import alpha # fine-structure constant: ~ 1 / 137
from my_gpaw.setup import Setup
from my_gpaw.pw.lfc import PWLFC
from my_gpaw.pw.descriptor import PWDescriptor
from my_gpaw.mpi import serial_comm
def zfs(calc: GPAW,
        method: int = 1) -> Array2D:
    """Zero-field splitting.

    Calculate magnetic dipole coupling tensor in eV.

    The calculation must be spin-polarized and gamma-point only, with the
    first spin channel holding exactly two more occupied orbitals than the
    second (asserted below).  With ``method == 1`` only the two extra
    (singly-occupied) orbitals of channel 0 are used; otherwise all occupied
    orbitals of both channels enter the double sum.
    """
    (kpt1, kpt2), = calc.wfs.kpt_qs  # spin-polarized and gamma only
    nocc1 = (kpt1.f_n > 0.5).sum()
    nocc2 = (kpt2.f_n > 0.5).sum()
    assert nocc1 == nocc2 + 2, (nocc1, nocc2)
    if method == 1:
        wf1 = WaveFunctions.from_calc(calc, 0, nocc1 - 2, nocc1)
        wf12 = [wf1]
    else:
        wf1 = WaveFunctions.from_calc(calc, 0, 0, nocc1)
        wf2 = WaveFunctions.from_calc(calc, 1, 0, nocc2)
        wf12 = [wf1, wf2]
    D_vv = np.zeros((3, 3))
    # Only MPI rank 0 does the work; the result is broadcast to all ranks.
    if calc.world.rank == 0:
        compensation_charge = create_compensation_charge(wf1.setups,
                                                         wf1.pd,
                                                         calc.spos_ac)
        # Accumulate contributions over all (spin-block) pairs.
        for wfa in wf12:
            for wfb in wf12:
                d_vv = zfs1(wfa, wfb, compensation_charge)
                D_vv += d_vv
    calc.world.broadcast(D_vv, 0)
    return D_vv
class WaveFunctions:
    def __init__(self,
                 psit_nR: Array4D,
                 P_ani: Dict[int, Array2D],
                 spin: int,
                 setups: List[Setup],
                 gd: GridDescriptor = None,
                 pd: PWDescriptor = None):
        """Container for wave function in real-space and projections.

        psit_nR holds the pseudo wave functions on the real-space grid (one
        per band); P_ani maps atom index -> PAW projection coefficients.
        Either a grid descriptor *gd* or a plane-wave descriptor *pd* must
        be supplied; a PWDescriptor is created from *gd* when *pd* is None.
        """
        self.pd = pd or PWDescriptor(ecut=None, gd=gd)
        self.psit_nR = psit_nR
        self.P_ani = P_ani
        self.spin = spin
        self.setups = setups
    @staticmethod
    def from_calc(calc: GPAW, spin: int, n1: int, n2: int) -> 'WaveFunctions':
        """Create WaveFunctions object GPAW calculation.

        Extracts bands n1..n2-1 of the given spin channel at the gamma
        point, on a serial, fully periodic grid.
        """
        kpt = calc.wfs.kpt_qs[0][spin]
        gd = calc.wfs.gd.new_descriptor(pbc_c=np.ones(3, bool),
                                        comm=serial_comm)
        psit_nR = gd.empty(n2 - n1)
        for band, psit_R in enumerate(psit_nR):
            # Bohr**1.5 rescales the amplitudes to atomic units
            # (GPAW returns them in Ang**-1.5 — confirm against GPAW docs).
            psit_R[:] = calc.get_pseudo_wave_function(
                band + n1,
                spin=spin) * Bohr**1.5
        return WaveFunctions(psit_nR,
                             kpt.projections.as_dict_on_master(n1, n2),
                             spin,
                             calc.setups,
                             gd=gd)
    def __len__(self) -> int:
        # Number of bands held in this container.
        return len(self.psit_nR)
def create_compensation_charge(setups: List[Setup],
                               pd: PWDescriptor,
                               spos_ac: Array2D) -> PWLFC:
    """Build a plane-wave expansion of the PAW compensation charges (ghat
    functions of each setup), positioned at the scaled coordinates *spos_ac*."""
    compensation_charge = PWLFC([data.ghat_l for data in setups], pd)
    compensation_charge.set_positions(spos_ac)
    return compensation_charge
def zfs1(wf1: WaveFunctions,
         wf2: WaveFunctions,
         compensation_charge: PWLFC) -> Array2D:
    """Compute dipole coupling.

    First adds the "direct" contribution from the product of the two total
    (pseudo + compensation) densities, then subtracts, band by band of *wf1*,
    the "exchange-like" contribution built from pair densities psit1*psit2.
    Result is in eV; sign flips for opposite spins.
    """
    pd = wf1.pd
    setups = wf1.setups
    N2 = len(wf2)
    G_G = pd.G2_qG[0]**0.5
    # Avoid division by zero for the G = 0 component.
    G_G[0] = 1.0
    # Unit vectors along each reciprocal lattice vector.
    G_Gv = pd.get_reciprocal_vectors(add_q=False) / G_G[:, np.newaxis]
    # Densities of the two wave-function blocks in reciprocal space,
    # including PAW compensation charges.
    n_sG = pd.zeros(2)
    for n_G, wf in zip(n_sG, [wf1, wf2]):
        D_aii = {}
        Q_aL = {}
        for a, P_ni in wf.P_ani.items():
            D_ii = np.einsum('ni, nj -> ij', P_ni, P_ni)
            D_aii[a] = D_ii
            Q_aL[a] = np.einsum('ij, ijL -> L', D_ii, setups[a].Delta_iiL)
        for psit_R in wf.psit_nR:
            n_G += pd.fft(psit_R**2)
        compensation_charge.add(n_G, Q_aL)
    # Direct term: product of the two densities.
    nn_G = (n_sG[0] * n_sG[1].conj()).real
    D_vv = zfs2(pd, G_Gv, nn_G)
    # Exchange-like term: subtract, for every band of wf1, the contribution
    # of its pair densities with all bands of wf2.
    n_nG = pd.empty(N2)
    for n1, psit1_R in enumerate(wf1.psit_nR):
        D_anii = {}
        Q_anL = {}
        for a, P1_ni in wf1.P_ani.items():
            D_nii = np.einsum('i, nj -> nij', P1_ni[n1], wf2.P_ani[a])
            D_anii[a] = D_nii
            Q_anL[a] = np.einsum('nij, ijL -> nL',
                                 D_nii, setups[a].Delta_iiL)
        for n_G, psit2_R in zip(n_nG, wf2.psit_nR):
            n_G[:] = pd.fft(psit1_R * psit2_R)
        compensation_charge.add(n_nG, Q_anL)
        nn_G = (n_nG * n_nG.conj()).sum(axis=0).real
        D_vv -= zfs2(pd, G_Gv, nn_G)
    D_vv -= np.trace(D_vv) / 3 * np.eye(3)  # should be traceless
    # Opposite-spin pairs contribute with negative sign.
    sign = 1.0 if wf1.spin == wf2.spin else -1.0
    return sign * alpha**2 * pi * D_vv * Ha
def zfs2(pd: PWDescriptor,
         G_Gv: Array2D,
         nn_G: Array1D) -> Array2D:
    """Integral.

    Sums nn_G-weighted outer products of the supplied direction vectors
    G_Gv over all plane waves, scaled by 2 * dv / N (grid volume element
    over number of grid points).
    """
    D_vv = np.einsum('gv, gw, g -> vw', G_Gv, G_Gv, nn_G)
    D_vv *= 2 * pd.gd.dv / pd.gd.N_c.prod()
    return D_vv
def convert_tensor(D_vv: Array2D,
                   unit: str = 'eV') -> Tuple[float, float, Array1D, Array2D]:
    """Convert 3x3 tensor to D, E and easy axis.

    Input tensor must be in eV and the result can be returned in
    eV, μeV, MHz or 1/cm acording to the value uf *unit*
    (must be one of "eV", "ueV", "MHz", "1/cm").

    >>> D_vv = np.diag([1, 2, 3])
    >>> D, E, axis, _ = convert_tensor(D_vv)
    >>> D
    4.5
    >>> E
    0.5
    >>> axis
    array([0., 0., 1.])
    """
    # Conversion factors are evaluated lazily so that only the requested
    # unit's constants are touched.
    if unit == 'eV':
        scale = 1.0
    elif unit == 'ueV':
        scale = 1e6
    elif unit == 'MHz':
        scale = _e / _hplanck * 1e-6
    elif unit == '1/cm':
        scale = _e / _hplanck / _c / 100
    else:
        raise ValueError(f'Unknown unit: {unit}')
    # eigh returns eigenvalues in ascending order.
    eigenvalues, eigenvectors = np.linalg.eigh(D_vv * scale)
    low, mid, high = eigenvalues
    if abs(low) > abs(high):
        # Largest-magnitude eigenvalue is the most negative one: the easy
        # axis is its eigenvector and D picks up that sign.
        D = 1.5 * low
        E = 0.5 * (mid - high)
        axis = eigenvectors[:, 0]
    else:
        D = 1.5 * high
        E = 0.5 * (mid - low)
        axis = eigenvectors[:, 2]
    return D, E, axis, D_vv * scale
def main(argv: List[str] = None) -> Array2D:
    """CLI interface.

    Loads a GPW file, computes the zero-field-splitting tensor, prints it
    together with D, E and the easy axis in the requested unit, and returns
    the scaled tensor.
    """
    import argparse
    parser = argparse.ArgumentParser(
        prog='python3 -m gpaw.zero_field_splitting',
        description='...')
    add = parser.add_argument
    add('file', metavar='input-file',
        help='GPW-file with wave functions.')
    add('-u', '--unit', default='ueV', choices=['ueV', 'MHz', '1/cm'],
        help='Unit. Must be "ueV" (micro-eV, default), "MHz" or "1/cm".')
    add('-m', '--method', type=int, default=1)
    args = parser.parse_intermixed_args(argv)
    calc = GPAW(args.file)
    D_vv = zfs(calc, args.method)
    D, E, axis, D_vv = convert_tensor(D_vv, args.unit)
    unit = args.unit
    if unit == 'ueV':
        unit = 'μeV'
    print('D_ij = (' +
          ',\n        '.join('(' + ', '.join(f'{d:10.3f}' for d in D_v) + ')'
                             for D_v in D_vv) +
          ') ', unit)
    print('i, j = x, y, z')
    print()
    print(f'D = {D:.3f} {unit}')
    print(f'E = {E:.3f} {unit}')
    x, y, z = axis
    print(f'axis = ({x:.3f}, {y:.3f}, {z:.3f})')
    return D_vv
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/zero_field_splitting.py | zero_field_splitting.py | py | 7,481 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "my_gpaw.calculator.GPAW",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "my_gpaw.typing.Array2D",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "my_gpaw.ty... |
19840085119 | import numpy as np
import tensorflow as tf
from mpl_toolkits import mplot3d
from d2l import tensorflow as d2l
def f(x):
    """Objective function f(x) = x^2."""
    return x * x
def f_grad(x):
    """Gradient of f: f'(x) = 2x."""
    return x + x
def gd(eta, f_grad):
    """Run 10 steps of 1-D gradient descent from x = 10.

    Returns the full trajectory (11 points including the start) and prints
    the final iterate.
    """
    x = 10.0
    trajectory = [x]
    for _ in range(10):
        x = x - eta * f_grad(x)
        trajectory.append(float(x))
    print(f'Epoch 10, x: {x:f}')
    return trajectory
def show_trace(results, f):
    """Plot f over a symmetric range together with the optimization trace."""
    bound = max(abs(min(results)), abs(max(results)))
    f_line = tf.range(-bound, bound, 0.01)
    d2l.set_figsize()
    d2l.plot([f_line, results],
             [[f(x) for x in f_line], [f(x) for x in results]],
             'x', 'f(x)', fmts=['-', '-o'])
def train_2d(trainer, steps=20):
    """Iterate a 2-D optimizer from (-5, -2) and return the visited points.

    *trainer* maps (x1, x2, s1, s2) -> (x1, x2, s1, s2), where s1/s2 are
    optimizer state slots (e.g. momenta) threaded through the iterations.
    """
    state = (-5, -2, 0, 0)
    results = [state[:2]]
    for _ in range(steps):
        state = trainer(*state)
        results.append(state[:2])
    return results
def show_trace_2d(f, results):
    """Show the trace of 2D variables during optimization."""
    d2l.set_figsize()
    d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
    grid1, grid2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1),
                               np.arange(-3.0, 1.0, 0.1))
    d2l.plt.contour(grid1, grid2, f(grid1, grid2), colors='#1f77b4')
    d2l.plt.xlabel('x1')
    d2l.plt.ylabel('x2')
# 2-D objective f(x1, x2) = x1^2 + 2*x2^2 and its analytic gradient.
# NOTE(review): these rebind the 1-D `f` defined above.
f = lambda x1, x2: x1 ** 2 + 2 * x2 ** 2
gradf = lambda x1, x2: (2 * x1, 4 * x2)
# NOTE(review): this `gd` shadows the 1-D gd() defined earlier, and it reads
# the learning rate `eta` from module scope -- `eta` must be assigned before
# gd is called (done in the __main__ block below).
def gd(x1, x2, s1, s2):
    # s1/s2 are unused state slots kept only to match train_2d's trainer signature.
    (g1, g2) = gradf(x1, x2)
    return (x1 - eta * g1, x2 - eta * g2, 0, 0)
if __name__ == "__main__":
    # results = gd(0.3, f_grad)
    # show_trace(results, f)
    eta = 0.1
    results = train_2d(gd)
    print(results)
| AnhVietPham/Deep-Learning | optimization-algrithms/gradient-descent/main.py | main.py | py | 1,568 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.range",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "d2l.tensorflow.set_figsize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "d2l.tensorflow",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "d2l.tensor... |
38568112529 | import collections
class Solution:
    """Find the largest "lucky" number in an array (LeetCode 1394 style)."""

    def findLucky(self, arr):
        """Return the largest value whose frequency equals itself, else -1."""
        frequencies = collections.Counter(arr)
        lucky = [value for value, freq in frequencies.items() if value == freq]
        return max(lucky) if lucky else -1
# Quick manual check: no value in [4, 2, 3] occurs exactly "itself" times,
# so this prints -1.
s = Solution()
print(s.findLucky([4,2,3]))
'''
using a dictionary: takes longer
sorting: e.g. [2, 3, 4] -- how would order alone find a lucky number?
Counter method:
>>> array = [1,2,2,3,3,3]
>>> Counter(array)
Counter({3: 3, 2: 2, 1: 1})
>>> c = Counter(array)
>>> c.keys()
dict_keys([1, 2, 3])
>>> c.values()
dict_values([1, 2, 3])
>>> c[3]
3
'''
{
"api_name": "collections.Counter",
"line_number": 4,
"usage_type": "call"
}
] |
34182105193 | import time
from unittest import skip
from qiskit.providers.jobstatus import JobStatus
from qiskit_ibm_runtime.exceptions import RuntimeJobTimeoutError
from ..unit.mock.proxy_server import MockProxyServer, use_proxies
from ..ibm_test_case import IBMIntegrationJobTestCase
from ..decorators import run_integration_test
from ..utils import cancel_job_safe, wait_for_status
class TestIntegrationResults(IBMIntegrationJobTestCase):
"""Integration tests for result callbacks."""
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_result_callback(self, service):
"""Test result callback."""
def result_callback(job_id, result):
nonlocal final_it
if "iteration" in result:
final_it = result["iteration"]
nonlocal callback_err
if job_id != job.job_id():
callback_err.append(f"Unexpected job ID: {job_id}")
if "interim_results" in result and result["interim_results"] != int_res:
callback_err.append(f"Unexpected interim result: {result}")
int_res = "foo"
final_it = 0
callback_err = []
iterations = 3
job = self._run_program(
service,
backend="ibmq_qasm_simulator",
interim_results=int_res,
callback=result_callback,
)
job.wait_for_final_state()
self.assertEqual(iterations - 1, final_it)
self.assertFalse(callback_err)
self.assertIsNotNone(job._ws_client._server_close_code)
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_result_callback_with_job_result(self, service):
"""Test result callback along with job result."""
def result_callback(job_id, result):
nonlocal count
count = count + 1
nonlocal final_it
if "iteration" in result:
final_it = result["iteration"]
nonlocal callback_err
if job_id != job.job_id():
callback_err.append(f"Unexpected job ID: {job_id}")
if "interim_results" in result and result["interim_results"] != int_res:
callback_err.append(f"Unexpected interim result: {result}")
int_res = "foo"
count = 0
final_it = 0
callback_err = []
iterations = 3
job = self._run_program(
service,
backend="ibmq_qasm_simulator",
interim_results=int_res,
callback=result_callback,
)
job.result()
self.assertEqual(iterations - 1, final_it)
self.assertEqual(iterations + 1, count)
self.assertFalse(callback_err)
self.assertIsNotNone(job._ws_client._server_close_code)
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_stream_results(self, service):
"""Test stream_results method."""
def result_callback(job_id, result):
nonlocal final_it
if "iteration" in result:
final_it = result["iteration"]
nonlocal callback_err
if job_id != job.job_id():
callback_err.append(f"Unexpected job ID: {job_id}")
if "interim_results" in result and result["interim_results"] != int_res:
callback_err.append(f"Unexpected interim result: {result}")
int_res = "bar"
final_it = 0
callback_err = []
iterations = 3
job = self._run_program(
service,
backend="ibmq_qasm_simulator",
interim_results=int_res,
)
job.stream_results(result_callback)
job.wait_for_final_state()
self.assertEqual(iterations - 1, final_it)
self.assertFalse(callback_err)
self.assertIsNotNone(job._ws_client._server_close_code)
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_stream_results_done(self, service):
"""Test streaming results after job is done."""
def result_callback(job_id, result):
# pylint: disable=unused-argument
nonlocal called_back_count
called_back_count += 1
called_back_count = 0
job = self._run_program(
service,
backend="ibmq_qasm_simulator",
interim_results="foobar",
)
job.wait_for_final_state()
job._status = JobStatus.RUNNING # Allow stream_results()
job.stream_results(result_callback)
time.sleep(2)
# Callback is expected twice because both interim and final results are returned
self.assertEqual(2, called_back_count)
self.assertIsNotNone(job._ws_client._server_close_code)
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_retrieve_interim_results(self, service):
"""Test retrieving interim results with API endpoint"""
job = self._run_program(service)
job.wait_for_final_state()
interim_results = job.interim_results()
self.assertIn("iteration", interim_results[0])
self.assertIn("counts", interim_results[0])
@run_integration_test
def test_result_timeout(self, service):
"""Test job result timeout"""
job = self._run_program(service)
with self.assertRaises(RuntimeJobTimeoutError):
job.result(0.1)
@run_integration_test
def test_wait_for_final_state_timeout(self, service):
"""Test job wait_for_final_state timeout"""
job = self._run_program(service)
with self.assertRaises(RuntimeJobTimeoutError):
job.wait_for_final_state(0.1)
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_callback_error(self, service):
"""Test error in callback method."""
def result_callback(job_id, result):
# pylint: disable=unused-argument
if "iteration" in result and result["iteration"] == 0:
raise ValueError("Kaboom!")
nonlocal final_it
if "iteration" in result:
final_it = result["iteration"]
final_it = 0
iterations = 10
inputs = {"iterations": iterations, "sleep_per_iteration": 3}
with self.assertLogs("qiskit_ibm_runtime", level="WARNING") as err_cm:
job = self._run_program(
service,
backend="ibmq_qasm_simulator",
inputs=inputs,
interim_results="foo",
callback=result_callback,
)
job.wait_for_final_state()
self.assertIn("Kaboom", ", ".join(err_cm.output))
self.assertEqual(iterations - 1, final_it)
self.assertIsNotNone(job._ws_client._server_close_code)
@run_integration_test
def test_callback_cancel_job(self, service):
    """Test canceling a running job while streaming results."""

    def result_callback(job_id, result):
        # pylint: disable=unused-argument
        nonlocal final_it
        if "iteration" in result:
            final_it = result["iteration"]

    final_it = 0
    iterations = 5
    sub_tests = [JobStatus.QUEUED, JobStatus.RUNNING]

    for status in sub_tests:
        with self.subTest(status=status):
            if status == JobStatus.QUEUED:
                # Submit an extra job first so the next one stays QUEUED.
                _ = self._run_program(service)
            job = self._run_program(
                service=service,
                interim_results="foo",
                callback=result_callback,
            )
            wait_for_status(job, status)
            # Bail out of the whole test if cancellation is not possible.
            if not cancel_job_safe(job, self.log):
                return
            time.sleep(3)  # Wait for cleanup
            self.assertIsNotNone(job._ws_client._server_close_code)
            # A canceled job must not have streamed all its iterations.
            self.assertLess(final_it, iterations)
@skip("skip until qiskit-ibm-runtime #933 is fixed")
@run_integration_test
def test_websocket_proxy(self, service):
"""Test connecting to websocket via proxy."""
def result_callback(job_id, result): # pylint: disable=unused-argument
nonlocal callback_called
callback_called = True
MockProxyServer(self, self.log).start()
callback_called = False
with use_proxies(service, MockProxyServer.VALID_PROXIES):
job = self._run_program(
service,
backend="ibmq_qasm_simulator",
callback=result_callback,
)
job.wait_for_final_state()
self.assertTrue(callback_called)
@run_integration_test
def test_websocket_proxy_invalid_port(self, service):
    """Test connecting to websocket via invalid proxy port."""

    def result_callback(job_id, result):  # pylint: disable=unused-argument
        nonlocal callback_called
        callback_called = True

    callback_called = False
    invalid_proxy = {
        "https": "http://{}:{}".format(
            MockProxyServer.PROXY_IP_ADDRESS, MockProxyServer.INVALID_PROXY_PORT
        )
    }
    # TODO - verify WebsocketError in output log. For some reason self.assertLogs
    # doesn't always work even when the error is clearly logged.
    with use_proxies(service, invalid_proxy):
        job = self._run_program(service, callback=result_callback)
        job.wait_for_final_state()
    # Streaming should have failed silently: the callback never fired.
    self.assertFalse(callback_called)
| Qiskit/qiskit-ibm-runtime | test/integration/test_results.py | test_results.py | py | 9,622 | python | en | code | 106 | github-code | 36 | [
{
"api_name": "ibm_test_case.IBMIntegrationJobTestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "unittest.skip",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "decorators.run_integration_test",
"line_number": 17,
"usage_type": "name"
},
{
... |
44209779593 | """OWM API parser for creating weather report of different cities"""
from app.input_file_handler import *
from app.logging_and_error_handler import *
import requests
class CityReport:
    """Get report from OWM for current city and process it"""

    def __init__(self, city: City):
        """
        :param city: city class, for which we want to have report
        """
        self.city = city
        # NOTE(review): the API key is hard-coded and the endpoint uses plain
        # http -- consider moving the key to configuration and switching to
        # https.
        self.api_key = "81f4883e62f5ec5c7ec74e04ebb662ed"  # Unique API key for our client
        self.base_url = "http://api.openweathermap.org/data/2.5/"  # OpenWeatherAPI basic url for requests of data
        self.weather_report = {}  # raw JSON of the "weather" service response
        self.forecast_report = {}  # raw JSON of the "forecast" service response
        self.create_city_report()
        self.create_city_report("forecast")
        self.validate_status_code()

    def create_city_report(self, service="weather", params="&units=metric"):
        """
        Send request to OWM API for weather report and save it as json
        :param params: Add to OWM API request some parameters. Useful for forecast days limitation, temperature units.
        :param service: Which OWM API service is use - Weather report or Forecast Report
        """
        complete_url = self.base_url + service + "?appid=" + self.api_key + "&q=" + self.city.city + params
        owm_report = requests.get(complete_url)
        owm_report_json = owm_report.json()
        if service == "weather":
            logging(3, f"Weather report for city {self.city} is successfully generated")
            self.weather_report = owm_report_json
        elif service == "forecast":
            logging(3, f"Forecast report for city {self.city} is successfully generated")
            self.forecast_report = owm_report_json
        else:
            # Any other service name is a programming error, not an API issue.
            logging(1, "Faulty report creation service parameter")
            raise AttributeError
        return owm_report_json

    def validate_status_code(self):
        """
        Check a report for status code of creation
        HTTP Status Code 200 means, that request is successfully created and returned
        HTTP Status Code 404 means, that city is not found
        """
        # NOTE(review): the mixed comparisons below (int 200 vs str '200')
        # suggest OWM returns 'cod' as an int for the weather service but as a
        # string for the forecast service -- confirm against the current API
        # before "fixing" them.
        if self.weather_report['cod'] == 200 and self.forecast_report['cod'] == '200':
            self.city.is_exist = True
            logging(3, f"City {self.city} existence is confirmed")
            return True
        elif self.weather_report['cod'] == '404' or self.forecast_report['cod'] == '404':
            self.city.is_exist = False
            logging(2, f"City {self.city} does not exist and reports are unusable")
            return False
        # Implicitly returns None for any other status-code combination.
class CityReportProcess:
    """Process created full report for filtering and formatting variables until required condition"""

    def __init__(self, report: CityReport):
        """
        :param report: CityReport class, where is created a full report
        """
        self.report = report
        # Refuse to process reports for cities OWM could not find.
        self.validate_report_for_existing_city()
        self.main_details_ready_report = {}
        self.weather_ready_report = {}
        self.forecast_ready_report = {}
        self.process_main_details()
        self.process_current_weather()
        self.process_forecast()
        logging(3, f"Report for city {self.report.city} is successfully processed")

    def process_main_details(self):
        """
        Process full report for saving only required details about city and report itself
        Required: city, coordinates, temperatureUnit
        :return: filtered main details report dictionary
        """
        full_report = self.report.weather_report
        city = full_report['name']
        # "&units=metric" was requested, so temperatures are in Celsius.
        temperature_unit = "Celsius"
        coordinates = str(full_report['coord']['lat']) + "," + str(full_report['coord']['lon'])
        report = {"city": city, "coordinates": coordinates, "temperatureUnit": temperature_unit}
        self.main_details_ready_report = report
        return report

    def process_current_weather(self):
        """
        Process full report for saving only required details about current weather information
        Required: date, temperature, humidity, pressure
        :return: filtered current weather report dictionary
        """
        full_report = self.report.weather_report
        full_report_weather = self.process_main_weather_details(full_report)
        # Convert epoch timestamp to the date
        # NOTE(review): datetime/re are presumably re-exported by the wildcard
        # imports at the top of the file -- TODO confirm.
        date = datetime.datetime.fromtimestamp(full_report['dt']).strftime("%d-%m-%Y")
        report = {"date": date}
        report.update(full_report_weather)
        self.weather_ready_report = report
        return report

    def process_forecast(self):
        """
        Process full report for saving only required details about 3 days forecast
        Required for each day: date, temperature, humidity, pressure
        :return: filtered forecast report dictionary
        """
        full_report = self.report.forecast_report
        full_report_all_msg = full_report['list']
        today_date = datetime.date.today().strftime("%Y-%m-%d") + " 12:00:00"
        day_num = 0
        report = []
        for forecast_msg in full_report_all_msg:
            forecast_timestamp = forecast_msg['dt_txt']
            timestamp_check = re.search("....-..-.. 12:00:00", forecast_timestamp)  # Use only launch forecasts
            # Keep the midday forecast of the next three days, skipping today.
            if day_num < 3 and timestamp_check is not None and forecast_timestamp != today_date:
                forecast_for_day = self.process_forecast_day(forecast_msg)
                report.append(forecast_for_day)
                day_num += 1
            if day_num == 3:
                break
        self.forecast_ready_report = report
        return report

    def process_forecast_day(self, forecast_msg):
        """
        Process forecast report for saving only required details about one day forecast
        Required for each day: date, temperature, humidity, pressure
        :type forecast_msg: dict of full report for that day
        :return: filtered forecast day report dictionary
        """
        weather_report = self.process_main_weather_details(forecast_msg)
        # Convert epoch timestamp to the date
        date = datetime.datetime.fromtimestamp(forecast_msg['dt']).strftime("%d-%m-%Y")
        report = {"date": date, "weather": weather_report}
        return report

    @staticmethod
    def process_main_weather_details(report_to_process) -> dict:
        """
        Process weather report for saving only required details about weather
        Required: date, temperature, humidity, pressure
        :type report_to_process: dict of full report for that day
        :return: filtered only weather day report dictionary
        """
        weather_report = report_to_process['main']
        temperature = weather_report['temp']
        humidity = weather_report['humidity']
        pressure = weather_report['pressure']
        report = {"temperature": temperature, "humidity": humidity, "pressure": pressure}
        return report

    def validate_report_for_existing_city(self):
        """City, which is not exist --> doesn't have report to process --> must not be processed"""
        if self.report.city.is_exist is not True:
            raise CityNotExistError
        return True
| vizamo/Python-Study-Coding | ICD0004/Final Project/app/owm_parser.py | owm_parser.py | py | 7,368 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
}
] |
73603910825 | from flask import Flask
from bitrix24 import *
import pycep_correios
app = Flask(__name__)
@app.route('/<id>', methods=['POST','GET'])
def cep(id):
    # Fetch the deal <id> from Bitrix24, resolve its CEP to a full address via
    # the Correios lookup, derive the Sao Paulo zone, and write everything
    # back to the deal and its linked contact.
    # NOTE(review): the webhook URL below is a placeholder ("XXXX") and the
    # UF_CRM_* custom-field IDs are hard-coded for a single portal.
    bx24 = Bitrix24('https://megaisencoes.bitrix24.com.br/rest/XXXX/XXXXXXX/')
    dealId = id
    chamada= bx24.callMethod("crm.deal.get", id=dealId)
    cep1=chamada.get('UF_CRM_5DF0204B5D798')
    contactId = chamada.get('CONTACT_ID')
    complemento = chamada.get('UF_CRM_5DF0204B50C64')
    numero= chamada.get('UF_CRM_5DF0204B42F73')
    cpf = chamada.get('UF_CRM_5DF0204BA9076')
    rg = chamada.get('UF_CRM_5DF0204BB3CD4')
    # Strip separators so the CEP lookup receives digits only.
    cep = chr_remove(cep1, ' -.,')
    endereco = pycep_correios.get_address_from_cep(cep)
    bairro = endereco['bairro']
    cidade = endereco['cidade']
    rua = endereco['logradouro']
    uf = endereco['uf']
    cep = endereco['cep']
    # The zone is derived from the first five digits of the CEP.
    cep2 = cep[0:5]
    zona = getZona(int(cep2))
    bx24.callMethod("crm.deal.update", id=dealId, fields={'UF_CRM_1606240753':zona,'UF_CRM_1606228463':uf, 'UF_CRM_5DF0204B93074':bairro,'UF_CRM_5E18F32827B32':rua,'UF_CRM_5DF0204B68A91':cidade,'UF_CRM_5DF0204B5D798':cep})
    bx24.callMethod("crm.contact.update", id=contactId, fields={'UF_CRM_5DE6A1384D99D':complemento,'UF_CRM_5DE6A1384016A':numero,'UF_CRM_1575396704':rg,'UF_CRM_1575396694':cpf,'UF_CRM_1606395844':uf, 'UF_CRM_5DE6A139AD7B0':bairro,'UF_CRM_5E2F1DAA04C4B':rua,'UF_CRM_5DE6A13867AD5':cidade,'UF_CRM_5DE6A1385B8FC':cep})
    return '<h3>Endereço preenchido</h3>'
def chr_remove(old, to_remove):
    """Return *old* with every character contained in *to_remove* removed.

    :param old: source string
    :param to_remove: string (or iterable) of single characters to strip out
    :return: a new string without the unwanted characters
    """
    # str.translate removes all target characters in a single C-level pass,
    # instead of building one intermediate string per character as the old
    # chained str.replace loop did.
    return old.translate(str.maketrans('', '', ''.join(to_remove)))
def getZona(cep):
    """Map the first five digits of a Sao Paulo CEP (as an int) to its zone.

    The original comparisons used strict inequalities, so the boundary
    prefixes themselves (e.g. 2000, 2999, 8499) wrongly fell through to the
    "not in Sao Paulo" branch; the ranges are inclusive.

    :param cep: integer built from the first five CEP digits (e.g. 1310)
    :return: zone name, or a message when the CEP is outside the city ranges
    """
    if 1000 <= cep <= 1599:
        return "Centro"
    elif 2000 <= cep <= 2999:
        return "Zona Norte"
    elif 3000 <= cep <= 3999 or 8000 <= cep <= 8499:
        return "Zona Leste"
    elif 4000 <= cep <= 4999:
        return "Zona Sul"
    elif 5000 <= cep <= 5899:
        return "Zona Oeste"
    else:
        # Gaps (e.g. 1600-1999, 5900-7999) intentionally fall through here,
        # matching the original mapping.
        return "Cep não pertence a cidade de São Paulo"
if __name__ == '__main__':
    # Flask development server; debug=True must not be used in production.
    app.run(debug=True)
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pycep_correios.get_address_from_cep",
"line_number": 22,
"usage_type": "call"
}
] |
26610511083 | #DEFAULT ARGUMENTS#
import torch
import numpy as np
from utilities.optirank.src.BCD.Create_Instructions_List import classical_architecture_separate_b_once
from utilities.small_functions import percentage_zero, percentage_ones
from utilities.optirank.src.relaxation.lambda_P_setting import delta_loss
from utilities.optirank.src.BCD.BCD_units.convergence_criterion import absolute_delta_args
# Default hyper-parameters for the block-coordinate-descent (BCD) solver.
# NOTE(review): "initializazionparameters" looks like a typo, but downstream
# code must expect this exact key -- do not rename without checking consumers.
default_BCD_args = {"BCD_architecture": classical_architecture_separate_b_once, "max_iter": 10000, "search_method__L_min": 10**(-10),
                    "search_method__eta": [1.5, 1.5], "search_method__init_L_method": "hessian_proj", "initializazionparameters__name": "gamma_05_w_0",
                    "search_method__n_min": -1, "search_method__L_start": "previous",
                    "search_method__search_method_name": "first_best", "search_method__n_max": np.inf}

#chosen after inspection of setting lambda_P strategy
default_setting_lambda_P_strategy_args = {"class":delta_loss, "args":{"M": 100, "delta_lambda_min": 10**(-20), "with_interpolation": False}}

# Full default configuration for the bilinear ranking classifier; BCD solver
# defaults are merged in at the end.
default_bilinear_ranking_classifier_args = {"rounding_threshold": 0.0, "setting_lambda_P_strategy_args": default_setting_lambda_P_strategy_args, "convergence_criterion_args": absolute_delta_args, "high_tol": False, "max_relaxation_iter": 10000, "tol_dist_to_border": 10**(-10), **default_BCD_args}
default_optirank_args = {**default_bilinear_ranking_classifier_args, "R_normalization": "k"}
#default_bilinear_optirank_args_no_constraint_sum_gamma = {**default_bilinear_ranking_classifier_args, "R_normalization": "d"}

#functions for diagnostics
# Each entry maps a human-readable label to a callable taking the problem
# object `p` and returning a scalar (subgradient norms for convergence checks).
subgradients_funs_dict = {
    "|dsurrogate_loss/dw|min": lambda p: torch.norm(p.subgradient_minimal_norm_surrogate_loss_on_w()).item(),
    "|dsurrogate_loss/dgamma_dual|": lambda p: torch.norm(p.gradient_surrogate_loss_with_penalties_dgamma_dual()).item(),
    "|dsurrogate_loss/db|":lambda p: torch.norm(p.dlogloss_db()).item(),
    "|dsurrogate_loss/dgamma|proj": lambda p: torch.norm(p.gradient_surrogate_loss_on_gamma()).item()
}
# Fraction of gamma entries saturated at 1 / at 0.
percentages_funs_dict = {"perc_gamma_1": lambda p: percentage_ones(p.gamma.numpy()),
                         "perc_gamma_0": lambda p: percentage_zero(p.gamma.numpy())}
{
"api_name": "utilities.optirank.src.BCD.Create_Instructions_List.classical_architecture_separate_b_once",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.inf",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "utilities.optirank.src.relaxation.lambd... |
24389916144 | import os.path
import warnings
from collections import defaultdict
from itertools import chain
from . import builtin
from .. import options as opts
from .file_types import static_file
from .path import relname
from ..backends.compdb import writer as compdb
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..build_inputs import build_input, Edge
from ..exceptions import ToolNotFoundError
from ..file_types import *
from ..iterutils import (first, flatten, iterate, listify, slice_dict, uniques,
unlistify)
from ..languages import known_formats
from ..objutils import convert_each, convert_one
from ..platforms import known_native_object_formats
from ..shell import posix as pshell
# Registered build input: global link options per mode ('dynamic'/'static'),
# each a defaultdict keyed by linker family.
build_input('link_options')(lambda: {
    'dynamic': defaultdict(list), 'static': defaultdict(list)
})
class Link(Edge):
    """Base build edge for all link steps (executables and libraries).

    Subclasses set the mode/prefix class attributes and implement
    ``_fill_options``/``_fill_output``.
    """

    msbuild_output = True
    extra_kwargs = ()

    def __init__(self, context, name, files, libs, packages, link_options,
                 lang=None, extra_deps=None, description=None):
        build = context.build
        name = relname(context, name)
        self.name = self.__name(name)

        # Options/libs/packages forwarded by static libraries we link against
        # are merged into the effective lists.
        self.user_libs = libs
        forward_opts = opts.ForwardOptions.recurse(self.user_libs)
        self.libs = self.user_libs + forward_opts.libs

        self.user_packages = packages
        self.packages = self.user_packages + forward_opts.packages

        self.user_files = files
        self.files = self.user_files + flatten(
            getattr(i, 'extra_objects', []) for i in self.user_files
        )

        # A whole-archive counts as input, so no explicit sources are needed.
        if ( len(self.files) == 0 and
             not any(isinstance(i, WholeArchive) for i in self.user_libs) ):
            raise ValueError('need at least one source file')

        self.user_options = link_options

        # All inputs must share a single object format.
        formats = uniques(i.format for i in chain(self.files, self.libs,
                                                  self.packages))
        if len(formats) > 1:
            raise ValueError('cannot link multiple object formats')
        self.format = formats[0]

        self.input_langs = uniques(chain(
            (i.lang for i in self.files if i.lang is not None),
            (j for i in self.libs for j in iterate(i.lang))
        ))
        if not lang and not self.input_langs:
            raise ValueError('unable to determine language')
        self.langs = [lang] if lang else self.input_langs
        self.linker = self.__find_linker(context.env, formats[0], self.langs)

        # Forward any necessary options to the compile step.
        if hasattr(self.linker, 'compile_options'):
            compile_opts = self.linker.compile_options(self)
        else:
            compile_opts = opts.option_list()
        compile_opts.extend(forward_opts.compile_options)
        for i in self.files:
            if hasattr(i.creator, 'add_extra_options'):
                i.creator.add_extra_options(compile_opts)

        extra_options = self.linker.pre_output(context, name, self)
        self._fill_options(context.env, extra_options, forward_opts)

        output = self.linker.output_file(name, self)
        primary = first(output)
        primary.package_deps.extend(self.packages)
        self._fill_output(output)

        options = self.options
        public_output = self.linker.post_output(context, options, output, self)
        primary.post_install = self.linker.post_install(options, output, self)

        super().__init__(build, output, public_output, extra_deps, description)
        build['defaults'].add(self.public_output)

    @classmethod
    def convert_args(cls, context, name, files, kwargs):
        """Normalize user-facing kwargs and compile sources to object files."""
        lang = kwargs.get('lang')
        convert_each(kwargs, 'libs', context['library'],
                     kind=cls._preferred_lib, lang=lang)
        convert_each(kwargs, 'packages', context['package'], lang=lang)
        kwargs['link_options'] = pshell.listify(kwargs.get('link_options'),
                                                type=opts.option_list)

        intdir = ('{}.int/'.format(cls.__name(name))
                  if context.build['project']['intermediate_dirs'] else None)
        intdir = kwargs.pop('intermediate_dir', intdir)

        files = context['object_files'](
            files, includes=kwargs.pop('includes', None),
            pch=kwargs.pop('pch', None),
            options=kwargs.pop('compile_options', None),
            libs=kwargs['libs'], packages=kwargs['packages'], lang=lang,
            directory=intdir,
            extra_deps=kwargs.pop('extra_compile_deps', None)
        )
        return files, kwargs

    def _get_linkers(self, env, langs):
        """Yield candidate linkers for each language, with a format fallback.

        If no language produces a usable linker, fall back to the linker of
        the object format's source language.
        """
        yielded = False
        for i in langs:
            try:
                linker = env.builder(i).linker(self.mode)
                if linker:
                    yielded = True
                    yield linker
            except ToolNotFoundError:
                pass
        if not yielded:
            fmt = ('native' if self.format in known_native_object_formats
                   else self.format)
            src_lang = known_formats[fmt].src_lang
            yield env.builder(src_lang).linker(self.mode)

    @classmethod
    def __name(cls, name):
        # Prepend the platform prefix (e.g. 'lib') to the basename only.
        head, tail = os.path.split(name)
        return os.path.join(head, cls._prefix + tail)

    def __find_linker(self, env, format, langs):
        """Return the first candidate linker that can link this format."""
        for linker in self._get_linkers(env, langs):
            if linker.can_link(format, langs):
                return linker
        raise ValueError('unable to find linker')
class DynamicLink(Link):
    """Link rule producing an executable (dynamic linking)."""

    desc_verb = 'link'
    base_mode = 'dynamic'
    mode = 'executable'
    msbuild_mode = 'Application'
    _preferred_lib = 'shared'
    _prefix = ''

    extra_kwargs = ('entry_point', 'module_defs')

    def __init__(self, *args, entry_point=None, module_defs=None, **kwargs):
        self.entry_point = entry_point
        self.module_defs = module_defs
        super().__init__(*args, **kwargs)

    @classmethod
    def convert_args(cls, context, name, files, kwargs):
        convert_one(kwargs, 'module_defs', context['module_def_file'])
        return super().convert_args(context, name, files, kwargs)

    @property
    def options(self):
        # User options come last so they can override the internal ones.
        return self._internal_options + self.user_options

    def flags(self, global_options=None):
        return self.linker.flags(self.options, global_options, self.raw_output)

    def lib_flags(self, global_options=None):
        return self.linker.lib_flags(self.options, global_options)

    def _fill_options(self, env, extra_options, forward_opts):
        """Assemble the internal option list for the dynamic link."""
        self._internal_options = opts.option_list(
            opts.entry_point(self.entry_point) if self.entry_point else None,
            opts.module_def(self.module_defs) if self.module_defs else None
        )

        if self.linker.needs_libs:
            linkers = self._get_linkers(env, self.input_langs)
            self._internal_options.collect(
                (i.always_libs(i is self.linker) for i in linkers),
                (opts.lib(i) for i in self.libs)
            )
        if self.linker.needs_package_options:
            self._internal_options.collect(i.link_options(self.linker)
                                           for i in self.packages)

        self._internal_options.collect(extra_options,
                                       forward_opts.link_options)

    def _fill_output(self, output):
        # Shared libraries we link against must be present at run time.
        first(output).runtime_deps.extend(
            i.runtime_file for i in self.libs if i.runtime_file
        )
class SharedLink(DynamicLink):
    """Link rule producing a shared library."""

    desc_verb = 'shared-link'
    mode = 'shared_library'
    msbuild_mode = 'DynamicLibrary'
    _prefix = 'lib'

    extra_kwargs = DynamicLink.extra_kwargs + ('version', 'soversion')

    def __init__(self, *args, version=None, soversion=None, **kwargs):
        self.version = version
        self.soversion = soversion
        # Library versioning only makes sense as a pair: either both the
        # version and the soversion are given, or neither is.
        have_version = self.version is not None
        have_soversion = self.soversion is not None
        if have_version != have_soversion:
            raise ValueError('specify both version and soversion or neither')
        super().__init__(*args, **kwargs)
class StaticLink(Link):
    """Link rule producing a static library (archive)."""

    desc_verb = 'static-link'
    base_mode = 'static'
    mode = 'static_library'
    msbuild_mode = 'StaticLibrary'
    _preferred_lib = 'static'
    _prefix = 'lib'

    extra_kwargs = ('static_link_options',)

    def __init__(self, *args, static_link_options=None, **kwargs):
        self.user_static_options = static_link_options
        super().__init__(*args, **kwargs)

    @classmethod
    def convert_args(cls, context, name, files, kwargs):
        kwargs['static_link_options'] = pshell.listify(
            kwargs.get('static_link_options'), type=opts.option_list
        )
        return super().convert_args(context, name, files, kwargs)

    @property
    def options(self):
        return self._internal_options + self.user_static_options

    def flags(self, global_options=None):
        # Only pass the static-link options to the static linker. The other
        # options are forwarded on to the dynamic linker when this library is
        # used.
        return self.linker.flags(self.options, global_options, self.raw_output)

    def _fill_options(self, env, extra_options, forward_opts):
        self._internal_options = extra_options

    def _fill_output(self, output):
        primary = first(output)
        # Record everything consumers of this static library must inherit
        # when they eventually perform the dynamic link.
        primary.forward_opts = opts.ForwardOptions(
            link_options=self.user_options,
            libs=self.user_libs,
            packages=self.user_packages,
        )
        if hasattr(self.linker, 'forwarded_compile_options'):
            primary.forward_opts.compile_options.extend(
                self.linker.forwarded_compile_options(self)
            )

        primary.linktime_deps.extend(self.user_libs)
@builtin.function()
@builtin.type(Executable)
def executable(context, name, files=None, **kwargs):
    """Build an executable, or reference a pre-built one when no sources
    (and no libs) are given."""
    if files is None and 'libs' not in kwargs:
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, Executable, name, dist, params, kwargs)
    files, kwargs = DynamicLink.convert_args(context, name, files, kwargs)
    return DynamicLink(context, name, files, **kwargs).public_output
@builtin.function()
@builtin.type(SharedLibrary, extra_in_type=DualUseLibrary)
def shared_library(context, name, files=None, **kwargs):
    """Build a shared library, extract the shared half of a dual-use library,
    or reference a pre-built one."""
    if isinstance(name, DualUseLibrary):
        # Only pass-through kwargs are allowed when unwrapping.
        if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:
            raise TypeError('unexpected arguments')
        return name.shared

    if files is None and 'libs' not in kwargs:
        # XXX: What to do for pre-built shared libraries for Windows, which has
        # a separate DLL file?
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, SharedLibrary, name, dist, params, kwargs)
    files, kwargs = SharedLink.convert_args(context, name, files, kwargs)
    return SharedLink(context, name, files, **kwargs).public_output
@builtin.function()
@builtin.type(StaticLibrary, extra_in_type=DualUseLibrary)
def static_library(context, name, files=None, **kwargs):
    """Build a static library, extract the static half of a dual-use library,
    or reference a pre-built one."""
    if isinstance(name, DualUseLibrary):
        # Only pass-through kwargs are allowed when unwrapping.
        if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:
            raise TypeError('unexpected arguments')
        return name.static

    if files is None and 'libs' not in kwargs:
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, StaticLibrary, name, dist, params, kwargs)
    files, kwargs = StaticLink.convert_args(context, name, files, kwargs)
    return StaticLink(context, name, files, **kwargs).public_output
@builtin.function()
@builtin.type(Library, extra_in_type=DualUseLibrary)
def library(context, name, files=None, *, kind=None, **kwargs):
    """Build a library whose kind (shared/static/dual) defaults to the
    environment's enabled library modes."""
    explicit_kind = False
    if kind is not None:
        explicit_kind = True
    elif context.env.library_mode.shared and context.env.library_mode.static:
        kind = 'dual'
    elif context.env.library_mode.shared:
        kind = 'shared'
    elif context.env.library_mode.static:
        kind = 'static'

    if isinstance(name, DualUseLibrary):
        if files is not None or not set(kwargs.keys()) <= {'format', 'lang'}:
            raise TypeError('unexpected arguments')
        return name if kind == 'dual' else getattr(name, kind)

    if files is None and 'libs' not in kwargs:
        # Reference a pre-built library on disk.
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]

        file_type = StaticLibrary
        if explicit_kind:
            if kind == 'shared':
                file_type = SharedLibrary
            elif kind == 'dual':
                raise ValueError(
                    "can't create dual-use libraries from an existing file"
                )

        # XXX: Try to detect if a string refers to a shared lib?
        return static_file(context, file_type, name, dist, params, kwargs)

    if kind is None:
        raise ValueError('unable to create library: both shared and static ' +
                         'modes disabled')

    # NOTE(review): slice_dict appears to move the kind-specific extras out of
    # kwargs before the common ones are merged back in -- confirm its
    # semantics in iterutils before refactoring.
    shared_kwargs = slice_dict(kwargs, SharedLink.extra_kwargs)
    static_kwargs = slice_dict(kwargs, StaticLink.extra_kwargs)
    shared_kwargs.update(kwargs)
    static_kwargs.update(kwargs)

    if kind == 'dual':
        shared_files, shared_kwargs = SharedLink.convert_args(
            context, name, files, shared_kwargs
        )
        shared = SharedLink(context, name, shared_files, **shared_kwargs)
        if not shared.linker.builder.can_dual_link:
            warnings.warn('dual linking not supported with {}'
                          .format(shared.linker.brand))
            return shared.public_output

        # Reuse the shared build's object files for the static build.
        static_files, static_kwargs = StaticLink.convert_args(
            context, name, shared_files, static_kwargs
        )
        static = StaticLink(context, name, static_files, **static_kwargs)
        return DualUseLibrary(shared.public_output, static.public_output)
    elif kind == 'shared':
        files, kw = SharedLink.convert_args(context, name, files,
                                            shared_kwargs)
        return SharedLink(context, name, files, **kw).public_output
    else:  # kind == 'static'
        files, kw = StaticLink.convert_args(context, name, files,
                                            static_kwargs)
        return StaticLink(context, name, files, **kw).public_output
@builtin.function()
@builtin.type(WholeArchive, extra_in_type=StaticLibrary)
def whole_archive(context, name, *args, **kwargs):
    """Wrap a static library (existing or newly built) as a whole-archive."""
    if not isinstance(name, StaticLibrary):
        # Build the static library first, then wrap the result.
        lib = context['static_library'](name, *args, **kwargs)
        return WholeArchive(lib)
    if args or kwargs:
        raise TypeError('unexpected arguments')
    return WholeArchive(name)
@builtin.function()
def global_link_options(context, options, family='native', mode='dynamic'):
    """Append project-wide link options for the given toolchain family/mode."""
    for i in iterate(family):
        context.build['link_options'][mode][i].extend(pshell.listify(options))
def _get_flags(backend, rule, build_inputs, buildfile):
    """Collect flag/lib variables for a link rule on the make/ninja backends.

    Returns ``(variables, cmd_kwargs)``: per-rule variable overrides and the
    keyword arguments to pass to the linker command builder.
    """
    variables = {}
    cmd_kwargs = {}

    linker = rule.linker
    if hasattr(linker, 'flags_var') or hasattr(linker, 'libs_var'):
        gopts = build_inputs['link_options'][rule.base_mode][linker.family]

        if hasattr(linker, 'flags_var'):
            global_ldflags, ldflags = backend.flags_vars(
                linker.flags_var,
                linker.global_flags + linker.flags(gopts, mode='global'),
                buildfile
            )
            cmd_kwargs['flags'] = ldflags
            flags = rule.flags(gopts)
            if flags:
                # Rule-specific flags extend the global variable.
                variables[ldflags] = [global_ldflags] + flags

        if hasattr(linker, 'libs_var'):
            global_ldlibs, ldlibs = backend.flags_vars(
                linker.libs_var,
                linker.global_libs + linker.lib_flags(gopts, mode='global'),
                buildfile
            )
            cmd_kwargs['libs'] = ldlibs
            lib_flags = rule.lib_flags(gopts)
            if lib_flags:
                variables[ldlibs] = [global_ldlibs] + lib_flags

    if hasattr(rule, 'manifest'):
        var = backend.var('manifest')
        cmd_kwargs['manifest'] = var
        variables[var] = rule.manifest

    return variables, cmd_kwargs
@make.rule_handler(StaticLink, DynamicLink, SharedLink)
def make_link(rule, build_inputs, buildfile, env):
    """Emit a Makefile rule for a link step."""
    linker = rule.linker
    variables, cmd_kwargs = _get_flags(make, rule, build_inputs, buildfile)

    output_params = []
    if linker.num_outputs == 'all':
        output_vars = make.qvar('@')
    else:
        output_vars = []
        # Extra outputs are passed as numbered call arguments ($2, $3, ...);
        # $1 is reserved for the input files.
        for i in range(linker.num_outputs):
            v = make.var(str(i + 2))
            output_vars.append(v)
            output_params.append(rule.output[i])

    # Define the (shared) recipe for this linker only once per buildfile.
    recipename = make.var('RULE_{}'.format(linker.rule_name.upper()))
    if not buildfile.has_variable(recipename):
        buildfile.define(recipename, [linker(
            make.var('1'), output_vars, **cmd_kwargs
        )])

    files = rule.files
    if hasattr(linker, 'transform_input'):
        files = linker.transform_input(files)

    package_build_deps = flatten(i.deps for i in rule.packages)
    module_defs = listify(getattr(rule, 'module_defs', None))
    manifest = listify(getattr(rule, 'manifest', None))
    make.multitarget_rule(
        build_inputs, buildfile,
        targets=rule.output,
        deps=(rule.files + rule.libs + package_build_deps + module_defs +
              manifest + rule.extra_deps),
        order_only=make.directory_deps(rule.output),
        recipe=make.Call(recipename, files, *output_params),
        variables=variables
    )
@ninja.rule_handler(StaticLink, DynamicLink, SharedLink)
def ninja_link(rule, build_inputs, buildfile, env):
    """Emit a Ninja rule/build statement for a link step."""
    linker = rule.linker
    variables, cmd_kwargs = _get_flags(ninja, rule, build_inputs, buildfile)
    if rule.description:
        variables['description'] = rule.description

    if linker.num_outputs == 'all':
        output_vars = ninja.var('out')
    elif linker.num_outputs == 1:
        output_vars = ninja.var('output')
        variables[output_vars] = rule.output[0]
    else:
        # Multiple outputs get their own numbered variables.
        output_vars = []
        for i in range(linker.num_outputs):
            v = ninja.var('output{}'.format(i + 1))
            output_vars.append(v)
            variables[v] = rule.output[i]

    if hasattr(linker, 'transform_input'):
        input_var = ninja.var('input')
        variables[input_var] = linker.transform_input(rule.files)
    else:
        input_var = ninja.var('in')

    # Define the (shared) rule for this linker only once per buildfile.
    if not buildfile.has_rule(linker.rule_name):
        buildfile.rule(name=linker.rule_name, command=linker(
            input_var, output_vars, **cmd_kwargs
        ), description=rule.desc_verb + ' => ' + first(output_vars))

    package_build_deps = flatten(i.deps for i in rule.packages)
    module_defs = listify(getattr(rule, 'module_defs', None))
    manifest = listify(getattr(rule, 'manifest', None))
    buildfile.build(
        output=rule.output,
        rule=linker.rule_name,
        inputs=rule.files,
        implicit=(rule.libs + package_build_deps + module_defs + manifest +
                  rule.extra_deps),
        variables=variables
    )
@compdb.rule_handler(StaticLink, DynamicLink, SharedLink)
def compdb_link(rule, build_inputs, buildfile, env):
    """Record a compilation-database entry for a link step."""
    linker = rule.linker
    cmd_kwargs = {}
    if hasattr(linker, 'flags_var') or hasattr(linker, 'libs_var'):
        gopts = build_inputs['link_options'][rule.base_mode][linker.family]
        if hasattr(linker, 'flags_var'):
            cmd_kwargs['flags'] = (linker.global_flags +
                                   linker.flags(gopts, mode='global') +
                                   rule.flags(gopts))
        if hasattr(linker, 'libs_var'):
            cmd_kwargs['libs'] = (linker.global_libs +
                                  linker.lib_flags(gopts, mode='global') +
                                  rule.lib_flags(gopts))
    if hasattr(rule, 'manifest'):
        cmd_kwargs['manifest'] = rule.manifest

    # Fall back to the first user library when there are no object files
    # (e.g. a library built purely from a whole-archive).
    file = rule.files[0] if len(rule.files) else rule.user_libs[0]
    in_files = rule.files
    if hasattr(linker, 'transform_input'):
        in_files = linker.transform_input(in_files)

    output = unlistify(rule.output if linker.num_outputs == 'all'
                       else rule.output[0:linker.num_outputs])
    buildfile.append(
        arguments=linker(in_files, output, **cmd_kwargs),
        file=file, output=first(rule.public_output)
    )
try:
    from .compile import CompileHeader
    from ..backends.msbuild import writer as msbuild

    def _parse_compiler_cflags(compiler, global_options):
        """Global (per-language) compile flags for *compiler*, textified for
        MSBuild and parsed back into structured option objects."""
        return compiler.parse_flags(msbuild.textify_each(
            compiler.global_flags +
            compiler.flags(global_options[compiler.lang], mode='global')
        ))

    def _parse_file_cflags(file, global_options, include_compiler=False):
        """Per-file compile flags; when *include_compiler* is set, prepend the
        compiler-wide (global) flags because no common option set is shared by
        all files in the project."""
        compiler = file.creator.compiler
        gopts = global_options[compiler.lang]
        cflags = file.creator.flags(gopts)
        if include_compiler:
            cflags = (compiler.global_flags +
                      compiler.flags(gopts, mode='global') +
                      cflags)
        return compiler.parse_flags(msbuild.textify_each(cflags))

    def _parse_ldflags(rule, global_options):
        """Collect link flags (and library flags, when the linker supports a
        separate libs variable) for *rule* and parse them into MSBuild link
        options."""
        linker = rule.linker
        gopts = global_options[rule.base_mode][linker.family]
        primary = first(rule.output)
        ldflags = [linker.global_flags + linker.flags(gopts) +
                   rule.flags(gopts)]
        # Only linkers exposing `libs_var` take a separate library-flags list.
        if hasattr(rule.linker, 'libs_var'):
            ldflags.append(linker.global_libs + linker.lib_flags(gopts) +
                           rule.lib_flags(gopts))
        link_options = linker.parse_flags(
            *[msbuild.textify_each(i) for i in ldflags]
        )
        if hasattr(primary, 'import_lib'):
            link_options['import_lib'] = primary.import_lib
        return link_options

    @msbuild.rule_handler(DynamicLink, SharedLink, StaticLink)
    def msbuild_link(rule, build_inputs, solution, env):
        """Emit an MSBuild .vcxproj for a link rule (msvc with c/c++/rc only)."""
        if ( any(i not in ['c', 'c++', 'rc'] for i in rule.input_langs) or
             rule.linker.flavor != 'msvc' ):
            raise ValueError('msbuild backend currently only supports c/c++ ' +
                             'with msvc')

        global_compile_opts = build_inputs['compile_options']
        global_link_opts = build_inputs['link_options']

        # Parse compilation flags; if there's only one set of them (i.e. the
        # command_var is the same for every compiler), we can apply these to
        # all the files at once. Otherwise, we need to apply them to each file
        # individually so they all get the correct options.
        obj_creators = [i.creator for i in rule.files]
        compilers = uniques(i.compiler for i in obj_creators)
        if len(uniques(i.command_var for i in compilers)) == 1:
            common_compile_options = _parse_compiler_cflags(
                compilers[0], global_compile_opts
            )
        else:
            common_compile_options = None

        # Everything the project depends on: each object's source file, its
        # header/extra deps, any PCH sources, plus libraries and rule deps.
        deps = chain(
            (i.creator.file for i in rule.files),
            chain.from_iterable(i.creator.include_deps for i in rule.files),
            chain.from_iterable(i.creator.extra_deps for i in rule.files),
            filter(None, (getattr(i.creator, 'pch_source', None)
                          for i in rule.files)),
            rule.libs, rule.extra_deps
        )

        def get_source(file):
            # Get the source file for this compilation rule; it's either a
            # regular source file or a PCH source file.
            if isinstance(file.creator, CompileHeader):
                return file.creator.pch_source
            return file.creator.file

        # MSBuild doesn't build anything if it thinks there are no object files
        # to link. This is a problem for building libraries with no sources
        # that link to a whole-archive (a fairly-common way of making a shared
        # library out of a static one). To get around this, explicitly add the
        # whole-archive as an object file to link, in addition to passing
        # `/WHOLEARCHIVE:foo` as usual.
        objs = []
        if not rule.files:
            for i in rule.libs:
                if isinstance(i, WholeArchive):
                    objs.append(i.library)

        # Create the project file.
        project = msbuild.VcxProject(
            env, name=rule.name,
            mode=rule.msbuild_mode,
            output_file=first(rule.output),
            files=[{
                'name': get_source(i),
                'options': _parse_file_cflags(
                    i, global_compile_opts,
                    include_compiler=(common_compile_options is None)
                ),
            } for i in rule.files],
            objs=objs,
            compile_options=common_compile_options,
            link_options=_parse_ldflags(rule, global_link_opts),
            dependencies=solution.dependencies(deps),
        )
        solution[first(rule.public_output)] = project
except ImportError:  # pragma: no cover
    pass
| jimporter/bfg9000 | bfg9000/builtins/link.py | link.py | py | 25,450 | python | en | code | 73 | github-code | 36 | [
{
"api_name": "build_inputs.build_input",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "build_inputs.Edge",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "pa... |
19788170743 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
import math
import codecs
import random
import numpy as np
import os
import jieba
import pickle
import shutil
jieba.initialize()
def zero_digits(s):
    """Replace every digit character in *s* with the character "0".

    Fix: use a raw string for the regex — a plain '\\d' literal is an
    invalid escape sequence (DeprecationWarning, SyntaxWarning on newer
    Pythons) even though it happens to work today.
    """
    return re.sub(r'\d', '0', s)
def load_sentences(path, lower, zeros):
    """Read a "char tag" corpus file into sentences.

    Returns [[[char, tag], [char, tag], ...],   # first sentence
             ...]
    A blank line terminates a sentence. When *zeros* is true, digits are
    squashed to "0". *lower* is accepted for interface compatibility but
    is not used here.
    """
    sentences = []
    current = []
    with open(path) as f:
        for raw in f:
            raw = raw.rstrip()
            if zeros:  # squash digits to "0"
                raw = zero_digits(raw)
            if raw:
                pair = raw.split(" ")
                # each non-blank line must be exactly [char, tag]
                assert len(pair) == 2, print([pair[0]])
                current.append(pair)
            elif current:  # blank line closes the sentence in progress
                sentences.append(current)
                current = []
    if current:  # file may not end with a blank line
        sentences.append(current)
    return sentences
def iob2(tags):
    """Validate an IOB tag sequence, converting IOB1 to IOB2 in place.

    Returns False as soon as a malformed tag is seen, True otherwise.
    """
    for idx, tag in enumerate(tags):
        if tag == 'O':
            continue
        parts = tag.split('-')
        if len(parts) != 2 or parts[0] not in ['I', 'B']:
            return False
        if parts[0] == 'B':
            continue
        if idx == 0 or tags[idx - 1] == 'O':
            # IOB1-style chunk start: promote I- to B-
            tags[idx] = 'B' + tag[1:]
        elif tags[idx - 1][1:] != tag[1:]:
            # label changed without an explicit B-: promote as well
            tags[idx] = 'B' + tag[1:]
    return True
def iob_iobes(tags):
    """Translate an IOB2 tag sequence into IOBES."""
    converted = []
    last = len(tags) - 1
    for idx, tag in enumerate(tags):
        if tag == 'O':
            converted.append(tag)
            continue
        prefix = tag.split('-')[0]
        followed_by_inside = idx < last and tags[idx + 1].split('-')[0] == 'I'
        if prefix == 'B':
            # B stays B when continued, otherwise it is a Singleton
            converted.append(tag if followed_by_inside else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            # I stays I when continued, otherwise it is the End of the chunk
            converted.append(tag if followed_by_inside else tag.replace('I-', 'E-'))
        else:
            raise Exception('Invalid IOB format!')
    return converted
def update_tag_scheme(sentences):
    """Validate each sentence's IOB tags and rewrite them as IOBES in place."""
    for idx, sentence in enumerate(sentences):
        tag_seq = [token[-1] for token in sentence]
        if not iob2(tag_seq):
            dump = '\n'.join(' '.join(token) for token in sentence)
            raise Exception('IOB标签有误,请检查 {}:\n{}'.format(idx, dump))
        # iob2() already normalised tag_seq to IOB2; convert and write back.
        for token, iobes_tag in zip(sentence, iob_iobes(tag_seq)):
            token[-1] = iobes_tag
def load_data(config):
    """Load the train/dev/test splits and normalise their tags to IOBES.

    Returns (train_sentences, dev_sentences, test_sentences).
    """
    splits = []
    for key in ("train_file", "dev_file", "test_file"):
        sentences = load_sentences(config[key], config["lower"], config["zeros"])
        update_tag_scheme(sentences)  # IOB -> IOBES, in place
        splits.append(sentences)
    return tuple(splits)
def iobes_iob(tags):
    """Translate an IOBES tag sequence back to IOB (S->B, E->I)."""
    converted = []
    for tag in tags:
        prefix = tag.split('-')[0]
        if prefix in ('B', 'I', 'O'):
            converted.append(tag)
        elif prefix == 'S':
            converted.append(tag.replace('S-', 'B-'))
        elif prefix == 'E':
            converted.append(tag.replace('E-', 'I-'))
        else:
            raise Exception('Invalid format!')
    return converted
def create_mapping(words_fre_dic):
    """Build id mappings from a {item: frequency} dict.

    Items are ranked by descending frequency, ties broken lexicographically.
    Returns ({item: id}, {id: item}).
    """
    ranked = sorted(words_fre_dic.items(), key=lambda item: (-item[1], item[0]))
    id_to_word = dict(enumerate(item[0] for item in ranked))
    word_to_id = {word: idx for idx, word in id_to_word.items()}
    return word_to_id, id_to_word
def augment_with_pretrained(dictionary, ext_emb_path):
    """Add every word found in the pretrained-embedding file to *dictionary*.

    Words new to the dictionary get frequency 0 (i.e. unseen in training
    data). The (mutated) dictionary is returned.

    Fixes: the original iterated over a bare ``open(...)`` and leaked the
    file handle, crashed with IndexError on blank lines, and relied on the
    locale's default encoding; embeddings are read as UTF-8 here, matching
    load_word2vec.
    """
    print('加载预训练好的词向量...')
    assert os.path.isfile(ext_emb_path)
    words_pretrained = set()
    with open(ext_emb_path, encoding='utf-8') as emb_file:
        for line in emb_file:
            parts = line.rstrip().split()
            if parts:  # skip blank lines
                words_pretrained.add(parts[0].strip())
    count = 0
    for word in words_pretrained:
        if word not in dictionary:
            count += 1
            dictionary[word] = 0  # frequency 0: absent from the training set
    print("词表新增加 {} 种词,现有 {} 种词.".format(count, len(dictionary)))
    return dictionary
def create_dic(sentences, lower=False):
    """Build frequency dictionaries from token/tag sentences.

    Returns ({word: frequency}, {tag: frequency}). When *lower* is true,
    words are lowercased first. "<PAD>"/"<UNK>" are injected with huge
    counts so they always map to ids 0 and 1 after create_mapping().
    """
    words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]
    tags = [[x[-1] for x in s] for s in sentences]
    num_words = sum(len(x) for x in words)
    num_tags = sum(len(x) for x in tags)
    print("词总数: {}".format(num_words))
    print("标签总数: {}".format(num_tags))
    assert num_words == num_tags, print("词与标签数量不等!")
    words_fre_dic = {}
    for sentence_words in words:
        for word in sentence_words:
            words_fre_dic[word] = words_fre_dic.get(word, 0) + 1
    # force <PAD> and <UNK> to the top of the frequency ranking
    words_fre_dic["<PAD>"] = 10000001
    words_fre_dic['<UNK>'] = 10000000
    print("词种类数:{}".format(len(words_fre_dic)))
    tags_fre_dic = {}
    for sentence_tags in tags:
        for tag in sentence_tags:
            tags_fre_dic[tag] = tags_fre_dic.get(tag, 0) + 1
    print("标签种类数:{}".format(len(tags_fre_dic)))
    return words_fre_dic, tags_fre_dic
def create_maps(train_sentences, config):
    """Build (or reload) word/tag <-> id mappings, cached in config["map_file"].

    Returns (word_to_id, id_to_word, tag_to_id, id_to_tag).
    """
    # Fast path: reuse a previously pickled set of mappings.
    if os.path.isfile(config["map_file"]):
        with open(config["map_file"], "rb") as f:
            word_to_id, id_to_word, tag_to_id, id_to_tag = pickle.load(f)
        return word_to_id, id_to_word, tag_to_id, id_to_tag
    words_dic_train, tags_dic_train = create_dic(train_sentences, config["lower"])
    tag_to_id, id_to_tag = create_mapping(tags_dic_train)
    if config["pre_emb"]:
        # extend the vocabulary with words from the pretrained embeddings
        enriched = augment_with_pretrained(words_dic_train.copy(), config["emb_file"])
        word_to_id, id_to_word = create_mapping(enriched)
    else:
        word_to_id, id_to_word = create_mapping(words_dic_train)
    with open(config["map_file"], "wb") as f:
        pickle.dump([word_to_id, id_to_word, tag_to_id, id_to_tag], f)
    return word_to_id, id_to_word, tag_to_id, id_to_tag
def get_seg_features(string):
    """Jieba word-segmentation features for each character of *string*.

    0 = single-character word; 1/2/3 = begin/middle/end of a multi-char
    word. The returned list has the same length as *string*.
    """
    features = []
    for token in jieba.cut(string):
        if len(token) == 1:
            features.append(0)
        else:
            features.extend([1] + [2] * (len(token) - 2) + [3])
    return features
def create_input(data):
    """Assemble [chars, segs, tags] from a sample dict, in model input order."""
    return [data['chars'], data["segs"], data['tags']]
def load_word2vec(emb_path, id_to_word, word_dim, old_weights):
    """Fill *old_weights* (mutated in place and returned) with pretrained vectors.

    Each row i is initialised from the first match among: id_to_word[i]
    itself, its lowercase form, or its lowercase form with digits squashed
    to '0'. Lines whose token count is not word_dim + 1 are counted as
    invalid and skipped.

    Fixes: the file handle is now closed (the original iterated a bare
    codecs.open(...) and leaked it); regexes use raw strings instead of the
    invalid '\\d' escape; the lowered/squashed forms are computed once per
    word instead of up to three times.
    """
    new_weights = old_weights
    print('Loading pretrained embeddings from {}...'.format(emb_path))
    pre_trained = {}
    emb_invalid = 0
    with codecs.open(emb_path, 'r', 'utf-8') as emb_file:
        for line in emb_file:
            parts = line.rstrip().split()
            if len(parts) == word_dim + 1:
                pre_trained[parts[0]] = np.array(
                    [float(x) for x in parts[1:]]
                ).astype(np.float32)
            else:
                emb_invalid += 1
    if emb_invalid > 0:
        print('WARNING: %i invalid lines' % emb_invalid)
    c_found = 0
    c_lower = 0
    c_zeros = 0
    n_words = len(id_to_word)
    # Lookup table initialization
    for i in range(n_words):
        word = id_to_word[i]
        lowered = word.lower()
        squashed = re.sub(r'\d', '0', lowered)
        if word in pre_trained:
            new_weights[i] = pre_trained[word]
            c_found += 1
        elif lowered in pre_trained:
            new_weights[i] = pre_trained[lowered]
            c_lower += 1
        elif squashed in pre_trained:
            new_weights[i] = pre_trained[squashed]
            c_zeros += 1
    print('Loaded %i pretrained embeddings.' % len(pre_trained))
    print('%i / %i (%.4f%%) words have been initialized with '
          'pretrained embeddings.' % (
              c_found + c_lower + c_zeros, n_words,
              100. * (c_found + c_lower + c_zeros) / n_words)
          )
    print('%i found directly, %i after lowercasing, '
          '%i after lowercasing + zero.' % (
              c_found, c_lower, c_zeros
          ))
    return new_weights
def full_to_half(s):
    """Convert full-width characters in *s* to their half-width equivalents."""
    out = []
    for ch in s:
        code = ord(ch)
        if code == 0x3000:
            # ideographic space -> ASCII space
            code = 32
        elif 0xFF01 <= code <= 0xFF5E:
            # full-width ASCII block is offset by 0xFEE0
            code -= 0xFEE0
        out.append(chr(code))
    return ''.join(out)
def replace_html(s):
    """Decode the handful of HTML entities that occur in the corpus.

    Bug fix: the original replaced each already-decoded character with
    itself (e.g. ``s.replace('"', '"')``) — a series of no-ops, apparently
    from an encoding round-trip that stripped the ``&quot;``-style entity
    names out of the source. The entity names are restored on the
    left-hand side of each replacement.
    """
    s = s.replace('&quot;', '"')
    s = s.replace('&amp;', '&')
    s = s.replace('&lt;', '<')
    s = s.replace('&gt;', '>')
    s = s.replace('&nbsp;', ' ')
    s = s.replace("&ldquo;", "“")
    s = s.replace("&rdquo;", "”")
    s = s.replace("&mdash;", "")
    s = s.replace("\xa0", " ")
    return s
def input_from_line(line, char_to_id):
    """
    Build model inputs for one raw text line:
    [[text], [char ids], [seg features], [empty tag placeholder]].
    """
    line = full_to_half(line)
    line = replace_html(line)
    inputs = list()
    inputs.append([line])
    # Bug fix: the original executed `line.replace(" ", "$")` as a bare
    # statement and discarded the result (str is immutable), so spaces were
    # looked up verbatim. Use the "$"-mapped text for the char-id lookup.
    dollar_line = line.replace(" ", "$")
    inputs.append([[char_to_id[char] if char in char_to_id else char_to_id["<UNK>"]
                    for char in dollar_line]])
    inputs.append([get_seg_features(line)])
    inputs.append([[]])  # no gold tags at inference time
    return inputs
def prepare_dataset(sentences, char_to_id, tag_to_id, lower=False, train=True):
    """For each sentence build [chars, char ids, seg-feature ids, tag ids].

    Outside training (*train* false), tag ids are filled with the "O" id.
    """
    none_index = tag_to_id["O"]

    def normalise(ch):
        return ch.lower() if lower else ch

    prepared = []
    for sentence in sentences:
        chars = [token[0] for token in sentence]
        char_ids = [char_to_id[normalise(ch) if normalise(ch) in char_to_id else '<UNK>']
                    for ch in chars]
        seg_ids = get_seg_features("".join(chars))
        if train:
            tag_ids = [tag_to_id[token[-1]] for token in sentence]
        else:
            tag_ids = [none_index for _ in char_ids]
        prepared.append([chars, char_ids, seg_ids, tag_ids])
    return prepared
def make_path(config):
    """Create the result/checkpoint/log directories if they do not exist.

    Simplified with ``os.makedirs(..., exist_ok=True)`` instead of the
    isdir-then-makedirs pattern (which is also race-prone).
    """
    print("make path...")
    for key in ("result_path", "ckpt_path", "log_path"):
        os.makedirs(config[key], exist_ok=True)
def clean(config):
    """Remove generated artefacts: map file, checkpoints, results, config
    file, and the local __pycache__ directory."""
    print("clean files...")
    for file_key in ("map_file", "config_file"):
        if os.path.isfile(config[file_key]):
            os.remove(config[file_key])
    for dir_path in (config["ckpt_path"], config["result_path"], "__pycache__"):
        if os.path.isdir(dir_path):
            shutil.rmtree(dir_path)
class BatchManager(object):
    """Sorts samples by sentence length and splits them into padded batches.

    Each batch is
    [[string lists], [char-id lists], [seg-feature lists], [tag-id lists]]
    with every list zero-padded to the longest sentence in its batch.
    """

    def __init__(self, data, batch_size):
        self.batch_data = self.sort_and_pad(data, batch_size)
        self.len_data = len(self.batch_data)

    def sort_and_pad(self, data, batch_size):
        # Sorting by length keeps padding per batch to a minimum.
        num_batch = int(math.ceil(len(data) / batch_size))
        ordered = sorted(data, key=lambda sample: len(sample[0]))
        return [self.pad_data(ordered[b * batch_size:(b + 1) * batch_size])
                for b in range(num_batch)]

    @staticmethod
    def pad_data(data):
        max_length = max(len(sample[0]) for sample in data)
        strings, chars, segs, targets = [], [], [], []
        for string, char, seg, target in data:
            pad = [0] * (max_length - len(string))
            strings.append(string + pad)
            chars.append(char + pad)
            segs.append(seg + pad)
            targets.append(target + pad)
        return [strings, chars, segs, targets]

    def iter_batch(self, shuffle=False):
        if shuffle:
            random.shuffle(self.batch_data)
        for batch in self.batch_data:
            yield batch
if __name__ == "__main__":
pass
| churximi/Car-NER | data_utils.py | data_utils.py | py | 14,187 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jieba.initialize",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
8890281336 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 11:28:15 2017
@author: vitorhadad
"""
import torch
from torch import nn, cuda
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
import numpy as np
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class RNN(nn.Module):
    """LSTM sequence tagger with an auxiliary per-sequence count head.

    Produces per-step 2-class logits plus a scalar predicting the number of
    positive steps per sequence. Inputs are assumed shaped
    (seq_len, batch, input_size) and zero-padded — lengths are inferred
    from all-zero trailing steps; TODO confirm against the caller.
    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 bidirectional = False,
                 class_weights = [1, 10]):
        # NOTE(review): mutable default for class_weights is shared across
        # calls; appears harmless here since it is only read.
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = 2 if bidirectional else 1
        self.seq_len = 1
        self.num_classes = 2
        self.class_weights = class_weights
        # Per-step weighted cross entropy; reduce=False so run() can mask
        # each sequence to its true length before averaging.
        self.loss_fn = nn.CrossEntropyLoss(reduce = False,
                                           weight = torch.FloatTensor(self.class_weights))
        self.count_loss_fn = nn.MSELoss()
        self.rnn = nn.LSTM(input_size,
                           hidden_size,
                           num_layers,
                           bidirectional = bidirectional)
        self.logit_layer = nn.Linear(hidden_size, self.num_classes)
        self.count_layer = nn.Linear(hidden_size, 1)
        # Learnable initial LSTM states, broadcast over the batch in forward().
        self.c0 = nn.Parameter(torch.randn(num_layers * self.num_directions,
                                           1,
                                           hidden_size), requires_grad = True)
        self.h0 = nn.Parameter(torch.randn(num_layers * self.num_directions,
                                           1,
                                           hidden_size), requires_grad = True)
        self.optim = optim.Adam(self.parameters(), lr=0.001)

    def forward(self, inputs, lens = None):
        """Return (per-step logits, per-sequence counts) for *inputs*."""
        if lens is None:
            # length = number of steps with any non-zero feature
            lens = inputs.any(2).sum(0)
        inputs = Variable(torch.FloatTensor(inputs),
                          requires_grad = False)
        # pack_padded_sequence requires sequences sorted by decreasing length.
        order = np.flip(np.argsort(lens), 0).astype(int)
        # NOTE(review): order[order] equals the inverse permutation only when
        # `order` is an involution; the usual inverse is np.argsort(order).
        # Verify outputs really are restored to the caller's batch order.
        order_r = torch.LongTensor(order[order])
        seq = pack_padded_sequence(inputs[:,order,:], lens[order])
        this_batch_size = seq.batch_sizes[0]
        initial_state = (self.c0.repeat(1, this_batch_size, 1),
                         self.h0.repeat(1, this_batch_size, 1))
        outputs, staten = self.rnn(seq, initial_state)
        outputs, lens = pad_packed_sequence(outputs)
        # Merge the two directions by summing their hidden outputs.
        outputs = outputs[:, :, :self.hidden_size] +\
                  outputs[:, :, self.hidden_size:]
        prelogits = outputs[:,order_r,:].transpose(1,0)
        logits = self.logit_layer(prelogits)
        # Count head: pool hidden states over time, then project to a scalar.
        precounts = prelogits.sum(1).squeeze()
        counts = self.count_layer(precounts)
        return logits, counts

    def run(self, inputs, true_outputs, lens = None):
        """One optimisation step.

        Returns (logit_loss, count_loss, logits, counts-as-numpy).
        """
        if lens is None:
            lens = inputs.any(2).sum(0)
        ylogits, ycount = self.forward(inputs, lens)
        ytruth = Variable(torch.LongTensor(true_outputs), requires_grad = False)
        logit_loss = 0
        for i,l in enumerate(lens):
            l = lens[i]
            # only the first l (valid) steps of sequence i contribute
            yh = ylogits[i,:l]
            yt = ytruth[i,:l].view(-1)
            try:
                logit_loss += self.loss_fn(yh, yt).mean()
            except RuntimeError as e:
                print(e)
        # NOTE(review): `batch_size` is not defined in this class or at module
        # scope — it is a global created by the __main__ block below, so this
        # method raises NameError if used standalone. Probably meant len(lens).
        logit_loss /= batch_size
        count_loss = self.count_loss_fn(ycount, ytruth.sum(1).float())
        loss = logit_loss + count_loss
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        return (logit_loss.data.numpy()[0],
                count_loss.data.numpy()[0],
                ylogits, ycount.data.numpy())

    def __str__(self):
        return "RNN_{}-{}"\
            .format(self.hidden_size,
                    self.num_layers)
#%%
if __name__ == "__main__":

    from matching.utils.data_utils import open_file, confusion
    from sys import argv, platform

    # On macOS (local development) fake the command-line arguments.
    if platform == "darwin":
        argv.extend(["abo", 1, 100, .5, np.random.randint(1e8)])

    #if len(argv) > 1:
    print("Creating new RNN")
    env_type = argv[1]              # "abo" or "optn"
    num_layers = int(argv[2])
    hidden_size = int(argv[3])
    c = float(argv[4])              # positive-class weight multiplier
    s = str(argv[5])                # run id used in the output file names

    input_size = {"abo":24, "optn":294}
    net = RNN(input_size=input_size[env_type],
              hidden_size=hidden_size,
              num_layers=num_layers,
              bidirectional=True,
              class_weights = [1,100*c])

    batch_size = 32
    open_every = 10     # reload a fresh data file every N iterations
    save_every = 500
    log_every = 10

    name = "{}-{}_{}".format(
        str(net),
        env_type,
        s)

    #%%
    for i in range(10000000):

        if i % open_every == 0:
            X, Y, GN = open_file(env_type = env_type, open_GN = True, open_A = False)
            SS = np.concatenate([X, GN], 2).transpose((1,0,2))
            n = SS.shape[1]

        # Sample a random mini-batch (with replacement).
        idx = np.random.choice(n, size=batch_size)
        inputs = SS[:,idx,:]
        ytrue = Y[idx]
        lens = inputs.any(2).sum(0)

        # Fraction of positive labels in this batch; used to rebalance the
        # cross-entropy class weights adaptively.
        avg_ones = np.hstack([Y[k,:l,0] for k,l in zip(idx, lens)]).mean()
        if avg_ones > 0:
            w = c*1/avg_ones
            # NOTE(review): if avg_ones == 0 on the very first iteration,
            # `w` is referenced in `msg` below before ever being assigned.
            net.loss_fn = nn.CrossEntropyLoss(reduce = False,
                                              weight = torch.FloatTensor([1, w]))

        lloss,closs, ylogits, ycount = net.run(inputs, ytrue, lens)
        cacc = np.mean(ycount.round() == ytrue.sum(1))

        tp, tn, fp, fn = confusion(ylogits, ytrue, lens)
        tpr = tp/(tp+fn)
        tnr = tn/(tn+fp)
        lacc = (tp + tn)/(tp+fp+tn+fn)

        # Nudge the class-weight multiplier to keep both rates above 10%.
        if tpr < .1:
            c *= 1.05
        if tnr < .1:
            c *= .95

        msg = "{:1.4f},{:1.4f},{:1.4f},"\
              "{:1.4f},{:1.4f},{:1.4f},{:1.4f}"\
              .format(lloss,
                      closs,
                      tpr, # True positive rate
                      tnr, # True negative rate
                      lacc, # Logits accuracy
                      cacc, # Count accuracy
                      w)

        if i % log_every == 0:
            print(msg)
            if platform == "linux":
                with open("results/" + name + ".txt", "a") as f:
                    print(msg, file = f)

        if platform == "linux" and i % save_every == 0:
            torch.save(net, "results/" + name)
| halflearned/organ-matching-rl | matching/deep_ml/count_lstm.py | count_lstm.py | py | 6,722 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
32017330441 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
import math
if __name__ == '__main__':
    SZ = 64           # image side length (pixels)
    SP = int(SZ / 2)  # half-size: coordinates run from -SP to SP-1
    im = np.zeros((SZ, SZ), np.uint8)
    # Negative indices wrap around, which re-centres the pattern in the
    # array — presumably intentional here; verify if the layout matters.
    all_i = range(-SP, SP)
    all_j = range(-SP, SP)
    # fp: number of sine periods across the image
    fp = 4.0  # 1.0;2.0;4.0;8.0;16.0
    fr = fp / SZ  # reduced frequency
    for i in all_i:
        for j in all_j:
            # Bug fix: 128 + 128*sin(...) reaches 256.0 when sin == 1, which
            # does not fit in uint8 (wrap-around / OverflowError artefacts
            # depending on the NumPy version). Clamp to the valid range.
            im[i, j] = min(255, int(128 + 128 * math.sin(2 * 3.14 * fr * i)))
    plt.figure(1)
    plt.clf()
    plt.imshow(im, cmap=plt.cm.gray)
    IG, JG = np.meshgrid(all_i, all_j)
    fig = plt.figure(2)
    plt.clf()
    ax = Axes3D(fig)
    ax.plot_surface(IG, JG, im, rstride=1, cstride=1, cmap=cm.jet)
    plt.show()
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "math.sin",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
... |
10794984259 | from absl.testing import absltest
from absl.testing import parameterized
import more_itertools
import tensorflow as tf
from uncertainty_baselines.datasets import datasets
import data_preprocessor # local file import from experimental.language_structure.vrnn
import data_utils # local file import from experimental.language_structure.vrnn
import utils # local file import from experimental.language_structure.vrnn
INPUT_ID_NAME = data_preprocessor.INPUT_ID_NAME
INPUT_MASK_NAME = data_preprocessor.INPUT_MASK_NAME
DIAL_TURN_ID_NAME = data_preprocessor.DIAL_TURN_ID_NAME
class DataPreprocessorTest(parameterized.TestCase):
    """Shape and label-masking tests for DataPreprocessor across the three
    synthetic dialog datasets."""

    def setUp(self):
        super().setUp()
        self.batch_size = 2

    def create_data_preprocessor(self, max_seq_length, **kwargs):
        # Overridden in BertDataPreprocessorTest; the plain preprocessor does
        # not need the sequence length.
        del max_seq_length  # unused
        return data_preprocessor.DataPreprocessor(**kwargs)

    def load_dataset(self, dataset_name):
        """Load the batched test split with dialog turn ids attached."""
        dataset_builder = datasets.get(
            dataset_name, split='test', add_dialog_turn_id=True)
        return dataset_builder.load(batch_size=self.batch_size).prefetch(1)

    @parameterized.named_parameters(('multiwoz_synth', 'multiwoz_synth'),
                                    ('simdial', 'simdial'),
                                    ('sgd_synth', 'sgd_synth'))
    def test_output_shape(self, dataset_name):
        """Every preprocessed tensor keeps the expected static shape."""
        dataset = self.load_dataset(dataset_name)
        dialog_length = data_utils.get_dataset_max_dialog_length(dataset_name)
        seq_length = data_utils.get_dataset_max_seq_length(dataset_name)
        num_states = data_utils.get_dataset_num_latent_states(dataset_name)
        preprocessor = self.create_data_preprocessor(
            seq_length, num_states=num_states)
        dataset = dataset.map(preprocessor.create_feature_and_label)
        (input_1, input_2, label, label_mask, initial_state, initial_sample,
         domain_label) = more_itertools.first(dataset)
        # token-level inputs: (batch, dialog turns, tokens per turn)
        for inputs in [input_1, input_2]:
            for key in [INPUT_ID_NAME, INPUT_MASK_NAME]:
                self.assertEqual([self.batch_size, dialog_length, seq_length],
                                 inputs[key].shape.as_list())
        # turn-level tensors: (batch, dialog turns)
        for inputs in [label, label_mask, domain_label]:
            self.assertEqual([self.batch_size, dialog_length], inputs.shape.as_list())
        # latent-state tensors: (batch, number of latent states)
        for inputs in [initial_state, initial_sample]:
            self.assertEqual([self.batch_size, num_states], inputs.shape.as_list())

    @parameterized.named_parameters(('multiwoz_synth', 'multiwoz_synth'),
                                    ('simdial', 'simdial'),
                                    ('sgd_synth', 'sgd_synth'))
    def test_label_mask_by_dialog_turn_ids(self, dataset_name):
        """label_mask is 1 exactly at the requested labeled dialog turns."""
        dataset = self.load_dataset(dataset_name)
        inputs = more_itertools.first(dataset)
        # (batch index, turn index) positions whose turn ids we mark labeled.
        dialog_turn_id_indices = [(0, 2), (1, 3), (1, 5)]
        dialog_turn_ids = tf.gather_nd(inputs[DIAL_TURN_ID_NAME],
                                       dialog_turn_id_indices)
        seq_length = data_utils.get_dataset_max_seq_length(dataset_name)
        num_states = data_utils.get_dataset_num_latent_states(dataset_name)
        preprocessor = self.create_data_preprocessor(
            seq_length,
            num_states=num_states,
            labeled_dialog_turn_ids=dialog_turn_ids)
        dataset = dataset.map(preprocessor.create_feature_and_label)
        (_, _, _, label_mask, _, _, _) = more_itertools.first(dataset)
        for i, row in enumerate(label_mask.numpy()):
            for j, val in enumerate(row):
                if (i, j) in dialog_turn_id_indices:
                    self.assertEqual(val, 1)
                else:
                    self.assertEqual(val, 0)
class BertDataPreprocessorTest(DataPreprocessorTest):
    """Re-runs every DataPreprocessorTest case with the BERT preprocessor."""

    def create_data_preprocessor(self, max_seq_length, **kwargs):
        # Build the TF-Hub BERT preprocessing model for the requested
        # sequence length instead of the plain DataPreprocessor.
        preprocess_tfhub_url = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3'
        bert_preprocess_model = utils.BertPreprocessor(preprocess_tfhub_url,
                                                       max_seq_length)
        return data_preprocessor.BertDataPreprocessor(bert_preprocess_model,
                                                      **kwargs)
if __name__ == '__main__':
absltest.main()
| HeyGF/uncertainty-baselines | experimental/language_structure/vrnn/data_preprocessor_test.py | data_preprocessor_test.py | py | 4,004 | python | en | code | null | github-code | 36 | [
{
"api_name": "data_preprocessor.INPUT_ID_NAME",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "data_preprocessor.INPUT_MASK_NAME",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "data_preprocessor.DIAL_TURN_ID_NAME",
"line_number": 12,
"usa... |
39006259890 |
import os
import anndata
import scanpy as sc
from matplotlib import rcParams
import sccross
rcParams["figure.figsize"] = (4, 4)

# Output directory for this preprocessing stage.
PATH = "s01_preprocessing"
os.makedirs(PATH, exist_ok=True)

# Load the three modalities: scRNA-seq, snmC-seq (methylation), scATAC-seq.
rna = anndata.read_h5ad("Saunders-2018.h5ad")
met = anndata.read_h5ad("Luo-2017.h5ad")
atac = anndata.read_h5ad("10x-ATAC-Brain5k.h5ad")

# --- RNA: normalise/log/scale only for PCA, then restore raw counts ---
rna.layers["raw_count"] = rna.X.copy()
sc.pp.normalize_total(rna)
sc.pp.log1p(rna)
sc.pp.scale(rna, max_value=10)
sc.tl.pca(rna, n_comps=100, use_highly_variable=True, svd_solver="auto")
rna.X = rna.layers["raw_count"]
del rna.layers["raw_count"]
sc.pp.neighbors(rna, n_pcs=100, metric="cosine")
sc.tl.umap(rna)
# Fix the display/order of cell-type categories.
rna.obs["cell_type"].cat.set_categories([
    "Layer2/3", "Layer5a", "Layer5", "Layer5b", "Layer6",
    "Claustrum", "CGE", "MGE"
], inplace=True)

# --- Methylation: same recipe, starting from the "norm" layer ---
met.X = met.layers["norm"].copy()
sc.pp.log1p(met)
sc.pp.scale(met, max_value=10)
sc.tl.pca(met, n_comps=100, use_highly_variable=True, svd_solver="auto")
met.X = met.layers["norm"]
del met.layers["norm"]
sc.pp.neighbors(met, n_pcs=100, metric="cosine")
sc.tl.umap(met)
met.obs["cell_type"].cat.set_categories([
    "mL2/3", "mL4", "mL5-1", "mDL-1", "mDL-2", "mL5-2",
    "mL6-1", "mL6-2", "mDL-3", "mIn-1", "mVip",
    "mNdnf-1", "mNdnf-2", "mPv", "mSst-1", "mSst-2"
], inplace=True)

# --- ATAC: LSI embedding instead of PCA ---
sccross.data.lsi(atac, n_components=100, use_highly_variable=False, n_iter=15)
sc.pp.neighbors(atac, n_pcs=100, use_rep="X_lsi", metric="cosine")
sc.tl.umap(atac)
atac.obs["cell_type"].cat.set_categories([
    "L2/3 IT", "L4", "L5 IT", "L6 IT", "L5 PT",
    "NP", "L6 CT", "Vip", "Pvalb", "Sst"
], inplace=True)
fig = sc.pl.umap(atac, color="cell_type", title="scATAC-seq cell type", return_fig=True)
fig.savefig(f"{PATH}/atac_ct.pdf")

# Gene-activity matrix derived from ATAC peaks, used for cross-modality work.
atac2rna = sccross.data.geneActivity(atac)

# Persist everything for the downstream stages.
rna.write("rna_preprocessed.h5ad", compression="gzip")
met.write("met_preprocessed.h5ad", compression="gzip")
atac.write("atac_preprocessed.h5ad", compression="gzip")
atac2rna.write("atac2rna.h5ad", compression="gzip")
| mcgilldinglab/scCross | data/unmatched_mouse_cortex/preprocess.py | preprocess.py | py | 2,006 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "anndata.read_h5ad",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "anndata.read_h5ad",... |
39804372033 | from core_operations.models import US_COUNTRY_CODE, NUMBER_OF_DAYS_IN_A_YEAR, LIST_OF_STATES_IN_US
from core_operations.models import FormattedPhoneNumberField, YearsOfWorkField
from django.db import models
import re
from datetime import date
from datetime import datetime
# from internal_users.models import InternalUser
from django.utils import timezone
from faker import Faker
fake = Faker()
# added on 2023-06-03. common operational models, functions shall be defined in core_operations app.
# Department stored values. Note: ACCOUNTING is defined but not listed in
# DEPARTMENTS — presumably intentional; confirm before adding it. The
# 'technican' spelling is kept as-is because it is a *stored* value.
UNASSIGNED = 'Unassigned'
VISITOR = 'visitor only'
SERVICE_FRONT = 'service advisor'
SERVICE_GARAGE = 'service technican'
TALENT_MANAGEMENT = 'talent management'
ACCOUNTING = 'Accounting'
CODING_MASTERS = 'code masters'
CYBER_SECURITY = 'cyber security'
TRAINEE = 'Trainee'
LEGAL = 'legal'

# Django choices: (stored value, human-readable label).
DEPARTMENTS = ((UNASSIGNED, 'your deparment has not been assigned yet.'),
               (VISITOR, 'visitor group'),
               (SERVICE_FRONT, 'service advisor group'),
               (SERVICE_GARAGE, 'service technican group'),
               (TALENT_MANAGEMENT, 'talent management group'),
               (LEGAL, 'legal group'),
               (TRAINEE, 'trainee group'),
               (CODING_MASTERS, 'code master group'),
               (CYBER_SECURITY, 'cyber security group'),
               )

# Pay-type codes (stored as small integers).
PAY_TYPE_UNASSIGNED = 0
PAY_TYPE_HOURLY = 1
PAY_TYPE_SALARY = 2
PAY_TYPE_BONUS = 3
PAY_TYPE_INTERNSHIP = 4
PAY_TYPE_OTHER1 = 5
PAY_TYPE_OTHER2 = 6
PAY_TYPES = ((PAY_TYPE_UNASSIGNED, 'unassigned pay type. Must be assigned before the first work day.'),
             (PAY_TYPE_HOURLY, 'hourly'),
             (PAY_TYPE_SALARY, 'salaried'),
             (PAY_TYPE_INTERNSHIP, 'internship'),
             (PAY_TYPE_BONUS, 'bonus pay'),
             (PAY_TYPE_OTHER1, 'pay type other-1'),
             (PAY_TYPE_OTHER2, 'pay type other-2'),
             )

# 2023-05-23 add pay frequency choices
# Weekly – 52 paychecks per year.
# Biweekly – 26 paychecks per year.
# Semi-monthly – 24 paychecks per year.
# Monthly – 12 paychecks per year.
PAY_FREQUENCY_UNDEFINED = 0
PAY_FREQUENCY_DAILY = 1
PAY_FREQUENCY_WEEKLY = 2
PAY_FREQUENCY_BIWEEKLY = 3
PAY_FREQUENCY_SEMIMONTHLY = 4
PAY_FREQUENCY_MONTHLY = 5
PAY_FREQUENCY_SEMIANNUALLY = 6
PAY_FREQUENCY_ANNUALLY = 7
PAY_FREQUENCY_RESERVE1 = 8
PAY_FREQUENCY_RESERVE2 = 9
PAY_FREQUENCY_LIST = ((PAY_FREQUENCY_UNDEFINED, 'pay frequency not defined'),
                      (PAY_FREQUENCY_DAILY, 'daily'),
                      (PAY_FREQUENCY_WEEKLY, 'weekly'),
                      (PAY_FREQUENCY_BIWEEKLY, 'bi-weekly'),
                      (PAY_FREQUENCY_SEMIMONTHLY, 'semi-monthly'),
                      (PAY_FREQUENCY_MONTHLY, 'monthly'),
                      # bug fix: the next two labels both read 'monthly'
                      # (copy-paste error); the stored codes are unchanged.
                      (PAY_FREQUENCY_SEMIANNUALLY, 'semi-annually'),
                      (PAY_FREQUENCY_ANNUALLY, 'annually'),
                      (PAY_FREQUENCY_RESERVE1,
                       'reserved pay frequency 1; not used yet'),
                      (PAY_FREQUENCY_RESERVE2,
                       'reserved pay frequency 2; not used yet'),
                      )
class TalentsModel(models.Model):
    """HR record for a single talent (employee/contractor)."""

    # --- identity ---
    talent_id = models.BigAutoField(primary_key=True)
    # business-facing id, distinct from the database primary key
    talent_employee_id = models.IntegerField(unique=True)
    talent_first_name = models.CharField(
        max_length=50, null=False, verbose_name="Legal First Name (as in driver license (DL) or passport)")
    talent_last_name = models.CharField(
        max_length=50, null=False, verbose_name="Legal Last Name (as in driver license (DL) or passport)")
    talent_middle_name = models.CharField(
        max_length=50, null=True, blank=True, verbose_name="Middle Name")
    talent_preferred_name = models.CharField(
        max_length=50, null=True, blank=True, verbose_name="Preferred Name")

    # --- contact information ---
    talent_email = models.EmailField(max_length=50, blank=True, null=True)
    # this custom field works with a glitch as of 2023-06-03.
    talent_phone_number_primary = FormattedPhoneNumberField()
    # normalised copy of the primary number (digits only); presumably kept in
    # sync by save() — TODO confirm
    talent_phone_number_primary_digits_only = models.CharField(
        max_length=20, null=True, blank=True)
    talent_phone_number_alternates_01 = FormattedPhoneNumberField(null=True)
    talent_phone_number_alternates_02 = FormattedPhoneNumberField(null=True)
    talent_emergency_contact = models.CharField(
        max_length=200, null=True, blank=True)

    # --- personal / legal ---
    talent_date_of_birth = models.DateField(
        verbose_name='Date of Birth (DOB)', null=True)
    talent_ssn = models.CharField(max_length=15,
                                  verbose_name='SSN or Tax ID (TIN)', null=True)

    # --- physical address ---
    talent_physical_address_01 = models.CharField(
        verbose_name='street address 01', max_length=100)
    talent_physical_address_02 = models.CharField(
        verbose_name='street address 02 (apt numbers, unit #, etc.)', max_length=100, blank=True, null=True)
    talent_physical_address_city = models.CharField(max_length=50, null=True)
    talent_physical_address_state = models.CharField(max_length=2, null=True)
    talent_physical_address_zip_code = models.CharField(
        max_length=10, null=True)
    talent_physical_address_country = models.CharField(verbose_name='Country',
                                                       max_length=50, default='US')

    # --- mailing address (may mirror the physical address) ---
    talent_mailing_address_is_the_same_physical_address = models.BooleanField(
        default=True)
    talent_mailing_address_01 = models.CharField(
        verbose_name='mailing address 01', max_length=100)
    talent_mailing_address_02 = models.CharField(
        verbose_name=' mailing address 02 (apt numbers, unit #, etc)', max_length=100, blank=True, null=True)
    talent_mailing_address_city = models.CharField(max_length=50, null=True)
    talent_mailing_address_state = models.CharField(max_length=2, null=True)
    talent_mailing_address_zip_code = models.CharField(max_length=10)
    talent_mailing_address_country = models.CharField(
        max_length=50, default='US')

    # --- qualifications ---
    talent_education_level = models.CharField(max_length=100, default='None')
    talent_certifications = models.CharField(
        max_length=500, null=True, blank=True)

    # --- employment ---
    talent_hire_date = models.DateTimeField(blank=True, null=True)
    talent_department = models.CharField(max_length=50,
                                         choices=DEPARTMENTS,
                                         default=UNASSIGNED)
    # self-referencing FK: the supervisor is another talent record
    talent_supervisor = models.ForeignKey(
        'self', on_delete=models.SET_NULL, null=True, blank=True)
    talent_work_start_date = models.DateTimeField(blank=True, null=True)
    talent_pay_type = models.PositiveSmallIntegerField(default=PAY_TYPE_UNASSIGNED,
                                                       choices=PAY_TYPES)
    talent_pay_rate = models.DecimalField(
        max_digits=10, decimal_places=2, default=0.00)
    talent_pay_frequency = models.PositiveSmallIntegerField(choices=PAY_FREQUENCY_LIST,
                                                            default=PAY_FREQUENCY_UNDEFINED)
    talent_previous_department = models.CharField(max_length=50,
                                                  choices=DEPARTMENTS,
                                                  default=UNASSIGNED,
                                                  )
    talent_discharge_date = models.DateTimeField(null=True, blank=True)
    talent_years_of_work = YearsOfWorkField(null=True, blank=True)

    # --- HR free-form records (JSON stored as text) ---
    talent_HR_remarks_json = models.TextField(null=True, blank=True)
    talent_incident_record_json = models.TextField(null=True, blank=True)
    # added on 2023-06-02 to store the future talent_digital_files
    talent_digital_file_storage_path_01 = models.CharField(
        max_length=2000, null=True, blank=True)
    talent_digital_file_storage_path_02 = models.CharField(
        max_length=2000, null=True, blank=True)

    # --- bookkeeping ---
    talent_is_active = models.BooleanField(default=True)
    talent_created_at = models.DateTimeField(auto_now_add=True)
    talent_last_udpated_at = models.DateTimeField(auto_now=True)
    # talent_created_by_user = models.ForeignKey(
    #     InternalUser, null=True, on_delete=models.SET_NULL)
@property
def talent_full_name(self):
return f"{self.talent_first_name} {self.talent_last_name} {self.talent_middle_name}"
@property
def talent_full_physical_address(self):
addr_fields = [self.talent_physical_address_01, self.talent_physical_address_02, self.talent_physical_address_city, self.talent_physical_address_state.upper(),
self.talent_physical_address_zip_code]
full_address = " ".join(
[field for field in addr_fields if field is not None]).strip()
if len(full_address) != 0:
full_address = full_address + " " + self.talent_physical_address_country
else:
full_address = full_address
return full_address
@property
def talent_full_mailing_address(self):
addr_fields = [self.talent_mailing_address_01, self.talent_mailing_address_02, self.talent_mailing_address_city, self.talent_mailing_address_state,
self.talent_mailing_address_zip_code]
full_address = " ".join(
[field for field in addr_fields if field is not None]).strip()
# if the first 5 fields are empty; do not add the country in the end, return none instead.
if len(full_address) != 0:
full_address = full_address + " " + self.talent_mailing_address_country
else:
full_address = full_address
return full_address
def __init__(self, *args, **kwargs):
super(TalentsModel, self).__init__(*args, **kwargs)
self._initial_state = {field: getattr(
self, field) for field in self.fields_to_track()}
@classmethod
def fields_to_track(cls):
return ['talent_pay_rate', 'talent_pay_frequency', 'talent_date_of_birth', 'talent_email', 'talent_phone_number_primary']
def get_changed_fields(self):
changed_fields = {}
for field in self.fields_to_track():
if self._initial_state[field] != getattr(self, field):
changed_fields[field] = self._initial_state[field]
return changed_fields
def save(self, *args, **kwargs):
# if not self.pk:
# Only set the talent_created_by_user_id if this is a new instance
# self.talent_created_by_user_id = request.user.id
# super(TalentsModel, self).save(*args, **kwargs)
# creating a employee_id that is different from the talent_id that is used in the database.
# employee ID start from 1024
if not self.talent_employee_id:
last_talent_employee = TalentsModel.objects.order_by(
'-talent_employee_id').first()
if last_talent_employee:
self.talent_employee_id = last_talent_employee.talent_employee_id + 2
else:
self.talent_employee_id = 1024
elif self.talent_employee_id and self.pk:
self.talent_employee_id = self.talent_employee_id
super(TalentsModel, self).save(*args, **kwargs)
def __str__(self):
return f"{self.talent_first_name} {self.talent_last_name} {self.talent_middle_name}"
class Meta:
db_table = 'talent_managment'
ordering = ['-talent_id']
class TalentDocuments(models.Model):
document_id = models.BigAutoField(primary_key=True)
talent = models.ForeignKey(
TalentsModel, on_delete=models.SET_NULL, null=True)
talent_employment_docs = models.FileField(
upload_to='2023_talent_employment_docs')
talent_uploaded_photos = models.ImageField(upload_to='photos')
uploaded_date = models.DateTimeField(default=timezone.now)
document_is_active = models.BooleanField(default=True)
class Meta:
db_table = 'talent_documents'
ordering = ['-talent_id']
class TalentAudit(models.Model):
talent_audit_id = models.BigAutoField(primary_key=True)
# Changed from OneToOneField to ForeignKey to allow multiple audit records per talent
talent = models.ForeignKey(TalentsModel, on_delete=models.CASCADE)
created_by = models.ForeignKey(
'internal_users.InternalUser', related_name="created_audits", swappable=True, on_delete=models.SET_NULL, null=True)
created_at = models.DateTimeField(auto_now_add=True)
field_changed = models.CharField(max_length=50, null=True, blank=True)
old_value = models.CharField(max_length=255, null=True, blank=True)
new_value = models.CharField(max_length=255, null=True, blank=True)
class Meta:
db_table = 'talent_audit'
ordering = ['-talent_audit_id']
| zjgcainiao/new_place_at_76 | talent_management/models.py | models.py | py | 12,604 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "faker.Faker",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.db.mo... |
43190780850 | from openpyxl.styles import PatternFill, GradientFill
def set_fill_color_green(workbook):
# read
# http://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.fills.html
ws = workbook.active
a1 = ws['A1']
# 2 different fill types
fill = PatternFill("solid", fgColor="DDDDDD")
fill = GradientFill(stop=("000000", "FFFFFF"))
fill = PatternFill(
fill_type=None,
start_color='FFFFFFFF',
end_color='FF000000')
a1.fill = fill
return a1
| simkimsia/ug-read-write-excel-using-python | examples/c09_2_fill_color/openpyxl/index.py | index.py | py | 498 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.GradientFill",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 12,
"usage_type": "call"
}
] |
10713162688 | import os
import mujoco_py
import numpy as np
from mujoco_py import functions
from learn_seq.utils.general import get_mujoco_model_path
# object indicator in mujoco
MJ_SITE_OBJ = 6 # `site` objec
MJ_BODY_OBJ = 1 # `body` object
MJ_GEOM_OBJ = 5 # `geom` object
# geom types
MJ_CYLINDER = 5
MJ_BOX = 6
MJ_MESH = 7
def load_model(xml_name="round_pih.xml"):
"""Load a model from `mujoco/franka_pih`
:param type xml_name: Description of parameter `xml_name`.
:param type primitive: Description of parameter `primitive`.
:return: Description of returned object.
:rtype: type
"""
model_path = get_mujoco_model_path()
xml_path = os.path.join(model_path, xml_name)
model = mujoco_py.load_model_from_path(xml_path)
sim = mujoco_py.MjSim(model)
return sim
def attach_viewer(sim):
return mujoco_py.MjViewer(sim)
def set_state(sim, qpos, qvel):
assert qpos.shape == (sim.model.nq, ) and qvel.shape == (sim.model.nv, )
old_state = sim.get_state()
new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel, old_state.act,
old_state.udd_state)
sim.set_state(new_state)
sim.forward()
def get_contact_force(mj_model, mj_data, body_name, frame_pos, frame_quat):
"""Get the force acting on a body, with respect to a frame.
Note that mj_rnePostConstraint should be called before this function
to update the simulator state.
:param str body_name: Body name in mujoco xml model.
:return: force:torque format.
:rtype: np.array(6)
"""
bodyId = mujoco_py.functions.mj_name2id(mj_model, MJ_BODY_OBJ, body_name)
force_com = mj_data.cfrc_ext[bodyId, :]
# contact force frame
# orientation is aligned with world frame
qf = np.array([1, 0, 0, 0.])
# position of origin in the world frame
body_rootid = mj_model.body_rootid[bodyId]
pf = mj_data.subtree_com[body_rootid, :]
# inverse com frame
pf_inv, qf_inv = np.zeros(3), np.zeros(4)
functions.mju_negPose(pf_inv, qf_inv, pf, qf)
# T^com_target
p_ct, q_ct = np.zeros(3), np.zeros(4)
functions.mju_mulPose(p_ct, q_ct, pf_inv, qf_inv, frame_pos, frame_quat)
# q_ct -> mat
mat_ct = np.zeros(9)
functions.mju_quat2Mat(mat_ct, q_ct)
# transform to desired frame
trn_force = force_com.copy()
functions.mju_transformSpatial(trn_force, force_com, 1, p_ct, np.zeros(3),
mat_ct)
# reverse order to get force:torque format
return np.concatenate((trn_force[3:], trn_force[:3]))
def get_geom_pose(model, geom_name):
"""Return the geom pose (relative to parent body).
:param mujoco_py.MjModel model:
:param str geom_name:
:return: position, quaternion
:rtype: tuple(np.array(3), np.array(4))
"""
geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
pos = model.geom_pos[geom_id, :]
quat = model.geom_quat[geom_id, :]
return pos, quat
def get_geom_size(model, geom_name):
"""Return the geom size.
:param mujoco_py.MjModel model:
:param str geom_name:
:return: (radius, half-length, _) for cylinder geom, and
(X half-size; Y half-size; Z half-size) for box geom
:rtype: np.array(3)
"""
geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
if model.geom_type[geom_id] == MJ_BOX or model.geom_type[
geom_id] == MJ_CYLINDER:
return model.geom_size[geom_id, :]
else:
return None
def get_geom_friction(model, geom_name):
geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
return model.geom_friction[geom_id, :]
def get_body_mass(model, body_name):
body_id = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
return model.body_mass[body_id]
def get_body_pose(model, body_name):
body_id = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
return model.body_pos[body_id], model.body_quat[body_id]
def get_mesh_vertex_pos(model, geom_name):
geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
assert model.geom_type[geom_id] == MJ_MESH
mesh_id = model.geom_dataid[geom_id]
first_vertex_id = model.mesh_vertadr[mesh_id]
no_vertex = model.mesh_vertnum[mesh_id]
vertex_pos = model.mesh_vert[first_vertex_id:first_vertex_id + no_vertex]
return vertex_pos
def set_geom_size(model, geom_name, size):
geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
model.geom_size[geom_id, :] = size
def set_body_mass(model, body_name, mass):
body_id = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
model.body_mass[body_id] = mass
def set_geom_friction(model, geom_name, friction):
geom_id = functions.mj_name2id(model, MJ_GEOM_OBJ, geom_name)
model.geom_friction[geom_id, :] = friction
def set_body_pose(model, body_name, pos, quat):
body_id = functions.mj_name2id(model, MJ_BODY_OBJ, body_name)
model.body_pos[body_id, :] = pos
model.body_quat[body_id, :] = quat
# -------- GEOMETRY TOOLs
def quat_error(q1, q2):
"""Compute the rotation vector (expressed in the base frame), that if follow
in a unit time, will transform a body with orientation `q1` to
orientation `q2`
:param list/np.ndarray q1: Description of parameter `q1`.
:param list/np.ndarray q2: Description of parameter `q2`.
:return: a 3D rotation vector
:rtype: np.ndarray
"""
if isinstance(q1, list):
q1 = np.array(q1)
if isinstance(q2, list):
q2 = np.array(q2)
dtype = q1.dtype
neg_q1 = np.zeros(4, dtype=dtype)
err_rot_quat = np.zeros(4, dtype=dtype)
err_rot = np.zeros(3, dtype=dtype)
if q1.dot(q2) < 0:
q1 = -q1
functions.mju_negQuat(neg_q1, q1)
functions.mju_mulQuat(err_rot_quat, q2, neg_q1)
functions.mju_quat2Vel(err_rot, err_rot_quat, 1)
return err_rot
def quat2mat(q):
"""Tranform a quaternion to rotation amtrix.
:param type q: Description of parameter `q`.
:return: 3x3 rotation matrix
:rtype: np.array
"""
mat = np.zeros(9)
functions.mju_quat2Mat(mat, q)
return mat.reshape((3, 3))
def pose_transform(p1, q1, p21, q21):
"""Coordinate transformation between 2 frames
:param np.ndarray p1: position in frame 1
:param np.ndarray q1: orientation (quaternion) in frame 1
:param np.ndarray p21: relative position between frame 1 and 2
:param np.ndarray q21: relative orientation between frame 1 and 2
:return: position and orientation in frame 2
:rtype: type
"""
# quat to rotation matrix
R21 = quat2mat(q21)
p2 = p21 + R21.dot(p1)
q2 = np.zeros_like(q1)
functions.mju_mulQuat(q2, q21, q1) # q2 = q21*q1
return p2, q2
def integrate_quat(q, r, dt):
"""Integrate quaternion by a fixed angular velocity over the duration dt.
:param np.array(4) q: quaternion.
:param np.array(3) r: angular velocity.
:param float dt: duration.
:return: result quaternion.
:rtype: np.array(4)
"""
qres = np.zeros(4)
qe = np.zeros(4)
r = r * dt
angle = np.linalg.norm(r)
if angle < 1e-9:
# if angle too small then return current q
return q.copy()
axis = r / angle
functions.mju_axisAngle2Quat(qe, axis, angle)
functions.mju_mulQuat(qres, qe, q)
return qres
def transform_spatial(v1, q21):
"""Coordinate transformation of a spatial vector. The spatial vector can be either
twist (linear + angular velocity) or wrench (force + torque)
:param type v1: Spatial vector in frame 1
:param type q21: transformation matrix (in terms of quaternion)
:return: Description of returned object.
:rtype: type
"""
R21 = quat2mat(q21)
R = np.block([[R21, np.zeros((3, 3))], [np.zeros((3, 3)), R21]])
return R.dot(v1)
def similarity_transform(A1, q21):
"""Similarity transformation of a matrix from frame 1 to frame 2
A2 = R21 * A1 * R12
:param np.array((3, 3)) A1: 3x3 matrix.
:param np.array(4) q21: quaternion representation.
:return: 3x3 matrix
:rtype: np.array
"""
R21 = quat2mat(q21)
return R21.dot(A1.dot(R21.T))
# NOTE: there are infinite rotation vector solutions for a particular
# orientation, the `ref` is to find the closest solution to a reference.
# Is there another minimal representation that could avoid this?
def quat2vec(q, ref=None):
"""Transform quaternion representation to rotation vector representation"""
r = np.zeros(3)
scale = 1
mujoco_py.functions.mju_quat2Vel(r, q, scale)
if ref is not None:
if r.dot(ref) < 0:
angle = np.linalg.norm(r)
r = r / angle
angle = angle - 2 * np.pi
r = r * angle
return r
def inverse_frame(p, q):
pi, qi = np.zeros(3), np.zeros(4)
functions.mju_negPose(pi, qi, p, q)
return pi, qi
def mat2quat(R):
R = R.flatten()
q = np.zeros(4)
mujoco_py.functions.mju_mat2Quat(q, R)
return q
def mul_quat(q1, q2):
q = np.zeros(4)
mujoco_py.functions.mju_mulQuat(q, q1, q2)
return q
| deanpham98/learn-seq | learn_seq/utils/mujoco.py | mujoco.py | py | 9,113 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "learn_seq.utils.general.get_mujoco_model_path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name"... |
27053376609 | import pytest
from oddEvenList import Solution
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
@pytest.mark.parametrize("nums, expected", [
([1, 2, 3, 4, 5], [1, 3, 5, 2, 4]),
([2, 1, 3, 5, 6, 4, 7], [2, 3, 6, 7, 1, 5, 4])
])
def test_oddEvenList(nums, expected):
head = current = ListNode(nums[0])
for n in nums[1:]:
current.next = ListNode(n)
current = current.next
actual = Solution().oddEvenList(head)
index = 0
while actual:
assert actual.val == expected[index]
actual = actual.next
index += 1
| ikedaosushi/leetcode | problems/python/tests/test_oddEvenList.py | test_oddEvenList.py | py | 613 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "oddEvenList.Solution",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 11,
"usage_type": "attribute"
}
] |
16848170368 |
from app import app, mail
from . import login_required, bad_request, created_request
from flask import jsonify, request
from db import db, User, Department, Role, UserRoles
from sqlalchemy.exc import IntegrityError
from setting import MAX_USER_PER_PAGE
from dateutil import parser as TimeParser
from secrets import token_urlsafe
from flask_mail import Message
from flask import render_template
import json
import phonenumbers
import smtplib
@app.route('/api/user/email/verify', methods=["POST"])
@login_required()
def email_verify():
data = request.get_json()
if "token" not in data:
return bad_request("Missing token")
user = request.session.user
if user.email_token != data['token']:
return jsonify({
'status': "FAIL",
'err': "INVALID_CODE"
})
try:
user.email = user.new_email
user.new_email = None
user.email_token = None
db.session.commit()
except IntegrityError:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "EXIST_ERR"
})
return jsonify({
'status': "OK"
})
@app.route('/api/user/add', methods=['POST'])
@login_required(role_allow=['manager', 'administrator'])
def add_user() -> jsonify:
"""
This route is dedicated to adding new user to the system
Returns:
jsonify : HTTP response
"""
data = request.get_json()
# Validate the incoming request
if "email" not in data or \
"passwd" not in data or \
"department" not in data or \
"name" not in data or \
"role" not in data:
return bad_request('Missing parameter')
if not data['passwd']:
return bad_request("Missing password")
if (type(data['role']) == str and not data['role'].isdigit()) or \
(type(data['department']) == str and not data['department'].isdigit()):
return bad_request('Invalid argument')
department = db.session.query(Department).filter(
Department.id == int(data['department'])).first()
role = db.session.query(Role).filter(Role.id == int(data['role'])).first()
if not department:
return bad_request('Invalid department id')
if not role:
return bad_request('Invalid role id')
# Creating new user and add to the system
try:
user = User(
username=data['name'],
password=data['passwd'],
department=department.id,
email=data['email']
)
db.session.add(user)
db.session.flush()
except IntegrityError:
db.session.rollback()
return created_request('Account with this email/name has already existed')
# Creating a new role record for this user
new_role = UserRoles(userid=user.id, roleid=role.id)
db.session.add(new_role)
db.session.commit()
return jsonify({
'status': "OK"
})
@app.route('/api/user/delete/<user_id>', methods=['DELETE'])
@login_required(role_allow=['manager', 'administrator'])
def delete_user(user_id: str) -> jsonify:
"""
This route is dedicated to deleting existing user out of the system
Returns:
jsonify : HTTP response
"""
if not user_id.isdigit():
return jsonify({
'status': "FAIL",
'err': "Invalid user id."
})
user = db.session.query(User).filter(User.id == int(user_id)).first()
if not user:
return jsonify({
'status': "FAIL",
'err': "User doesn't exist."
})
if user.id == request.session.user.id:
return jsonify({
'status': "FAIL",
'err': "It's not recommended to delete your own account."
})
db.session.delete(user)
db.session.commit()
return jsonify({
'status': "OK"
})
@app.route('/api/user/list', methods=['POST', "GET"])
@login_required(role_allow=['manager', 'administrator'])
def list_all_users() -> jsonify:
"""
This route is dedicated to listing all existing users inside the system
Returns:
jsonify : HTTP response
"""
data = request.get_json()
if 'page' not in data:
return bad_request('Missing argument')
if type(data['page']) != int:
return bad_request('Invalid page number')
if 'exclude' in data and data['exclude']:
temp = db.session.query(User).filter(
User.id != request.session.user.id)
else:
temp = db.session.query(User)
ret = temp.offset(
data['page']*MAX_USER_PER_PAGE).limit(MAX_USER_PER_PAGE).all()
return jsonify([
{
'id': user.id,
'name': user.username,
'did': user.department_id, # Department id
'email': user.email,
'created': user.created_on
} for user in ret
]), 200
@app.route('/api/user/get/<user_id>', methods=['POST', "GET"])
@login_required(allow_personal_user=True)
def get_user_info(user_id):
if not user_id.isdigit():
return jsonify({
'status': "FAIL",
'err': "Invalid user id"
})
user = db.session.query(User).filter(User.id == int(user_id)).first()
if not user:
return jsonify({
'status': "FAIL",
'err': "User doesn't exist"
})
return jsonify({
'status': "OK",
'data': {
'id': user.id,
'name': user.username,
'did': user.department_id,
'email': user.email,
'created': user.created_on,
'phone': user.phone,
'address': user.address,
'theme': user.theme,
'language': user.language,
'gender': user.gender,
'birthday': user.birthday,
'roles': list([urole.role.name for urole in user.userrole_ref.all()])
}
})
def change_user_info(user, data):
role_list = set(
[urole.role.name for urole in request.session.user.userrole_ref.all()])
# Checking if this is an administrator or manager
is_manager = "manager" in role_list
is_admin = "administrator" in role_list
is_privilege = is_manager or is_admin
if not user:
return bad_request("User with this id doesn't exist")
if "name" in data:
# Updating the username only
try:
user.username = data['name'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return created_request('This username has already existed')
if "email" in data:
# Updating the email only
email = data['email'].strip()
if email != user.email:
check = db.session.query(User.email).filter(
User.email == email).first()
if check:
return jsonify({
'status': "FAIL",
'err': "This email has already been used"
})
code = token_urlsafe(100)
user.email_token = code
user.new_email = email
# Creating a message which send to the user email later
msg = Message('Verify your email',
sender=app.config.get("MAIL_USERNAME"),
recipients=[email])
msg.html = render_template(
'email_verify.html', username=user.username, url=user.craft_verify_url(code))
try:
mail.send(msg)
except smtplib.SMTPRecipientsRefused:
pass
if "phone" in data:
# Updating the phone number only
if data['phone'] != '':
try:
number = phonenumbers.parse(data['phone'])
if not phonenumbers.is_possible_number(number):
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Invalid phone number"
})
user.phone = data['phone'].strip()
db.session.flush()
except phonenumbers.phonenumberutil.NumberParseException:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Missing or invalid region code."
})
except IntegrityError:
db.session.rollback()
return created_request('This phone number has already been used')
if "birthday" in data:
# Updating the birthday only
try:
user.birthday = TimeParser.parse(data['birthday'].strip())
db.session.flush()
except:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Unexpected error occurred while setting birthday."
}), 500
if "gender" in data:
# Updating the gender only
try:
user.gender = data['gender'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Invalid gender."
}), 500
if "address" in data:
# Updating the address only
try:
user.address = data['address'].strip()
db.session.flush()
except:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Unexpected error occurred while setting address."
}), 500
if "passwd" in data:
# Updating the password only
# print(is_privilege)
if not is_privilege or user.id == request.session.user.id:
# If someone who isn't privileged try to change password, there should be a current password for tat
# If a person trying to change their own password, there should be current as well
if "cpass" not in data:
# if there is no confirm password
return bad_request('Missing confirm password.')
if not user.check_password(data['cpass']):
return jsonify({
'status': "FAIL",
'err': "INVALID_PASS"
})
try:
user.set_new_password(data['passwd'].strip())
db.session.flush()
except:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': "Unexpected error occurred while setting new password."
}), 500
if "theme" in data:
try:
user.theme = data['theme'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return bad_request('Invalid theme')
if "language" in data:
try:
user.language = data['language'].strip()
db.session.flush()
except IntegrityError:
db.session.rollback()
return bad_request('Invalid language')
if "did" in data:
try:
user.department_id = int(data['did'])
db.session.flush()
except IntegrityError:
return jsonify({
'status': "FAIL",
'err': "Invalid department id"
})
db.session.commit()
@app.route('/api/user/<user_id>/role/update', methods=["POST"])
@login_required(role_allow=["manager", "administrator"])
def add_new_role(user_id):
# There has to be {
# "role" : ID_HERE,
# "action" : False = Delete, True =
# }
data = request.get_json()
if "action" not in data or "role" not in data:
return jsonify({
'status': "FAIL",
'err': "Missing parameters."
})
if not user_id.isdigit():
return jsonify({
'status': "FAIL",
'err': "Invalid user id."
})
user = db.session.query(User).filter(User.id == int(user_id)).first()
if (not user):
return jsonify({
'status': "FAIL",
'err': "Invalid user id."
})
else:
if user.id == request.session.user.id:
return jsonify({
'status': "FAIL",
'err': "It's not recommended to change your own roles."
})
check_role = user.userrole_ref.filter(
UserRoles.roleid == data['role']).first()
if data['action']:
# Adding a new role
if check_role:
# Checking if this role has already been added to this user
return jsonify({
'status': "FAIL",
'err': "This role has already been added."
})
try:
new_role = UserRoles(userid=user.id, roleid=data['role'])
db.session.add(new_role)
db.session.commit()
except IntegrityError:
db.session.rollback()
return jsonify({
'status': "FAIL",
'err': 'Invalid role id'
})
else:
if check_role:
if user.userrole_ref.count() <= 1:
return jsonify({
'status': "FAIL",
'err': "Unable to delete the last role of this user."
})
db.session.delete(check_role)
db.session.commit()
else:
return jsonify({
'status': "FAIL",
'err': "Role doesn't exist"
})
return jsonify({
'status': "OK"
})
@app.route('/api/user/update/<user_id>', methods=['POST'])
@login_required(allow_personal_user=True)
def update_user_info(user_id):
data = request.get_json()
if not user_id.isdigit():
return jsonify({
'status': "FAIL",
'err': "Invalid user id."
})
user = db.session.query(User).filter(User.id == int(user_id)).first()
ret = change_user_info(user, data)
if ret:
return ret
return jsonify({
'status': "OK"
})
@app.route('/api/user/update', methods=["POST"])
@login_required()
def update_self_info():
data = request.get_json()
ret = change_user_info(request.session.user, data)
if ret:
return ret
return jsonify({
'status': "OK"
})
# @app.route('/api/user/<user_id>/role/update', methods=['POST'])
# @login_required(allow_personal_user=True)
# def update_user_role(user_id):
# if not user_id.isdigit():
# return bad_request('Invalid user id')
# data = request.get_json()
# if "new_role" not in data:
# return bad_request('Missing argument')
# role_list = set(data['new_role'])
# ret = db.session.query(UserRoles).filter(UserRoles.userid == int(user_id)).filter(UserRoles.roleid.in_(
# role_list
# )).all()
# for record in ret:
# role_list.remove(record.roleid)
# try:
# db.session.add_all([
# UserRoles(userid=int(user_id), roleid=new_role) for new_role in role_list
# ])
# db.session.commit()
# except IntegrityError:
# db.session.rollback()
# return bad_request('Invalid role id detected')
# return jsonify({
# 'status': "OK"
# })
@app.route('/api/user/<user_id>/export', methods=["GET", 'POST'])
@login_required(allow_personal_user=True)
def export_user_config(user_id):
if not user_id.isdigit():
return bad_request('Invalid user id')
user = db.session.query(User).filter(User.id == int(user_id)).first()
if not user:
return bad_request('User doesn\'t exist')
return jsonify({
"phone": user.phone,
"theme": f"{user.theme}",
"language": f"{user.language}",
"birthday": user.birthday,
"gender": f"{user.gender}",
"address": f"{user.address}"
})
@app.route('/api/user/<user_id>/import', methods=['POST'])
@login_required(allow_personal_user=True)
def import_user_config(user_id):
if not user_id.isdigit():
return bad_request('Invalid user id')
if not request.files.get('file'):
return bad_request('Missing file')
user = db.session.query(User).filter(User.id == int(user_id)).first()
if not user:
return jsonify({
'status': "FAIL",
'msg': 'User doesn\'t exist'
})
config_file = request.files['file']
try:
content = config_file.read().decode()
json_content = json.loads(content)
for (key, value) in json_content.items():
# Some of this can be null, so i have to write a if clause first
# before executing
print(key, value)
(setattr(user, key, TimeParser.parse(value.strip())
if key == "birthday" else value) if value else None)
db.session.commit()
except IntegrityError:
db.session.rollback()
return jsonify({
'status': "FAIL",
'msg': 'Unable to import, either the name or email may have already existed, or the file is malformed'
})
except:
return jsonify({
'status': "FAIL",
'msg': 'Invalid configuration'
})
return jsonify({
'status': "OK"
})
| hackernese/Idea-Manager | backend/api/routes/user.py | user.py | py | 17,364 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.get_json",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "flask.request.session",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask... |
788805808 | import math
import numpy as np
import cv2
import settings
from point import polarvec, Point
class EV3Map:
def __init__(self):
self.settings = settings.SettingsRegistry['global']
self.image = cv2.imread(self.settings.load_map)
self.gray = cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY)
offset = self.settings.camera_shadow_offset
self.gray[self.gray < offset] = 0
self.gray[self.gray >= offset] -= offset
self.image_w = self.image.shape[1]
self.image_h = self.image.shape[0]
self.world_w = self.settings.world_w
self.world_h = self.settings.world_h
self.calib_w2i = Point(self.image_w / self.world_w,
self.image_h / self.world_h)
self.calib_i2w = Point(self.world_w / self.image_w,
self.world_h / self.image_h)
def _world2image(self, p):
return (int(p.x * self.calib_w2i.x),
int(p.y * self.calib_w2i.y))
def image2world(self, image_x, image_y):
return Point(image_x * self.calib_i2w.x,
image_y * self.calib_i2w.y)
def get_circle(self, world_center, world_radius, angle):
center = self._world2image(world_center)
radius = int(world_radius * self.calib_w2i.x)
patch_size = (radius * 2, radius * 2)
patch = cv2.getRectSubPix(self.gray, patch_size, center)
return patch
def get_rectangle(self, world_center, camera_w, camera_h, angle):
center = self._world2image(world_center)
patch_w = int(camera_w * self.calib_w2i.x)
patch_h = int(camera_h * self.calib_w2i.y)
angle_degree = math.degrees(angle)
patch_size = (patch_w, patch_h)
m = cv2.getRotationMatrix2D(center, angle_degree + 90., 1.)
dst = cv2.warpAffine(self.gray, m, (self.image_w, self.image_h), flags=cv2.INTER_LINEAR)
patch = cv2.getRectSubPix(dst, patch_size, center)
return patch
| pfnet-research/chainer-ev3 | simulator2d/map.py | map.py | py | 1,986 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "settings.SettingsRegistry",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2... |
34547539160 | import cv2
# 选择第二只摄影机
cap = cv2.VideoCapture(0)
while(True):
# 从摄影机撷取一张影像
ret, frame = cap.read()
cv2.putText(frame, "aaqqqqqqqqqqqq", (0,100), cv2.FONT_HERSHEY_SIMPLEX,1, (0,0,0))
# 显示图片
cv2.imshow('frame', frame)
cv2.waitKey(1)
if cv2.getWindowProperty('frame', cv2.WND_PROP_AUTOSIZE) == -1:
break
# 释放摄影机
cap.release()
# 关闭所有 OpenCV 视窗
cv2.destroyAllWindows() | Amenoimi/Simple_OCR | cv.py | cv.py | py | 456 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
... |
5217107857 | import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import contextily as ctx
from adjustText import adjust_text
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
file_path = '/Users/jakewatembach/Desktop/meteoriteLandings/Meteorite_Landings.csv'
df = pd.read_csv(file_path)
df['mass (t)'] = df['mass (g)'] / 1e6
df = df.sort_values(by='mass (t)', ascending=False).head(10)
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df['reclong'], df['reclat'])
)
gdf.crs = 'EPSG:4326'
gdf = gdf.to_crs('EPSG:3857')
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
world = world.to_crs('EPSG:3857')
plt.style.use('dark_background')
plt.rcParams['font.family'] = 'Arial'
fig, ax = plt.subplots(figsize=(20, 10))
ax.set_aspect('equal')
ax.margins(x=0.05)
world.plot(ax=ax, edgecolor='white', linewidth=0.5, facecolor='green', alpha=0.3)
ctx.add_basemap(ax, source=ctx.providers.CartoDB.Positron)
meteorite_img = plt.imread('/Users/jakewatembach/Desktop/meteoriteLandings/meteorite.png')
# Get the dimensions of the meteorite image
img_height, img_width, _ = meteorite_img.shape
# Calculate the offset between the center of the meteorite and the center of the flame behind it
x_offset = int(img_width * 0.26)
y_offset = int(img_height * 0.27)
for x, y in zip(gdf.geometry.x, gdf.geometry.y):
imagebox = OffsetImage(meteorite_img, zoom=0.03)
ab = AnnotationBbox(imagebox, (x - x_offset, y - y_offset), frameon=False, box_alignment=(0.5, 0.5))
ax.add_artist(ab)
texts = []
bbox = ax.get_xlim() + ax.get_ylim()
# Calculate the range of the meteorite locations in x and y directions
meteorites_bbox = gdf.total_bounds
x_range = meteorites_bbox[2] - meteorites_bbox[0]
y_range = meteorites_bbox[3] - meteorites_bbox[1]
# Adjust the label positions to ensure they're within the bounds of the plot
for x, y, name, mass in zip(gdf.geometry.x, gdf.geometry.y, gdf['name'], gdf['mass (t)']):
label = f"{name} ({mass:.2f} t)"
# Calculate the offset of the label from the meteorite
label_offset = (0.03 * x_range, 0.03 * y_range)
if x < bbox[0]:
label_offset = (-0.03 * x_range, label_offset[1])
elif x > bbox[1]:
label_offset = (0.03 * x_range, label_offset[1])
if y < bbox[2]:
label_offset = (label_offset[0], -0.03 * y_range)
elif y > bbox[3]:
label_offset = (label_offset[0], 0.03 * y_range)
# Adjust the label position based on the offset
label_x = x + label_offset[0]
label_y = y + label_offset[1]
# Adjust the label position so that it is within the bounds of the plot
if label_x < bbox[0]:
label_x = bbox[0] + (bbox[1] - bbox[0]) * 0.01
if label_x > bbox[1]:
label_x = bbox[1] - (bbox[1] - bbox[0]) * 0.01
if label_y < bbox[2]:
label_y = bbox[2] + (bbox[3] - bbox[2]) * 0.01
if label_y > bbox[3]:
label_y = bbox[3] - (bbox[3] - bbox[2]) * 0.01
texts.append(ax.text(label_x, label_y, label, fontsize=10, ha='center', va='center', color='white',
bbox=dict(boxstyle="round", fc="black", alpha=0.5)))
adjust_text(texts)
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
meteorites_bbox = gdf.total_bounds
padding_factor = 0.2
x_range = meteorites_bbox[2] - meteorites_bbox[0]
y_range = meteorites_bbox[3] - meteorites_bbox[1]
ax.set_xlim(meteorites_bbox[0] - x_range * padding_factor, meteorites_bbox[2] + x_range * padding_factor)
ax.set_ylim(meteorites_bbox[1] - y_range * padding_factor, meteorites_bbox[3] + y_range * padding_factor)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.title('Top 10 Biggest Meteorite Landings', fontsize=20, color='white')
plt.savefig('meteorite_landings.png', dpi=300, bbox_inches='tight')
plt.show()
| jakeww/meteoriteLandings | landings.py | landings.py | py | 3,775 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "geopandas.points_from_xy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "geopan... |
70387200423 | import os
import torch
import timm
import numpy as np
from simple_network import SimpleNetwork
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, Normalize, ToTensor, Resize
from torchvision.models import resnet18
#from torch import nn
from nvflare.apis.dxo import DXO, DataKind, from_shareable
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
class ChestXrayValidator(Executor):
def __init__(self, data_path="/dataset",model_name="resnet18", validate_task_name=AppConstants.TASK_VALIDATION,pretrained_model_path=None):
super().__init__()
self._validate_task_name = validate_task_name
self.pretrained_model_path = pretrained_model_path
if pretrained_model_path and os.path.exists(pretrained_model_path):
state_dict = torch.load(self.pretrained_model_path)
self.model = timm.create_model(model_name, pretrained=False, num_classes=2) # Assuming a binary classification problem in Chest X-ray
self.model.load_state_dict(state_dict, strict=False)
print("Loaded pretrained model from:", pretrained_model_path)
else:
self.model = timm.create_model(model_name, pretrained=True, num_classes=2) # Assuming a binary classification problem in Chest X-ray
# Setup the model
# self.model = resnet18(pretrained=True)
#self.model = torchvision.models.resnet18()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transform = Compose(
[
Resize((224, 224)),
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
val_data_path = os.path.join(data_path,'val')
self._val_dataset = ImageFolder(root=val_data_path, transform=transform)
self._val_loader = DataLoader(self._val_dataset, batch_size=32, shuffle=True,num_workers=4,pin_memory=True)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self._validate(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(
fl_ctx,
f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"
f"s data: {val_accuracy}",
)
dxo = DXO(data_kind=DataKind.METRICS, data={"val_acc": val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def _validate(self, weights, abort_signal):
#self.model.load_state_dict(weights)
# change numpy.ndarray to torch.Tensor
weights = {k: torch.from_numpy(v).to(self.device) if isinstance(v, np.ndarray) else v.to(self.device) for k, v in weights.items()}
# creat new state_dict and del fc.weight andfc.bias
new_state_dict = {k: v for k, v in weights.items() if k not in ["fc.weight", "fc.bias"]}
self.model.load_state_dict(new_state_dict, strict=False)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self._val_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct / float(total)
return metric
| Hamster-yang/FL_chest-xray_timm | app/custom/chestxrayvalidator.py | chestxrayvalidator.py | py | 5,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nvflare.apis.executor.Executor",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "nvflare.app_common.app_constant.AppConstants.TASK_VALIDATION",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "nvflare.app_common.app_constant.AppConstants",
... |
29224787647 | from flask import Flask, request, render_template, redirect, flash
from flask_sqlalchemy import SQLAlchemy
import requests
import sys
import os
app = Flask(__name__)
app.config.from_pyfile("config.py")
db = SQLAlchemy(app)
def return_data_from_api(city_name_value):
api_id = "24034c2fc253da6475cd74bc0b96cf5a"
api_link = f"http://api.openweathermap.org/data/2.5/weather?q={city_name_value}&APPID={api_id}"
return requests.get(api_link).json()
class Weather(db.Model):
id = db.Column(db.Integer, primary_key=True)
city_name = db.Column(db.String(50), unique=True)
db_path = os.path.join("/", "weather.db")
if not os.access(db_path, os.F_OK):
db.create_all()
@app.route('/', methods=["GET", "POST"])
def index():
if request.method == "GET":
from_db = Weather.query.all()
result = []
for entry in from_db:
# city_id = entry.id
city_name = entry.city_name
dict_with_info = return_data_from_api(city_name)
city = dict_with_info["name"]
temperature = int(dict_with_info["main"]["temp"]) - 273
state = dict_with_info["weather"][0]["main"]
result.append({"city_name": city, "temperature": temperature, "state": state})
return render_template('index.html', info=result, x=from_db)
elif request.method == "POST":
city_name = request.form["city_name"]
if return_data_from_api(city_name)["cod"] == "404":
flash("The city doesn't exist!")
return redirect("/")
q = db.session.query(Weather.city_name).filter(Weather.city_name == city_name)
city_in_db = db.session.query(q.exists()).scalar()
if not city_in_db:
new_entry = Weather(city_name=city_name)
db.session.add(new_entry)
db.session.commit()
else:
flash("The city has already been added to the list!")
return redirect("/")
@app.route('/delete/<city_name>', methods=['GET', 'POST'])
def delete(city_name):
city = db.session.query(Weather).filter(Weather.city_name == city_name).first()
print(city, type(city))
db.session.delete(city)
db.session.commit()
return redirect('/')
# don't change the following way to run flask:
if __name__ == '__main__':
if len(sys.argv) > 1:
arg_host, arg_port = sys.argv[1].split(':')
app.run(host=arg_host, port=arg_port)
else:
app.run()
| artem-chigin/weather_app | program.py | program.py | py | 2,458 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
24159423427 | import subprocess
import os
import requests
KEY = 'trnsl.1.1.20161216T160124Z.4a07c4b6a2f01566.ade260e6c684818698899fd08a9c15d72faca843'
URL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
# путь к файлу с текстом;
directory_source = 'Source'
# путь к файлу с результатом;
directory_result = 'Result'
# создаем директорию с результатами перевода
create_directory_out = subprocess.run(['mkdir', '-p', './Result'])
# Получаем список файлов в переменную list_files
list_files = os.listdir(directory_source)
# язык с которого перевести;
def choice_language(file, lang_out):
if file == 'DE.txt':
lang = 'de-'
elif file == 'ES.txt':
lang = 'es-'
else:
lang = 'fr-'
return lang + lang_out
# чтение текста из файла для перевода
def import_text(file):
with open(os.path.join(directory_source, file)) as f:
text = f.readlines()
return text
# Функция перевода
def translate_me(mytext, lang):
"""
YANDEX translation plugin
docs: https://tech.yandex.ru/translate/doc/dg/reference/translate-docpage/
https://translate.yandex.net/api/v1.5/tr.json/translate ?
key=<API-ключ>
& text=<переводимый текст>
& lang=<направление перевода>
& [format=<формат текста>]
& [options=<опции перевода>]
& [callback=<имя callback-функции>]
:param text: <str> text for translation.
:return: <str> translated text.
Args:
mytext:
"""
params = {
'key': KEY,
'text': mytext,
'lang': lang,
}
response = requests.get(URL, params=params)
return response.json()
# запись текста в файл после перевода
def export_text(file, text):
with open(os.path.join(directory_result, file), 'w') as f:
f.write(text)
print('Переведен и сохранен файл ', os.path.join(directory_result, file))
# Пакетный перевод файлов
second_lang = input('Введите язык, на который следует перевести текст файлов, находящихся в папке "Source": ')
for file_name in list_files:
lang_pair = choice_language(file_name, second_lang) # формируем пару языков для параметров перевода
text_for_translate = import_text(file_name) # читаем текст из файла для перевода
json = translate_me(text_for_translate, lang_pair) # переведенный текст
text_after_translate = ' '.join(json['text']) # форматируем переведенный текст
export_text(file_name, text_after_translate) # записываем переведенный текст в файл
| pashkovsky/PY2PM | PY3_2/Homework/translator_files.py | translator_files.py | py | 3,013 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "subprocess.run",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3... |
22354333965 | import os
import re
import typing
import pymysql
import mlrun.utils
class MySQLUtil(object):
dsn_env_var = "MLRUN_HTTPDB__DSN"
dsn_regex = (
r"mysql\+pymysql://(?P<username>.+)@(?P<host>.+):(?P<port>\d+)/(?P<database>.+)"
)
check_tables = [
"projects",
# check functions as well just in case the previous version used a projects leader
"functions",
]
def __init__(self, logger: mlrun.utils.Logger):
self._logger = logger
def wait_for_db_liveness(self, retry_interval=3, timeout=2 * 60):
self._logger.debug("Waiting for database liveness")
mysql_dsn_data = self.get_mysql_dsn_data()
tmp_connection = mlrun.utils.retry_until_successful(
retry_interval,
timeout,
self._logger,
True,
pymysql.connect,
host=mysql_dsn_data["host"],
user=mysql_dsn_data["username"],
port=int(mysql_dsn_data["port"]),
database=mysql_dsn_data["database"],
)
self._logger.debug("Database ready for connection")
tmp_connection.close()
def check_db_has_tables(self):
connection = self._create_connection()
try:
with connection.cursor() as cursor:
cursor.execute(
"SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='mlrun';"
)
if cursor.fetchone()[0] > 0:
return True
return False
finally:
connection.close()
def set_modes(self, modes):
if not modes or modes in ["nil", "none"]:
self._logger.debug("No sql modes were given, bailing", modes=modes)
return
connection = self._create_connection()
try:
self._logger.debug("Setting sql modes", modes=modes)
with connection.cursor() as cursor:
cursor.execute("SET GLOBAL sql_mode=%s;", (modes,))
finally:
connection.close()
def check_db_has_data(self):
connection = self._create_connection()
try:
with connection.cursor() as cursor:
for check_table in self.check_tables:
cursor.execute("SELECT COUNT(*) FROM %s;", (check_table,))
if cursor.fetchone()[0] > 0:
return True
return False
finally:
connection.close()
def _create_connection(self):
mysql_dsn_data = self.get_mysql_dsn_data()
if not mysql_dsn_data:
raise RuntimeError(f"Invalid mysql dsn: {self.get_dsn()}")
return pymysql.connect(
host=mysql_dsn_data["host"],
user=mysql_dsn_data["username"],
port=int(mysql_dsn_data["port"]),
database=mysql_dsn_data["database"],
)
@staticmethod
def get_mysql_dsn_data() -> typing.Optional[dict]:
match = re.match(MySQLUtil.dsn_regex, MySQLUtil.get_dsn())
if not match:
return None
return match.groupdict()
@staticmethod
def get_dsn() -> str:
return os.environ.get(MySQLUtil.dsn_env_var, "")
| mlrun/mlrun | server/api/utils/db/mysql.py | mysql.py | py | 3,216 | python | en | code | 1,129 | github-code | 36 | [
{
"api_name": "mlrun.utils.utils",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "mlrun.utils",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "mlrun.utils.utils.retry_until_successful",
"line_number": 27,
"usage_type": "call"
},
{
"api_n... |
44648666543 | from rest_framework.documentation import include_docs_urls
from django.contrib import admin
from django.urls import path, re_path, include
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
]
urlpatterns += [
path(
'drf-docs/',
include_docs_urls(
title='DRF Docs',
authentication_classes=[],
permission_classes=[],
),
),
]
urlpatterns += [
path('board/', include('board.urls')),
path('account/', include('account.urls')),
]
| Endlex-net/Easting_kanban | kanban/urls.py | urls.py | py | 539 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dja... |
15538629853 | #!/usr/bin/env python
# coding: utf-8
# In[63]:
from PIL import Image, ImageDraw
import numpy as np
import math
import imageio
from copy import deepcopy
import cv2
def to_integral_image(img_arr):
"""
Calculates the integral image based on this instance's original image data.
"""
row_sum = np.zeros(img_arr.shape)
# we need an additional column and row of padding zeros
integral_image_arr = np.zeros((img_arr.shape[0] + 1, img_arr.shape[1] + 1))
for x in range(img_arr.shape[1]):
for y in range(img_arr.shape[0]):
row_sum[y, x] = row_sum[y-1, x] + img_arr[y, x]
integral_image_arr[y+1, x+1] = integral_image_arr[y+1, x-1+1] + row_sum[y, x]
return integral_image_arr
def sum_region(integral_img_arr, top_left, bottom_right):
"""
Calculates the sum in the rectangle specified by the given tuples.
"""
top_left=(top_left[0]-1,top_left[1]-1)
top_right = (bottom_right[0], top_left[1])
bottom_left = (top_left[0], bottom_right[1])
return integral_img_arr[bottom_right] - integral_img_arr[top_right] - integral_img_arr[bottom_left] + integral_img_arr[top_left]
class HaarFeature(object):
"""
Class representing a haar-like feature.
"""
def __init__(self, feature_type, top_left, bottom_right, threshold, polarity, error, weight, flag):
"""
Creates a new haar-like feature with relevant attributes.
"""
self.type = feature_type
self.top_left = top_left
self.bottom_right = bottom_right
self.width = bottom_right[0]-top_left[0]
self.height = bottom_right[1]-top_left[1]
self.threshold = threshold
self.polarity = polarity
self.error=error
self.weight=weight
self.flag=flag
def get_score(self, int_img):
"""
Get score for given integral image array.
"""
score = 0
if self.type == (1,2):
first = sum_region(int_img, self.top_left, (self.top_left[0] + self.width, int(self.top_left[1] + self.height / 2)))
second = sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), self.bottom_right)
score = first - second
elif self.type == (2,1):
first = sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), self.top_left[1] + self.height))
second = sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), self.bottom_right)
score = first - second
return score
def get_vote(self, int_img):
"""
Get vote of this feature for given integral image, the vote is 1 or -1.
"""
score = self.get_score(int_img)
return 1 if self.polarity * (score-self.threshold) >= 0 else -1
#helper function to sum positive numbers in an array
def sum_positive(array):
s=0
l=len(array)
for i in range(l):
if array[i]>0:
s=s+array[i]
return s
#helper function to sum negative numbers in an array
def sum_negative(array):
s=0
l=len(array)
for i in range(l):
if array[i]<0:
s=s+array[i]
return s
#given an array of lables and weights of each image (label), find the threshold for this weaker learner
def find_threshold(array, weights):
index=1
p=1
l=len(array)
output_error=1
temp=np.multiply(array,weights)
Lp=0
Ln=0
Rp=sum_positive(temp)
Rn=sum_negative(temp)
#try every index
for i in range(1,l):
t=temp[i]
if t>0:
Lp=Lp+t
Rp=Rp-t
else:
Ln=Ln+t
Rn=Rn-t
error=min(Lp+abs(Rn),abs(Ln)+Rp)
if error < output_error:
output_error=error
index = i
if Lp+abs(Rn) < Rp+abs(Ln):
p=1
else:
p=-1
#return the best polarity, the index of the image (whose score will be the threshold),
#and the error of this weak learner
return (index,p,output_error)
def learn(features, images, labels, weights):
"""
This is the mean funtion we use, every time we feed the images, labels, features,
and weigts of the images, it will output the current best weaklearner and set the
parameters.
"""
# select classifiers
lenf = len(features)
leni = len(labels)
fi=0
min_error=1
for i in range(lenf):
temp=np.zeros((leni,3))
for j in range(leni):
img=images[j]
x=features[i].get_score(img)
y=labels[j]
temp[j][0]=x
temp[j][1]=y
temp[j][2]=weights[j]
temp=temp[temp[:,0].argsort()]
#get the labels and weights we need to find the threshold for this feature
tup=find_threshold(temp[:,1],temp[:,2])
index=tup[0]
features[i].threshold=temp[index][0]
features[i].polarity=tup[1]
error=tup[2]
#to record the best feature
if (error < min_error) and (features[i].flag==0):
min_error=error
fi=i
# already find the best feature, update its parameters
# flag indicates whether this feature has already been picked before
features[fi].flag=1
features[fi].error=min_error
# find the weight of this chosen weak learner
if min_error>0:
z=2*(min_error*(1-min_error)) ** (1/2)
a = 0.5 * np.log((1 - min_error) / min_error)
else:
a=2
z=0
features[fi].weight=a
# update the weights of the data (images)
if z!=0:
for i in range(leni):
vote=features[i].get_vote(images[i])
weights[i]=weights[i]*math.exp(-a*labels[i]*vote)/z
# normalize the weights
s=np.sum(weights)
weights=weights/s
return (fi,weights)
# This generates all the features we need
all_features=[]
for i in range(1,65,3):
for j in range(1,65,3):
m1=min(i+16,65)
m2=min(j+16,65)
for k1 in range(i,m1,4):
for h1 in range(j+3,m2,4):
f = HaarFeature((1,2),(i,j),(k1,h1),0,1,0,0,0)
all_features.append(f)
for k2 in range(i+3,m1,4):
for h2 in range(j,m2,4):
f = HaarFeature((2,1),(i,j),(k2,h2),0,1,0,0,0)
all_features.append(f)
# given a classifier, this function outputs whether an image is a face or not according to this classifier
def test(img, classifiers, threshold=3):
l=len(classifiers)
s=0
for i in range(l):
s=s+classifiers[i].weight*classifiers[i].get_vote(img)
if s>=threshold:
return (1,s)
return (-1,s)
# load the images
images=[]
labels=[]
for i in range (1000):
j=i
j=str(j)
im = imageio.imread('Downloads/faces/face'+j+'.jpg')
im=np.array(im).mean(axis=2)
im=to_integral_image(im)
images.append(im)
labels.append(1)
for i in range (1000):
j=i
j=str(j)
im = imageio.imread('Downloads/background/'+j+'.jpg')
im=np.array(im).mean(axis=2)
im=to_integral_image(im)
images.append(im)
labels.append(-1)
cascade=[]
thres=[]
iter=0
for j in range(6):
classifiers=[]
for i in range(5):
l=len(images)
weights=np.full(l,1/l)
t = learn(all_features, images, labels, weights)
p=t[0]
weights=t[1]
classifiers.append(all_features[p])
l=len(images)
mini=0
# set the capital Theta to make sure we correctly classify all faces
for i in range(l):
t=test(images[i],classifiers)
if labels[i]==1:
if t[1]<mini:
mini=t[1]
thres.append(deepcopy(mini))
cascade.append(deepcopy(classifiers))
# elimiate the images we correctly classify as backgrounds
images_temp=[]
labels_temp=[]
for i in range(l):
t=test(images[i],classifiers,mini)
if (labels[i]!=t[0]) or (labels[i]==1):
images_temp.append(images[i])
labels_temp.append(labels[i])
images=deepcopy(images_temp)
labels=deepcopy(labels_temp)
# deal with the situation is before we use up all 5 classifiers, the error has already been 0
iter=iter+1
if len(images)<2:
break
def get_v(cascade, int_img):
"""
helper funtion to avoid overlapping of red patches detecting faces
"""
l=len(cascade)
s=0
for i in range(l):
score = cascade[i].get_score(int_img)
ab=abs(cascade[i].polarity * (score-cascade[i].threshold))
s=s+cascade[i].weight*ab
return s
#slide our cascade to detect faces in the test image
coor=np.zeros((1280,1600))
im1=Image.open('Downloads/test_img.jpg')
#add chanels to the black&white test image, so we can draw "red sqaures" on it
im1=cv2.cvtColor(np.array(im1),cv2.COLOR_GRAY2RGB)
img=Image.fromarray(im1, 'RGB')
im2=Image.open('Downloads/test_img.jpg')
imay=np.array(im2)
draw=ImageDraw.Draw(img)
# determine where we shoud place our pateches
for i in range(0,1216,4):
for j in range(0,1536,4):
y=to_integral_image(imay[i:i+64,j:j+64])
flag=0
for k in range(iter):
t=test(y,cascade[k])
if t[0]==1:
flag=flag+1
if (flag>=6):
v=0
for k in range(iter):
v=v+get_v(cascade[k],y)
coor[i,j]=v
# avoid overlapping
for i in range(0,1216,4):
for j in range(0,1536,4):
ff=1
for z1 in range(-7,8):
for z2 in range(-7,8):
if coor[i,j]<coor[i-4*z1,j-4*z2]:
ff=0
if (ff==1) and coor[i,j]>0:
draw.line([(j,i),(j,i+64)],fill='red')
draw.line([(j,i),(j+64,i)],fill='red')
draw.line([(j+64,i),(j+64,i+64)],fill='red')
draw.line([(j,i+64),(j+64,i+64)],fill='red')
img.save('Downloads/result4.jpg')
| lizihao1999/Viola-Jones | hw4.py | hw4.py | py | 9,985 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_numbe... |
32432567007 | from django import template
from django.utils.translation import ugettext as _
from message.models import Message
from milestone.models import Milestone
from lib import utils
register = template.Library()
def displaystatistic(context, name, trans_name, number):
icons_folder = '/media/basepage/images/icons/'
icon = ''
if name == 'message':
icon = 'comment.png'
elif name == 'milestone':
icon = 'clock.png'
elif name == 'wikiboard':
icon = 'page.png'
elif name == 'file':
icon = 'page_white_put.png'
elif name == 'todo':
icon = 'note.png'
icon = icons_folder + icon
return {'icon': icon, 'name': trans_name, 'number': number}
register.inclusion_tag("lib/displaystatistic.html", takes_context=True)(displaystatistic)
| joaquimrocha/Rancho | rancho/lib/templatetags/displaystatistic.py | displaystatistic.py | py | 794 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 7,
"usage_type": "name"
}
] |
35802636506 | from tkinter import *
from PyQt4 import QtGui,QtCore
import cv2
import re
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
class Window(QtGui.QMainWindow):
def __init__(self, parent=None):
super(Window,self).__init__(parent)
self.setGeometry(150,150,680,565)
self.setWindowTitle('Motion Scanner')
self.video = QtGui.QLabel('', self)
self.video.setGeometry(20, 20, 640, 485)
self.btn1 = QtGui.QPushButton('Start', self)
self.btn1.setGeometry(50, 515, 100, 30)
self.btn1.clicked.connect(self.Start)
self.btn3 = QtGui.QPushButton('Scan', self)
self.btn3.setGeometry(170, 515, 100, 30)
self.btn3.clicked.connect(self.Stop)
self.output = QtGui.QLabel('', self)
self.output.setGeometry(290, 515, 150, 30)
myPixmap = QtGui.QPixmap("I:/projects/py/loadin/camera.jpg")
myScaledPixmap = myPixmap.scaled(self.video.size())
self.video.setPixmap(myScaledPixmap)
self.cap = cv2.VideoCapture(1)
self.show()
def Start(self):
self.fps=30
self.timer = QtCore.QTimer()
ret, frame = self.cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
img = QtGui.QImage(frame, frame.shape[1], frame.shape[0], QtGui.QImage.Format_RGB888)
pix = QtGui.QPixmap.fromImage(img)
self.a=frame
self.video.setPixmap(pix)
self.timer.timeout.connect(self.Start)
self.timer.start(1000. / self.fps)
def Stop(self):
cv2.imwrite("Scan1.jpg", self.a)
self.timer.stop()
opts = Options()
opts.set_headless()
assert opts.headless
driver = Firefox(options=opts)
# navigate to the application home page
driver.get("https://images.google.com/")
# click on camera image
search_field = driver.find_element_by_id("qbi")
search_field.click()
driver.find_element_by_class_name('qbtbha.qbtbtxt.qbclr').click()
# clicking on upload image
b = driver.find_element_by_css_selector("input[type=\"file\"]")
b.clear()
# uploading image
b.send_keys("I:\\\\projects\\\\py\\\\Scan1.jpg")
search_form = driver.find_element_by_id('mKlEF')
search_form.submit()
driver.implicitly_wait(30)
# getting results
RESULTS_LOCATOR = "//div/h3/a"
# WebDriverWait(driver, 10).until(
# EC.visibility_of_element_located((By.XPATH, RESULTS_LOCATOR)))
page1_results = driver.find_elements(By.XPATH, RESULTS_LOCATOR)
a = " "
# storing all the results in a
for item in page1_results:
a += item.text
print()
# finding the most repeated word and showing it
frequency = {}
document_text = a
text_string = document_text.lower()
match_pattern = re.findall(r'\b[a-z]{3,15}\b', text_string)
for word in match_pattern:
count = frequency.get(word, 0)
frequency[word] = count + 1
# frequency_list = frequency.keys()
result=max(frequency.keys(), key=(lambda k: frequency[k]))
print(max(frequency.keys(), key=(lambda k: frequency[k])))
cv2.putText(self.a, result, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 3, 4)
self.output.setText(result)
driver.close()
app=QtGui.QApplication(sys.argv)
GUI=Window()
sys.exit(app.exec_()) | lucifer6666/Reverse-Image-Search | revimage.py | revimage.py | py | 3,601 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt4.QtGui.QMainWindow",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "PyQt4.QtGui",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt4.QtGui.QLabel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt4.QtGui... |
70118415145 | import webcolors
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from colormap import rgb2hex
import pandas as pd
from scipy.spatial import KDTree
class ColorNames:
    """Table of the standard CSS/web color names with helpers to map an
    RGB triple onto the closest named web color."""

    # Web color name -> "#RRGGBB" hex string.  Insertion order is kept,
    # which fixes the tie-breaking of findNearestColorName (first minimum
    # wins, as in the original loop-based implementation).
    WebColorMap = {
        "AliceBlue": "#F0F8FF",
        "AntiqueWhite": "#FAEBD7",
        "Aqua": "#00FFFF",
        "Aquamarine": "#7FFFD4",
        "Azure": "#F0FFFF",
        "Beige": "#F5F5DC",
        "Bisque": "#FFE4C4",
        "Black": "#000000",
        "BlanchedAlmond": "#FFEBCD",
        "Blue": "#0000FF",
        "BlueViolet": "#8A2BE2",
        "Brown": "#A52A2A",
        "BurlyWood": "#DEB887",
        "CadetBlue": "#5F9EA0",
        "Chartreuse": "#7FFF00",
        "Chocolate": "#D2691E",
        "Coral": "#FF7F50",
        "CornflowerBlue": "#6495ED",
        "Cornsilk": "#FFF8DC",
        "Crimson": "#DC143C",
        "Cyan": "#00FFFF",
        "DarkBlue": "#00008B",
        "DarkCyan": "#008B8B",
        "DarkGoldenRod": "#B8860B",
        "DarkGray": "#A9A9A9",
        "DarkGrey": "#A9A9A9",
        "DarkGreen": "#006400",
        "DarkKhaki": "#BDB76B",
        "DarkMagenta": "#8B008B",
        "DarkOliveGreen": "#556B2F",
        "Darkorange": "#FF8C00",
        "DarkOrchid": "#9932CC",
        "DarkRed": "#8B0000",
        "DarkSalmon": "#E9967A",
        "DarkSeaGreen": "#8FBC8F",
        "DarkSlateBlue": "#483D8B",
        "DarkSlateGray": "#2F4F4F",
        "DarkSlateGrey": "#2F4F4F",
        "DarkTurquoise": "#00CED1",
        "DarkViolet": "#9400D3",
        "DeepPink": "#FF1493",
        "DeepSkyBlue": "#00BFFF",
        "DimGray": "#696969",
        "DimGrey": "#696969",
        "DodgerBlue": "#1E90FF",
        "FireBrick": "#B22222",
        "FloralWhite": "#FFFAF0",
        "ForestGreen": "#228B22",
        "Fuchsia": "#FF00FF",
        "Gainsboro": "#DCDCDC",
        "GhostWhite": "#F8F8FF",
        "Gold": "#FFD700",
        "GoldenRod": "#DAA520",
        "Gray": "#808080",
        "Grey": "#808080",
        "Green": "#008000",
        "GreenYellow": "#ADFF2F",
        "HoneyDew": "#F0FFF0",
        "HotPink": "#FF69B4",
        "IndianRed": "#CD5C5C",
        "Indigo": "#4B0082",
        "Ivory": "#FFFFF0",
        "Khaki": "#F0E68C",
        "Lavender": "#E6E6FA",
        "LavenderBlush": "#FFF0F5",
        "LawnGreen": "#7CFC00",
        "LemonChiffon": "#FFFACD",
        "LightBlue": "#ADD8E6",
        "LightCoral": "#F08080",
        "LightCyan": "#E0FFFF",
        "LightGoldenRodYellow": "#FAFAD2",
        "LightGray": "#D3D3D3",
        "LightGrey": "#D3D3D3",
        "LightGreen": "#90EE90",
        "LightPink": "#FFB6C1",
        "LightSalmon": "#FFA07A",
        "LightSeaGreen": "#20B2AA",
        "LightSkyBlue": "#87CEFA",
        "LightSlateGray": "#778899",
        "LightSlateGrey": "#778899",
        "LightSteelBlue": "#B0C4DE",
        "LightYellow": "#FFFFE0",
        "Lime": "#00FF00",
        "LimeGreen": "#32CD32",
        "Linen": "#FAF0E6",
        "Magenta": "#FF00FF",
        "Maroon": "#800000",
        "MediumAquaMarine": "#66CDAA",
        "MediumBlue": "#0000CD",
        "MediumOrchid": "#BA55D3",
        "MediumPurple": "#9370D8",
        "MediumSeaGreen": "#3CB371",
        "MediumSlateBlue": "#7B68EE",
        "MediumSpringGreen": "#00FA9A",
        "MediumTurquoise": "#48D1CC",
        "MediumVioletRed": "#C71585",
        "MidnightBlue": "#191970",
        "MintCream": "#F5FFFA",
        "MistyRose": "#FFE4E1",
        "Moccasin": "#FFE4B5",
        "NavajoWhite": "#FFDEAD",
        "Navy": "#000080",
        "OldLace": "#FDF5E6",
        "Olive": "#808000",
        "OliveDrab": "#6B8E23",
        "Orange": "#FFA500",
        "OrangeRed": "#FF4500",
        "Orchid": "#DA70D6",
        "PaleGoldenRod": "#EEE8AA",
        "PaleGreen": "#98FB98",
        "PaleTurquoise": "#AFEEEE",
        "PaleVioletRed": "#D87093",
        "PapayaWhip": "#FFEFD5",
        "PeachPuff": "#FFDAB9",
        "Peru": "#CD853F",
        "Pink": "#FFC0CB",
        "Plum": "#DDA0DD",
        "PowderBlue": "#B0E0E6",
        "Purple": "#800080",
        "Red": "#FF0000",
        "RosyBrown": "#BC8F8F",
        "RoyalBlue": "#4169E1",
        "SaddleBrown": "#8B4513",
        "Salmon": "#FA8072",
        "SandyBrown": "#F4A460",
        "SeaGreen": "#2E8B57",
        "SeaShell": "#FFF5EE",
        "Sienna": "#A0522D",
        "Silver": "#C0C0C0",
        "SkyBlue": "#87CEEB",
        "SlateBlue": "#6A5ACD",
        "SlateGray": "#708090",
        "SlateGrey": "#708090",
        "Snow": "#FFFAFA",
        "SpringGreen": "#00FF7F",
        "SteelBlue": "#4682B4",
        "Tan": "#D2B48C",
        "Teal": "#008080",
        "Thistle": "#D8BFD8",
        "Tomato": "#FF6347",
        "Turquoise": "#40E0D0",
        "Violet": "#EE82EE",
        "Wheat": "#F5DEB3",
        "White": "#FFFFFF",
        "WhiteSmoke": "#F5F5F5",
        "Yellow": "#FFFF00",
        "YellowGreen": "#9ACD32",
    }

    @staticmethod
    def rgbFromStr(s):
        """Decode a '#RRGGBB' string into an (r, g, b) integer triple."""
        return int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16)

    @staticmethod
    def findNearestWebColorName(R, G, B):
        """Return the web color name closest to the (R, G, B) triple."""
        return ColorNames.findNearestColorName(R, G, B, ColorNames.WebColorMap)

    @staticmethod
    def findNearestColorName(R, G, B, Map):
        """Return the key of `Map` whose color minimizes the (scaled)
        Manhattan distance to (R, G, B).  The first minimum in iteration
        order wins on ties."""
        def scaled_distance(name):
            r, g, b = ColorNames.rgbFromStr(Map[name])
            return (abs(R - r) + abs(G - g) + abs(B - b)) * 256
        return min(Map, key=scaled_distance)
def calculate_white(img):
    """Cluster one image into 3 dominant colors and summarize the result.

    `img` is a relative path of the form "<root>/<category>/<name>".  The
    image's opaque pixels are clustered with DominantColors; the returned
    dict holds the category/name parsed from the path, a comma-separated
    "hex(percentage)" string for every cluster, and the nearest web color
    name of the most frequent cluster.
    """
    clusters = 3
    dc = DominantColors(img, clusters)
    colors = dc.dominantColors()
    percentage = dc.get_percentage()
    # Path layout is assumed to be "<root>/<category>/<name>".
    parts = img.split('/')
    category = parts[1]
    name = parts[2]
    col = ""
    # Renamed from `max`/`hex`/`dict` to avoid shadowing Python builtins.
    best_share = 0        # largest percentage seen so far
    main_color = ""       # RGB triple of the dominant cluster
    for i in range(len(colors)):
        hex_code = str(rgb2hex(colors[i][0], colors[i][1], colors[i][2]))
        col = col + hex_code + '(' + str(percentage[i]) + '),'
        if best_share < percentage[i]:
            best_share = percentage[i]
            main_color = colors[i]
    col = col[:-1]  # drop the trailing comma
    main_color = ColorNames.findNearestWebColorName(main_color[0], main_color[1], main_color[2])
    result = {
        "category": category,
        "name": name,
        "color": col,
        "mainColor": main_color
    }
    print(result)
    return result
class DominantColors:
    """Extract the dominant colors of an image via k-means clustering."""

    CLUSTERS = None  # number of clusters (dominant colors) requested
    IMAGE = None     # image path; replaced by the pixel list in dominantColors()
    COLORS = None    # cluster centers (dominant colors) after fitting
    LABELS = None    # per-pixel cluster label after fitting

    def __init__(self, image, clusters=3):
        # `image` is a file path; pixels are only read in dominantColors().
        self.CLUSTERS = clusters
        self.IMAGE = image

    def dominantColors(self):
        """Cluster the image's opaque pixels with k-means and return the
        dominant colors as an integer array of shape (clusters, 3)."""
        # read image
        from PIL import Image
        im = Image.open(self.IMAGE, 'r')
        pixel_values = list(im.getdata())
        pixels = []
        for pv in pixel_values:
            # keep only non-transparent pixels, dropping the alpha channel
            # NOTE(review): assumes an RGBA image (4 components per pixel);
            # an RGB image would raise IndexError here -- confirm inputs
            if (pv[3] > 0):
                pixels.append(pv[:-1])
        if len(pixels) == 0:
            # fully transparent image: fall back to a single black pixel
            pixels.append([0,0,0])
        img = self.IMAGE
        # save image after operations
        self.IMAGE = pixels
        # using k-means to cluster pixels
        diff = 0
        done = False
        if len(pixels) < self.CLUSTERS:
            # too few samples: replicate each pixel so k-means has at
            # least CLUSTERS points to fit
            self.IMAGE = []
            for p in pixels:
                for r in range(self.CLUSTERS * 10):
                    self.IMAGE.append(p)
        while not done:
            try:
                kmeans = KMeans(n_clusters=self.CLUSTERS - diff)
                kmeans.fit(self.IMAGE)
                done = True
            except ValueError:
                # retry with fewer clusters (e.g. not enough distinct samples)
                print("------------------------ERROR---------------------------------------" + str(img))
                diff = diff + 1
                if diff > self.CLUSTERS:
                    # NOTE(review): if every attempt fails, `kmeans` is left
                    # unbound and the lines below raise NameError -- confirm
                    break
        # the cluster centers are our dominant colors.
        self.COLORS = kmeans.cluster_centers_
        # save labels
        self.LABELS = kmeans.labels_
        # returning after converting to integer from float
        return self.COLORS.astype(int)

    def get_percentage(self):
        """Return {cluster_label: percentage of pixels} for the last fit."""
        from collections import Counter, defaultdict
        total = 0
        counter = {}
        c = Counter(self.LABELS)
        for key in sorted(c):
            counter[key] = c[key]
        for k, v in counter.items():
            total = total + v
        percentage = {}
        for k, v in counter.items():
            percentage[k] = v / total * 100
        return percentage
import os
images = []
# Walk test2 recursively and collect every file inside the category
# subdirectories.  The previous version re-walked "test2/<dir>" but built
# paths as "test2/<dir>/<file>" regardless of depth, so files in nested
# subfolders got wrong (nonexistent) paths; using the walk's own `root`
# fixes that.  Files directly under test2 itself are still skipped, as
# before.
for root, dirs, files in os.walk("test2"):
    if root == "test2":
        continue
    for file in files:
        images.append(root + "/" + file)
n = len(images)
from multiprocessing.dummy import Pool as ThreadPool
# thread (not process) pool: the work is mostly I/O and native PIL/sklearn code
pool = ThreadPool(16)
results = pool.map(calculate_white, images)
results = pd.DataFrame(results)
print(results)
results.to_csv('colors.csv', encoding='utf-8', index=False)
| danydepo/ffs-backend | TrainAndTest/Color/color_detection.py | color_detection.py | py | 10,216 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "colormap.rgb2hex",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "sklearn.cluster.KMeans",... |
69905549866 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
"""Perform inference on a single image or all images with a certain extension
(e.g., .jpg) in a folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import glob
import logging
import os
import sys
import time
import numpy
from caffe2.python import workspace
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.utils.stylizeimage as style
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
import detectron.utils.vis as vis_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def infer_method(im,mymethod="back"):
    """Run DensePose/keypoint/mask inference on image `im` (BGR array) and
    render the result according to `mymethod`.

    mymethod:
      "back"    -- composite `im`'s detections over the tools/iron8.jpg background
      "iron"    -- "ironman" rendering using detections from both images
      "style_b" -- stylize the image, re-detect, then composite onto the style
      other     -- plain visualization of boxes/masks/keypoints

    NOTE(review): the "iron", "style_b" and fallback branches reference a
    global `args` (args.output_dir, args.image_second) that is not defined
    in this function -- they raise NameError unless `args` is set up
    elsewhere; confirm.
    """
    logger = logging.getLogger(__name__)
    #styleimage = style.style_method()
    # load the model configuration and the cached pretrained weights
    merge_cfg_from_file("configs/DensePoseKeyPointsMask_ResNet50_FPN_s1x-e2e.yaml")
    cfg.NUM_GPUS = 1
    myweights = cache_url("DensePoseKeyPointsMask_ResNet50_FPN_s1x-e2e.pkl", cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(myweights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    out_name = os.path.join(
        "tools/output", '{}'.format(os.path.basename("myresult") + '.jpg')
    )
    #logger.info('Processing {} -> {}'.format(im_name, out_name))
    im_name = "love.jpg"
    # secondary image used as the background / second subject
    im2 = cv2.imread("tools/iron8.jpg")
    timers = defaultdict(Timer)
    t = time.time()
    with c2_utils.NamedCudaScope(0):
        # detect boxes, segmentations, keypoints and densepose bodies
        cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine.im_detect_all(
            model, im, None, timers=timers)
        if im2 is not None:
            cls_boxes2, cls_segms2, cls_keyps2, cls_bodys2 = infer_engine.im_detect_all(
                model, im2, None, timers=timers)
    logger.info('Inference time: {:.3f}s'.format(time.time() - t))
    for k, v in timers.items():
        logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
    if mymethod == "back":
        # composite the detected subjects of `im` over the `im2` background
        vis_utils.change_background(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im2[:, :, ::-1],
            im_name,
            "static/img",
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
    elif mymethod == "iron":
        # uses detections from both images; cls_*2 exist only when im2 loaded
        vis_utils.ironman(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im2[:, :, ::-1],
            im_name,
            args.output_dir,
            cls_boxes,
            cls_boxes2,
            cls_segms,
            cls_keyps,
            cls_bodys,
            cls_segms2,
            cls_keyps2,
            cls_bodys2,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
    elif mymethod == 'style_b':
        # stylize, re-run detection on the resized input, then composite
        styleimage = cv2.cvtColor(numpy.array(style.stylize_img(im_name,args.image_second)),cv2.COLOR_RGB2BGR)
        resized_im = style.tensor_to_image(style.load_to_mask(im_name))
        opencvImage = cv2.cvtColor(numpy.array(resized_im), cv2.COLOR_RGB2BGR)
        print(opencvImage)
        with c2_utils.NamedCudaScope(0):
            bo,se,ke,bod = infer_engine.im_detect_all(model, opencvImage, None, timers=timers)
        vis_utils.change_background(
            opencvImage[:, :, ::-1],  # BGR -> RGB for visualization
            styleimage[:, :, ::-1],
            "stylized_img.jpg",
            args.output_dir,
            bo,
            se,
            ke,
            bod,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
    else:
        # default: draw boxes/masks/keypoints on the input image
        vis_utils.vis_one_image(
            im[:, :, ::-1],  # BGR -> RGB for visualization
            im_name,
            args.output_dir,
            cls_boxes,
            cls_segms,
            cls_keyps,
            cls_bodys,
            dataset=dummy_coco_dataset,
            box_alpha=0.3,
            show_class=True,
            thresh=0.7,
            kp_thresh=2)
| chhari/tiredwebsite | infer_website.py | infer_website.py | py | 5,203 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "detectron.utils.c2.import_detectron_ops",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "detectron.utils.c2",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "cv2.ocl.setUseOpenCL",
"line_number": 47,
"usage_type": "call"
},
{
"a... |
5313775703 | import numpy as np
import cv2
import os.path as osp
from glob import glob
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import confusion_matrix
import random
"""
return all gif frames as a Python list
"""
def load_gif(path):
    """Load a GIF and return all of its frames as a list of PIL images.

    The palette of frame 0 is re-applied to the frames in the first half
    of the animation (some GIFs store the global palette only once).
    """
    im = Image.open(path)
    n_frames = im.n_frames
    count = 0
    ret = []
    while count < n_frames:
        im.seek(count)
        imframe = im.copy()
        if count == 0:
            # remember the palette of the first frame
            palette = imframe.getpalette()
        elif count <= n_frames // 2:
            # re-apply it to the frames in the first half of the animation
            imframe.putpalette(palette)
        # add the interesting frames
        ret.append(imframe)
        count = count+1
    return ret
def load_gif_gray(path):
    """Open *path* with PIL, convert it to grayscale ('L' mode) and
    return it as a numpy array."""
    grayscale = Image.open(path).convert('L')
    return np.array(grayscale)
def load_gif_color(path):
    """Open *path* with PIL and return the image object unchanged."""
    return Image.open(path)
def get_image_directories(data_path, categories):
    """Return the per-category subdirectory path under *data_path* for
    every entry of *categories*, in order."""
    directories = []
    for category in categories:
        directories.append(osp.join(data_path, category))
    return directories
def load_images(limit, path):
    """Pick up to *limit* random '*.jpg' files from directory *path*.

    Returns (paths, ids), where each id is the file name without its
    directory or extension.
    """
    candidates = glob(osp.join(path, '*.jpg'))
    random.shuffle(candidates)
    image_paths = candidates[:limit]
    image_ids = [osp.split(image_path)[-1].split('.')[0] for image_path in image_paths]
    return image_paths, image_ids
def load_agument_image_paths(agument_path, image_paths, bases):
    """Collect the augmented variants of *image_paths* under *agument_path*.

    For every source image "<...>/<category>/<name>" and every base in
    *bases*, the candidate "<agument_path>/<category>/<base>_<name>" is
    kept when it exists on disk.  Returns (paths, labels, ids), where
    labels are the category names and ids the names without extension.
    """
    agument_paths = []
    agument_labels = []
    agument_ids = []
    for image_path in image_paths:
        parent_dir = osp.split(image_path)[-2]
        category = osp.split(parent_dir)[-1]
        image_name = osp.split(image_path)[-1]
        for base in bases:
            candidate = osp.join(agument_path, category, str(base) + '_' + image_name)
            if not osp.exists(candidate):
                continue
            agument_paths.append(candidate)
            agument_labels.append(category)
            agument_ids.append(image_name.split('.')[0])
    return agument_paths, agument_labels, agument_ids
def show_results(train_image_paths, test_image_paths, train_labels, test_labels,
                 categories, abbr_categories, predicted_categories):
    """
    Plot the row-normalized confusion matrix of the predictions.

    :param train_image_paths: unused, kept for interface compatibility
    :param test_image_paths: unused, kept for interface compatibility
    :param train_labels: unused, kept for interface compatibility
    :param test_labels: ground-truth category name per test image
    :param categories: ordered list of all category names
    :param abbr_categories: abbreviated names used as x-axis tick labels
    :param predicted_categories: predicted category name per test image
    :return: None (draws on the current matplotlib figure)
    """
    cat2idx = {cat: idx for idx, cat in enumerate(categories)}
    # confusion matrix: rows = true category, columns = predicted category
    y_true = [cat2idx[cat] for cat in test_labels]
    y_pred = [cat2idx[cat] for cat in predicted_categories]
    cm = confusion_matrix(y_true, y_pred)
    # `np.float` was removed in NumPy 1.24; use the builtin float instead.
    cm = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
    acc = np.mean(np.diag(cm))
    print(cm)
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap('jet'))
    plt.title('Confusion matrix. Mean of diagonal = {:4.2f}%'.format(acc*100))
    tick_marks = np.arange(len(categories))
    plt.tight_layout()
    plt.xticks(tick_marks, abbr_categories, rotation=45)
plt.yticks(tick_marks, categories) | CS6220-YelpImageSearch/YelpImageSearch | evaluation/utils.py | utils.py | py | 3,045 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
27254983972 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
Script to obtain uncertainties of heavy mass spectrum and widhts via bootstrap
Authors: A. Ramirez-Morales (andres.ramirez.morales@cern.ch) and
H. Garcia-Tecocoatzi
-----------------------------------------------------------------------------
"""
import sys
import os
from iminuit import Minuit
import numpy as np
import datetime
import pandas as pd
import json
# framework modules
from bottomfw.baryons import data_preparation as dp
from bottomfw.baryons.bottom_three_quark import BottomThreeQuark
workpath = os.getcwd()
# for running batch jobs with htcondor
# CLI: <script> <batch_number> <workpath> <baryons>
batch_number = None
run_baryons = None
if len(sys.argv) == 4:
    batch_number = sys.argv[1]
    workpath = sys.argv[2]
    run_baryons = sys.argv[3]
# load the run configuration from the JSON file under workpath
config = None
with open(workpath+"/config/three_quark_config.json", "r") as jsonfile:
    config = json.load(jsonfile)
if config is not None:
    # CLI argument takes precedence over the configured baryon family
    if run_baryons is None:
        run_baryons = config["baryons"]
    n_events = config["n_events"]                # number of bootstrap iterations
    asymmetric = config["asymmetric_errors"]     # report asymmetric error bars?
    decay_width = config["decay_width"]          # compute strong decay widths?
    decay_width_em = config["decay_width_em"]    # compute electromagnetic decay widths?
    bootstrap = config["bootstrap_mass"]
    bootstrap_width = config["bootstrap_st_dec"]
    bootstrap_width_em = config["bootstrap_em_dec"]
    prev_params = config["previous_param"]
else:
    sys.exit('Please provide a configuration file. Try again!')
print('Getting paper results for:', run_baryons)
# input parameters: per-state quark counts, mode/family flags and prefactors
param_v,param_w,param_x,param_y,param_z,param_q1,param_q2,param_q3,\
param_is_rho,param_is_lam,param_is_omega,param_is_cascade,param_is_sigma = dp.fetch_data_extended()
def model(q1, q2, q3, is_rho, is_lam, is_omega, is_cascade, is_sigma, v, w, x, y, z, m1, m2, m3, k, a, b, e, g):
    """
    Mass model for the three-quark baryon spectrum.

    m1 is the bottom-quark mass, m2 the strange-quark mass and m3 the
    light-quark mass; q1..q3 count the quarks of each flavor.  The is_*
    flags select the excitation mode (rho/lambda) and the baryon family,
    which fix the effective mass entering the oscillator term; k, a, b,
    e and g are the fitted couplings with prefactors v, w, x, y and z.
    """
    quark_masses = q1*m1 + q2*m2 + q3*m3
    # effective mass of the rho-mode excitation, per baryon family
    rho_mass = is_rho*(is_omega*m2 + is_cascade*((m2+m3)/2) + is_sigma*m3)
    # effective mass of the lambda-mode excitation, per baryon family
    lam_mass = is_lam*(is_omega*((3*m2*m1)/(2.*m2+m1)) + is_cascade*((1.5*(m2+m3)*m1)/(m1+m2+m3)) + is_sigma*((3.*m3*m1)/(2.*m3+m1)))
    oscillator = v*k*np.sqrt(1./(rho_mass + lam_mass))
    return quark_masses + oscillator + w*a + x*b + y*e + z*g
def least_squares(m1, m2, m3, k, a, b, e, g):
    """Chi-square-like cost of the mass model against the sampled
    experimental masses.

    Uses the module globals `param_*` (state definitions) and `exp_m`
    (the current bootstrap draw of measured masses); a constant variance
    is assumed for every state.
    """
    variance = 0.001
    predicted = model(param_q1, param_q2, param_q3, param_is_rho, param_is_lam,
                      param_is_omega, param_is_cascade, param_is_sigma, param_v,
                      param_w, param_x, param_y, param_z,
                      m1, m2, m3, k, a, b, e, g)
    squared_residuals = np.power(predicted - exp_m, 2)
    return np.sum(np.divide(squared_residuals, variance))
def fit(least_squares):
    """Minimize the given cost function with iminuit and return the Minuit
    object after migrad (quark masses bounded to physical ranges, MeV)."""
    m = Minuit(least_squares, m1=1, m2=1, m3=1, k=0, a=0, b=0, e=0, g=0)#1400, m2=300, m3=250, k=0, a=0, b=0, e=0, g=0)
    m.limits['m1'] = (4000, 6000)
    m.limits['m2'] = (400, 470)
    m.limits['m3'] = (250, 300)
    # least-squares errordef so parameter errors match a chi-square cost
    m.errordef=Minuit.LEAST_SQUARES
    m.migrad()
    return m
def sample_gauss(mu, sigma):
    """Draw 10000 samples from the normal distribution N(mu, sigma)."""
    samples = np.random.normal(mu, sigma, 10000)
    return samples
def random(sample, random_n=1):
    """Draw one element from *sample* uniformly at random.

    `random_n` is unused and kept only for interface compatibility.
    """
    drawn = np.random.choice(sample, size=None)
    return drawn
# arrays to store the sampled parameters
sampled_k,sampled_a,sampled_b,sampled_e,sampled_g = ([]),([]),([]),([]),([])
sampled_m1,sampled_m2,sampled_m3 = ([]),([]),([])
# arrays to store sampled correlation coeficients
rho_m2m1,rho_m3m1,rho_km1,rho_am1,rho_bm1,rho_em1,rho_gm1 = ([]),([]),([]),([]),([]),([]),([])
rho_m3m2,rho_km2,rho_am2,rho_bm2,rho_em2,rho_gm2,rho_km3 = ([]),([]),([]),([]),([]),([]),([])
rho_am3,rho_bm3,rho_em3,rho_gm3,rho_ak,rho_bk,rho_ek = ([]),([]),([]),([]),([]),([]),([])
rho_gk, rho_ba, rho_ea, rho_ga, rho_eb,rho_gb,rho_ge = ([]),([]),([]),([]),([]),([]),([])
# start bootstrap
start = datetime.datetime.now()
sigma_model = 12.47**2 # to be obtained with optimization (Li.Jin)
# gaussian pdf with the measured value and with experimental and model(sigma_model) uncertainties
# Omegas
gauss_6061 = sample_gauss(6045.2, np.power((1.20**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6316 = sample_gauss(6315.6, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6330 = sample_gauss(6330.3, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6340 = sample_gauss(6339.7, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6350 = sample_gauss(6349.8, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
# Cascade b sextet
gauss_5935 = sample_gauss(5935.0, np.power((0.05**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_5953 = sample_gauss(5953.8, np.power((1.62**2 + sigma_model), 0.5 )) # PDG::Average
gauss_6328 = sample_gauss(6227.4, np.power((1.69**2 + sigma_model), 0.5 )) # PDG::Average (decided to be cascade prime)
# Sigma b
gauss_5813 = sample_gauss(5813.1, np.power((2.55**2 + sigma_model), 0.5 )) # PDG::Average
gauss_5837 = sample_gauss(5832.5, np.power((2.23**2 + sigma_model), 0.5 )) # PDG::Average
gauss_6097 = sample_gauss(6096.9, np.power((2.10**2 + sigma_model), 0.5 )) # PDG::Average
# Lambda b
gauss_5617 = sample_gauss(5619.6, np.power((0.17**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_5912 = sample_gauss(5912.2, np.power((0.17**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_5920 = sample_gauss(5920.1, np.power((0.17**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6146 = sample_gauss(6146.2, np.power((0.40**2 + sigma_model), 0.5 )) # PDG::Direct (not in the fit)
gauss_6152 = sample_gauss(6152.5, np.power((0.40**2 + sigma_model), 0.5 )) # PDG::Direct (not in the fit)
gauss_6070 = sample_gauss(6072.3, np.power((2.90**2 + sigma_model), 0.5 )) # PDG::Direct (not in the fit)
# Cascades b anti-3-plet
gauss_5794 = sample_gauss(5794.5, np.power((2.61**2 + sigma_model), 0.5 )) # PDG::Average
gauss_6100 = sample_gauss(6100.3, np.power((0.60**2 + sigma_model), 0.5 )) # PDG::Direct
gauss_6327 = sample_gauss(6329.9, np.power((2.72**2 + sigma_model), 0.5 )) # LHCB::Average (not in the fit)
# plug here the sigma_0 optimization lines from data_utils.py
count = 0
# construct the simulated sampling distribution (bootstrap technique):
# on each iteration draw one value per measured mass, refit, and record
# the fitted parameters and their correlations
for i in range(n_events): # max 10000 with decays included, computationally expensive
    #if(states=='All'):
    exp_m = np.array([ # measured baryon masses (one random draw per state)
        # omegas
        random(gauss_6061),
        random(gauss_6316),
        random(gauss_6330),
        random(gauss_6340),
        random(gauss_6350),
        # Cascade
        random(gauss_5935),
        random(gauss_5953),
        random(gauss_6328),
        # Sigma b
        random(gauss_5813),
        random(gauss_5837),
        random(gauss_6097),
        # Lambda b
        random(gauss_5617),
        random(gauss_5912),
        random(gauss_5920),
        # random(gauss_6146),
        # random(gauss_6152),
        # Cascades
        random(gauss_5794),
        random(gauss_6100),
        # random(gauss_6327)
    ])
    # perform the parameter fitting (via minimizing squared distance)
    m = fit(least_squares)
    # keep only fits that produced a valid covariance matrix
    if type(m.covariance) != type(None):
        count += 1
    else:
        continue
    sampled_m1 = np.append(sampled_m1, m.values['m1'])
    sampled_m2 = np.append(sampled_m2, m.values['m2'])
    sampled_m3 = np.append(sampled_m3, m.values['m3'])
    sampled_k = np.append(sampled_k, m.values['k'])
    sampled_a = np.append(sampled_a, m.values['a'])
    sampled_b = np.append(sampled_b, m.values['b'])
    sampled_e = np.append(sampled_e, m.values['e'])
    sampled_g = np.append(sampled_g, m.values['g'])
    # correlation matrix: record every pairwise correlation coefficient
    corr = m.covariance.correlation()
    rho_m2m1 = np.append(rho_m2m1, corr['m2','m1'])
    rho_m3m1 = np.append(rho_m3m1, corr['m3','m1'])
    rho_km1 = np.append(rho_km1, corr['k','m1'])
    rho_am1 = np.append(rho_am1, corr['a','m1'])
    rho_bm1 = np.append(rho_bm1, corr['b','m1'])
    rho_em1 = np.append(rho_em1, corr['e','m1'])
    rho_gm1 = np.append(rho_gm1, corr['g','m1'])
    rho_m3m2 = np.append(rho_m3m2, corr['m3','m2'])
    rho_km2 = np.append(rho_km2 , corr['k','m2'])
    rho_am2 = np.append(rho_am2 , corr['a','m2'])
    rho_bm2 = np.append(rho_bm2 , corr['b','m2'])
    rho_em2 = np.append(rho_em2 , corr['e','m2'])
    rho_gm2 = np.append(rho_gm2 , corr['g','m2'])
    rho_km3 = np.append(rho_km3, corr['k','m3'])
    rho_am3 = np.append(rho_am3, corr['a','m3'])
    rho_bm3 = np.append(rho_bm3, corr['b','m3'])
    rho_em3 = np.append(rho_em3, corr['e','m3'])
    rho_gm3 = np.append(rho_gm3, corr['g','m3'])
    rho_ak = np.append(rho_ak, corr['a','k'])
    rho_bk = np.append(rho_bk, corr['b','k'])
    rho_ek = np.append(rho_ek, corr['e','k'])
    rho_gk = np.append(rho_gk, corr['g','k'])
    rho_ba = np.append(rho_ba, corr['b','a'])
    rho_ea = np.append(rho_ea, corr['e','a'])
    rho_ga = np.append(rho_ga, corr['g','a'])
    rho_eb = np.append(rho_eb, corr['e','b'])
    rho_gb = np.append(rho_gb, corr['g','b'])
    rho_ge = np.append(rho_ge, corr['g','e'])
# print bootstrap means and standard deviations of the fitted parameters
print(round(sampled_m1.mean()), "mb", round(sampled_m1.std()) )
print(round(sampled_m2.mean()), "ms", round(sampled_m2.std()) )
print(round(sampled_m3.mean()), "mn", round(sampled_m3.std()) )
print("K", pow(sampled_k.mean(), 2)/(1000**3), "KB", pow(sampled_k.std(), 2)/(1000**3))
print("A", sampled_a.mean(), " PS ", sampled_a.std())
print("B", sampled_b.mean(), " PSL ", sampled_b.std())
print("E", sampled_e.mean(), " PI ", sampled_e.std())
print("G", sampled_g.mean(), " PF ", sampled_g.std())
# save bootstrap results (interactive run -> tables/, batch run -> batch_results/)
df = pd.DataFrame({"M1" : sampled_m1,"M2" : sampled_m2,"M3" : sampled_m3,
                   "K" : sampled_k, "A" : sampled_a,
                   "B": sampled_b, "E" : sampled_e, "G" : sampled_g})
if batch_number is None:
    if not os.path.exists(workpath+"/tables/"):
        os.makedirs(workpath+"/tables/")
    df.to_csv(workpath+"/tables/bootstrap_param_"+run_baryons+".csv", index=False)
else:
    if not os.path.exists(workpath+"/batch_results/"+run_baryons+"/parameters/"):
        os.makedirs(workpath+"/batch_results/"+run_baryons+"/parameters/")
    df.to_csv(workpath+"/batch_results/"+run_baryons+"/parameters/"+str(batch_number)+".csv", index=False)
# create dictionaries passed to the mass/width calculation
param = {'q1':param_q1, 'q2':param_q2, 'q3':param_q3,'is_rho':param_is_rho, 'is_lam':param_is_lam,'is_omega':param_is_omega,
         'is_cascade':param_is_cascade, 'is_sigma':param_is_sigma,'V':param_v, 'W':param_w, 'X':param_x, 'Y':param_y, 'Z':param_z}
sampled = {'sampled_m1':sampled_m1,'sampled_m2':sampled_m2,'sampled_m3':sampled_m3,'sampled_k':sampled_k,
           'sampled_a':sampled_a, 'sampled_b':sampled_b, 'sampled_e':sampled_e, 'sampled_g':sampled_g}
corr_mat_ext ={'rho_m2m1':rho_m2m1, 'rho_m3m1':rho_m3m1, 'rho_km1':rho_km1, 'rho_am1':rho_am1, 'rho_bm1':rho_bm1, 'rho_em1':rho_em1, 'rho_gm1':rho_gm1,
               'rho_m3m2':rho_m3m2, 'rho_km2':rho_km2, 'rho_am2':rho_am2, 'rho_bm2':rho_bm2, 'rho_em2':rho_em2, 'rho_gm2':rho_gm2, 'rho_km3':rho_km3,
               'rho_am3':rho_am3, 'rho_bm3':rho_bm3, 'rho_em3':rho_em3, 'rho_gm3':rho_gm3, 'rho_ak':rho_ak, 'rho_bk':rho_bk, 'rho_ek':rho_ek,
               'rho_gk':rho_gk, 'rho_ba':rho_ba, 'rho_ea':rho_ea, 'rho_ga':rho_ga, 'rho_eb':rho_eb, 'rho_gb':rho_gb, 'rho_ge':rho_ge}
df = pd.DataFrame(corr_mat_ext)
# save the sampled correlation coefficients alongside the parameters
if batch_number is None:
    if not os.path.exists(workpath+"/tables/"):
        os.makedirs(workpath+"/tables/")
    df.to_csv(workpath+"/tables/bootstrap_correlation_"+run_baryons+".csv", index=False)
else:
    if not os.path.exists(workpath+"/batch_results/"+run_baryons+"/correlation/"):
        os.makedirs(workpath+"/batch_results/"+run_baryons+"/correlation/")
    df.to_csv(workpath+"/batch_results/"+run_baryons+"/correlation/"+str(batch_number)+".csv", index=False)
# calculate masses and widths using the bootstraped fitted parameters
results = BottomThreeQuark(baryons=run_baryons, params=param, sampled=sampled, corr_mat=corr_mat_ext, asymmetric=asymmetric,
                           decay_width=decay_width, bootstrap_width=bootstrap_width, decay_width_em=decay_width_em, bootstrap_width_em=bootstrap_width_em, batch_number=batch_number, workpath=workpath)
results.fetch_values()
results.paper_results_predictions(bootstrap=bootstrap, bootstrap_width=bootstrap_width, prev_params=prev_params)
end = datetime.datetime.now()
elapsed_time = end - start
print(count, "no. successes")
print("Elapsed total time = " + str(elapsed_time))
| Ailierrivero/bottom-baryonsFW-copy | scripts/bootstrap_three_quark.py | bootstrap_three_quark.py | py | 12,649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 2... |
15521725834 | '''
40. Combination Sum II
Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sum to target.
Each number in candidates may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
'''
class Solution:
    def combination(self, nums, target, smallest):
        """Return every multiset (as a list) drawn from the multiplicity
        map `nums` that sums to `target`, using only values >= `smallest`.

        `nums` maps candidate value -> remaining count; counts are
        decremented during recursion and restored before returning.
        """
        if target == 0:
            return [[]]
        # values that still have at least one copy available
        available = [value for value, count in nums.items() if count > 0]
        if not available or target < min(available):
            # nothing left to pick, or every remaining value overshoots
            return []
        combos = []
        for value in nums:
            if nums[value] <= 0 or value < smallest:
                continue
            nums[value] -= 1
            partials = self.combination(nums, target - value, value)
            nums[value] += 1
            for partial in partials:
                partial.append(value)
            combos += partials
        return combos

    def combinationSum2(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        Unique combinations summing to `target`; each candidate may be
        used at most as many times as it occurs in `candidates`.
        """
        if not candidates:
            return []
        from collections import defaultdict
        counts = defaultdict(int)
        candidates.sort()
        for candidate in candidates:
            counts[candidate] += 1
        return self.combination(counts, target, candidates[0])
class Solution2:
    def combination(self, candidates, target):
        """Return every multiset from the SORTED list `candidates` that
        sums to `target`.  Equal values are handled as one run: each run
        contributes 1..len(run) copies combined with combinations from
        the suffix after the run, which avoids duplicate results.
        """
        if target == 0:
            return [[]]
        if not candidates or target < candidates[0]:
            return []
        results = []
        start = 0
        size = len(candidates)
        while start < size:
            value = candidates[start]
            # find the end of the run of equal values
            stop = start
            while stop < size and candidates[stop] == value:
                stop += 1
            remainder = candidates[stop:]
            # take 1..run-length copies of `value`, then recurse on the suffix
            for copies in range(1, stop - start + 1):
                for combo in self.combination(remainder, target - value * copies):
                    combo += [value] * copies
                    results.append(combo)
            start = stop
        return results

    def combinationSum2(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        Unique combinations summing to `target`; each candidate may be
        used at most as many times as it occurs in `candidates`.
        """
        if not candidates:
            return []
        candidates.sort()
        return self.combination(candidates, target)
if __name__ == '__main__':
print(Solution2().combinationSum2([1],2))
print(Solution2().combinationSum2([1,2,8],9)) | MarshalLeeeeee/myLeetCodes | 40-combinationSum2.py | 40-combinationSum2.py | py | 3,071 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 63,
"usage_type": "call"
}
] |
23504518781 | import struct
from pathlib import Path
from npkpy.common import NPKError, NPKIdError, NPKMagicBytesError
from npkpy.npk.npk_constants import CNT_HANDLER
from npkpy.npk.cnt_basic import BYTES_LEN_CNT_ID, BYTES_LEN_CNT_PAYLOAD_LEN
from npkpy.npk.npk_file_basic import FileBasic
MAGIC_BYTES = b"\x1e\xf1\xd0\xba"
BYTES_LEN_MAGIC_HEADER = 4
BYTES_LEN_PCK_SIZE_LEN = 4
"""
0____4____8____b____f
| | | | |
0_|AAAA|BBBB| C ..... |
1_|....|....|....|....|
A = MAGIC BYTES (4)
B = PCK SIZE (4)
C = Begin of Container area
"""
class Npk(FileBasic):
    """Parsed representation of an .npk package file.

    File layout (see module diagram): 4 magic bytes, a 4-byte
    little-endian payload length, then a sequence of containers.
    """

    __cnt_list = None  # lazily-built list of parsed container objects

    def __init__(self, file_path: Path):
        super().__init__(file_path)
        self.cnt_offset = 8  # containers start after magic (4) + size (4)
        self._data = self.read_data_from_file(offset=0, size=self.cnt_offset)
        self._check_magic_bytes(error_msg="Magic bytes not found in Npk file")
        self.pck_header = self.pck_cnt_list[0]

    @property
    def pck_magic_bytes(self):
        # raw 4-byte magic prefix of the file
        return struct.unpack_from("4s", self._data, 0)[0]

    @property
    def pck_payload_len(self):
        # refresh the stored size first, in case containers were modified
        self.__pck_payload_size_update()
        payload_len = struct.unpack_from("I", self._data, 4)[0]
        return payload_len

    def __pck_payload_size_update(self):
        # re-sum all container lengths when any container was modified
        if any(cnt.modified for cnt in self.pck_cnt_list):
            current_size = 0
            for cnt in self.pck_cnt_list:
                current_size += cnt.cnt_full_length
                cnt.modified = False
            # NOTE(review): struct.pack_into needs a mutable buffer; this
            # assumes read_data_from_file returned a bytearray -- confirm
            # against FileBasic
            struct.pack_into("I", self._data, 4, current_size)

    @property
    def pck_full_size(self):
        # total on-disk size: header fields plus payload
        return BYTES_LEN_MAGIC_HEADER + BYTES_LEN_PCK_SIZE_LEN + self.pck_payload_len

    @property
    def pck_full_binary(self):
        # serialize the whole package: header followed by every container
        binary = MAGIC_BYTES + struct.pack("I", self.pck_payload_len)
        for cnt in self.pck_cnt_list:
            binary += cnt.cnt_full_binary
        return binary

    @property
    def pck_enumerate_cnt(self):
        # yields (position, container) pairs
        for pos, cnt in enumerate(self.pck_cnt_list):
            yield pos, cnt

    @property
    def pck_cnt_list(self):
        # parse containers on first access, then reuse the cached list
        if not self.__cnt_list:
            self.__cnt_list = self.__parse_all_cnt()
        return self.__cnt_list

    def __parse_all_cnt(self):
        # walk the file container by container until the end of the file
        lst = []
        offset = self.cnt_offset
        while offset < self.file.stat().st_size - 1:
            lst.append(self.__get_cnt(offset))
            offset += BYTES_LEN_CNT_ID + BYTES_LEN_CNT_PAYLOAD_LEN + lst[-1].cnt_payload_len
        return lst

    def __get_cnt(self, offset):
        """Parse one container at `offset`: 2-byte id, 4-byte payload
        length, then the payload; dispatch to the matching handler."""
        cnt_id = struct.unpack_from("H", self.read_data_from_file(offset, 2))[0]
        payload_len = struct.unpack_from("I", self.read_data_from_file(offset + BYTES_LEN_CNT_ID, 4))[0]
        pkt_len = BYTES_LEN_CNT_ID + BYTES_LEN_CNT_PAYLOAD_LEN + payload_len
        data = self.read_data_from_file(offset, pkt_len)
        if len(data) != pkt_len:
            # short read: declared payload length exceeds the file size
            raise NPKError(f"File maybe corrupted. Please download again. File: {self.file.absolute()}")
        try:
            return CNT_HANDLER[cnt_id](data, offset)
        except KeyError as e:
            raise NPKIdError(f"Failed with cnt id: {cnt_id}\n"
                             f"New cnt id discovered in file: {self.file.absolute()}") from e

    def _check_magic_bytes(self, error_msg):
        # raise unless the file starts with the expected magic bytes
        if not self.pck_magic_bytes == MAGIC_BYTES:
            raise NPKMagicBytesError(error_msg)
| botlabsDev/npkpy | npkpy/npk/npk.py | npk.py | py | 3,324 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "npkpy.npk.npk_file_basic.FileBasic",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "struct.unpack_from",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "st... |
73360141224 | # django imports
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
# portlets imports
import portlets.utils
from portlets.models import PortletAssignment
from portlets.models import PortletBlocking
from portlets.models import PortletRegistration
from portlets.models import Slot
# lfs imports
import lfs.core.utils
from lfs.core.utils import LazyEncoder
@login_required
def portlets_inline(request, obj, template_name="manage/portlets/portlets_inline.html"):
    """Render the assigned portlets for the given object as an HTML string.

    Shows the object's own slots, and (if the object has a portlet parent)
    the parent's slots for inheritance display.
    """
    ct = ContentType.objects.get_for_model(obj)
    parent_for_portlets = obj.get_parent_for_portlets()
    if parent_for_portlets:
        parent_slots = portlets.utils.get_slots(parent_for_portlets)
    else:
        parent_slots = None
    return render_to_string(template_name, RequestContext(request, {
        "slots": portlets.utils.get_slots(obj),
        "parent_slots": parent_slots,
        "parent_for_portlets": parent_for_portlets,
        "portlet_types": PortletRegistration.objects.filter(active=True),
        "object": obj,
        "object_type_id": ct.id,
    }))
@login_required
def update_portlets(request, object_type_id, object_id):
    """Update per-slot portlet blocking for the given object.

    Slots whose ids appear in the POSTed ``block_slot`` list are blocked;
    all other slots get any existing blocking removed. Returns the
    refreshed portlets inline HTML as JSON.
    """
    # Resolve the object whose portlet blocking is being edited.
    object_ct = ContentType.objects.get(pk=object_type_id)
    obj = object_ct.get_object_for_this_type(pk=object_id)

    blocked_slots = request.POST.getlist("block_slot")
    for slot in Slot.objects.all():
        if str(slot.id) in blocked_slots:
            try:
                PortletBlocking.objects.create(
                    slot_id=slot.id, content_type_id=object_type_id, content_id=object_id)
            except IntegrityError:
                # Already blocked; nothing to do.
                pass
        else:
            try:
                pb = PortletBlocking.objects.get(
                    slot=slot, content_type=object_type_id, content_id=object_id)
                pb.delete()
            except PortletBlocking.DoesNotExist:
                pass

    html = portlets_inline(request, obj)
    result = simplejson.dumps({
        "html": html,
        "message": _(u"Portlet has been updated.")},
        cls=LazyEncoder
    )
    return HttpResponse(result)
@login_required
def add_portlet(request, object_type_id, object_id, template_name="manage/portlets/portlet_add.html"):
    """Form (GET) and save logic (POST) to add a portlet to an object."""
    # Resolve the object to which the portlet should be added.
    object_ct = ContentType.objects.get(pk=object_type_id)
    obj = object_ct.get_object_for_this_type(pk=object_id)

    # NOTE(review): request.REQUEST was removed in Django 1.9; kept as-is
    # because this file targets a legacy Django version.
    portlet_type = request.REQUEST.get("portlet_type", "")

    if request.method == "GET":
        try:
            # NOTE(review): filter(...)[0] raises IndexError (not
            # DoesNotExist) for an unknown type -- the except below would
            # not catch it; confirm intended behavior before changing.
            portlet_ct = ContentType.objects.filter(model=portlet_type.lower())[0]
            mc = portlet_ct.model_class()
            form = mc().form(prefix="portlet")
            return render_to_response(template_name, RequestContext(request, {
                "form": form,
                "object_id": object_id,
                "object_type_id": object_ct.id,
                "portlet_type": portlet_type,
                "slots": Slot.objects.all(),
            }))
        except ContentType.DoesNotExist:
            pass
    else:
        try:
            ct = ContentType.objects.filter(model=portlet_type.lower())[0]
            mc = ct.model_class()
            form = mc().form(prefix="portlet", data=request.POST)
            portlet = form.save()

            slot_id = request.POST.get("slot")
            position = request.POST.get("position")
            PortletAssignment.objects.create(
                slot_id=slot_id, content=obj, portlet=portlet, position=position)
            html = portlets_inline(request, obj)
            result = simplejson.dumps({
                "html": html,
                "message": _(u"Portlet has been added.")},
                cls=LazyEncoder
            )
            return HttpResponse(result)
        except ContentType.DoesNotExist:
            pass
@login_required
def delete_portlet(request, portletassignment_id):
    """Delete the portlet assignment with the given id and redirect back.

    A missing assignment is silently ignored; either way the user is sent
    back to the referring page with a status message cookie.
    """
    try:
        pa = PortletAssignment.objects.get(pk=portletassignment_id)
    except PortletAssignment.DoesNotExist:
        pass
    else:
        pa.delete()

    return lfs.core.utils.set_message_cookie(
        request.META.get("HTTP_REFERER"),
        msg=_(u"Portlet has been deleted."))
@login_required
def edit_portlet(request, portletassignment_id, template_name="manage/portlets/portlet_edit.html"):
    """Form (GET) and save logic (POST) for an existing portlet assignment."""
    try:
        pa = PortletAssignment.objects.get(pk=portletassignment_id)
    except PortletAssignment.DoesNotExist:
        # NOTE(review): Django views must return an HttpResponse; returning
        # "" will raise downstream -- kept for behavioral compatibility.
        return ""

    if request.method == "GET":
        # Build slot choices, marking the currently assigned slot.
        slots = []
        for slot in Slot.objects.all():
            slots.append({
                "id": slot.id,
                "name": slot.name,
                "selected": slot.id == pa.slot.id,
            })
        form = pa.portlet.form(prefix="portlet")
        return render_to_response(template_name, RequestContext(request, {
            "form": form,
            "portletassigment_id": pa.id,
            "slots": slots,
            "position": pa.position,
        }))
    else:
        form = pa.portlet.form(prefix="portlet", data=request.POST)
        portlet = form.save()

        # Save slot / position changes on the assignment itself.
        pa.slot_id = request.POST.get("slot")
        pa.position = request.POST.get("position")
        pa.save()

        html = portlets_inline(request, pa.content)
        result = simplejson.dumps({
            "html": html,
            "message": _(u"Portlet has been saved.")},
            cls=LazyEncoder
        )
        return HttpResponse(result)
return HttpResponse(result) | django-lfs/lfs | manage/views/lfs_portlets.py | lfs_portlets.py | py | 6,283 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.c... |
19449055889 | from django.db import models
import re
import statistics
from .direct_indicator import DirectIndicator
from .question_response import QuestionResponse
find_square_bracket_keys = re.compile(r"\[(.*?)\]")
class IndirectIndicator(models.Model):
topic = models.ForeignKey('Topic', related_name='indirect_indicators', on_delete=models.SET_NULL, null=True)
method = models.ForeignKey("Method", related_name="indirect_indicators", on_delete=models.CASCADE, null=True)
key = models.CharField(max_length=255, blank=False)
formula = models.CharField(max_length=1000, unique=False, blank=False)
name = models.CharField(max_length=255, unique=False, blank=False)
description = models.TextField(blank=True, null=True)
pre_unit = models.CharField(max_length=30, blank=True, default="") # Examples: $,€
post_unit = models.CharField(max_length=30, blank=True, default="") # Examples: %, points, persons
cut_off_lower_limit = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
cut_off_upper_limit = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
TEXT = "text"
INTEGER = "integer"
DOUBLE = "double"
DATE = "date"
BOOLEAN = "boolean"
SINGLECHOICE = "singlechoice"
MULTIPLECHOICE = "multiplechoice"
DATA_TYPES = (
(TEXT, "text"),
(INTEGER, "integer"),
(DOUBLE, "double"),
(DATE, "date"),
(BOOLEAN, "boolean"),
(SINGLECHOICE, "singlechoice"),
(MULTIPLECHOICE, "multiplechoice")
)
datatype = models.CharField(max_length=50, blank=False, choices=DATA_TYPES, default="text")
PERFORMANCE = "performance"
SCORING = "scoring"
CERTIFICATION = "certification"
INDICATOR_TYPES = (
(PERFORMANCE, "performance"),
(SCORING, "scoring"),
(CERTIFICATION, "certification")
)
type = models.CharField(max_length=50, blank=False, choices=INDICATOR_TYPES, default="scoring")
calculation = ''
absolute_weights = []
indicator_impact = None
critical_impact_by = {}
value = None
has_conditionals = False
exception = None
exception_detail = None
responses = None
# used to find absolute weights
expression = ''
class Meta:
unique_together = ['key', 'method']
def __init__(self, *args, **kwargs):
    """Initialize the model instance and derive the working formula.

    ``calculation`` is a single-line working copy of ``formula`` that is
    mutated during evaluation; ``formula`` itself stays pristine.
    """
    super(IndirectIndicator, self).__init__(*args, **kwargs)
    self.calculation = self.formula.replace("\n", "")
    # Formulas starting with IF are evaluated via calculate_conditionals().
    if self.calculation.strip().startswith("IF"):
        self.has_conditionals = True
def __str__(self):
    """Use the indicator key as the human-readable representation."""
    return self.key
# calculation_keys are all indicators that are used within the formula of this indirect indicator
@property
def calculation_keys(self):
    """Unique keys of indicators referenced by the working calculation.

    Keys are the ``[...]`` placeholders in ``calculation``; the indicator's
    own key is excluded to avoid self-reference.
    """
    calculation_keys = re.findall(find_square_bracket_keys, self.calculation)
    calculation_keys_uniques = list(set(calculation_keys))
    if self.key in calculation_keys_uniques:
        calculation_keys_uniques.remove(self.key)
    return calculation_keys_uniques
# Used for calculation of absolute weights
@property
def formula_keys(self):
    """Unique keys referenced by the pristine formula (self-key excluded).

    Unlike ``calculation_keys`` this inspects ``formula``, which is never
    mutated, and is used when computing absolute weights.
    """
    formula_keys = re.findall(find_square_bracket_keys, self.formula)
    calculation_keys_uniques = list(set(formula_keys))
    if self.key in calculation_keys_uniques:
        calculation_keys_uniques.remove(self.key)
    return calculation_keys_uniques
def find_weights(self, weight_dict):
    """Store the given weight mapping as this indicator's absolute weights.

    The dict is wrapped in a single-element list and also returned.
    """
    self.absolute_weights = [weight_dict]
    return self.absolute_weights
# Replaces indicator keys with corresponding value to be able to calculate the indirect indicator (used in 'utils > calculate_indicators')
def find_values(self, key_value_list):
    """Substitute indicator values into the working calculation string.

    Each ``[key]`` placeholder is replaced with the value from
    ``key_value_list``; dict values are collapsed to the key with the
    largest count. Substitution only happens when every value is known.
    """
    calculation = self.calculation
    if None not in key_value_list.values():
        for calculation_key in self.calculation_keys:
            if calculation_key in key_value_list:
                value = key_value_list[calculation_key]
                if isinstance(value, dict):
                    # Pick the dict key with the highest associated value.
                    value = max(value, key=value.get)
                calculation = calculation.replace(f"[{calculation_key}]", f"{value}")
        self.calculation = calculation
    else:
        print('Missing values in key_value_list!')
# Calculates indicator formula
def calculate(self):
    """Evaluate the indicator formula and store the result in ``self.value``.

    Three evaluation modes:
    - conditional (IF/THEN/ELSE) formulas -> calculate_conditionals()
    - aggregate functions (sum/avg/min/max/median/mode) over the responses
      of the referenced direct indicator
    - plain arithmetic -> eval() of the substituted calculation string
    """
    if len(self.calculation_keys) and not self.has_conditionals:
        self.exception = Exception("Not all keys are replaced with values")
        return
    self.exception = None
    self.error = None
    functionList = ['sum(', 'avg(', 'min(', 'max(', 'median(', 'mode(']

    if self.has_conditionals:
        self.value = None
        value = self.calculate_conditionals()
        self.value = value
    elif any(func in self.calculation for func in functionList):
        key = re.findall(find_square_bracket_keys, self.formula)
        if len(key):
            # NOTE(review): esea_account is hard-coded to 4 -- looks like
            # leftover debug code; confirm before relying on this branch.
            question_responses = QuestionResponse.objects.filter(
                survey_response__esea_account=4, survey_response__finished=True)
            directind = DirectIndicator.objects.filter(method=self.method, key=key[0]).first()
            indirectind = IndirectIndicator.objects.filter(method=self.method, key=key[0]).first()
            if directind is not None:
                indicator = directind
                indicator.filter_responses(question_responses)
                responses = [float(r) for r in indicator.responses]
                if 'avg(' in self.calculation:
                    self.value = sum(responses) / len(responses)
                elif 'sum(' in self.calculation:
                    self.value = sum(responses)
                elif 'min(' in self.calculation:
                    self.value = min(responses)
                elif 'max(' in self.calculation:
                    self.value = max(responses)
                elif 'median(' in self.calculation:
                    self.value = statistics.median(responses)
                elif 'mode(' in self.calculation:
                    self.value = statistics.mode(responses)
            else:
                # No direct indicator found to aggregate over.
                self.value = 1
                print('There are no responses to calculate the sum with.')
        return
    else:
        try:
            self.expression = self.formula
            # SECURITY: eval() on a stored formula string -- safe only if
            # formulas are authored by trusted users; do not expose to
            # untrusted input.
            self.value = eval(self.calculation)
            return self.value
        except Exception:
            print('error!', self.calculation, self.has_conditionals)
            self.value = None
# Calculates conditional formulas (IF..THEN..)
def calculate_conditionals(self, verbose=False):
    """Evaluate an IF/THEN/ELSE formula and return the result as a string.

    The formula is split into clauses on the IF/THEN/ELSE keywords; IF
    clauses are evaluated (supporting AND/OR), and when a condition fails
    the matching ELSE branch is located by counting nested IF/ELSE pairs.
    The first reachable assignment clause (``[key] = value``) produces the
    return value.
    """
    # Split the working calculation into clauses.
    formula = self.calculation.replace('IF', '@@IF').replace('ELSE', '##ELSE').replace('THEN', '%%THEN')
    formula = [x.strip() for x in re.split('@@|##|%%', formula)]
    formula = list(filter(lambda x: x != '', formula))
    if verbose:
        print(f'\n {self.key}:::::::::: Start Conditional Calculations... \nformula: {formula}')
    # Same split over the pristine formula; used to record self.expression
    # (with placeholders intact) for weight extraction.
    full_formula = self.formula.replace('IF', '@@IF').replace('ELSE', '##ELSE').replace('THEN', '%%THEN')
    full_formula = [x.strip() for x in re.split('@@|##|%%', full_formula)]
    full_formula = list(filter(lambda x: x != '', full_formula))

    ifs = 1
    elses = 0
    last_if = False      # True when the most recent IF evaluated truthy
    search_else = False  # True while skipping to the matching ELSE
    val = None
    for i, cond in enumerate(formula):
        bracket_keys = list(set(re.findall(find_square_bracket_keys, cond)))
        if self.key in bracket_keys:
            bracket_keys.remove(self.key)
        if len(bracket_keys):
            # Unsubstituted keys remain -- the clause cannot be evaluated.
            print('Invalid Partial Condition: ', bracket_keys)
        # Skip clauses until the ELSE matching the failed IF is found.
        if search_else:
            if 'IF' in cond:
                ifs += 1
            if 'ELSE' in cond:
                elses += 1
            if ifs != elses:
                continue
            else:
                search_else = False
                last_if = True
                ifs = 1
                elses = 0
        # Evaluate an IF clause (possibly compound with AND/OR).
        if 'IF' in cond:
            cond = cond.replace('IF', '').replace('(', '').replace(')', '').replace('"', '').strip()
            last_if = False
            if 'AND' in cond:
                conds = cond.split("AND")
                conds = self.process_expression(conds)
                # SECURITY: eval() on formula fragments -- trusted-author
                # formulas only.
                evaluatedconds = [eval(n) for n in conds]
                if False not in evaluatedconds:
                    last_if = True
                else:
                    search_else = True
                continue
            if 'OR' in cond:
                conds = cond.split("OR")
                conds = self.process_expression(conds)
                evaluatedconds = [eval(n) for n in conds]
                if True in evaluatedconds:
                    last_if = True
                else:
                    search_else = True
                continue
            cond = self.process_expression(cond)
            if eval(cond):
                last_if = True
            else:
                search_else = True
            continue
        # Serve the conditional outcome (assignment clause).
        if (last_if and '=' in cond) or (cond == formula[-1]):
            cond = cond.replace('(', '').replace(')', '')
            [var, val] = cond.split('=')
            var = var.replace('THEN', '').replace('ELSE', '')
            var = var.replace('[', '').replace(']', '').strip()
            if var != self.key:
                raise Exception('Assignment variable does not match the key of this indirect indicator')
            val = val.replace('"', '')
            if verbose:
                print('====', self.key, val)
            # Record the matching pristine clause for weight extraction.
            self.expression = full_formula[i]
            try:
                val = eval(val)
            except Exception:
                # Non-numeric outcome (plain text) is returned as-is.
                pass
            return str(val)
def process_expression(self, conds):
    """Quote the operands of comparison expressions for safe eval().

    Each condition is split on comparison operators and every non-operator
    token is lower-cased, stripped and wrapped in double quotes so that
    string-vs-string comparisons evaluate correctly. Accepts a single
    condition or a list; returns the same shape it was given.
    """
    allowedOperators = ['<', '<=', '==', '>=', '>', '=']
    if not isinstance(conds, list):
        conds = [conds]
    for index, cond in enumerate(conds):
        processed_cond = re.split('(<|<=|==|>=|>|=)', cond)
        for idx, value in enumerate(processed_cond):
            if value not in allowedOperators:
                # Quote the operand so eval() compares strings.
                processed_cond[idx] = f'"{value.strip().lower()}"'
        conds[index] = ''.join(processed_cond)
    if len(conds) == 1:
        conds = conds[0]
    return conds
return conds | sergioespana/openESEA | backend/core/models/indirect_indicator.py | indirect_indicator.py | py | 11,653 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.mode... |
40171037803 | import pygame
import config
pygame.init()
class Text:
    """Render and update text on a pygame window.

    Each instance represents one category of on-screen text (e.g. a
    velocity readout or a position readout), so categories can be enabled
    and disabled independently.
    """

    white = config.WHITE
    black = config.BLACK
    possible_positions = ('topleft', 'topright', 'bottomright', 'bottomleft',
                          'center')

    def __init__(self, game_window, font_size=config.default_font_size):
        """Store the pygame window handle and prepare the font."""
        self.game_window = game_window
        self.text_surfaces = []
        self.text_rects = []
        self.text_positions = []
        self.font_size = int(font_size)
        self.normal_font = pygame.font.Font(config.font, self.font_size)

    def text_objects(self, text: str, font, color: tuple):
        """Return a rendered text surface and its bounding rect."""
        text_surface = font.render(text, True, color)
        return text_surface, text_surface.get_rect()

    def message_display(self, text, x: int, y: int,
                        position: str = 'topleft',
                        color: tuple = white):
        """Blit *text* with its rect anchored at (x, y).

        *position* names the rect attribute used as the anchor, e.g.
        'bottomright' or 'center'. Unknown anchors fall back to 'topleft'.
        """
        text_surface, text_rect = self.text_objects(
            text=text, font=self.normal_font, color=color)
        if position not in Text.possible_positions:
            # BUG FIX: the f-prefix was missing (the literal "{position}"
            # was printed) and the adjacent string literals were fused
            # without a separating space.
            print(f"WARNING: {position} does not exist! "
                  "Defaulting to 'topleft'.")
            position = "topleft"
        setattr(text_rect, position, (x, y))
        # Erase any previous text with a background-colored rectangle.
        self.game_window.fill(Text.black, text_rect)
        # Blit the new text onto the surface.
        self.game_window.blit(text_surface, text_rect)
        # Remember surface/rect/anchor so change_text() can update later.
        self.text_surfaces.append(text_surface)
        self.text_rects.append(text_rect)
        self.text_positions.append(position)
        pygame.display.update()
        return text_surface

    def change_text(self, index: int, new_text: str) -> None:
        """Replace the text at *index*, reusing its previous anchor point."""
        prev_rect = self.text_rects[index]
        color = Text.white
        text_surface, text_rect = self.text_objects(
            text=new_text, font=self.normal_font, color=color)
        # Anchor the new rect exactly where the old one was.
        position = self.text_positions[index]  # e.g. 'topleft', 'center'
        prev_rect_position = getattr(prev_rect, position)
        setattr(text_rect, position, prev_rect_position)
        # Fill old text with background color using the previous rect.
        self.game_window.fill(Text.black, prev_rect)
        self.game_window.blit(text_surface, text_rect)
        pygame.display.update([text_rect, prev_rect])
        # Update the stored surface and rect for this slot.
        self.text_surfaces[index] = text_surface
        self.text_rects[index] = text_rect
| tbone-iii/Car-Driving-Simulator | text_display.py | text_display.py | py | 3,644 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "config.WHITE",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "config.BLACK",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "config.default_font_si... |
2285190842 | from setuptools import setup
VERSION = "1.0.3"
setup(
    name="builder",
    version=VERSION,
    license="Apache License 2.0",
    author="The Open Peer Power Authors",
    author_email="hello@openpeerpower.io",
    url="https://openpeerpower.io/",
    # BUG FIX: "form" -> "from".
    description="Opp.io wheels builder from Open Peer Power.",
    long_description="",
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        # BUG FIX: a missing comma previously fused these two classifiers
        # into one invalid trove entry.
        "Topic :: Home Automation",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: Atmospheric Science",
        "Development Status :: 5 - Production/Stable",
        # Removed duplicate "Intended Audience :: Developers" entry.
        "Programming Language :: Python :: 3.7",
    ],
    keywords=["docker", "openpeerpower", "opp.io"],
    zip_safe=False,
    platforms="any",
    packages=["builder"],
    include_package_data=True,
)
| actions-marketplace-validations/OpenPeerPower_wheels | setup.py | setup.py | py | 989 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 5,
"usage_type": "call"
}
] |
4957095145 | import pytest
@pytest.mark.asyncio
async def test_healthcheck(test_client_rest):
    """The /healthcheck endpoint responds 200 with an 'ok' status body."""
    response = await test_client_rest.get("http://test/healthcheck")
    assert response.status_code == 200
    data = response.json()
    assert data == {"status": "ok"}
| riuzaver/market-temp-test | {{cookiecutter.project_name}}/tests/test_healthcheck.py | test_healthcheck.py | py | 255 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.mark",
"line_number": 4,
"usage_type": "attribute"
}
] |
20522871772 | from django.shortcuts import render, get_object_or_404
from .models import Project
from django.db.models import Q
# Create your views here.
def render_producto(request):
    """List products, optionally filtered by the 'buscar' query parameter.

    When a search term is given, matches it case-insensitively against the
    product title or description.
    """
    # Cleanup: removed leftover debug print() statements (one of which
    # contained inappropriate text).
    queryset = request.GET.get("buscar")
    productos = Project.objects.all()
    if queryset:
        productos = Project.objects.filter(
            Q(title__icontains=queryset) |
            Q(description__icontains=queryset)
        ).distinct()
    return render(request, 'producto.html', {"productos": productos})
def producto_detail(request, producto_id):
    """Render the detail page for one product plus a few other products.

    Returns 404 if the product does not exist.
    """
    producto = get_object_or_404(Project, pk=producto_id)
    # NOTE(review): "related" products are simply the first four products,
    # not items actually related to this one -- confirm intent.
    related_products = Project.objects.all()[:4]
    return render(request, 'producto_detail.html', {"productos": producto, 'related_products': related_products})
return render(request, 'producto_detail.html', {"productos": producto, 'related_products':related_products}) | Eliothd2/Imprentala-Website | productos/views.py | views.py | py | 871 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Project.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Project.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.Project",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": ... |
19150802136 | '''
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
class ShallowResNetBlock(torch.nn.Module):
    """Basic (two 3x3 conv) residual block used by ResNet-18/34.

    input -> Conv3x3 -> BN -> ReLU -> Conv3x3 -> BN (+ shortcut) -> ReLU
      |                                             |
      ------------- (Conv1x1 -> BN ->) --------------
    """

    def __init__(self, in_channel, out_channel, downsample=False):
        """Args:
            in_channel: channels of the block input
            out_channel: channels of the block output
            downsample: halve the spatial resolution (stride 2) when True
        """
        super(ShallowResNetBlock, self).__init__()
        # Main branch, first conv (stride 2 when downsampling).
        if downsample:
            self.conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=2, padding=1, bias=False)
        else:
            self.conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=1, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(out_channel)
        self.relu1 = torch.nn.ReLU(inplace=True)
        # Main branch, second conv.
        self.conv2 = torch.nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(out_channel)
        # Shortcut: a projection (Conv1x1 + BN) is needed whenever the
        # spatial size or channel count changes; otherwise identity.
        if downsample:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=2, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        elif in_channel != out_channel:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        else:
            self.shortcut_conv1 = None
            self.shortcut_bn1 = None
        # Activation applied after merging the two branches.
        self.relu_out = torch.nn.ReLU(inplace=True)

    def forward(self, inputs):
        """Apply the residual block to a (N, C, H, W) tensor."""
        # Main branch.
        main = self.conv1(inputs)
        main = self.bn1(main)
        main = self.relu1(main)
        main = self.conv2(main)
        main = self.bn2(main)
        # Shortcut branch.
        if self.shortcut_conv1 is not None:
            shortcut = self.shortcut_conv1(inputs)
            shortcut = self.shortcut_bn1(shortcut)
        else:
            shortcut = inputs
        # Merge and activate.
        outs = self.relu_out(main + shortcut)
        return outs
class DeepResNetBlock(torch.nn.Module):
    """Bottleneck (1x1 -> 3x3 -> 1x1) residual block used by ResNet-50+.

    input -> Conv1x1 -> BN -> ReLU -> Conv3x3 -> BN -> ReLU -> Conv1x1 -> BN (+ shortcut) -> ReLU
      |                                                            |
      -------------------------- (Conv1x1 -> BN ->) ----------------
    """

    def __init__(self, in_channel, out_channel, downsample=False):
        """Args:
            in_channel: channels of the block input
            out_channel: channels of the block output (4x the bottleneck width)
            downsample: halve the spatial resolution (stride 2) when True
        """
        super(DeepResNetBlock, self).__init__()
        # Bottleneck width is a quarter of the output channels.
        mid_channel = int(out_channel / 4)
        # Block 1: squeeze in_channel -> mid_channel.
        self.conv1 = torch.nn.Conv2d(in_channel, mid_channel, kernel_size=1, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(mid_channel)
        self.relu1 = torch.nn.ReLU(inplace=True)
        # Block 2: 3x3 conv (stride 2 when downsampling).
        if downsample:
            self.conv2 = torch.nn.Conv2d(mid_channel, mid_channel, kernel_size=3, padding=1, stride=2, bias=False)
        else:
            self.conv2 = torch.nn.Conv2d(mid_channel, mid_channel, kernel_size=3, padding=1, bias=False)
        self.bn2 = torch.nn.BatchNorm2d(mid_channel)
        self.relu2 = torch.nn.ReLU(inplace=True)
        # Block 3: expand mid_channel -> out_channel.
        self.conv3 = torch.nn.Conv2d(mid_channel, out_channel, kernel_size=1, bias=False)
        self.bn3 = torch.nn.BatchNorm2d(out_channel)
        # Shortcut projection when spatial size or channel count changes.
        if downsample:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=2, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        elif in_channel != out_channel:
            self.shortcut_conv1 = torch.nn.Conv2d(in_channel, out_channel, kernel_size=1, bias=False)
            self.shortcut_bn1 = torch.nn.BatchNorm2d(out_channel)
        else:
            self.shortcut_conv1 = None
            self.shortcut_bn1 = None
        # Activation applied after merging the two branches.
        self.relu_out = torch.nn.ReLU(inplace=True)

    # BUG FIX: the original forward was declared as `def forward(inputs):`
    # (missing `self`) and never returned `outs`, so the block always
    # crashed or yielded None when called.
    def forward(self, inputs):
        """Apply the bottleneck block to a (N, C, H, W) tensor."""
        # Main branch.
        main = self.conv1(inputs)
        main = self.bn1(main)
        main = self.relu1(main)
        main = self.conv2(main)
        main = self.bn2(main)
        main = self.relu2(main)
        main = self.conv3(main)
        main = self.bn3(main)
        # Shortcut branch.
        if self.shortcut_conv1 is not None:
            shortcut = self.shortcut_conv1(inputs)
            shortcut = self.shortcut_bn1(shortcut)
        else:
            shortcut = inputs
        # Merge and activate.
        outs = self.relu_out(main + shortcut)
        return outs
# SHALLOW_BLOCK = 0
# DEEP_BLOCK = 1
class ResNet(torch.nn.Module):
    """Common scaffolding for ResNet variants (stem and classifier head).

    Subclasses must populate ``self.block1`` .. ``self.block4`` (stages 2-5).
    """

    def __init__(self, input_size, res_out_channel, num_classes, resnet_type, **kargs):
        """
        Args:
            input_size: input pic size without batch dim, e.g. (3, 32, 32)
            res_out_channel: channels leaving stage 5 -- 512 for
                ResNet-18/34 or 2048 for ResNet-50 and deeper
            num_classes: size of the final classifier output
            resnet_type:
                "7x7": ImageNet-style stem (7x7 conv stride 2 + max-pool)
                "3x3": CIFAR-style stem (3x3 conv, no early downsample)
        Raises:
            RuntimeError: for an unknown ``resnet_type``.
        """
        super(ResNet, self).__init__(**kargs)
        self.input_size = input_size
        self.type = resnet_type
        if resnet_type == "7x7":
            # Stage 1: two downsampling steps (conv stride 2 + pool stride 2).
            self.conv1 = torch.nn.Conv2d(input_size[0], 64, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = torch.nn.BatchNorm2d(64)
            self.relu1 = torch.nn.ReLU(inplace=True)
            self.pool1 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            # Stage 6: total downsample factor through the net is 32.
            self.pool2 = torch.nn.AvgPool2d(int(input_size[1] / 32))
            self.flatten = torch.nn.Flatten()
            self.linear1 = torch.nn.Linear(res_out_channel, num_classes)
        elif resnet_type == "3x3":
            # Stage 1: no downsampling at the stem.
            self.conv1 = torch.nn.Conv2d(input_size[0], 64, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = torch.nn.BatchNorm2d(64)
            self.relu1 = torch.nn.ReLU(inplace=True)
            # Stage 6: only stages 3-5 downsample, factor 8 total.
            self.pool2 = torch.nn.AvgPool2d(int(input_size[1] / 8))
            self.flatten = torch.nn.Flatten()
            self.linear1 = torch.nn.Linear(res_out_channel, num_classes)
        else:
            raise RuntimeError("Invalid resnet type")
        # Stages 2~5: filled in by subclasses.
        self.block1 = None
        self.block2 = None
        self.block3 = None
        self.block4 = None

    def forward(self, inputs):
        """Run stem, residual stages and classifier head; returns logits."""
        # Stage 1 (stem).
        outputs = self.conv1(inputs)
        outputs = self.bn1(outputs)
        outputs = self.relu1(outputs)
        if self.type == "7x7":
            outputs = self.pool1(outputs)
        # Stages 2-5 (residual stages, provided by the subclass).
        outputs = self.block1(outputs)
        outputs = self.block2(outputs)
        outputs = self.block3(outputs)
        outputs = self.block4(outputs)
        # Stage 6 (head).
        outputs = self.pool2(outputs)
        outputs = self.flatten(outputs)
        outputs = self.linear1(outputs)
        return outputs
class ShallowResNet(ResNet):
    """ResNet built from basic two-conv blocks (ResNet-18/34)."""

    def __init__(self, input_size, num_classes, block_num_list, resnet_type="3x3", **kargs):
        """``block_num_list`` gives the number of blocks in stages 2-5."""
        super(ShallowResNet, self).__init__(input_size, 512, num_classes, resnet_type, **kargs)
        for i in range(4):
            assert block_num_list[i] > 0, "block num needs greater than 0!"
        # Stage 2: 64 -> 64, no downsampling.
        block1_list = []
        for i in range(block_num_list[0]):
            block1_list.append(ShallowResNetBlock(64, 64))
        self.block1 = torch.nn.Sequential(*block1_list)
        # Stage 3: first block downsamples and widens 64 -> 128.
        block2_list = []
        for i in range(block_num_list[1]):
            if i == 0:
                block2_list.append(ShallowResNetBlock(64, 128, downsample=True))
            else:
                block2_list.append(ShallowResNetBlock(128, 128))
        self.block2 = torch.nn.Sequential(*block2_list)
        # Stage 4: 128 -> 256.
        block3_list = []
        for i in range(block_num_list[2]):
            if i == 0:
                block3_list.append(ShallowResNetBlock(128, 256, downsample=True))
            else:
                block3_list.append(ShallowResNetBlock(256, 256))
        self.block3 = torch.nn.Sequential(*block3_list)
        # Stage 5: 256 -> 512.
        block4_list = []
        for i in range(block_num_list[3]):
            if i == 0:
                block4_list.append(ShallowResNetBlock(256, 512, downsample=True))
            else:
                block4_list.append(ShallowResNetBlock(512, 512))
        self.block4 = torch.nn.Sequential(*block4_list)
class DeepResNet(ResNet):
    """ResNet built from bottleneck blocks (ResNet-50/101/152)."""

    def __init__(self, input_size, num_classes, block_num_list, resnet_type="3x3", **kargs):
        """``block_num_list`` gives the number of blocks in stages 2-5."""
        super(DeepResNet, self).__init__(input_size, 2048, num_classes, resnet_type, **kargs)
        for i in range(4):
            assert block_num_list[i] > 0, "block num needs greater than 0!"
        # Stage 2: 64 -> 256, no downsampling.
        block1_list = []
        for i in range(block_num_list[0]):
            if i == 0:
                block1_list.append(DeepResNetBlock(64, 256))
            else:
                block1_list.append(DeepResNetBlock(256, 256))
        self.block1 = torch.nn.Sequential(*block1_list)
        # Stage 3: first block downsamples and widens 256 -> 512.
        block2_list = []
        for i in range(block_num_list[1]):
            if i == 0:
                block2_list.append(DeepResNetBlock(256, 512, downsample=True))
            else:
                block2_list.append(DeepResNetBlock(512, 512))
        self.block2 = torch.nn.Sequential(*block2_list)
        # Stage 4: 512 -> 1024.
        block3_list = []
        for i in range(block_num_list[2]):
            if i == 0:
                block3_list.append(DeepResNetBlock(512, 1024, downsample=True))
            else:
                block3_list.append(DeepResNetBlock(1024, 1024))
        self.block3 = torch.nn.Sequential(*block3_list)
        # Stage 5: 1024 -> 2048.
        block4_list = []
        for i in range(block_num_list[3]):
            if i == 0:
                block4_list.append(DeepResNetBlock(1024, 2048, downsample=True))
            else:
                block4_list.append(DeepResNetBlock(2048, 2048))
        self.block4 = torch.nn.Sequential(*block4_list)
def ResNet18(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-18: basic blocks, [2, 2, 2, 2] blocks per stage."""
    return ShallowResNet(input_size, num_classes, block_num_list=[2, 2, 2, 2], resnet_type=resnet_type, **kargs)


def ResNet34(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-34: basic blocks, [3, 4, 6, 3] blocks per stage."""
    return ShallowResNet(input_size, num_classes, block_num_list=[3, 4, 6, 3], resnet_type=resnet_type, **kargs)


def ResNet50(input_size, num_classes, resnet_type="3x3", **kargs):
    """ResNet-50: bottleneck blocks, [3, 4, 6, 3] blocks per stage."""
    return DeepResNet(input_size, num_classes, block_num_list=[3, 4, 6, 3], resnet_type=resnet_type, **kargs)
def ResNet101(input_size, num_classes, resnet_type="3x3", **kargs):
return DeepResNet(input_size, num_classes, block_num_list=[3,4,23,3], resnet_type=resnet_type, **kargs) | zllz4/Face-Recognition | models/resnet.py | resnet.py | py | 11,268 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv2d",
"lin... |
39915116580 | import pandas as pd
import numpy as np
import datetime as dt
import os
import matplotlib.pyplot as plt
from util import get_data, plot_data
# prepare the data
def normalize_stocks(prices):
    """Fill missing prices and rescale so the first row equals 1.0.

    Gaps are forward-filled, then any leading NaNs back-filled, and the
    result is divided by its first row.

    Fixes: the original called ``fillna(method=..., inplace=True)`` — the
    ``method`` argument is deprecated in modern pandas, and ``inplace=True``
    silently mutated the caller's DataFrame.  ``ffill()``/``bfill()`` produce
    the same values without either problem.
    """
    filled = prices.ffill().bfill()
    return filled / filled.iloc[0]
# The function to return SMA
# price < sma, BUY
# price > sma, SELL
"""Calculate simple moving average indicator
Parameters:
price: Normalized adjusted close price
rolling_mean: Rolling mean of certain numbers of days
Returns: SMA
"""
def compute_sma(normalized_price, rolling_days):
    """Simple moving average over a *rolling_days* window.

    Entries are NaN until a full window of observations is available
    (``min_periods`` equals the window size).
    """
    rolling_mean = normalized_price.rolling(window=rolling_days, min_periods=rolling_days).mean()
    frame = pd.DataFrame(0, index=normalized_price.index, columns=['SMA'])
    frame['SMA'] = rolling_mean
    return frame
# the function to return momentum
# negative --> postive, Buy
# postive --> negative, Sell
"""Calculate momentum indicator:
momentum[t] = (price[t]/price[t-rolling_days]) - 1
Parameters:
price: Normalized adjusted close price
rolling_days: Number of days to look back
Returns: Momentum
"""
def compute_momentum(normalized_price, rolling_days):
    """Momentum indicator: price[t] / price[t - rolling_days] - 1.

    The first *rolling_days* entries are NaN (no look-back data yet).
    """
    shifted = normalized_price.shift(rolling_days)
    frame = pd.DataFrame(0, index=normalized_price.index, columns=['Momentum'])
    frame['Momentum'] = (normalized_price / shifted) - 1
    return frame
# the function to return Exponential moving average (EMA)
# price < ema, BUY
# price > ema, SELL
"""Calculate EMA indicator:
EMA = Closing price x multiplier + EMA (previous day) x (1-multiplier)
Parameters:
price: Normalized adjusted close price
rolling_days: Number of days to look back
Returns: EMA
"""
def compute_ema(normalized_price, rolling_days):
    """Exponential moving average with span *rolling_days*.

    Uses the recursive form (``adjust=False``):
    EMA[t] = alpha * price[t] + (1 - alpha) * EMA[t-1].
    """
    smoothed = normalized_price.ewm(span=rolling_days, adjust=False).mean()
    frame = pd.DataFrame(0, index=normalized_price.index, columns=['EMA'])
    frame['EMA'] = smoothed
    return frame
# MACD: Moving Average Convergence Divergence
# Signal Line > MACD Line , SELL
# Signal Line < MACD Line, BUY
"""Calculate MACD indicator:
MACD Line: (12-day EMA - 26-day EMA)
Signal Line: 9-day EMA of MACD Line
Parameters:
price: Normalized adjusted close price
Returns: MACD line and Signal line
"""
def compute_macd(normalized_price):
    """Moving Average Convergence Divergence.

    Returned DataFrame columns:
      ema_12, ema_26 — 12/26-day EMAs of the price
      MACD           — ema_12 - ema_26
      Signal         — 9-day EMA of MACD
      MACD_diff      — Signal - MACD

    Fixes: the original returned only the ``MACD_diff`` Series, but
    compute_indicators() indexes the result with 'ema_12', 'MACD' and
    'Signal', which raises a KeyError on a Series.  The full DataFrame is
    now returned; 'MACD_diff' is still available as a column.  The unused
    all-zero 'macd_raw' column was dropped.
    """
    macd = pd.DataFrame(index=normalized_price.index)
    macd['ema_12'] = normalized_price.ewm(span=12, adjust=False).mean()
    macd['ema_26'] = normalized_price.ewm(span=26, adjust=False).mean()
    macd['MACD'] = macd['ema_12'] - macd['ema_26']
    macd['Signal'] = macd['MACD'].ewm(span=9, adjust=False).mean()
    macd['MACD_diff'] = macd['Signal'] - macd['MACD']
    return macd
# Stochastic Oscillator
# signal line (%D) > indicator line (%K), Overbought, SELL
# signal line (%D) < indicator line (%K), Oversold, BUY
"""Calculate Stochastic Oscillator indicator:
Indicator line (%K): (C−L14/H14−L14)*100
C = The most recent closing price
L14 = The lowest price traded of the 14 previous trading sessions
H14 = The highest price traded during the same 14-day period
%K = The current value of the stochastic indicator
Signal line (%D): D=100*(H3/L3)
H3=Highest of the three previous trading sessions
L3=Lowest price traded during the same three-day period
%D = The current value of the stochastic signal
Parameters:
price: Normalized adjusted close price
Returns: %K and %D
"""
def compute_kd(normalized_price):
    """Stochastic oscillator.

    Returned DataFrame columns:
      %K  — 100 * (C - L14) / (H14 - L14) over a 14-period window
      %D  — 3-period SMA of %K (the signal line)
      %KD — %D - %K

    Fixes: the original returned only the ``%KD`` Series, but
    compute_indicators() indexes the result with '%K' and '%D', which raises
    a KeyError on a Series.  The full DataFrame is now returned; '%KD' is
    still available as a column.
    """
    KD = pd.DataFrame(0, index=normalized_price.index, columns=['%K', '%D'])
    low_14 = normalized_price.rolling(14).min()
    high_14 = normalized_price.rolling(14).max()
    KD['%K'] = ((normalized_price - low_14) / (high_14 - low_14)) * 100
    KD['%D'] = KD['%K'].rolling(3).mean()
    KD['%KD'] = KD['%D'] - KD['%K']
    return KD
def compute_indicators(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,12,31), \
    syms=['JPM']):
    """Compute all five indicators for *syms* over [sd, ed] and save charts.

    Writes sma.png, momentum.png, ema.png, macd.png and kd.png to the
    current directory.  Prices come from util.get_data; SPY is loaded for
    comparison but only normalized, not plotted.

    NOTE(review): syms=['JPM'] is a mutable default argument — it is never
    mutated here, but replacing it with None + a local default is safer.
    """
    # Read in date range, prices and symbols
    symbol = syms[0]
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)
    prices = prices_all[syms] # portfolio symbols
    prices_SPY = prices_all['SPY'] # SPY, for comparison
    prices_SPY_normalized = normalize_stocks(prices_SPY)
    normalized_price = normalize_stocks(prices)
    rolling_days = 20
    # --- indicator computations -------------------------------------------
    sma = compute_sma(normalized_price, rolling_days)
    columns = ['Price/SMA']
    prices_sma_ratio = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    prices_sma_ratio['Price/SMA'] = normalized_price[symbol]/sma['SMA']
    momentum = compute_momentum(normalized_price, rolling_days)
    ema = compute_ema(normalized_price, rolling_days)
    columns = ['Price/EMA']
    prices_ema_ratio = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    prices_ema_ratio['Price/EMA'] = normalized_price[symbol]/ema['EMA']
    macd = compute_macd(normalized_price)
    kd = compute_kd(normalized_price)
    # --- SMA chart --------------------------------------------------------
    sma_plot = pd.concat([normalized_price, sma, prices_sma_ratio], axis=1)
    sma_plot.columns = [symbol, 'SMA', 'Price/SMA']
    sma_plot.plot(grid=True, title='Simple Moving Average', use_index=True)
    plt.savefig("sma.png")
    # --- momentum chart ---------------------------------------------------
    momentum_plot = pd.concat([normalized_price, momentum], axis=1)
    momentum_plot.plot(grid=True, title='Momentum', use_index=True)
    plt.savefig("momentum.png")
    # --- EMA chart --------------------------------------------------------
    ema_plot = pd.concat([normalized_price, ema, prices_ema_ratio], axis=1)
    ema_plot.columns = [symbol, 'EMA', 'Price/EMA']
    ema_plot.plot(grid=True, title='Exponential Moving Average', use_index=True)
    plt.savefig("ema.png")
    # --- MACD chart (price + EMAs on top, MACD/Signal below) --------------
    # NOTE(review): compute_macd as written returns only the MACD_diff
    # Series; indexing 'ema_12'/'MACD'/'Signal' below raises KeyError —
    # these two functions appear out of sync; confirm the intended return.
    macd_plot = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    macd_plot = pd.concat([normalized_price, macd['ema_12'], macd['ema_26'],macd['MACD'],macd['Signal']], axis=1)
    macd_plot.columns = [symbol, '12 days EMA', '26 days EMA', 'MACD','Signal']
    fig, axes = plt.subplots(2, 1)
    fig.suptitle('Moving Average Convergence Divergence')
    axes[0].plot(macd_plot["JPM"])
    axes[0].plot(macd_plot["12 days EMA"])
    axes[0].plot(macd_plot["26 days EMA"])
    axes[1].plot(macd_plot["MACD"])
    axes[1].plot(macd_plot["Signal"])
    #axes[0].legend(loc="lower left")
    #axes[1].legend(loc="lower left")
    axes[0].get_xaxis().set_visible(False)
    axes[0].get_yaxis().set_visible(True)
    axes[1].tick_params(labelrotation=45)
    plt.savefig("macd.png")
    # --- stochastic oscillator chart --------------------------------------
    # NOTE(review): same mismatch — compute_kd returns the %KD Series, so
    # kd['%K'] / kd['%D'] below would raise KeyError.
    kd_plot = pd.DataFrame(0, index = normalized_price.index, columns = columns)
    kd_plot = pd.concat([normalized_price, kd['%K'], kd['%D']], axis=1)
    kd_plot.columns = [symbol, '%K', '%D']
    fig, axes = plt.subplots(2, 1)
    fig.suptitle('Stochastic Oscillator')
    axes[0].plot(kd_plot["JPM"])
    axes[1].plot(kd_plot["%K"])
    axes[1].plot(kd_plot["%D"])
    #axes[0].legend(loc="lower left")
    #axes[1].legend(loc="lower left")
    axes[0].get_xaxis().set_visible(False)
    axes[0].get_yaxis().set_visible(True)
    axes[1].tick_params(labelrotation=45)
    plt.savefig("kd.png")
def test_code():
    """Entry point: generate all indicator charts with the default JPM/2008-2009 window."""
    compute_indicators()
if __name__ == "__main__":
test_code()
| EntingHsiao/Stock_trading_with_ML | indicators.py | indicators.py | py | 7,063 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",... |
42406250255 | import json
from flask import url_for
from flask_login import current_user
from flask_mail import Message
from bd_project import mail
from bd_project.models import OrderList, Product
from bd_project.classes import UserHelper
from bd_project.models import Order
def add_current_ordered_products(order_products_by_current_user, order_id):
    """Persist each product of the in-memory order as an OrderList row.

    *order_products_by_current_user* is a list of {product_id: details}
    mappings; each entry becomes one OrderList record tied to *order_id*.
    """
    for product_map in order_products_by_current_user:
        for product_id, details in product_map.items():
            row = OrderList(order_id=order_id,
                            product_id=Product.get(Product.id == product_id),
                            amount=details.get('amount'))
            row.save()
def get_current_order_products(user_id):
    """Return the pending order items for *user_id* from ordered_products.json.

    Returns None when the user has no entry or the entry is empty.
    """
    with open('ordered_products.json', 'r') as handle:
        orders_by_user = json.load(handle)
    # An empty list is treated the same as a missing user: both yield None.
    return orders_by_user.get(str(user_id)) or None
def clear_current_user_ordered_products():
    """Empty the logged-in user's pending order list in ordered_products.json."""
    with open('ordered_products.json', 'r') as handle:
        orders_by_user = json.load(handle)
    orders_by_user[str(current_user.id)] = []
    with open('ordered_products.json', 'w') as handle:
        json.dump(orders_by_user, handle, indent=2)
def send_reset_email(user):
    """Email a password-reset link (with a signed token) to *user*."""
    # Token generation is delegated to the user model.
    token = user.get_reset_token()
    msg = Message('Сброс пароля', sender='vladislavBlog@gmail.com', recipients=[user.email])
    # _external=True produces an absolute URL usable outside the app.
    msg.body = f'''
Что бы сбросить пароль перейдите по ссылке:
{url_for('users.reset_token', token=token, _external=True)}
Если вы не делали этого запроса проигнорируйте сообщение.
'''
    mail.send(msg)
def send_sales_receipt(user, ordered_products):
    """Email a sales receipt for the most recent order to *user*.

    NOTE(review): "most recent order" is the highest Order.id overall, which
    assumes this runs immediately after the user's checkout — verify there
    is no race with concurrent orders.
    """
    last_order = Order.select().order_by(Order.id.desc()).get()
    msg = Message(f'Чек для заказа из магазина зефира "Влад магазин"', sender='vladislavBlog@gmail.com',
                  recipients=[user.email])
    order_sum = UserHelper.order_price(ordered_products)
    receipt = f'Ваш заказ номер:{last_order.id}\n'
    # One line per ordered product: name and quantity.
    for products in ordered_products:
        for pr_id, product in products.items():
            receipt += f'Продукт: {product.get("product")} - {product.get("amount")}\n'
    receipt += f'Сумма заказа: {order_sum}.'
    print(receipt)  # NOTE(review): debug leftover — consider removing or logging
    msg.body = receipt
    mail.send(msg)
| Braindead3/bd_project | bd_project/users/utils.py | utils.py | py | 2,407 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bd_project.models.OrderList",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bd_project.models.Product.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bd_project.models.Product",
"line_number": 15,
"usage_type": "name"
},
{
... |
11378302691 | from functools import reduce
n = int(input())
data = list(map(int, input().split()))
def mean(data):
    """Return the arithmetic mean of *data* (a non-empty sequence of numbers).

    Fixes: the original rebuilt summation with ``reduce(lambda x, y: x + y)``;
    the builtin ``sum`` is the idiomatic (and faster) equivalent.
    """
    return sum(data) / len(data)
def stdev(data):
    """Return the population standard deviation of *data*, rounded to 1 decimal.

    Fixes: replaces the ``map``/``reduce`` lambda pipeline with a generator
    expression over ``sum`` — same math, standard idiom.  The mean is
    computed inline so the function is self-contained.
    """
    mu = sum(data) / len(data)
    variance = sum((x - mu) ** 2 for x in data) / len(data)
    return round(variance ** 0.5, 1)
print(stdev(data))
| scouvreur/hackerrank | 10-days-of-statistics/python/day_1_3.py | day_1_3.py | py | 427 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 15,
"usage_type": "call"
}
] |
39964545331 | import openai
openai.api_key = "[YOUR_OPENAI_API]"
chat_history = [{"role": "system", "content": "You are a assistant."}]
def bot_message(prompt):
    """Append *prompt* to the running conversation and query gpt-3.5-turbo.

    Returns the raw ChatCompletion response object (the caller extracts
    choices and token usage).

    Fixes: the parameter was named ``input``, shadowing the builtin; renamed
    to ``prompt`` (the only call site passes it positionally).
    """
    chat_history.append({"role": "user", "content": f"{prompt}"})
    chat = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=chat_history
    )
    return chat
def start_conversation():
    """Interactive REPL loop: read user input, query the bot, print reply and token usage.

    Runs forever (no exit command); terminate with Ctrl-C.
    """
    print("Hi, I'm a chatbot. How can I help you today?")
    token_count = 0  # running total of tokens used across the whole session
    while True:
        user_input = input("> ")
        prompt = f"{user_input}"
        response = bot_message(prompt)
        role = response.choices[0].message.role
        answer = response.choices[0].message.content
        return_message = f"BOT : {answer}"
        # Record the assistant reply so the next request has full context.
        history_message = {"role": f'{role}', "content": f"{answer}"}
        chat_history.append(history_message)
        # Per-request token accounting reported by the API.
        completion_token = response.usage.completion_tokens
        prompt_token = response.usage.prompt_tokens
        used_tokens = completion_token + prompt_token
        token_count = token_count + used_tokens
        token_message = f"In this conversation, you use {used_tokens} tokens. Completion : {completion_token}, Prompt : {prompt_token}"
        total_token_message = f"You used {token_count} Tokens"
        print(return_message)
        print(token_message)
        print(total_token_message)
start_conversation() | ingyunson/gpt_chatbot | GPT_chatbot.py | GPT_chatbot.py | py | 1,411 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openai.api_key",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatCompletion.create",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion",
"line_number": 9,
"usage_type": "attribute"
}
] |
43046744636 | import typing
from datetime import datetime
from discord import Activity, ActivityType, Embed, Game, HTTPException, Status, Member, User, TextChannel, VoiceChannel, Role, Invite, Game, Emoji, PartialEmoji, Colour
from discord.ext import commands
from discordbot.botmodules import serverdata, audio
from discordbot.config import EXTENSIONFOLDER, EXTENSIONS, ALL_PREFIXES, MAIN_PREFIXES, DEBUG
from discordbot.utils import chunks
from discordbot.errors import ErrorMessage
from rich.traceback import install as install_traceback
install_traceback()
from rich.pretty import install as install_pretty
install_pretty()
#
CONVERTERS = {
Member: commands.MemberConverter,
User: commands.UserConverter,
TextChannel: commands.TextChannelConverter,
VoiceChannel: commands.VoiceChannelConverter,
Role: commands.RoleConverter,
Invite: commands.InviteConverter,
Game: commands.GameConverter,
Emoji: commands.EmojiConverter,
PartialEmoji: commands.PartialEmojiConverter,
Colour: commands.ColourConverter
}
# Own classes
class MyContext(commands.Context):
    """Command context with database access, audio manager and embed helpers.

    Created per-command by MyBot.get_context; adds automatic chunking of
    over-long embed descriptions/field lists and an invoke_as helper for
    re-running a command as another member.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.database = serverdata.DjangoConnection(self.author, self.guild)
        self.audio = audio.AudioManager(self)
        # DM contexts have no guild, hence no per-server data.
        if self.guild is not None:
            self.data = serverdata.Server.getServer(self.guild.id)

    # NOTE(review): fields=[] is a mutable default argument (never mutated
    # here, but fields=None with a local default would be safer).
    async def sendEmbed(self, title: str, *args, receiver=None, message: str = "", description: str = "", fields: list = [], **kwargs):
        """Send an embed, splitting it into numbered parts when Discord's
        limits (2048-char description, 25 fields) would be exceeded.
        Returns the last message sent."""
        if len(description) > 2048:
            # Split at 2042 chars so the " [...]" continuation marker fits.
            desc = list(chunks(description, 2042))
            for i in range(len(desc)):
                if i == 0:
                    await (receiver or self).send(message, embed=self.getEmbed(f"{title} ({i+1}/{len(desc)})", *args, description=desc[i]+" [...]", fields=fields, **kwargs))
                elif i == len(desc)-1:
                    return await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(desc)})", *args, description=desc[i], **kwargs))
                else:
                    await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(desc)})", *args, description=desc[i]+" [...]", **kwargs))
        elif len(fields) > 25:
            # Same pattern, but chunked by embed-field count.
            flds = list(chunks(fields, 25))
            for i in range(len(flds)):
                if i == 0:
                    await (receiver or self).send(message, embed=self.getEmbed(f"{title} ({i+1}/{len(flds)})", *args, description=description, fields=flds[i], **kwargs))
                elif i == len(flds)-1:
                    return await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(flds)})", *args, fields=flds[i], **kwargs))
                else:
                    await (receiver or self).send(embed=self.getEmbed(f"{title} ({i+1}/{len(flds)})", *args, fields=flds[i], **kwargs))
        else:
            return await (receiver or self).send(message, embed=self.getEmbed(title=title, *args, description=description, fields=fields, **kwargs))

    def getEmbed(self, title:str, description:str="", color:int=0x000000, fields:list=[], inline=True, thumbnailurl:str=None, authorurl:str="", authorname:str=None, footertext:str="Angefordert von USER", footerurl:str="AVATARURL", timestamp=False):
        """Build a discord.Embed, truncating every part to Discord's limits.

        "USER"/"AVATARURL" placeholders in the footer are replaced with the
        invoking user's tag and avatar; color falls back to the cog's color.
        """
        EMBED = Embed(title=title[:256], description=description[:2048], color=color or getattr(self.cog, "color", 0x000000))
        EMBED.set_footer(text=footertext.replace("USER", str(self.author.name+"#"+self.author.discriminator))[:2048], icon_url=footerurl.replace("AVATARURL", str(self.author.avatar_url)))
        if timestamp:
            # timestamp=True means "now"; otherwise a datetime is expected.
            EMBED.timestamp = datetime.utcnow() if timestamp is True else timestamp
        for field in fields[:25]:
            EMBED.add_field(name=field[0][:256], value=(field[1][:1018]+" [...]" if len(field[1]) > 1024 else field[1]), inline=bool(field[2] if len(field) > 2 else inline))
        if thumbnailurl:
            EMBED.set_thumbnail(url=thumbnailurl.strip())
        if authorname:
            if authorurl and ("https://" in authorurl or "http://" in authorurl):
                EMBED.set_author(name=authorname[:256], url=authorurl.strip())
            else:
                EMBED.set_author(name=authorname[:256])
        return EMBED

    async def tick(self, value=True):
        """React to the invoking message with a check mark (True) or cross (False)."""
        emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
        try:
            await self.message.add_reaction(emoji)
        except HTTPException:
            # Reaction is purely cosmetic; ignore missing permissions etc.
            pass

    async def send_help(self):
        """Show the help page for the command that was just invoked."""
        await self.invoke(self.bot.get_command("help"), self.invoked_with)

    async def invoke_as(self, member, command, *args):
        """Re-invoke *command* (underscores denote subcommands) as *member*.

        Arguments are converted according to the command callback's
        annotations, using discord.py converters where available.
        """
        _command = command.replace("_", " ")
        cmd = self.bot.get_command(_command)
        if cmd is None:
            raise ErrorMessage(f"Der Befehl `{ _command }` wurde nicht gefunden! \nPS: Benutze im Command bitte kein Prefix! Für Subcommands, benutze command_subcommand.")
        # Rewrite the message so downstream code sees it as sent by *member*.
        self.message.content = self.prefix+_command+self.message.content.split(command)[1]
        self.message.author = member
        self.author = member
        self.database = type(self.database)(self.author, self.guild)
        annotations = cmd.callback.__annotations__
        annotations.pop("return", None)
        arguments = list(args)
        for i, cls in enumerate(annotations.values()):
            if len(arguments) > i:
                if cls in CONVERTERS:
                    arguments[i] = await CONVERTERS[cls]().convert(self, arguments[i])
                else:
                    arguments[i] = cls(arguments[i])
        await self.invoke(cmd, *arguments)
class MyBot(commands.Bot):
    """Bot subclass wiring the dynamic prefix resolver and MyContext."""

    def __init__(self, **kwargs):
        super().__init__(self.get_command_prefix, **kwargs)

    def get_command_prefix(self, client, message):
        """Guild messages react only to the main prefixes; DMs accept all.

        A mention of the bot always works as a prefix.
        """
        if message.guild:
            prefixes = MAIN_PREFIXES
        else:
            prefixes = ALL_PREFIXES
        return commands.when_mentioned_or(*prefixes)(client, message)

    async def get_context(self, message, *, cls=MyContext):
        # Hand every command the extended MyContext instead of the default.
        return await super().get_context(message, cls=cls)

    # NOTE(review): fields=[] is a mutable default argument (never mutated
    # here, but fields=None with a local default would be safer).
    def getEmbed(self, title: str, description: str = "", color: int = 0x000000, fields: list = [], inline=True, thumbnailurl: str = None, authorurl: str = "", authorname: str = None, footertext: str = None, footerurl: str = None, timestamp=False):
        """Build a discord.Embed outside any command context.

        Unlike MyContext.getEmbed there is no invoking user, so footer
        placeholders are not substituted and the footer is optional.
        """
        EMBED = Embed(title=title[:256], description=description[:2048], color=color)
        if footertext:
            if footerurl:
                EMBED.set_footer(text=footertext[:2048], icon_url=footerurl)
            else:
                EMBED.set_footer(text=footertext[:2048])
        if timestamp:
            # timestamp=True means "now"; otherwise a datetime is expected.
            EMBED.timestamp = datetime.utcnow() if timestamp is True else timestamp
        for field in fields:
            EMBED.add_field(name=field[0][:256], value=(field[1][:1018]+" [...]" if len(field[1]) > 1024 else field[1]), inline=bool(
                field[2] if len(field) > 2 else inline))
        if thumbnailurl:
            EMBED.set_thumbnail(url=thumbnailurl.strip())
        if authorname:
            if authorurl and ("https://" in authorurl or "http://" in authorurl):
                EMBED.set_author(name=authorname[:256], url=authorurl.strip())
            else:
                EMBED.set_author(name=authorname[:256])
        return EMBED
# create Bot
bot = MyBot(
description='Das ist eine Beschreibung!',
case_insensitive=True,
activity=Activity(type=ActivityType.listening, name=(MAIN_PREFIXES[0] if MAIN_PREFIXES else "/")+"help"),
status=Status.idle,
help_command=None,
strip_after_prefix=True,
)
@bot.before_invoke
async def before_invoke(ctx):
    # Fires before every command: show the "typing…" indicator while it runs.
    await ctx.trigger_typing()
# Events
from discordbot.botevents import setup
setup(bot)
# Start
def run(TOKEN):
    """Start the Discord bot with *TOKEN*; blocks until shutdown."""
    print("[Bot] - Starting with DEBUG="+str(DEBUG))
    bot.run(TOKEN, bot=True, reconnect=True)
if __name__ == "__main__":
print("[Bot] - You must run this bot via your manage.py file: python3.8 manage.py run-discorbot")
| AlexeiSur/bot12345 | discordbot/bot.py | bot.py | py | 8,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rich.traceback.install",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rich.pretty.install",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "discord.Member",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "discord.Use... |
694411018 | import datetime
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
# DAG defaults: a past start_date so the DAG is immediately schedulable.
default_dag_args = {
    # https://airflow.apache.org/faq.html#what-s-the-deal-with-start-date
    'start_date': datetime.datetime(2020, 4, 27)
}

# BigQuery target and the bq-CLI command fragments assembled below.
dataset = 'airflow'
table = 'Country'

query_cmd = 'bq query --use_legacy_sql=false '
table_cmd = 'create or replace table ' + dataset + '.' + table + '(id int64, name string)'
# NOTE(review): the "\'"USA"\'" sequence closes/reopens shell quoting so the
# SQL receives "USA" in double quotes — fragile; verify against the bq CLI.
insert_cmd = 'insert into ' + dataset + '.' + table + '(id, name) values(1, "\'"USA"\'")'

# schedule_interval=None: the DAG only runs when triggered manually.
with models.DAG(
        'country2',
        schedule_interval=None,
        default_args=default_dag_args) as dag:
    create_dataset = BashOperator(
        task_id='create_dataset',
        bash_command='bq --location=US mk --dataset ' + dataset)
    # all_done: create the table even if the dataset already existed.
    create_table = BashOperator(
        task_id='create_table',
        bash_command=query_cmd + "'" + table_cmd + "'",
        trigger_rule='all_done')
    insert_row = BashOperator(
        task_id='insert_row',
        bash_command=query_cmd + "'" + insert_cmd + "'",
        trigger_rule='one_success')
    # Linear pipeline: dataset -> table -> row.
    create_dataset >> create_table >> insert_row
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "airflow.models.DAG",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "airflow.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "airflow.operators.... |
37220479023 | import json
with open('settings.json','r',encoding='utf8') as token:
data = json.load(token)
import requests
import subprocess
from flask import Flask, render_template, request, abort, make_response, jsonify
from datetime import datetime, timezone, timedelta
import firebase_admin
from firebase_admin import credentials, firestore
cred = credentials.Certificate("project-analytics-8acd9-firebase-adminsdk-6usuy-2415c74209.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
from bs4 import BeautifulSoup
from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import (MessageEvent, TextMessage, TextSendMessage, ImageSendMessage, LocationSendMessage)
line_bot_api = LineBotApi(data["LineBotApi"])
handler = WebhookHandler(data["webhook"])
app = Flask(__name__)
@app.route('/')
def index():
    """Site map: hand-built HTML list of links to every demo route."""
    homepage = "<h1>許哲睿Python測試網頁</h1>"
    homepage += "<a href=/mis>MIS</a><br>"
    homepage += "<a href=/current>開啟網頁及顯示日期時間</a><br>"
    homepage += "<a href=/welcome?nick=許哲睿>開啟網頁及傳送使用者暱稱</a><br>"
    homepage += "<a href=/login>透過表單輸入名字傳值</a><br>"
    homepage += "<a href=/hi>計算總拜訪次數</a><br>"
    homepage += "<a href=/aboutme>關於子青老師 (響應式網頁實例)</a><br>"
    homepage += "<br><a href=/read>讀取Firestore資料</a><br>"
    homepage += "<a href=/resource>MIS resource</a><br>"
    homepage += "<br><a href=/spider>讀取開眼電影即將上映影片,寫入Firestore</a><br>"
    homepage += "<br><a href=/search>輸入關鍵字進行資料查詢</a><br>"
    return homepage
@app.route('/mis')
def course():
    """Static course-title page for the MIS class."""
    return "<h1>資訊管理導論</h1>"
@app.route('/current')
def current():
    """Render current.html with the current time in UTC+8 (Taipei)."""
    taipei_tz = timezone(timedelta(hours=+8))
    current_time = datetime.now(taipei_tz)
    return render_template("current.html", datetime = str(current_time))
@app.route('/welcome', methods=["GET", "POST"])
def welcome():
    """Greet the visitor named by the 'nick' query-string or form parameter."""
    nickname = request.values.get("nick")
    return render_template("welcome.html", name=nickname)
@app.route('/hi')
def hi():
    """Increment and display the persistent visit counter stored in count.txt.

    Fixes: file handles were opened and closed manually; ``with`` blocks now
    guarantee the handles are closed even if read/parse/write raises.
    """
    # Load the previous count.
    with open('count.txt', "r") as f:
        count = int(f.read())
    count += 1
    # Overwrite the file with the new count.
    with open('count.txt', "w") as f:
        f.write(str(count))
    return "本網站總拜訪人次:" + str(count)
@app.route("/login", methods=["POST","GET"])
def login():
    """Form demo: POST echoes the submitted name, GET shows the form."""
    if request.method == "POST":
        user = request.form["nm"]
        return "您輸入的名字為:" + user
    else:
        return render_template("login.html")
@app.route("/resource")
def classweb():
    """Render the course resource-links page."""
    return render_template("links.html")
@app.route("/aboutme")
def about():
    """Responsive-page example; passes the current UTC+8 time to the template."""
    taipei_tz = timezone(timedelta(hours=+8))
    current_time = datetime.now(taipei_tz)
    return render_template("aboutme.html",datetime = str(current_time))
@app.route("/read")
def read():
    """Dump every document in the 靜宜資管 Firestore collection, ordered by mail (desc)."""
    Result = ""
    collection_ref = db.collection("靜宜資管")
    docs = collection_ref.order_by(
        "mail", direction=firestore.Query.DESCENDING).get()
    for doc in docs:
        Result += "文件內容:{}".format(doc.to_dict()) + "<br>"
    return Result
@app.route('/spider')
def spider():
    """Scrape atmovies.com.tw upcoming releases and upsert them into Firestore.

    One document per movie in the 電影 collection, keyed by the site's
    movie id extracted from the detail-page URL.
    """
    url = "http://www.atmovies.com.tw/movie/next/"
    Data = requests.get(url)
    Data.encoding = "utf-8"
    sp = BeautifulSoup(Data.text, "html.parser")
    result = sp.select(".filmListAllX li")
    # NOTE(review): [5:] assumes a fixed 5-character label prefix before the
    # update date — verify against the current page markup.
    lastUpdate = sp.find("div", class_="smaller09").text[5:]
    for item in result:
        picture = item.find("img").get("src").replace(" ", "")
        title = item.find("div", class_="filmtitle").text
        # The href looks like /movie/<id>/ — strip slashes and the prefix.
        movie_id = item.find("div", class_="filmtitle").find(
            "a").get("href").replace("/", "").replace("movie", "")
        hyperlink = "http://www.atmovies.com.tw" + \
            item.find("div", class_="filmtitle").find("a").get("href")
        # "上映日期:YYYY/MM/DD 片長:NN分" -> strip labels, then slice.
        show = item.find("div", class_="runtime").text.replace("上映日期:", "")
        show = show.replace("片長:", "")
        show = show.replace("分", "")
        # NOTE(review): fixed offsets assume the exact layout above — verify.
        showDate = show[0:10]
        showLength = show[13:]
        doc = {
            "title": title,
            "picture": picture,
            "hyperlink": hyperlink,
            "showDate": showDate,
            "showLength": showLength,
            "lastUpdate": lastUpdate
        }
        # set() overwrites any existing document -> idempotent upsert.
        doc_ref = db.collection("電影").document(movie_id)
        doc_ref.set(doc)
    return "近期上映電影已爬蟲及存檔完畢,網站最近更新日期為:" + lastUpdate
@app.route("/search", methods=["POST", "GET"])
def search():
    """Movie keyword search: POST queries Firestore, GET shows the input form."""
    if request.method == "POST":
        MovieTitle = request.form["MovieTitle"]
        collection_ref = db.collection("電影")
        docs = collection_ref.order_by("showDate").get()
        info = ""
        for doc in docs:
            # Substring match against the stored title.
            if MovieTitle in doc.to_dict()["title"]:
                info += "片名:" + doc.to_dict()["title"] + "<br>"
                info += "海報:" + doc.to_dict()["picture"] + "<br>"
                info += "影片介紹:" + doc.to_dict()["hyperlink"] + "<br>"
                info += "片長:" + doc.to_dict()["showLength"] + " 分鐘<br>"
                info += "上映日期:" + doc.to_dict()["showDate"] + "<br><br>"
        return info
    else:
        return render_template("input.html")
@app.route("/callback", methods=["POST"])
def callback():
    """LINE webhook endpoint: verify the signature, then dispatch to handlers."""
    # get X-Line-Signature header value
    signature = request.headers["X-Line-Signature"]
    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    # handle webhook body
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # Reject requests that were not signed by LINE.
        abort(400)
    return "OK"
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """LINE text-message dispatcher: movie search, photo, map, or echo fallback."""
    message = event.message.text
    # "MOVIE <keyword>" (case-insensitive command) -> Firestore title search.
    if(message[:5].upper() == 'MOVIE'):
        res = searchMovie(message[6:])
        line_bot_api.reply_message(
            event.reply_token, TextSendMessage(text=res))
    elif(message.upper() == "TCYANG"):
        # Reply with a fixed photo.
        line_bot_api.reply_message(event.reply_token, ImageSendMessage(
            original_content_url = "https://www1.pu.edu.tw/~tcyang/aboutme/family.jpg",
            preview_image_url = "https://www1.pu.edu.tw/~tcyang/aboutme/family.jpg"
        ))
    elif(message.upper() == "PU"):
        # Reply with the university's location pin.
        line_bot_api.reply_message(event.reply_token, LocationSendMessage(
            title="靜宜大學地理位置",
            address="台中市沙鹿區臺灣大道七段200號",
            latitude=24.22649,
            longitude=120.5780923
        ))
    else:
        # Fallback: echo the message back.
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text="我是電影機器人,您輸入的是:" + message + "。祝福您有個美好的一天!"))
def searchMovie(keyword):
    """Return a human-readable listing of Firestore movies whose title contains *keyword*.

    Falls back to an apology line when nothing matches.
    """
    info = "您要查詢電影,關鍵字為:" + keyword + "\n"
    collection_ref = db.collection("電影")
    docs = collection_ref.order_by("showDate").get()
    found = False
    for doc in docs:
        # Substring match against the stored title.
        if keyword in doc.to_dict()["title"]:
            found = True
            info += "片名:" + doc.to_dict()["title"] + "\n"
            info += "海報:" + doc.to_dict()["picture"] + "\n"
            info += "影片介紹:" + doc.to_dict()["hyperlink"] + "\n"
            info += "片長:" + doc.to_dict()["showLength"] + " 分鐘\n"
            info += "上映日期:" + doc.to_dict()["showDate"] + "\n\n"
    if not found:
        info += "很抱歉,目前無符合這個關鍵字的相關電影喔"
    return info
@app.route("/webhook", methods=["POST"])
def webhook():
    """Dialogflow fulfillment endpoint.

    Handles two actions: CityWeather (CWB open-data forecast lookup) and
    searchMovie (Firestore title search), and replies with fulfillment text
    plus weather quick-reply chips.

    NOTE(review): if the action matches neither branch, *info* is unbound
    and the final make_response raises NameError — confirm Dialogflow only
    ever sends these two actions.
    """
    # build a request object
    req = request.get_json(force=True)
    # fetch queryResult from json
    action = req.get("queryResult").get("action")
    #msg = req.get("queryResult").get("queryText")
    #info = "動作:" + action + "; 查詢內容:" + msg
    if (action == "CityWeather"):
        city = req.get("queryResult").get("parameters").get("city")
        info = "查詢都市名稱:" + city + ",天氣:"
        # CWB data uses 臺, users usually type 台.
        city = city.replace("台", "臺")
        token = data["token"]
        url = "https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-C0032-001?Authorization=" + \
            token + "&format=JSON&locationName=" + str(city)
        Data = requests.get(url)
        # weatherElement[0] = condition, [1] = probability of precipitation.
        Weather = json.loads(Data.text)[
            "records"]["location"][0]["weatherElement"][0]["time"][0]["parameter"]["parameterName"]
        Rain = json.loads(Data.text)[
            "records"]["location"][0]["weatherElement"][1]["time"][0]["parameter"]["parameterName"]
        info += Weather + ",降雨機率:" + Rain + "%"
    elif (action == "searchMovie"):
        cond = req.get("queryResult").get("parameters").get("FilmQ")
        keyword = req.get("queryResult").get("parameters").get("any")
        info = "您要查詢電影的" + cond + ",關鍵字是:" + keyword + "\n\n"
        # Only title ("片名") queries are implemented.
        if (cond == "片名"):
            collection_ref = db.collection("電影")
            docs = collection_ref.order_by("showDate").get()
            found = False
            for doc in docs:
                if keyword in doc.to_dict()["title"]:
                    found = True
                    info += "片名:" + doc.to_dict()["title"] + "\n"
                    info += "海報:" + doc.to_dict()["picture"] + "\n"
                    info += "影片介紹:" + doc.to_dict()["hyperlink"] + "\n"
                    info += "片長:" + doc.to_dict()["showLength"] + " 分鐘\n"
                    info += "上映日期:" + doc.to_dict()["showDate"] + "\n\n"
            if not found:
                info += "很抱歉,目前無符合這個關鍵字的相關電影喔"
    return make_response(
        jsonify({
            "fulfillmentText": info,
            "fulfillmentMessages": [
                {"quickReplies": {
                    "title": info,
                    "quickReplies": ["台北天氣", "台中天氣", "高雄天氣"]
                }}]
        }))
if __name__ == "__main__":
app.run() | NTX8205/LineBot | flask_server.py | flask_server.py | py | 10,110 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
12435271099 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
import csv
import openpyxl
import json
import time
from datetime import datetime
from random import random
start_time = time.time()
print('scraping the site')
# Accumulators; `test` collects one dict per scraped question.
test = []
options = []
answer = ''
image_source = []
choices = None
try:
    year_error_occurred = ''
    for year in range(1978, 2020):
        print('year', year)
        for page in range(1, 11):
            print('page number', page)
            quote_page = 'https://myschool.ng/classroom/mathematics?exam_type=jamb&exam_year='+str(year) + '&page='+str(page)
            year_error_occurred = quote_page
            r = requests.get(quote_page)
            encodedText = r.text.encode("utf-8")
            soup = BeautifulSoup(encodedText, 'html.parser')
            # Fix: the original bound this list to `question` and then
            # rebound `question` to the question text inside the loop —
            # it worked only because the iterator was already captured.
            question_divs = soup.find_all('div', class_='question-desc')
            for item in question_divs:
                question_id = round(random() * 10000)
                content = item.text.rstrip().lstrip()
                question = content.strip('\n')
                next_Sibling = item.find_next_sibling('ul')
                link = item.find_next_sibling('a')
                img_container = item.find_previous_sibling('div')
                image_source = []
                # Fix: previously these kept their values from the previous
                # question when no answer link existed (or raised NameError
                # on the very first question), silently attaching the wrong
                # answer; reset them per question instead.
                link_to_answer = ''
                answer = ''
                if img_container is not None:
                    images = img_container.findChildren('img')
                    if images is not None:
                        for img in images:
                            image_source.append(img['src'])
                if link is not None:
                    # Follow the answer link and read the highlighted answer.
                    link_to_answer = link['href']
                    encodedText = requests.get(link_to_answer).text.encode("utf-8")
                    soup = BeautifulSoup(encodedText, 'html.parser')
                    h5_tag = soup.find('h5', class_='text-success')
                    content = h5_tag.text.rstrip().lstrip()
                    answer = content.strip('\n')
                choices = next_Sibling.findChildren('li')
                options = []
                if choices is not None:
                    for node in choices:
                        content = node.text.lstrip().rstrip()
                        choice = content.strip('\n')
                        options.append(choice)
                # Key 'qestion' (sic) kept byte-identical: downstream
                # consumers of data.json already depend on the typo.
                test.append({ 'id': question_id, 'year': year, 'examtype': 'Jamb',
                              'subject': 'Mathematics','qestion': question,'image_asset': image_source,
                              'options': options, 'answer': answer,
                              'linkToanswer':link_to_answer, 'source_url': quote_page})
                time.sleep(1)  # be polite to the site
except Exception as err:
    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt (so Ctrl-C could not stop the run) and discarded
    # the error itself; catch Exception and report what happened.
    print('error occurred while try to scrap', year_error_occurred, err)
print('done')
# Whatever was scraped before any failure is still persisted (best effort).
with open('data.json', 'w') as outfile:
    json.dump(test, outfile)
print('executed successfully, total execution time: ', (time.time() - start_time))
| charlesinto/quizAppQuestions | index.py | index.py | py | 3,056 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_nu... |
38799393419 | from discord import Interaction
import bdg
import enum
class FormatStyle(enum.Enum):
    """Text-formatting styles selectable by the /formatar command."""
    UPPER = 0     # ALL CAPS
    LOWER = 1     # all lowercase
    REVERSED = 2  # string reversed
    SPACED = 3    # one space between every character
    HACKER = 4    # leet-speak digit substitutions
    IRONIC = 5    # aLtErNaTiNg case
# Leet-speak substitution table for FormatStyle.HACKER: lowercase source
# character -> digit look-alike; unmapped characters pass through unchanged.
hacker_dict = {
    "a": "4",
    "s": "5",
    "o": "0",
    "e": "3"
}
class FormatCommand(bdg.BdgCommand):
    """Slash command /formatar: reformat text in one of six styles."""

    header = {
        'name': "formatar",
        'description': "Formate um texto de acordo com o estilo selecionado"
    }

    params = {
        'estilo': "O estilo do texto, podendo ser: UPPER, LOWER, REVERSED, SPACED, HACKER, IRONIC",
        'texto': "O texto a ser formatado no estilo indicado"
    }

    async def on_command(self, i: Interaction, estilo: FormatStyle, texto: str):
        """Apply *estilo* to *texto* and reply with the formatted result."""
        text = ""
        if estilo == FormatStyle.UPPER:
            text = texto.upper()
        elif estilo == FormatStyle.LOWER:
            text = texto.lower()
        elif estilo == FormatStyle.REVERSED:
            # Fix: the original slice texto[len(texto)-1:-1:-1] is always
            # empty (stop index -1 aliases the last element), so REVERSED
            # replied with an empty string.  [::-1] reverses the whole text.
            text = texto[::-1]
        elif estilo == FormatStyle.SPACED:
            text = " ".join(texto)
        elif estilo == FormatStyle.HACKER:
            # Substitute leet digits where a mapping exists, keep the
            # original character otherwise.
            text = "".join(hacker_dict.get(char.lower(), char) for char in texto)
        elif estilo == FormatStyle.IRONIC:
            # Odd indices uppercase, even indices lowercase (same as the
            # original code; its Portuguese comment had it backwards).
            text = "".join(
                char.upper() if index % 2 else char.lower()
                for index, char in enumerate(texto)
            )
        await i.response.send_message(":speech_balloon: | " + text)
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "bdg.BdgCommand",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "discord.Interaction",
"line_number": 32,
"usage_type": "name"
}
] |
3954925048 | # -*- codind: utf-8 -*-
import os, sys, random, argparse, time
import math
import json
import codecs
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
install_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(install_path)
root_dir = os.path.join(install_path, 'exps/')
if not os.path.exists(root_dir):
os.makedirs(root_dir)
from xslu.utils import make_logger, read_emb
from xslu.optim import Optim
import xslu.Constants as Constants
from text.text import build_class_vocab
from text.dstc2 import slot2dic, process_sent
from model import RNN2One
from dataloader import OneBestIter4STC
from trainer import OneBestTrainer4STC
def model_opts(parser):
    """Register the model-selection option on *parser*."""
    parser.add_argument('-model_type', type=str, default='RNN2One',
                        help="which model to use: RNN2One")
def train_opts(parser):
    """Register training options (paths, device, optimization) on *parser*."""
    # Data / output locations
    parser.add_argument('-experiment', required=True,
                        help="Root path for saving results, models and logs")
    parser.add_argument('-data_root', required=True,
                        help="Path prefix to the train and valid and class")
    parser.add_argument('-save_model', default='best.pt',
                        help="Saved model filename")
    # Embedding handling
    parser.add_argument('-load_emb', action='store_true',
                        help='whether to load pre-trained word embeddings')
    parser.add_argument('-fix_emb', action='store_true',
                        help='whether to fix pre-trained word embeddings')
    # Device / optimization
    parser.add_argument('-deviceid', default=0, type=int,
                        help="device id to run, -1 for cpus")
    parser.add_argument('-batch_size', default=10, type=int,
                        help="batch size")
    parser.add_argument('-epochs', default=100, type=int,
                        help="epochs")
    parser.add_argument('-optim', default='adam', type=str,
                        help="optimizer")
    parser.add_argument('-lr', default=0.001, type=float,
                        help="learning rate")
    parser.add_argument('-max_norm', default=5, type=float,
                        help="threshold of gradient clipping (2 norm), < 0 for no clipping")
    # BUG FIX: `type=int` was missing, so a seed given on the command line
    # arrived as a string and `np.random.seed(str)` raised in parse_args().
    parser.add_argument('-seed', default=3435, type=int,
                        help='random seed')
def test_opts(parser):
# Data options
parser.add_argument('-test_json', default='test.json', type=str,
help="preprocessed test json file")
parser.add_argument('-save_decode', default='decode.json', type=str,
help="Path to the file of saving decoded results")
parser.add_argument('-load_chkpt', default=None, type=str,
help="Path to the checkpoint file to be loaded")
def parse_args():
    """Build the CLI parser, load cached vocabularies and seed all RNGs.

    Returns the argparse namespace, augmented with `memory`, `class2idx`,
    `word2idx` (loaded from `<data_root>memory.pt`) and a `cuda` flag.
    """
    parser = argparse.ArgumentParser(
        description='Program Options',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('-mode', default='train', type=str,
                        help="run mode: train, test, error")
    for register in (model_opts, train_opts, test_opts):
        register(parser)
    opt = parser.parse_args()
    print(opt)

    # Fixing embeddings only makes sense when pre-trained ones are loaded.
    if opt.fix_emb:
        assert opt.load_emb is True

    # Preprocessed vocabularies; the GloVe-filtered vocab is used when
    # pre-trained embeddings are requested.
    opt.memory = torch.load(opt.data_root + 'memory.pt')
    opt.class2idx = opt.memory['class2idx']
    key = 'word2idx_w_glove' if opt.load_emb else 'word2idx'
    opt.word2idx = opt.memory[key]

    # Device selection (-1 means CPU).
    opt.cuda = opt.deviceid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.deviceid)

    # Seed every RNG for reproducibility.
    random.seed(opt.seed)
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    return opt
def make_model(opt):
    """Instantiate the classifier selected by ``opt.model_type``.

    Only 'RNN2One' is currently supported; the model is moved to GPU when
    ``opt.cuda`` is set.
    """
    if opt.model_type != 'RNN2One':
        raise Exception('Undefined model type!')
    model = RNN2One(len(opt.word2idx), len(opt.class2idx))
    if opt.cuda:
        model = model.cuda()
    return model
def train(opt):
    """Train the semantic tuple classifier and save the best checkpoint.

    Expects *opt* to carry the fields prepared by parse_args() (paths,
    vocabularies, device flag, optimization hyper-parameters).
    """
    # basics definition: resolve output paths and set up logging
    opt.experiment = os.path.join(root_dir, opt.experiment)
    if not os.path.exists(opt.experiment):
        os.makedirs(opt.experiment)
    opt.save_model = os.path.join(opt.experiment, opt.save_model)
    opt.log_path = os.path.join(opt.experiment, 'log.train')
    opt.logger = make_logger(opt.log_path)
    # dataIter definition: train shuffled (True), valid not (False)
    train_iter = OneBestIter4STC(opt.data_root+'train', opt.word2idx, opt.class2idx,
                                 opt.batch_size, opt.cuda, True)
    valid_iter = OneBestIter4STC(opt.data_root+'valid', opt.word2idx, opt.class2idx,
                                 opt.batch_size, opt.cuda, False)
    # model definition; optionally initialise embeddings from GloVe
    model = make_model(opt)
    if opt.load_emb:
        emb = read_emb(opt.word2idx)
        model.emb.init_weight_from_pre_emb(emb, opt.fix_emb)
    print(model)
    # criterion definition: summed binary cross-entropy over the label set
    criterion = nn.BCELoss(reduction='sum')
    if opt.cuda:
        criterion = criterion.cuda()
    # optimizer definition with gradient clipping at opt.max_norm
    optimizer = Optim(opt.optim, opt.lr, max_grad_norm=opt.max_norm)
    optimizer.set_parameters(model.named_parameters())
    print('Trainable parameter number: {}'.format(len(optimizer.params)))
    # training procedure: the trainer handles epochs, validation and saving
    trainer = OneBestTrainer4STC(model, criterion, optimizer, opt.logger)
    trainer.train(opt.epochs, train_iter, valid_iter, opt.save_model)
def test(opt):
    """Decode the test set with a trained model and dump DSTC2-style results.

    Loads the checkpoint named by ``opt.load_chkpt``, runs the classifier on
    the 1-best ASR hypothesis of every turn in ``opt.test_json``, and writes
    the decoded slu hypotheses to ``opt.save_decode``.
    """
    opt.experiment = os.path.join(root_dir, opt.experiment)
    opt.load_chkpt = os.path.join(opt.experiment, opt.load_chkpt)
    opt.save_decode = os.path.join(opt.experiment, opt.save_decode)
    opt.test_json = os.path.join(opt.data_root, opt.test_json)

    idx2class = {v: k for k, v in opt.class2idx.items()}

    model = make_model(opt)
    # Map storages to CPU on load; make_model() has already placed the model
    # on the requested device.
    chkpt = torch.load(opt.load_chkpt, map_location=lambda storage, loc: storage)
    model.load_state_dict(chkpt)
    model.eval()

    # FIX: context-managed open + json.load instead of a leaked
    # open(...).read() file handle.
    with open(opt.test_json) as f:
        sessions = json.load(f)['sessions']

    print('Decoding ...')
    decode_sessions = {'sessions': []}
    for session in sessions:
        n_session = {}
        n_session['session-id'] = session['session-id']
        n_session['turns'] = []
        for turn in session['turns']:
            asr_hyps = turn['asr-hyps']
            # Decode only the 1-best ASR hypothesis.
            sent = asr_hyps[0]['asr-hyp']
            tokens = process_sent(sent)
            if len(tokens) == 0:
                slu_hyp = []
            else:
                sent_ids = [opt.word2idx.get(w) if w in opt.word2idx else Constants.UNK for w in tokens]
                datas = torch.from_numpy(np.asarray(sent_ids, dtype='int64')).view(1, -1)
                if opt.cuda:
                    datas = datas.cuda()
                # FIX: no gradients needed at decode time.
                with torch.no_grad():
                    probs = model(datas, None)
                scores = probs.data.cpu().view(-1,).numpy()
                # Multi-label decision: keep every class with probability > 0.5.
                pred_classes = [i for i, p in enumerate(scores) if p > 0.5]
                classes = [idx2class[i] for i in pred_classes]
                slu_hyp = [slot2dic(string) for string in classes]
            n_session['turns'].append(
                {
                    'asr-hyps': asr_hyps,
                    'slu-hyps': [{'slu-hyp': slu_hyp, 'score': 1.0}]
                }
            )
        decode_sessions['sessions'].append(n_session)

    string = json.dumps(decode_sessions, sort_keys=True, indent=4, separators=(',', ':'))
    with open(opt.save_decode, 'w') as f:
        f.write(string)
    print('Decode results saved in {}'.format(opt.save_decode))
if __name__ == '__main__':
    opt = parse_args()
    # Dispatch on the requested run mode.
    runners = {'train': train, 'test': test}
    if opt.mode not in runners:
        raise ValueError("unsupported type of mode {}".format(opt.mode))
    runners[opt.mode](opt)
| ZiJianZhao/Unaligned-SLU | base/stc/main.py | main.py | py | 7,763 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"l... |
73581418663 | from flask import Flask, render_template, jsonify
from database import engine, text, load_job_from_db
app = Flask(__name__)
def load_jobs_from_db():
    """Fetch every row of the ``jobs`` table as a list of dicts.

    Returns an empty list (after printing the error) if the query fails
    for any reason.
    """
    try:
        with engine.connect() as conn:
            result = conn.execute(text("select * from jobs"))
            columns = result.keys()
            return [dict(zip(columns, row)) for row in result.all()]
    except Exception as e:
        print(f"An error occurred while loading jobs from database: {e}")
        return []
@app.route("/")
def hello():
job_list = load_jobs_from_db()
return render_template('home.html', jobs=job_list)
@app.route("/api/<id>")
def show_job(id):
job_list = load_job_from_db(id)
if not job_list:
return "Not found", 404
return render_template('jobpage.html', job=job_list)
@app.route("/api/jobs")
def list_jobs():
job_list = load_jobs_from_db()
return jsonify(job_list)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| Maulikdavra/mdfinance-carrer-webiste-v2 | app.py | app.py | py | 974 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "database.engine.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "database.engine",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "database.text",
... |
13285114730 | from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
from pathlib import Path
columns={0: 'text', 1: 'label'}
dataPath='../data/'
test_file=input("Test data file name:")
corpus: Corpus = ColumnCorpus(dataPath, columns, train_file='train_data.txt', test_file=test_file, dev_file='test_data.txt')
tag_type='label'
tag_dict=corpus.make_tag_dictionary(tag_type=tag_type)
print(tag_dict)
embedding_types = [WordEmbeddings('glove')]
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
tagger: SequenceTagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dict,
tag_type=tag_type, use_crf=True)
str_path = input("path to saved model:")
path = Path(str_path)
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
trainer.final_test(path, eval_mini_batch_size=32) | gfreitag/Recipe-Project | python/tester_script.py | tester_script.py | py | 1,031 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flair.data.Corpus",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flair.datasets.ColumnCorpus",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flair.embeddings.WordEmbeddings",
"line_number": 17,
"usage_type": "call"
},
{
"api... |
10762352030 | import os
import cv2
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from handy_msgs.msg import Float32Stamped
import readchar
import json
class MyNode(Node):
    """ROS 2 node that replays recorded camera frames (plus per-frame GPS
    distance read from sibling .json files) onto ROS topics."""

    def __init__(self, name, published_image):
        """Create publishers for the image topic *published_image* and the
        shared '/gps/distance' topic."""
        super().__init__(name)
        self.image_publisher = self.create_publisher(Image, published_image, 10)
        self.dist_publisher = self.create_publisher(Float32Stamped, '/gps/distance', 10)
        self.bridge = CvBridge()
        self.image_paths = []  # sorted image file paths
        self.gps_paths = []    # matching .json metadata paths, same order

    def load_image_paths(self, folder_path):
        """Collect .jpg/.png files from *folder_path*, sort them
        case-insensitively, and derive the matching .json paths."""
        for file_name in os.listdir(folder_path):
            # Idiom fix: one endswith() call with a tuple of suffixes.
            if file_name.endswith(('.jpg', '.png')):
                self.image_paths.append(os.path.join(folder_path, file_name))
        self.image_paths = sorted(self.image_paths, key=str.lower)
        for filename in self.image_paths:
            root, _ext = os.path.splitext(filename)
            self.gps_paths.append(root + '.json')

    def publish(self, index, distance):
        """Publish image *index*; when *distance* is truthy, also publish the
        recorded 'DIST' value with the same header as the image."""
        file_name = self.image_paths[index]
        image = cv2.imread(file_name)
        ros_image = self.bridge.cv2_to_imgmsg(image, 'bgr8')
        self.image_publisher.publish(ros_image)
        with open(self.gps_paths[index]) as f:
            data = json.load(f)
        self.get_logger().info(f"JSON {json.dumps(data)}")
        if distance:
            new_msg = Float32Stamped()
            new_msg.header = ros_image.header  # keep image/distance in sync
            new_msg.data = float(data['DIST'])
            self.dist_publisher.publish(new_msg)
        self.get_logger().info(f"Published image {file_name}")
def main(args=None):
    """Replay recorded front/back camera frames, stepped by the keyboard.

    Keys: space = publish the current frame pair and advance,
    'r' = rewind to the first frame, 'b' = step back one frame.
    """
    rclpy.init(args=args)
    folder_path = '/doc/DATA/R4C/data/Cam/20230223143203'
    front_node = MyNode('image_player', '/Pioneer3at/camera_front')
    back_node = MyNode('image_player', '/Pioneer3at/camera_back')
    front_node.load_image_paths(folder_path + '/RGB-18443010C1A2DF0F00')
    back_node.load_image_paths(folder_path + '/RGB-18443010B1F4DE0F00')
    counter = 0
    while rclpy.ok():
        key = readchar.readkey()
        if key == ' ':
            front_node.publish(counter, distance=True)
            back_node.publish(counter, distance=True)
            counter += 1
        if key == 'r':
            counter = 0
        if key == 'b':
            # counter sits one past the last shown frame, so -2 steps back one.
            # FIX: clamp at 0 so repeated 'b' never wraps to negative indices
            # (which would silently pick frames from the end of the list).
            counter = max(0, counter - 2)
    # FIX: destroy both nodes (the back node was previously leaked).
    front_node.destroy_node()
    back_node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    # Script entry point when run directly (not imported as a module).
    main()
{
"api_name": "rclpy.node.Node",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sensor_msgs.msg.Image",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "handy_msgs.msg.Float32Stamped",
"line_number": 15,
"usage_type": "argument"
},
{
"api_n... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.