seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
469422111 | from argparse import ArgumentParser
from threading import Thread, current_thread
import logging
import time
from sloq import SlowQueue
def main(args=None):
    """Entry point: parse CLI options, wire up a console logger, run the demo.

    args: optional argv-style list (None means use sys.argv).
    """
    parser = ArgumentParser()
    parser.add_argument("-n", type=int, default=10, metavar="TASK_COUNT",
                        help="The number of tasks")
    parser.add_argument("-t", type=float, default=1, metavar="TASK_INTERVAL",
                        help="The tick: seconds between tasks being released")
    parser.add_argument("-w", type=int, default=3, metavar="WORKER_COUNT",
                        help="Number of workers")
    parser.add_argument("-d", type=float, default=0, metavar="TASK_DURATION",
                        help="Duration of a single task")
    parser.add_argument("-s", type=float, default=0, metavar="MAX_SLAM",
                        help="The maximum amount of slam to allow")
    parsed = parser.parse_args(args)
    # Log worker progress to stderr at INFO level.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())
    test_queue(logger, parsed.t, parsed.n, parsed.d, parsed.w, parsed.s)
def test_queue(logger, tick=1, tasks=10, task_duration=0, worker_count=3,
               slam=0):
    """Push *tasks* jobs through a rate-limited SlowQueue and wait for drain.

    One (None, None) sentinel per worker tells each consumer to exit;
    sloq.join() blocks until every task (including sentinels) is acknowledged.
    """
    start_time = time.time()
    sloq = SlowQueue(release_tick=tick, max_slam=slam)
    # Begin the workers
    # Fix: `xrange` is Python-2-only; `range` behaves the same here on both.
    for w in range(worker_count):
        Thread(target=test_worker, args=(logger, start_time, sloq)).start()
    # Populate the queue
    for task in range(tasks):
        sloq.put((task, task_duration))
    # One poison pill per worker so all of them terminate.
    for w in range(worker_count):
        sloq.put((None, None))
    sloq.join()
def test_worker(logger, start_time, queue):
while True:
task, sleep = queue.get()
if task is None:
logger.info("%s, Done" % current_thread().name)
queue.task_done()
return
else:
logger.info("%s, Elapsed time: %0.2f, Task: %r",
current_thread().name, time.time() - start_time, task)
if sleep:
time.sleep(sleep)
queue.task_done()
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| duedil-ltd/python-sloq | demo_sloq.py | demo_sloq.py | py | 2,091 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "logging.S... |
5459057284 | import configparser
from constants.Constants import Constants as const
from .OptimizerParamsFactory import OptimizerParamsFactory
from model.OptimizerFactory import OptimizerFactory
class ConfigParams(object):
    """Typed view over an INI training-configuration file.

    Parses model, hyper-parameter and optimizer settings with configparser
    and exposes them as plain attributes (architecture, inputSize, epochs,
    batchSize, patience, optimizer, ...).
    """

    def __init__(self, file):
        """Load and parse the configuration at path *file*.

        Raises the usual IOError/configparser errors on a missing or
        malformed file.
        """
        config = configparser.ConfigParser()
        # Fix: the original `config.read_file(open(file))` leaked the file
        # handle; a with-block closes it even if parsing raises.
        with open(file) as config_file:
            config.read_file(config_file)
        # Model
        self.architecture = config.get(const.ConfigSection.model, "architecture")
        # Valid only for mobilenet
        if self.architecture == "mobilenet":
            self.mobilenetAlpha = config.getfloat(const.ConfigSection.model, "mobilenetAlpha", fallback=1.0)
        self.inputSize = config.getint(const.ConfigSection.model, "inputSize", fallback=224)
        self.inputChannels = config.getint(const.ConfigSection.model, "inputChannels", fallback=3)
        self.preprocessType = config.get(const.ConfigSection.model, "preprocessType", fallback="dummy")
        # HyperParameters
        self.epochs = config.getint(const.ConfigSection.hyperparameters, "epochs")
        self.batchSize = config.getint(const.ConfigSection.hyperparameters, "batchSize")
        self.patience = config.getint(const.ConfigSection.hyperparameters, "patience")
        # Optimizer settings are delegated to the project factories.
        optimizerType = config.get(const.ConfigSection.hyperparameters, "optimizer")
        optimizerParams = OptimizerParamsFactory.createOptimizerParams(optimizerType, config)
        self.optimizer = OptimizerFactory.create(optimizerParams)
| SlipknotTN/kaggle_dog_breed | keras/lib/config/ConfigParams.py | ConfigParams.py | py | 1,442 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "constants.Constants.Constants.ConfigSection",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "constants.Constants.Constants",
"line_number": 16,
"usage_type... |
from RestrictedPython import compile_restricted_function, safe_builtins, limited_builtins, utility_builtins
# Demo: compiling and running user scripts inside a RestrictedPython sandbox,
# including swapping out a helper function and a global between runs.
someglobalvar = 123
myscript = """
import math
import tempfile
import io
#folgende befehle fuehren zu fehlern
#f = open("app.py", "rb")
#f = NamedTemporaryFile(delete=False)
def g(x):
    #return x + 1 + someglobalvar <--- kein Zugriff auf someglobalvar moeglich
    return h(x + 1)
result = math.exp(g(f(data)))
return result
"""
# Globals visible inside the sandbox.
# NOTE(review): safe_globals aliases the shared safe_builtins dict, so the
# update below mutates RestrictedPython.safe_builtins itself — confirm intent.
safe_locals = {}
safe_globals = safe_builtins
additional_globals = {'data' : 2, 'f' : lambda x: x**2}
safe_globals.update(additional_globals)
# Compile the main function (body comes from the user script above).
main_function_name = 'main'
main_function_compiled = compile_restricted_function(p = '', body = myscript, name = main_function_name, filename = '<inline code>')
# Compile the helper function h(x) = -x.
support_function_name = 'h'
support_function_parameters = 'x'
support_function_body = 'return -x'
support_function_compiled = compile_restricted_function(p = support_function_parameters, body = support_function_body, name = support_function_name, filename = '<inline code>')
# Create the function object for the helper by exec-ing its code object.
exec(support_function_compiled.code, safe_globals, safe_locals)
support_function_compiled_pointer = safe_locals[support_function_name]
print((support_function_compiled_pointer(123))) # test the helper function
# Add the helper to the sandbox globals so the main function can call it.
updated_globals = {support_function_name : support_function_compiled_pointer}
safe_globals.update(updated_globals)
# Create the function object for the main function.
exec(main_function_compiled.code, safe_globals, safe_locals)
main_compiled_pointer = safe_locals[main_function_name]
print(main_compiled_pointer(*[], **{})) # test the main function
# Update the global variable 'data'.
updated_globals = {'data' : 3}
safe_globals.update(updated_globals)
# Replace helper 'h' with a new implementation (+x instead of -x).
support_function_compiled = compile_restricted_function(p = support_function_parameters, body = 'return +x', name = support_function_name, filename = '<inline code>')
exec(support_function_compiled.code, safe_globals, safe_locals)
support_function_compiled_pointer = safe_locals[support_function_name]
updated_globals = {support_function_name : support_function_compiled_pointer}
safe_globals.update(updated_globals)
# Rebind the main function to the updated globals without recompiling it:
# build a new function object around the existing code object.
import types
main_compiled_update_pointer = types.FunctionType(
    main_compiled_pointer.__code__,
    safe_globals,
    '<' + main_function_name + '>',
    main_compiled_pointer.__defaults__ or ())
print(main_compiled_update_pointer(*[], **{})) # test the main function
| aleksProsk/HydroOpt2.0 | minimal-code-examples/minimal-embedded-script2.py | minimal-embedded-script2.py | py | 2,656 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "RestrictedPython.safe_builtins",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "RestrictedPython.compile_restricted_function",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "RestrictedPython.compile_restricted_function",
"line_number": 36,... |
33362276648 | import cv2
import base64
import json
import time
import os
import numpy
class Recognition:
    """
    This class contain everything required to detect, learn or recognize a face.

    Trained LBPH models are stored as <db_name>.xml and the id/name/info
    index as <db_name>.txt, both under `db_path`.
    """
    db_path = "database/"
    name_list_path = "database/"

    def __init__(self, haar_cascade_file_path):
        # Path of the OpenCV Haar cascade used for face detection.
        self.haar_cascade_file_path = haar_cascade_file_path

    def take_pictures(self, number_of_pics):
        """Capture *number_of_pics* grayscale 130x100 face crops from the webcam.

        Returns (images, labels) where labels are the 1-based capture indices.
        Blocks until enough faces have been detected.
        """
        (images, labels) = ([], [])
        (width, height) = (130, 100)
        face_cascade = cv2.CascadeClassifier(self.haar_cascade_file_path)
        webcam = cv2.VideoCapture(0)
        count = 1
        while count < number_of_pics + 1:
            ret_val, im = webcam.read()
            time.sleep(1)  # throttle so consecutive frames differ
            if ret_val:
                gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, 1.3, 4)
                for (x, y, w, h) in faces:
                    cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    face = gray[y:y + h, x:x + w]
                    face_resize = cv2.resize(face, (width, height))
                    images.append(face_resize)
                    labels.append(count)
                    count += 1
        webcam.release()
        cv2.destroyAllWindows()
        return images, labels

    @staticmethod
    def create_recognize_msg(db_name, images):
        """Serialize the first image as a JSON 'recognize' request.

        NOTE(review): under Python 3 base64.b64encode returns bytes, which
        json.dumps cannot serialize — this appears to target Python 2; confirm.
        """
        retval, image = cv2.imencode('.png', images[0])
        json_string = {'data': {'type': "recognize", 'db_name': db_name, 'image0': base64.b64encode(image)}}
        return json.dumps(json_string)

    @staticmethod
    def create_learn_msg(db_name, person_name, info, images):
        """Serialize all images plus metadata as a JSON 'learn' request."""
        json_string = {'data': {'type': "learn", 'db_name': db_name, 'person_name': person_name, 'info': info,
                                'number_of_images': len(images)}}
        i = 0
        for image in images:
            retval, im = cv2.imencode('.png', image)
            json_string['data']['image' + str(i)] = base64.b64encode(im)
            i += 1
        return json.dumps(json_string)

    def get_image_name(self, label, db_name):
        """Map a numeric *label* to the person name stored in <db_name>.txt.

        A negative label returns the id of the last entry instead.
        Returns 0 when the database does not exist, and None when the
        label is not found.
        """
        names_list = self.image_name_list(self.db_path)
        if db_name in names_list:
            # Fix: use a with-block so the handle is closed (it leaked before).
            with open(self.name_list_path + db_name + ".txt", "r") as fh:
                list_items = fh.readlines()
            if label >= 0:
                for item in list_items:
                    # Records look like "<id>:<name>:<info>".
                    if int(item.split(":")[:-1][0]) == label:
                        return item.split(":")[1]
            else:
                return list_items[-1].split(":")[:-1][0]
        else:
            return 0

    def set_image_name(self, person_name, db_name, info):
        """Append an "<id>:<name>:<info>" record and return the new id."""
        last_id = self.get_image_name(-1, db_name)
        # Fix: with-block replaces manual open/close (close was skipped on error).
        with open(self.name_list_path + db_name + ".txt", "a") as fh:
            fh.write(str(int(last_id) + 1) + ":" + person_name + ":" + info)
            fh.write("\n")
        return int(last_id) + 1

    def get_image_info(self, label, db_name):
        """Return the free-form info field for *label*.

        Returns 0 when the database does not exist, and None when the
        label is not found.
        """
        names_list = self.image_name_list(self.db_path)
        if db_name in names_list:
            # Fix: close the file deterministically (it leaked before).
            with open(self.name_list_path + db_name + ".txt", "r") as fh:
                list_items = fh.readlines()
            if label >= 0:
                for item in list_items:
                    if int(item.split(":")[:-1][0]) == label:
                        return item.split(":")[2]
        else:
            return 0

    @staticmethod
    def db_list(db_path):
        """Names (without extension) of every file under *db_path*."""
        names = []
        for filename in os.listdir(db_path):
            names.append(filename.split(".")[:-1][0])
        return names

    @staticmethod
    def image_name_list(path):
        """Names (without extension) of every file under *path*."""
        names = []
        for filename in os.listdir(path):
            names.append(filename.split(".")[:-1][0])
        return names

    def learn_person(self, db_name, person_name, info, images):
        """Train or update the LBPH model for *db_name* with the given images.

        NOTE(review): set_image_name is called twice, writing two index
        records per person — looks accidental; confirm before changing.
        """
        dbs = self.db_list('database/')
        label_list = [self.set_image_name(person_name, db_name, info), self.set_image_name(person_name, db_name, info)]
        (image, label) = [numpy.array(lists) for lists in [images, label_list]]
        if db_name in dbs:
            model = cv2.face.LBPHFaceRecognizer_create()  # 125 #110
            model.read(self.db_path + db_name + ".xml")
            model.update(image, label)
            model.write(self.db_path + db_name + ".xml")
        else:
            model = cv2.face.LBPHFaceRecognizer_create()  # 125 #110
            model.train(image, label)
            model.write(self.db_path + db_name + ".xml")

    def recognize_person(self, db_name, images):
        """Predict the identity of the first confident face match.

        Returns (name, info) on a match below the confidence threshold 125,
        "Unknown" for the first unconfident face, or None when the database
        does not exist.
        """
        dbs = self.db_list('database/')
        if db_name in dbs:
            model = cv2.face.LBPHFaceRecognizer_create()  # 125 #110
            model.read(self.db_path + db_name + ".xml")
            for faces in images:
                prediction = model.predict(faces)
                # Lower distance = better match; 125 is the acceptance cutoff.
                if prediction[1] < 125:
                    rec = self.get_image_name(prediction[0], db_name)
                    info = self.get_image_info(prediction[0], db_name)
                    return rec, info
                else:
                    return "Unknown"
        else:
            return None
| farshid616/clinet-server_FaceRecognition | recognition.py | recognition.py | py | 5,116 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
... |
5812334986 | import babel
import babel.numbers
import babel.plural
from typing import Any, Callable, Dict, List, TYPE_CHECKING, Tuple, Union, cast
from typing_extensions import Literal
from fluent.syntax import ast as FTL
from .builtins import BUILTINS
from .prepare import Compiler
from .resolver import CurrentEnvironment, Message, Pattern, ResolverEnvironment
from .utils import native_to_fluent
if TYPE_CHECKING:
from .types import FluentNone, FluentType
PluralCategory = Literal['zero', 'one', 'two', 'few', 'many', 'other']
class FluentBundle:
    """
    Bundles are single-language stores of translations.  They are
    aggregate parsed Fluent resources in the Fluent syntax and can
    format translation units (entities) to strings.

    Always use `FluentBundle.get_message` to retrieve translation units from
    a bundle. Generate the localized string by using `format_pattern` on
    `message.value` or `message.attributes['attr']`.

    Translations can contain references to other entities or
    external arguments, conditional logic in form of select expressions, traits
    which describe their grammatical features, and can use Fluent builtins.
    See the documentation of the Fluent syntax for more information.
    """

    def __init__(self,
                 locales: List[str],
                 functions: Union[Dict[str, Callable[[Any], 'FluentType']], None] = None,
                 use_isolating: bool = True):
        """Create a bundle for *locales*, optionally adding custom functions
        on top of the Fluent builtins."""
        self.locales = locales
        # Caller-supplied functions override same-named builtins.
        self._functions = {**BUILTINS, **(functions or {})}
        self.use_isolating = use_isolating
        self._messages: Dict[str, Union[FTL.Message, FTL.Term]] = {}
        self._terms: Dict[str, Union[FTL.Message, FTL.Term]] = {}
        # Lazily populated cache of compiled entries, keyed by id
        # ('-' prefix for terms).
        self._compiled: Dict[str, Message] = {}
        # The compiler is not typed, and this cast is only valid for the public API
        self._compiler = cast(Callable[[Union[FTL.Message, FTL.Term]], Message], Compiler())
        self._babel_locale = self._get_babel_locale()
        self._plural_form = cast(Callable[[Any], Callable[[Union[int, float]], PluralCategory]],
                                 babel.plural.to_python)(self._babel_locale.plural_form)

    def add_resource(self, resource: FTL.Resource, allow_overrides: bool = False) -> None:
        """Register every Message/Term in *resource*; other AST nodes are
        ignored.  Existing ids are kept unless allow_overrides is True."""
        # TODO - warn/error about duplicates
        for item in resource.body:
            if not isinstance(item, (FTL.Message, FTL.Term)):
                continue
            map_ = self._messages if isinstance(item, FTL.Message) else self._terms
            full_id = item.id.name
            if full_id not in map_ or allow_overrides:
                map_[full_id] = item

    def has_message(self, message_id: str) -> bool:
        """True if a message (not term) with this id has been added."""
        return message_id in self._messages

    def get_message(self, message_id: str) -> Message:
        """Return the compiled message; raises KeyError if unknown."""
        return self._lookup(message_id)

    def _lookup(self, entry_id: str, term: bool = False) -> Message:
        """Fetch a compiled message/term, compiling and caching on first use."""
        if term:
            # Terms share the cache with messages under a '-' prefix.
            compiled_id = '-' + entry_id
        else:
            compiled_id = entry_id
        try:
            return self._compiled[compiled_id]
        except LookupError:
            pass
        entry = self._terms[entry_id] if term else self._messages[entry_id]
        self._compiled[compiled_id] = self._compiler(entry)
        return self._compiled[compiled_id]

    def format_pattern(self,
                       pattern: Pattern,
                       args: Union[Dict[str, Any], None] = None
                       ) -> Tuple[Union[str, 'FluentNone'], List[Exception]]:
        """Format *pattern* with *args*, returning (result, errors).

        Formatting never raises: errors raised during resolution are
        collected and '{???}' is returned for a failed pattern.
        """
        if args is not None:
            # Convert plain Python values (numbers, dates, ...) to Fluent types.
            fluent_args = {
                argname: native_to_fluent(argvalue)
                for argname, argvalue in args.items()
            }
        else:
            fluent_args = {}

        errors: List[Exception] = []
        env = ResolverEnvironment(context=self,
                                  current=CurrentEnvironment(args=fluent_args),
                                  errors=errors)
        try:
            result = pattern(env)
        except ValueError as e:
            errors.append(e)
            result = '{???}'
        return (result, errors)

    def _get_babel_locale(self) -> babel.Locale:
        """First parseable locale from self.locales, else babel's default."""
        for lc in self.locales:
            try:
                return babel.Locale.parse(lc.replace('-', '_'))
            except babel.UnknownLocaleError:
                continue
        # TODO - log error
        return babel.Locale.default()
| projectfluent/python-fluent | fluent.runtime/fluent/runtime/bundle.py | bundle.py | py | 4,408 | python | en | code | 185 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Literal",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "typing.Uni... |
class Standalone(object):
    """Common class for all standalone applications.

    Parses argv-style *args* with the provided *user_options* parser and
    copies each parsed value back into the nested user_options.config
    structure (each option object stores its value in a `.value` attribute).
    """
    def __init__(self, args, user_options):
        # stores the arguments (args[0] is treated as the program name)
        self.args = args
        self.user_options = user_options

        if len(args) == 1:
            # shows the help message if no arguments provided
            self.help()
        else:
            # The user values should be used to update the
            # user_options
            options = self.user_options.parse_args(args[1:])

            # Should update the CNOConfig file with the provided options:
            # for every section and every option name in that section, copy
            # the parsed value into the matching config option's `.value`.
            for key in self.user_options.config.keys():
                for option in self.user_options.config[key]._get_names():
                    value = getattr(options, option)
                    setattr(getattr( getattr(self.user_options.config, key), option ), 'value', value)
            self.options = options

    def help(self):
        """Print the argparse help by simulating a `--help` invocation."""
        self.user_options.parse_args(["prog", "--help"])

    def report(self):
        """Create report and shows report (or not).

        NOTE(review): relies on self.trainer being set by a subclass — this
        base class never assigns it; confirm the contract.
        """
        if self.options.onweb is True:
            self.trainer.report(show=True)
        elif self.options.report is True:
            self.trainer.report(show=False)
        else:
            from easydev.console import red
            print(red("No report requested; nothing will be saved or shown"))
            print("use --on-web or --report options")
| cellnopt/cellnopt | cno/core/standalone.py | standalone.py | py | 1,405 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "easydev.console.red",
"line_number": 34,
"usage_type": "call"
}
] |
4419557828 | import sys
import pygame
import random
# Name-picker "wheel": press SPACE to spin through 25 frames, then show a
# randomly chosen name from names.txt.
pygame.init()
pygame.display.set_caption("who is the caption")
window = pygame.display.set_mode((500,500))
font = pygame.font.Font("思源黑体.otf",35)
# Preload the 25 animation frames ./pics/0.png ... ./pics/24.png.
wheel_pics = []
for i in range(25):
    filename = './pics/' + str(i) + '.png'
    pic = pygame.image.load(filename)
    wheel_pics.append(pic)
start_pic = pygame.image.load('start.png')
window.fill((255,255,255))
window.blit(start_pic,(0,0))
pygame.display.flip()
# Read the first six candidate names, one per line.
with open('names.txt',encoding = 'utf8') as f:
    name_list = []
    for i in range(6):
        name_list.append(f.readline().strip())
choice = random.choice(name_list)
print(choice)
rolling = False
pic_index = 0
clock = pygame.time.Clock()
# Main loop: 30 FPS; SPACE starts a spin, QUIT exits.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                rolling = True
    if rolling:
        window.fill((255,255,255))
        window.blit(wheel_pics[pic_index % 25],(0,0))
        pic_index += 1
        # One full cycle of frames finished: stop and pick a new name.
        if pic_index >= 25:
            rolling = False
            pic_index = 0
            choice = random.choice(name_list)
    # NOTE(review): text is rendered white on a white background while idle —
    # presumably only meant to be visible over the wheel frames; confirm.
    text = font.render(choice, True, (255,255,255))
    window.blit(text,(215,220))
    pygame.display.flip()
    clock.tick(30)
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
13735356077 |
import os
import sys
import numpy as np
import PIL
import torch
import torch.utils.data
import torchvision
import matplotlib.pyplot as plt
import pickle
class AkMinitImagenetDataset(torch.utils.data.Dataset):
    """Mini-ImageNet dataset backed by pre-pickled category-split files.

    str_trainortestorinducing selects both the pickle to load and the
    transform pipeline: "train" adds random horizontal flips, while
    "inducing" and "test" use the deterministic pipeline.
    """
    def __init__(self, rootdir_dataset, str_trainortestorinducing):
        # grab args ===
        self.rootdir_dataset = rootdir_dataset
        self.str_trainortestorinducing = str_trainortestorinducing
        # make internals ==
        assert(isinstance(str_trainortestorinducing, str))
        assert(self.str_trainortestorinducing in [
            "train", "test", "inducing"
        ])
        # NOTE(review): "test" maps to train_phase_test.pickle (the held-out
        # part of the train phase), not the meta-test split — confirm intent.
        fname_train = "MiniImagenet/miniImageNet_category_split_train_phase_train.pickle"
        fname_test = "MiniImagenet/miniImageNet_category_split_train_phase_test.pickle"
        #"MiniImagenet/miniImageNet_category_split_test.pickle"
        fname_pkl = fname_test if(str_trainortestorinducing == "test") else fname_train
        with open(os.path.join(self.rootdir_dataset, fname_pkl), 'rb') as f:
            content_pkl = pickle.load(f, encoding='latin1')
        self.X = content_pkl['data'] #[N x 84 x 84 x 3]
        self.Y = content_pkl['labels']
        # make the transforms ===
        tfm_colornormalization = torchvision.transforms.Normalize(
            (0.4914, 0.4822, 0.4465),
            (0.2023, 0.1994, 0.2010)
        )
        # NOTE(review): RandomHorizontalFlip is applied after ToTensor, so it
        # must operate on tensors — requires a recent torchvision; confirm.
        if(self.str_trainortestorinducing == "train"):
            self.tfm = torchvision.transforms.Compose([
                torchvision.transforms.ToPILImage(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.RandomHorizontalFlip(p=0.5),
                tfm_colornormalization
            ])
        elif(self.str_trainortestorinducing == "inducing"):
            # Identical to the "test" pipeline: no augmentation.
            self.tfm = torchvision.transforms.Compose([
                torchvision.transforms.ToPILImage(),
                torchvision.transforms.ToTensor(),
                tfm_colornormalization
            ])
        elif(self.str_trainortestorinducing == "test"):
            self.tfm = torchvision.transforms.Compose([
                torchvision.transforms.ToPILImage(),
                torchvision.transforms.ToTensor(),
                tfm_colornormalization
            ])
        else:
            raise Exception("Unknown str_trainortestorinducing: {}".format(
                self.str_trainortestorinducing
            ))

    def __len__(self):
        # Number of images in the loaded split.
        return self.X.shape[0]

    def __getitem__(self, n):
        # Returns (transformed image, label, index) for sample n.
        xn = self.X[n,:,:,:] #[84x84x3]
        yn = self.Y[n]
        return self.tfm(xn), yn, n
| blindreviewgtdxjnsd/gpex_blindreview | PaperResults/MiniImagenet/loadminiimagenet.py | loadminiimagenet.py | py | 2,624 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_nu... |
4354535219 | from collections import OrderedDict
from datetime import date, datetime
from decimal import Decimal, ROUND_DOWN
from models import models
from peewee import fn
from playhouse.shortcuts import model_to_dict
from pytz import timezone
from time import time
import app_config
import copytext
import simplejson as json
import xlrd
# Month names and their Associated Press abbreviations, indexed by month - 1.
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
AP_MONTHS = ['Jan.', 'Feb.', 'March', 'April', 'May', 'June', 'July', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.']
# Ordinal suffixes for numbers ending in 1/2/3; everything else takes 'th'.
ORDINAL_SUFFIXES = { 1: 'st', 2: 'nd', 3: 'rd' }
USPS_TO_AP_STATE = {
'AL': 'Ala.',
'AK': 'Alaska',
'AR': 'Ark.',
'AZ': 'Ariz.',
'CA': 'Calif.',
'CO': 'Colo.',
'CT': 'Conn.',
'DC': 'D.C.',
'DE': 'Del.',
'FL': 'Fla.',
'GA': 'Ga.',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Ill.',
'IN': 'Ind.',
'KS': 'Kan.',
'KY': 'Ky.',
'LA': 'La.',
'MA': 'Mass.',
'MD': 'Md.',
'ME': 'Maine',
'MI': 'Mich.',
'MN': 'Minn.',
'MO': 'Mo.',
'MS': 'Miss.',
'MT': 'Mont.',
'NC': 'N.C.',
'ND': 'N.D.',
'NE': 'Neb.',
'NH': 'N.H.',
'NJ': 'N.J.',
'NM': 'N.M.',
'NV': 'Nev.',
'NY': 'N.Y.',
'OH': 'Ohio',
'OK': 'Okla.',
'OR': 'Ore.',
'PA': 'Pa.',
'PR': 'P.R.',
'RI': 'R.I.',
'SC': 'S.C.',
'SD': 'S.D.',
'TN': 'Tenn.',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Va.',
'VT': 'Vt.',
'WA': 'Wash.',
'WI': 'Wis.',
'WV': 'W.Va.',
'WY': 'Wyo.'
}
GOP_CANDIDATES = [
'Ted Cruz',
'John Kasich',
'Donald Trump'
]
DEM_CANDIDATES = [
'Hillary Clinton',
'Bernie Sanders'
]
PARTY_MAPPING = {
'dem': {
'AP': 'Dem',
'long': 'Democrat',
'class': 'democrat',
'adverb': 'Democratic',
},
'gop': {
'AP': 'GOP',
'long': 'Republican',
'class': 'republican',
'adverb': 'Republican',
}
}
def comma_filter(value):
    """
    Format a number with thousands separators, e.g. 1234567 -> '1,234,567'.
    """
    return format(value, ',')
def percent_filter(value):
    """
    Format a 0-1 ratio as a percentage string.

    0 and 1 map to '0%' / '100%', anything under 1% becomes '<1%', and
    everything else is truncated (not rounded) to one decimal place.
    """
    pct = Decimal(value) * Decimal(100)
    if pct == 0:
        return '0%'
    if pct == 100:
        return '100%'
    if 0 < pct < 1:
        return '<1%'
    truncated = pct.quantize(Decimal('.1'), rounding=ROUND_DOWN)
    return '{:.1f}%'.format(truncated)
def ordinal_filter(num):
    """
    Format a number as an ordinal: 1st, 2nd, 3rd, 4th, ... (11-13 take 'th').
    """
    num = int(num)
    # Suffixes for numbers ending in 1/2/3; everything else takes 'th'.
    suffix_map = {1: 'st', 2: 'nd', 3: 'rd'}
    if 10 <= num % 100 <= 20:
        # 11th, 12th, 13th (and 111th, ...) despite ending in 1/2/3.
        suffix = 'th'
    else:
        suffix = suffix_map.get(num % 10, 'th')
    # Fix: str.format works on both Python 2 and 3; the original used the
    # Python-2-only `unicode` builtin, which raises NameError on Python 3.
    return '{0}{1}'.format(num, suffix)
def ap_month_filter(month):
    """
    Convert a 1-based month number into its AP-style abbreviation.
    """
    index = int(month) - 1
    return AP_MONTHS[index]
def ap_date_filter(value):
    """
    Converts a date string in m/d/yyyy format (or a datetime) into AP style,
    e.g. 'March 1, 2016'. Naive inputs are shifted GMT -> US/Eastern.
    """
    # NOTE: `basestring`/`unicode` make this Python-2-only code.
    if isinstance(value, basestring):
        value = datetime.strptime(value, '%m/%d/%Y')
    value_tz = _set_timezone(value)
    output = AP_MONTHS[value_tz.month - 1]
    output += ' ' + unicode(value_tz.day)
    output += ', ' + unicode(value_tz.year)
    return output
def ap_time_filter(value):
    """
    Converts a datetime or string in hh:mm format into AP style
    (hour without leading zero). Naive inputs are shifted GMT -> US/Eastern.
    """
    if isinstance(value, basestring):
        value = datetime.strptime(value, '%I:%M')
    value_tz = _set_timezone(value)
    # Pin a real year before strftime — presumably to avoid issues with the
    # default year 1900; TODO confirm.
    value_year = value_tz.replace(year=2016)
    return value_year.strftime('%-I:%M')
def ap_state_filter(usps):
    """
    Convert a USPS state abbreviation (e.g. 'IA') into AP style ('Iowa').
    """
    # NOTE: `unicode` makes this Python-2-only code.
    return USPS_TO_AP_STATE[unicode(usps)]
def ap_time_period_filter(value):
    """
    Converts Python's AM/PM into AP Style's a.m./p.m.
    """
    if isinstance(value, basestring):
        value = datetime.strptime(value, '%p')
    value_tz = _set_timezone(value)
    value_year = value_tz.replace(year=2016)
    # 'PM' -> 'P.M.' -> 'p.m.'
    periods = '.'.join(value_year.strftime('%p')) + '.'
    return periods.lower()
def candidate_sort_lastname(item):
    """
    Sort key for result rows: winners first (-1), the catch-all pseudo
    candidates last ('zzz'), everyone else alphabetically by surname.
    """
    if item.winner:
        return -1
    if item.last in ('Other', 'Uncommitted', 'Write-ins'):
        return 'zzz'
    return item.last
def candidate_sort_votecount(item):
    # Sort key: raw vote count (callers pass reverse=True for descending order).
    return item.votecount
def _set_timezone(value):
    """Interpret a naive datetime as GMT and convert it to US/Eastern."""
    datetime_obj_utc = value.replace(tzinfo=timezone('GMT'))
    datetime_obj_est = datetime_obj_utc.astimezone(timezone('US/Eastern'))
    return datetime_obj_est
def collate_other_candidates(results, party):
    """Fold non-whitelisted candidates into an 'other' bucket.

    Removes every result whose candidate is not on the party's whitelist,
    summing their vote counts/percentages. Returns (results, other_votecount,
    other_votepct); *results* is mutated in place.
    """
    if party == 'GOP':
        whitelisted_candidates = GOP_CANDIDATES
    elif party == 'Dem':
        whitelisted_candidates = DEM_CANDIDATES

    other_votecount = 0
    other_votepct = 0

    # Iterating in reverse makes removing the current element safe: the
    # indices still to be visited are unaffected by the removal.
    for result in reversed(results):
        candidate_name = '%s %s' % (result.first, result.last)
        if candidate_name not in whitelisted_candidates:
            other_votecount += result.votecount
            other_votepct += result.votepct
            results.remove(result)

    return results, other_votecount, other_votepct
def set_delegates_updated_time():
    """
    Write timestamp to filesystem (seconds since the epoch, as text).
    """
    now = time()

    with open(app_config.DELEGATE_TIMESTAMP_FILE, 'w') as f:
        f.write(str(now))
def get_delegates_updated_time():
    """
    Read timestamp from file system and return UTC datetime object.
    """
    with open(app_config.DELEGATE_TIMESTAMP_FILE) as f:
        updated_ts = f.read()

    return datetime.utcfromtimestamp(float(updated_ts))
def never_cache_preview(response):
    """
    Stamp Cache-Control directives on *response* so a preview is never
    cached by browsers or proxies. Returns the same response object.
    """
    cache_control = response.cache_control
    cache_control.max_age = 0
    cache_control.no_cache = True
    cache_control.must_revalidate = True
    cache_control.no_store = True
    return response
def open_db():
    """
    Open db connection (peewee) — used as a Flask before-request hook.
    """
    models.db.connect()
def close_db(response):
    """
    Close db connection — used as an after-request hook; passes the
    response through unchanged.
    """
    models.db.close()
    return response
def get_results(party, electiondate):
    """Collect per-race presidential results for a party on a given date.

    Returns a list of serialized race dicts (see get_race_results), sorted
    by state name. Races blacklisted for the date in app_config are skipped.
    """
    ap_party = PARTY_MAPPING[party]['AP']

    # One row per (raceid, statename) so each race is processed once.
    race_ids = models.Result.select(fn.Distinct(models.Result.raceid), models.Result.statename).where(
        models.Result.electiondate == electiondate,
        models.Result.party == ap_party,
        models.Result.level == 'state',
        models.Result.officename == 'President',
    )

    blacklist = app_config.RACE_BLACKLIST.get(electiondate)
    if blacklist:
        race_ids = race_ids.where(~(models.Result.raceid << blacklist))

    race_ids.order_by(models.Result.statename, models.Result.raceid)

    # Get copy once (spreadsheet of editorial copy, reused for every race).
    copy_obj = copytext.Copy(app_config.COPY_PATH)
    copy = copy_obj['meta']._serialize()

    output = []
    for race in race_ids:
        output.append(get_race_results(race.raceid, ap_party, copy, race.statename))

    sorted_output = sorted(output, key=lambda k: k['statename'])

    return sorted_output
def get_race_results(raceid, party, copy, statename):
    """
    Results getter: serialize one race's state-level results.

    Folds minor candidates into an 'other' bucket, sorts winners first then
    by vote count, and attaches precinct/tally/call metadata plus the
    editorial note from *copy*.
    """
    race_results = models.Result.select().where(
        models.Result.raceid == raceid,
        models.Result.level == 'state',
        models.Result.statename == statename
    )

    filtered, other_votecount, other_votepct = collate_other_candidates(list(race_results), party)

    # Two-pass sort: stable sort by surname first, then by vote count so
    # ties keep alphabetical order.
    secondary_sort = sorted(filtered, key=candidate_sort_lastname)
    sorted_results = sorted(secondary_sort, key=candidate_sort_votecount, reverse=True)

    called = False
    serialized_results = []
    for result in sorted_results:
        # A race is "called" if AP's call is accepted or an override is set.
        if (result.winner and result.call[0].accept_ap) or result.call[0].override_winner:
            called = True

        serialized_results.append(model_to_dict(result, backrefs=True))

    output = {
        'results': serialized_results,
        'other_votecount': other_votecount,
        'other_votepct': other_votepct,
        'statename': serialized_results[0]['statename'],
        'statepostal': serialized_results[0]['statepostal'],
        'precinctsreportingpct': serialized_results[0]['precinctsreportingpct'],
        'precinctsreporting': serialized_results[0]['precinctsreporting'],
        'precinctstotal': serialized_results[0]['precinctstotal'],
        'total': tally_results(raceid, statename),
        'called': called,
        'race_type': '',
        'note': get_race_note(serialized_results[0], copy)
    }

    # Meta rows (poll closing, race type, ordering) are optional.
    if len(serialized_results[0]['meta']):
        output.update({
            'poll_closing': serialized_results[0]['meta'][0].get('poll_closing'),
            'race_type': serialized_results[0]['meta'][0].get('race_type'),
            'order': serialized_results[0]['meta'][0].get('order')
        })

    return output
def get_race_note(race, copy):
    """
    Pluck the race's editorial note out of the meta copy sheet; empty
    string when no note exists for this state/party.
    """
    note_key = '{0}_{1}_note'.format(race['statepostal'], race['party']).lower()
    return copy.get(note_key, '')
def group_poll_closings(races):
    """Group not-yet-reporting, uncalled races by poll-closing order.

    Returns an OrderedDict keyed by ascending 'order' value; each bucket
    holds that group's poll_closing string and the matching state names.
    """
    closing_orders = []
    for race in races:
        if race['order'] not in closing_orders:
            closing_orders.append(race['order'])
    closing_orders.sort()

    grouped = OrderedDict()
    for order in closing_orders:
        bucket = {'poll_closing': '', 'races': []}
        for race in races:
            if race['order'] == order and race['total'] == 0 and not race['called']:
                bucket['poll_closing'] = race['poll_closing']
                bucket['races'].append(race['statename'])
        grouped[order] = bucket

    return grouped
def get_unreported_races(races):
    """Return state names of races with no votes tallied and no call made."""
    unreported = []
    for race in races:
        if race['total'] == 0 and not race['called']:
            unreported.append(race['statename'])
    return unreported
def _format_poll_closing(poll_closing):
    """Render a poll-closing time as AP style, e.g. '8:00 p.m.'."""
    formatted_time = ap_time_filter(poll_closing)
    formatted_period = ap_time_period_filter(poll_closing)
    return '{0} {1}'.format(formatted_time, formatted_period)
def get_last_updated(races):
    """Most recent result timestamp across called/reporting races.

    Races that are neither called nor have precincts reporting are ignored;
    when nothing qualifies, the current UTC time is returned instead.
    """
    latest = None
    for race in races:
        if not (race['called'] or race['precinctsreporting'] > 0):
            continue
        for result in race['results']:
            stamp = result['lastupdated']
            if latest is None or stamp > latest:
                latest = stamp

    return latest if latest is not None else datetime.utcnow()
def tally_results(raceid, statename):
    """
    Add results for a given party on a given date: sum of all state-level
    vote counts for the race (None when no rows match).
    """
    tally = models.Result.select(fn.SUM(models.Result.votecount)).where(
        models.Result.level == 'state',
        models.Result.raceid == raceid,
        models.Result.statename == statename
    ).scalar()

    return tally
def convert_serial_date(value):
    """Convert an Excel serial date to a naive UTC datetime.

    The spreadsheet times are entered as US/Eastern; the result is shifted
    to GMT and returned naive (tzinfo stripped).
    """
    parsed = datetime(*(xlrd.xldate_as_tuple(float(value), 0)))
    eastern = timezone('US/Eastern')
    parsed_eastern = eastern.localize(parsed)
    parsed_utc = parsed_eastern.astimezone(timezone('GMT'))
    parsed_naive = parsed_utc.replace(tzinfo=None)
    return parsed_naive
class APDatetimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetimes in AP style and dates as ISO."""
    def default(self, obj):
        if isinstance(obj, datetime):
            # e.g. "March 1, 2016, 8:00 p.m."
            thedate = ap_date_filter(obj)
            thetime = ap_time_filter(obj)
            theperiod = ap_time_period_filter(obj)
            return '{0}, {1} {2}'.format(thedate, thetime, theperiod)
        elif isinstance(obj, date):
            return obj.isoformat()
        else:
            return super(APDatetimeEncoder, self).default(obj)
| nprapps/elections16 | app/utils.py | utils.py | py | 11,492 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "decimal.Decimal",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "decimal.ROUND_DOWN",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "datetime.dateti... |
32921124712 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import datetime
import re
import redis
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose, Join, Identity
from w3lib.html import remove_tags
from models.es_type import ArticleType
from elasticsearch_dsl.connections import connections
es = connections.create_connection(ArticleType._doc_type.using)
redis_cli = redis.StrictRedis()
def date_convert(value):
    """Parse a 'YYYY/MM/DD' article date (optionally prefixed with '·').

    Falls back to today's date when the value cannot be parsed.
    """
    try:
        cleaned = value.replace(u'·', '').strip()
        return datetime.datetime.strptime(cleaned, "%Y/%m/%d").date()
    except Exception:
        return datetime.datetime.now().date()
def return_value(value):
    # Identity output processor: pass the scraped value through unchanged.
    return value
def get_nums(value):
    """Extract the first integer found in a scraped string; 0 when absent.

    Empty extraction results ('' or []) yield 0 as well.
    """
    if value == '' or value == []:
        # Fix: the original replaced the empty value with the int 0 and then
        # passed it to re.match, which raises TypeError. Return 0 directly.
        return 0
    match_re = re.match(".*?(\d+).*?", value)
    if match_re:
        return int(match_re.group(1))
    return 0
def remove_comment_tags(value):
    """Blank out a tag value when it contains '评论' (comment)."""
    return "" if u"评论" in value else value
def gen_suggests(index, info_tuple):
    """Build an Elasticsearch completion-suggester payload.

    ``info_tuple`` is an iterable of (text, weight) pairs. Each text is
    tokenized via the ES ik_max_word analyzer; tokens already emitted for an
    earlier (higher-priority) entry are skipped so suggestions are unique.
    """
    used_words = set()
    suggests = []
    for text, weight in info_tuple:
        if text:
            # Call the ES analyze API to tokenize the string.
            words = es.indices.analyze(index=index, analyzer="ik_max_word", params={'filter':["lowercase"]}, body=text)
            anylyzed_words = set([r["token"] for r in words["tokens"] if len(r["token"])>1])
            new_words = anylyzed_words - used_words
            # Bug fix: record the emitted tokens; previously `used_words`
            # never grew, so later entries could repeat earlier tokens.
            used_words.update(new_words)
        else:
            new_words = set()
        if new_words:
            suggests.append({"input":list(new_words), "weight":weight})
    return suggests
class ArticlespiderItem(scrapy.Item):
    """Placeholder item generated by the scrapy project template (unused)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class ArticleItemLoader(ItemLoader):
    """Custom ItemLoader: each field yields a single value, not a list."""
    # Take the first extracted value by default.
    default_output_processor = TakeFirst()
class Remove_tag(Identity):
    """Output processor dropping any tag that contains '评论' (comment)."""
    def __call__(self, values):
        kept = []
        for tag in values:
            if u"评论" not in tag:
                kept.append(tag)
        return kept
class JobBoleArticleItem(scrapy.Item):
    """A scraped Jobbole article, with per-field input/output processors."""
    title = scrapy.Field()
    create_date = scrapy.Field(
        input_processor = MapCompose(date_convert),
        output_processor = TakeFirst()
    )
    url = scrapy.Field()
    url_object_id = scrapy.Field()
    # Keep the raw list (override the loader's TakeFirst default) so the
    # image pipeline receives an iterable of URLs.
    front_image_url = scrapy.Field(
        output_processor = MapCompose(return_value)
    )
    front_image_path = scrapy.Field()
    praise_nums = scrapy.Field(
        input_processor = MapCompose(get_nums)
    )
    comment_nums = scrapy.Field(
        input_processor = MapCompose(get_nums)
    )
    fav_nums = scrapy.Field(
        input_processor = MapCompose(get_nums)
    )
    content = scrapy.Field()
    tags = scrapy.Field(
        # input_processor = MapCompose(remove_comment_tags),
        input_processor = Remove_tag(),
        output_processor = Join(",")
    )
    def get_insert_sql(self):
        """Return (sql, params) for an upsert into jobbole_article."""
        insert_sql = """
            insert into jobbole_article(title, url, url_object_id, create_date, fav_nums, front_image_url, front_image_path,
            praise_nums, comment_nums, tags, content)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE fav_nums=VALUES(fav_nums), praise_nums=VALUES(praise_nums), comment_nums=VALUES(comment_nums)
        """
        # NOTE(review): bare except hides any error, not just a missing key;
        # praise_nums silently defaults to 0 on any failure.
        try:
            praise_nums = self["praise_nums"]
        except:
            praise_nums = 0
        # NOTE(review): front_image_path is unconditionally overwritten here,
        # so any value set by the image pipeline is discarded — confirm intent.
        self["front_image_path"] = ''
        params = (self["title"], self["url"], self["url_object_id"], self["create_date"], self["fav_nums"],
                  self["front_image_url"], self["front_image_path"], praise_nums, self["comment_nums"], self["tags"],
                  self["content"])
        return insert_sql, params
    def save_to_es(self):
        """Index this item into Elasticsearch and bump the Redis counter."""
        article = ArticleType()
        article.title = self['title']
        article.create_date = self["create_date"]
        # Strip HTML from the first content fragment before indexing.
        article.content = remove_tags(self["content"][0])
        article.front_image_url = self["front_image_url"]
        if "front_image_path" in self:
            article.front_image_path = self["front_image_path"]
        article.praise_nums = self["praise_nums"]
        article.fav_nums = self["fav_nums"]
        article.comment_nums = self["comment_nums"]
        article.url = self["url"]
        article.tags = self["tags"]
        # Use the URL hash as the document id so re-crawls update in place.
        article.meta.id = self["url_object_id"]
        article.suggest = gen_suggests(ArticleType._doc_type.index, ((article.title,10),(article.tags, 7)))
        article.save()
        redis_cli.incr("jobbole_count")
        return
{
"api_name": "elasticsearch_dsl.connections.connections.create_connection",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "elasticsearch_dsl.connections.connections",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "models.es_type.ArticleType._doc_type",
... |
70562613224 | from collections import deque
from heapq import heappush, heappop
def solution(priorities, location):
    """Return the 1-based print order of the job at `location`.

    Jobs print FIFO, but a job is re-queued whenever a higher-priority job
    is still waiting; a max-heap tracks the highest remaining priority.
    """
    max_heap = []
    waiting = deque()
    for idx, priority in enumerate(priorities):
        waiting.append((priority, idx))
        # Negate for max-heap ordering; keep the raw value for comparison.
        heappush(max_heap, (-priority, priority))
    printed = 0
    while waiting:
        priority, idx = waiting.popleft()
        if priority == max_heap[0][1]:
            # Nothing waiting has higher priority: print this job.
            heappop(max_heap)
            printed += 1
            if idx == location:
                return printed
        else:
            waiting.append((priority, idx))
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 15,
"usage_type": "call"
}
] |
20145792954 | import argparse
import inspect
def parse_funct_arguments(fn, args=None, free_arguments=None):
    """Bind CLI-style options to fn's keyword parameters.

    One '--<name>' option is created per parameter of ``fn`` not listed in
    ``free_arguments`` (default and type taken from the signature). Returns
    (wrapped_fn, parsed_dict, unknown_args), where wrapped_fn forwards the
    parsed values to ``fn`` on every call.
    """
    if free_arguments is None:
        free_arguments = []
    parser = argparse.ArgumentParser()
    for name, param in inspect.signature(fn).parameters.items():
        if name in free_arguments:
            continue
        parser.add_argument('--' + name, default=param.default, type=param.annotation)
    parsed, unknown = parser.parse_known_args(args)
    bound = vars(parsed)

    def wrapped(*call_args, **call_kwargs):
        return fn(*call_args, **call_kwargs, **bound)

    return wrapped, bound, unknown
| antonior92/narx-double-descent | util.py | util.py | py | 588 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "inspect.signature",
"line_number": 9,
"usage_type": "call"
}
] |
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, StructField, StructType, IntegerType

# Word count over the Romeo & Juliet text file.
spark = SparkSession.builder.master("local[*]").appName("romeoJulietWordCount").getOrCreate()
sc = spark.sparkContext

inprdd = sc.textFile("D:/Spark_Scala/data/wordcount/romeojuliet.txt")

# Lower-case each line, split on spaces, drop empty tokens, then count
# occurrences per word.
# Bug fix: the original filter lambda returned a tuple ((x[0] != ''), x[1]),
# which is always truthy, so empty-string tokens were never removed.
outputRDD = inprdd \
    .map(lambda x: x.lower()) \
    .flatMap(lambda x: x.split(" ")) \
    .map(lambda x: (x, 1)) \
    .filter(lambda x: x[0] != '') \
    .reduceByKey(lambda a, b: a + b)

# Show a sample of the counts.
for x in outputRDD.take(10):
    print(x)

outputDF = outputRDD.toDF(["words", "count"])
outputDF.show()
{
"api_name": "pyspark.sql.SparkSession.builder.master",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 5,
"usage_type": "n... |
29861997533 | from dataclasses import dataclass
import aiohttp
import time
from datetime import datetime
import orjson
import aiofiles
import asyncio
import logging
_LOGGER = logging.getLogger(__name__)
from .const import JSON_CACHE_FILE
@dataclass
class InverterData:
serial_number: str
firmware_version: str|None
model: str
temperature: float
current_power: float
daily_power_yield: float
alerts: bool|None
@classmethod
def from_dict(cls,data:dict):
return cls(
serial_number=data["serial_number"],
firmware_version=data["firmware_version"],
model=data["model"],
temperature=data["temperature"],
current_power=data["current_power"],
daily_power_yield=data["daily_power_yield"],
alerts=data["alerts"],
)
@dataclass
class WifiDataLoggerData:
online_status:bool #derived from connectivity to wifi data logger stick over network
last_seen:datetime
serial_number:str
firmware_version:str|None
wireless_ap_mode:bool|None
wireless_sta_mode:bool|None
router_ssid:str|None
signal_quality:int
ip_address:str|None
mac_address:str
remote_server_a:bool|None
remote_server_b:bool|None
@classmethod
def from_dict(cls,data:dict):
return cls(
online_status=data["online_status"],
last_seen=data["last_seen"],
serial_number=data["serial_number"],
firmware_version=data["firmware_version"],
wireless_ap_mode=data["wireless_ap_mode"],
wireless_sta_mode=data["wireless_sta_mode"],
router_ssid=data["router_ssid"],
signal_quality=data["signal_quality"],
ip_address=data["ip_address"],
mac_address=data["mac_address"],
remote_server_a=data["remote_server_a"],
remote_server_b=data["remote_server_b"],
)
@dataclass
class SystemData:
    """Aggregate of the inverter and wifi data-logger snapshots."""
    inverter: InverterData
    wifi_logger: WifiDataLoggerData

    @classmethod
    def from_dict(cls, data: dict):
        """Create an instance from a nested mapping."""
        return cls(
            inverter=InverterData.from_dict(data["inverter"]),
            wifi_logger=WifiDataLoggerData.from_dict(data["wifi_logger"]),
        )
class SolisWifiApi():
    """Async client for a Solis inverter wifi data logger's local HTTP API."""
    def __init__(self,hostname:str,username:str,password:str) -> None:
        # NOTE(review): this logs the password at INFO level — consider removing.
        _LOGGER.info((hostname,username,password))
        self._session = aiohttp.ClientSession(base_url=hostname,auth=aiohttp.BasicAuth(username,password))
    async def getSystemData(self) -> SystemData:
        """Fetch inverter and data-logger state with two sequential requests."""
        inverter_data = await self.getInverterData()
        wifi_logger_data = await self.getWifiDataLoggerData()
        return SystemData(inverter_data,wifi_logger_data)
    async def getInverterData(self) -> InverterData:
        """Read /inverter.cgi and map its 8 positional fields to InverterData."""
        inverterDataRaw= await self._loadDataAndParseResponse("inverter","Inverter",8)
        return InverterData(
            inverterDataRaw[0],
            inverterDataRaw[1],
            inverterDataRaw[2],
            float(inverterDataRaw[3]),
            float(inverterDataRaw[4]),
            float(inverterDataRaw[5]),
            #Data in element 6 is 'Total yield' which only show value 'd'??
            True if inverterDataRaw[7] == "YES" else False
        )
    async def getWifiDataLoggerData(self) -> WifiDataLoggerData:
        """Read /moniter.cgi and map its 13 positional fields to WifiDataLoggerData."""
        monitorDataRaw= await self._loadDataAndParseResponse("moniter","Wifi Data Logger",13)
        return WifiDataLoggerData(
            True,
            datetime.now(),
            monitorDataRaw[0],
            monitorDataRaw[1],
            True if monitorDataRaw[2] == "Enable" else False,
            #Data in elements 3-5 are Null, do not know what they are
            True if monitorDataRaw[6] == "Enable" else False,
            monitorDataRaw[7],
            int(monitorDataRaw[8]),
            monitorDataRaw[9],
            monitorDataRaw[10],
            True if monitorDataRaw[11] == "Connected" else False,
            True if monitorDataRaw[12] == "Connected" else False
        )
    async def getOffLineData(self,last_known_system_data:SystemData) -> SystemData:
        """Build placeholder SystemData for when the logger is unreachable.

        Identity fields (serials, model, addresses) are reused from the last
        known data (or the JSON cache); live readings are zeroed/None.
        """
        if last_known_system_data == None:
            last_known_system_data= await self._getCachedData()
        inverter_data = InverterData(
            last_known_system_data.inverter.serial_number if last_known_system_data else "",
            None,
            last_known_system_data.inverter.model if last_known_system_data else "",
            0,
            0,
            0,
            None
        )
        wifi_logger_data=WifiDataLoggerData(
            False,
            last_known_system_data.wifi_logger.last_seen if last_known_system_data else datetime.min,
            last_known_system_data.wifi_logger.serial_number if last_known_system_data else "",
            None,
            None,
            None,
            None,
            0,
            last_known_system_data.wifi_logger.ip_address if last_known_system_data else "",
            last_known_system_data.wifi_logger.mac_address if last_known_system_data else "",
            None,
            None
        )
        return SystemData(inverter_data,wifi_logger_data)
    async def _getCachedData(self) -> SystemData | None:
        """Load the last persisted SystemData from the JSON cache file, or None."""
        try:
            async with aiofiles.open(JSON_CACHE_FILE, mode='rb') as f:
                content = await f.read()
                system_data_dict=orjson.loads(content)
                system_data=SystemData.from_dict(system_data_dict)
                return system_data
        except OSError:
            # Cache file missing/unreadable: caller falls back to blanks.
            #await asyncio.sleep(0)
            return None
    def _generateTimeToken(self) -> str:
        """Return the current unix time as a cache-busting query token."""
        return str(int(time.time()))
    async def _loadDataAndParseResponse(self,dataSource:str,dataSourceName:str,dataExpectedLength:int)-> list[str]:
        """GET /<dataSource>.cgi and split the ';'-separated payload.

        Raises SolisWifiApiParseException when the field count is wrong.
        """
        response= await self._session.get("/{dataSource}.cgi?t={time}".format(dataSource=dataSource,time=self._generateTimeToken()))
        response.raise_for_status()
        responseText = await response.text()
        dataRaw=self._parseResponseText(responseText)
        if len(dataRaw) != dataExpectedLength:
            raise SolisWifiApiParseException(f"Could not parse {dataSourceName} data, please check connection")
        return dataRaw
    def _parseResponseText(self,responseText:str)-> list[str]:
        """Strip NUL padding and the trailing ';\\r\\n', then split on ';'."""
        #Removing NUL characters from response
        cleanedup=responseText.replace("\x00","").removesuffix(";\r\n")
        return cleanedup.split(";")
    async def close(self):
        """Close the underlying aiohttp session."""
        await self._session.close()
class SolisWifiApiManager:
    """Async context manager that opens and closes a SolisWifiApi client."""
    def __init__(self,hostname:str,username:str,password:str) -> None:
        self._hostname=hostname
        self._username=username
        self._password=password
    async def __aenter__(self) -> SolisWifiApi:
        # Create the client (and its aiohttp session) on entry.
        self.soliswifiapi=SolisWifiApi(self._hostname,self._username,self._password)
        return self.soliswifiapi
    async def __aexit__(self, exc_type, exc, tb):
        # Always close the underlying HTTP session on exit.
        await self.soliswifiapi.close()
class SolisWifiApiParseException(Exception):
    """Raised when a logger response payload cannot be parsed."""
| tmulkern/SolisWifiDataLogger | custom_components/solis_wifi_data_logger/solis_wifi_api.py | solis_wifi_api.py | py | 7,146 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "dataclasses... |
34325207808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
#Author: Read AUTHORS file.
#License: Read COPYING file.
import xml.etree.cElementTree as etree
from os import path
from pkg import pkg
from constants import const
from ..lib import eventHandler, listdir, getfile, ratioCalc
class pkgdir(object):
    """A directory of packages, each subdirectory holding a package-info XML.

    Fires onProcessing events with a progress ratio while loading and keeps
    parsed packages keyed by name.
    """
    def __init__(self, actCode):
        self.__clear()
        self.onError = eventHandler() #(ActionCode, Error code, data)
        self.onProcessing = eventHandler() #(ActionCode, ProcessRatio, data)
        self.__actCode = actCode
    def __clear(self):
        """Reset target path and the loaded package map."""
        self.__path = ""
        self.__packages = {} #{"pkg name": pkg obj, ...}
    def setTarget(self, p):
        """Point at a new package directory, discarding loaded packages."""
        self.__clear()
        self.__path = path.normpath(p)
    def loadList(self):
        """Return the subdirectory names of the target, or False if empty."""
        pkgdirs = listdir(self.__path)
        if not pkgdirs:
            return False
        return(pkgdirs)
    def loadPackageInfo(self, d):
        """Parse one subdirectory's package-info XML; True on success."""
        xml = getfile( path.join(self.__path, d, const.PACKAGE_INFO_FILE) )
        if xml:
            package = pkg(xml)
            if package.right:
                self.__packages[package.n] = package
                return(True)
        return(False)
    def load(self, p):
        """Load every package under `p`, reporting progress via onProcessing.

        Any unreadable or invalid package aborts the load, clears state and
        returns False; returns True when all packages parsed.
        """
        #
        self.__clear()
        self.__path = path.normpath(p)
        pkgdirs = listdir(self.__path)
        if not pkgdirs:
            return False
        totalPackages = len(pkgdirs)
        for d in pkgdirs:
            xml = getfile(
                path.join(self.__path, d, const.PACKAGE_INFO_FILE)
            )
            if not xml:
                self.__clear()
                return(False)
            package = pkg(xml)
            if package.right:
                self.__packages[package.n] = package
                # Report progress as loaded/total.
                self.onProcessing.raiseEvent(
                    self.__actCode,
                    ratioCalc(totalPackages, len(self.__packages)),
                    package.n
                )
            else:
                self.__clear()
                return(False)
        return(True)
    def getPackages(self):
        """Return the {name: pkg} map of loaded packages."""
        return(self.__packages)
| pardus-anka/paso | src/engine/packages/packages.py | packages.py | py | 2,158 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lib.eventHandler",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "lib.eventHandler",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
33102651292 | from datetime import timedelta, datetime
import os
from airflow import DAG
from google.cloud import storage
from airflow.utils.dates import days_ago
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash import BashOperator
from airflow.decorators import task, dag
from airflow.operators.email import EmailOperator
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocDeleteClusterOperator,
DataprocSubmitJobOperator,
ClusterGenerator
)
from airflow.providers.google.cloud.sensors.dataproc import DataprocJobSensor
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
GOOGLE_CONN_ID = "google_cloud_default"
PROJECT_ID="data-engineering-rj"
BUCKET_NAME = 'fhvhv-data-lake'
CLUSTER_NAME = 'fhvhvcluster'
REGION = 'us-central1'
PYSPARK_FILENAME ='spark_processing.py'
PYSPARK_URI = f'gs://fhvhv-data-lake/spark-job/{PYSPARK_FILENAME}'
LOCAL_PATH = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
BIGQUERY_TABLE = 'data-engineering-rj.fhvhv_analysis.fhvhv_trips_data'
PROCESSED_DATA_PATH = 'gs://fhvhv-data-lake/output_fhv_data/trips_data/*.parquet'
'''
Process:
- create a dataproc cluster
- upload a pyspark file to gcs bucket
- submit spark job to dataproc cluster
- excute pyspark job(load data from gcs-> transform data -> submit data to GCS -> Submit data to bigquery )
- delete the cluster
- submit processed data from GCS to BigQuery
'''
PYSPARK_JOB = {
"reference":{"project_id":PROJECT_ID},
"placement":{"cluster_name":CLUSTER_NAME},
"pyspark_job":{"main_python_file_uri":PYSPARK_URI}
}
CLUSTER_CONFIG = ClusterGenerator(
project_id = PROJECT_ID,
zone="us-central1-a",
master_machine_type="n1-standard-2",
worker_machine_type="n1-standard-2",
num_workers=2,
worker_disk_size=40,
master_disk_size=30,
storage_bucket=BUCKET_NAME,
).make()
# Defaults applied to every task in the DAG.
# Bug fix: 'email_on_success' appeared twice in this dict (True, then False);
# the later False entry silently won, so only that value is kept.
default_args = {
    'owner': 'Rohit Joshi',
    'depends_on_past': False,
    'email_on_failure': True,
    'email_on_retry': False,
    'email_on_success': False,
    'retries': 1,
    'start_date': days_ago(1),
    'retry_delay': timedelta(minutes=3),
    # NOTE(review): Airflow reads 'schedule_interval' from the DAG object,
    # not from default_args — confirm this entry is intentional.
    'schedule_interval': '@once',
    'email': ['rohitjoshi9july@gmail.com']
}
@task(task_id="upload_pyspark_file")
def upload_to_gcs(bucket_name, filename):
    """Upload the local PySpark script to gs://<bucket_name>/spark-job/<filename>.

    Bug fix: the path strings contained literal text and never interpolated
    the ``filename`` argument.
    """
    local_path = f"/opt/{filename}"
    target_path = f"spark-job/{filename}"
    # Lower the chunk/multipart thresholds so small files upload in one request.
    storage.blob._DEFAULT_CHUNKSIZE = 5 * 1024 * 1024
    storage.blob._MAX_MULTIPART_SIZE = 5 * 1024 * 1024
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    blob = bucket.blob(target_path)
    blob.upload_from_filename(local_path)
    print("file added successfully")
with DAG("Spark_FHVHV_ETL", default_args = default_args) as dag:
    # Bug fix: the env var name contained a trailing space
    # ('GOOGLE_APPLICATION_CREDENTIALS '), so Google client libraries could
    # never see the credentials path.
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/home/rohit/.gc/de-cred.json'  # path of google service account credentials

    start_pipeline = DummyOperator(
        task_id= "start_pipeline",
        dag=dag
    )

    # Create the Dataproc cluster that will run the Spark job.
    create_cluster = DataprocCreateClusterOperator(
        task_id="create_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_config=CLUSTER_CONFIG,
        region=REGION,
        cluster_name =CLUSTER_NAME,
        priority_weight=4
    )

    # Upload the PySpark script to the GCS bucket.
    upload_pyspark_file = upload_to_gcs(BUCKET_NAME, PYSPARK_FILENAME)

    # Submit the PySpark job to the cluster.
    execute_pyspark_task = DataprocSubmitJobOperator(
        task_id="submit_pyspark_job",
        job=PYSPARK_JOB,
        region=REGION,
        project_id=PROJECT_ID,
        priority_weight=2
    )

    # Tear the cluster down once processing is finished.
    delete_cluster = DataprocDeleteClusterOperator(
        task_id="delete_dataproc_cluster",
        project_id=PROJECT_ID,
        cluster_name=CLUSTER_NAME,
        region=REGION,
        priority_weight=1
    )

    # Load the processed parquet output from GCS into BigQuery.
    gcs_to_bq = GCSToBigQueryOperator(
        task_id= "submit_processed_data_to_bigquery",
        bucket= BUCKET_NAME,
        source_objects=[PROCESSED_DATA_PATH],
        destination_project_dataset_table=BIGQUERY_TABLE,
        source_format='parquet',
        autodetect=True,
        cluster_fields=['trip_month']
    )

    finish_pipeline = DummyOperator(
        task_id="finish_pipeline",
        dag=dag
    )

    start_pipeline >> create_cluster >> upload_pyspark_file >> execute_pyspark_task >> delete_cluster >> gcs_to_bq >> finish_pipeline
| Rohitjoshi07/FHVDataAnalysis | airflow/dags/spark-dataproc.py | spark-dataproc.py | py | 4,553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "airflow.providers.google.cloud.operators.dataproc.ClusterGenerator",
"line_number": 52,
"usage_type": "cal... |
import cv2 as cv
import numpy as np

# Steer by locating the largest dark region in each frame and pointing an
# arrow from a fixed reference point toward its centroid.
cap = cv.VideoCapture('Test.mp4')

# Orientation (min-area-rect angle) of the dark region per frame, seeded
# with 0 so a frame-to-frame rotation delta exists from the first frame.
orient_ation = []
count = 0
orient_ation.append(count)
# Bug fix: the snapshot counter now lives outside the loop so saved fault
# frames are numbered instead of always overwriting 'fault_%i.jpg'.
fault_count = 0

while True:
    # Read input for current frame
    ret1, current_frame = cap.read()
    # Stop when the stream is exhausted or unreadable
    if ret1 == False:
        print('There is no valid input')
        break

    # Gray-scale conversion plus median blur to suppress noise.
    current_frame_gray = cv.cvtColor(current_frame, cv.COLOR_BGR2GRAY)
    current_frame_fil = cv.medianBlur(current_frame_gray, 19)

    # Inverse binary threshold: dark pixels (< 15) become foreground.
    ret4, current_frame_thresh = cv.threshold(current_frame_fil, 15, 255, cv.THRESH_BINARY_INV)

    # Contour detection (OpenCV 3.x returns image, contours, hierarchy).
    im2, current_frame_cont, hierarchy = cv.findContours(current_frame_thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)

    # Largest contour first.
    cnt_area_current = sorted(current_frame_cont, key=cv.contourArea, reverse=True)
    cv.drawContours(current_frame, cnt_area_current[0], -1, (255, 0, 0), 3)

    # Centroid of the largest dark region via image moments.
    M_current = cv.moments(cnt_area_current[0])
    cx_current = int(M_current['m10'] / M_current['m00'])  # center in x-axis
    cy_current = int(M_current['m01'] / M_current['m00'])  # center in y-axis
    cv.circle(current_frame, (cx_current, cy_current), 7, (255, 0, 0), -1)

    # Arrow from the reference point to the dark-region centroid.
    cv.arrowedLine(current_frame, (640, 650), (cx_current, cy_current), (0, 255, 0), 10)

    # Direction decision: left 40% / middle / right 40% of the frame width.
    left_index = int((4 * current_frame.shape[1]) / 10)
    right_index = int((6 * current_frame.shape[1]) / 10)
    if cx_current <= left_index:
        cv.putText(current_frame, 'Move Left', (420, 100), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    elif cx_current >= right_index:
        cv.putText(current_frame, 'Move Right', (420, 100), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)
    else:
        cv.putText(current_frame, 'Move Forward', (420, 100), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 5)

    # Rotation delta between consecutive frames from the min-area rectangle.
    min_rec = cv.minAreaRect(cnt_area_current[0])
    orient_ation.append(min_rec[2])
    rotation = abs(orient_ation[count] - orient_ation[count + 1])
    count = count + 1

    # Draw the rotatable bounding rectangle on the original frame.
    box = cv.boxPoints(min_rec)
    box = np.int0(box)
    cv.drawContours(current_frame, [box], 0, (0, 0, 255), 2)

    # Excessive rotation between frames: save the frame for inspection.
    # (`rotation` is an absolute value, so the old `<= -80` branch was
    # unreachable and has been dropped.)
    if rotation >= 80:
        print('Too much rotation')
        # Bug fix: format the counter into the filename; the original wrote
        # the literal name 'fault_%i.jpg' every time.
        cv.imwrite('fault_%i.jpg' % fault_count, current_frame)
        fault_count += 1

    # Produce output; ESC quits.
    cv.imshow('procedure', current_frame)
    cv.imshow('threshold', current_frame_thresh)
    if cv.waitKey(30) & 0xFF == 27:
        break

cap.release()
cv.destroyAllWindows()
| mightykim91/navigation_system | source_code/version_2AB.py | version_2AB.py | py | 3,430 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.medianBlur",... |
38746128821 | from sqlalchemy import create_engine
from config import MYSQL_ROSPARK_DB
from db.rospark_db import Base, make_engine
try:
    # Rebuild the schema: drop everything, then recreate only `parkings`.
    engine = make_engine(MYSQL_ROSPARK_DB)
    metadata = Base.metadata
    metadata.drop_all(bind=engine)
    parkings_table = metadata.tables["parkings"]
    metadata.create_all(bind=engine, tables=[parkings_table])
except Exception as e:
    print("Error: " + str(e))
| andxeg/LeadersOfDigital_2020_parking_system | src/recreate_db.py | recreate_db.py | py | 374 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "db.rospark_db.make_engine",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config.MYSQL_ROSPARK_DB",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "db.rospark_db.Base.metadata.drop_all",
"line_number": 8,
"usage_type": "call"
},
... |
# -*- coding:utf-8 -*-
import requests
import json
import os
import time

# Download the Honor of Kings hero list and report per-hero progress.
start = time.time()

respond = requests.get('http://pvp.qq.com/web201605/js/herolist.json')
# Bug fix: requests.Response has no read(); decode the body and strip the
# UTF-8 BOM (the old code sliced off the first 3 bytes for the same reason).
json_hero = json.loads(respond.content.decode('utf-8-sig'))

x = 0
hero_dir = 'D:\img\\'
if not os.path.exists(hero_dir):
    os.mkdir(hero_dir)

# Bug fix: iterate over the hero list; the original looped over
# range(len(hero_dir)) — the character count of the directory path string.
for m in range(len(json_hero)):
    x = x + 1
    print("正在下载....第" + str(x) + "张")

# Bug fix: report the total once, after the loop, instead of every iteration.
end = time.time()
time_second = end - start
print("共下载" + str(x) + "张,共耗时" + str(time_second) + "秒")

# Skin download logic kept from the original (still disabled):
# ename = json_hero[m]['ename']
# cname = json_hero[m]['cname']
# skinName = json_hero[m]['skin_name'].split('|')
# skinNumber = len(skinName)
# for bigskin in range(1, skinNumber + 1):
#     urlPocture = 'http://game.gtimg.cn/images/yxzj/img201605/heroimg/hero-info/' + str(ename) + '/' + str(ename) + '-bigskin-' + str(bigskin) + '.jpg'
#     picture = requests.get(urlPocture).content
#     with open(hero_dir + cname + "-" + skinName[bigskin-1] + '.jpg', 'wb') as f:
#         f.write(picture)
| rinuandengfeng/python_study | 所有/爬虫.py | 爬虫.py | py | 1,158 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 1... |
37661910708 | import numpy as np
from math import pi
import time
from scipy.sparse.linalg import eigs, eigsh, spsolve, lobpcg
class Solution:
    """Modal and harmonic solvers for a sparse (stiffness, mass) system.

    Harmonic responses support two proportional damping models:
    viscous (alpha_v, beta_v) and hysteretic (alpha_h, beta_h).
    """

    def __init__(self, stiffness_matrix, mass_matrix, **kwargs):
        # CSC format is efficient for the sparse factorizations below.
        self.stiffness_matrix = stiffness_matrix.tocsc()
        self.mass_matrix = mass_matrix.tocsc()
        # Optional reduction matrices / bookkeeping for prescribed DOFs.
        self.Kr = kwargs.get("Kr", None)
        self.Mr = kwargs.get("Mr", None)
        self.presc_dofs_info = kwargs.get("presc_dofs_info", None)
        self.free_dofs = kwargs.get("free_dofs", None)
        # Frequency sweep: either an explicit vector or bounds + resolution.
        self.frequencies = kwargs.get("frequencies", None)
        self.minor_freq = kwargs.get("minor_freq", None)
        self.major_freq = kwargs.get("major_freq", None)
        self.df = kwargs.get("df", None)
        self.number_points = kwargs.get("number_points", None)
        # Proportional damping coefficients (None -> treated as 0 later).
        self.alpha_v = kwargs.get("alpha_v", None)
        self.beta_v = kwargs.get("beta_v", None)
        self.alpha_h = kwargs.get("alpha_h", None)
        self.beta_h = kwargs.get("beta_h", None)

    def modal_analysis(self, number_modes=10, which='LM', sigma=0.01, timing=False):
        """Solve the generalized eigenproblem K x = w^2 M x.

        Returns (natural_frequencies [Hz, ascending], modal_shape), with
        mode shapes normalized with respect to generalized mass coordinates.
        """
        start = time.time()
        # Shift-invert about `sigma`: 'LM' then returns the modes closest to
        # sigma, i.e. the lowest modes for a small positive sigma.
        eigen_values, eigen_vectors = eigs(self.stiffness_matrix,
                                           k=number_modes,
                                           M=self.mass_matrix,
                                           which=which,
                                           sigma=sigma)
        end = time.time()
        if timing:
            print('Time to perform modal analysis :' + str(round((end - start), 6)) + '[s]')
        natural_frequencies = np.sqrt(np.absolute(np.real(eigen_values))) / (2 * pi)
        ind_ord = np.argsort(natural_frequencies)
        natural_frequencies = natural_frequencies[ind_ord]
        modal_shape = np.real(eigen_vectors[:, ind_ord])
        return natural_frequencies, modal_shape

    def freq_vector(self):
        """Return the analysis frequency vector, building it from
        minor/major/df (prompting interactively for missing bounds)."""
        # Bug fix: the original tested `np.array(self.frequencies).all() == None`,
        # which is unreliable; test for an unset/empty vector directly.
        if self.frequencies is None or len(self.frequencies) == 0:
            if self.minor_freq is None:
                self.minor_freq = float(input('Enter a value to a minor frequency of analysis: '))
            if self.major_freq is None:
                self.major_freq = float(input('Enter a value to a major frequency of analysis: '))
            if self.df is None and self.number_points is None:
                self.df = float(input('Enter a value to frequency resolution: '))
            if self.df is None and self.number_points is not None:
                self.df = (self.major_freq - self.minor_freq) / (self.number_points - 1)
            self.frequencies = np.arange(self.minor_freq, self.major_freq + self.df, self.df)
        return self.frequencies

    def _default_damping(self):
        """Replace unset (None) damping coefficients with 0.

        Bug fix: mode_superposition previously applied these with if/elif,
        so e.g. beta_v stayed None whenever alpha_v was set; defaults are
        now applied independently, matching direct_method.
        """
        if self.alpha_v is None:
            self.alpha_v = 0
        if self.beta_v is None:
            self.beta_v = 0
        if self.alpha_h is None:
            self.alpha_h = 0
        if self.beta_h is None:
            self.beta_h = 0

    def _presc_load_vectors(self):
        """Assemble equivalent load vectors from prescribed DOFs.

        Returns (Kr_v, Mr_v); both are 0 when no reduction matrices exist.
        (Also fixes `== None` comparisons, which are unsafe on sparse
        matrices.)
        """
        if self.Kr is None or self.Mr is None:
            return 0, 0
        Kr = (self.Kr.toarray())[:, self.free_dofs]
        Mr = (self.Mr.toarray())[:, self.free_dofs]
        Kr_temp = np.zeros((Kr.shape[1], Kr.shape[0]))
        Mr_temp = np.zeros((Mr.shape[1], Mr.shape[0]))
        # Weight each prescribed DOF's column by its prescribed value.
        for ind, value in enumerate(self.presc_dofs_info[:, 2]):
            Kr_temp[:, ind] = value * Kr[ind, :]
            Mr_temp[:, ind] = value * Mr[ind, :]
        return np.sum(Kr_temp, axis=1), np.sum(Mr_temp, axis=1)

    def direct_method(self, F, timing=False):
        """Harmonic analysis by direct solution of the full damped system at
        each sweep frequency. Returns (x, frequencies)."""
        self._default_damping()
        Kr_v, Mr_v = self._presc_load_vectors()
        M = self.mass_matrix
        K = self.stiffness_matrix
        frequencies = self.freq_vector()
        x = np.zeros([self.stiffness_matrix.shape[0], len(frequencies)], dtype=complex)
        start = time.time()
        for i, freq in enumerate(frequencies):
            # Equivalent load from prescribed DOFs at this frequency.
            F_add = (1 + 1j*freq*self.beta_v + 1j*self.beta_h)*Kr_v - (((2 * pi * freq)**2) - 1j*freq*self.alpha_v - 1j*self.alpha_h)*Mr_v
            K_damp = (1 + 1j*freq*self.beta_v + 1j*self.beta_h)*K
            M_damp = (-((2 * pi * freq)**2) + 1j*freq*self.alpha_v + 1j*self.alpha_h)*M
            A = K_damp + M_damp
            x[:, i] = spsolve(A, F - F_add)
        if timing:
            end = time.time()
            print('Time to solve harmonic analisys problem through direct method:' + str(round((end - start), 6)) + '[s]')
        return x, frequencies

    def mode_superposition(self, F, number_modes=10, which='LM', sigma=0.01, timing=False, **kwargs):
        """Harmonic analysis projected onto `number_modes` modes.

        Optional kwargs `modal_shape`/`natural_frequencies` reuse a previous
        modal analysis. Returns (x, frequencies, natural_frequencies,
        modal_shape).
        """
        self._default_damping()
        Kr_v, Mr_v = self._presc_load_vectors()
        frequencies = self.freq_vector()
        x = np.zeros([self.stiffness_matrix.shape[0], len(frequencies)], dtype=complex)
        modal_shape = kwargs.get("modal_shape", None)
        natural_frequencies = kwargs.get("natural_frequencies", None)
        start = time.time()
        # Bug fix: a plain `is None` test; the old np.array(...).all() check
        # could fall through to `.shape` on None and raise AttributeError.
        if modal_shape is None or modal_shape.shape[1] != number_modes:
            natural_frequencies, modal_shape = self.modal_analysis(number_modes=number_modes, which='LM', sigma=sigma)
        for i, freq in enumerate(frequencies):
            # Diagonal of the damped modal flexibility at this frequency.
            Kg_damp = (1 + 1j*self.beta_v*freq + 1j*self.beta_h)*((2 * pi * natural_frequencies)**2)
            Mg_damp = (1j*freq*self.alpha_v + 1j*self.alpha_h) - ((2 * pi * freq)**2)
            data = np.divide(1, (Kg_damp + Mg_damp))
            diag = np.diag(data)
            F_add = (1 + 1j*freq*self.beta_v + 1j*self.beta_h)*Kr_v - (((2 * pi * freq)**2) - 1j*freq*self.alpha_v - 1j*self.alpha_h)*Mr_v
            F_aux = modal_shape.T @ (F - F_add)
            x[:, i] = modal_shape @ (diag @ F_aux)
        end = time.time()
        if timing:
            print('Time to solve harmonic analisys problem through mode superposition method:' + str(round((end - start), 6)) + '[s]')
        return x, frequencies, natural_frequencies, modal_shape
| atbrandao/OpenPulse_f | pulse/engine/solution.py | solution.py | py | 8,010 | python | en | code | null | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.linalg.eigs",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_n... |
27033338799 |
from __future__ import print_function
import argparse
from ast import literal_eval
import logging
from utils import metrics_manager
from utils import data_manager
try:
import ConfigParser
config = ConfigParser.ConfigParser()
except ImportError:
import configparser
config = configparser.ConfigParser()
# --metrics-policy metrics_parameters_images --task-name custom.p316xlarge.fp32.bs32 --metrics-suffix nightly --num-gpus 8 --command-to-execute \"Hello world\"
CONFIG_TEMPLATE = './task_config_template.cfg'
def run_benchmark(args):
if 'imagenet' in args.data_set:
data_manager.getImagenetData(args.data_set)
config.read(args.metrics_template)
for name, value in config.items(args.metrics_policy):
if(name == 'patterns'):
metric_patterns = literal_eval(value)
elif(name == 'metrics'):
metric_names= literal_eval(value)
else:
metric_compute_methods = literal_eval(value)
metrics_manager.BenchmarkResultManager.uptime()
metrics_manager.benchmark(
command_to_execute=args.command_to_execute,
metric_patterns=metric_patterns,
metric_names=metric_names,
metric_compute_methods=metric_compute_methods,
num_gpus=args.num_gpus,
task_name=args.task_name,
suffix=args.metrics_suffix,
framework=args.framework
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run a benchmark task.")
parser.add_argument('--framework', type=str, help='Framework eg. mxnet')
parser.add_argument('--metrics-policy', type=str, help='Metrics policy section name e.g. metrics_paramaters_images')
parser.add_argument('--task-name', type=str, help='Task Name e.g. resnet50_cifar10_symbolic.')
parser.add_argument('--metrics-suffix', type=str, help='Metrics suffix e.g. --metrics-suffix daily')
parser.add_argument('--num-gpus', type=int, help='Numbers of gpus. e.g. --num-gpus 8')
parser.add_argument('--command-to-execute', type=str, help='The script command that performs benchmarking')
parser.add_argument('--data-set', type=str, help='The data set to use for benchmarking, eg. imagenet, imagenet-480px-256px-q95')
parser.add_argument('--metrics-template', type=str, help='The template file to use for metrics pattern', default=CONFIG_TEMPLATE)
args = parser.parse_args()
log_file_location = args.task_name + ".log"
logging.basicConfig(filename=log_file_location,level=logging.DEBUG)
try:
run_benchmark(args)
except Exception:
logging.exception("Fatal error in run_benchmark")
exit()
| awslabs/deeplearning-benchmark | benchmark_runner.py | benchmark_runner.py | py | 2,670 | python | en | code | 119 | github-code | 36 | [
{
"api_name": "ConfigParser.ConfigParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.data_manager.getImagenetData",
"line_number": 27,
"usage_type": "call"
},
{... |
74483870822 | """
Implement the functions used when the user
chooses either platform (Chess.com or Lichess)
"""
from collections import OrderedDict
def calculate_percentages(moves_dict):
"""
Calculate the winning, losing and drawing percentages
of each position in moves_dict
"""
for move in moves_dict["next_moves"]:
aux_dict = moves_dict["next_moves"][move]
aux_dict["white_percentage"] = round(aux_dict["white_wins"] /
aux_dict["count"]*100)
aux_dict["black_percentage"] = round(aux_dict["black_wins"] /
aux_dict["count"]*100)
aux_dict["draw_percentage"] = 100 - (aux_dict["white_percentage"] +
aux_dict["black_percentage"])
# Iterate recursively through all positions
calculate_percentages(aux_dict)
def order_dict(moves_dict):
"""
Order the moves dictionary by the number of times
the position was reached.
E.g. the position 1.e4 was reached in 500 games, and
the position 1.d4 was reached in 200 games, so, the first position
in moves_dict will be 1.e4 and the second will be 1.d4
"""
# Initialize ordered dictionary
new_dict = OrderedDict()
# Check if this is the first lement of the dictionary,
# which has only the "next_moves" key
if len(moves_dict) != 1:
# Copy all information into the new dictionary
new_dict["count"] = moves_dict["count"]
new_dict["white_wins"] = moves_dict["white_wins"]
new_dict["black_wins"] = moves_dict["black_wins"]
new_dict["white_percentage"] = moves_dict["white_percentage"]
new_dict["black_percentage"] = moves_dict["black_percentage"]
new_dict["draw_percentage"] = moves_dict["draw_percentage"]
new_dict["next_moves"] = OrderedDict()
# Iterate through all moves to order everything
for move in moves_dict["next_moves"]:
moves_dict["next_moves"][move] = order_dict(moves_dict["next_moves"][move])
# Initialize the variable which holds the move
# with the largest count
aux_move = {"move": "", "count": 0}
for _ in range(len(moves_dict["next_moves"])):
for move in moves_dict["next_moves"]:
# Check if the count of the current move is larger than
# the count of the aux variable
if moves_dict["next_moves"][move]["count"] > aux_move["count"]:
aux_move["move"] = move
aux_move["count"] = moves_dict["next_moves"][move]["count"]
# Copy the "move" dictionary into the "next_moves" key of the new dictionary
new_dict["next_moves"][aux_move["move"]] = moves_dict["next_moves"][aux_move["move"]].copy()
# Set the "count" of this move to 0, to get the next largest count value
moves_dict["next_moves"][aux_move["move"]]["count"] = 0
# Reset aux_move
aux_move = {"move": "", "count": 0}
# Return the ordered dictionary
return new_dict
def increment_move_in_moves_dict(moves_dict, move, result):
"""
Add the information to the moves dictonary that the 'move'",
found previously in some game, was reached again in another game.
"""
moves_dict["next_moves"][move]["count"] += 1
if result == 1:
moves_dict["next_moves"][move]["white_wins"] += 1
elif result == 0:
moves_dict["next_moves"][move]["black_wins"] += 1
def create_move_in_moves_dict(moves_dict, move, result):
"""
Add the information to the moves dictonary that a new 'move',
never found previously in a game, was reached in the current analyzed game.
"""
if result == 1:
move_info = {"count": 1, "white_wins": 1, "black_wins": 0, "next_moves": {}}
elif result == 0:
move_info = {"count": 1, "white_wins": 0, "black_wins": 1, "next_moves": {}}
else:
move_info = {"count": 1, "white_wins": 0, "black_wins": 0, "next_moves": {}}
moves_dict["next_moves"][move] = move_info
| felaube/chess-games-explorer | helpers.py | helpers.py | py | 4,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 54,
"usage_type": "call"
}
] |
39326157381 | import requests
import datetime
from datetimerange import DateTimeRange
import json
import math
import pytz
def get_hijri(timezone):
r = requests.get('http://api.aladhan.com/v1/gToH?date='+datetime.datetime.now(pytz.timezone(timezone)).strftime('%d-%m-%Y')).json()
return r['data']['hijri']['day'] +' '+ r['data']['hijri']['month']['en'] + ' ' + r['data']['hijri']['year']
def waktu_tersisa(hour, minute,timezone):
now = datetime.datetime.now(pytz.timezone(timezone))
target = pytz.timezone(timezone).localize(datetime.datetime(*now.timetuple()[0:3], hour, minute))
if target < now: # if the target is before now, add one day
target += datetime.timedelta(days=1)
diff = target - now
hasil = math.ceil(diff.seconds/60) # Dalam Menit
if hasil > 60:
hasil = str(math.ceil(hasil/60))+" Jam Lagi" # Dalam Jam
else:
hasil = str(hasil)+" Menit Lagi" # Menit
return hasil
def current_pray(kota,timezone):
jadwal = get_jadwal(kota)
print(jadwal)
jam = datetime.datetime.now(pytz.timezone(timezone)).time().strftime('%H:%M')
subuh = DateTimeRange(jadwal['jadwal']['data']['subuh'],jadwal['jadwal']['data']['dzuhur'])
dzuhur = DateTimeRange(jadwal['jadwal']['data']['dzuhur'], jadwal['jadwal']['data']['ashar'])
ashar = DateTimeRange(jadwal['jadwal']['data']['ashar'], jadwal['jadwal']['data']['maghrib'])
magrib = DateTimeRange(jadwal['jadwal']['data']['maghrib'],jadwal['jadwal']['data']['isya'])
if jam in subuh:
return('Subuh')
elif jam in dzuhur:
return("Dzuhur")
elif jam in ashar:
return("Ashar")
elif jam in magrib:
return("Maghrib")
else:
return("Isya")
def split_jam(jam):
# H:M
return jam.split(':')
def solat_berikutnya(kota,timezone):
jadwal = get_jadwal(kota)
sekarang = current_pray(kota,timezone)
if sekarang == "Subuh":
waktuberikutnya = split_jam(jadwal['jadwal']['data']['dzuhur'])
waktutersisa = waktu_tersisa(int(waktuberikutnya[0]),int(waktuberikutnya[1]),timezone)
solatberikutnya = "Dzuhur"
elif sekarang == "Dzuhur":
waktuberikutnya = split_jam(jadwal['jadwal']['data']['ashar'])
waktutersisa = waktu_tersisa(int(waktuberikutnya[0]),int(waktuberikutnya[1]),timezone)
solatberikutnya = "Ashar"
elif sekarang == "Ashar":
waktuberikutnya = split_jam(jadwal['jadwal']['data']['maghrib'])
waktutersisa = waktu_tersisa(int(waktuberikutnya[0]),int(waktuberikutnya[1]),timezone)
solatberikutnya = "Maghrib"
elif sekarang == "Maghrib":
waktuberikutnya = split_jam(jadwal['jadwal']['data']['isya'])
waktutersisa = waktu_tersisa(int(waktuberikutnya[0]),int(waktuberikutnya[1]),timezone)
solatberikutnya = "Isya"
elif sekarang == "Isya":
waktuberikutnya = split_jam(jadwal['jadwal']['data']['subuh'])
waktutersisa = waktu_tersisa(int(waktuberikutnya[0]),int(waktuberikutnya[1]),timezone)
solatberikutnya = "Subuh"
return {
'tersisa':waktutersisa,
'waktuberikutnya':solatberikutnya
}
def get_random_ayat():
# 114 Surat
# 6236 Ayat
r = requests.get('https://api.banghasan.com/quran/format/json/acak').json()
return {'arab':r['acak']['ar']['teks'],
'terjemah':r['acak']['id']['teks'].replace('\n',''),
'surah':r['surat']['nama'],
'arti':r['surat']['arti'],
'ayat':r['acak']['id']['ayat']}
def get_city(city):
"""Menambil Kode Kota
Arguments:
city {str} -- nama kota
Returns:
json -- Kode Kota
"""
try:
r = requests.get('https://api.banghasan.com/sholat/format/json/kota/nama/'+city)
return r.json()['kota'][0]['id']
except:
return 404
def get_jadwal(namakota):
"""Mendapatkan Jadwal Shalat
Arguments:
kode {str} -- nama kota
Returns:
json -- jadwal shalat
"""
kode = get_city(namakota)
r = requests.get('https://api.banghasan.com/sholat/format/json/jadwal/kota/%s/tanggal/%s'%(kode, str(datetime.date.today())))
return r.json()
if __name__ == "__main__":
print(get_jadwal()) | RaihanStark/sakumuslim | engine.py | engine.py | py | 4,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytz.timezone"... |
9663828561 | import os
import pickle
from tqdm import tqdm
import multiprocessing as mp
import itertools
import numpy as np
import functools
from termcolor import colored
import time
from numba import jit
SAVE_DIR = 'iterator_saves'
EXPENDABLE_MEMORY = 5 # in Gig
class Iterator:
def __init__(self, width, height, _print=False, save_at=500_000):
self.width = width
self.height = height
self._print = _print
self.variants = []
self.iteration_counter = 0
self.leaf_counter = 0
self.queue = [] # nodes that are next to be processed
self.nodes = [] # nodes being currently processed
self.save_at = save_at
def iterate(self, depth_first=True, multi_processing=True, parallel=10000, continued=False):
if not continued:
start = Node(grid=create_base_grid(np.zeros((self.width, self.height), dtype=np.byte), positive_cells=[(0, 0)], negative_cells=[]), num_positives=1)
self.queue = [start]
last_checkpoint = len(self.variants)
with tqdm(total=0) as pbar:
while len(self.queue) > 0:
if multi_processing:
pool = mp.Pool(mp.cpu_count())
queue_len = len(self.queue)
if depth_first:
self.queue, self.nodes = (self.queue[:-parallel], self.queue[-parallel:])
else:
self.queue, self.nodes = (self.queue[parallel:], self.queue[:parallel])
pbar.set_description(pretty_description(queue_len, len(self.nodes), self.leaf_counter, len(self.variants)))
full_iteration = pool.map(next_wrapper, self.nodes)
pool.close()
_counter = 0
for _next in full_iteration:
add_to_queue = self.unpack_next(_next)
self.queue += add_to_queue
self.iteration_counter += len(add_to_queue)
_counter += len(add_to_queue)
pbar.update(_counter)
else:
if self.iteration_counter % 1000 == 0:
pbar.set_description(pretty_description(len(self.queue), 1, self.leaf_counter, len(self.variants)))
if depth_first:
next_node = self.queue.pop(len(self.queue) - 1)
else:
next_node = self.queue.pop(0)
self.nodes = [next_node]
_next = next_node.get_next()
add_to_queue = self.unpack_next(_next)
self.queue += add_to_queue
pbar.update(len(add_to_queue))
self.iteration_counter += len(add_to_queue)
pbar.refresh()
if self.save_at is not None:
if len(self.variants) > last_checkpoint + self.save_at:
self.save_wrapper('checkpoint')
last_checkpoint += self.save_at
if self._print:
print("Number of processed nodes: {}".format(self.iteration_counter))
print("Number of checked leafs: {}".format(self.leaf_counter))
print("Number found variants: {}".format(len(self.variants)))
self.nodes = []
self.save_wrapper('complete')
return self.variants
def unpack_next(self, _next):
leaf, content = _next
add_to_queue = []
if leaf:
if content is not None:
self.variants.append(content)
self.leaf_counter += 1
else:
add_to_queue = content
return add_to_queue
def next_wrapper(self, node):
_next = node.get_next()
return self.unpack_next(_next)
def save_wrapper(self, keyword=""):
save_path = os.path.join(os.getcwd(), SAVE_DIR, '{}{}'.format(keyword, time.strftime("%Y%m%d-%H%M%S")))
self.save(save_path)
save_message = '{} save: {} variants found. Iteration Progress saved at {}'.format(keyword, len(self.variants), save_path)
print(colored(save_message, 'green'))
return save_path
def save(self, path):
os.makedirs(path, exist_ok=True)
# save variants via numpy
variants_path = os.path.join(path, 'variants.npy')
np.save(variants_path, self.variants)
# save state of iterator via pickle
state_path = os.path.join(path, 'state.pkl')
iterator_state = {'queue': self.queue,
'nodes': self.nodes,
'width': self.width,
'height': self.height,
'iteration_counter': self.iteration_counter,
'leaf_counter': self.leaf_counter}
with open(state_path, 'wb') as handle:
pickle.dump(iterator_state, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, path):
# load variants via numpy
variants_path = os.path.join(path, 'variants.npy')
self.variants = list(np.load(variants_path))
# load state of iterator via pickle
state_path = os.path.join(path, 'state.pkl')
with open(state_path, 'rb') as handle:
state = pickle.load(handle)
self.queue = state['queue'] + state['nodes']
self.width = state['width']
self.height = state['height']
self.iteration_counter = state['iteration_counter']
self.leaf_counter = state['leaf_counter']
def next_wrapper(node):
return node.get_next()
def create_base_grid(base_grid, positive_cells, negative_cells):
grid = np.copy(base_grid)
for (x, y) in positive_cells:
grid[x][y] = 1
for (x, y) in negative_cells:
grid[x][y] = -1
return grid
def count_positive_cells(grid):
return np.sum(np.array(grid))
@jit(nopython=True)
def count_adjacent(grid, coords):
x, y = coords
counter = 0
adjacent_edge = [(1, 0), (0, 1), (-1, 0), (0, -1)]
adjacent_corner = [[(-1, -1), (-1, 1)], [(-1, -1), (1, -1)], [(1, 1), (1, -1)], [(-1, 1), (1, 1)]]
for index, (_x, _y) in enumerate(adjacent_edge):
if get_safe(grid, (x + _x, y + _y)) > 0:
for (__x, __y) in adjacent_corner[index]:
if get_safe(grid, (x + __x, y + __y)) > 0:
return 2
counter += 1
return counter
@jit(nopython=True)
def get_safe(grid, coords):
x, y = coords
if x >= len(grid) or y >= len(grid[0]) or x < 0 or y < 0:
return 0
return grid[x][y]
@jit(nopython=True)
def make_next_grid(base_grid, combination, possibilites):
"""
Makes new grid from given base grid, as well as a list of possible next cells and a combination of those to select.
Note: the new cells are placed sequentially. Should two new cells be adjacent, the function will return with success=false
:param base_grid: the grid that the new grid is based on
:param combination: a tuple (b1, ..., bn) of booleans indicating which possible cells should be selected
:param possibilites a list [(x1, y1), ..., (xn, yn)] of coords, representing the selectable cells
:return: new grid, counter of new positive cells, success bool
"""
new_positive_cells = 0
next_grid = np.copy(base_grid)
for index, (x, y) in enumerate(possibilites):
if combination[index]:
if count_adjacent(next_grid, (x, y)) != 1:
return None, 0, False
next_grid[x][y] = 1
new_positive_cells += 1
else:
next_grid[x][y] = -1
return next_grid, new_positive_cells, True
@functools.lru_cache()
def get_combinations(length):
combinations = [list(i) for i in itertools.product([0, 1], repeat=length)]
return np.array(combinations)
@functools.lru_cache()
def get_n(width, height):
return int(np.ceil(width / 2) * np.ceil(height / 2)) * 2 - 1
class Node:
def __init__(self, grid, num_positives):
self.grid = grid
self.num_positives = num_positives
def get_next(self):
# leaf node
if self.num_positives == get_n(len(self.grid), len(self.grid[0])):
self.export_grid()
return True, self.grid
possibilites = []
indices = np.argwhere(self.grid == 0)
for (x, y) in indices:
if count_adjacent(self.grid, (x, y)) == 1:
possibilites.append((x, y))
# also leaf node, but invald
if len(possibilites) == 0:
return True, None
_next = []
combinations = get_combinations(len(possibilites))[1:]
for combination in combinations:
next_grid, counter, success = make_next_grid(self.grid, combination, np.array(possibilites))
if not success:
continue
# if self.num_positives + counter > get_n(len(self.grid), len(self.grid[0])):
# print("To many cells!")
# continue
_next.append(Node(next_grid, self.num_positives + counter))
return False, _next
def export_grid(self):
self.grid = np.floor_divide(self.grid + np.ones_like(self.grid), 2)
#####################
#### BASIC UTIL #####
#####################
def print_2d(grid, print_zeros=True, highlight=None):
height = len(grid[0])
width = len(grid)
for y in range(height - 1, -1, -1):
row = ""
for x in range(width):
if highlight is not None:
_x, _y = highlight
if x == _x and y == _y:
row += "{} ".format(colored(grid[x][y], 'yellow'))
continue
if not print_zeros:
row += "{} ".format(" " if grid[x][y] == 0 else grid[x][y])
else:
row += "{} ".format(grid[x][y])
print(row)
print()
def pretty_description(num_queue, num_nodes, num_leafs, num_variants):
string1 = "processing {}/{} items in queue".format(num_nodes, num_queue)
string2 = "{} leafs checked".format(num_leafs)
string3 = "{} variants found".format(num_variants)
description_string = "{}, {}, {}".format(*[colored(string1, 'cyan'), colored(string2, 'yellow'), colored(string3, 'green')])
return description_string
def load_print_variants(path, limit=None, print_zeros=False):
if not os.path.exists(path):
print("Path {} does not exist.".format(colored(path, 'yellow')))
return
variants = np.load(path)
if limit is None:
limit = len(variants)
print("Variants loaded: {}. Printing the first {}:".format(len(variants), limit))
for index in range(limit):
print_2d(variants[index], print_zeros=print_zeros)
def continue_iteration(path, multi_processing=True):
if not os.path.exists(path):
print("Path {} does not exist.".format(colored(path, 'yellow')))
return
iterator = Iterator(0, 0, _print=True)
try:
iterator.load(path)
if len(iterator.queue) > 0:
print(colored('Continue Iteration\n', 'blue'))
time.sleep(0.01)
iterator.iterate(continued=True, depth_first=True, multi_processing=multi_processing, parallel=15000 * EXPENDABLE_MEMORY)
else:
print(colored('Iteration saved in {} already completed.'.format(path), 'blue'))
except KeyboardInterrupt:
print(colored('Program Interrupted. Saving progress..', 'yellow'))
iterator.save_wrapper('interrupted')
def iterate(w=7, h=7, multi_processing=True):
if max(w, h) < 5:
multi_processing = False
iterator = Iterator(w, h, _print=True)
try:
iterator.iterate(multi_processing=multi_processing, depth_first=True, parallel=15000 * EXPENDABLE_MEMORY)
except KeyboardInterrupt:
print(colored('Program Interrupted. Saving progress..', 'yellow'))
iterator.save_wrapper('interrupted')
if __name__ == '__main__':
print(colored("Available cores: {}\n".format(mp.cpu_count()), 'green'))
time.sleep(0.01)
# Iterate from scratch
iterate(w=5, h=5)
# Continue Iteration from save
continue_iteration(os.path.join(os.getcwd(), SAVE_DIR, 'save_name'))
# Print found variants from save
load_print_variants(os.path.join(os.getcwd(), SAVE_DIR, 'save_name', 'variants.npy'), limit=2)
| m1a9l9t7e/RoadGenerator | ip/iterative_construction.py | iterative_construction.py | py | 12,413 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.byte",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"lin... |
24486995491 | """archive hails
Revision ID: da94441f919f
Revises: 51c630a38d3c
Create Date: 2022-03-16 13:46:13.409774
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'da94441f919f'
down_revision = '51c630a38d3c'
branch_labels = None
depends_on = None
def upgrade():
sources_enum = postgresql.ENUM('form', 'api', name='via', create_type=False)
op.create_table(
'archived_hail',
sa.Column('added_at', sa.DateTime(), nullable=True),
sa.Column('added_via', sources_enum, nullable=False),
sa.Column('source', sa.String(length=255), nullable=False),
sa.Column('last_update_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.String(), nullable=False),
sa.Column('status', sa.String(), nullable=False),
sa.Column('moteur', sa.String(), nullable=False),
sa.Column('operateur', sa.String(), nullable=False),
sa.Column('incident_customer_reason', sa.String()),
sa.Column('incident_taxi_reason', sa.String()),
sa.Column('session_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('insee', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.add_column('hail', sa.Column('blurred', sa.Boolean(), server_default='false', nullable=True))
def downgrade():
op.drop_column('hail', 'blurred')
op.drop_table('archived_hail')
| openmaraude/APITaxi | APITaxi_models2/migrations/versions/20220316_13:46:13_da94441f919f_archive_hails.py | 20220316_13:46:13_da94441f919f_archive_hails.py | py | 1,462 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "sqlalchemy.dialects.postgresql.ENUM",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.dialects.postgresql",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_table",
"line_number": 22,
"usage_type": "call"
},... |
5892047689 | from typing import Tuple
import numpy as np
import yaml
import os
def PIDController(
v_0: float, y_ref: float, y_hat: float, prev_e_y: float, prev_int_y: float, delta_t: float
) -> Tuple[float, float, float, float]:
"""
PID performing lateral control.
Args:
v_0: linear Duckiebot speed (constant).
y_ref: target y coordinate.
y_hat: the current estimated y.
prev_e_y: tracking error at previous iteration.
prev_int_y: previous integral error term.
delta_t: time interval since last call.
Returns:
v_0: linear velocity of the Duckiebot
omega: angular velocity of the Duckiebot
e: current tracking error (automatically becomes prev_e_y at next iteration).
e_int: current integral error (automatically becomes prev_int_y at next iteration).
"""
# # Read PID gains from file
# script_dir = os.path.dirname(__file__)
# file_path = script_dir + "/GAINS.yaml"
# with open(file_path) as f:
# gains = yaml.full_load(f)
# f.close()
# kp = gains['kp']
# kd = gains['kd']
# ki = gains['ki']
# ------------- DEFINE YOUR PID FUNCTION BELOW ---------
# Tracking error
e = y_ref - y_hat
# integral of the error
e_int = prev_int_y + e * delta_t
# anti-windup - preventing the integral error from growing too much
e_int = max(min(e_int,2),-2)
# derivative of the error
e_diff = (e - prev_e_y) / delta_t
# controller coefficients
Kp = 5
Ki = 0.2
Kd = 0.1
# Compute control signals
omega = Kp * e + Ki * e_int + Kd * e_diff
# Update previous errors for the next iteration
# prev_e_y = e
# prev_int_y = e_int
# # Tracking error
# e = y_ref - y_hat
# # integral of the error
# e_int = prev_int_y + e*delta_t
# # anti-windup - preventing the integral error from growing too much
# e_int = max(min(e_int,2),-2)
# # derivative of the error
# e_der = (e - prev_e_y)/delta_t
# # controller coefficients
# Kp = 15
# Ki = 1
# Kd = 0.1
# # PID controller for omega
# omega = Kp*e + Ki*e_int + Kd*e_der
#print(f"\n\nDelta time : {delta_t} \nE : {np.rad2deg(e)} \nE int : {e_int} \nPrev e : {prev_e} \nU : {u} \nTheta hat: {np.rad2deg(theta_hat)} \n")
return v_0, omega, e, e_int
| bratjay01/bharath_duckiebot | modcon/packages/solution/pid_controller_homework.py | pid_controller_homework.py | py | 2,430 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
}
] |
32094159630 | from decimal import Decimal
import setoptconf as soc
GOOD_SIMPLE_VALUES = (
(soc.String, None, None),
(soc.String, 'foo', 'foo'),
(soc.String, '1', '1'),
(soc.String, 1, '1'),
(soc.String, 1.23, '1.23'),
(soc.String, Decimal('1.23'), '1.23'),
(soc.Integer, None, None),
(soc.Integer, 123, 123),
(soc.Integer, '123', 123),
(soc.Integer, 123.45, 123),
(soc.Integer, Decimal('123'), 123),
(soc.Integer, Decimal('123.45'), 123),
(soc.Float, None, None),
(soc.Float, 123, 123.0),
(soc.Float, '123', 123.0),
(soc.Float, 123.45, 123.45),
(soc.Float, Decimal('123'), 123.0),
(soc.Float, Decimal('123.45'), 123.45),
(soc.Boolean, None, None),
(soc.Boolean, True, True),
(soc.Boolean, False, False),
(soc.Boolean, 'y', True),
(soc.Boolean, 'yes', True),
(soc.Boolean, 't', True),
(soc.Boolean, 'true', True),
(soc.Boolean, 'on', True),
(soc.Boolean, '1', True),
(soc.Boolean, '', False),
(soc.Boolean, 'n', False),
(soc.Boolean, 'no', False),
(soc.Boolean, 'f', False),
(soc.Boolean, 'false', False),
(soc.Boolean, 'off', False),
(soc.Boolean, '0', False),
(soc.Boolean, 123, True),
(soc.Boolean, 0, False),
(soc.Boolean, 123.45, True),
)
BAD_SIMPLE_VALUES = (
(soc.Integer, 'foo'),
(soc.Integer, '123abc'),
(soc.Float, 'foo'),
(soc.Float, '123abc'),
(soc.Float, '123.45abc'),
(soc.Boolean, 'foo'),
)
def test_simple_sanitization():
for datatype, in_value, out_value in GOOD_SIMPLE_VALUES:
yield check_good_value, datatype, in_value, out_value
for datatype, in_value in BAD_SIMPLE_VALUES:
yield check_bad_value, datatype, in_value
def check_good_value(datatype, in_value, out_value):
dt = datatype()
assert dt.sanitize(in_value) == out_value
assert dt.is_valid(in_value) is True
def check_bad_value(datatype, in_value):
dt = datatype()
try:
dt.sanitize(in_value)
except soc.DataTypeError:
pass
else:
assert False, 'Invalid %s allowed: %s' % (
datatype.__name__,
in_value,
)
assert dt.is_valid(in_value) is False
GOOD_LIST_VALUES = (
(soc.String, None, None),
(soc.String, [], []),
(soc.String, ['foo', 'bar'], ['foo', 'bar']),
(soc.String, ('foo', 'bar'), ['foo', 'bar']),
(soc.String(), ['foo', 'bar'], ['foo', 'bar']),
(soc.String, 'foo', ['foo']),
(soc.Integer, [123, '456'], [123, 456]),
)
BAD_LIST_VALUES = (
(soc.Integer, ['foo'], soc.DataTypeError),
(soc.Boolean, [True, False, 'y', 4, 'foo'], soc.DataTypeError),
('a', ['foo'], TypeError),
(soc.Configuration, ['foo'], TypeError),
)
def test_list_sanitization():
for subtype, in_value, out_value in GOOD_LIST_VALUES:
yield check_good_list_value, subtype, in_value, out_value
for subtype, in_value, exc in BAD_LIST_VALUES:
yield check_bad_list_value, subtype, in_value, exc
def check_good_list_value(subtype, in_value, out_value):
dt = soc.List(subtype)
assert dt.sanitize(in_value) == out_value
def check_bad_list_value(subtype, in_value, exc):
try:
dt = soc.List(subtype)
dt.sanitize(in_value)
except exc:
pass
else:
assert False, 'Invalid %s allowed: %s' % (
subtype.__class__.__name__,
in_value,
)
GOOD_CHOICE_VALUES = (
(soc.String, ['foo', 'bar'], None),
(soc.String, ['foo', 'bar'], 'foo'),
(None, ['foo', 'bar'], 'foo'),
(soc.Integer, [1,2,3], 2),
(soc.Integer(), [1,2,3], 2),
)
BAD_CHOICE_VALUES = (
(soc.String, ['foo', 'bar'], 'baz', soc.DataTypeError),
(soc.String, [1, 2, 3], 'baz', soc.DataTypeError),
('a', [1, 2, 3], 4, TypeError),
)
def test_choice_sanitization():
for subtype, choices, value in GOOD_CHOICE_VALUES:
yield check_good_choice_value, subtype, choices, value
for subtype, choices, value, exc in BAD_CHOICE_VALUES:
yield check_bad_choice_value, subtype, choices, value, exc
def check_good_choice_value(subtype, choices, value):
dt = soc.Choice(choices, subtype)
assert dt.sanitize(value) == value
def check_bad_choice_value(subtype, choices, value, exc):
try:
dt = soc.Choice(choices, subtype)
dt.sanitize(value)
except exc:
pass
else:
assert False, 'Invalid choice allowed: %s' % value
| jayclassless/setoptconf | test/test_datatypes.py | test_datatypes.py | py | 4,436 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "setoptconf.String",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "setoptconf.String",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "setoptconf.String",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "set... |
23083264879 | from django.shortcuts import render
from .forms import RegisterForm, LoginForm
from django.shortcuts import redirect
from django.contrib import messages
from django.contrib.auth import authenticate , login
# Create your views here.
def index(request):
return render(request,'acounts/index.html')
def register(request):
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'welcome {username} your account is created')
return redirect('login_view')
else:
form = RegisterForm()
context = {
"form": form,
}
return render(request, "acounts/register.html", context )
def login_view(request):
form = LoginForm(request.POST or None)
msg = None
if request.method == 'POST':
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None and user.is_doctor:
login(request, user)
return redirect('doctorpage')
elif user is not None and user.is_patient:
login(request, user)
return redirect('patientpage')
else:
msg= 'invalid credentials'
else:
msg = 'error validating form'
return render(request, 'acounts/login.html', {'form': form, 'msg': msg})
def doctor(request):
return render(request,'acounts/doctor.html')
def patient(request):
return render(request,'acounts/patient.html')
| Shivam38391/django-asignment | acounts/views.py | views.py | py | 1,792 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "forms.RegisterForm",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 19,
"usage_type": "call"
},
{
"api_nam... |
7112777830 | import scipy.integrate as integrate
import sympy as sp
x = sp.symbols('x')
n = sp.symbols('n')
f = (1/sp.pi) * x**3 * sp.sin(n*x)
lower = -sp.pi
upper = sp.pi
integral = sp.integrate(f,(x,lower,upper))
simplified_integral = sp.simplify(integral)
print(simplified_integral)
| ClarkieUK/Fourier-Series | testing.py | testing.py | py | 276 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sympy.symbols",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sympy.symbols",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sympy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sympy.sin",
"line_number":... |
3006495445 | from pandas.io.parsers import read_csv
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def carga_csv(filename):
valores = read_csv(filename, header=None).to_numpy()
return valores.astype(float)
def h(x, theta):
return theta[0] + theta[1] * x
def func_coste(X, Y, theta):
acc = 0
m = len(X)
acc = np.sum((h(X, theta) - Y) ** 2)
return acc / (2 * m)
def plot_line(X, Y, theta):
min_x = min(X)
max_x = max(X)
min_y = h(min_x, theta)
max_y = h(max_x, theta)
plt.plot(X, Y, "x")
plt.plot([min_x, max_x], [min_y, max_y])
# plt.show()
plt.savefig("apartado1_line.png")
def descenso_gradiente_simple(X, Y, alpha=0.01, iteraciones=1500):
    """Batch gradient descent for univariate linear regression.

    Returns [theta_0, theta_1] after `iteraciones` simultaneous updates
    with learning rate `alpha`.
    """
    t0, t1 = 0, 0
    m = len(X)
    step = alpha / m
    for _ in range(iteraciones):
        # Residuals are computed once per iteration, so both parameters
        # are updated simultaneously from the same hypothesis.
        residuos = h(X, [t0, t1]) - Y
        grad0 = np.sum(residuos)
        grad1 = np.sum(residuos * X)
        t0 -= step * grad0
        t1 -= step * grad1
    return [t0, t1]
def make_grid(t0_range, t1_range, X, Y, step=0.1):
    """Evaluate the regression cost over a (theta0, theta1) mesh.

    Returns [Theta0, Theta1, Coste], all shaped like the meshgrid, for
    surface/contour plotting.
    """
    theta0_vals = np.arange(t0_range[0], t0_range[1], step)
    theta1_vals = np.arange(t1_range[0], t1_range[1], step)
    Theta0, Theta1 = np.meshgrid(theta0_vals, theta1_vals)
    Coste = np.empty_like(Theta0)
    # The cost is a scalar per (theta0, theta1) pair, so walk every index.
    for idx in np.ndindex(Theta0.shape):
        Coste[idx] = func_coste(X, Y, [Theta0[idx], Theta1[idx]])
    return [Theta0, Theta1, Coste]
def show_mesh(data):
    """Render the cost surface from make_grid as a 3-D mesh.

    data: [Theta0, Theta1, Coste] as returned by make_grid.
    Saves to apartado1_mesh.png instead of showing interactively.
    """
    fig = plt.figure()
    # NOTE(review): direct Axes3D(fig) construction is deprecated in recent
    # matplotlib (use fig.add_subplot(projection='3d')) — confirm target version.
    ax = Axes3D(fig)
    surf = ax.plot_surface(data[0], data[1], data[2], cmap=cm.jet, linewidth=0, antialiased=False)
    # plt.show()
    plt.savefig("apartado1_mesh.png")
def show_contour(data):
    """Render the cost surface as log-spaced contour lines.

    data: [Theta0, Theta1, Coste] as returned by make_grid.
    Saves to apartado1_contour.png instead of showing interactively.
    """
    # TODO: ask about the logspace level choice (translated from Spanish)
    plt.contour(data[0],data[1],data[2],np.logspace(-2,3,20),colors='blue')
    # plt.scatter(data[0], data[1])
    # plt.contour(data[0],data[1],data[2],colors='blue')
    # plt.show()
    plt.savefig("apartado1_contour.png")
def apartado_1():
    """Exercise 1: fit ex1data1.csv with simple gradient descent and plot.

    Side effects: reads ex1data1.csv and writes apartado1_contour.png.
    """
    datos = carga_csv('ex1data1.csv')
    X = datos[:, 0]
    Y = datos[:, 1]
    theta = descenso_gradiente_simple(X, Y)
    # plot_line(X, Y, theta)
    grid_data = make_grid([-10, 10], [-1, 4], X, Y)
    # show_mesh(grid_data)
    show_contour(grid_data)
def normaliza_matriz(x):
    """Column-wise z-score normalisation.

    Returns (x_norm, mu, sigma): the normalised matrix plus the per-column
    mean and standard deviation needed to normalise future samples.
    """
    mu = np.mean(x, axis=0)
    sigma = np.std(x, axis=0)
    return (x - mu) / sigma, mu, sigma
def coste_vec(X, Y, Theta):
    """Vectorised linear-regression cost: ||X @ Theta - Y||^2 / (2m)."""
    residuos = np.dot(X, Theta) - Y
    m = len(X)
    return np.sum(residuos ** 2) / (2 * m)
def gradiente_it(X, Y, Theta, alpha):
    """One gradient-descent step, updating Theta feature by feature.

    Note: Theta is modified in place and the same array is returned.
    """
    m, n = np.shape(X)
    residuos = np.dot(X, Theta) - Y
    for j in range(n):
        # Partial derivative of the cost with respect to Theta[j].
        Theta[j] -= (alpha / m) * np.sum(residuos * X[:, j])
    return Theta
def gradiente_vec(X, Y, Theta, alpha):
    """Fully vectorised gradient-descent step (does not mutate Theta).

    Returns Theta - (alpha/m) * X^T (X @ Theta - Y).
    The dead local `NuevaTheta = Theta` from the original was removed.
    """
    m = np.shape(X)[0]
    H = np.dot(X, Theta)
    return Theta - (alpha / m) * np.dot(np.transpose(X), (H - Y))
def descenso_gradiente_multiple(X, Y, alpha=0.01, iteraciones=1500):
    """Multivariate gradient descent recording the cost before every step.

    Returns (costes, Theta): the full cost history is kept so different
    learning rates can be compared against each other.
    """
    Theta = np.zeros(np.shape(X)[1])
    costes = np.zeros(iteraciones)
    for it in range(iteraciones):
        costes[it] = coste_vec(X, Y, Theta)
        Theta = gradiente_it(X, Y, Theta, alpha)
    return costes, Theta
def ec_normal(X, Y):
    """Closed-form least squares via the normal equation.

    Uses the pseudo-inverse of X^T X for numerical robustness:
    Theta = pinv(X^T X) X^T Y.
    """
    Xt = np.transpose(X)
    return np.dot(np.dot(np.linalg.pinv(np.dot(Xt, X)), Xt), Y)
def apartado_2():
    """Exercise 2: multivariate gradient descent on normalised ex1data2.csv.

    Plots cost curves for several learning rates, then predicts the price
    of a 1650 sq-ft / 3-bedroom example using the Theta from the LAST
    alpha in the loop (0.03). Side effects: reads ex1data2.csv, writes
    descenso_gradiente.png, prints the denormalised prediction.
    """
    datos = carga_csv('ex1data2.csv')
    datos = carga_csv('ex1data2.csv') if False else datos  # noqa placeholder? NO
def apartado_2_2():
    """Exercise 2.2: same prediction via the normal equation (no normalisation).

    Side effects: reads ex1data2.csv and prints shapes plus the prediction.
    """
    datos = carga_csv('ex1data2.csv')
    # Example already carries the leading 1 for the intercept term.
    ejemplo = [[1, 1650, 3]]
    X = datos[:, :-1] # all columns except the last (features)
    Y = datos[:, -1] # the last column (target)
    m = np.shape(X)[0]
    X = np.hstack([np.ones([m, 1]), X])
    Thetas = ec_normal(X, Y)
    print(np.shape(X))
    print(np.shape(ejemplo))
    print(np.shape(Thetas))
    # Element-wise product then sum == dot product of Thetas and the example.
    prediccion = np.sum(Thetas * ejemplo)
    print(prediccion)
def main():
    """Run all three exercises in sequence."""
    apartado_1()
    apartado_2()
    apartado_2_2()
# NOTE: executed at import time; the original has no __main__ guard.
main()
| jorgmo02/AA | P1/practica1.py | practica1.py | py | 5,108 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pandas.io.parsers.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
73720676264 | # -*- coding: utf-8 -*-
# @date:2022/12/12 9:55
# @Author:crab-pc
# @file: onlinelibrary_detail
import random
from urllib.parse import urljoin
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import logging
import os
import pandas as pd
from concurrent.futures import ThreadPoolExecutor
from lxml import etree
# Module-level setup: thread pool, logging, and a Chrome session attached
# to an already-running browser via the remote debugging port.
threadpool = ThreadPoolExecutor(max_workers=2)
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')
chrome_options = Options()
chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")  # port of the previously launched debug Chrome
browser = webdriver.Chrome(executable_path=r'D:\python38\chromedriver.exe',
                           options=chrome_options)  # path to the chromedriver executable
def save_list(data, file, name):
    """Append rows to a CSV under the fixed project folder.

    data: list of row dicts; file: CSV filename; name: column headers,
    used only when the file is created for the first time.
    """
    # desk = os.path.join(os.path.expanduser('~'), 'Desktop')
    # current folder (translated; output lives under this fixed project path)
    file_path = r'F:\mysubject\contribute_link\contributuLink\投稿链接\\' + file
    if os.path.isfile(file_path):
        # Existing file: append without repeating the header row.
        df = pd.DataFrame(data=data)
        df.to_csv(file_path, encoding="utf-8", mode='a', header=False, index=False)
    else:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        df = pd.DataFrame(data=data, columns=name)
        df.to_csv(file_path, encoding="utf-8", index=False)
def first_requests():
    """Scrape 'Submit an article' links from detail pages listed in an Excel file.

    Drives the module-level selenium `browser`; only rows 8-9 of the sheet
    are processed (presumably a resumable slice — confirm with the author).
    Results are appended to onlinelibrary456.csv via save_list.
    """
    pf = pd.read_excel(r'F:\mysubject\contribute_link\contributuLink\spiders\onlinelibrary详情页.xlsx', dtype=str)
    sha = pf.shape[0]
    for i in range(8, 10):
        url = pf.values[i][0]
        # input('=====')
        # input(f'waiting---------{i}')
        browser.get(url)
        # time.sleep(random.randint(4, 6))
        html = browser.page_source
        res = etree.HTML(html)
        # Take the first matching submit link (either capitalisation), else ''.
        link = res.xpath('//a[contains(text(), "Submit an article")]/@href | //a[contains(text(), "Submit an Article")]/@href')[0] if res.xpath('//a[contains(text(), "Submit an article")]/@href | //a[contains(text(), "Submit an Article")]/@href') else ''
        data = []
        links = ''
        # Relative links are resolved against the page URL.
        if link and 'http' not in link:
            links = urljoin(url, link)
        print(url, link)
        data.append(dict(url=url, contribute_link=links, contribute_links=link))
        save_list(data, 'onlinelibrary456.csv', data[0].keys())
if __name__ == '__main__':
    # Entry point currently only converts the Excel listing to CSV;
    # the scraper itself is left commented out.
    # first_requests()
    pf = pd.read_excel(r'F:\mysubject\contribute_link\contributuLink\spiders\onlinelibrary详情页.xlsx', dtype=str)
pf.to_csv(r'F:\mysubject\contribute_link\contributuLink\spiders\onlinelibrary详情页.csv', index=False,encoding='utf-8') | yjsdl/contribute_link | contributuLink/spiders/onlinelibrary_detail.py | onlinelibrary_detail.py | py | 2,627 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_n... |
34710160257 | import os
import shutil
import time
import unittest
from configparser import ConfigParser
from os import environ
from Bio import SeqIO
from installed_clients.WorkspaceClient import Workspace as workspaceService
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
class MinimalGenbankUploadTest(unittest.TestCase):
    """Integration tests uploading a minimal Genbank file via GenomeFileUtil.

    Requires a KBase deployment config (KB_DEPLOYMENT_CONFIG) and auth
    token (KB_AUTH_TOKEN); some cases assume the CI environment.
    """
    @classmethod
    def setUpClass(cls):
        # Build a service context, read the deployment config, and copy the
        # minimal test fixture into the scratch directory.
        print('setting up class')
        token = environ.get('KB_AUTH_TOKEN', None)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'provenance': [
                            {'service': 'GenomeFileUtil',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('GenomeFileUtil'):
            cls.cfg[nameval[0]] = nameval[1]
        cls.wsURL = cls.cfg['workspace-url']
        cls.ws = workspaceService(cls.wsURL, token=token)
        cls.impl = GenomeFileUtil(cls.cfg)
        cls.MINIMAL_TEST_FILE = os.path.join( cls.cfg['scratch'], 'minimal.gbff')
        shutil.copy('data/minimal.gbff', cls.MINIMAL_TEST_FILE )
    @classmethod
    def tearDownClass(cls):
        # Remove the workspace only if a test actually created one.
        if hasattr(cls, 'wsName'):
            cls.ws.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
    def getWsClient(self):
        """Return the shared workspace service client."""
        return self.ws
    def getWsName(self):
        """Return (lazily creating) the per-class test workspace name."""
        if hasattr(self.__class__, 'wsName'):
            return self.__class__.wsName
        suffix = int(time.time() * 1000)
        wsName = "test_GenomeFileUtil_" + str(suffix)
        ret = self.getWsClient().create_workspace({'workspace': wsName})
        self.__class__.wsName = wsName
        return wsName
    def getImpl(self):
        """Return the shared GenomeFileUtil implementation instance."""
        return self.__class__.impl
    def getContext(self):
        """Return the shared method context."""
        return self.__class__.ctx
    def test_upload(self):
        # fetch the test files and set things up
        genomeFileUtil = self.getImpl()
        gbk_path = self.MINIMAL_TEST_FILE
        # ok, first test with minimal options
        result = genomeFileUtil.genbank_to_genome(self.getContext(),
            {
                'file':{'path': gbk_path},
                'workspace_name': self.getWsName(),
                'taxon_id': 4932,
                'genome_name': 'something',
                'generate_ids_if_needed': 1
            })[0]
        self.check_minimal_items_exist(result)
        # test with setting a taxon_reference directly
        result = genomeFileUtil.genbank_to_genome(self.getContext(),
            {
                'file': {'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': 'something',
                'taxon_id': 4932,
                'generate_ids_if_needed': 1
            })[0]
        self.check_minimal_items_exist(result)
        # test setting additional metadata
        result = genomeFileUtil.genbank_to_genome(self.getContext(),
            {
                'file': {'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': 'something',
                'taxon_id': 4932,
                'metadata': {'mydata': 'yay', 'otherdata': 'ok' },
                'generate_ids_if_needed': 1
            })[0]
        self.check_minimal_items_exist(result)
        # Extra metadata must be merged into the saved object info dict.
        metadata_saved = result['genome_info'][10]
        self.assertTrue('mydata' in metadata_saved)
        self.assertTrue('otherdata' in metadata_saved)
        self.assertEqual(metadata_saved['mydata'], 'yay')
        # An unparseable genetic code must be rejected with ValueError.
        invalidate_input_params = {
            'workspace_name': 'workspace_name',
            'genome_name': 'genome_name',
            'file': {'path': 'fasta_file'},
            'genetic_code': 'meh'
        }
        with self.assertRaisesRegex(
                ValueError,
                'Invalid genetic code specified'):
            self.getImpl().genbank_to_genome(self.getContext(), invalidate_input_params)
    def check_minimal_items_exist(self, result):
        """Assert the saved genome's object-info metadata matches the fixture."""
        self.assertTrue('genome_info' in result)
        self.assertTrue('genome_ref' in result)
        genome_info = result['genome_info']
        self.assertEqual(genome_info[10]['Number contigs'], '1')
        self.assertEqual(genome_info[10]['Number of Protein Encoding Genes'], '2')
        self.assertEqual(genome_info[10]['Domain'], 'Eukaryota')
        self.assertEqual(genome_info[10]['Genetic code'], '11')
        self.assertEqual(genome_info[10]['Name'], 'Saccharomyces cerevisiae')
        self.assertEqual(genome_info[10]['Source'], 'Genbank')
        self.assertEqual(genome_info[10]['GC content'], '0.37967')
        self.assertEqual(genome_info[10]['Size'], '5028')
        self.assertEqual(genome_info[10]['Taxonomy'],
                         'cellular organisms; Eukaryota; Opisthokonta; Fungi; Dikarya; Ascomycota; '+
                         'saccharomyceta; Saccharomycotina; Saccharomycetes; Saccharomycetales; '+
                         'Saccharomycetaceae; Saccharomyces')
    def test_supply_assembly(self):
        genomeFileUtil = self.getImpl()
        # NOTE(review): misplaced docstring below — it is a bare string
        # statement, left as-is to keep this change documentation-only.
        """Warning: This test will fail if not run against CI"""
        gbk_path = self.MINIMAL_TEST_FILE
        with self.assertRaisesRegex(ValueError, "not a valid format."):
            result = genomeFileUtil.genbank_to_genome(self.getContext(), {
                'file': {'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': 'something',
                'taxon_id': 4932,
                'use_existing_assembly': "1",
            })[0]
        with self.assertRaisesRegex(ValueError, "not a reference to an assembly"):
            result = genomeFileUtil.genbank_to_genome(
                self.getContext(), {
                    'file': {'path': gbk_path},
                    'workspace_name': self.getWsName(),
                    'taxon_id': 4932,
                    'genome_name': 'something',
                    'use_existing_assembly': "6976/923/6",
                })[0]
        with self.assertRaisesRegex(ValueError, "following contigs which are not present"):
            result = genomeFileUtil.genbank_to_genome(
                self.getContext(), {
                    'file': {'path': gbk_path},
                    'workspace_name': self.getWsName(),
                    'genome_name': 'something',
                    'taxon_id': 4932,
                    'use_existing_assembly': "31767/5/1",
                })[0]
    def test_translation(self):
        # Reimplement feature-sequence extraction by hand and compare it to
        # Biopython's feat.extract for every feature in the fixture record.
        record = next(SeqIO.parse(open(self.MINIMAL_TEST_FILE), 'genbank'))
        f_seq = str(record.seq)
        r_seq = f_seq.translate(str.maketrans("CTAG", "GATC"))
        def _location(feat):
            strand_trans = ("", "+", "-")
            loc = []
            for part in feat.location.parts:
                if part.strand >= 0:
                    begin = int(part.start) + 1
                else:
                    begin = int(part.end)
                loc.append((
                    record.id,
                    begin,
                    strand_trans[part.strand],
                    len(part)))
            return loc
        def get_seq(feat):
            seq = []
            strand = 1
            for part in feat.location.parts:
                strand = part.strand
                if strand >= 0:
                    seq.append(f_seq[part.start:part.end])
                else:
                    seq.insert(0, r_seq[part.start:part.end])
            if strand >= 0:
                return "".join(seq)
            else:
                return "".join(seq)[::-1]
        for feat in record.features:
            print(feat.id)
            seq1 = feat.extract(record)
            seq2 = get_seq(feat)
            self.assertEqual(str(seq1.seq), seq2)
| kbaseapps/GenomeFileUtil | test/supplemental_genbank_tests/genbank_upload_parameter_test.py | genbank_upload_parameter_test.py | py | 8,715 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "GenomeFileUtil.Genom... |
74963734183 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@FileName: weChatClient
@Author : sky
@Date : 2022/8/1 15:48
@Desc : 客户端
"""
import wx
import socket
import threading
# 客户端继承wx.frame,就拥有了窗口界面
class WeChatClient(wx.Frame):
    """Chat-room client window.

    Connects to the chat server on localhost:8888 over a plain TCP socket
    and exchanges UTF-8 text messages. A background daemon thread receives
    server messages and appends them to the read-only chat view.
    """
    def __init__(self, c_name):
        # Call the parent constructor to create the top-level window.
        wx.Frame.__init__(self, None, id=101, title='%s的客户端界面'%c_name, pos=wx.DefaultPosition, size=(400, 700))
        pl = wx.Panel(self)  # root panel inside the frame
        box = wx.BoxSizer(wx.VERTICAL)
        pl.SetSizer(box)
        g1 = wx.FlexGridSizer(wx.HORIZONTAL)
        conn_button = wx.Button(pl, size=(200, 40), label="连接")
        dis_conn_button = wx.Button(pl, size=(200, 40), label="断开")
        g1.Add(conn_button, 1, wx.TOP | wx.LEFT)
        # BUG FIX: the original used wx.Right, which does not exist and
        # raises AttributeError at startup; the constant is wx.RIGHT.
        g1.Add(dis_conn_button, 1, wx.TOP | wx.RIGHT)
        box.Add(g1, 1, wx.ALIGN_CENTER)
        self.text = wx.TextCtrl(pl, size=(400, 250), style=wx.TE_MULTILINE | wx.TE_READONLY)
        box.Add(self.text, 1, wx.ALIGN_CENTER)
        self.input_text = wx.TextCtrl(pl, size=(400, 100), style=wx.TE_MULTILINE)
        box.Add(self.input_text, 1, wx.ALIGN_CENTER)
        g2 = wx.FlexGridSizer(wx.HORIZONTAL)
        clear_button = wx.Button(pl, size=(200, 40), label="重置")
        send_button = wx.Button(pl, size=(200, 40), label="发送")
        g2.Add(clear_button, 1, wx.TOP | wx.LEFT)
        g2.Add(send_button, 1, wx.TOP | wx.RIGHT)
        box.Add(g2, 1, wx.ALIGN_CENTER)
        pl.SetSizer(box)
        # Bind click handlers for all four buttons.
        self.Bind(wx.EVT_BUTTON, self.connect_to_server, conn_button)
        self.Bind(wx.EVT_BUTTON, self.send_to, send_button)
        self.Bind(wx.EVT_BUTTON, self.go_out, dis_conn_button)
        self.Bind(wx.EVT_BUTTON, self.reset, clear_button)
        # Client state.
        self.name = c_name
        self.isConnected = False  # True while connected to the server
        self.client_socket = None
    def connect_to_server(self, event):
        """Open the TCP connection, announce our name, and start the reader thread."""
        print(f"客户端{self.name},开始连接服务器")
        if not self.isConnected:
            server_host_port = ('localhost', 8888)
            self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.client_socket.connect(server_host_port)
            # Protocol: immediately after connecting, send our display name.
            self.client_socket.send(self.name.encode('utf-8'))
            self.isConnected = True
            t = threading.Thread(target=self.recive_data)
            # Daemon thread: dies automatically when the client window closes
            # (t.daemon replaces the deprecated setDaemon()).
            t.daemon = True
            t.start()
    def recive_data(self):
        """Background loop: append every message from the server to the chat view."""
        while self.isConnected:
            data = self.client_socket.recv(1024).decode('utf-8')
            self.text.AppendText(f"{data}\n")
    def send_to(self, event):
        """Send the contents of the input box to the chat room."""
        if self.isConnected:
            info = self.input_text.GetValue()
            if len(info) > 0:
                self.client_socket.send(info.encode('utf-8'))
                # Clear the input box once the text has been sent.
                self.input_text.Clear()
    def go_out(self, event):
        """Tell the server we are leaving and stop the reader loop."""
        self.client_socket.send('A^disconnect^B'.encode('utf-8'))
        # Also stops the main receive loop in recive_data.
        self.isConnected = False
    def reset(self, event):
        """Discard any text typed into the input box."""
        self.input_text.Clear()
if __name__ == "__main__":
    # Ask for a client name on stdin, show the window, and run the event loop.
    app = wx.App()
    name = input("请输入客户端名字:")
    WeChatClient(name).Show()
    app.MainLoop()  # enter the wx event loop (blocks until the window closes)
| Bxiaoyu/NotesRep | Wechat/weChatClient.py | weChatClient.py | py | 3,819 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wx.Frame",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "wx.Frame.__init__",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "wx.DefaultPosition",
... |
22056037109 | import os
import re
import sys
import time
from setuptools import setup
HERE = os.path.dirname(__file__)
ROOT_DIR = os.path.abspath(os.path.join(HERE, os.pardir))
TEMP_PATH = "target"
# A Maven pom.xml one level up marks a source-tree build (vs. an sdist).
in_src = os.path.isfile(os.path.join(ROOT_DIR, "pom.xml"))
if in_src:
    pom_file = os.path.join(ROOT_DIR, 'pom.xml')
    with open(pom_file) as pomf:
        pom = pomf.read()
    # Extract the module version from the pom's <version> tag.
    version_match = re.search(r'\n <version>([\w\.\-]+)</version>', pom)
    if version_match:
        version_string = version_match.group(1)
        print("Version from: '%s' is: %s" % (pom_file, version_string))
        version_elements = version_string.split("-")
        # A trailing -SNAPSHOT marks a development build; releases have none.
        is_release = "SNAPSHOT" != version_elements[-1]
        base_version_elements = version_elements if is_release else version_elements[0:-1]
        base_version = base_version_elements[0] + ".".join(base_version_elements[1:])
        # Dev builds get a local-version suffix derived from the current time.
        version = base_version if is_release else "%s+%08x" % (base_version, int(time.time()))
    else:
        print("ERROR: Cannot read version from pom file '%s'." % pom_file, file=sys.stderr)
        exit(1)
    print("Module version is: %s" % version)
    print("Writing version file in: %s" % os.path.abspath("."))
    # Persist the computed version so installed copies can read it back.
    with open("pyrander/version.py", "w") as vf:
        vf.write("__version__='%s'\n" % version)
# Load __version__ from the generated (or pre-existing) version module.
with open('pyrander/version.py') as vf:
    exec(vf.read())
setup(
    name='pyrander',
    packages=['pyrander'], # this must be the same as the name above
    version=__version__,
    description='A random test lib',
    author='Piotr Szul',
    author_email='piotr.szul@csiro.au',
    url='https://github.com/piotrszul/pyrander',
    keywords=['testing', 'logging', 'example'], # arbitrary keywords
    classifiers=[],
    extras_require={
        'test': [
            'pyspark==2.1.2',
        ],
        'dev': ['twine'],
    },
    license="MIT",
)
| piotrszul/sparktest | python/setup.py | setup.py | py | 1,848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
21841926557 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 17:17:57 2015
Kepler problem and modified equations.
Apply the Stoermer-Verlet to the plannar Kepler problem
H(q, p) = 1/2p^Tp - 1/||q||, q, p belong to R^2.
Use the BCH formular (L + R 5.40) to compute the second-order correction of
the modified Hamiltonian \tilde{H} for this particular problem. Verify the
forth order convergence of the Stoermer-Verlet method with respect to the
modified Hamiltonian \tilde{H}_2 numerically. Take, for example, initial
conditions q = (1, 0) and p = (0, 1).
@author: rpoolman
"""
import Steppers.steppers as step
import numpy as np
import matplotlib.pyplot as plt
# setups for Kepler problem
def V(qx, qy):
    """Kepler potential V(q) = -1/||q||, evaluated componentwise."""
    return -1.0/np.sqrt(qx**2.0 + qy**2.0)
Dt = 0.01  # integration time step
T = 10     # total simulated time
# BUG FIX: np.int was removed from NumPy (>=1.24); the builtin int gives
# the same truncating conversion.
N = int(T/Dt)
q = np.zeros((N, 2))
p = np.zeros((N, 2))
q[0, :] = np.array([1.0, 0.0])  # initial position (1, 0)
p[0, :] = np.array([0.0, 1.0])  # initial momentum (0, 1)
# integrate with the Stoermer-Verlet scheme
for ii in range(len(q) - 1):
    q[ii + 1], p[ii + 1] = step.stoermerstep(V, q[ii], p[ii], Dt)
# plot results: orbit in real space plus both phase-space projections
plt.figure(1)
plt.subplots_adjust(hspace=0.2, wspace=0.15)
plt.clf()
ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
plt.title('Real Space Plot of Numerical Solution')
plt.plot(q[:, 0], q[:, 1])
plt.xlabel('X Coordinate')
plt.ylabel('Y Coordinate')
plt.axis([-1.5, 1.5, -1.5, 1.5])
ax2 = plt.subplot2grid((2, 2), (1, 0))
plt.title('Phase Space Plot of Numerical Solution along X')
plt.plot(q[:, 0], p[:, 0])
plt.xlabel('X Coordinate')
plt.ylabel('X Velocity')
plt.axis([-1.5, 1.5, -1.5, 1.5])
ax3 = plt.subplot2grid((2, 2), (1, 1))
# BUG FIX: this subplot shows the Y component; the title said "along X".
plt.title('Phase Space Plot of Numerical Solution along Y')
plt.plot(q[:, 1], p[:, 1])
plt.xlabel('Y Coordinate')
plt.ylabel('Y Velocity')
plt.axis([-1.5, 1.5, -1.5, 1.5]) | Rhys314/Simulating_Hamiltonian_Dynamics | Section5,5_Ex6.py | Section5,5_Ex6.py | py | 1,760 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,... |
70677270824 | """
Filename: locate_nci_data.py
Author: Damien Irving, irving.damien@gmail.com
Description: Locate CMIP5 data at NCI
"""
# Import general Python modules
import sys, os, pdb
import argparse
from ARCCSSive import CMIP5
import six
import glob
# Define functions
def main(inargs):
    """Locate CMIP5 files at NCI and optionally symlink them into place.

    Queries the ARCCSSive CMIP5 database for matching outputs, prints the
    DRSv2 path plus all replica ('elsewhere') paths, and for each filename
    either prints or executes mkdir/ln commands depending on the flags.
    """
    cmip5 = CMIP5.DB.connect()
    outputs = cmip5.outputs(experiment = inargs.experiment,
                            variable = inargs.variable,
                            mip = inargs.mip,
                            model = inargs.model,
                            ensemble = inargs.ensemble)
    ua6_path = '/g/data/ua6/DRSv2/CMIP5/%s/%s/%s/%s/%s/%s/latest/*' %(inargs.model, inargs.experiment, inargs.time_freq, inargs.realm, inargs.ensemble, inargs.variable)
    print('DRSv2:', glob.glob(ua6_path))
    my_path = '/g/data/r87/dbi599/DRSv2/CMIP5/%s/%s/%s/%s/%s/%s/latest' %(inargs.model, inargs.experiment, inargs.time_freq, inargs.realm, inargs.ensemble, inargs.variable)
    print('Elsewhere path:')
    elsewhere_path = []
    for o in outputs:
        var = o.variable  # NOTE(review): unused; kept to preserve behavior
        for v in o.versions:
            elsewhere_path.append(v.path)
            print(v.path)
    print('Elsewhere files:')
    # NOTE(review): assumes outputs is non-empty; outputs.first() would
    # otherwise fail — confirm upstream guarantees at least one match.
    for f in outputs.first().filenames():
        six.print_(f)
        if inargs.symlink:
            #assert len(elsewhere_path) == 1
            command1 = 'mkdir -p %s' %(my_path)
            command2 = 'ln -s -f %s/%s %s/%s' %(elsewhere_path[inargs.elsewhere_index], f, my_path, f)
            if inargs.execute:
                os.system(command1)
                os.system(command2)
            else:
                # Dry run: show the shell commands without executing them.
                print(command1)
                print(command2)
if __name__ == '__main__':
    extra_info ="""
author:
    Damien Irving, irving.damien@gmail.com
dependencies:
    vdi $ pip install --user ARCCSSive
    vdi $ export CMIP5_DB=sqlite:////g/data1/ua6/unofficial-ESG-replica/tmp/tree/cmip5_raijin_latest.db
"""
    description='Locate CMIP5 data at NCI'
    # RawDescriptionHelpFormatter keeps the epilog's manual line breaks.
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("experiment", type=str, help="Experiment name")
    parser.add_argument("variable", type=str, help="var_name")
    parser.add_argument("time_freq", type=str, help="e.g. mon or fx")
    parser.add_argument("mip", type=str, help="e.g. Omon, Amon, fx or aero")
    parser.add_argument("realm", type=str, help="e.g. atmos, ocean or aerosol")
    parser.add_argument("model", type=str, help="Model name")
    parser.add_argument("ensemble", type=str, help="e.g. r1i1p1")
    parser.add_argument("--symlink", action="store_true", default=False,
                        help="Create a symlink for the elsewhere files")
    parser.add_argument("--execute", action="store_true", default=False,
                        help="Execute the symlink command rather than printing to screen")
    parser.add_argument("--elsewhere_index", type=int, default=0,
                        help="Index for whcih elsewhere path to use")
    args = parser.parse_args()
    main(args)
| DamienIrving/ocean-analysis | downloads/locate_nci_data.py | locate_nci_data.py | py | 3,321 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "ARCCSSive.CMIP5.DB.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "ARCCSSive.CMIP5.DB",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "ARCCSSive.CMIP5",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "gl... |
18937659990 | from django.db import models
from wagtail.admin.panels import FieldPanel
from wagtail.snippets.models import register_snippet
class SimpleTaxonomy(models.Model):
    """An abstract model for simple taxonomy terms.

    Concrete subclasses inherit a title/slug pair, title ordering, and
    Wagtail admin panels.
    """
    class Meta:
        abstract = True
        ordering = ['title']
    # Human-readable label for the term.
    title = models.CharField(
        max_length=100,
        help_text='The title of the category'
    )
    # URL-safe identifier; uniqueness is enforced at the database level.
    slug = models.SlugField(
        max_length=100,
        unique=True,
        help_text='The slug must be unique for this category'
    )
    # Fields exposed for translation (consumed by the translation machinery).
    translation_fields = [
        'title',
        'slug',
    ]
    # Wagtail admin edit panels.
    panels = [
        FieldPanel('title'),
        FieldPanel('slug'),
    ]
    def __str__(self):
        """Override magic method to return term title."""
        return self.title
# register_snippet makes this model editable under Wagtail's Snippets UI.
@register_snippet
class Constituency(SimpleTaxonomy):
    """A concrete model for constituency taxonomy terms."""
    class Meta:
        verbose_name = 'Constituency'
        verbose_name_plural = 'Constituencies'
| IATI/IATI-Standard-Website | taxonomies/models.py | models.py | py | 1,010 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": ... |
13300689829 | from art import logo
import os
# Module-level registry of all bid records gathered during one auction run.
bid = list()
def add_new_bidder(name: str, bid_price: int) -> dict:
    """Build a bid record for one bidder.

    Returns a dict with the bidder's 'name' (str) and 'bid_price' (int).
    The original annotation claimed dict[str, int], but 'name' maps to a str,
    so the return type is annotated as a plain dict here.
    """
    return {"name": name, "bid_price": bid_price}
def find_the_highest_bidder(bid: list) -> tuple[str, int]:
    """Return (winner_name, highest_bid) among all bid records.

    bid: list of dicts with 'name' and 'bid_price' keys (the original
    annotation wrongly said dict[str, int]). Ties keep the earlier bidder
    because only strictly higher prices replace the current winner; an
    empty list yields ("", 0).
    """
    highest_bid = 0
    winner = ""
    for bidder in bid:
        if bidder["bid_price"] > highest_bid:
            highest_bid = bidder["bid_price"]
            winner = bidder["name"]
    return winner, highest_bid
def main():
    """Run the interactive secret-auction loop and announce the winner.

    Repeatedly collects name/price pairs on stdin into the module-level
    `bid` list, clearing the terminal between bidders so entries stay
    secret, then prints the highest bidder.
    """
    os.system('clear')
    should_continue = True
    while should_continue:
        print(logo)
        name = input("What is your name? ")
        bid_price = int(input("What is your bid price?: $"))
        bid.append(add_new_bidder(name=name, bid_price=bid_price))
        is_next_user = input("Are there other users who want to bid? 'yes' or 'no'?:\n")
        if is_next_user == 'no':
            should_continue = False
        else:
            # Clear the screen so the next bidder cannot see previous bids.
            os.system('clear')
    winner_name, winner_price = find_the_highest_bidder(bid)
    os.system('clear')
    print(logo)
    print(f"The winner is {winner_name} who paid ${winner_price}")
if __name__ == "__main__":
    main()
| robmik1974/secret-auction | main.py | main.py | py | 1,215 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "art.logo",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "os.system",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 41,
... |
14733803314 | # a plugin: CSV whitelist.
# here we create a 'document type' (or 'an instance of Doc') with one input (a csv file)
# NOTE: 'doc' is a magic variable that is used to build a Doc instance `Doc( **module.doc )`
# This eliminates any need for us to 'from doc import Doc', which is good.
from datetime import datetime
from utils import date_from_str
def counter(*args):
    """Return how many times this function has been called (1-based).

    State lives in a module-level global so every Mapper invocation shares
    the same running count; positional args are accepted and ignored so the
    Mapper can call it with arbitrary values.
    """
    global count
    try:
        count += 1
    except NameError:  # first call: the global does not exist yet
        # FIX: the original bare `except:` swallowed every exception,
        # not just the missing-global case.
        count = 1
    return count
# Document-type descriptor consumed by the plugin loader as Doc(**module.doc).
doc = {
    'name':'whitelist',
    'inputs':{
        'name' : 'whitelist_csv', # again, a unique name is always required
        # csv_input simply wants to read a file. So 'location' is just a file path.
        'location' : 'whitelist.csv', # This path will be read immediately, so we can use a relative path (to the plugin file)
        # csv_input only knows how to use one value - a dictionary key we name with 'id'
        'id': 'hash',
        # 'data' is a 'Mapper': it massages the raw input data into the document's format
        'data': {
            'REMAP': { # REMAP instructs the Mapper to name outputs directly from inputs
                'name': 0, # our output dictionary will have a 'name' field taken from column 0
                'hash': 1, # and a 'hash' field taken from column 1
                # column 2 holds a date string; parsed at ingest time
                # (assumes date_from_str handles the CSV's format — TODO confirm)
                'date.created': (2, lambda v: date_from_str(v)),
                'comment': 3,
            },
            'from_whitelist': True, # this field will simply be copied
            'counter': counter, # THIS, IS, PYTHON
            # ingestion timestamp, truncated to whole seconds
            'date.retrieved': lambda v: datetime.utcnow().replace(microsecond=0), # yes, we can
        },
    },
}
| JeffKwasha/hachit | plugins/whitelist.py | whitelist.py | py | 1,667 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "utils.date_from_str",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "name"
}
] |
29413120017 | import numpy as np
import cv2
cap = cv2.VideoCapture(0)

# Define the codec and create a VideoWriter for the flipped output stream.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera disconnected or the stream ended.
        break
    # BUG FIX: convert only AFTER the read succeeded; in the original,
    # cvtColor ran before checking ret and would crash on a None frame.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.flip(frame, 0)
    # Write the flipped frame
    out.write(frame)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
| land-pack/opencv-example | basic/simple_cap_save_video.py | simple_cap_save_video.py | py | 553 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
... |
37977404672 | '''
Python module containing "Master" classes of easy_gui project.
The classes in here are designed to be subclassed in user applications.
'''
import functools
import os
import sys
import threading
import tkinter as tk
import traceback
from tkinter import ttk
from tkinter import _tkinter
from typing import List, Dict

from .styles import BaseStyle
from . import widgets
def recreate_if_needed(func):
    '''
    Decorator used to enable addition of Sections or Widgets after GUI has been created.
    (that is, can add elements outside of EasyGUI subclass' __init__ method)
    '''
    @functools.wraps(func)  # preserve the wrapped method's name and docstring
    def inner(*args, **kwargs):
        self = args[0]  # decorated callables are methods; args[0] is the instance
        value = func(*args, **kwargs)
        if self.root.created:
            self.root.create()  # need to re-create GUI so that the new elements show up!
        return value
    return inner
class GridMaster():
    '''
    Mixin implementing CSS grid-template-area style layout. Subclasses must
    also provide tkinter's grid_rowconfigure/grid_columnconfigure methods
    (e.g. by inheriting from a tkinter widget alongside this class).
    '''
    def __init__(self):
        self.grid_areas = {}  # area name -> {'first_row', 'last_row', 'first_column', 'last_column'}
        self.grid_configuration = []  # the row strings last passed to configure_grid

    def configure_grid(self, grid_configuration: List[str]) -> Dict[str, int]:
        '''
        Specify full-window layout with CSS grid-template-area style list of strings.
        - Each item in provided grid_configuration corresponds to a grid row and spaces
        delimit each cell.
        - Individual cells or rectangular groups of contiguous cells may be indicated by name
        while unnamed cells are specified by one or more periods.
        '''
        self.grid_configuration = grid_configuration
        self.grid_rows = len(grid_configuration)
        self.grid_columns = len(grid_configuration[0].split())
        for row in grid_configuration:
            # BUG FIX: the original compared the first row against itself
            # (len(grid_configuration[0].split())), so mismatched rows were
            # never detected; each row must match the expected column count.
            if len(row.split()) != self.grid_columns:
                print('ERROR! Differing number of grid columns specified below:')
                print(grid_configuration)
                return
        # Map every named area to the bounding box of the cells it covers.
        names = set(cell for row in grid_configuration for cell in row.split() if '.' not in cell)
        for name in names:
            first_row, last_row, first_column, last_column = None, None, None, None
            for i, row in enumerate(grid_configuration):
                if name in row.split():
                    if first_row is None:
                        first_row = i  # fixed at the first row containing name
                    last_row = i  # keeps growing while name appears in later rows
                    if first_column is None:
                        row_list = row.split()
                        first_column = row_list.index(name)  # far-left column of name
                        # reversed search gives the far-right column
                        last_column = len(row_list) - row_list[::-1].index(name) - 1
            self.grid_areas[name] = {'first_row': first_row, 'last_row': last_row,
                                     'first_column': first_column, 'last_column': last_column}
        # Now make elements expand evenly with window resize by default
        if self.grid_areas != {}:
            limits = self.grid_limits()
            for row in range(limits['min_row'], limits['max_row'] + 1):
                self.grid_rowconfigure(row, weight=1, minsize=10)
            for col in range(limits['min_col'], limits['max_col'] + 1):
                self.grid_columnconfigure(col, weight=1, minsize=10)

    def add_grid_row(self, row_name: str):
        '''Append one full-width row named row_name and reapply the layout.'''
        if self.grid_configuration == []:
            self.grid_configuration = [row_name]
        else:
            num_columns = len(self.grid_configuration[0].split(' '))
            self.grid_configuration.append(' '.join([row_name] * num_columns))
        self.configure_grid(self.grid_configuration)

    def grid_limits(self) -> dict:
        '''Return the min/max row and column indices spanned by any named area.'''
        # Sentinels chosen well beyond any realistic grid size.
        min_row, max_row, min_col, max_col = 500, -500, 500, -500
        for area in self.grid_areas.values():
            if area['first_row'] < min_row:
                min_row = area['first_row']
            if area['last_row'] > max_row:
                max_row = area['last_row']
            if area['first_column'] < min_col:
                min_col = area['first_column']
            if area['last_column'] > max_col:
                max_col = area['last_column']
        return {'min_row': min_row, 'max_row': max_row, 'min_col': min_col, 'max_col': max_col}
class SectionMaster():
def __init__(self):
self.sections: dict = {}
self.widgets: dict = {}
@recreate_if_needed
def add_section(self, name='', title=False, grid_area=None,
borderwidth=None, relief=None, tabbed: bool=False, equal_button_width: bool=False, external_section=None):
'''
Add a Section object to the parent (root window or other Section).
'''
if external_section: # if is an externally-built section is passed in
if not name:
name = external_section.__name__
section = external_section(parent=self, name=name, title=title, grid_area=grid_area,
borderwidth=borderwidth, relief=relief, tabbed=tabbed, equal_button_width=equal_button_width)
else:
if name == '':
name = f'section{len(self.sections) + 1}'
if name in self.sections:
raise ValueError('Unable to add section as a section with the given name already exists!')
if borderwidth is None:
borderwidth = self.style.borderwidth
if relief is None:
relief = self.style.section_border
# Next 2 lines set grid_area to be name if not explicitly declared and not already used as a grid_area
if grid_area is None and name not in [s.grid_area for s in self.sections.values()]:
grid_area = name
section = Section(parent=self, name=name, title=title, grid_area=grid_area,
borderwidth=borderwidth, relief=relief, tabbed=tabbed, equal_button_width=equal_button_width)
self.sections[name] = section
return section
@recreate_if_needed
def add_widget(self, type='label', text='', widget_name=None, grid_area=None, **kwargs):
'''
Add a Widget object to this Section by calling the add_widget function in widgets.py
(Easier to keep the function there as it needs access to all the individual Widget classes.)
'''
return widgets.add_widget(self, type=type, text=text, widget_name=widget_name, grid_area=grid_area, **kwargs)
def delete_widget(self, widget_name) -> None:
'''
Fully delete a widget.
Pass without issue if the widget doesn't exist.
'''
try:
self.widgets[widget_name].destroy()
del self.widgets[widget_name]
except:
pass
def delete_all_widgets(self) -> None:
'''
Fully delete all child widgets of this section.
'''
for w_name in list(self.widgets.keys()):
self.delete_widget(w_name)
def _clear_and_recreate_plot(self, mpl_figure, widget_name, grid_area, kwargs):
old_widget = self.widgets[widget_name] # grab reference to widget to be deleted so that its place in dict can be given to new widget
new_widget = self.add_widget(type='matplotlib', widget_name=widget_name, toolbar=old_widget.toolbar, grid_area=grid_area)
new_widget.bindings = old_widget.bindings
new_widget.small_figure_warning_given = old_widget.small_figure_warning_given
new_widget.position()
new_widget.draw_plot(mpl_figure=mpl_figure)
new_widget.position() # have to reposition/create Widget
old_widget.destroy() # destroy after new widget is positioned for slightly less flickering
@recreate_if_needed
def add_tab(self, name='', **kwargs):
if not self.tabbed:
print('Error! Cannot .add_tab to a Section unless tabbed=True when it is created.')
return
section = Section(parent=self.tabs, name=name, **kwargs)
self.sections[name] = section
self.tabs.add(section, text=name)
return section
def delete_section(self, section_name) -> None:
'''
Fully delete a section and all of its child widgets.
Pass without issue if the section doesn't exist.
'''
try:
for key, widget in self.sections[section_name].widgets.items():
widget._widget.destroy()
self.sections[section_name].destroy()
del self.sections[section_name]
except:
pass
class EasyGUI(tk.Tk, GridMaster, SectionMaster):
'''
Main class to be subclassed for full GUI window.
'''
style = BaseStyle()
def __init__(self, alpha: float=1.0, topmost: bool=False, disable_interaction: bool=False, toolwindow: bool=False, fullscreen: bool=False, overrideredirect: bool=False, **kwargs) -> None:
super().__init__()
GridMaster.__init__(self)
SectionMaster.__init__(self)
EasyGUI.style.create_font() # have to generate font.Font object after initial tk root window is created
self.key_log = [] # record keys/buttons triggered
self.key_triggers = [('closegui', lambda: self.close())]
self.icon(bitmap=os.path.join(os.path.dirname(__file__), 'resources', 'transparent.ico'), default=True)
self.title('EasyGUI')
self.geometry("300x180+100+60") # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
# Instead of setting .geometry, can also set "width", "height" to integer values
# and "center" to True in application subclass to size and center window
self.transparent = False
self.configure(background=self.style.window_color)
if self.style.transparent:
self.wm_attributes('-transparentcolor', 'white') # turn off window shadow
# See documention of below WINDOWS options here: https://wiki.tcl-lang.org/page/wm+attributes
self.wm_attributes('-alpha', alpha)
self.wm_attributes('-fullscreen', fullscreen)
self.wm_attributes('-topmost', topmost) # make root window always on top
self.overrideredirect(overrideredirect) # hide root window drag bar and close button
try:
self.wm_attributes('-disabled', disable_interaction) # disables window interaction for click pass through
self.wm_attributes('-toolwindow', toolwindow) # makes a window with a single close-button (which is smaller than usual) on the right of the title bar
except Exception: # above options only for Windows system
pass
s = ttk.Style()
s.configure('.', background=self.style.widget_bg_color)
s.configure('.', font=self.style.font)
s.configure('.', foreground=self.style.text_color)
self.created = False
def __init_subclass__(cls, **kwargs):
'''
Wraps user subclass __init__ to implicitly handle the EasyGUI.__init__ call along with
calling .create() after application is fully defined in subclass __init__ method
'''
old_init = cls.__init__ # reference to original subclass method so new_init isn't recursive
def new_init(self, *args, **kwargs):
EasyGUI.__init__(self, **kwargs) # in place of super().__init__() in subclass __init__
try:
old_init(self, *args, **kwargs)
except TypeError:
print('\n* Are you passing in kwargs to GUI creation?\n* If so, remember to put a "**kwargs" in the __init__ function!\n')
traceback.print_exc()
self.create() # populates GUI elements
# now change window geometry if "width", "height" and/or "center" attributes are set in subclass' __init__ method
# seems easier to allow this than forcing self.geometry() usage as that is a bit cryptic and hard to remember
# auto-centering by setting self.center = True is also convenient as usually that behavior is desired
self.update_idletasks() # need to run here so any geometry changes from subclass __init__ run before checking sizes
current_width, current_height = self.winfo_width(), self.winfo_height()
frame_width = self.winfo_rootx() - self.winfo_x()
window_width = current_width + 2 * frame_width
titlebar_height = self.winfo_rooty() - self.winfo_y()
window_height = current_height + titlebar_height + frame_width
if hasattr(self, 'width'):
window_width = self.width
if hasattr(self, 'height'):
window_height = self.height
if hasattr(self, 'center') and self.center == True:
center_x_val = int(self.winfo_screenwidth() / 2 - window_width / 2)
center_y_val = int((self.winfo_screenheight() / 2 - window_height / 2))
center_y_val -= 30 # offset a little higher than middle since many people have toolbar on bottom of screen
if center_x_val < 0: # don't let left side of window go beyond screen if too wide
center_x_val = 0
if center_y_val < 0: # don't let top of window go above screen if too tall
center_y_val = 0
self.geometry(f'{window_width}x{window_height}+{center_x_val}+{center_y_val}')
elif hasattr(self, 'width') or hasattr(self, 'height'):
self.geometry(f'{window_width}x{window_height}')
self.bind_all('<Key>', self.log_keys)
self.mainloop() # runs tkinter mainloop
cls.__init__ = new_init # overwrite subclass __init__ method
@property
def root(self):
'''Used by downstream elements to reference EasyGUI as root'''
return self
def log_keys(self, event):
'''
Record key presses up to a maximum of 100 characters.
Also check to see if any triggers are met and execute as needed.
'''
self.key_log.append(event.char)
self.key_log = self.key_log[-100:]
self.check_key_triggers()
def check_key_triggers(self):
'''
Check if a key trigger has been met,
run function if so, and clear out key log.
(so next key doesn't trigger same result)
'''
key_str = ''.join(self.key_log)
for trigger, action in self.key_triggers:
if trigger in key_str:
self.key_log = []
action()
break
def add_key_trigger(self, trigger, func, separate_thread: bool=False):
'''
Bind a function to a sequence of key presses.
Can specify as separate_thread=True for long-running functions.
'''
if separate_thread:
def threaded_func(*args):
threading.Thread(target=func).start()
self.key_triggers.append((trigger, threaded_func))
else:
self.key_triggers.append((trigger, func))
def close(self):
'''
Alias for self.destroy.
Can be used by any GUI element to close the window via "self.root.close()"
since self.root will travel upstream until it hits EasyGUI.close().
'''
self.destroy()
def icon(self, bitmap, default: bool=False) -> None:
'''
Alternate method to call tk.Tk iconbitmap method using altered path handling
so that PyInstaller can package application with specified .ico file.
If not default, warning message is printed on failing to locate .ico file.
'''
try:
super().iconbitmap(bitmap=resource_path(bitmap))
except _tkinter.TclError:
if default:
pass # Pass silently if default .ico not found occurs when using PyInstaller and not adding transparent.ico to "datas"
else:
print(f'Cannot locate {bitmap}! If using PyInstaller, be sure to specify this file in "datas".')
def create(self, force_row=False) -> None:
'''
Positions GUI elements in window.
May be called recursively by child Sections as elements are positioned.
'''
for child in {**self.widgets, **self.sections}.values():
try:
child.create(force_row) # if child is another Section object
except AttributeError:
child.position(force_row) # if child is a Widget object
self.created = True
def add_menu(self,
commands={'File': lambda: print('File button'), 'Edit': lambda: print('Edit button')},
cascades={'Options': {'Option 1': lambda: print('Option 1'), 'Option 2': lambda: print('Option 2')}}) -> None:
'''
Add a Menu to the top of the root window.
'''
self.menu = tk.Menu(self)
for label, cmd in commands.items():
self.menu.add_command(label=label, command=cmd)
for cascade, c_commands in cascades.items():
cascade_menu = tk.Menu(self.menu, tearoff=0)
for label, cmd in c_commands.items():
cascade_menu.add_command(label=label, command=cmd)
self.menu.add_cascade(label=cascade, menu=cascade_menu)
self.config(menu=self.menu)
def __repr__(self):
return 'Main EasyGUI Application'
def popup(self, *args, **kwargs):
'''
Returns a context manager for generating a popup window. Example usage:
with self.popup() as popup:
popup.add_widget('lbl', 'Test1')
popup.add_widget('btn', 'Test Button', command_func=lambda *args: print('Test Button clicked'))
'''
return PopUp(*args, **kwargs)
class PopUp(tk.Toplevel, GridMaster, SectionMaster):
'''
Basically a mini EasyGUI class that inherits from tk.Toplevel instead of tk.Tk.
Re-implements basic methods of EasyGUI class so widgets can be added.
'''
def __init__(self, *args, width: int=300, height: int=180, x: int=120, y: int=80, **kwargs):
if kwargs.get('tooltip', False):
super().__init__()
GridMaster.__init__(self)
SectionMaster.__init__(self)
self.wm_attributes('-disabled', True) # disables window interaction for click pass through
self.wm_overrideredirect(True) # removes window
self.wm_attributes('-alpha', 0.8)
self.geometry(f'{width}x{height}+{x}+{y}') # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
self.style = EasyGUI.style
self.style.create_font()
self.configure(bg=self.style.tooltip_color)
else:
super().__init__()
GridMaster.__init__(self)
SectionMaster.__init__(self)
self.icon(bitmap=os.path.join(os.path.dirname(__file__), 'resources', 'transparent.ico'), default=True)
self.geometry(f'{width}x{height}+{x}+{y}') # format of "WIDTHxHEIGHT+(-)XPOSITION+(-)YPOSITION"
self.style = EasyGUI.style
self.style.create_font()
def __enter__(self):
self.created = False
return self
def __exit__(self, *args):
self.create()
@property
def root(self):
'''Used by downstream elements to reference EasyGUI as root'''
return self
def icon(self, bitmap, default: bool=False) -> None:
'''
Alternate method to call tk.Tk iconbitmap method using altered path handling
so that PyInstaller can package application with specified .ico file.
If not default, warning message is printed on failing to locate .ico file.
'''
try:
super().iconbitmap(bitmap=resource_path(bitmap))
except _tkinter.TclError:
if default:
pass # Pass silently if default .ico not found occurs when using PyInstaller and not adding transparent.ico to "datas"
else:
print(f'Cannot locate {bitmap}! If using PyInstaller, be sure to specify this file in "datas".')
def create(self, force_row=False) -> None:
'''Copied from EasyGUI.create'''
for name, section in self.sections.items():
section.create(force_row=force_row)
self.created = True
@recreate_if_needed
def add_widget(self, *args, **kwargs):
'''Copied from EasyGUI.add_widget'''
if '_default' not in self.sections:
self.add_section('_default')
return self.sections['_default'].add_widget(*args, **kwargs)
def __repr__(self):
return 'EasyGUI PopUp Window'
class Section(tk.Frame, GridMaster, SectionMaster):
'''
A Section is a tk.Frame used for storing and managing widgets.
Sections exist as children of the root (EasyGUI) window or other Sections.
'''
def __init__(self, parent=None, name='', title=False, grid_area=None,
tabbed: bool=False, equal_button_width: bool=False, **kwargs) -> None:
borderwidth = kwargs.get('borderwidth', 1)
relief = kwargs.get('relief', 'ridge')
if relief != 'ridge' and not borderwidth:
borderwidth = 1
self.tabbed = tabbed
super().__init__(master=parent,
bg=EasyGUI.style.section_color,
padx=EasyGUI.style.frame_padx,
pady=EasyGUI.style.frame_pady,
borderwidth=borderwidth,
relief=relief)
GridMaster.__init__(self)
SectionMaster.__init__(self)
self.parent = parent
self.name = name
self.grid_area = grid_area
if tabbed:
self.tabs = ttk.Notebook(self)
self.tabs.style = self.style
self.tabs.root = self.root
self.equal_button_width = equal_button_width
if title: # title kwargs can be provided as True or a string
if isinstance(title, str): # if string, use title for label text
self.add_widget(type='label', text=title)
elif title == True: # if True, use the name as the label text
self.add_widget(type='label', text=name)
def __init_subclass__(cls, **kwargs):
'''
Wraps user subclass __init__ to implicitly handle the Section.__init__ call.
This avoids the need for subclass to use "super().__init__(*args, **kwargs)"
'''
old_init = cls.__init__ # reference to original subclass method so new_init isn't recursive
def new_init(self, *args, **kwargs):
Section.__init__(self, **kwargs) # in place of super().__init__() in subclass __init__
try:
old_init(self, *args, **kwargs)
except TypeError:
# traceback.print_exc()
# print('\n\n* Are you subclassing Section or passing in kwargs to Section creation?\n* If so, remember to put a "**kwargs" in the __init__ function!\n')
old_init(self)
cls.__init__ = new_init # overwrite subclass __init__ method
@property
def style(self):
'''Goes upsteam to evenually reference EasyGUI.style'''
return self.parent.style
@property
def root(self):
'''Goes upsteam to evenually reference EasyGUI as root'''
return self.parent.root
def create(self, force_row: bool=False):
'''
Positions this section within the parent along with
positioning all children (Sections and/or Widgets).
'''
self.position(force_row)
if self.equal_button_width:
self.match_child_button_widths()
for child in {**self.widgets, **self.sections}.values():
try:
child.create(force_row) # if child is another Section object
except AttributeError:
child.position(force_row) # if child is a Widget object
def match_child_button_widths(self):
child_buttons = [child for child in self.widgets.values() if isinstance(child, widgets.Button)]
if len(child_buttons) > 1:
max_width = int(round(max(child.width / 7.0 for child in child_buttons if not child.image)))
for child in child_buttons:
if not child.image:
child.config(width=max_width)
def position(self, force_row: bool=False) -> None:
'''
Physically position this Section within its parent container.
'''
try:
if hasattr(self.parent, 'grid_areas'):
if self.parent.grid_areas != {} and self.grid_area and not force_row:
try:
if not hasattr(self.parent, 'tabbed') or not self.parent.tabbed:
bounds = self.parent.grid_areas[self.grid_area]
self.grid(row=bounds['first_row'], column=bounds['first_column'], rowspan=bounds['last_row']-bounds['first_row']+1, columnspan=bounds['last_column']-bounds['first_column']+1, sticky='NSEW')
else:
self.pack()
if self.tabbed:
self.tabs.pack()
return # early return if everything works fine with initial attempt (no other actions needed)
except KeyError:
if self.grid_area != self.name: # basically, if user-specified grid_area (are same if programatically set grid_area)
print(f'"{self.grid_area}" not found in parent\'s grid areas.\nResorting to a new row.')
self.parent.add_grid_row(self.name)
self.grid_area = self.name
self.parent.create()
except _tkinter.TclError:
print(f'\n--- GRID FAILED for Section: "{self.name}" ---\nTry ensuring "grid_area" arg is given for all Sections in a given parent.\nAdding to a new row instead.')
self.parent.create(force_row=True) # go back and fully recreate section forcing all children to be packed/in new rows
@property
def width(self) -> float:
'''
Estimate and return width desired by this Section.
'''
return float(max(widget.width for widget in self.widgets.values()))
@property
def height(self) -> float:
'''
Estimate and return height desired by this Section.
'''
return float(sum(widget.height for widget in self.widgets.values()))
def __repr__(self) -> str:
return f'Section: "{self.name}"'
def resource_path(relative_path):
'''Get absolute path to resource to allow PyInstaller bundling.'''
try:
base_path = sys._MEIPASS # PyInstaller-created temporary folder
except:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
| zachbateman/easy_gui | easy_gui/master_classes.py | master_classes.py | py | 26,902 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "tkinter.Tk",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "styles.BaseStyle",
"line... |
9149914830 | #coding = 'utf-8'
'''
这是一个格栅布局的小例子!
文章链接:http://www.xdbcb8.com/archives/209.html
'''
import sys
from PyQt5.QtWidgets import (QWidget, QPushButton, QApplication, QGridLayout, QLCDNumber)
class Example(QWidget):
'''
格栅布局
'''
def __init__(self):
'''
一些初始设置
'''
super().__init__()
self.Init_UI()
def Init_UI(self):
'''
界面初始设置
'''
grid = QGridLayout()
self.setLayout(grid)
self.setGeometry(300, 300, 400, 300)
self.setWindowTitle('学点编程吧-计算器')
self.lcd = QLCDNumber()
grid.addWidget(self.lcd, 0, 0, 3, 0)#我们使QLCDNumber小部件跨越4行
grid.setSpacing(10)#将垂直和水平间距设置为10
names = ['Cls', 'Bc', '', 'Close',
'7', '8', '9', '/',
'4', '5', '6', '*',
'1', '2', '3', '-',
'0', '.', '=', '+']
positions = [(i, j) for i in range(4, 9) for j in range(4, 8)]#将小部件添加到窗口中
for position, name in zip(positions, names):
#小部件的上的名称和它们的位置一一对应起来,注意zip的用法
if name == '':
continue
button = QPushButton(name)
grid.addWidget(button, *position)
button.clicked.connect(self.Cli)
self.show()
def Cli(self):
'''
点击按钮时对应的槽函数
'''
sender = self.sender().text()
ls = ['/', '*', '-', '=', '+']
if sender in ls:
self.lcd.display('A')#当我们点击'/', '*', '-', '=', '+'时,LCD上显示'A'
else:
self.lcd.display(sender)#反之显示按钮上的名称,如:1
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
app.exit(app.exec_())
| redmorningcn/PyQT5Example | PyQt5All/PyQt56/QGrild layout.pyw | QGrild layout.pyw | pyw | 2,002 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QGridLayout",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QLCDNumber",
"line_number": 32,
"usage_type": "call"
},
{
"ap... |
33319841830 | import pygame as pg
from input_box import InputBox
pg.init()
screen = pg.display.set_mode((640, 480))
def main():
clock = pg.time.Clock()
input_box1 = InputBox(100, 100, 140, 32)
done = False
while not done:
for event in pg.event.get():
if event.type == pg.QUIT:
done = True
input_box1.handle_event(event)
screen.fill((30, 30, 30))
input_box1.draw(screen)
input_box1.update()
pg.display.flip()
clock.tick(30)
if __name__ == '__main__':
main()
pg.quit()
| MrRamka/FlyGame | test_input_form.py | test_input_form.py | py | 575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Cloc... |
495660367 | import os
import types
import pytest
import yaml
from dagster import (
DagsterEventType,
DagsterInvalidConfigError,
RunConfig,
check,
execute_pipeline,
pipeline,
seven,
solid,
)
from dagster.core.instance import DagsterInstance, InstanceRef, InstanceType
from dagster.core.storage.event_log import SqliteEventLogStorage
from dagster.core.storage.local_compute_log_manager import LocalComputeLogManager
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import SqliteRunStorage
def test_fs_stores():
@pipeline
def simple():
@solid
def easy(context):
context.log.info('easy')
return 'easy'
easy()
with seven.TemporaryDirectory() as temp_dir:
run_store = SqliteRunStorage.from_local(temp_dir)
event_store = SqliteEventLogStorage(temp_dir)
compute_log_manager = LocalComputeLogManager(temp_dir)
instance = DagsterInstance(
instance_type=InstanceType.PERSISTENT,
local_artifact_storage=LocalArtifactStorage(temp_dir),
run_storage=run_store,
event_storage=event_store,
compute_log_manager=compute_log_manager,
)
run = RunConfig()
execute_pipeline(simple, run_config=run, instance=instance)
assert run_store.has_run(run.run_id)
assert run_store.get_run_by_id(run.run_id).status == PipelineRunStatus.SUCCESS
assert DagsterEventType.PIPELINE_SUCCESS in [
event.dagster_event.event_type
for event in event_store.get_logs_for_run(run.run_id)
if event.is_dagster_event
]
stats = event_store.get_stats_for_run(run.run_id)
assert stats.steps_succeeded == 1
assert stats.end_time is not None
def test_init_compute_log_with_bad_config():
with seven.TemporaryDirectory() as tmpdir_path:
with open(os.path.join(tmpdir_path, 'dagster.yaml'), 'w') as fd:
yaml.dump({'compute_logs': {'garbage': 'flargh'}}, fd, default_flow_style=False)
with pytest.raises(DagsterInvalidConfigError, match='Undefined field "garbage"'):
DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
def test_init_compute_log_with_bad_config_override():
with seven.TemporaryDirectory() as tmpdir_path:
with pytest.raises(DagsterInvalidConfigError, match='Undefined field "garbage"'):
DagsterInstance.from_ref(
InstanceRef.from_dir(tmpdir_path, overrides={'compute_logs': {'garbage': 'flargh'}})
)
def test_init_compute_log_with_bad_config_module():
with seven.TemporaryDirectory() as tmpdir_path:
with open(os.path.join(tmpdir_path, 'dagster.yaml'), 'w') as fd:
yaml.dump(
{'compute_logs': {'module': 'flargh', 'class': 'Woble', 'config': {}}},
fd,
default_flow_style=False,
)
with pytest.raises(check.CheckError, match='Couldn\'t import module'):
DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
MOCK_HAS_RUN_CALLED = False
def test_get_or_create_run():
with seven.TemporaryDirectory() as tmpdir_path:
instance = DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
run = PipelineRun.create_empty_run('foo_pipeline', 'bar_run')
assert instance.get_or_create_run(run) == run
assert instance.has_run(run.run_id)
assert instance.get_or_create_run(run) == run
# Run is created after we check whether it exists
with seven.TemporaryDirectory() as tmpdir_path:
instance = DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
run = PipelineRun.create_empty_run('foo_pipeline', 'bar_run')
def _has_run(self, run_id):
# This is uglier than we would like because there is no nonlocal keyword in py2
global MOCK_HAS_RUN_CALLED # pylint: disable=global-statement
# pylint: disable=protected-access
if not self._run_storage.has_run(run_id) and not MOCK_HAS_RUN_CALLED:
self._run_storage.add_run(PipelineRun.create_empty_run('foo_pipeline', run_id))
return False
else:
return self._run_storage.has_run(run_id)
instance.has_run = types.MethodType(_has_run, instance)
assert instance.get_or_create_run(run) == run
# Run is created after we check whether it exists, but deleted before we can get it
global MOCK_HAS_RUN_CALLED # pylint:disable=global-statement
MOCK_HAS_RUN_CALLED = False
with seven.TemporaryDirectory() as tmpdir_path:
instance = DagsterInstance.from_ref(InstanceRef.from_dir(tmpdir_path))
run = PipelineRun.create_empty_run('foo_pipeline', 'bar_run')
def _has_run(self, run_id):
global MOCK_HAS_RUN_CALLED # pylint: disable=global-statement
# pylint: disable=protected-access
if not self._run_storage.has_run(run_id) and not MOCK_HAS_RUN_CALLED:
self._run_storage.add_run(PipelineRun.create_empty_run('foo_pipeline', run_id))
MOCK_HAS_RUN_CALLED = True
return False
elif self._run_storage.has_run(run_id) and MOCK_HAS_RUN_CALLED:
MOCK_HAS_RUN_CALLED = False
return True
else:
return False
instance.has_run = types.MethodType(_has_run, instance)
with pytest.raises(check.CheckError, match='Inconsistent run storage'):
instance.get_or_create_run(run)
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster_tests/core_tests/storage_tests/test_local_instance.py | test_local_instance.py | py | 5,700 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster.solid",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "dagster.pipeline",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "dagster.seven.TemporaryDirectory",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "dagst... |
5099519953 | import telepot
from flask import Flask, request
try:
from Queue import Queue
except ImportError:
from queue import Queue
TOKEN = "525915971:AAHCrRmA_e8BsKDVLFw6pB6XS_BjJsUEnqM"
CHANNEL = "@signorinaggio"
app = Flask(__name__)
update_queue = Queue()
bot = telepot.Bot(TOKEN)
firma = "@formaementisChat"
EBOOK_LIST = []
def on_chat_message(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type == "document":
file_id = msg['document']['file_id']
messageId = msg['message_id']
bot.sendDocument(CHANNEL,file_id,caption=firma)
EBOOK_LIST.append(file_id)
if chat_id < 0 and chat_id != CHANNEL:
bot.deleteMessage((chat_id, messageId))
elif content_type == "text":
text = msg["text"].lower()
if text.startswith("/start"):
bot.sendMessage(chat_id,"Buongiorno.")
elif text.startswith("/ping"):
bot.sendMessage(chat_id,"Pong.")
bot.message_loop({'chat': on_chat_message}, source=update_queue)
@app.route('/', methods=['GET', 'POST'])
def pass_update():
update_queue.put(request.data)
return 'OK [200] HTTP CODE!!'
if __name__ == '__main__':
app.run(port=8080)
| IlPytone/delegator | app.py | app.py | py | 1,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "telepot.Bot",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "telepot.glance",
"line_number... |
24788878049 | import sys
from collections import defaultdict, deque
def main():
T = int(sys.stdin.readline().strip())
for _ in range(T):
F = int(sys.stdin.readline().strip())
graph = defaultdict(set)
ret = defaultdict(int)
# def dfs(start):
# visited = defaultdict(bool)
# dq = deque()
# dq.append(start)
# visited[start] = True
# count = 0
# while dq:
# node = dq.popleft()
# count += 1
# for next in graph[node]:
# if visited[next]:
# continue
# dq.append(next)
# visited[next] = True
# return count
parent = defaultdict(str)
# union find
def find(x):
if parent[x] == "": # root ый┤
return x
parent[x] = find(parent[x])
return parent[x]
def union(x, y):
x = find(x)
y = find(y)
if ret[x] == 0:
ret[x] = 1
if ret[y] == 0:
ret[y] = 1
if x != y:
parent[y] = x
ret[x] += ret[y]
# print("union", x, ret[x], y, ret[y])
for _ in range(F):
f1, f2 = sys.stdin.readline().strip().split()
if f1 < f2:
union(f1, f2)
else:
union(f2, f1)
print(ret[find(f1)])
if __name__ == "__main__":
main()
"""
2
3
Fred Barney
Barney Betty
Betty Wilma
3
Fred Barney
Betty Wilma
Barney Betty
2
3
4
2
2
4
""" | inhyeokJeon/AALGGO | Python/baekjoon/4195_friend.py | 4195_friend.py | py | 1,644 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"li... |
32793887647 | import os
import torch
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset_path = os.path.dirname(__file__) + '/../data/dataset.txt'
teacher_forcing_ratio = 0.5
HIDDEN_SIZE = 512
def change_to_device(model):
if device.type == 'cpu':
model.cpu()
else:
model.cuda()
| junix/gen_poem | conf/__init__.py | __init__.py | py | 333 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
... |
7040650853 | import pytest
import math
from vec import Vector2
import numpy.testing as npt
from adr.World import Ambient
from adr.Components import FreeBody
from adr.Components.Auxiliary import LandingGear
@pytest.fixture
def plane():
env = Ambient()
plane = FreeBody(
name='plane',
type='plane',
mass=23.4,
position_cg=Vector2(-0.2, 0.02),
pitch_rot_inertia=5.2,
ambient=env,
)
return plane
@pytest.fixture
def main_landing_gear():
main_landing_gear = LandingGear(
name='main_landing_gear',
relative_position=Vector2(x=-0.2, y=0),
relative_angle=math.radians(0),
mass=0.3,
height=0.1,
spring_coeff=1000,
dump_coeff=50,
friction_coeff=0.05
)
return main_landing_gear
def test_instantiation(main_landing_gear):
assert(main_landing_gear.type == 'landing_gear')
assert(main_landing_gear.height == 0.1)
assert(main_landing_gear.spring_coeff == 1000)
assert(main_landing_gear.dump_coeff == 50)
assert(main_landing_gear.friction_coeff == 0.05)
def test_floor_contact_point(main_landing_gear):
contact_point = Vector2(0, -0.1)
npt.assert_almost_equal(contact_point.x, 0)
npt.assert_almost_equal(contact_point.y, -0.1)
def test_gear_reaction(plane, main_landing_gear):
    """Normal reaction is zero while airborne and 80 N on the ground."""
    main_landing_gear.set_parent(plane)
    plane.velocity = Vector2(6, 0.4)
    # Plane on air (position.y = 2m), so no reaction on landing gear is expected
    plane.position = Vector2(10, 2)
    reaction, contact_point = main_landing_gear.gear_reaction()
    assert(type(contact_point) is Vector2)
    npt.assert_almost_equal(reaction.y, 0)
    # Plane on ground (position.y = 0m), so reaction on landing gear is expected
    plane.position = Vector2(10, 0)
    reaction, contact_point = main_landing_gear.gear_reaction()
    npt.assert_almost_equal(reaction.y, 80.0)
def test_gear_friction(plane, main_landing_gear):
    """Friction is zero airborne and opposes horizontal velocity on the ground."""
    main_landing_gear.set_parent(plane)
    plane.velocity = Vector2(6, 0.4)
    # Plane on air (position.y = 2m), so no friction on landing gear is expected
    plane.position = Vector2(10, 2)
    friction, contact_point = main_landing_gear.gear_friction()
    assert(type(contact_point) is Vector2)
    npt.assert_almost_equal(friction.x, 0)
    # Plane on ground (position.y = 0m), going forward, expected friction on negative x direction
    plane.position = Vector2(10, 0)
    friction, contact_point = main_landing_gear.gear_friction()
    npt.assert_almost_equal(friction.x, -4.0)
    # Plane on ground (position.y = 0m), going backwards, expected friction on positive x direction
    plane.velocity = Vector2(-6, 0.4)
    plane.position = Vector2(10, 0)
    friction, contact_point = main_landing_gear.gear_friction()
    npt.assert_almost_equal(friction.x, 4.0)
| CeuAzul/ADR | tests/Components/Auxiliary/test_LandingGear.py | test_LandingGear.py | py | 2,830 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "adr.World.Ambient",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "adr.Components.FreeBody",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "vec.Vector2",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.fixture"... |
28147682147 | import os
import cv2 as cv
import numpy as np
import time
import json
import threading
from queue import Queue
import sys
picture_path='C:/Users/Administrator/Desktop/1/'
picture_number=0 #第几个图片
num=0 #成功了多少张图片
#魔方的颜色
greenLower = (46, 133, 46)
greenUpper = (85, 255, 255)
redLower = (150, 100, 6)
redUpper = (185, 255, 255)
yellowLower = (21, 84, 46)
yellowUpper = (64, 255, 255)
orangeLower = (2, 150, 100)
orangeUpper = (15, 255, 255)
whiteLower = (0, 0, 146) # gray
whiteUpper = (180, 78, 255)
blueLower = (88, 143, 46)
blueUpper = (120, 255, 255)
Side_length=54
Outer_frame=[[10, 10], [85, 10], [160, 10],
[10, 85], [85, 85], [160, 85],
[10, 160], [85, 160], [160, 160]
]
listnet=[]
listall=[]
listhsv=[]
listrgb=[]
class MyEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars/arrays to plain Python types.

    Fix: the original referenced ``numpy.integer`` etc., but this module
    imports numpy as ``np``, so every call raised NameError.
    """
    def default(self, obj):
        # Called by json for objects it cannot serialize natively.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)
#获取图片的路径(返回图片路径)
def read_picture(i):
    """Return the path of the i-th source image and echo it to stdout."""
    image_path = picture_path + 'huanyuan{0}.jpg'.format(i)
    print(image_path)
    return (image_path)
def indextocolor(index):
    """Map a color index (0-5) to its BGR tuple; unknown indices give ()."""
    palette = {
        0: (0, 0, 255),      # red
        1: (255, 0, 0),      # blue
        2: (0, 255, 255),    # yellow
        3: (0, 165, 255),    # orange
        4: (0, 255, 0),      # green
        5: (255, 255, 255),  # white
    }
    return palette.get(index, ())
def draw_rectangle(image,color,i):
    """Fill sticker cell *i* of the 3x3 overlay grid with *color* (BGR)."""
    # Cell origin comes from the fixed layout table; Side_length is the size.
    x=Outer_frame[i][0]
    y=Outer_frame[i][1]
    x1=Outer_frame[i][0]+Side_length
    y1=Outer_frame[i][1]+Side_length
    cv.rectangle(image,(x,y),(x1,y1),color,-1)  # thickness -1 => filled
def get_averageBGR(image,x,y):
    """Per-channel mean of the 25x25 inner patch of the sticker at (x, y).

    NOTE(review): the input is converted with COLOR_HSV2RGB, i.e. the
    caller is assumed to pass an HSV image — confirm, since callers pass
    the k-means output directly. Channel 0 goes into the "B" list and
    channel 2 into the "R" list, so the returned order is
    [mean(ch2), mean(ch1), mean(ch0)] despite the variable names.
    """
    img = cv.cvtColor(image,cv.COLOR_HSV2RGB)
    # 25x25 patch offset 20px into the sticker cell (x indexes rows).
    img=img[x+20:x+45,y+20:y+45]
    per_image_Rmean = []
    per_image_Gmean = []
    per_image_Bmean = []
    list1=[]
    per_image_Bmean.append(np.mean(img[:,:,0]))
    per_image_Gmean.append(np.mean(img[:,:,1]))
    per_image_Rmean.append(np.mean(img[:,:,2]))
    R_mean = np.mean(per_image_Rmean)
    G_mean = np.mean(per_image_Gmean)
    B_mean = np.mean(per_image_Bmean)
    list1.append(R_mean)
    list1.append(G_mean)
    list1.append(B_mean)
    return (list1)
def get_averageHSV(img, x, y):
    """Average H, S, V over the 25x25 inner patch of the sticker at (x, y).

    Returns [aveh, aves, avev] as Python ints (floor-divided sums, same
    rounding as the original per-pixel loop, but vectorized: the original
    accumulated every pixel in a Python double loop).
    """
    patch = img[x+20:x+45, y+20:y+45]
    hsv = cv.cvtColor(patch, cv.COLOR_BGR2HSV)
    pixels = hsv.shape[0] * hsv.shape[1]
    # Sum in int64 so uint8 channel data cannot overflow, then floor-divide
    # exactly as the original did.
    aveh = int(np.sum(hsv[:, :, 0], dtype=np.int64)) // pixels
    aves = int(np.sum(hsv[:, :, 1], dtype=np.int64)) // pixels
    avev = int(np.sum(hsv[:, :, 2], dtype=np.int64)) // pixels
    return [aveh, aves, avev]
def average(img):
    """Histogram-equalize the luma channel of a BGR image and show the result."""
    # Color-image equalization: convert to YUV and equalize only the Y
    # (luma) channel so the chroma, and thus the colors, are preserved.
    image_yuv = cv.cvtColor(img,cv.COLOR_BGR2YUV)
    # Histogram equalization on the luma channel.
    image_yuv[:,:,0] = cv.equalizeHist(image_yuv[:,:,0])
    # Convert back and display the effect.
    output = cv.cvtColor(image_yuv,cv.COLOR_YUV2BGR)
    cv.imshow('HistEqualize',output)
    return (output)
    # img=cv.cvtColor(img,cv.COLOR_BGR2HSV)
    # (b, g, r) = cv.split(img)
    # bH = cv.equalizeHist(b)
    # gH = cv.equalizeHist(g)
    # rH = cv.equalizeHist(r)
    # # merge each channel
    # result = cv.merge((bH, gH, rH))
    # cv.imshow("直方图均衡化", result)
def balance(img_input):
    """Perfect-reflector white balance.

    Steps: (1) compute each pixel's R+G+B sum, (2) pick the threshold T so
    that the brightest ratio% of pixels act as the white reference,
    (3) average the B/G/R of those reference pixels, (4) rescale every
    channel by peak/avg and clamp to [0, 255].
    Sensitive to the ratio choice; poor when the brightest region is not
    actually white. Returns a new image; the input is not modified.
    """
    result = img_input.copy()
    chan_b, chan_g, chan_r = cv.split(result)
    rows, cols, _ = result.shape
    brightness = np.zeros(chan_b.shape)
    for row in range(rows):
        for col in range(cols):
            brightness[row][col] = (int(chan_b[row][col])
                                    + int(chan_g[row][col])
                                    + int(chan_r[row][col]))
    hist, _bins = np.histogram(brightness.flatten(), 766, [0, 766])
    ratio = 0.01
    threshold = 0
    count = 0
    level = 765
    # Walk the histogram from the bright end until ratio% of pixels are in.
    while level >= 0:
        count += hist[level]
        if count > rows * cols * ratio / 100:
            threshold = level
            break
        level = level - 1
    total_b, total_g, total_r = 0, 0, 0
    ref_pixels = 0
    for row in range(rows):
        for col in range(cols):
            if brightness[row][col] >= threshold:
                total_b += chan_b[row][col]
                total_g += chan_g[row][col]
                total_r += chan_r[row][col]
                ref_pixels = ref_pixels + 1
    avg_b = total_b / ref_pixels
    avg_g = total_g / ref_pixels
    avg_r = total_r / ref_pixels
    peak = float(np.max(result))
    # Rescale each channel so the reference average maps to the peak value.
    for row in range(rows):
        for col in range(cols):
            new_b = int(result[row][col][0]) * peak / int(avg_b)
            new_g = int(result[row][col][1]) * peak / int(avg_g)
            new_r = int(result[row][col][2]) * peak / int(avg_r)
            result[row][col][0] = min(max(new_b, 0), 255)
            result[row][col][1] = min(max(new_g, 0), 255)
            result[row][col][2] = min(max(new_r, 0), 255)
    return (result)
def gaussi_blur(img):
    """Apply a 5x5 Gaussian blur and return the smoothed image."""
    smoothed = cv.GaussianBlur(img, (5, 5), 0)
    return smoothed
def k_means(img):
    """Quantize the image to K=8 dominant colors with OpenCV k-means."""
    samples = np.float32(img.reshape((-1, 3)))
    # Stop after 10 iterations or when the epsilon 1.0 is reached.
    stop_rule = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _ret, labels, centers = cv.kmeans(samples, 8, None, stop_rule, 10,
                                      cv.KMEANS_RANDOM_CENTERS)
    # Back to uint8 and the original image shape.
    centers = np.uint8(centers)
    quantized = centers[labels.flatten()]
    return quantized.reshape((img.shape))
'''
image= cv.imread("huanyuan32.jpg")
cv.imshow("image",image)
img1=gaussi_blur(image)
img2=k_means(img1)
cv.imwrite("svmwo1.jpg",img2)
img3=balance(img2)
cv.imshow("balance",img3)
img4=average(img3)
#cv.imwrite("svmwo5.jpg",img4)
'''
def main(src):
    """Extract per-sticker HSV+RGB features from one frame into listall."""
    img1=gaussi_blur(src)
    img2=k_means(img1)
    # One (x, y) pair per sticker cell of the 3x3 grid.
    for x,y in (Outer_frame):
        # NOTE: these assignments rebind the module-level names locally;
        # only listall (mutated via append) is actually shared.
        listhsv=get_averageHSV(img2,x,y)
        listrgb=get_averageBGR(img2,x,y)
        listrgb = list(map(int,listrgb))
        listnet=listhsv+listrgb
        listall.append(listnet)
#print(listall)
#########################多线程尝试#############################################
cube_list_hsv=[[] for _ in range (6)]
cube_list_bgr=[[] for _ in range (6)]
cube_list_all=[[] for _ in range (6)]
cube_list_net=[[] for _ in range (6)]
dict_data={"1":cube_list_all[0],'2':cube_list_all[1],'3':cube_list_all[2],
'4':cube_list_all[3],'5':cube_list_all[4],'6':cube_list_all[5]
}
####多线程分别进行魔方6个面的识别
def job1():
    """Worker for face 1: images 1-28 -> feature rows in cube_list_all[0]."""
    for img_no in range(1, 29):
        img_path = read_picture(img_no)
        print(img_path, end='\n')
        cube_list_hsv[0] = []
        cube_list_bgr[0] = []
        cube_list_net[0] = []
        frame = cv.imread(img_path)
        blurred = gaussi_blur(frame)
        quantized = k_means(blurred)
        for x, y in Outer_frame:
            cube_list_hsv[0] = get_averageHSV(quantized, x, y)
            cube_list_bgr[0] = list(map(int, get_averageBGR(quantized, x, y)))
            cube_list_net[0] = cube_list_hsv[0] + cube_list_bgr[0]
            cube_list_all[0].append(cube_list_net[0])
def job2():
    """Worker for face 2: images 29-62 -> feature rows in cube_list_all[1]."""
    for img_no in range(29, 63):
        img_path = read_picture(img_no)
        cube_list_hsv[1] = []
        cube_list_bgr[1] = []
        cube_list_net[1] = []
        frame = cv.imread(img_path)
        blurred = gaussi_blur(frame)
        quantized = k_means(blurred)
        for x, y in Outer_frame:
            cube_list_hsv[1] = get_averageHSV(quantized, x, y)
            cube_list_bgr[1] = list(map(int, get_averageBGR(quantized, x, y)))
            cube_list_net[1] = cube_list_hsv[1] + cube_list_bgr[1]
            cube_list_all[1].append(cube_list_net[1])
def job3():
    """Worker for face 3: images 63-90 -> feature rows in cube_list_all[2]."""
    for img_no in range(63, 91):
        img_path = read_picture(img_no)
        print(img_path, end='\n')
        cube_list_hsv[2] = []
        cube_list_bgr[2] = []
        cube_list_net[2] = []
        frame = cv.imread(img_path)
        blurred = gaussi_blur(frame)
        quantized = k_means(blurred)
        for x, y in Outer_frame:
            cube_list_hsv[2] = get_averageHSV(quantized, x, y)
            cube_list_bgr[2] = list(map(int, get_averageBGR(quantized, x, y)))
            cube_list_net[2] = cube_list_hsv[2] + cube_list_bgr[2]
            cube_list_all[2].append(cube_list_net[2])
def job4():
    """Worker for face 4: images 91-165 -> feature rows in cube_list_all[3]."""
    for img_no in range(91, 166):
        img_path = read_picture(img_no)
        print(img_path, end='\n')
        cube_list_hsv[3] = []
        cube_list_bgr[3] = []
        cube_list_net[3] = []
        frame = cv.imread(img_path)
        blurred = gaussi_blur(frame)
        quantized = k_means(blurred)
        for x, y in Outer_frame:
            cube_list_hsv[3] = get_averageHSV(quantized, x, y)
            cube_list_bgr[3] = list(map(int, get_averageBGR(quantized, x, y)))
            cube_list_net[3] = cube_list_hsv[3] + cube_list_bgr[3]
            cube_list_all[3].append(cube_list_net[3])
def job5():
    """Worker for face 5: images 205-303 -> feature rows in cube_list_all[4]."""
    for img_no in range(205, 304):
        img_path = read_picture(img_no)
        print(img_path, end='\n')
        cube_list_hsv[4] = []
        cube_list_bgr[4] = []
        cube_list_net[4] = []
        frame = cv.imread(img_path)
        blurred = gaussi_blur(frame)
        quantized = k_means(blurred)
        for x, y in Outer_frame:
            cube_list_hsv[4] = get_averageHSV(quantized, x, y)
            cube_list_bgr[4] = list(map(int, get_averageBGR(quantized, x, y)))
            cube_list_net[4] = cube_list_hsv[4] + cube_list_bgr[4]
            cube_list_all[4].append(cube_list_net[4])
def job6():
    """Worker for face 6: images 304-415 -> feature rows in cube_list_all[5]."""
    for img_no in range(304, 416):
        img_path = read_picture(img_no)
        print(img_path, end='\n')
        cube_list_hsv[5] = []
        cube_list_bgr[5] = []
        cube_list_net[5] = []
        frame = cv.imread(img_path)
        blurred = gaussi_blur(frame)
        quantized = k_means(blurred)
        for x, y in Outer_frame:
            cube_list_hsv[5] = get_averageHSV(quantized, x, y)
            cube_list_bgr[5] = list(map(int, get_averageBGR(quantized, x, y)))
            cube_list_net[5] = cube_list_hsv[5] + cube_list_bgr[5]
            cube_list_all[5].append(cube_list_net[5])
'''
q=Queue()
threads=[]
t1 = threading.Thread(target=job1,name=('t1',))
t2 = threading.Thread(target=job2,name=('t2',))
t3 = threading.Thread(target=job3,name=('t3',))
t4 = threading.Thread(target=job4,name=('t4',))
t5 = threading.Thread(target=job5,name=('t5',))
t6 = threading.Thread(target=job6,name=('t6',))
t1.start()
threads.append(t1)
t2.start()
threads.append(t2)
t3.start()
threads.append(t3)
t4.start()
threads.append(t4)
t5.start()
threads.append(t5)
t6.start()
threads.append(t6)
for thread in threads:
thread.join()
print('all pictures are taken\n')
'''
#every_data_contain_number
#for key in dict_data:
number_of_dict=len(dict_data)
# Six scratch tables for text storage; json cannot serialize numpy int32,
# so every value is converted to a plain int below.
store_data=[[] for _ in range (number_of_dict)]
# Reshape the collected feature rows into the per-face list layout.
for circule_num,value in zip([x for x in range(0,6)],dict_data.values()):
    store_data[circule_num] = [[0,0,0,0,0,0] for i in range (len(value))]
    for first in range(len(value)):
        for two in range(len(value[first])):
            store_data[circule_num][first][two]=int(value[first][two])
# Persist each face's samples as data0.json ... data5.json.
for json_number in range (6):
    file_name="data{0}.json".format(json_number)
    with open(file_name,"w") as f:
        json.dump(store_data[json_number],f)
        f.close()
'''
for i in range(1,29):
path=read_picture(i)
print (path)
listhsv.clear()#清空hsv的tup
listrgb.clear()#清空rgb的tup
listnet.clear()#清空节点的tup
src = cv.imread(path)
while (src is None):
src = cv.imread(path)
if not src:
print('error reading picture')
sys.exit()
main(src)
print(listall)
print ('个数是')
list_num=len(listall)
store = [[0,0,0,0,0,0] for i in range (list_num)]
for list_1 in range(len(listall)):
for list_2 in range(len(listall[list_1])):
store[list_1][list_2]=int(listall[list_1][list_2])
'''
'''
filename='test.json'
with open(filename,'w') as f:
json.dump(store,f)
f.close()
'''
'''
with open('test(副本).txt','w') as f1:
for temp in listall:
print(type(temp[0]))
data='{},{},{},{},{},{}\n'.format(temp[0],temp[1],temp[2],temp[3],temp[4],temp[5])
f1.write(data)
f1.close()
'''
| xiaomoxiao/magic-cube | MultiThreading/code/getdata.py | getdata.py | py | 14,183 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.JSONEncoder",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "numpy.integer",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.floating",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "numpy.nd... |
13782113749 | from pydantic import BaseModel, validator
import datetime
class Room(BaseModel):
    """Pydantic model for a hotel room plus raw-SQL statement builders.

    NOTE(review): every SQL helper interpolates values straight into the
    statement text; switch to parameterized queries if any value can come
    from untrusted input.
    """
    final_date: datetime.datetime = None
    initial_date: datetime.datetime = None
    size_m2: float = None
    location: str = None
    mensal_rent: float = None
    weekly_rent: float = None
    room_id: int = None
    deposit_area: float = None
    room_TYPE: str = None
    hotel_id: int = None
    company_cnpj: str = None
    @staticmethod
    def fromList(list):
        """Build a Room from a positional list in table-column order."""
        return Room(
            final_date=list[0],
            initial_date=list[1],
            size_m2=list[2],
            location=list[3],
            mensal_rent=list[4],
            weekly_rent=list[5],
            room_id=list[6],
            deposit_area=list[7],
            room_TYPE=list[8],
            hotel_id=list[9],
            company_cnpj=list[10],
        )
    def __repr__(self):
        """Multi-line human-readable dump of every field.

        Fix: non-string fields (floats/ints) go through str() — the
        original concatenated them directly and raised TypeError.
        """
        details = '{\n'
        details += 'final_date: ' + self.final_date.strftime('%d/%m/%Y') + '\n'
        details += 'initial_date: ' + self.initial_date.strftime('%d/%m/%Y') + '\n'
        details += 'size_m2: ' + str(self.size_m2) + '\n'
        details += 'location: ' + str(self.location) + '\n'
        details += 'mensal_rent: ' + str(self.mensal_rent) + '\n'
        details += 'weekly_rent: ' + str(self.weekly_rent) + '\n'
        details += 'room_id: ' + str(self.room_id) + '\n'
        details += 'deposit_area: ' + str(self.deposit_area) + '\n'
        details += 'room_TYPE: ' + str(self.room_TYPE) + '\n'
        details += 'hotel_id: ' + str(self.hotel_id) + '\n'
        details += 'company_cnpj: ' + str(self.company_cnpj) + '\n'
        details += '}'
        return details
    def insertSql(self) -> str:
        """Render "insert into room values (...)" with fields in table order.

        Fix: None dates no longer crash — the original called .strftime()
        on the value before testing it, raising AttributeError for the
        default None. Falsy values (None, 0, '') render as NULL, as before.
        """
        def fmt(value):
            # Quote present values; render missing/falsy ones as SQL NULL.
            return '"{}"'.format(value) if value else 'NULL'
        values = [
            fmt(self.final_date),
            fmt(self.initial_date),
            fmt(self.size_m2),
            fmt(self.location),
            fmt(self.mensal_rent),
            fmt(self.weekly_rent),
            fmt(self.room_id),
            fmt(self.deposit_area),
            fmt(self.room_TYPE),
            fmt(self.hotel_id),
            fmt(self.company_cnpj),
        ]
        return 'insert into room values (' + ','.join(values) + ');'
    @staticmethod
    def _conditionsSql(where: dict) -> str:
        """Render the "k = 'v' " pairs of a WHERE clause (trailing space kept)."""
        conditions = ''
        for key, value in where.items():
            conditions += "{} = '{}' ".format(key, value)
        return conditions
    @staticmethod
    def querySql(where: dict, attr: list = []) -> str:
        """Render a SELECT; *attr* defaults to every column, *where* may be empty."""
        if len(attr) == 0:
            attr = ['final_date', 'initial_date', 'size_m2', 'location', 'mensal_rent', 'weekly_rent', 'room_id', 'deposit_area', 'room_TYPE', 'hotel_id', 'company_cnpj']
        sql = 'select {} '.format(','.join(attr))
        sql += 'from room '
        if len(where.keys()):
            sql += 'where ' + Room._conditionsSql(where)
        sql += ';'
        return sql
    @staticmethod
    def deleteSql(where: dict) -> str:
        """Render a DELETE (the WHERE keyword is emitted even for an empty
        dict, matching the original behavior)."""
        sql = 'delete from room '
        sql += 'where ' + Room._conditionsSql(where)
        sql += ';'
        return sql
    @staticmethod
    def updateSql(attrDict:dict, where:dict) -> str:
        """Render an UPDATE with a SET list and an optional WHERE clause."""
        sql = 'update room '
        sql += 'set '
        for key, value in attrDict.items():
            sql += "{} = '{}' ".format(key, value)
        if len(where.keys()):
            sql += 'where ' + Room._conditionsSql(where)
        sql += ';'
        return sql
    @validator("final_date", pre=True)
    def parse_final_date(cls, value):
        """Accept final_date as a 'dd/mm/yyyy' string."""
        return datetime.datetime.strptime(
            value,
            "%d/%m/%Y"
        )
    @validator("initial_date", pre=True)
    def parse_initial_date(cls, value):
        """Accept initial_date as a 'dd/mm/yyyy' string."""
        return datetime.datetime.strptime(
            value,
            "%d/%m/%Y"
        )
    @staticmethod
    def getKeys() -> list[str]:
        """Primary-key column names of the room table."""
        return ['room_id']
| JulioHey/Banco-de-Dados---EP | server/model/room.py | room.py | py | 4,807 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "datetim... |
14854743181 | import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
from datetime import datetime
import wikipedia #pip install wikipedia
import webbrowser
import os
import smtplib
import psutil
from pygame import mixer
import json
import requests
import time
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
    """Speak *audio* aloud through the module-level TTS engine (blocking)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the local time of day, then introduce."""
    hour = datetime.now().hour
    if hour < 12:
        speak("Good Morning!")
    elif hour < 18:
        speak("Good Afternoon!")
    else:
        speak("Good Evening!")
    speak("I am Jarvis Sir. Please tell me how may I help you")
def takeCommand():
    """Listen on the microphone and return the recognized text.

    Returns the literal string "None" when recognition fails.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source, duration=0.2)
        print("Listening...")
        recognizer.energy_threshold = 300
        recognizer.pause_threshold = 1
        audio = recognizer.listen(source)
    try:
        print("Recognizing...")
        query = recognizer.recognize_google(audio, language='en-in')
        print(f"User said: {query}\n")
        return query
    except Exception:
        print("Say that again please...")
        return "None"
def sendEmail(to, content):
    """Send *content* to address *to* via Gmail SMTP with STARTTLS.

    NOTE(review): credentials are hard-coded placeholders; load them from
    environment variables or a config file before real use.
    Fix: the connection is now always closed — the original leaked it
    whenever login/sendmail raised.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.ehlo()
        server.starttls()
        server.login('your email', 'password')
        server.sendmail('your email', to, content)
    finally:
        server.close()
def musiconloop(file, stopper):
    """Play the given audio file and block until the user types *stopper*."""
    mixer.init()
    mixer.music.load(file)
    mixer.music.play()
    # Keep reading console input until the stop word arrives.
    while input() != stopper:
        pass
    mixer.music.stop()
if __name__ == "__main__":
    wishMe()
    # Timestamps and intervals for the periodic health reminders.
    init_battery = time.time()
    battery_secs = 5*60
    init_water = time.time()
    init_eyes = time.time()
    init_exercise = time.time()
    watersecs = 2 * 60
    exsecs = 20*60
    eyessecs = 10*60
    while True:
        # if 1:
        query = takeCommand().lower()
        # Logic for executing tasks based on query
        if 'wikipedia' in query:
            speak('Searching Wikipedia...')
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=2)
            speak("According to Wikipedia")
            print(results)
            speak(results)
        elif 'open youtube' in query:
            webbrowser.open("youtube.com")
        elif 'open google' in query:
            webbrowser.open("google.com")
        elif 'open stackoverflow' in query:
            webbrowser.open("stackoverflow.com")
        elif 'play music' in query:
            # music_dir = 'D:\\Non Critical\\songs\\Favorite Songs2'
            # songs = os.listdir(music_dir)
            # print(songs)
            # os.startfile(os.path.join(music_dir, songs[0]))
            webbrowser.open("https://open.spotify.com/collection/tracks")
        elif 'time' in query:
            strTime = datetime.now().strftime("%H:%M:%S")
            print(strTime)
            speak(f"Sir, the time is {strTime}")
        elif 'open vs code' in query:
            codePath = "C:\\Users\\ASUS\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
            os.startfile(codePath)
        elif 'email to yash' in query:
            try:
                speak("What should I say?")
                content = takeCommand()
                to = "receiver's email"
                sendEmail(to, content)
                speak("Email has been sent!")
            except Exception as e:
                print(e)
                speak("Sorry Sir. I am not able to send this email")
        elif 'news' in query:
            # NOTE(review): the API key is hard-coded in the URL below —
            # move it to configuration.
            speak('News for Today .. ')
            speak('So first news is..')
            url = 'https://newsapi.org/v2/top-headlines?country=in&apiKey=22fa274e85764348aa45e21d5c3026d3'
            news = requests.get(url).text
            news_dict = json.loads(news)
            arts = news_dict['articles']
            # n = len(arts)
            i = 0
            # Read out at most 5 headlines.
            for article in arts:
                time.sleep(1)
                if i == 5 - 1:
                    speak("Today's last News is..")
                    print(article['title'])
                    speak(article['title'])
                    break
                print(article['title'])
                speak(article['title'])
                i += 1
                time.sleep(1)
                if i != 5 - 1:
                    speak("Moving to the next news..")
        elif 'exit' in query:
            speak('Thank You Sir. Have a nice day')
            break
        # Periodic reminders, checked once per voice-command cycle.
        battery = psutil.sensors_battery()
        percent = battery.percent
        if percent < 30:
            if time.time() - init_battery > battery_secs:
                speak(f"Sir Please Charge Your Laptop {percent}% battery remaining")
                init_battery = time.time()
        if time.time() - init_water > watersecs:
            speak('Sir Please Drink Water')
            print("Water Drinking time. Enter 'drank' to stop the alarm.")
            musiconloop('Drink Water And Mind My Business.mp3', 'drank')
            init_water = time.time()
        if time.time() - init_eyes >eyessecs:
            speak('Eye exercise time')
            print("Eye exercise time. Enter 'doneeyes' to stop the alarm.")
            musiconloop('Open Your Eyes ALARM.mp3', 'doneeyes')
            init_eyes = time.time()
        if time.time() - init_exercise > exsecs:
            speak('Physical Activity Time')
            print("Physical Activity Time. Enter 'donephy' to stop the alarm.")
            musiconloop('Workout Alarm.mp3', 'donephy')
            init_exercise = time.time()
| yash358/J.A.R.V.I.S | main.py | main.py | py | 5,838 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyttsx3.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "speech_recogniti... |
16044252945 |
from pymysql import connect
import yaml
import logging.config
class DB():
    """Thin pymysql helper used to reset and seed the test database."""
    def __init__(self):
        """Connect to the local django_restful database.

        NOTE(review): credentials are hard-coded; move them to config.
        """
        logging.info('===================== init data =====================')
        logging.info("connect db")
        self.conn = connect(host='127.0.0.1', user='root', password='Zx123456', db='django_restful')
    def clear(self, table_name):
        """Truncate *table_name*, temporarily disabling FK checks.

        NOTE(review): table_name is interpolated into SQL — call only with
        trusted, hard-coded table names.
        """
        logging.info("clear db...")
        clear_sql = 'truncate ' + table_name + ';'  # keep the space after truncate
        with self.conn.cursor() as cursor:
            # Disable foreign-key constraints so truncation cannot fail.
            cursor.execute("set foreign_key_checks=0;")
            cursor.execute(clear_sql)
            self.conn.commit()
    def insert(self, table_name, table_data):
        """Insert one row; *table_data* maps column name -> value.

        Fixes: the caller's dict is no longer mutated (values are quoted
        into a fresh dict), and the log call now uses a %-style format so
        both columns and values are actually rendered — the original
        passed an extra arg with no placeholder, which logging cannot format.
        """
        logging.info("insert data...")
        quoted = {key: "'" + str(value) + "'" for key, value in table_data.items()}
        columns = ','.join(quoted.keys())
        values = ','.join(quoted.values())
        logging.info("%s %s", columns, values)
        insert_sql = 'insert into ' + table_name + '('+columns+')'+'values'+'('+values+')'
        logging.info(insert_sql)
        with self.conn.cursor() as cursor:
            cursor.execute(insert_sql)
            self.conn.commit()
    def close(self):
        """Close the underlying database connection."""
        logging.info("close db")
        self.conn.close()
        logging.info("===========init finisher!===========")
    def init_data(self, datas):
        """Clear each table then insert its rows; closes the connection after."""
        logging.info("init ab...")
        for table, data in datas.items():
            self.clear(table)
            for d in data:
                self.insert(table, d)
        self.close()
if __name__ == '__main__':
    db = DB()
    # Debug calls for the individual methods:
    # db.clear("api_user")
    # db.clear("api_group")
    # user_data = {'id': 1, 'username': '51zxw', 'email': '51zxw@163.com', 'groups': 'http://127.0.0.1:8000/groups/1'}
    # db.insert("api_user", user_data)
    # group_data = {'id': 1, 'name': 'Developer'}
    # db.insert('api_group', group_data)
    # db.close()
    # Seed the database from the YAML fixture file.
    f = open('datas.yaml', 'r', encoding="utf-8")
    datas = yaml.load(f, Loader=yaml.FullLoader)  # FullLoader avoids the unsafe-load warning
    db.init_data(datas)
| langlixiaobailongqaq/django_restful | api/test_project/mysql_action.py | mysql_action.py | py | 2,049 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.config.info",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "logging.config.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.config... |
17386735173 | import numpy as np
import math
from skimage import io, util
import heapq
def randomPatch(texture, patchLength):
    """Pick a uniformly random patchLength x patchLength patch from texture."""
    rows, cols, _ = texture.shape
    top = np.random.randint(rows - patchLength)
    left = np.random.randint(cols - patchLength)
    return texture[top:top + patchLength, left:left + patchLength]
def L2OverlapDiff(patch, patchLength, overlap, res, y, x):
    """Sum of squared differences over the overlap region with res at (y, x).

    The corner is counted by both the left and top strips, so its
    contribution is subtracted once.
    """
    total = 0
    if x > 0:
        left_strip = patch[:, :overlap] - res[y:y + patchLength, x:x + overlap]
        total += np.sum(left_strip ** 2)
    if y > 0:
        top_strip = patch[:overlap, :] - res[y:y + overlap, x:x + patchLength]
        total += np.sum(top_strip ** 2)
    if x > 0 and y > 0:
        corner = patch[:overlap, :overlap] - res[y:y + overlap, x:x + overlap]
        total -= np.sum(corner ** 2)
    return total
def randomBestPatch(texture, patchLength, overlap, res, y, x):
    """Scan every patch position and return the one with minimal overlap error."""
    rows, cols, _ = texture.shape
    errors = np.zeros((rows - patchLength, cols - patchLength))
    for top in range(rows - patchLength):
        for left in range(cols - patchLength):
            candidate = texture[top:top + patchLength, left:left + patchLength]
            errors[top, left] = L2OverlapDiff(candidate, patchLength,
                                              overlap, res, y, x)
    best_top, best_left = np.unravel_index(np.argmin(errors), errors.shape)
    return texture[best_top:best_top + patchLength,
                   best_left:best_left + patchLength]
def minCutPath(errors):
    """Minimum-cost vertical seam through *errors* (Dijkstra).

    Returns the list of column indices, one per row, moving at most one
    column left or right between consecutive rows.
    """
    frontier = [(cost, [col]) for col, cost in enumerate(errors[0])]
    heapq.heapify(frontier)
    rows, cols = errors.shape
    visited = set()
    while frontier:
        cost, path = heapq.heappop(frontier)
        depth = len(path)
        col = path[-1]
        if depth == rows:
            return path
        for step in (-1, 0, 1):
            nxt = col + step
            if 0 <= nxt < cols and (depth, nxt) not in visited:
                heapq.heappush(frontier,
                               (cost + errors[depth, nxt], path + [nxt]))
                visited.add((depth, nxt))
def minCutPath2(errors):
    """Dynamic-programming variant of minCutPath (currently unused).

    Pads the columns with +inf so np.roll at the borders cannot wrap into
    valid costs; returns an iterator of column indices, one per row.
    """
    # dynamic programming, unused
    errors = np.pad(errors, [(0, 0), (1, 1)],
                    mode='constant',
                    constant_values=np.inf)
    cumError = errors[0].copy()
    paths = np.zeros_like(errors, dtype=int)
    for i in range(1, len(errors)):
        M = cumError
        L = np.roll(M, 1)
        R = np.roll(M, -1)
        # optimize with np.choose?
        cumError = np.min((L, M, R), axis=0) + errors[i]
        paths[i] = np.argmin((L, M, R), axis=0)
    # argmin produced 0/1/2 for left/middle/right; shift to -1/0/+1 steps.
    paths -= 1
    # NOTE: this local list shadows the sibling function minCutPath.
    minCutPath = [np.argmin(cumError)]
    for i in reversed(range(1, len(errors))):
        minCutPath.append(minCutPath[-1] + paths[i][minCutPath[-1]])
    # Shift indices back by 1 to undo the padding offset.
    return map(lambda x: x - 1, reversed(minCutPath))
def minCutPatch(patch, patchLength, overlap, res, y, x):
    """Copy res pixels into *patch* on the far side of the minimum-error seam.

    Returns a new blended patch; *res* itself is not modified.
    """
    patch = patch.copy()
    dy, dx, _ = patch.shape
    minCut = np.zeros_like(patch, dtype=bool)
    if x > 0:
        left = patch[:, :overlap] - res[y:y+dy, x:x+overlap]
        leftL2 = np.sum(left**2, axis=2)
        # Mask everything left of the vertical seam.
        for i, j in enumerate(minCutPath(leftL2)):
            minCut[i, :j] = True
    if y > 0:
        up = patch[:overlap, :] - res[y:y+overlap, x:x+dx]
        upL2 = np.sum(up**2, axis=2)
        # Mask everything above the horizontal seam (transposed search).
        for j, i in enumerate(minCutPath(upL2.T)):
            minCut[:i, j] = True
    np.copyto(patch, res[y:y+dy, x:x+dx], where=minCut)
    return patch
s = "https://raw.githubusercontent.com/axu2/image-quilting/master/"
def quilt(texture, patchLength, numPatches, mode="cut", sequence=False):
    """Synthesize a texture by quilting a grid of patches.

    mode: "random" | "best" | "cut"; sequence=True shows each step.
    """
    texture = util.img_as_float(texture)
    overlap = patchLength // 6
    numPatchesHigh, numPatchesWide = numPatches
    # Output size: adjacent patches share `overlap` pixels on each edge.
    h = (numPatchesHigh * patchLength) - (numPatchesHigh - 1) * overlap
    w = (numPatchesWide * patchLength) - (numPatchesWide - 1) * overlap
    res = np.zeros((h, w, texture.shape[2]))
    for i in range(numPatchesHigh):
        for j in range(numPatchesWide):
            y = i * (patchLength - overlap)
            x = j * (patchLength - overlap)
            # The first patch is always random; afterwards choose per mode.
            if i == 0 and j == 0 or mode == "random":
                patch = randomPatch(texture, patchLength)
            elif mode == "best":
                patch = randomBestPatch(texture, patchLength, overlap, res, y, x)
            elif mode == "cut":
                patch = randomBestPatch(texture, patchLength, overlap, res, y, x)
                patch = minCutPatch(patch, patchLength, overlap, res, y, x)
            res[y:y+patchLength, x:x+patchLength] = patch
            if sequence:
                io.imshow(res)
                io.show()
    return res
def quiltSize(texture, patchLength, shape, mode="cut"):
    """Quilt to at least the requested (h, w) shape, then crop to it."""
    overlap = patchLength // 6
    h, w = shape
    # NOTE(review): the trailing "or 1" only triggers when ceil(...)+1 == 0,
    # which cannot happen for positive sizes — presumably defensive; verify.
    numPatchesHigh = math.ceil((h - patchLength) / (patchLength - overlap)) + 1 or 1
    numPatchesWide = math.ceil((w - patchLength) / (patchLength - overlap)) + 1 or 1
    res = quilt(texture, patchLength, (numPatchesHigh, numPatchesWide), mode)
    return res[:h, :w]
# Demo: fetch the sample texture from the repo and run each quilting mode.
texture = io.imread(s+"test.png")
io.imshow(texture)
io.show()
io.imshow(quilt(texture, 25, (6, 6), "random"))
io.show()
io.imshow(quilt(texture, 25, (6, 6), "best"))
io.show()
io.imshow(quilt(texture, 20, (6, 6), "cut"))
io.show()
io.imshow(quilt(texture, 20, (3, 3), "cut", True))
io.show()
{
"api_name": "numpy.random.randint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random... |
22546241259 | from PySide2.QtUiTools import QUiLoader #pip3 install PySide2
from PySide2.QtWidgets import QApplication, QTableWidgetItem
from PySide2.QtCore import QFile, QIODevice, QTimer
from PySide2.QtWidgets import QFileDialog, QMessageBox
import math
from PySide2.QtCore import QStringListModel
import sys
import os
from PySide2.QtGui import QIcon, QPixmap
import requests
put = os.path.dirname(os.path.realpath(__file__)) + "/"#Путь- (part-1)
R = -1
U_1 = 0
U_2 = 0
group_list = []
import recording_spark_api
def sex(SSS, window,target):
    """Enable the group 'revert' button (pushButton_7) only when the combo
    selection *SSS* differs from the target user's current group index."""
    ###window.pushButton_2.setEnabled(False)
    print(SSS)
    a = 0
    # Find the index of the target's group id (target[5]) in group_list.
    for m in group_list:
        if m[0] == target[5]:
            break
        a = a + 1
    if SSS != a:
        window.pushButton_7.setEnabled(True)
    else:
        window.pushButton_7.setEnabled(False)
def SAS(window):
    """Debug helper: print both radio-button states, then force radioButton_2 on."""
    m = window.radioButton.isChecked()
    print(m)
    m = window.radioButton_2.isChecked()
    print(m)
    window.radioButton_2.setChecked(1)
def test(window, target, IM):
    """Create (IM == 0 or 2) or edit (IM == 1) a user from the dialog fields.

    On success stores the result in the module-level R (new user id, or 0
    for an edit) and closes the window; on failure shows an error dialog.
    For edits, only fields that differ from *target* are sent (None means
    "unchanged"). Disabling the account or changing the password asks for
    confirmation first, since it closes all open sessions.
    """
    global R
    if IM == 0 or IM == 2:
        #print(window.comboBox.currentIndex())
        #print(window.comboBox.currentText())
        #print(window.comboBox_2.currentIndex())
        #print(window.comboBox_2.currentText())
        group_id = group_list[window.comboBox.currentIndex()][0]
        E_1 = window.checkBox.isChecked()
        E_2 = window.checkBox_2.isChecked()
        # add(user_name, email, password, avatar, active, group_id)
        M = recording_spark_api.user.add(window.lineEdit.text(), window.lineEdit_2.text(), window.lineEdit_3.text(), E_1, E_2, group_id)
        print(M.number)
        if M.number == 200:
            R = M.response.user_id
            window.close()
            #return R
        else:
            msg = QMessageBox(window)
            msg.setWindowTitle(f"ERROE {M.number}")
            msg.setText(f" \n {M.response.text} \n ")
            msg.exec_()
    elif IM == 1:
        #group_id = target email, password, avatar, active, group_id
        group_id = group_list[window.comboBox.currentIndex()][0]
        E_1 = window.checkBox.isChecked()
        E_2 = window.checkBox_2.isChecked()
        # None for every field that matches the stored target values.
        if window.lineEdit_3.text() == "" or window.lineEdit_3.text() == None:
            password = None
        else:
            password = window.lineEdit_3.text()
        if window.lineEdit.text() == target[1]:
            user_name = None
        else:
            user_name = window.lineEdit.text()
        if window.lineEdit_2.text() == target[2]:
            email = None
        else:
            email = window.lineEdit_2.text()
        if window.checkBox.isChecked() == target[3]:
            avatar = None
        else:
            avatar = window.checkBox.isChecked()
        print(window.checkBox_2.isChecked(), target[4])
        if window.checkBox_2.isChecked() == target[4]:
            active = None
        else:
            active = window.checkBox_2.isChecked()
        if group_list[window.comboBox.currentIndex()][0] == target[5]:
            group_id = None
        else:
            group_id = group_list[window.comboBox.currentIndex()][0]
        # Deactivating the account or changing its password closes all open
        # sessions, so ask for explicit confirmation first.
        if (target[4] == 1 and window.checkBox_2.isChecked() == False) or (password != None):
            msg = QMessageBox.question(window, " !!!ВНИМАНИЕ!!! ",
            "Вы пытаетесь отключить/сменить пароль у этой учётной запеси!\nВсе открытые сесии будут закрыты\nПроболжать ?", QMessageBox.Yes |
            QMessageBox.No, QMessageBox.No)
            if msg == QMessageBox.Yes:
                M = recording_spark_api.user.edit(target[0],user_name, email, password, avatar, active, group_id)
                print(M.number)
                if M.number == 200:
                    R = 0
                    window.close()
                    #return R
                else:
                    msg = QMessageBox(window)
                    msg.setWindowTitle(f"ERROE {M.number}")
                    msg.setText(f" \n {M.response.text} \n ")
                    msg.exec_()
        else:
            M = recording_spark_api.user.edit(target[0],user_name, email, password, avatar, active, group_id)
            print(M.number)
            if M.number == 200:
                R = 0
                window.close()
                #return R
            else:
                msg = QMessageBox(window)
                msg.setWindowTitle(f"ERROE {M.number}")
                msg.setText(f" \n {M.response.text} \n ")
                msg.exec_()
def SAS_r(window, target, N):
    """Reset one form field back to the original value held in *target*.

    N selects the field: 0=name, 1=email, 2=active checkbox, 3=group combo,
    4=password, 5=avatar checkbox. The matching "save" button is disabled
    again after the reset.
    """
    if N == 0:
        window.lineEdit.setText(target[1])
    elif N == 1:
        window.lineEdit_2.setText(target[2])
    elif N == 2:
        window.checkBox_2.setChecked(target[4])
        window.pushButton_6.setEnabled(False)
    elif N == 3:
        # Find the combo index of the user's original group.
        a = 0
        for m in group_list:
            if m[0] == target[5]:
                break
            a = a + 1
        # NOTE(review): if target[5] is not in group_list, a == len(group_list)
        # and setCurrentIndex points past the last item — confirm upstream.
        window.comboBox.setCurrentIndex(a)
        window.pushButton_7.setEnabled(False)
    elif N == 4:
        window.lineEdit_3.setText("")
        window.pushButton_4.setEnabled(False)
    elif N == 5:
        window.checkBox.setChecked(target[3])
        window.pushButton_5.setEnabled(False)
    # Bug fix: the original ended with `print(U_1)`, but U_1 is only ever a
    # local variable inside start(), so every call raised NameError here.
    print(N)
def start(window, target, IM):
    """Populate the user dialog after it is loaded.

    Fills the group combo box from the API; in edit mode (non-empty *target*)
    pre-fills the user's fields and disables the per-field save buttons; in
    create mode (IM in (0, 2)) removes those buttons entirely.
    """
    print(f"target - {target}")
    global group_list
    Alo = recording_spark_api.ls_group()
    if Alo.number == 200:
        group_list = Alo.response.matrix
    # NOTE(review): if the group request fails (number != 200), group_list may
    # be undefined and the loops below raise NameError — confirm intent.
    window.lineEdit.setPlaceholderText("Имя")
    window.lineEdit_2.setPlaceholderText("")
    window.lineEdit_3.setPlaceholderText("")
    for mlo in group_list:
        window.comboBox.addItem(mlo[1])
    if len(target) != 0:
        # Edit mode: pre-fill the form with the selected user's data.
        print(target)
        window.lineEdit.setText(target[1])
        window.lineEdit_2.setText(target[2])
        K = 0
        m = True
        print(f"group_list - {group_list}, {target}")
        # Select the combo entry matching the user's group id (target[5]).
        for p in group_list:
            if target[5] == p[0]:
                window.comboBox.setCurrentIndex(K)
                U_1 = K
                m = False
                break
            K = K + 1
        if m:
            # Group not present in the listing — append it locally and select it.
            group_list.append([target[5],target[6],target[7]])
            window.comboBox.addItem(target[6])
            window.comboBox.setCurrentIndex(K)
        if target[4] == 1:
            window.checkBox_2.setChecked(True)
        if target[3] == 1:
            window.checkBox.setChecked(True)
        print("L")
        # Per-field save buttons start disabled until the field actually changes.
        window.pushButton_2.setEnabled(False)
        window.pushButton_3.setEnabled(False)
        window.pushButton_4.setEnabled(False)
        window.pushButton_5.setEnabled(False)
        window.pushButton_6.setEnabled(False)
        window.pushButton_7.setEnabled(False)
        window.setWindowTitle("ID: {} - {}".format(target[0],target[1]))
    if IM == 0 or IM == 2:
        # Create mode: the per-field buttons make no sense — remove them.
        window.pushButton_2.deleteLater()
        window.pushButton_3.deleteLater()
        window.pushButton_4.deleteLater()
        window.pushButton_5.deleteLater()
        window.pushButton_6.deleteLater()
        window.pushButton_7.deleteLater()
        window.label_7.deleteLater()
        window.setWindowTitle("Создания")
def M(window, target, p):
    """Enable the per-field save button *p* when the field differs from the
    original value stored in *target*; disable it otherwise.

    p selects the field: 0=name, 1=email, 2=password, 3=avatar, 4=active.
    """
    if p == 0:
        window.pushButton_2.setEnabled(window.lineEdit.text() != target[1])
    elif p == 1:
        window.pushButton_3.setEnabled(window.lineEdit_2.text() != target[2])
    elif p == 2:
        # Any non-empty password counts as "changed".
        senha = window.lineEdit_3.text()
        window.pushButton_4.setEnabled(senha is not None and senha != "")
    elif p == 3:
        window.pushButton_5.setEnabled(window.checkBox.isChecked() != bool(target[3]))
    elif p == 4:
        print(window.checkBox_2.isChecked())
        window.pushButton_6.setEnabled(window.checkBox_2.isChecked() != bool(target[4]))
    # TODO: add a check that the extra numeric field is an int (see history).
def M_2(window):
    # Validation handler for create mode: currently a stub (prints an empty
    # line only). The string literal below is dead code kept from an earlier
    # version that validated lineEdit_4 as an int and toggled the confirm
    # button; it is evaluated and discarded at runtime.
    print()
    """
    if window.lineEdit_4.text() != "":
        try:
            namber = int(window.lineEdit_4.text())
        except ValueError:
            window.pushButton.setEnabled(False)
            return 0
    #if window.pushButton.isEnabled():
    if "" == window.lineEdit.text() or window.lineEdit_2.text() == "":
        window.pushButton.setEnabled(False)
    else:
        window.pushButton.setEnabled(True)
    """
def GUI(target, IM, themes):
    """Load the user dialog from its .ui file, apply the theme stylesheet and
    wire up all signal handlers.

    IM selects the mode: 1 = edit an existing user, 0/2 = create a new one.
    """
    ui_file_name = put + "/content/ui/user.ui"
    ui_file = QFile(ui_file_name)
    if not ui_file.open(QIODevice.ReadOnly):
        print("Cannot open {}: {}".format(ui_file_name, ui_file.errorString()))
        sys.exit(-1)
    loader = QUiLoader()
    window = loader.load(ui_file)
    ui_file.close()
    if not window:
        print(loader.errorString())
        sys.exit(-1)
    window.show()
    window.setWindowIcon(QIcon(f"{put}/content/icon/2icon.png"))
    # Bug fix: the theme path was missing the '/' after {put} (every other
    # path in this function uses "{put}/content/..."), so the stylesheet file
    # could never be found. Also use a context manager so the file handle is
    # closed instead of leaked.
    with open(f"{put}/content/themes/{themes}/user_all") as style_file:
        window.setStyleSheet(style_file.read())
    # Defer the data population until the event loop is running.
    QTimer.singleShot(0, lambda: start(window, target, IM))
    window.pushButton.clicked.connect(lambda: test(window, target, IM))
    if IM == 1:
        # Edit mode: per-field reset buttons and change-tracking handlers.
        window.pushButton_2.clicked.connect(lambda: SAS_r(window, target, 0))
        window.pushButton_3.clicked.connect(lambda: SAS_r(window, target, 1))
        window.pushButton_6.clicked.connect(lambda: SAS_r(window, target, 2))
        window.pushButton_7.clicked.connect(lambda: SAS_r(window, target, 3))
        window.pushButton_4.clicked.connect(lambda: SAS_r(window, target, 4))
        window.pushButton_5.clicked.connect(lambda: SAS_r(window, target, 5))
        window.lineEdit.textChanged.connect(lambda: M(window, target, 0))
        window.lineEdit_2.textChanged.connect(lambda: M(window, target, 1))
        window.lineEdit_3.textChanged.connect(lambda: M(window, target, 2))
        window.comboBox.activated.connect(lambda: sex(window.comboBox.currentIndex(), window, target))
        window.checkBox.stateChanged.connect(lambda: M(window, target, 3))
        window.checkBox_2.stateChanged.connect(lambda: M(window, target, 4))
    elif IM == 0 or IM == 2:
        # Create mode: only the simple validation stub is wired.
        window.lineEdit.textChanged.connect(lambda: M_2(window))
        window.lineEdit_2.textChanged.connect(lambda: M_2(window))
    print("SEX")
def open_l(target, IM, themes):
    """Entry point used by the caller to open the user dialog.

    Returns the module-global ``R`` (set by the save handler; 0 otherwise).
    """
    global R
    R = 0
    print(target)
    GUI(target, IM, themes)
    # NOTE(review): GUI() does not run its own event loop here, so R is
    # normally still 0 at this point — confirm the caller's loop handles it.
    print(f"AAAAA{R}")
    return R
| romenskiy2012/recording_spark | Client/GUI_user.py | GUI_user.py | py | 12,345 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "recording_spark_api.use... |
10272333559 | from flask import Flask, g, render_template,\
request, redirect, url_for, flash, session
import hashlib
import os
import mysql.connector
import google.oauth2.credentials
import google_auth_oauthlib.flow
from google.auth.transport import requests
import requests, json
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
from models.usuario import Usuario
from models.usuarioDAO import UsuarioDAO
from models.exercicio import exercicio
from models.exercicioDAO import ExercicioDAO
from models.avaliacao import Avaliacao
from models.avaliacaoDAO import AvaliacaoDAO
app = Flask(__name__)
# NOTE(review): hardcoded secret key — load from env/config outside local dev.
app.secret_key = "senha123"
# Local-development MySQL credentials.
DB_HOST = "localhost"
DB_USER = "root"
DB_NAME = "academiadb"
DB_PASS = ""
# Access-control table consulted by the before_request hook:
# action (first URL path segment) -> {profile: permission}
app.auth = {
    # acao: { perfil:permissao }
    'painel': {0:1, 1:1},
    'logout': {0:1, 1:1},
    'cadastrar_exercicio': {0:1, 1:1},
    'listar_exercicio': {0:1, 1:1},
    'cadastrar_saida': {0:1, 1:1}
}
@app.before_request
def autorizacao():
    """Before-request hook: redirects anonymous users away from protected
    actions listed in ``app.auth``."""
    # First URL path segment identifies the action.
    acao = request.path[1:]
    acao = acao.split('/')
    if len(acao)>=1:
        acao = acao[0]
    acoes = app.auth.keys()
    if acao in list(acoes):
        if session.get('logado') is None:
            return redirect(url_for('login'))
        else:
            tipo = session['logado']
            # NOTE(review): app.auth[acao] is a dict ({perfil: permissao}), so
            # it never equals 0 and this branch is unreachable. The intent was
            # presumably to look up the logged-in user's profile permission,
            # but session['logado'] carries no profile field — confirm.
            if app.auth[acao] == 0:
                return redirect(url_for('painel'))
def get_db():
    """Return the MySQL connection cached on the application context,
    opening it on first use."""
    if getattr(g, '_database', None) is None:
        g._database = mysql.connector.connect(
            host=DB_HOST,
            user=DB_USER,
            password=DB_PASS,
            database=DB_NAME
        )
    return g._database
@app.teardown_appcontext
def close_connection(exception):
    """Close the app-context database connection, if one was opened."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
@app.route('/')
def index():
    """Landing page: shows the login form."""
    return render_template("login.html")
@app.route('/register', methods=['GET', 'POST'])
def register():
    """User sign-up page; inserts a new user on POST."""
    msg = ''
    if request.method == "POST":
        form = request.form
        # NOTE(review): the password is forwarded as received — hashing is
        # presumably handled by the DAO; confirm.
        novo = Usuario(form['nome'], form['sobrenome'], form['email'], form['senha'])
        codigo = UsuarioDAO(get_db()).inserir(novo)
        msg = "Cadastrado com sucesso!" if codigo > 0 else "Erro ao cadastrar!"
    return render_template("register.html", titulo="Cadastro", msg=msg)
@app.route('/cadastrar_treino', methods=['GET', 'POST'])
def cadastrar_exercicios():
    """Exercise registration page; inserts a new exercise on POST."""
    if request.method == "POST":
        form = request.form
        novo = exercicio(form['carga'], form['series'], form['repeticoes'])
        codigo = ExercicioDAO(get_db()).inserir(novo)
        if codigo > 0:
            flash("Cadastrado com sucesso! Código %d" % codigo, "success")
        else:
            flash("Erro ao cadastrar!", "danger")
    return render_template("exercicio-cadastrar.html", titulo="Cadastro de Exercicio")
@app.route('/avaliacao', methods=['GET', 'POST'])
def avaliacao():
    """Body-measurement assessment page; stores the measurements on POST."""
    if request.method == "POST":
        # Collect the measurement fields in the order the model expects.
        campos = ('peso', 'altura', 'braco', 'ombro', 'peito', 'cintura',
                  'quadril', 'abdominal', 'coxaMedial', 'panturrilha')
        medidas = [request.form[campo] for campo in campos]
        # The assessment is tied to the logged-in user's id.
        nova = Avaliacao(*medidas, session['logado']['codigo'])
        codigo = AvaliacaoDAO(get_db()).inserir(nova)
        if codigo > 0:
            flash("Cadastrado com sucesso! Código %d" % codigo, "success")
        else:
            flash("Erro ao cadastrar!", "danger")
    return render_template("avaliacao.html", titulo="Avaliacao")
@app.route('/listar_exercicio', methods=['GET',])
def listar_exercicio():
    """List every registered exercise."""
    exercicios = ExercicioDAO(get_db()).listar()
    return render_template("exercicio-listar.html", exercicios=exercicios)
@app.route('/listaraval', methods=['GET', 'POST'])
def listaraval():
    """List every registered body assessment."""
    avaliacoes = AvaliacaoDAO(get_db()).listar()
    return render_template("listaraval.html", avaliacao=avaliacoes)
@app.route('/cadastrar_saida', methods=['GET', 'POST'])
@login_required
def cadastrar_saida():
    # NOTE(review): PlantaDAO, Saida and SaidaDAO are never imported in this
    # module (only usuario/exercicio/avaliacao models are), so hitting this
    # view raises NameError — likely leftover from another project. Confirm.
    daoUsuario = UsuarioDAO(get_db())
    daoPlanta = PlantaDAO(get_db())
    if request.method == "POST":
        dtsaida = request.form['dtsaida']
        usuario = request.form['usuario']
        planta = request.form['planta']
        saida = Saida(usuario, planta, dtsaida)
        daoSaida = SaidaDAO(get_db())
        codigo = daoSaida.inserir(saida)
        if codigo > 0:
            flash("Saída cadastrada com sucesso! Código %d" % codigo, "success")
        else:
            flash("Erro ao registrar saída!", "danger")
    usuarios_db = daoUsuario.listar()
    plantas_db = daoPlanta.listar()
    return render_template("saida-cadastrar.html",
                           usuarios=usuarios_db, plantas=plantas_db)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login page; authenticates the credentials and stores the user in the
    session on success."""
    if request.method == "POST":
        dao = UsuarioDAO(get_db())
        usuario = dao.autenticar(request.form["email"], request.form["senha"])
        if usuario is None:
            flash("Erro ao efetuar login!")
        else:
            # Session payload: id, display name and e-mail of the user row.
            session['logado'] = {
                'codigo': usuario[0],
                'nome': usuario[3],
                'email': usuario[1],
            }
            return redirect(url_for('painel'))
    return render_template("login.html", titulo="Login")
@app.route('/logout')
def logout():
    """Log the current user out and return to the landing page."""
    # session.clear() already removes every key, including 'logado'; the
    # original's prior `session['logado'] = None` assignment was redundant.
    session.clear()
    return redirect(url_for('index'))
@app.route('/forgot')
def forgot():
    """Password-recovery page (static form only)."""
    return render_template("forgot-password.html", titulo ="Esqueci minha senha")
@app.route('/painel')
def painel():
    """Main dashboard shown after login."""
    return render_template("index.html", titulo="index")
@app.route('/peito', methods=['GET', 'POST'])
def peito():
    """List the chest exercises."""
    exercicios = ExercicioDAO(get_db()).listar_peito()
    return render_template("peito.html", titulo="peito", exercicio=exercicios)
@app.route('/perna', methods=['GET', 'POST'])
def perna():
    """List the leg exercises."""
    exercicios = ExercicioDAO(get_db()).listar_perna()
    return render_template("perna.html", titulo="perna", exercicio=exercicios)
@app.route('/braco', methods=['GET', 'POST'])
def braco():
    """List the arm exercises."""
    exercicios = ExercicioDAO(get_db()).listar_braco()
    return render_template("braco.html", titulo="braco", exercicio=exercicios)
@app.route('/costas', methods=['GET', 'POST'])
def costas():
    """List the back exercises."""
    exercicios = ExercicioDAO(get_db()).listar_costas()
    return render_template("costas.html", titulo="costas", exercicio=exercicios)
@app.route('/abdomen', methods=['GET', 'POST'])
def abdomen():
    """List the abdominal exercises."""
    exercicios = ExercicioDAO(get_db()).listar_abdomen()
    return render_template("abdomen.html", titulo="abdomen", exercicio=exercicios)
@app.route('/alongamento', methods=['GET', 'POST'])
def alongamento():
    """List the stretching exercises."""
    exercicios = ExercicioDAO(get_db()).listar_alongamento()
    return render_template("alongamento.html", titulo="alongamento", exercicio=exercicios)
@app.route('/mainaval')
def mainaval():
    """Assessment section landing page."""
    return render_template("mainaval.html", titulo="mainaval")
@app.route("/login_google")
def login_google():
    """Start the Google OAuth2 flow and redirect the user to the consent page."""
    flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
        'client_secret.json',
        scopes=['https://www.googleapis.com/auth/userinfo.email',
                'https://www.googleapis.com/auth/userinfo.profile', 'openid'])
    # NOTE(review): hardcoded; must match the redirect URI registered in the
    # Google console and the one used by callback().
    flow.redirect_uri = 'http://localhost/callback'
    authorization_url, state = flow.authorization_url(
        # Bug fix: the parameter was misspelled 'acess_type', so offline
        # access was silently never requested.
        access_type='offline',
        include_granted_scopes='true')
    return redirect(authorization_url)
@app.route('/callback')
def callback():
    """OAuth2 redirect handler: exchanges the authorization code for a token,
    fetches the Google profile, creates a local account when needed and logs
    the user in. Redirects to the dashboard on success.
    """
    state = request.args.get('state')
    code = request.args.get('code')
    if code is None or code == '':
        flash('Erro ao logar com conta google', 'danger')
        return redirect(url_for('login'))
    flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
        'client_secret.json',
        scopes=['https://www.googleapis.com/auth/userinfo.email',
                'https://www.googleapis.com/auth/userinfo.profile', 'openid'],
        state=state)
    flow.redirect_uri = url_for('callback', _external=True)
    flow.fetch_token(authorization_response=request.url)
    credentials = flow.credentials
    resposta_api = requests.get("https://www.googleapis.com/oauth2/v1/userinfo?alt=json&access_token=" +
                                credentials.token)
    user_info = resposta_api.json()
    email = str(user_info['email'])
    dao = UsuarioDAO(get_db())
    user = dao.obter(email)
    if user is None:
        # First login with this Google account: create it with a random,
        # hashed throwaway password (the user authenticates via Google).
        hasher = hashlib.sha512()  # renamed: original shadowed builtin `hash`
        senha = os.urandom(50)
        secret = app.config['SECRET_KEY']
        hasher.update(f'{secret}{senha}'.encode('utf-8'))
        senha_criptografa = hasher.hexdigest()
        # Bug fix: argument order now matches register()'s
        # Usuario(nome, sobrenome, email, senha); the original passed the
        # e-mail as sobrenome and the password as e-mail.
        usuario = Usuario(user_info['name'], '', user_info['email'], senha_criptografa)
        novo_id = None
        if usuario.senha and usuario.nome and usuario.email:
            # Bug fix: inserir/obter were called on the class (unbound) —
            # call them on the `dao` instance created above.
            novo_id = dao.inserir(usuario)
        if novo_id is None or novo_id <= 0:
            flash('Erro ao cadastrar usuário', 'danger')
            return redirect(url_for('login'))
        user = dao.obter(user_info['email'])
    # Consistency with login(): store a dict, not the raw DB row, so the
    # before_request hook and templates see the same session shape.
    session['logado'] = {
        'codigo': user[0],
        'nome': user[3],
        'email': user[1],
    }
    flash(f'Seja bem-vindo, {user[1]}!', 'primary')
    # Bug fix: the revoke endpoint is oauth2.googleapis.com (was 'gauth2',
    # which does not resolve).
    requests.post(
        'https://oauth2.googleapis.com/revoke',
        params={'token': credentials.token},
        headers={'content-type': 'application/x-www-form-urlencoded'})
    return redirect(url_for('painel'))
if __name__=='__main__':
    # Development server only: binds all interfaces on port 80 with debug
    # enabled — never run like this in production.
    app.run(host="0.0.0.0", port=80, debug=True)
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
43348915031 | """
Default tests for Env classes
"""
import pytest
import numpy as np
import tensorflow as tf
from sionna.ofdm import PilotPattern
from cebed.envs import OfdmEnv, EnvConfig
def mock_pilot_pattern(config):
    """Dummy pilot pattern: pilots on OFDM symbol 3, all set to one."""
    base_shape = [
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    # Mark symbol index 3 as the (only) pilot-carrying OFDM symbol.
    mask = np.zeros(base_shape, bool)
    mask[..., 3, :] = True
    # One pilot symbol's worth of unit-amplitude pilots.
    pilot_shape = list(base_shape)
    pilot_shape[2] = 1
    pilots = np.zeros(pilot_shape, np.complex64)
    pilots[..., 0, :] = np.ones((config.fft_size,), np.complex64)
    pilots = np.reshape(pilots, [config.n_ues, config.num_streams_per_tx, -1])
    return PilotPattern(mask=mask, pilots=pilots)
@pytest.mark.parametrize("n_ues", [1, 4])
@pytest.mark.parametrize("nr", [1, 4])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_env(n_ues, nr):
    """OfdmEnv returns (y, h) by default and (x, y, h) with return_x=True,
    all with the expected shapes."""
    config = EnvConfig()
    config.num_rx_antennas = nr
    config.n_ues = n_ues
    env = OfdmEnv(config)
    batch_size = 10
    snr_db = 20
    # Default call: received grid y and channel h.
    outputs = env(batch_size, snr_db)
    assert len(outputs) == 2
    expected_y_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    expected_h_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    assert outputs[0].shape == expected_y_shape
    assert outputs[1].shape == expected_h_shape
    # With return_x=True the transmitted symbols x come first.
    outputs = env(batch_size, snr_db, return_x=True)
    assert len(outputs) == 3
    expected_x_shape = [
        batch_size,
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    assert outputs[0].shape == expected_x_shape
@pytest.mark.parametrize("p_spacing", [1, 2])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_block_pilot_pattern_values(p_spacing):
    """Block pilot pattern: mask is zero on non-pilot symbols and one at
    every p_spacing-th subcarrier of the pilot symbols."""
    config = EnvConfig()
    config.p_spacing = p_spacing
    env = OfdmEnv(config)
    # Non-pilot OFDM symbols carry no pilots at all.
    for i in range(0, config.num_ofdm_symbols):
        if i not in env.pilot_ofdm_symbol_indices:
            print(env.get_mask().shape)
            assert all(env.get_mask()[0, 0, i] == tf.zeros(shape=(config.fft_size,)))
    # Pilot symbols carry pilots at every p_spacing-th subcarrier.
    indices = np.arange(0, config.fft_size, config.p_spacing)
    for i in env.pilot_ofdm_symbol_indices:
        for j in indices:
            assert env.get_mask()[0, 0, i, j] == 1
@pytest.mark.parametrize("nues", [2, 4])
def test_get_mask(nues):
    """get_mask returns one mask per UE/stream over the full resource grid."""
    config = EnvConfig()
    config.n_ues = nues
    env = OfdmEnv(config)
    mask = env.get_mask()
    assert mask.shape == [
        nues,
        env.config.num_streams_per_tx,
        env.config.num_ofdm_symbols,
        env.config.fft_size,
    ]
@pytest.mark.parametrize("p_spacing", [1, 2])
@pytest.mark.parametrize("nr", [4, 8])
@pytest.mark.parametrize("nues", [2, 4])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_mimo_block_pilot_pattern(p_spacing, nr, nues):
    """Block pilot pattern bookkeeping holds across MIMO configurations."""
    config = EnvConfig()
    config.num_rx_antennas = nr
    config.n_ues = nues
    config.p_spacing = p_spacing
    env = OfdmEnv(config)
    assert env.n_pilot_symbols == len(config.pilot_ofdm_symbol_indices)
    assert env.n_pilot_subcarriers == int(
        env.rg.num_effective_subcarriers / config.p_spacing
    )
    # Per-UE pilot count in the mask matches the resource grid's count.
    mask = env.get_mask()
    assert int(np.count_nonzero(mask)) / nues == env.rg.num_pilot_symbols.numpy()
def test_extract_at_pilot_locations():
    """extract/estimate_at_pilot_locations pick exactly the pilot REs.

    Uses the mock pattern (pilots on OFDM symbol 3) and a grid that is -1 on
    that symbol and +1 elsewhere, so pilot extraction must return all -1.
    """
    config = EnvConfig()
    config.pilot_pattern = mock_pilot_pattern(config)
    env = OfdmEnv(config)
    batch_size = 10
    y_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    y = np.ones(y_shape, dtype=np.complex64)
    # Mark the pilot symbol (index 3) with -1 so it is distinguishable.
    y[:, 0, :, 3, :] = -1 * np.ones((config.fft_size,))
    yp = env.extract_at_pilot_locations(y)
    expect_yp_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.n_ues,
        config.num_streams_per_tx,
        env.rg.pilot_pattern.num_pilot_symbols.numpy(),
    ]
    assert yp.shape == expect_yp_shape
    assert (yp.numpy() == -1 * np.ones(expect_yp_shape, np.complex64)).all()
    # LS estimate: -1 on the pilot symbol (pilots are all one), 0 elsewhere.
    h_hat = env.estimate_at_pilot_locations(y)
    expected_h_shape = [
        batch_size,
        1,
        config.num_rx_antennas,
        config.n_ues,
        config.num_streams_per_tx,
        config.num_ofdm_symbols,
        config.fft_size,
    ]
    assert h_hat.shape == expected_h_shape
    assert (
        h_hat[:, 0, :, :, 0, 3, :].numpy()
        == -1 * np.ones((config.fft_size,), np.complex64)
    ).all()
    for i in range(config.num_ofdm_symbols):
        if i != 3:
            assert (
                h_hat[:, 0, :, :, 0, i, :].numpy()
                == np.zeros((config.fft_size,), np.complex64)
            ).all()
| SAIC-MONTREAL/CeBed | tests/test_env.py | test_env.py | py | 5,096 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.complex64",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_n... |
73788737704 | import pathlib
import re
import shutil
import subprocess
import tarfile
import tempfile
import urllib.parse
import urllib.request
import zipfile
# Temurin JDK release that gets bundled into the binary archives.
javaVersion = "11.0.12+7"
def createBinaryArchive(platform: str, arch: str) -> None:
    """Repackage the plain lsp-cli tar.gz into a platform-specific archive
    that bundles a trimmed JDK and a launcher defaulting JAVA_HOME to it."""
    print(f"Processing platform/arch '{platform}/{arch}'...")
    lspCliVersion = getLspCliVersion()
    targetDirPath = pathlib.Path(__file__).parent.parent.joinpath("target")
    lspCliArchivePath = pathlib.Path(__file__).parent.parent.joinpath(
        targetDirPath, f"lsp-cli-{lspCliVersion}.tar.gz")
    with tempfile.TemporaryDirectory() as tmpDirPathStr:
        tmpDirPath = pathlib.Path(tmpDirPathStr)
        print("Extracting lsp-cli archive...")
        # NOTE(review): extractall trusts the archive's member paths.
        with tarfile.open(lspCliArchivePath, "r:gz") as tarFile: tarFile.extractall(path=tmpDirPath)
        lspCliDirPath = tmpDirPath.joinpath(f"lsp-cli-{lspCliVersion}")
        relativeJavaDirPath = downloadJava(tmpDirPath, lspCliDirPath, platform, arch)
        print("Setting default for JAVA_HOME in startup script...")
        # Keep only the launcher for the target platform and locate the line
        # after which the JAVA_HOME default will be inserted.
        if platform == "windows":
            lspCliDirPath.joinpath("bin", "lsp-cli").unlink()
            binScriptPath = lspCliDirPath.joinpath("bin", "lsp-cli.bat")
            searchPattern = re.compile("^set REPO=.*$", flags=re.MULTILINE)
        else:
            lspCliDirPath.joinpath("bin", "lsp-cli.bat").unlink()
            binScriptPath = lspCliDirPath.joinpath("bin", "lsp-cli")
            searchPattern = re.compile("^BASEDIR=.*$", flags=re.MULTILINE)
        with open(binScriptPath, "r") as file: binScript = file.read()
        if platform == "windows":
            insertStr = f"\r\nif not defined JAVA_HOME set JAVA_HOME=\"%BASEDIR%\\{relativeJavaDirPath}\""
        else:
            insertStr = f"\n[ -z \"$JAVA_HOME\" ] && JAVA_HOME=\"$BASEDIR\"/{relativeJavaDirPath}"
        # Splice the default right after the matched base-directory line.
        regexMatch = searchPattern.search(binScript)
        assert regexMatch is not None
        binScript = binScript[:regexMatch.end()] + insertStr + binScript[regexMatch.end():]
        with open(binScriptPath, "w") as file: file.write(binScript)
        lspCliBinaryArchiveFormat = ("zip" if platform == "windows" else "gztar")
        lspCliBinaryArchiveExtension = (".zip" if platform == "windows" else ".tar.gz")
        lspCliBinaryArchivePath = targetDirPath.joinpath(
            f"lsp-cli-{lspCliVersion}-{platform}-{arch}")
        print(f"Creating binary archive '{lspCliBinaryArchivePath}{lspCliBinaryArchiveExtension}'...")
        shutil.make_archive(str(lspCliBinaryArchivePath), lspCliBinaryArchiveFormat,
                            root_dir=tmpDirPath)
        print("")
def downloadJava(tmpDirPath: pathlib.Path, lspCliDirPath: pathlib.Path,
        platform: str, arch: str) -> str:
    """Download a Temurin JDK for *platform*/*arch*, build a trimmed runtime
    with jlink inside *lspCliDirPath*, and return the runtime's directory
    name (relative to lspCliDirPath)."""
    javaArchiveExtension = (".zip" if platform == "windows" else ".tar.gz")
    javaArchiveName = (f"OpenJDK11U-jdk_{arch}_{platform}_hotspot_"
                       f"{javaVersion.replace('+', '_')}{javaArchiveExtension}")
    javaUrl = ("https://github.com/adoptium/temurin11-binaries/releases/download/"
               f"jdk-{urllib.parse.quote_plus(javaVersion)}/{javaArchiveName}")
    javaArchivePath = lspCliDirPath.joinpath(javaArchiveName)
    print(f"Downloading JDK from '{javaUrl}' to '{javaArchivePath}'...")
    urllib.request.urlretrieve(javaUrl, javaArchivePath)
    print("Extracting JDK archive...")
    if javaArchiveExtension == ".zip":
        with zipfile.ZipFile(javaArchivePath, "r") as zipFile: zipFile.extractall(path=tmpDirPath)
    else:
        with tarfile.open(javaArchivePath, "r:gz") as tarFile: tarFile.extractall(path=tmpDirPath)
    print("Removing JDK archive...")
    javaArchivePath.unlink()
    relativeJavaDirPathString = f"jdk-{javaVersion}"
    jdkDirPath = tmpDirPath.joinpath(relativeJavaDirPathString)
    # Bug fix: the condition was inverted — the macOS Temurin archive nests
    # the JDK under Contents/Home, while linux/windows keep jmods at the
    # archive root.
    jmodsDirPath = (jdkDirPath.joinpath("Contents", "Home", "jmods")
                    if platform == "mac" else jdkDirPath.joinpath("jmods"))
    javaTargetDirPath = lspCliDirPath.joinpath(relativeJavaDirPathString)
    print("Creating Java distribution...")
    # check=True: fail loudly instead of silently shipping an archive without
    # a Java runtime (the original ignored jlink's exit status).
    subprocess.run(["jlink", "--module-path", str(jmodsDirPath), "--add-modules", "java.se",
                    "--strip-debug", "--no-man-pages", "--no-header-files", "--compress=2",
                    "--output", str(javaTargetDirPath)], check=True)
    print("Removing JDK directory...")
    shutil.rmtree(jdkDirPath)
    return relativeJavaDirPathString
def getLspCliVersion() -> str:
    """Return the project version: the first <version> tag found in pom.xml
    (read from the current working directory)."""
    pomXml = pathlib.Path("pom.xml").read_text()
    regexMatch = re.search(r"<version>(.*?)</version>", pomXml)
    assert regexMatch is not None
    return regexMatch.group(1)
def main() -> None:
    """Build the x64 binary archive for every supported platform."""
    for platform in ("linux", "mac", "windows"):
        createBinaryArchive(platform, "x64")
# Script entry point.
if __name__ == "__main__":
    main()
| valentjn/lsp-cli | tools/createBinaryArchives.py | createBinaryArchives.py | py | 4,493 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
8899583521 | from flask import redirect, render_template, request, url_for
from flask_login import login_required
from application import app, db, get_css_framework, ITEMS_PER_PAGE
from application.room.models import Room
from application.place.models import Place
from application.place.forms import PlaceForm
from application.place.forms import PlaceUpdateForm
from flask_paginate import Pagination, get_page_parameter
@app.route("/place", methods=["GET"])
def place_index():
    """Paginated list of places, ordered by name."""
    # The 'q' parameter only toggles the pagination widget's search flag;
    # it does not filter the query.
    search = bool(request.args.get('q'))
    page = request.args.get(get_page_parameter(), type=int, default=1)
    start, stop = (page - 1) * ITEMS_PER_PAGE, page * ITEMS_PER_PAGE
    places = Place.query.order_by(Place.name).slice(start, stop)
    pagination = Pagination(
        page=page,
        total=Place.query.count(),
        search=search,
        record_name='places',
        per_page=ITEMS_PER_PAGE,
        css_framework=get_css_framework(),
        format_total=True,
        format_number=True,
    )
    return render_template("place/list.html", places=places, pagination=pagination)
@app.route("/place/new/")
@login_required
def place_form():
    """Render the empty form used to create a new place."""
    return render_template("place/new.html", form=PlaceForm())
@app.route("/place/<place_id>/delete/", methods=["POST"])
@login_required
def place_delete(place_id):
    """Delete a place and detach it from any rooms referencing it."""
    place = Place.query.get(place_id)
    # Robustness fix: the original crashed with AttributeError on an unknown
    # id; report it through the same info page instead.
    if place is None:
        return render_template("info.html", message="Place not found!")
    # Detach rooms first so they are not left pointing at a deleted row.
    roomswithdeletedplace = Room.query.filter(Room.place_id == place_id).all()
    for room in roomswithdeletedplace:
        room.place_id = None
    message = "Place " + place.name + " deleted!"
    db.session().delete(place)
    db.session().commit()
    return render_template("info.html", message=message)
@app.route("/place/<place_id>/", methods=["GET"])
def place_view(place_id):
    """Show a single place together with its update form."""
    place = Place.query.get(place_id)
    # NOTE(review): no None check — an unknown id renders with place=None.
    return render_template("place/update.html", place=place, form=PlaceUpdateForm())
@app.route("/place/<place_id>/update/", methods=["POST"])
@login_required
def place_update(place_id):
    """Update a place's name and (optionally) address from the posted form."""
    form = PlaceUpdateForm(request.form)
    place = Place.query.get(place_id)
    # An empty name means "keep the current one" — backfill before validating.
    if form.name.data == "":
        form.name.data = place.name
    if not form.validate():
        return render_template("place/update.html", place=place, form=form)
    place.name = form.name.data
    # The address is only overwritten when a new value was provided.
    if form.address.data != "":
        place.address = form.address.data
    db.session().commit()
    return render_template("place/update.html", place=place, form=form,
                           message="Place updated!")
@app.route("/place/", methods=["POST"])
@login_required
def place_create():
    """Create a new place from the posted form."""
    form = PlaceForm(request.form)
    if not form.validate():
        return render_template("place/new.html", form=form)
    session_db = db.session()
    session_db.add(Place(form.name.data, form.address.data))
    session_db.commit()
    return render_template("place/new.html", form=form, message="Place created!")
{
"api_name": "flask.request.args.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.re... |
34493975789 | import logging
import os
from argparse import ArgumentParser
from typing import Dict, List, Tuple, Set
import pandas as pd
from tqdm import tqdm
from gebert.utils.io import save_node_id2terms_list, save_dict, save_tuples, read_mrconso, read_mrrel
def get_concept_list_groupby_cui(mrconso_df: pd.DataFrame, cui2node_id: Dict[str, int]) \
        -> Dict[int, Set[str]]:
    """Group MRCONSO term strings (lower-cased, stripped) by concept node id.

    Bug fix: the original return annotation declared a 3-tuple, but the
    function returns a single ``Dict[int, Set[str]]``.

    Note: mutates ``mrconso_df`` in place (drops duplicated (CUI, STR) rows).
    """
    logging.info("Started creating CUI to terms mapping")
    node_id2terms_list: Dict[int, Set[str]] = {}
    logging.info(f"Removing duplicated (CUI, STR) pairs, {mrconso_df.shape[0]} rows before deletion")
    mrconso_df.drop_duplicates(subset=("CUI", "STR"), keep="first", inplace=True)
    logging.info(f"Removed duplicated (CUI, STR) pairs, {mrconso_df.shape[0]} rows after deletion")
    unique_cuis_set = set(mrconso_df["CUI"].unique())
    logging.info(f"There are {len(unique_cuis_set)} unique CUIs in dataset")
    for _, row in tqdm(mrconso_df.iterrows(), total=mrconso_df.shape[0],
                       miniters=mrconso_df.shape[0] // 50):
        cui = row["CUI"].strip()
        term_str = row["STR"].strip().lower()
        # Skip rows whose term string is empty after normalisation.
        if term_str == '':
            continue
        node_id = cui2node_id[cui]
        node_id2terms_list.setdefault(node_id, set()).add(term_str)
    logging.info("CUI to terms mapping is created")
    return node_id2terms_list
def extract_umls_oriented_edges_with_relations(mrrel_df: pd.DataFrame, cui2node_id: Dict[str, int],
        rel2rel_id: Dict[str, int], rela2rela_id: Dict[str, int],
        ignore_not_mapped_edges=False) -> List[Tuple[int, int, int, int]]:
    """Convert MRREL rows into deduplicated directed edges.

    Each edge is (source_node_id, target_node_id, rel_id, rela_id). Duplicate
    (CUI1, CUI2, REL, RELA) combinations are emitted once. Rows whose CUIs
    are not in cui2node_id either raise (default) or are counted and skipped
    when ignore_not_mapped_edges is True.
    """
    # Tracks already-seen edges via a "~~"-joined key.
    cuis_relation_str_set = set()
    logging.info("Started generating graph edges")
    edges: List[Tuple[int, int, int, int]] = []
    not_mapped_edges_counter = 0
    for idx, row in tqdm(mrrel_df.iterrows(), miniters=mrrel_df.shape[0] // 100, total=mrrel_df.shape[0]):
        cui_1 = row["CUI1"].strip()
        cui_2 = row["CUI2"].strip()
        rel = row["REL"]
        rela = row["RELA"]
        # The key separator must not occur inside any field.
        for att in (cui_1, cui_2, rel, rela):
            assert "~~" not in str(att)
        if cui2node_id.get(cui_1) is not None and cui2node_id.get(cui_2) is not None:
            cuis_relation_str = f"{cui_1}~~{cui_2}~~{rel}~~{rela}"
            if cuis_relation_str not in cuis_relation_str_set:
                cui_1_node_id = cui2node_id[cui_1]
                cui_2_node_id = cui2node_id[cui_2]
                rel_id = rel2rel_id[rel]
                rela_id = rela2rela_id[rela]
                edges.append((cui_1_node_id, cui_2_node_id, rel_id, rela_id))
                cuis_relation_str_set.add(cuis_relation_str)
        else:
            if not ignore_not_mapped_edges:
                raise AssertionError(f"Either CUI {cui_1} or {cui_2} are not found in CUI2node_is mapping")
            else:
                not_mapped_edges_counter += 1
    if ignore_not_mapped_edges:
        logging.info(f"{not_mapped_edges_counter} edges are not mapped to any node")
    logging.info(f"Finished generating edges. There are {len(edges)} edges")
    return edges
def create_graph_files(mrconso_df: pd.DataFrame, mrrel_df: pd.DataFrame, rel2id: Dict[str, int],
                       cui2node_id: Dict[str, int], rela2id: Dict[str, int], output_node_id2terms_list_path: str,
                       output_node_id2cui_path: str, output_edges_path: str, output_rel2rel_id_path: str,
                       output_rela2rela_id_path, ignore_not_mapped_edges: bool):
    """Assemble the UMLS graph artifacts and write each one to disk.

    Builds the node-id -> CUI inverse index and the node-id -> terms mapping
    from MRCONSO, extracts deduplicated edges from MRREL, then saves every
    artifact (terms, id2cui, rel/rela vocabularies, edges) to its output path.
    """
    # cui2node_id is one-to-one, so inverting it loses nothing.
    inverted_cui_index: Dict[int, str] = {nid: cui for cui, nid in cui2node_id.items()}
    terms_by_node = get_concept_list_groupby_cui(mrconso_df=mrconso_df, cui2node_id=cui2node_id)
    logging.info("Generating edges....")
    edge_tuples = extract_umls_oriented_edges_with_relations(mrrel_df=mrrel_df, cui2node_id=cui2node_id,
                                                             rel2rel_id=rel2id, rela2rela_id=rela2id,
                                                             ignore_not_mapped_edges=ignore_not_mapped_edges)
    logging.info("Saving the result....")
    save_node_id2terms_list(save_path=output_node_id2terms_list_path, mapping=terms_by_node)
    save_dict(save_path=output_node_id2cui_path, dictionary=inverted_cui_index)
    save_dict(save_path=output_rel2rel_id_path, dictionary=rel2id)
    save_dict(save_path=output_rela2rela_id_path, dictionary=rela2id)
    save_tuples(save_path=output_edges_path, tuples=edge_tuples)
def create_cui2node_id_mapping(mrconso_df: pd.DataFrame) -> Dict[str, int]:
    """Assign a node id to each unique CUI in MRCONSO.

    Fix: the original wrapped ``unique()`` in a ``set``, making node-id
    assignment depend on string-hash iteration order and therefore
    nondeterministic across interpreter runs (PYTHONHASHSEED). Iterating the
    ``unique()`` result directly preserves first-occurrence order, so node ids
    are reproducible.
    """
    unique_cuis = mrconso_df["CUI"].unique()
    cui2node_id: Dict[str, int] = {cui: node_id for node_id, cui in enumerate(unique_cuis)}
    return cui2node_id
def create_relations2id_dicts(mrrel_df: pd.DataFrame):
    """Build REL->id and RELA->id vocabularies from the MRREL table.

    Missing REL/RELA values are replaced by the literal "NAN" token (the
    columns are mutated on the caller's DataFrame, which downstream edge
    extraction relies on), and an extra "LOOP" id is appended to each
    vocabulary for self-loop edges added elsewhere.

    Returns:
        (rel2id, rela2id) dicts mapping relation strings to integer ids.
    """
    # Assign back instead of chained `df.REL.fillna(..., inplace=True)`:
    # chained inplace calls are deprecated and may silently not update the
    # frame under pandas copy-on-write.
    mrrel_df["REL"] = mrrel_df["REL"].fillna("NAN")
    mrrel_df["RELA"] = mrrel_df["RELA"].fillna("NAN")
    rel2id = {rel: rel_id for rel_id, rel in enumerate(mrrel_df.REL.unique())}
    rela2id = {rela: rela_id for rela_id, rela in enumerate(mrrel_df.RELA.unique())}
    rel2id["LOOP"] = max(rel2id.values()) + 1
    rela2id["LOOP"] = max(rela2id.values()) + 1
    logging.info(f"There are {len(rel2id.keys())} unique RELs and {len(rela2id.keys())} unique RELAs")
    print("REL2REL_ID", )
    for k, v in rel2id.items():
        print(k, v)
    print("RELA2RELA_ID", rela2id)
    # Fix: the original iterated over the undefined name `rel2aid`, raising
    # NameError at runtime; the intent was to print the RELA vocabulary.
    for k, v in rela2id.items():
        print(k, v)
    return rel2id, rela2id
def main():
    """CLI entry point: build graph files from UMLS MRCONSO/MRREL tables.

    With --split_val, MRCONSO rows are shuffled (fixed seed) and split into
    train/val subsets by --train_proportion, and a separate graph is built
    per subset under <output_dir>/train/ and <output_dir>/val/; otherwise a
    single graph is written directly to --output_dir.
    """
    parser = ArgumentParser()
    parser.add_argument('--mrconso')
    parser.add_argument('--mrrel')
    parser.add_argument('--split_val', action="store_true")
    parser.add_argument('--train_proportion', type=float)
    parser.add_argument('--output_dir', type=str)
    args = parser.parse_args()
    split_val = args.split_val
    output_dir = args.output_dir
    if not os.path.exists(output_dir) and output_dir != '':
        os.makedirs(output_dir)
    logging.info("Loading MRCONSO....")
    mrconso_df = read_mrconso(args.mrconso)
    # Missing term strings become '' and are skipped later when grouping
    # terms by CUI.
    mrconso_df["STR"].fillna('', inplace=True)
    logging.info("Loading MRREL....")
    mrrel_df = read_mrrel(args.mrrel)
    logging.info("Generating node index....")
    # Also NAN-fills REL/RELA in mrrel_df in place, which edge extraction
    # below depends on.
    rel2id, rela2id = create_relations2id_dicts(mrrel_df)
    if split_val:
        train_dir = os.path.join(output_dir, "train/")
        val_dir = os.path.join(output_dir, "val/")
        for d in (train_dir, val_dir):
            if not os.path.exists(d):
                os.makedirs(d)
        train_proportion = args.train_proportion
        num_rows = mrconso_df.shape[0]
        # Deterministic shuffle so the train/val split is reproducible.
        shuffled_mrconso = mrconso_df.sample(frac=1.0, random_state=42)
        del mrconso_df
        num_train_rows = int(num_rows * train_proportion)
        train_mrconso_df = shuffled_mrconso[:num_train_rows]
        val_mrconso_df = shuffled_mrconso[num_train_rows:]
        del shuffled_mrconso
        train_output_node_id2terms_list_path = os.path.join(train_dir, "node_id2terms_list")
        val_output_node_id2terms_list_path = os.path.join(val_dir, "node_id2terms_list")
        train_output_node_id2cui_path = os.path.join(train_dir, "id2cui")
        val_output_node_id2cui_path = os.path.join(val_dir, "id2cui")
        train_output_edges_path = os.path.join(train_dir, "edges")
        val_output_edges_path = os.path.join(val_dir, "edges")
        train_output_rel2rel_id_path = os.path.join(train_dir, "rel2id")
        val_output_rel2rel_id_path = os.path.join(val_dir, "rel2id")
        train_output_rela2rela_id_path = os.path.join(train_dir, "rela2id")
        val_output_rela2rela_id_path = os.path.join(val_dir, "rela2id")
        # Node ids are split-specific; the REL/RELA vocabularies built above
        # are shared between train and val.
        train_cui2node_id = create_cui2node_id_mapping(mrconso_df=train_mrconso_df)
        val_cui2node_id = create_cui2node_id_mapping(mrconso_df=val_mrconso_df)
        logging.info("Creating train graph files")
        create_graph_files(mrconso_df=train_mrconso_df, mrrel_df=mrrel_df, rel2id=rel2id, rela2id=rela2id,
                           cui2node_id=train_cui2node_id,
                           output_node_id2terms_list_path=train_output_node_id2terms_list_path,
                           output_node_id2cui_path=train_output_node_id2cui_path,
                           output_edges_path=train_output_edges_path,
                           output_rel2rel_id_path=train_output_rel2rel_id_path,
                           output_rela2rela_id_path=train_output_rela2rela_id_path, ignore_not_mapped_edges=True, )
        logging.info("Creating val graph files")
        create_graph_files(mrconso_df=val_mrconso_df, mrrel_df=mrrel_df, rel2id=rel2id, rela2id=rela2id,
                           cui2node_id=val_cui2node_id,
                           output_node_id2terms_list_path=val_output_node_id2terms_list_path,
                           output_node_id2cui_path=val_output_node_id2cui_path,
                           output_edges_path=val_output_edges_path, output_rel2rel_id_path=val_output_rel2rel_id_path,
                           output_rela2rela_id_path=val_output_rela2rela_id_path,
                           ignore_not_mapped_edges=True, )
    else:
        logging.info("Creating graph files")
        output_node_id2terms_list_path = os.path.join(output_dir, "node_id2terms_list")
        output_node_id2cui_path = os.path.join(output_dir, "id2cui")
        output_edges_path = os.path.join(output_dir, "edges")
        output_rel2rel_id_path = os.path.join(output_dir, f"rel2id")
        output_rela2rela_id_path = os.path.join(output_dir, f"rela2id")
        cui2node_id = create_cui2node_id_mapping(mrconso_df=mrconso_df)
        create_graph_files(mrconso_df=mrconso_df, mrrel_df=mrrel_df, rel2id=rel2id, rela2id=rela2id,
                           cui2node_id=cui2node_id,
                           output_node_id2terms_list_path=output_node_id2terms_list_path,
                           output_node_id2cui_path=output_node_id2cui_path,
                           output_edges_path=output_edges_path, output_rel2rel_id_path=output_rel2rel_id_path,
                           output_rela2rela_id_path=output_rela2rela_id_path, ignore_not_mapped_edges=True, )
if __name__ == '__main__':
    # Configure root logging before running the CLI pipeline.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', )
    main()
| Andoree/GEBERT | gebert/data/umls2graph.py | umls2graph.py | py | 10,606 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"lin... |
9454046228 | # coding: utf-8
import os
from mongoengine import connect
from fastapi import APIRouter
from app.database.documents import Article
from app.database.utils import query_to_dict
router = APIRouter(prefix="/api", tags=["Api"])
@router.get("/articles")
def articles(skip: int = 0, limit: int = 10):
    """List the articles in database. This endpoint provides a `skip` and
    `limit` parameters to navigate among the articles. Throw a 400 HTTP response
    with an error message if arguments are not set properly.
    Args:
        skip (int, optional): how many documents must be skipped. Defaults to 0.
        limit (int, optional): limit to the retrieved number of documents.
            Defaults to 10.
    """
    # NOTE(review): a new mongoengine connection is requested per call --
    # presumably mongoengine deduplicates by alias; confirm.
    connect(host=os.environ["MONGODB_URL"])
    count = Article.objects.count()
    # NOTE(review): returning a `(dict, 400)` tuple is Flask-style; FastAPI
    # serializes the tuple as a JSON array with status 200 instead of setting
    # the 400 status -- confirm intent (HTTPException is the FastAPI idiom).
    # NOTE(review): `skip + limit > count` also rejects valid last-page
    # requests (e.g. skip=0, limit=10 with 5 stored articles) -- verify.
    if skip + limit > count:
        return {"error": f"Database counts only {count} articles."}, 400
    elif skip < 0:
        return {"error": "`skip` argument must be >= 0."}, 400
    elif skip > limit:
        return {
            "error": (
                "`skip` argument value cannot be higher than `limit`"
                " argument value."
            )
        }, 400
    articles = query_to_dict(query_set=Article.objects[skip:skip + limit])
    return {"count": len(articles), "items": articles}
@router.get("/article")
def article(url: str):
"""Target an article to retrieve with its URL.
Args:
url (str): the URL of the article to retrieve.
"""
connect(host=os.environ["MONGODB_URL"])
articles = query_to_dict(query_set=Article.objects(url=url))
return {"article": articles[0]} | nicolasjlln/lbc-challenge | app/routers/api.py | api.py | py | 1,626 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mongoengine.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "app.database.doc... |
37854390965 | #!/usr/bin/env python3
'''
curve fit to histogram
'''
import collections
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.axes as maxes
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D as mline
from .markline import add_fcurve
__all__=['add_gauss_fit']
# gaussian fit
def gauss_1d(x, x0, sigma, I):
    """Unnormalized 1-D Gaussian centered at x0 with width sigma and peak height I."""
    deviation = x - x0
    exponent = -deviation ** 2 / (2 * sigma ** 2)
    return I * np.exp(exponent)
def cents_to_edges(cents):
    """Convert bin centers to bin edges.

    Interior edges sit halfway between neighboring centers; the two outermost
    edges extend the first/last center outward by the adjacent half-width.
    """
    half_widths = np.diff(cents) / 2
    inner_edges = cents[1:] - half_widths
    first_edge = cents[0] - half_widths[0]
    last_edge = cents[-1] + half_widths[-1]
    return np.asarray([first_edge, *inner_edges, last_edge])
def fit_gauss1d_to_data(cnts, xs):
    """Fit an unnormalized 1-D Gaussian to histogram counts.

    `xs` may be either bin edges (len(cnts)+1) or bin centers (len(cnts));
    whichever is missing is derived from the other.

    Returns:
        (func, popt): the fitted callable f(x) and a namedtuple
        Gauss1d(x0, sigma, I) of the optimal parameters.

    Raises:
        ValueError: if len(xs) matches neither edges nor centers.
    """
    if len(cnts)+1==len(xs):
        edges=xs
        cents=(edges[:-1]+edges[1:])/2
    elif len(cnts)==len(xs):
        cents=xs
        edges=cents_to_edges(cents)
    else:
        raise ValueError('mismatch between len of `cnts` and `xs`')
    # init guess
    # Weighted mean/std of the bin centers seed the optimizer; the amplitude
    # guess matches the histogram area to a Gaussian of that std.
    ws=cnts/np.sum(cnts)
    x0=np.sum(ws*cents)
    std=np.sqrt(np.sum(ws*(cents-x0)**2))
    I=np.sum(cnts*np.diff(edges))/(np.sqrt(2*np.pi)*std)
    p0=(x0, std, I)
    popt, _=curve_fit(gauss_1d, cents, cnts, p0=p0)
    func=lambda x: gauss_1d(x, *popt)
    # to namedtuple
    t_gauss1d=collections.namedtuple('Gauss1d', ['x0', 'sigma', 'I'])
    popt=t_gauss1d(*popt)
    return func, popt
# data from object returned by hist plot
def get_data_from_polygon(p):
'''
get cnts, edges from object returned from `hist` plot
'''
path=p.get_path()
verts=path.vertices
xs, ys=verts.T
# stepfilled
backs,=np.nonzero(np.diff(xs)<0) # backward path
if len(backs)>0:
n=backs[0]+1
xs=xs[:n]
ys=ys[:n]
cnts=ys[1:-1:2]
edges=xs[::2]
return cnts, edges
def get_data_from_line(l):
'''
get ys, xs from Line2D
'''
assert isinstance(l, mline)
xs, ys=l.get_data()
return ys, xs
def get_data_from_bars(p):
'''
cnts, edges from BarContainer
'''
cnts=[]
cents=[]
# edges=[]
for b in p:
x0, y0=b.get_xy()
w=b.get_width()
h=b.get_height()
cnts.append(y0+h)
cents.append(x0+w/2)
cnts=np.asarray(cnts)
cents=np.asarray(cents)
# bin centers to edges
edges=cents_to_edges(cents)
return cnts, edges
def get_data_from_plt(p):
'''
get cnts, edges from object returned from `hist` plot
'''
if isinstance(p, mpatches.Polygon):
return get_data_from_polygon(p)
# list returned from hist plot
if len(p)==1 and isinstance(p[0], mpatches.Polygon):
return get_data_from_polygon(p[0])
# bar collection
if not all([isinstance(t, mpatches.Rectangle) for t in p]):
s='only support `mpatches.Polygon` and collection of bars'
raise ValueError(s)
return get_data_from_bars(p)
# get patches from ax
def split_hist_patches(patches):
    '''
    split hist patches based on
    - type: polygon (for step) and rectangle (for bars)
    - fc: facecolor for bars

    Returns a list of groups, each group being the patches of one hist plot.
    '''
    hists=[]
    prevfc=None # fc of previous patch, None if not bar
    for p in patches:
        if isinstance(p, mpatches.Polygon):
            # A step-style histogram is one polygon: it forms its own group
            # and resets the running bar-color state.
            hists.append([p])
            prevfc=None
            continue
        elif not isinstance(p, mpatches.Rectangle):
            # only consider Polygon and Rectangle
            continue
        # first bar in new group
        # Consecutive bars sharing a facecolor are assumed to come from the
        # same hist call -- TODO confirm this holds for multi-color bar hists.
        if prevfc is None or p.get_fc()!=prevfc:
            hists.append([p])
            prevfc=p.get_fc()
        else: # same group
            hists[-1].append(p)
    return hists
def get_patches_from_ax(ax, hlabel=None, hind=None):
'''
get patches of hist plot from given ax
patches in ax is first splitted to groups of hist plot,
based on
- type: polygon (for step) and rectangle (for bars)
- fc: facecolor for bars
if `hlabel` is given, groups with given label is selected
`hind` specify index of group in hists to return
if both `hlabel` and `hind` None, use all patches
'''
if hlabel is None and hind is None:
return ax.patches
hists=split_hist_patches(ax.patches)
if hlabel is not None:
hists=[g for g in hists if g[0].get_label()==hlabel]
if hind is None:
if len(hists)>1:
raise ValueError('too many hist groups found. use `hind` to specify one')
return hists[0]
return hists[hind]
def add_gauss_fit(*args, **kwargs):
'''
add gaussian fit for hist plot
2 way to call
add_gauss_fit(p, **kwargs) # for p from hist plot
add_gauss_fit(ax, hlabel='some hist', hind=0) # use patches with given label in ax
add_gauss_fit(ax, cnts, edges)
'''
if len(args)==1:
p,=args
if isinstance(p, maxes.Axes):
ax=p
pkws={}
for k in ['hlabel', 'hind']:
if k in kwargs:
pkws[k]=kwargs.pop(k)
p=get_patches_from_ax(ax, **pkws)
elif isinstance(p, mpatches.Polygon):
ax=p.axes
else:
ax=p[0].axes
cnts, edges=get_data_from_plt(p)
else:
ax, cnts, edges=args
func, popt=fit_gauss1d_to_data(cnts, edges)
add_fcurve(ax, func, **kwargs)
return popt
| hujh08/datapy | plot/curvefit.py | curvefit.py | py | 5,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.exp",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.diff",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 45,... |
28091727369 | from flask import render_template, request, redirect, url_for, send_from_directory, jsonify, make_response, flash, Markup
import os
from werkzeug.utils import secure_filename
from web_scripts import *
@app.route('/')
def home():
return render_template('main.html')
@app.route('/upload-music', methods = ['GET', 'POST'])
def upload_music():
if request.method == 'POST':
try:
#checking for file size using data from cookies
if not allowed_filesize(request.cookies.get('filesize')):
print('File exceeded maximum size')
return make_response(jsonify({'message':'Exceeded Max Size'}), 300)
music = request.files.get('file')
impulse = request.cookies.get('user_choice')
impulse = f'/{impulse}.wav'
print(music.filename)
if music.filename == "":
print('Music must have a filename')
return make_response(jsonify({'message':'Must have a filename'}), 300)
if not allowed_file(music.filename):
#checking for invalid extensions
print('Invalid Music Extension')
return make_response(jsonify({'message':'Invalid Music Extension (mp3 & wav only)'}), 300)
else:
#checking for malicious filenames
filename = secure_filename(music.filename)
#saving uploaded music into directory
music.save(os.path.join(app.config["MUSIC_UPLOADS"],filename))
#applying reverb algorithm
path = build_reverb(filename, impulse)
#downloads the slowed & reverbed file
return make_response(jsonify({'message':path, 'title':filename}), 200)
except:
url = request.get_json()['url']
#downloading file from youtube
try:
filename, title = get_music(url)
impulse = f'/{request.cookies.get("user_choice")}.wav'
print('reverbing...')
path = build_reverb(filename, impulse)
return make_response(jsonify({'message':path, 'title':title}), 200)
except Exception as e:
return make_response(jsonify({'message':e}), 300)
return render_template('upload_music.html')
| philipk19238/slowed-and-reverbed | app/routes.py | routes.py | py | 2,366 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask.render_template",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.re... |
31456650987 |
from nltk.corpus import movie_reviews
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
import string
clitics = open('clitics', 'r').readlines()
sents0 = movie_reviews.words("neg/cv000_29416.txt")
sents1 = movie_reviews.words("pos/cv041_21113.txt")
texts2 = sents0 + sents1
# ################################################
# Remove all newline characters.
def RemoveAllNewline():
a1 = []
a1 = string.splitlines(texts2)
return a1
# ################################################
# Replace HTML character codes (i.e., &...;) with whitespace.
def ReplaceHTMLCharacters():
a=[]
for w in texts2:
a.append(re.sub('<*?&;>', ' ', w))
return a
#################################################
# Remove all URLs .
def RemoveAllURLs():
b = []
for w in ReplaceHTMLCharacters():
b.append(re.sub(r'^https?://.*[\r\n]*', '', w))
return b
#################################################
#Split each punctuation (using library called string to detectpunctuation symbols) into its own token using whitespace
def SplitEachPunctuation():
    """Drop tokens that are single punctuation symbols.

    Fix: the original built ``punct`` as a one-element list containing the
    entire ``string.punctuation`` string, so an individual token like "." or
    "!" never matched the membership test and nothing was ever removed.
    Using a set of the individual characters makes the filter effective
    (and the lookup O(1)).
    """
    filtered = []
    punct = set(string.punctuation)
    for item in RemoveAllURLs():
        if item not in punct:
            filtered.append(item)
    return filtered
################################################
#Split clitics using whitespace (see clitics file in the section materials).
def SplitClitics():
d =[]
for item in SplitEachPunctuation():
for i in clitics:
d.append(re.sub(i, ' ' + i, item))
return d
################################################
# Remove stopwords.
def RemoveStopwords():
e = []
stop_words = set(stopwords.words("english"))
for item in SplitClitics():
if item not in stop_words:
e.append(item)
return e
#################################################
#Each token is tagged with its part-of-speech using nltk tagger .
def pos():
f = []
for t in RemoveStopwords():
f = word_tokenize(t)
f.append(pos_tag(t))
return f
#################################################
# Apply lemmatization using nltk.
def lemmatization():
g = []
for w in RemoveStopwords():
lemma = WordNetLemmatizer().lemmatize(w, pos='n')
g.append(lemma)
return g
#################################################
# Convert text to lowercase.
def lowCase():
h = []
for w in RemoveStopwords():
h.append(w.lower())
return h
##################################################
print(lowCase())
| hassanMetwally/pre-processing | pre processing.py | pre processing.py | py | 2,678 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.movie_reviews.words",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.movie_reviews",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.movie_reviews.words",
"line_number": 15,
"usage_type": "call"
},
... |
34326375432 | import tensorflow as tf
# from tensorflow.keras import layers
from tensorflow import keras
from data import DataManager
import os
from utils import utils
# https://github.com/rlcode/reinforcement-learning-kr/blob/master/3-atari/1-breakout/breakout_a3c.py
# https://github.com/yinchuandong/A3C-keras/blob/master/a3c.py
# https://github.com/seungeunrho/minimalRL/blob/master/a3c.py
class BaseModel:
''' Super Class Model '''
dense_input = None
cnn_input = None
output_activation = None
model_actor = None
model_critic = None
train_x_raw = None
train_x_chart = None
train_y = None
eval_x = None
eval_y = None
epoch = None
def __init__(self, _input_size, _output_size, output_activation='tanh'):
self.input_size = _input_size
self.output_size = _output_size
self.output_activation = output_activation
# self.train_x_raw = _train_x_raw
# self.train_x_chart = _train_x_chart
# self.train_y = _train_y
# self.eval_x = _eval_x
# self.eval_y = _eval_y
self.epoch = 10
def get_cnn_model(self):
self.cnn_input = keras.layers.Input(shape=(299, 299, 5), name='cnn_input')
model_cnn = keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(self.cnn_input)
model_cnn = keras.layers.Conv2D(256, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
# model_cnn = keras.layers.Conv2D(512, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
model_cnn = keras.layers.Conv2D(256, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
model_cnn = keras.layers.Conv2D(128, kernel_size=(3, 3), activation='relu', strides=2, padding='same')(model_cnn)
model_cnn = keras.layers.AveragePooling2D((10, 10))(model_cnn)
model_cnn = keras.layers.Flatten()(model_cnn)
return model_cnn
def get_dense_model(self):
self.dense_input = keras.layers.Input(shape=(self.input_size,), name='dense_input')
model_dense = keras.layers.Dense(128, activation='relu')(self.dense_input)
model_dense = keras.layers.Dense(256, activation='relu')(model_dense)
# model_dense = keras.layers.Dense(512, activation='relu')(model_dense)
# model_dense = keras.layers.Dense(1024, activation='relu')(model_dense)
# model_dense = keras.layers.Dense(512, activation='relu')(model_dense)
model_dense = keras.layers.Dense(256, activation='relu')(model_dense)
model_dense = keras.layers.Dense(128, activation='relu')(model_dense)
return model_dense
def get_dense_out_model(self, model_dense, model_cnn):
model_share = keras.layers.concatenate([model_dense, model_cnn])
model_share = keras.layers.Flatten()(model_share)
model_share = keras.layers.Dense(512, activation='relu')(model_share)
# model_out = keras.layers.Dense(1024, activation='relu')(model_out)
# model_out = keras.layers.Dense(2048, activation='relu')(model_out)
# model_out = keras.layers.Dense(1024, activation='relu')(model_out)
model_actor = keras.layers.Dense(256, activation='relu')(model_share)
model_actor = keras.layers.Dense(128, activation='relu')(model_actor)
model_actor = keras.layers.Dense(self.output_size, activation=self.output_activation, name='model_out')(model_actor)
model_critic = keras.layers.Dense(256, activation='relu')(model_share)
model_critic = keras.layers.Dense(128, activation='relu')(model_critic)
model_critic = keras.layers.Dense(1, activation=self.output_activation, name='model_out')(model_critic)
return model_actor, model_critic
def build_model(self):
model_dense = self.get_dense_model()
model_cnn = self.get_cnn_model()
model_actor, model_critic = self.get_dense_out_model(model_dense, model_cnn)
self.model_actor = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_actor])
self.model_critic = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_critic])
return self.model_actor, self.model_critic
def get_global_model(self, _class_name):
model_dense = self.get_dense_model()
model_cnn = self.get_cnn_model()
model_actor, model_critic = self.get_dense_out_model(model_dense, model_cnn)
model_actor = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_actor])
model_critic = keras.Model(inputs=[self.dense_input, self.cnn_input], outputs=[model_critic])
file_actor, file_critic = self.get_weight_file(_class_name)
if file_actor is None:
model_actor.load_weights(file_actor)
model_critic.load_weights(file_critic)
return model_actor, model_critic
def get_model_weight_path(self, _class_name):
paths = os.getcwd() + '/model_weight/' + _class_name + '/'
if not os.path.exists(paths):
os.makedirs(paths)
return paths
def get_weight_file(self, _class_name):
best_loss_file = None
best_loss = 100
file_list = os.listdir(self.get_model_weight_path(_class_name))
file_list.sort()
# for file in file_list:
# loss = float(file.split('.')[0].split('_')[3])
# if best_loss > loss:
# best_loss = loss
# best_loss_file = file
# return best_loss_file, best_loss
actor = file_list[-2]
critic = file_list[-1]
return actor, critic
def model_evaluate_and_save(self, _actor, _critic, _class_name):
# self.model_actor.compile(optimizer='rmsprop', loss=_loss_func, metrics=['accuracy'])
# loss, accuracy = self.model_actor.evaluate(self.eval_x, self.eval_y)
#
# _, best_loss = self.get_best_loss_file(_class_name)
# if best_loss > loss:
today = utils.get_today()
time_now = utils.get_time()
path = self.get_model_weight_path(_class_name)
file_path = path + _class_name + '_' + today + '_' + time_now + '_'
_actor.save_weights(file_path + 'actor.h5')
_critic.save_weights(file_path + 'critic.h5')
| aoba0203/magi | train/agent/BaseModel.py | BaseModel.py | py | 6,290 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.layers.Input",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 39,
"usage_type": "name"
},
{
"api_n... |
13628425885 | from collections import Counter
import pandas as pd
import nltk
from src.tagger import Tagger
def get_counts(dataf):
with open(dataf, "r") as fh:
# Get counts
raw = fh.read()
# tokens = nltk.word_tokenize(raw)
tokens = raw.split()
unigrm = Counter(tokens)
bigrm = nltk.bigrams(tokens)
bigrm_fdist = nltk.FreqDist(bigrm)
return unigrm, bigrm_fdist
def get_tps(word, nextword, unigrm, bigrm):
    """Return (forward, backward) transitional probabilities of a bigram.

    Forward TP = count(word, nextword) / count(word);
    backward TP = count(word, nextword) / count(nextword).
    A zero unigram count (rare tokenizer artifact, e.g. spacy stripping
    apostrophes such as c') yields 0 instead of a division error.
    """
    w1, w2 = str(word), str(nextword)
    pair_count = bigrm[(w1, w2)]

    def _ratio(denominator):
        # Guard against zero counts rather than raising ZeroDivisionError.
        return 0 if denominator == 0 else pair_count / denominator

    return _ratio(unigrm[w1]), _ratio(unigrm[w2])
def main(lang, dataf, prefix=""):
# Find N-Adj, Adj-N pairs and get their FW-TP and BW-TP
adjnoun = []
nounadj = []
alls = []
j = 0
# Tagger
tagger = Tagger(lang)
# Get unigrams and bigrams
print("Getting counts...")
unigrm, bigrm = get_counts(dataf)
print("Counts done.")
with open(dataf, "r") as fh:
for line in fh:
j += 1
if j % 1000 == 0:
print("%i sentences parsed" % j)
sentence = line.strip()
parsed = tagger.parseSentence(sentence)
for i, word in enumerate(parsed):
nextword = ""
if (i + 1) < len(parsed):
nextword = parsed[i + 1]
# There can be a count of 0 in rare cases when spacy removes apostrophes (e.g. c')
if unigrm[str(word)] == 0 or unigrm[str(nextword)] == 0:
pass
else:
# Adj-Noun
if tagger.isAdj(word) and tagger.isNoun(nextword):
# print("Adj-N", word, nextword)
fw, bw = get_tps(word, nextword, unigrm, bigrm)
adjnoun.append([lang, word, nextword, "fw", fw])
adjnoun.append([lang, word, nextword, "bw", bw])
alls.append([lang, "fw", fw])
alls.append([lang, "bw", bw])
# Noun-Adj
if tagger.isNoun(word) and tagger.isAdj(nextword):
# print("N-adj", word, nextword)
fw, bw = get_tps(word, nextword, unigrm, bigrm)
nounadj.append([lang, word, nextword, "fw", fw])
nounadj.append([lang, word, nextword, "bw", bw])
alls.append([lang, "fw", fw])
alls.append([lang, "bw", bw])
# Create dataframes
ANdf = pd.DataFrame(adjnoun, columns=["lang", "word", "nextword", "direction", "prob"])
NAdf = pd.DataFrame(nounadj, columns=["lang", "word", "nextword", "direction", "prob"])
alldf = pd.DataFrame(alls, columns=["lang", "direction", "prob"])
# Save them to file
ANdf.to_csv("{}_{}_AdjNoun_tps.csv".format(prefix, lang), sep=";")
NAdf.to_csv("{}_{}_NounAdj_tps.csv".format(prefix, lang), sep=";")
alldf.to_csv("{}_{}_tps.csv".format(prefix, lang), sep=";")
| rgalhama/retro_adjs | src/analyses_TPs/tps.py | tps.py | py | 3,266 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.bigrams",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.FreqDist",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "src.tagger.Tagger",
... |
151229982 | import sys
import uuid
import os
import shutil
from lxml import etree
import openpyxl
from zipfile import ZipFile
core = "docProps/core.xml"
def extractWorkbook(filename, outfile="xml"):
with ZipFile(filename, "r") as zip:
zip.extract(core, outfile)
def checkForCheaters(filename):
    """Parse docProps/core.xml and compare the creator vs. lastModifiedBy fields.

    Returns (flag, fields): flag is True only when both fields are present and
    differ; returns None if parsing fails for any reason.
    """
    try:
        # SECURITY: load_dtd=True + resolve_entities=True + no_network=False
        # makes this parser vulnerable to XXE / external entity fetching --
        # presumably intentional in this CTF context; do NOT reuse this
        # configuration on untrusted input elsewhere.
        parser = etree.XMLParser(load_dtd=True, resolve_entities=True, no_network=False)
        tree = etree.parse(filename, parser=parser)
        root = tree.getroot()
        print(etree.tostring(root))
        arr=[]
        for child in root:
            # Tags are namespaced (e.g. dc:creator, cp:lastModifiedBy), hence
            # the substring match instead of equality.
            if 'creator' in child.tag or 'lastModifiedBy' in child.tag:
                arr.append(child.text)
                print(child.text)
        flag=True
        if len(arr)!=2 or arr[0]==arr[1]:
            flag=False
        return (flag, arr)
    except Exception:
        print("Error! checkForCheaters")
        return None
def getScore(filename,answers):
try:
wb_obj = openpyxl.load_workbook(filename)
sheet_obj = wb_obj.active
score=0
for i in range(len(answers)):
studentsAnswer = str(sheet_obj.cell(row=i+1, column=1).value)
answer=answers[i]
if answer==studentsAnswer:
score+=1
return score
except Exception:
print("Error! getScore")
return None
if __name__ == "__main__":
# if len(sys.argv) == 2:
# filename = sys.argv[1]
# else:
# print("Usage:", sys.argv[0], "<filename>")
# exit(1)
filename='xls.xlsx'
tmpFolder = "./uploads/" + str(uuid.uuid4())
os.mkdir(tmpFolder)
extractWorkbook(filename, tmpFolder)
workbook = tmpFolder + "/" + core
cheater=checkForCheaters(workbook)
score=getScore(filename,['aboba','aboba1','None','123'])
print(score)
print("Removing tmp folder:", workbook)
shutil.rmtree(tmpFolder) | suborofu/tulactf-2022-writeups | web/Cheaters/web/flask-serv/tester.py | tester.py | py | 1,878 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lxml.etree.XMLParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "lxml.etree.parse",
... |
28318096244 | from sqlalchemy import create_engine, Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import pymysql
pymysql.install_as_MySQLdb()
# 构建连接引擎对象
engine = create_engine("mysql://root@localhost/py1709_torn_db1",
encoding="utf-8", echo=True)
# 获取一个连接会话
Session = sessionmaker(bind=engine)
session = Session()
# 构建一个基础类型
Base = declarative_base(bind=engine)
# 定义自定义类型
# 自定义类型创建完成之后,sqlalchemy会根据管理的类型自动创建一个intrumentation管理对象
# 通过intrumentation管理对象底层封装了自定义类型和数据库表之间的各种关联操作
class Person(Base):
__tablename__ = "persons"
id = Column(Integer, primary_key=True)
name = Column(String(50))
age = Column(Integer)
# 通过类型的__table__属性查看它的数据库表元数据
# 通过Base。metadata属性封装的函数完成数据库之间的数据同步操作
# print(Person.__table__)
# Base.metadata.create_all() # 将所有salalchemy管理的对象同步到数据库中产生对应的数据表
# 1. 程序中直接创建的对象,是保存并运行在内存中的~一旦程序结束,内存中的数据会清空
# 临时状态(游离状态):程序中直接创建的对象,临时对象
# 特点:程序中有数据,缓存中无数据,数据库中无数据
p = Person(name="jerry", age=12)
print(p, p.id, p.name, p.age)
# 2. 程序中的对象,可以通过连接会话session的add()函数,将对象交给sqlalchemy进行管理
# 缓存状态(托管状态):对象只是存在于连接会话缓存中,数据库中并没有相关数据,缓存对象
# 特点:程序中有数据,缓存中有数据,数据库中无数据
session.add(p)
# 3. 缓存中的数据,可以通过连接会话session的commit()函数,将缓存数据提交给数据库进行持久化保存
# 持久状态(持久persistent状态):对象在程序中存在,在数据库中有对应的记录
# 特点:程序中有数据{id}, 缓存中有数据, 数据库中有数据
session.commit()
print(p.id, p.name, p.age)
# 修改操作
# 一旦对缓存状态的对象进行修改,此时缓存对象和数据库中的数据不一致~
# 就会形成脏数据,脏数据并不是不可取的,更新操作就是将这样的数据从缓存同步到数据库(commit)
p.name = "shuke"
# 可以通过session.dirty来查询缓存中的脏数据
session.commit()
# 删除操作
session.delete(p)# 直接删除一个缓存的数据[脏数据],通过commit()提交到数据库
session.commit()
# 注意删除的只能是持久对象
#p2 = Person(id=1)
#session.delete(p2)# 抛出异常~不能删除,因为p2不是持久对象is not persisted
| laomu/py_1709 | 2.Tornado_cursor/days02数据模型/demo02sqlalchemy增删改.py | demo02sqlalchemy增删改.py | py | 2,838 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.install_as_MySQLdb",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 14,
"usage_type": "call"
},
{
"ap... |
36378630131 | import unittest
import os
import opendatasets as od
import sqlite3
import pandas as pd
#Testing automated pipeline
class TestDownloadAndSaveDataset(unittest.TestCase):
    """End-to-end pipeline test: download a Kaggle CSV, load it with pandas,
    and persist it into a SQLite table, verifying each stage.

    NOTE(review): requires network access and Kaggle credentials for
    ``opendatasets.download`` -- confirm CI provides both.
    """
    def setUp(self):
        """Define the dataset URL and the input/output file locations."""
        # Set up necessary variables for testing
        self.dataset_url = 'https://www.kaggle.com/datasets/thedevastator/jobs-dataset-from-glassdoor/download?datasetVersionNumber=2'
        self.file_path = 'jobs-dataset-from-glassdoor/salary_data_cleaned.csv'
        self.db_path = '../data/clean_salary.sqlite'
    def test_download_and_save_dataset(self):
        """Download, parse, and store the dataset, asserting each step worked."""
        # Download dataset
        od.download(self.dataset_url)
        # Check if the downloaded file exists
        self.assertTrue(os.path.exists(self.file_path))
        # Read the CSV file into a DataFrame
        cleancsv_df = pd.read_csv(self.file_path)
        # Check if DataFrame is not empty
        self.assertFalse(cleancsv_df.empty)
        # Connect to SQLite database and save the DataFrame
        conn = sqlite3.connect(self.db_path)
        cleancsv_df.to_sql('clean_salary', conn, index=False, if_exists='replace')
        # Check if the table exists in the database
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='clean_salary';")
        result = cursor.fetchone()
        self.assertIsNotNone(result)
        # Close the database connection
        conn.close()
    def tearDown(self):
        """Remove the downloaded CSV and the SQLite file created by the test."""
        # Clean up after the test
        if os.path.exists(self.file_path):
            os.remove(self.file_path)
        if os.path.exists(self.db_path):
            os.remove(self.db_path)
if __name__ == '__main__':
unittest.main()
| arpita739/made-template | project/test.py | test.py | py | 1,677 | python | en | code | null | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "opendatasets.download",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
4454907121 | from flask_testing import TestCase
from config import create_app
from db import db
AUTHORISED_ENDPOINTS_DATA = (
("POST", "/new_resource/"),
("POST", "/tag_resource/"),
("POST", "/upload_file/1/"),
("PUT", "/resource_status/1/read/"),
("PUT", "/resource_status/1/dropped/"),
("PUT", "/resource_status/1/to_read/"),
("PUT", "/update_resource/"),
("PUT", "/update_user/"),
("DELETE", "/delete_resource/1/"),
("DELETE", "/delete_tag/1/"),
("DELETE", "/delete_file/1/"),
("GET", "/my_user/"),
("GET", "/my_resources/"),
("GET", "/my_tags/"),
("GET", "/my_resources_with_tag/1/"),
)
UNAUTHORISED_ENDPOINTS_DATA = (
("POST", "/register/"),
("POST", "/login/"),
)
NO_INPUT_ENDPOINTS_DATA = (("GET", "/general_stats/"),)
class TestApp(TestCase):
    """
    Some basic tests validating that everything is okay with the user authentication.
    """
    def create_app(self):
        # Required hook for flask_testing.TestCase: build the app under test config.
        return create_app("config.TestingConfig")
    def setUp(self):
        # Fresh schema before every test so tests stay independent.
        db.init_app(self.app)
        db.create_all()
    def tearDown(self):
        # Drop everything created during the test.
        db.session.remove()
        db.drop_all()
    def iterate_endpoints(
        self,
        endpoints_data,
        status_code_method,
        expected_resp_body,
        headers=None,
        payload=None,
    ):
        """
        A simple function to iterate across endpoints. Makes it easier to test stuff.

        :param endpoints_data: iterable of (HTTP method, url) pairs
        :param status_code_method: one of the flask_testing assert_XXX helpers
        :param expected_resp_body: expected JSON body, or "" to skip the body check
        :param headers: optional request headers (defaults to {})
        :param payload: accepted but never attached to the request below
            -- NOTE(review): confirm whether a request body was intended
        """
        if not headers:
            headers = {}
        if not payload:
            payload = {}
        resp = None
        for method, url in endpoints_data:
            if method == "GET":
                resp = self.client.get(url, headers=headers)
            elif method == "POST":
                resp = self.client.post(url, headers=headers)
            elif method == "PUT":
                resp = self.client.put(url, headers=headers)
            elif method == "DELETE":
                resp = self.client.delete(url, headers=headers)
            status_code_method(resp)
            # "" acts as a sentinel meaning "don't check the response body".
            if not expected_resp_body == "":
                self.assertEqual(resp.json, expected_resp_body)
    def test_protected_endpoints(self):
        """
        Go through all endpoints that require authentication and make sure you can't get any information without a token.
        """
        self.iterate_endpoints(
            AUTHORISED_ENDPOINTS_DATA,
            self.assert_401,
            {
                "message": "You need a token to get access to this endpoint \N{winking face}"
            },
        )
    def test_unprotected_endpoints(self):
        """
        Go through all endpoints that don't require a token, but require input, and make sure you don't get anything
        without providing the right input.
        """
        self.iterate_endpoints(UNAUTHORISED_ENDPOINTS_DATA, self.assert_400, "")
    def test_no_input_endpoints(self):
        """
        Go through all unprotected endpoints that don't need input and make sure you get a response 200 OK.
        """
        self.iterate_endpoints(NO_INPUT_ENDPOINTS_DATA, self.assert_200, "")
    def test_expired_token_raises(self):
        """
        Go though all protected endpoints and make sure you get the right error when you use an expired token.
        """
        # Well-formed JWT whose "exp" claim is in the past.
        headers = {
            "Authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOjM2LCJleHAiOjE2NjA4OTE1MTZ9.pbx2hPf9hi7JhHkRPsHeQIrcDKsZn9n80jNCVaPo3IA"
        }
        self.iterate_endpoints(
            AUTHORISED_ENDPOINTS_DATA,
            self.assert_401,
            {"message": "Sorry, your token has expired. Please, log in again."},
            headers,
        )
    def test_invalid_token_raises(self):
        """
        Go though all protected endpoints and make sure you get the right error when you use an invalid token.
        """
        # Structurally broken token (not three base64url segments).
        headers = {"Authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGcin9n80jNCVaPo3IA"}
        self.iterate_endpoints(
            AUTHORISED_ENDPOINTS_DATA,
            self.assert_401,
            {
                "message": "Sorry, your token is invalid \N{unamused face}. Please, register or login again to obtain a valid token."
            },
            headers,
        )
| tedypav/FlaskCourse_OnlinePersonalLibrary | tests/test_application.py | test_application.py | py | 4,228 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask_testing.TestCase",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "config.create_app",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "db.db.init_app",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "db.db",
"... |
74147827945 | import re
import argparse
from os import listdir
def read_file(filename: str) -> str:
    """Return the full text of ``./regex_labs/src/<filename>.txt``."""
    path = "./regex_labs/src/{}.txt".format(filename)
    with open(path) as source:
        content = source.read()
    return content
def creditcards(content):
    """All credit card numbers and respective brands"""
    # Each match pairs a run of digits/whitespace (the number) with the
    # alphabetic brand text that follows it.
    found = re.findall(r"([0-9\s]+)\n?([a-zA-Z\s]+)\n?", content)
    return [
        (digits.replace(" ", "").replace("\n", ""), label.replace("\n", ""))
        for digits, label in found
    ]
def phonenumbers(content):
    """All Portuguese phone numbers"""
    # Match a "(+351)"/"(351)"/"(00351)" prefix, then collect the digit
    # groups that follow and strip their separators.
    cleaned = []
    for raw in re.findall(r"\(\+?0?0?351\).?([0-9- ]*)", content):
        cleaned.append(raw.replace("-", "").replace(" ", ""))
    return cleaned
def emails(content):
    r"""Return all emails except the ones with username "jose".

    The negative lookbehind ``(?<!\njose)`` rejects matches where the text
    immediately before the ``@`` is a line consisting of ``jose``.

    NOTE(review): a ``jose@...`` address on the very first line (no leading
    newline) would not be excluded -- confirm the source file always has a
    line before the first address.
    """
    # Fix: re.findall already returns a list, so the previous
    # ``[match for match in matches]`` was a pointless element-wise copy.
    return re.findall(r"(.*(?<!\njose)@.+)", content)
def urls(content):
    """All urls and respective query arguments"""
    results = []
    for hit in re.finditer(r"https?://(?P<domain>.+)/(?P<args>\?.+)?", content):
        query = hit.group("args")
        if query:
            # Drop the leading "?" and split on "&" into key=value strings.
            params = query[1:].split("&")
        else:
            params = []
        results.append((hit.group("domain"), params))
    return results
if __name__ == '__main__':
    """ python -m regex_labs.regex -r <filename> """
    # Build the list of runnable examples from the sample .txt files.
    examples = [f.replace(".txt", "") for f in listdir("./regex_labs/src/")]
    parser = argparse.ArgumentParser()
    parser.add_argument("--run", '-r', choices=examples, required=True)
    args = parser.parse_args()
    file_content = read_file(args.run)
    # HACK: eval() resolves the chosen function by name.  The blast radius is
    # limited because argparse restricts args.run to the filenames above, but
    # a dict dispatch would be safer.
    [print(line) for line in eval(args.run)(file_content)]
| zepcp/code_labs | regex_labs/regex.py | regex.py | py | 1,669 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.finditer",
"line_number": 38,... |
42778570533 | from typing import Any
import pytest
from pydantic import ValidationError
from toucan_connectors.toucan_connector import ToucanDataSource
class DataSource(ToucanDataSource):
    """Minimal concrete data source used to exercise ToucanDataSource
    validation: one validated required field, one unvalidated required field,
    one optional field, and one field with a default."""
    collection: str # required, validated against type
    query: Any # required, not validated
    comment: str = None # not required, no default, validated against type when present
    test_default: int = 101 # not required because it has a default, validated
def test_instantiation():
    """Constructing with all required args of the right types succeeds."""
    payload = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
    }
    source = DataSource(**payload)
    assert source.name == payload['name']
    assert source.test_default == 101
def test_required_arg():
    """Omitting the required 'domain' field raises a ValidationError."""
    # error with missing required arg
    data_source = {'name': 'my_name', 'collection': 'my_collection', 'query': {}}
    with pytest.raises(ValidationError) as e:
        DataSource(**data_source)
    assert 'domain' in e.value.errors()[0]['loc'] # Are we testing pydantic here ?
    assert e.value.errors()[0]['msg'] == 'field required'
def test_required_arg_wrong_type():
    """A required field of the wrong type raises a ValidationError."""
    # error with required arg of wrong type
    data_source = {'domain': [], 'name': 'my_name', 'collection': 'my_collection', 'query': {}}
    with pytest.raises(ValidationError) as e:
        DataSource(**data_source)
    assert 'domain' in e.value.errors()[0]['loc']
    assert e.value.errors()[0]['msg'] == 'str type expected'
def test_not_required():
    """An optional field ('comment') may be supplied and is stored as-is."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'comment': 'test',
    }
    mds = DataSource(**data_source)
    assert mds.comment == 'test'
def test_default_override():
    """A field with a default ('test_default') can be overridden explicitly."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'test_default': 102,
    }
    mds = DataSource(**data_source)
    assert mds.test_default == 102
def test_default_override_validated():
    """Overriding a defaulted field with a wrong type raises a ValidationError."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'test_default': {},
    }
    with pytest.raises(ValidationError):
        DataSource(**data_source)
def test_unknown_arg():
    """Unknown fields are rejected (model forbids extra attributes)."""
    data_source = {
        'domain': 'my_domain',
        'name': 'my_name',
        'collection': 'my_collection',
        'query': {},
        'unk': '@',
    }
    with pytest.raises(ValidationError) as e:
        DataSource(**data_source)
    assert 'unk' in e.value.errors()[0]['loc']
    assert e.value.errors()[0]['msg'] == 'extra fields not permitted'
def test_get_form():
    """The default get_form output matches the base ToucanDataSource JSON schema."""
    default_form = ToucanDataSource.get_form(None, {})
    assert default_form == {
        'title': 'ToucanDataSource',
        'type': 'object',
        'properties': {
            'domain': {'title': 'Domain', 'type': 'string'},
            'name': {'title': 'Name', 'type': 'string'},
            'type': {'title': 'Type', 'type': 'string'},
            'load': {'title': 'Load', 'type': 'boolean', 'default': True},
            'live_data': {'title': 'Live Data', 'type': 'boolean', 'default': False},
            'validation': {'title': 'Validation', 'type': 'object'},
            'parameters': {'title': 'Parameters', 'type': 'object'},
            'cache_ttl': {
                'title': "Slow Queries' Cache Expiration Time",
                'description': 'In seconds. Will override the 5min instance default and/or the connector value',
                'type': 'integer',
            },
        },
        'required': ['domain', 'name'],
        'additionalProperties': False,
    }
| ToucanToco/toucan-connectors | tests/test_datasource.py | test_datasource.py | py | 3,757 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "toucan_connectors.toucan_connector.ToucanDataSource",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pytest.raises",
"line_number": 32,
"usage_type": "call"
},
{
"api_n... |
21131295808 | """Nautobot Golden Config plugin application level metrics ."""
from django.conf import settings
from django.db.models import Count, F, Q
from nautobot.dcim.models import Device
from prometheus_client.core import GaugeMetricFamily
from nautobot_golden_config.models import ComplianceFeature, ComplianceRule, ConfigCompliance, GoldenConfig
PLUGIN_SETTINGS = settings.PLUGINS_CONFIG.get("nautobot_golden_config", {})
def metric_gc_functions():
    """Calculate the successful vs the failed GC jobs for backups, intended & compliance.
    Yields:
        GaugeMetricFamily: Prometheus Metrics
    """
    # For each job type: "success" means the last attempt timestamp equals the
    # last success timestamp; "failure" is every other attempted job.
    backup_gauges = GaugeMetricFamily("nautobot_gc_backup_total", "Nautobot Golden Config Backups", labels=["status"])
    successful_backups = GoldenConfig.objects.filter(backup_last_attempt_date=F("backup_last_success_date")).count()
    attempted_backups = GoldenConfig.objects.filter(backup_last_attempt_date__isnull=False).count()
    backup_gauges.add_metric(labels=["success"], value=successful_backups)
    backup_gauges.add_metric(labels=["failure"], value=attempted_backups - successful_backups)
    yield backup_gauges
    # Same success/failure split for intended-config generation.
    intended_gauges = GaugeMetricFamily(
        "nautobot_gc_intended_total", "Nautobot Golden Config Intended", labels=["status"]
    )
    successful_intended = GoldenConfig.objects.filter(
        intended_last_attempt_date=F("intended_last_success_date")
    ).count()
    attempted_intended = GoldenConfig.objects.filter(intended_last_attempt_date__isnull=False).count()
    intended_gauges.add_metric(labels=["success"], value=successful_intended)
    intended_gauges.add_metric(labels=["failure"], value=attempted_intended - successful_intended)
    yield intended_gauges
    # Same success/failure split for compliance runs.
    compliance_gauges = GaugeMetricFamily(
        "nautobot_gc_compliance_total", "Nautobot Golden Config Compliance", labels=["status"]
    )
    successful_compliance = GoldenConfig.objects.filter(
        compliance_last_attempt_date=F("compliance_last_success_date")
    ).count()
    attempted_compliance = GoldenConfig.objects.filter(compliance_last_attempt_date__isnull=False).count()
    compliance_gauges.add_metric(labels=["success"], value=successful_compliance)
    compliance_gauges.add_metric(labels=["failure"], value=attempted_compliance - successful_compliance)
    yield compliance_gauges
def metric_devices_per_feature():
    """Calculate number of devices configured for GC Compliance feature.
    Yields:
        GaugeMetricFamily: Prometheus Metrics
    """
    features = ComplianceFeature.objects.all()
    devices_gauge = GaugeMetricFamily(
        "nautobot_gc_devices_per_feature", "Nautobot Golden Config Devices per feature", labels=["device"]
    )
    for feature in features:
        rules_per_feature = ComplianceRule.objects.filter(feature=feature)
        if rules_per_feature:
            # NOTE(review): only the platform of the *first* rule is used, so
            # this assumes every rule of a feature targets the same platform
            # -- confirm that assumption holds for multi-platform features.
            devices_gauge.add_metric(
                labels=[feature.name], value=Device.objects.filter(platform=rules_per_feature.first().platform).count()
            )
        else:
            # Feature with no rules still gets a gauge entry, at zero.
            devices_gauge.add_metric(labels=[feature.name], value=0)
    yield devices_gauge
def metric_compliance_devices():
    """Calculate Compliant & Non-Compliant total number of devices per feature.
    Yields:
        GaugeMetricFamily: Prometheus Metrics
    """
    compliance_gauge = GaugeMetricFamily(
        "nautobot_gc_compliant_devices_by_feature_total",
        "Nautobot Golden Config Compliance",
        labels=["feature", "compliant"],
    )
    # One DB round-trip: per feature slug, count compliant vs non-compliant rows.
    queryset = ConfigCompliance.objects.values("rule__feature__slug").annotate(
        compliant=Count("rule__feature__slug", filter=Q(compliance=True)),
        non_compliant=Count("rule__feature__slug", filter=~Q(compliance=True)),
    )
    # Accumulate in a plain dict keyed by slug before emitting the gauge pairs.
    counters = {item["rule__feature__slug"]: {"compliant": 0, "non_compliant": 0} for item in queryset}
    for feature in queryset:
        counters[feature["rule__feature__slug"]]["compliant"] += feature["compliant"]
        counters[feature["rule__feature__slug"]]["non_compliant"] += feature["non_compliant"]
    for feature, counter_value in counters.items():
        compliance_gauge.add_metric(labels=[feature, "true"], value=counter_value["compliant"])
        compliance_gauge.add_metric(labels=[feature, "false"], value=counter_value["non_compliant"])
    yield compliance_gauge
metrics = [metric_gc_functions, metric_devices_per_feature, metric_compliance_devices]
| nautobot/nautobot-plugin-golden-config | nautobot_golden_config/metrics.py | metrics.py | py | 4,420 | python | en | code | 91 | github-code | 36 | [
{
"api_name": "django.conf.settings.PLUGINS_CONFIG.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.PLUGINS_CONFIG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 9,
"usage_type": "na... |
1762674415 | import logging
import itertools
from typing import Optional
import demoji
from .apple import scraper_apple
from .google import scraper_google
__all__ = ["scraper", "scraper_google", "scraper_apple"]
def content_filter(content: str) -> Optional[str]:
    """Strip emoji, reject short reviews, and drop overlong tokens.

    Returns None when the de-emojified text is under 20 characters;
    otherwise returns the text with every word of 15+ characters removed.
    """
    text = demoji.replace(content)
    if len(text) < 20:
        return None
    kept_words = [word for word in text.split() if len(word) < 15]
    return " ".join(kept_words)
def scraper(
    google_package: str,
    apple_name: str,
    lans: list[str] = ["en"],  # NOTE(review): mutable default -- harmless here
    countries: list[str] = ["us"],  # since it is never mutated, but fragile.
    count: int = 10000,
):
    """Yield filtered review texts for an app from Google Play then the App Store.

    Google reviews are fetched per (language, country) pair; Apple reviews per
    country only.  Each review passes through content_filter and is yielded
    only when the filter keeps it.
    """
    for lan, country in itertools.product(lans, countries):
        logging.info(f"read reviews on {lan}, {country} @ google")
        for review in scraper_google(google_package, lan, country, count):
            review = content_filter(review)
            if review: yield review
    for country in countries:
        logging.info(f"read reviews on {country} @ apple")
        for review in scraper_apple(apple_name, country, count):
            review = content_filter(review)
            if review: yield review
| moriW/app_words | scraper/__init__.py | __init__.py | py | 1,099 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "demoji.replace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "logging.info",
"... |
5134072941 | import asyncio
from telethon.tl.functions.channels import EditAdminRequest
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.types import ChatAdminRights
from telethon.errors.rpcerrorlist import ChatSendMediaForbiddenError, PeerIdInvalidError
from . import *
@telebot.on(admin_cmd(pattern="schd ?(.*)"))
@telebot.on(sudo_cmd(pattern="schd ?(.*)", allow_sudo=True))
async def schd(event):
    """Scheduled broadcast: ``.schd <interval_seconds> <repeat_count> <msg_id>``.

    Every <interval_seconds>, re-fetches message <msg_id> from the current chat
    and forwards it to every group dialog, <repeat_count> times in total.
    """
    a = event.pattern_match.group(1)
    b = a.split(" ")
    wwait = b[0]       # seconds between broadcast rounds
    times = int(b[1])  # number of broadcast rounds
    idds = b[2]        # message id to broadcast (string at this point)
    previous_message = await event.get_reply_message()
    if previous_message:
        # NOTE(review): the reply is fetched a second time here (redundant),
        # and the id taken from it...
        previous_message = await event.get_reply_message()
        idds = previous_message.id
    # ...is then immediately overwritten by int(b[2]) whenever idds is truthy,
    # so the reply's id is effectively never used -- confirm intended behavior.
    if idds:
        idds = int(b[2])
    kk = await event.reply("`Schedule Broadcasting Msg...`")
    er = 0
    done = 0
    count = 0
    chatidd = await event.get_chat()
    chatidd = chatidd.id
    while count != times:
        count += 1
        # Per-round counters; reset each iteration.
        er = 0
        done = 0
        await asyncio.sleep(int(wwait))
        await kk.edit("`Broadcasting...`")
        msg = await borg.get_messages(chatidd, ids=idds)
        async for x in event.client.iter_dialogs():
            if x.is_group:
                chat = x.id
                try:
                    # NOTE(review): done is incremented *before* send_message,
                    # so it counts attempts -- a failed chat shows up in both
                    # done and er.
                    done += 1
                    await borg.send_message(chat, msg)
                except BaseException:
                    er += 1
        await kk.edit(f"Done in {done} chats, error in {er} chat(s)")
    await kk.reply("`Schedule Broadcast Finished...`")
| ankitkumarbh/Telegram-Userbot | telebot/plugins/schd.py | schd.py | py | 1,538 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "asyncio.sleep",
"line_number": 34,
"usage_type": "call"
}
] |
7044056393 | # !/usr/bin/env python
import rospy
import websocket
import json
# from msg.ObjectArray import ObjectArray
from detection.msg._ObjectArray import ObjectArray
LABELS = ["human", "unknown", "animals"]
try:
import thread
except ImportError:
import _thread as thread
import time
def on_message(ws, message):
    """WebSocket callback: incoming messages are ignored."""
    pass
    # print(message)
def on_error(ws, error):
    """WebSocket callback: print any transport error."""
    print(error)
def on_close(ws):
    """WebSocket callback: announce connection close."""
    print("### closed ###")
def on_open(ws):
    """On connect, spawn a sender thread that forwards detection summaries.

    The thread polls on_object_array.last_object_array (set by the ROS
    subscriber callback), tallies detections per label, and sends one JSON
    payload per message over the websocket.
    """
    def run(*args):
        drone_id = 1
        # Hard-coded start coordinates; latitude is nudged each cycle below.
        latitude = 55.751574
        longtitude = 37.573856
        while True:
            # NOTE(review): this is a busy-wait spin when no message is
            # pending -- consider a queue or a small sleep here.
            if on_object_array.last_object_array is None:
                continue
            # Consume the pending message (clear the shared slot).
            object_array = on_object_array.last_object_array
            on_object_array.last_object_array = None
            # Count detections per label; `object` shadows the builtin.
            cnt = {l: 0 for l in LABELS}
            for object in object_array.objects:
                cnt[LABELS[object.label]] += 1
            data = {
                'drone_id': drone_id,
                'latitude': latitude,
                'longtitude': longtitude,
                'timestamp': time.time(),
                'humans': cnt['human'],
                'animals': cnt["animals"],
                'unknown': cnt["unknown"]
            }
            time.sleep(1)
            latitude += 0.001
            ws.send(json.dumps(data))
        # NOTE(review): unreachable -- the while True above never breaks, so
        # the socket is never closed from this thread.
        ws.close()
    thread.start_new_thread(run, ())
def on_object_array(msg):
    """ROS subscriber callback: stash the latest ObjectArray on the function
    itself; the websocket sender thread consumes and clears it."""
    on_object_array.last_object_array = msg
# Initialize the shared one-slot mailbox used by on_open's sender thread.
on_object_array.last_object_array = None
if __name__ == "__main__":
    # Wire the ROS detection topic to the websocket uplink and run forever.
    rospy.init_node("sender")
    sub_objects = rospy.Subscriber("/detection/yolo/objects", ObjectArray, on_object_array)
    websocket.enableTrace(True)
    # NOTE(review): hard-coded server address -- consider a ROS param.
    ws = websocket.WebSocketApp("ws://192.168.86.248:8080/drone/", on_message=on_message, on_error=on_error,
                                on_close=on_close)
    ws.on_open = on_open
    ws.run_forever()
| cds-mipt/animal_ir_detection | sender/scripts/sender.py | sender.py | py | 1,890 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "_thread.start_new_thread",
"line_... |
28511009690 | import tester # tester
import random
import pexpect
import time
import struct
import sys
import socket
import importlib.util
EASYDB_PATH = "/cad2/ece326f/tester/bin/easydb"
def load_module(modname):
    """Load the student module ``<modname>.py`` from the asst3 data directory
    and return the executed module object (importlib file-location machinery,
    bypassing sys.path)."""
    path = tester.datapath(modname + ".py", 'asst3')
    spec = importlib.util.spec_from_file_location(modname, path)
    mod = importlib.util.module_from_spec(spec)
    # Make tester helper paths importable before the module body runs.
    tester.includepath()
    spec.loader.exec_module(mod)
    return mod
def try_connect(db, server):
    """Connect *db* to *server*, retrying up to three times on refusal.

    Each refused attempt prints a notice and waits one second.  A fourth,
    final attempt is made outside the loop so a persistent refusal
    propagates ConnectionRefusedError to the caller.

    Returns whatever ``db.connect(host, port)`` returns.
    """
    retry = 0
    while retry < 3:
        try:
            return db.connect(server.host, server.port)
        except ConnectionRefusedError:
            retry += 1
            print("Connection Refused -- retrying in 1 second")
            time.sleep(1)
    # Bug fix: the result of this last attempt was previously discarded and
    # the function fell through returning None even on success.
    return db.connect(server.host, server.port)
class Client:
    """Minimal raw-socket client for the easydb test server."""
    def __init__(self, server):
        # make sure server is running
        assert(server.program)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((server.host, server.port))
    # Dump rows of a table from the database.
    # table_id: int, table id of the table.
    def dump(self, table_id):
        # Request: two big-endian int32s -- presumably the DUMP opcode (42)
        # followed by the table id (matches the easydb wire protocol; confirm).
        self.sock.send(bytearray([0, 0, 0, 42, 0, 0, 0, table_id]))
        resp = self.sock.recv(4096)
        # First int32 of the response is the status; 1 means OK.
        if struct.unpack("!i", resp[:4])[0] == 1:
            # Payload: newline-separated rows of tab-separated columns.
            rows = resp[4:].decode("utf-8").split('\n')
            return [ row.split('\t') for row in rows if len(row) > 0 ]
        return None
    def close(self):
        self.sock.close()
        del self.sock
    def __del__(self):
        # Close the socket if close() was never called explicitly.
        if hasattr(self, 'sock'):
            self.sock.close()
# convenience function
def dump(server, table_id):
    """Convenience wrapper: open a Client to *server* and dump one table."""
    return Client(server).dump(table_id)
class Server:
    """Wrapper that launches the reference easydb binary under pexpect and
    exposes expect-style helpers for the grading scripts."""
    def __init__(self, filename=None):
        self.host = "localhost"
        # Random unprivileged port; NOTE(review): not checked for collisions.
        self.port = random.randint(1024, 9999)
        if filename is None:
            self.schema = tester.datapath('export.txt', 'asst3')
        else:
            self.schema = filename
    def start(self, datafile=None):
        """Spawn the server; return True once its prompt appears, else False."""
        if datafile is not None:
            self.datafile = tester.datapath(datafile, 'asst3')
        else:
            self.datafile = ""
        path = "%s -g %d %s localhost %s"%(EASYDB_PATH, self.port, self.schema,
            self.datafile)
        self.program = pexpect.spawn(path, [], encoding='utf-8')
        # Append all interaction to tester.log, tagged with the running script.
        self.program.logfile = open('tester.log', 'a')
        self.program.logfile.write("\n-------- %s --------\n\n"%sys.argv[0])
        # Wait for the "]" prompt; EOF means the server failed to start.
        idx = self.program.expect([r"\]", pexpect.EOF])
        self.program.logfile.flush()
        if idx != 0:
            self.program.close(force=True)
            self.program.logfile.close()
            del self.program
            return False
        return True
    def expect(self, substr, timeout=3):
        """Literal-string expect; returns the match index or None on timeout."""
        try:
            return self.program.expect_exact(substr, timeout=timeout)
        except:
            return None
    def look(self, regex, timeout=3):
        """Regex expect; returns the match index or None on timeout."""
        try:
            return self.program.expect(regex, timeout=timeout)
        except:
            return None
    def end(self):
        """Terminate the child process and release the log file handle."""
        self.program.terminate(force=True)
        self.program.expect(pexpect.EOF)
        self.program.logfile.flush()
        self.program.close(force=True)
        self.program.logfile.close()
        del self.program
    def __del__(self):
        # Ensure the child is reaped even if end() was never called.
        if hasattr(self, 'program'):
            self.end()
def start_test(testname, marks):
    """Create a tester.Core for *testname* worth *marks*, make the tester
    helper paths importable, and return the test object."""
    test = tester.Core(testname, marks)
    tester.includepath()
    return test
# Run the test case of a given function and return updated total mark.
# func: python function, function to run the test case on; funcArgs: tuple, arguments of the function to run; case_number: int or str, case number;
# mark:int, mark of this test case; total_mark: int, total mark so far; error_raise: bool, True if an given error should raise in the test casek;
# error: error that should / should not raise in the test case; false_error: bool, False if other errors can raise but not this one.
def run_test_case(func, funcArgs, case_number, mark, total_mark, error_raise, error, false_error=False):
    """Run func(*funcArgs) as one graded test case and return the updated mark.

    Semantics of the flags:
      * error_raise False, false_error False: pass iff nothing raises.
      * error_raise True,  false_error False: pass iff exactly `error` raises.
      * false_error True: pass iff some exception *other than* `error` raises.

    Returns (new_total_mark, result) where result is func's return value, or
    None when it raised.
    """
    result = None
    passed = False
    failure = None
    try:
        result = func(*funcArgs)
    except error as exc:
        if false_error:
            failure = "an error except {} should raise, but {} raises instead: {}".format(error, error, str(exc))
        elif error_raise:
            # The expected error class was raised.
            passed = True
        else:
            # (typo "errror" kept to preserve the original output text)
            failure = "no error should raise, but an errror raises: {}".format(str(exc))
    except Exception as exc:
        if false_error:
            # Any exception other than `error` counts as a pass here.
            passed = True
        elif error_raise:
            failure = "{} should raise, but other error raises instead: {}".format(error, str(exc))
        else:
            failure = "no error should raise, but an error raises: {}".format(str(exc))
    else:
        if not error_raise:
            passed = True
        elif false_error:
            failure = "an error except {} should raise, but no error raises".format(error)
        else:
            failure = "{} should raise, but no error raises".format(error)
    if passed:
        total_mark = total_mark + mark
        print("CASE {} PASS".format(case_number))
    else:
        print("CASE {} FAIL: {}".format(case_number, failure))
    return (total_mark, result)
| CoraZhang/Object-Oriented-Programming | tester/scripts/asst3.py | asst3.py | py | 6,122 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tester.datapath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "importlib.util.util.spec_from_file_location",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "importlib.util.util",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
17218989395 | import torch
import torch.nn as nn
from modules.updown_cell import UpDownCell
from modules.captioner import Captioner
class UpDownCaptioner(Captioner):
    """Bottom-up/top-down image captioner: an UpDownCell decoder over image
    features, a word embedding layer, and a linear projection to vocab logits."""
    def __init__(self, vocab, image_feature_size=2048, embedding_size=1000, hidden_size=512,
                 attention_projection_size=512, seq_length=20, beam_size=3,
                 pretrained_embedding=None, state_machine=None):
        """Build the model.

        vocab: token -> index mapping; must contain '<unk>' (used as the
            ignore index of the loss).
        pretrained_embedding: optional tensor of word vectors; when given it
            replaces the randomly initialized embedding and is frozen by
            default (nn.Embedding.from_pretrained).
        state_machine: opaque decoding constraint object -- stored only;
            presumably consumed by decoding code outside this block.
        """
        super(UpDownCaptioner, self).__init__()
        vocab_size = len(vocab)
        self.vocab = vocab
        self.seq_length = seq_length
        self.state_machine = state_machine
        self.image_feature_size = image_feature_size
        self.beam_size = beam_size
        # define up-down cell
        self._cell = UpDownCell(image_feature_size=image_feature_size, embedding_size=embedding_size,
                                hidden_size=hidden_size, attention_projection_size=attention_projection_size)
        # define embedding layer
        if pretrained_embedding is not None:
            # if use pre-trained word embedding
            self._embedding_layer = nn.Embedding.from_pretrained(pretrained_embedding).float()
        else:
            self._embedding_layer = nn.Embedding(num_embeddings=vocab_size,
                                                 embedding_dim=embedding_size)
        # produce the logits which used to soft-max distribution
        self._output_layer = nn.Linear(hidden_size, vocab_size, bias=True)
        self._log_softmax = nn.LogSoftmax(dim=1)
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.vocab['<unk>'])
    def load(self, PATH):
        """Load model weights from a checkpoint file at PATH.

        NOTE(review): torch.load without map_location will fail on CPU-only
        machines for GPU-saved checkpoints -- confirm deployment target.
        """
        self.load_state_dict(torch.load(PATH))
{
"api_name": "modules.captioner.Captioner",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "modules.updown_cell.UpDownCell",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn.Embedding.from_pretrained",
"line_number": 27,
"usage_type": "call"
... |
34588741728 | import os
from pathlib import Path
from pyontutils.utils import get_working_dir
from pyontutils.integration_test_helper import _TestScriptsBase as TestScripts
from .common import project_path, project_path_real, test_organization, onerror
from .common import fake_organization
import sparcur
import sparcur.cli
import sparcur.paths
import sparcur.backends
from sparcur.utils import log
from sparcur.pennsieve_api import FakeBFLocal
def fake_setup(self, *args, **kwargs):
    """ replace _setup_bfl with a version that handles repeated invocation of
    cli.Main.__init__ as occurs during testing """
    # FIXME obviously the whole init process should be reworked to avoid the
    # utter insanity that cli.Main.__init__ is at the moment ...
    if self.options.clone or self.anchor.id != fake_organization:
        # Real-backend path: rebuild the Remote class binding for this run.
        self.Remote = self._remote_class._new(
            self._cache_class._local_class, self._cache_class)
        if (hasattr(self.Remote, '_api') and
            not isinstance(self.Remote._api, self.Remote._api_class)):
            # A previous test run left a stale _api cached somewhere in the
            # MRO; scrub it from every class that defines one.
            log.warning(f'stale _api on remote {self.Remote._api}')
            for cls in self.Remote.mro():
                if hasattr(cls, '_api'):
                    try:
                        del cls._api
                    except AttributeError as e:
                        # _api was inherited, not defined on this class.
                        pass
        self._old_setup_bfl()
    else:
        # Fake-backend path: short-circuit with FakeBFLocal, no network.
        self._cache_class._anchor = self.anchor # don't trigger remote lookup
        self.bfl = self._remote_class._api = FakeBFLocal(self.anchor.id, self.anchor)
sparcur.cli.Main._old_setup_bfl = sparcur.cli.Main._setup_bfl
sparcur.cli.Main._setup_bfl = fake_setup
only = tuple()
skip = ('dashboard_server',)
ci_skip = tuple()
working_dir = get_working_dir(__file__)
if working_dir is None:
# python setup.py test will run from the module_parent folder
working_dir = Path(__file__).parent.parent
post_load = lambda : None
def post_main():
    """Reset remote-class global state after each scripted test run."""
    # just wipe out the state of these after every test
    # there are countless strange and hard to debug errors
    # that can occur because of mutation of class aka global state
    # they really don't teach the fact that class level variables
    # are actually global variables and should be treated with fear
    sparcur.backends.PennsieveRemote._new(sparcur.paths.Path,
                                          sparcur.paths.PennsieveCache)
mains = {'cli-real': [['spc', 'clone', test_organization],
['spc', 'pull'],
#['spc', 'refresh'], # XXX insanely slow and no longer used due to brokeness
['spc', 'fetch'],
# nonsense with consistently incorrectly sized files in pandora
# find objects/ -exec ls -al {} \+ | grep -v 1024 | grep -v 4096 | grep -v total | grep -v objects | grep tom
['spc', 'fetch', '--mbf'], # FIXME abstract --mbf
#['spc', 'report', 'access'], # TODO no easy way to test this ...
['spc', 'rmeta'],],
'cli': [['spc', 'find', '--name', '*.xlsx'],
['spc', 'find', '--name', '*', '--limit', '3'],
['spc', 'status'],
['spc', 'meta'],
['spc', 'export'],
['spc', 'report', 'completeness'],
['spc', 'report', 'contributors'],
['spc', 'report', 'filetypes'],
['spc', 'report', 'keywords'],
['spc', 'report', 'subjects'],
['spc', 'report', 'samples'],
['spc', 'report', 'pathids'],
['spc', 'report', 'errors'],
['spc', 'report', 'size'],
['spc', 'report', 'test'],
['spc', 'tables'],
['spc', 'missing'],
#['spc', 'annos'], # XXX insanely slow
#['spc', 'annos', 'export'], # XXX insanely slow
],
}
mains['cli'] = [args +
['--project-path', project_path.as_posix(), '-N', '--local', '--jobs', '1'] +
(['--raw'] if 'report' in args else [])
for args in mains['cli']]
_cli_real = mains.pop('cli-real')
if 'CI' not in os.environ:
mains['cli'].extend([args + ['--project-path', project_path_real.as_posix(), '-N', '--jobs', '1']
for args in _cli_real])
# if the real project path exists then remove it so that we can test cloning
# and keep the cloned directory around until the next time we run the tests
if project_path_real.exists():
project_path_real.rmtree(onerror=onerror)
log.info(skip)
TestScripts.populate_tests(sparcur, working_dir, mains, skip=skip,
post_load=post_load, post_main=post_main,
only=only, do_mains=True)
| SciCrunch/sparc-curation | test/test_integration.py | test_integration.py | py | 4,836 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "common.fake_organization",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sparcur.utils.log.warning",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sparcur.utils.log",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "... |
31911094208 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import shutil
import sys
sys.path.insert(0, os.path.abspath("../.."))  # path to the actual project root folder
# -- Project information -----------------------------------------------------
project = "Spotted dmi bot"
copyright = "2021, Tend, drendog, alepiaz, Helias"
author = "Tend, drendog, alepiaz, Helias"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",  # to run doctests
    "sphinx.ext.napoleon",  # to use NumPy and Google style docstrings
    "sphinx.ext.githubpages",  # generates the .nojekyll file
    "sphinx.ext.viewcode",  # add source code links to the documentation
    "sphinx_rtd_dark_mode",  # dark mode for ReadTheDocs
    "sphinx_autodoc_typehints",  # improves the type hinting
    "sphinx.ext.coverage",  # add coverage links to the documentation
    "sphinx.ext.intersphinx",  # add external mapping to other documentation
]
# NOTE: the original list declared "sphinx.ext.viewcode" twice; the duplicate
# entry has been removed.
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"  # [optional, to use the far superior Read the Docs theme]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
    "css/dark.css",
]
html_logo = "_static/img/spotted-logo.jpg"
# -- Extension configuration -------------------------------------------------
# -- Configuration of "sphinx_autodoc_typehints" -----------------------------
typehints_use_rtype = False
typehints_defaults = "comma"
# -- Run sphinx-apidoc -------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/readthedocs/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try:  # for Sphinx >= 1.7
    from sphinx.ext import apidoc
except ImportError:
    from sphinx import apidoc
output_dir = os.path.join(os.path.dirname(__file__), "api")
module_dir = os.path.join(os.path.dirname(__file__), "../../src/spotted")
# Start from a clean API folder so removed modules do not leave stale pages.
try:
    shutil.rmtree(output_dir)
except FileNotFoundError:
    pass
try:
    import sphinx
    cmd_line = f"sphinx-apidoc --implicit-namespaces -t templates -f -o {output_dir} {module_dir}"
    args = cmd_line.split(" ")
    # Rudimentary numeric version parse to avoid external dependencies.
    # Bug fix: the original compared tuples of *strings*, which is a
    # lexicographic comparison and wrongly reports e.g. "1.10" < "1.7".
    version_parts = []
    for part in sphinx.__version__.split(".")[:2]:
        if not part.isdigit():
            break
        version_parts.append(int(part))
    if tuple(version_parts) >= (1, 7):
        # Sphinx >= 1.7 expects the argument list without the program name
        args = args[1:]
    apidoc.main(args)
except Exception as e:
    print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- External mapping --------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    "sphinx": ("https://www.sphinx-doc.org/en/master", None),
    "python": ("https://docs.python.org/" + python_version, None),
    "matplotlib": ("https://matplotlib.org", None),
    "numpy": ("https://numpy.org/doc/stable", None),
    "sklearn": ("https://scikit-learn.org/stable", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
    "setuptools": ("https://setuptools.pypa.io/en/stable/", None),
    "pyscaffold": ("https://pyscaffold.org/en/stable", None),
    "telegram": ("https://docs.python-telegram-bot.org/en/stable/", None),
}
| TendTo/Telegram-SpottedDMI-Bot | docs/source/conf.py | conf.py | py | 4,898 | python | en | code | null | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
5972086108 | import random
import pygame
import copy
# l: current generation -- 48x48 grid, each cell randomly alive (1) or dead (0).
l = [[random.choice([0, 1]) for i in range(48)] for i in range(48)]
# k: scratch grid that receives the next generation before being copied back into l.
k = [[0 for i in range(48)] for i in range(48)]
pygame.init()
# 480x480 window: each cell is drawn as a 10x10 pixel square.
s = pygame.display.set_mode((480, 480), 0, 32)
o = True  # main-loop flag; cleared when the window receives a QUIT event
def z(x, y, grid=None):
    """Count the live (== 1) neighbours of cell (x, y).

    Inspects the up-to-eight adjacent cells, skipping the cell itself and
    any coordinate that falls outside the board.  `grid` defaults to the
    module-level board `l` (the original hard-coded behaviour); passing an
    explicit grid generalises the helper to any rectangular board, replacing
    the hard-coded 0..47 bounds with the grid's actual dimensions.
    """
    if grid is None:
        grid = l  # backwards compatible: operate on the global 48x48 board
    m = 0
    for i in (x - 1, x, x + 1):
        for j in (y - 1, y, y + 1):
            if i == x and y == j:
                continue
            # clip to the grid bounds instead of the fixed 0..47 range
            if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[i]):
                continue
            if grid[i][j] == 1:
                m += 1
    return m
# Main loop: one Conway generation per ~100 ms frame.
while o:
    s.fill((255, 255, 255))
    # Handle window events; closing the window ends the loop.
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            o = False
    # Compute the next generation into k using Conway's rules:
    # 2 neighbours -> keep state, 3 neighbours -> alive, otherwise -> dead.
    for x in range(48):
        for y in range(48):
            a = z(x, y)
            if a == 2:
                k[x][y] = l[x][y]
            elif a == 3:
                k[x][y] = 1
            else:
                k[x][y] = 0
    # Render: blue square for live cells, black 1px border around every cell.
    # Note the axis swap -- row x maps to the vertical screen coordinate.
    for x in range(48):
        for y in range(48):
            if k[x][y] == 1:
                s.fill((0, 0, 255), (y * 10, x * 10, 10, 10))
            pygame.draw.rect(s, (0, 0, 0), (y * 10, x * 10, 10, 10), 1)
    # Deep copy so the next generation is computed from a stable snapshot.
    l = copy.deepcopy(k)
    pygame.display.update()
    pygame.time.wait(100)
| Lil-Shawn/game-of-life | main.py | main.py | py | 1,240 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"... |
34105486078 | from pymongo import MongoClient
from wa_api import WA_API
# Collection Names
AC = "archers"
CC = "competitions"
QC = "qualifications"
QAC = "qualifications_arrows"
class MongoManage:
    """Data-access layer for the World Archery results database.

    Collection names come from the module-level constants AC (archers),
    CC (competitions), QC (qualifications) and QAC (qualification arrows).
    ``set_database`` must be called before any query method.
    """

    def __init__(self, host='localhost', port=27017, rs=None):
        """Open a MongoDB connection.

        :param host: MongoDB host
        :param port: MongoDB port
        :param rs: optional replica-set name; when given, reads prefer the
            primary but fall back to secondaries
        """
        if rs:
            self.client = MongoClient(host=host, port=port, replicaset=rs, readPreference='primaryPreferred')
        else:
            self.client = MongoClient(host=host, port=port)
        self.db = None  # selected lazily via set_database()

    def set_database(self, db_name='wa'):
        """Select the working database."""
        self.db = self.client[db_name]

    def insert(self, collection, obj):
        """
        Insert for the collections with no dependencies with other collections
        OR where dependencies has already been resolved

        :return: the inserted document's _id, or -1 on failure
        """
        try:
            result = self.db[collection].insert_one(obj)
            return result.inserted_id
        except Exception:  # fix: was a bare `except:` (also caught SystemExit/KeyboardInterrupt)
            print("{0} collection: failed to insert object: {1}".format(collection, obj))
            return -1

    def insert_qualification(self, qualification):
        """Insert a qualification after resolving its WA ids to Mongo _ids.

        The referenced competition must already exist locally; archers that
        are missing are fetched through the WA API and inserted on the fly.
        Mutates `qualification` in place (WA ids replaced by Mongo _ids).

        :return: the inserted document's _id, or -1 on failure
        """
        competition_id = self.db[CC].find_one({'wa_id': qualification['competition_id']})['_id']
        qualification['competition_id'] = competition_id
        wa_archer_ids = list(qualification['archer_ids'])
        archer_ids = []
        for wa_ai in wa_archer_ids:
            try:
                aid = self.db[AC].find_one({'wa_id': wa_ai})['_id']
            except TypeError:
                # find_one returned None -> archer not in MongoDB; fetch via API and add him.
                # Fix: the original message was printed without .format(wa_ai).
                print("Archer with {0} World Archer ID was not found in the DB, inserting it...".format(wa_ai))
                wa = WA_API()
                archer = wa.db__get_single_archer(wa_ai)
                aid = self.insert(AC, archer)
                print("...inserting of archer is done, _id: {0}".format(aid))
            archer_ids.append(aid)
        qualification['archer_ids'] = archer_ids
        try:
            result = self.db[QC].insert_one(qualification)
            return result.inserted_id
        except Exception:  # fix: was a bare `except:`
            print("Qualifications collection: failed to insert qualification: {0}".format(qualification))
            return -1

    def get_qualifications(self, individual_team=None):
        """
        :param individual_team:
            1 - return only individual qualification results
            2 - return only team qualification results
            (others) - return both
        :return: list of qualification documents with `competition_id` and
            `archer_ids` translated back to World Archery ids
        """
        if individual_team == 1:
            qualifications = self.db[QC].find({"is_team": 0})
        elif individual_team == 2:
            qualifications = self.db[QC].find({"is_team": 1})
        else:
            qualifications = self.db[QC].find()
        # populate the Competitions and Archers Collections
        qualifications = list(qualifications)
        for i in range(0, len(qualifications)):
            qualifications[i]['competition_id'] = self.db[CC].find_one({
                "_id": qualifications[i]['competition_id']
            })['wa_id']
            qualifications[i]['archer_ids'] = [self.db[AC].find_one({
                "_id": aid
            })['wa_id'] for aid in qualifications[i]['archer_ids']]
        return qualifications

    def get_arrows_within_competition(self, competition_wa_id):
        """Return the flat list of all individual arrow scores shot during a
        competition's qualification rounds."""
        competition_id = self.db[CC].find_one({"wa_id": competition_wa_id})['_id']
        qualifications = self.db[QC].find({"competition_id": competition_id, "is_team": 0})
        qualification_ids = [q['_id'] for q in qualifications]
        qualification_arrows = self.db[QAC].find({"qualification_id": {"$in": qualification_ids}})
        qualification_arrows = [qa['arrows'] for qa in qualification_arrows]
        arrows = []
        for arrows_list in qualification_arrows:
            arrows.extend(arrows_list)
        return arrows

    def get_competitions(self):
        """Return every competition document as a list."""
        competitions = self.db[CC].find()
        return list(competitions)

    def get_individual_qualification_scores_within_competition(self, competition_wa_id):
        """Return all individual qualification documents of one competition."""
        competition_id = self.db[CC].find_one({"wa_id": competition_wa_id})['_id']
        qualifications = self.db[QC].find({"competition_id": competition_id, "is_team": 0})
        return list(qualifications)

    def get_maximum_individual_qualification_score(self):
        """Return the best individual qualification per category.

        :return: dict with keys "male" (recurve men, 'RM') and "female"
            (recurve women, 'RW'); competition/archer references are replaced
            by the full documents
        """
        male = self.db[QC].find({'is_team': 0, 'category': 'RM'}).sort([('score', -1)]).limit(1)[0]
        female = self.db[QC].find({'is_team': 0, 'category': 'RW'}).sort([('score', -1)]).limit(1)[0]
        # populate the Competitions And Archers Collections
        male['competition_id'] = self.db[CC].find_one({'_id': male['competition_id']})
        female['competition_id'] = self.db[CC].find_one({'_id': female['competition_id']})
        male['archer_ids'] = self.db[AC].find_one({'_id': male['archer_ids'][0]})
        female['archer_ids'] = self.db[AC].find_one({'_id': female['archer_ids'][0]})
        return {
            "male": male,
            "female": female,
        }

    def get_archer_results(self, archer_wa_id):
        """Return an archer document plus all qualifications he took part in,
        with each qualification's competition reference populated."""
        archer = self.db[AC].find_one({"wa_id": archer_wa_id})
        qualifications = self.db[QC].find({"archer_ids": archer['_id']})
        qualifications = list(qualifications)
        # populate the Competitions Collection
        for i in range(0, len(qualifications)):
            qualifications[i]['competition_id'] = self.db[CC].find_one({'_id': qualifications[i]['competition_id']})
        return {
            "archer": archer,
            "qualifications": qualifications,
        }

    def get_country_results(self, NOC):
        """Return every qualification that involved at least one archer of the
        given National Olympic Committee code, with archers populated.

        The $unwind/$lookup pipeline yields one row per (qualification,
        archer) pair, so the rows are re-merged into unique qualifications
        afterwards.
        """
        qualifications = self.db[QC].aggregate([
            {
                "$unwind": "$archer_ids",
            },
            {
                "$lookup":
                    {
                        "from": AC,
                        "localField": "archer_ids",
                        "foreignField": "_id",
                        "as": "archers",
                    },
            },
            {
                "$match": {"{0}.NOC".format(AC): NOC},
            },
        ])
        # The Mongo Request above does return a little broken results
        # So that's why we have to adjust and combine them a bit
        qualifications = list(qualifications)
        unique_qualifications = list({q['_id']: q for q in qualifications}.values())
        for q in qualifications:
            for i in range(0, len(unique_qualifications)):
                if q['_id'] == unique_qualifications[i]['_id']:
                    # merge this row's archer into the unique qualification,
                    # skipping archers that are already present
                    for archer in unique_qualifications[i]['archers']:
                        if archer['wa_id'] == q['archers'][0]['wa_id']:
                            break
                    else:
                        unique_qualifications[i]['archers'].append(q['archers'][0])
        # For each of unique qualifications,
        # populate the Competitions Collection
        # and delete the unnecessary "archer_ids" field
        for i in range(0, len(unique_qualifications)):
            unique_qualifications[i]['competition_id'] = self.db[CC].find_one({"_id": unique_qualifications[i]['competition_id']})
            try:
                del unique_qualifications[i]['archer_ids']
            except KeyError:
                pass
        return unique_qualifications
| Tayum/di0d | courses/database_discipline/course3_term2/coursework/mongomanage.py | mongomanage.py | py | 7,237 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "wa_api.WA_API",
"line_number": 46,
"usage_type": "call"
}
] |
2266175166 | import dash_core_components as dcc
import dash_html_components as html
from dash_devices.dependencies import Input, Output, State
import numpy as np
import pandas as pd
import sys
import re
import num2words
from app import app
from apps import user_mode
# Make the project checkout importable (developer-machine absolute path).
sys.path.insert(1, 'C:\\Users\\Antoine\\CloudStation\\EPFL\\Master 4\\Master project\\masterProject')
from UIDatasheet import UIDatasheet
import ui_finder
# Metadata (label, components, ...) for every UI screenshot of the VINS dataset.
ui_path = 'C:\\Users\\Antoine\\CloudStation\\EPFL\\Master 4\\Master project\\Dataset\\VINS Dataset\\ui_list.json'
ui_df = pd.read_json(ui_path)
# Module-level state shared between the callbacks below (dash_devices
# callbacks are plain functions, so pagination/selection state lives here).
current_page = 0  # 0-based index of the result page currently shown
max_page = 0  # number of result pages for the current search (0 = no search)
filtered_ui = []  # file names of the UIs matching the current filters
image_selected = ''  # file name of the grid image last clicked
images = ['', '', '', '', '', '', '', '']  # selection slots sent to the user view
# "God mode" description tab: shows the user's descriptions, the AI-search
# controls, the manual filter widgets, and the paged grid of result images.
description_layout = html.Div([
    html.Div([
        html.H3('Descriptions'),
        html.P(id='content-description', className='ow'),
        html.Button('Copy description', id='button-copy-description', n_clicks=0),
    ], style={'margin': '15px'}),
    html.Div([
        html.H3('AI search'),
    ], style={'margin': '15px'}),
    # AI search inputs: free text, top-k, and the run button.
    html.Div([
        html.Div([
            dcc.Textarea(id='content-text-value', value='', cols=70, placeholder='Text value'),
        ], style={'flex-grow': '1'}),
        html.Div([
            dcc.Input(id='input-topk', type='number', value=16, min=1, placeholder='Top-k value'),
        ], style={'flex-grow': '1'}),
        html.Div([
            html.Button('Run', id='button-run-ai', n_clicks=0),
        ], style={'flex-grow': '1'}),
    ], style={'margin': '15px', 'display': 'flex'}),
    html.Div([
        html.P(id='content-info-retrieved'),
    ], style={'margin': '15px'}),
    # Manual filter row: label, component counts, page indicator, map, text.
    html.Div([
        html.Div([
            html.H3('Label'),
            dcc.Dropdown(
                id='dropdown-label',
                options=[
                    {'label': 'Bare', 'value': 'bare'},
                    {'label': 'Shop', 'value': 'shop'},
                    {'label': 'Form', 'value': 'form'},
                    {'label': 'Gallery', 'value': 'gallery'},
                    {'label': 'List', 'value': 'list'},
                    {'label': 'Login', 'value': 'login'},
                    {'label': 'Map', 'value': 'map'},
                    {'label': 'Menu', 'value': 'menu'},
                    {'label': 'Modal', 'value': 'modal'},
                    {'label': 'News', 'value': 'news'},
                    {'label': 'Profile', 'value': 'profile'},
                    {'label': 'Search', 'value': 'search'},
                    {'label': 'Settings', 'value': 'settings'},
                    {'label': 'Terms', 'value': 'terms'},
                    {'label': 'Tutorial', 'value': 'tutorial'},
                    {'label': 'Other', 'value': 'other'},
                ],
            ),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Number of buttons'),
            dcc.Input(id='input-buttons', type='number', min=0),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Number of input fields'),
            dcc.Input(id='input-input-fields', type='number', min=0),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Page indicator'),
            dcc.Dropdown(
                id='dropdown-page-indicator',
                options=[
                    {'label': 'Yes', 'value': 'yes'},
                    {'label': 'No', 'value': 'no'},
                ],
            ),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Map'),
            dcc.Dropdown(
                id='dropdown-map',
                options=[
                    {'label': 'Yes', 'value': 'yes'},
                    {'label': 'No', 'value': 'no'},
                ],
            ),
        ], style={'flex-grow': '1', 'margin': '15px'}),
        html.Div([
            html.H3('Text filter'),
            dcc.Textarea(id='content-text-filter', value='', rows=7),
        ], style={'flex-grow': '2', 'margin': '15px'}),
    ], style={'display': 'flex'}),
    html.Div([
        html.Button('Clear filters', id='button-clear-filters', n_clicks=0, style={'margin': '15px'}),
        html.Button('Search', id='button-search', n_clicks=0, style={'margin': '15px'}),
    ], style={'margin-bottom': '10px',
              'textAlign': 'center',
              'margin': 'auto'}),
    # Search progress bar and status line updated by the search callback.
    html.Div([
        html.Progress(id='progress-search', value='0', max=100, style={'width': '30%'}),
        html.P(id='content-search'),
    ], style={'margin-bottom': '10px',
              'textAlign': 'center',
              'margin': 'auto'}),
    html.Div([
        # Left column: the four selection slots plus the "send" button.
        html.Div([
            html.Div([
                html.H3('Image selections'),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select0', style={'width': '20%'}),
                    html.Button('Select 1st image', id='button-select0', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select0'),
                    html.Button('Clear', id='button-clear0', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select1', style={'width': '20%'}),
                    html.Button('Select 2nd image', id='button-select1', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select1'),
                    html.Button('Clear', id='button-clear1', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select2', style={'width': '20%'}),
                    html.Button('Select 3rd image', id='button-select2', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select2'),
                    html.Button('Clear', id='button-clear2', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                html.Div([
                    html.Img(src=app.get_asset_url('background.png'), id='img_select3', style={'width': '20%'}),
                    html.Button('Select 4th image', id='button-select3', n_clicks=0, style={'height': '10%'}),
                    html.P('None', id='content-select3'),
                    html.Button('Clear', id='button-clear3', n_clicks=0, style={'height': '10%'}),
                ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 5th image', id='button-select4', n_clicks=0),
                #     html.P('None', id='content-select4')
                # ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 6th image', id='button-select5', n_clicks=0),
                #     html.P('None', id='content-select5')
                # ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 7th image', id='button-select6', n_clicks=0),
                #     html.P('None', id='content-select6')
                # ], style={'display': 'flex', 'margin': '15px'}),
                # html.Div([
                #     html.Button('Select 8th image', id='button-select7', n_clicks=0),
                #     html.P('None', id='content-select7')
                # ], style={'display': 'flex', 'margin': '15px'}),
                html.Button('Send images', id='button-send-images', n_clicks=0),
            ], style={'margin': '15px'})
        ], style={'float': 'left', 'width': '20%'}),
        # Right column: 2x4 result grid (img_god0..img_god7) with paging.
        html.Div([
            html.Div(
                [
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god0', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god1', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god2', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god3', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                ], style={'display': 'flex'}
            ),
            html.Div(
                [
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god4', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god5', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god6', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                    html.Div([
                        html.Img(src=app.get_asset_url('background.png'), id='img_god7', className='imageUI',
                                 n_clicks_timestamp=-1),
                    ], style={'flex-grow': '1', 'margin': '15px'}),
                ], style={'display': 'flex'}
            ),
            html.Div(
                [
                    html.Div([
                        html.Button('Previous page', id='button-previous', n_clicks=0),
                    ], style={'flex-grow': '1', 'textAlign': 'right', 'margin': '15px'}),
                    html.Div([
                        html.Button('Next page', id='button-next-page', n_clicks=0),
                    ], style={'flex-grow': '1', 'textAlign': 'left', 'margin': '15px'}),
                ], style={'display': 'flex'}
            ),
            html.Div(
                [
                    html.P('Page ... out of ...', id='content-page-number'),
                ], style={'textAlign': 'center', 'margin': 'auto'}
            ),
        ], style={'float': 'right', 'width': '80%'}),
    ]),
])
# "God mode" commands tab: shows what the user received and lets the operator
# push or clear an error banner in the user view.
commands_layout = html.Div([
    html.Div([
        html.H3('Selected Images'),
        html.P(id='content-image', className='ow'),
    ], style={'margin': '15px'}),
    html.Div([
        html.H3('Commands'),
        html.P(id='content-command', className='ow'),
    ], style={'margin': '15px'}),
    html.Div(
        [
            html.Button('Send error message', id='button-send-error', n_clicks=0, style={'margin': '15px'}),
            html.Button('Clear error message', id='button-clear-error', n_clicks=0, style={'margin': '15px'}),
        ],
        style={'margin-bottom': '10px',
               'textAlign': 'center',
               'width': '220px',
               'margin': 'auto'}
    ),
])
# Top-level page layout: a two-tab view combining the layouts defined above.
layout = html.Div([
    dcc.Tabs(id='tabs-god-mode', value='tab-description', children=[
        dcc.Tab(label='User\'s descriptions', value='tab-description', children=description_layout),
        dcc.Tab(label='User\'s commands', value='tab-commands', children=commands_layout),
    ])
])
@app.callback(None,
              [Input('button-send-error', 'n_clicks')])
def send_error(n_clicks):
    """Push a canned "request too complex" error banner to the user view."""
    if not n_clicks:
        return
    app.push_mods({'content-error': {'children': 'Error: the requested command is too complex for the system, please modify your request'}})
@app.callback(None,
              [Input('button-clear-error', 'n_clicks')])
def send_error(n_clicks):
    """Blank out the user-facing error banner.

    NOTE(review): the name intentionally stays `send_error` as in the
    original; Dash registers the callback at decoration time, so the
    module-level shadowing is harmless, but `clear_error` would be clearer.
    """
    if not n_clicks:
        return
    app.push_mods({'content-error': {'children': ''}})
@app.callback(None,
              [Input('button-select0', 'n_clicks')])
def select_image(n_clicks):
    """Copy the currently highlighted grid image into selection slot 0."""
    if not n_clicks:
        return
    if not image_selected:
        return  # nothing has been clicked in the result grid yet
    images[0] = image_selected
    app.push_mods({'content-select0': {'children': image_selected}})
    app.push_mods({'img_select0': {'src': app.get_asset_url('wireframes/' + images[0])}})
@app.callback(None,
              [Input('button-select1', 'n_clicks')])
def select_image(n_clicks):
    """Copy the currently highlighted grid image into selection slot 1."""
    if not n_clicks:
        return
    if not image_selected:
        return  # nothing has been clicked in the result grid yet
    images[1] = image_selected
    app.push_mods({'content-select1': {'children': image_selected}})
    app.push_mods({'img_select1': {'src': app.get_asset_url('wireframes/' + images[1])}})
@app.callback(None,
              [Input('button-select2', 'n_clicks')])
def select_image(n_clicks):
    """Copy the currently highlighted grid image into selection slot 2."""
    if not n_clicks:
        return
    if not image_selected:
        return  # nothing has been clicked in the result grid yet
    images[2] = image_selected
    app.push_mods({'content-select2': {'children': image_selected}})
    app.push_mods({'img_select2': {'src': app.get_asset_url('wireframes/' + images[2])}})
@app.callback(None,
              [Input('button-select3', 'n_clicks')])
def select_image(n_clicks):
    """Copy the currently highlighted grid image into selection slot 3."""
    if not n_clicks:
        return
    if not image_selected:
        return  # nothing has been clicked in the result grid yet
    images[3] = image_selected
    app.push_mods({'content-select3': {'children': image_selected}})
    app.push_mods({'img_select3': {'src': app.get_asset_url('wireframes/' + images[3])}})
@app.callback(None,
              [Input('button-clear0', 'n_clicks')])
def select_image(n_clicks):
    """Empty selection slot 0 and restore its placeholder thumbnail."""
    if not n_clicks:
        return
    images[0] = ''
    app.push_mods({'content-select0': {'children': 'None'}})
    app.push_mods({'img_select0': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-clear1', 'n_clicks')])
def select_image(n_clicks):
    """Empty selection slot 1 and restore its placeholder thumbnail."""
    if not n_clicks:
        return
    images[1] = ''
    app.push_mods({'content-select1': {'children': 'None'}})
    app.push_mods({'img_select1': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-clear2', 'n_clicks')])
def select_image(n_clicks):
    """Empty selection slot 2 and restore its placeholder thumbnail."""
    if not n_clicks:
        return
    images[2] = ''
    app.push_mods({'content-select2': {'children': 'None'}})
    app.push_mods({'img_select2': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-clear3', 'n_clicks')])
def select_image(n_clicks):
    """Empty selection slot 3 and restore its placeholder thumbnail."""
    if not n_clicks:
        return
    images[3] = ''
    app.push_mods({'content-select3': {'children': 'None'}})
    app.push_mods({'img_select3': {'src': app.get_asset_url('background.png')}})
@app.callback(None,
              [Input('button-copy-description', 'n_clicks')])
def copy_description(n_clicks):
    """Copy the user's most recent description into the AI-search text box."""
    if not n_clicks:
        return
    descriptions = user_mode.data.descriptions
    if descriptions:
        app.push_mods({'content-text-value': {'value': descriptions[-1]}})
@app.callback(None,
              [Input('img_god0', 'n_clicks_timestamp'),
               Input('img_god1', 'n_clicks_timestamp'),
               Input('img_god2', 'n_clicks_timestamp'),
               Input('img_god3', 'n_clicks_timestamp'),
               Input('img_god4', 'n_clicks_timestamp'),
               Input('img_god5', 'n_clicks_timestamp'),
               Input('img_god6', 'n_clicks_timestamp'),
               Input('img_god7', 'n_clicks_timestamp')])
def select_image(n_clicks_timestamp0, n_clicks_timestamp1, n_clicks_timestamp2, n_clicks_timestamp3,
                 n_clicks_timestamp4, n_clicks_timestamp5, n_clicks_timestamp6, n_clicks_timestamp7):
    """Remember the grid image the operator last clicked and highlight it.

    The most recent click is the tile with the largest click timestamp;
    a value of -1 means the tile has never been clicked.
    """
    if not filtered_ui:
        return
    global image_selected
    stamps = [n_clicks_timestamp0, n_clicks_timestamp1, n_clicks_timestamp2, n_clicks_timestamp3,
              n_clicks_timestamp4, n_clicks_timestamp5, n_clicks_timestamp6, n_clicks_timestamp7]
    clicked = int(np.argmax(stamps))  # index of the most recently clicked tile
    if sum(stamps) == -1 * len(stamps):
        return  # no tile has ever been clicked (all timestamps still -1)
    offset = clicked + current_page * 8
    if offset >= len(filtered_ui):
        return  # clicked an empty tile on a partially filled last page
    image_selected = filtered_ui[offset]
    # un-highlight every tile, then highlight the clicked one
    for idx in range(8):
        app.push_mods({'img_god{}'.format(idx): {'className': 'imageUI'}})
    app.push_mods({'img_god{}'.format(clicked): {'className': 'imageUIselected'}})
@app.callback(None,
              [Input('button-send-images', 'n_clicks')])
def send_images(n_clicks):
    """Push the four selection slots to the user view and flag them as sent.

    Empty slots hide the corresponding user-side image; filled slots show
    the wireframe stored in that slot.  Only slots 0-3 are wired up (the
    user view exposes img0..img3).
    """
    if not n_clicks:
        return
    for idx in range(4):
        target = 'img{}'.format(idx)
        if images[idx]:
            app.push_mods({target: {'hidden': False}})
            app.push_mods({target: {'src': app.get_asset_url('wireframes/' + images[idx])}})
        else:
            app.push_mods({target: {'hidden': True}})
    user_mode.data.images_selected = images
    user_mode.data.image_sent = True
#
# @app.callback(None,
# [Input('tabs-god-mode', 'value'), ])
# def render_content(tab):
# if tab == 'tab-description':
# app.push_mods({'tabs-layout': {'children': [description_layout]}})
# description_content = ''
# if user_mode.data.descriptions:
# nb_desc = len(user_mode.data.descriptions)
# for i in range(nb_desc):
# description_content += '{}: {} <br>'.format(i + 1, user_mode.data.descriptions[i])
# app.push_mods({'content-description': {'children': DangerouslySetInnerHTML(description_content)}})
# elif tab == 'tab-commands':
# app.push_mods({'tabs-layout': {'children': [commands_layout]}})
# image_content = ''
# if user_mode.data.images:
# nb_desc = len(user_mode.data.images)
# for i in range(nb_desc):
# image_content += '{}: {} <br>'.format(i + 1, user_mode.data.images[i].replace('.jpg', ''))
# app.push_mods({'content-image': {'children': DangerouslySetInnerHTML(image_content)}})
#
# command_content = ''
# if user_mode.data.commands:
# nb_desc = len(user_mode.data.commands)
# for i in range(nb_desc):
# command_content += '{}: {} <br>'. format(i+1, user_mode.data.commands[i])
# app.push_mods({'content-command': {'children': DangerouslySetInnerHTML(command_content)}})
@app.callback(None,
              [Input('button-previous', 'n_clicks')])
def control_previous(n_clicks_previous):
    """Show the previous page of filtered UI thumbnails (8 tiles per page).

    Fixed to mirror `control_next`: every tile is first reset to the
    placeholder, and only indices that exist in `filtered_ui` are filled.
    The original version indexed all eight tiles unconditionally, which
    could raise IndexError when the result set shrank (e.g. after a new,
    narrower search) while a high page index was still current.
    """
    global current_page
    global max_page
    global filtered_ui
    if max_page:
        if n_clicks_previous:
            if current_page > 0:
                current_page -= 1
                # blank all tiles first so stale thumbnails never survive
                for idx in range(8):
                    app.push_mods({'img_god{}'.format(idx): {'src': app.get_asset_url('background.png')}})
                for idx in range(8):
                    offset = idx + current_page * 8
                    if offset < len(filtered_ui):
                        app.push_mods(
                            {'img_god{}'.format(idx): {'src': app.get_asset_url('wireframes/' + filtered_ui[offset])}})
                app.push_mods(
                    {'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
@app.callback(None,
              [Input('button-clear-filters', 'n_clicks')])
def clear_filters(n_clicks):
    """Reset every search-filter widget and forget the current result set."""
    if not n_clicks:
        return
    for widget in ('dropdown-label', 'input-buttons', 'input-input-fields',
                   'dropdown-page-indicator', 'dropdown-map', 'content-text-filter'):
        app.push_mods({widget: {'value': ''}})
    global max_page
    max_page = 0
    global filtered_ui
    filtered_ui = []
    # NOTE(review): current_page is not reset here -- presumably the search
    # callback does that; verify, otherwise a stale page index survives.
@app.callback(None,
              [Input('button-next-page', 'n_clicks')])
def control_next(n_clicks_next):
    """Advance to the next page of filtered UI thumbnails (8 tiles per page).

    Tiles are first reset to the placeholder image; only indices that exist
    in `filtered_ui` are filled, so the last (possibly partial) page leaves
    its trailing tiles blank.
    """
    global current_page
    global max_page
    global filtered_ui
    if not max_page or not n_clicks_next:
        return
    if current_page >= max_page - 1:
        return  # already on the last page
    current_page += 1
    for idx in range(8):
        app.push_mods({'img_god{}'.format(idx): {'src': app.get_asset_url('background.png')}})
    for idx in range(8):
        offset = idx + current_page * 8
        if offset < len(filtered_ui):
            app.push_mods(
                {'img_god{}'.format(idx): {'src': app.get_asset_url('wireframes/' + filtered_ui[offset])}})
    app.push_mods(
        {'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
@app.callback(None,
[Input('button-search', 'n_clicks')],
[State('dropdown-label', 'value'),
State('input-buttons', 'value'),
State('input-input-fields', 'value'),
State('dropdown-page-indicator', 'value'),
State('dropdown-map', 'value'),
State('content-text-filter', 'value')])
def filter_ui(n_clicks, label, nb_buttons, nb_input, page, map_, text_filter):
if n_clicks:
index_list = list(range(len(ui_df)))
global filtered_ui
filtered_ui = []
if label:
drop = []
count = 0
app.push_mods({'content-search': {'children': 'Label filtering...'}})
for index in index_list:
progress = int((count / len(index_list)) * 100)
count += 1
if progress % 10 == 0:
app.push_mods({'progress-search': {'value': str(progress)}})
if ui_df.iloc[index].label != label:
drop.append(index)
index_list = list(set(index_list) - set(drop))
app.push_mods({'progress-search': {'value': '100'}})
app.push_mods({'content-search': {'children': ''}})
if nb_buttons:
drop = []
count = 0
app.push_mods({'content-search': {'children': 'Button filtering...'}})
for index in index_list:
progress = int((count / len(index_list)) * 100)
count += 1
if progress % 10 == 0:
app.push_mods({'progress-search': {'value': str(progress)}})
components = pd.DataFrame.from_dict(ui_df.iloc[index].components)
nb_buttons_ui = components.type.str.count('TextButton').sum()
if nb_buttons_ui != nb_buttons:
drop.append(index)
index_list = list(set(index_list) - set(drop))
app.push_mods({'progress-search': {'value': '100'}})
app.push_mods({'content-search': {'children': ''}})
if nb_input:
drop = []
count = 0
app.push_mods({'content-search': {'children': 'Input fields filtering...'}})
for index in index_list:
progress = int((count / len(index_list)) * 100)
count += 1
if progress % 10 == 0:
app.push_mods({'progress-search': {'value': str(progress)}})
components = pd.DataFrame.from_dict(ui_df.iloc[index].components)
nb_input_ui = components.type.str.count('EditText').sum()
if nb_input_ui != nb_input:
drop.append(index)
index_list = list(set(index_list) - set(drop))
app.push_mods({'progress-search': {'value': '100'}})
app.push_mods({'content-search': {'children': ''}})
if page:
drop = []
count = 0
app.push_mods({'content-search': {'children': 'Page indicator filtering...'}})
for index in index_list:
progress = int((count / len(index_list)) * 100)
count += 1
if progress % 10 == 0:
app.push_mods({'progress-search': {'value': str(progress)}})
components = pd.DataFrame.from_dict(ui_df.iloc[index].components)
nb_page_ui = components.type.str.count('PageIndicator').sum()
if page == 'yes':
if nb_page_ui == 0:
drop.append(index)
if page == 'no':
if nb_page_ui > 0:
drop.append(index)
index_list = list(set(index_list) - set(drop))
app.push_mods({'progress-search': {'value': '100'}})
app.push_mods({'content-search': {'children': ''}})
if map_:
drop = []
count = 0
app.push_mods({'content-search': {'children': 'Map filtering...'}})
for index in index_list:
progress = int((count / len(index_list)) * 100)
count += 1
if progress % 10 == 0:
app.push_mods({'progress-search': {'value': str(progress)}})
components = pd.DataFrame.from_dict(ui_df.iloc[index].components)
nb_map_ui = components.type.str.count('Map').sum()
if map_ == 'yes':
if nb_map_ui == 0:
drop.append(index)
if map_ == 'no':
if nb_map_ui > 0:
drop.append(index)
index_list = list(set(index_list) - set(drop))
app.push_mods({'progress-search': {'value': '100'}})
app.push_mods({'content-search': {'children': ''}})
if text_filter:
drop = []
count = 0
app.push_mods({'content-search': {'children': 'Text filtering...'}})
text_filter_words = text_filter.lower().split()
for index in index_list:
progress = int((count / len(index_list)) * 100)
count += 1
if progress % 10 == 0:
app.push_mods({'progress-search': {'value': str(progress)}})
components = pd.DataFrame.from_dict(ui_df.iloc[index].components)
text_ui = ' '.join(components.text.tolist()).lower()
if not all(text in text_ui for text in text_filter_words):
drop.append(index)
index_list = list(set(index_list) - set(drop))
app.push_mods({'progress-search': {'value': '100'}})
app.push_mods({'content-search': {'children': ''}})
for index in index_list:
filtered_ui.append(ui_df.iloc[index, 0] + '.jpg')
app.push_mods({'img_god0': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god1': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god2': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god3': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god4': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god5': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god6': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god7': {'src': app.get_asset_url('background.png')}})
if len(filtered_ui) == 0:
app.push_mods({'content-search': {'children': 'No results found for the given filters.'}})
if len(filtered_ui) > 0:
app.push_mods({'img_god0': {'src': app.get_asset_url('wireframes/' + filtered_ui[0])}})
if len(filtered_ui) > 1:
app.push_mods({'img_god1': {'src': app.get_asset_url('wireframes/' + filtered_ui[1])}})
if len(filtered_ui) > 2:
app.push_mods({'img_god2': {'src': app.get_asset_url('wireframes/' + filtered_ui[2])}})
if len(filtered_ui) > 3:
app.push_mods({'img_god3': {'src': app.get_asset_url('wireframes/' + filtered_ui[3])}})
if len(filtered_ui) > 4:
app.push_mods({'img_god4': {'src': app.get_asset_url('wireframes/' + filtered_ui[4])}})
if len(filtered_ui) > 5:
app.push_mods({'img_god5': {'src': app.get_asset_url('wireframes/' + filtered_ui[5])}})
if len(filtered_ui) > 6:
app.push_mods({'img_god6': {'src': app.get_asset_url('wireframes/' + filtered_ui[6])}})
if len(filtered_ui) > 7:
app.push_mods({'img_god7': {'src': app.get_asset_url('wireframes/' + filtered_ui[7])}})
page_size = 8
global max_page
global current_page
current_page = 0
max_page = int((len(filtered_ui) + page_size - 1) / page_size)
if max_page == 0:
max_page = 1
app.push_mods({'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
@app.callback(None,
[Input('button-run-ai', 'n_clicks')],
[State('content-text-value', 'value'),
State('input-topk', 'value')])
def run_ai(n_clicks, description, k):
if n_clicks and description and k:
app.push_mods({'content-info-retrieved': {'children': 'Running...'}})
description = re.sub(r"(\d+)", lambda x: num2words.num2words(int(x.group(0))), description)
ui_datasheet = UIDatasheet()
ui_datasheet.description = description
ui_finder.get_label(ui_datasheet)
ui_finder.get_components(ui_datasheet)
info = ui_finder.print_info(ui_datasheet)
app.push_mods({'content-info-retrieved': {'children': info}})
wf_list = ui_finder.search_wf(ui_datasheet, k)
global filtered_ui
filtered_ui = []
for wf in wf_list:
filtered_ui.append(wf + '.jpg')
app.push_mods({'img_god0': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god1': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god2': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god3': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god4': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god5': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god6': {'src': app.get_asset_url('background.png')}})
app.push_mods({'img_god7': {'src': app.get_asset_url('background.png')}})
if len(filtered_ui) == 0:
app.push_mods({'content-search': {'children': 'No results found for the given filters.'}})
if len(filtered_ui) > 0:
app.push_mods({'img_god0': {'src': app.get_asset_url('wireframes/' + filtered_ui[0])}})
if len(filtered_ui) > 1:
app.push_mods({'img_god1': {'src': app.get_asset_url('wireframes/' + filtered_ui[1])}})
if len(filtered_ui) > 2:
app.push_mods({'img_god2': {'src': app.get_asset_url('wireframes/' + filtered_ui[2])}})
if len(filtered_ui) > 3:
app.push_mods({'img_god3': {'src': app.get_asset_url('wireframes/' + filtered_ui[3])}})
if len(filtered_ui) > 4:
app.push_mods({'img_god4': {'src': app.get_asset_url('wireframes/' + filtered_ui[4])}})
if len(filtered_ui) > 5:
app.push_mods({'img_god5': {'src': app.get_asset_url('wireframes/' + filtered_ui[5])}})
if len(filtered_ui) > 6:
app.push_mods({'img_god6': {'src': app.get_asset_url('wireframes/' + filtered_ui[6])}})
if len(filtered_ui) > 7:
app.push_mods({'img_god7': {'src': app.get_asset_url('wireframes/' + filtered_ui[7])}})
page_size = 8
global max_page
global current_page
current_page = 0
max_page = int((len(filtered_ui) + page_size - 1) / page_size)
if max_page == 0:
max_page = 1
app.push_mods({'content-page-number': {'children': 'Page {} out of {}'.format(current_page + 1, max_page)}})
| antoine-zurcher/master-project | prototype/apps/god_mode.py | god_mode.py | py | 37,217 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_json",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dash_html_components.D... |
74879223784 | import pandas as pd
import streamlit as st
import numpy as np
from common import session_manager
ssm = session_manager.st_session()
# ssm.write_session_info()
st.title("表を描画する")
# データフレームを元に表を表示する
df = pd.DataFrame({
'first column': [1, 2, 3, 4],
'second column': [10, 20, 30, 40]
})
st.write("write関数")
st.write(df)
st.write("table関数")
st.table(df)
st.write("dataframe関数、writeとほとんど同じ?")
st.dataframe(df)
#
df2 = np.random.randn(10, 20)
st.dataframe(df2)
#
dataframe = pd.DataFrame(
np.random.randn(10, 20),
columns=('col %d' % i for i in range(20))) # col名を設定
# highlight_max(最大値にハイライトする)、axis=0(インデックス(列?)の対して評価する、1とすると行になる)
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.io.formats.style.Styler.highlight_max.html#pandas.io.formats.style.Styler.highlight_max
st.dataframe(dataframe.style.highlight_max(axis=1,color="red"))
#
st.table(dataframe.style.highlight_max(axis=1,color="red"))
| nishimu555/streamlit-lab | lab2/app/pages/02_write_and_table.py | 02_write_and_table.py | py | 1,107 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "common.session_manager.st_session",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "common.session_manager",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "streamlit.title",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": ... |
4079175993 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from deconstruct_lc import read_config
from deconstruct_lc import tools_fasta
from deconstruct_lc import tools_lc
from deconstruct_lc.scores.norm_score import NormScore
class RemovePfam(object):
def __init__(self):
config = read_config.read_config()
self.data_dp = os.path.join(config['fps']['data_dp'])
self.puncta = os.path.join(self.data_dp, 'experiment', 'puncta_uni.fasta')
self.nopuncta = os.path.join(self.data_dp, 'experiment', 'nopuncta_uni.fasta')
self.pfam_puncta = os.path.join(self.data_dp, 'experiment', 'puncta_pfam.tsv')
self.pfam_nopuncta = os.path.join(self.data_dp, 'experiment', 'nopuncta_pfam.tsv')
self.k = 6
self.lce = 1.6
self.lca = 'SGEQAPDTNKR'
self.lc_m = 0.06744064704548541
self.lc_b = 16.5
def run_percent_pfam(self):
puncta_perc = os.path.join(self.data_dp, 'experiment', 'puncta_percent_pfam.tsv')
self.percent_pfam(self.puncta, self.pfam_puncta, puncta_perc)
nopuncta_perc = os.path.join(self.data_dp, 'experiment', 'nopuncta_percent_pfam.tsv')
self.percent_pfam(self.nopuncta, self.pfam_nopuncta, nopuncta_perc)
def percent_pfam(self, fasta_fp, pfam_fp, fpo):
df = pd.read_csv(pfam_fp, sep='\t')
pids, seqs = tools_fasta.fasta_to_id_seq(fasta_fp)
frac_pfam = []
for id, seq in zip(pids, seqs):
ndf = df[df['uniprot_acc'] == id]
ndf = ndf.sort_values(by='seq_start')
segmented = self.segment_seq(seq, ndf)
len_seg = 0
for seg in segmented:
len_seg += len(seg)
frac_pfam.append(float(len(seq) - len_seg)/float(len(seq)))
ns = NormScore()
scores = ns.lc_norm_score(seqs)
df_out = pd.DataFrame({'Uniprot ID': pids, 'LC Score': scores,
'Pfam Fraction': frac_pfam}, columns=['Uniprot ID', 'LC Score', 'Pfam Fraction'])
df_out = df_out.sort_values(by='LC Score', ascending=False)
df_out.to_csv(fpo, sep='\t')
print(np.mean(frac_pfam))
def run_with_pfam(self):
puncta_out = os.path.join(self.data_dp, 'experiment', 'puncta_nopfam.tsv')
self.with_pfam(self.puncta, self.pfam_puncta, puncta_out)
nopuncta_out = os.path.join(self.data_dp, 'experiment', 'nopuncta_nopfam.tsv')
self.with_pfam(self.nopuncta, self.pfam_nopuncta, nopuncta_out)
def with_pfam(self, fasta_fp, pfam_fp, fpo):
"""
How many proteins in the set have pfam domains?
What is the fraction occupied by pfam domains?"""
df = pd.read_csv(pfam_fp, sep='\t')
pfam_ids = list(set(df['uniprot_acc']))
pids, seqs = tools_fasta.fasta_to_id_seq(fasta_fp)
print(len(pids))
nopfam_ids = list(set(pids) - set(pfam_ids))
nopfam_seqs = []
for pid, seq in zip(pids, seqs):
if pid in nopfam_ids:
nopfam_seqs.append(seq)
ns = NormScore()
scores = ns.lc_norm_score(nopfam_seqs)
df_out = pd.DataFrame({'UniProt ID': nopfam_ids, 'LC Score': scores}, columns=['UniProt ID', 'LC Score'])
df_out = df_out.sort_values(by='LC Score', ascending=False)
df_out.to_csv(fpo, sep='\t')
def fetch_score(self, df, pids):
scores = []
for pid in pids:
df = df[df['Protein ID'] == pid]
scores.append(list(df['LC Score'])[0])
return scores
def score_in_pfam(self):
ids, seqs = tools_fasta.fasta_to_id_seq(self.nopuncta)
df = pd.read_csv(self.pfam_nopuncta, sep='\t', index_col=0)
below = 0
above = 0
norm_scores = []
fl_norm_scores = []
for id, seq in zip(ids, seqs):
ndf = df[df['uniprot_acc'] == id]
ndf = ndf.sort_values(by='seq_start')
segmented = self.pfam_segments(seq, ndf)
total = 0
for item in segmented:
total += len(item)
if total >= 100:
above += 1
fl_score, fl_length = self.get_segment_scores([seq])
fl_norm = self.norm_function([fl_score], [fl_length])
raw_score, length = self.get_segment_scores(segmented)
norm_score = self.norm_function([raw_score], [length])
norm_scores.append(norm_score[0])
fl_norm_scores.append(fl_norm[0])
else:
below += 1
print(above)
print(below)
print(np.mean(norm_scores))
print(np.mean(fl_norm_scores))
print(np.median(norm_scores))
print(np.median(fl_norm_scores))
plt.hist(fl_norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Full length scores')
plt.hist(norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Inside Pfam scores')
plt.legend()
plt.show()
def run(self):
ids, seqs = tools_fasta.fasta_to_id_seq(self.puncta)
df = pd.read_csv(self.pfam_puncta, sep='\t', index_col=0)
new_seqs = []
below = 0
above = 0
norm_scores = []
fl_norm_scores = []
for id, seq in zip(ids, seqs):
ndf = df[df['uniprot_acc'] == id]
ndf = ndf.sort_values(by='seq_start')
segmented = self.segment_seq(seq, ndf)
total = 0
for item in segmented:
total += len(item)
if total >= 100:
above += 1
fl_score, fl_length = self.get_segment_scores([seq])
fl_norm = self.norm_function([fl_score], [fl_length])
raw_score, length = self.get_segment_scores(segmented)
norm_score = self.norm_function([raw_score], [length])
norm_scores.append(norm_score[0])
fl_norm_scores.append(fl_norm[0])
else:
below += 1
print(above)
print(below)
print(np.mean(norm_scores))
print(np.mean(fl_norm_scores))
print(np.median(norm_scores))
print(np.median(fl_norm_scores))
plt.hist(fl_norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Full length scores')
plt.hist(norm_scores, alpha=0.5, bins=20, range=(-100, 200), label='Outside Pfam scores')
plt.legend()
plt.show()
def pfam_segments(self, seq, df):
new_seq = []
for i, row in df.iterrows():
new_seq.append(seq[row['seq_start']: row['seq_end']+1])
return new_seq
def segment_seq(self, seq, df):
"""Given intervals, pull out the domain, and segment around it"""
start = 0
new_seq = []
for i, row in df.iterrows():
new_seq.append(seq[start:row['seq_start']])
start = row['seq_end'] + 1
new_seq.append(seq[start:])
return new_seq
def pfam_in_common(self):
df = pd.read_csv(self.pfam_puncta, sep='\t', index_col=0)
print(df['pfamA_acc'].value_counts())
def get_segment_scores(self, segment_seq):
total_motifs = 0
total_length = 0
for seq in segment_seq:
motifs = tools_lc.count_lc_motifs(seq, self.k, self.lca, self.lce)
total_motifs += motifs
total_length += len(seq)
return total_motifs, total_length
def norm_function(self, raw_scores, lengths):
norm_scores = []
for raw_score, length in zip(raw_scores, lengths):
norm_score = raw_score - ((self.lc_m * length) + self.lc_b)
norm_scores.append(norm_score)
return norm_scores
def main():
rp = RemovePfam()
rp.pfam_in_common()
if __name__ == '__main__':
main() | shellydeforte/deconstruct_lc | deconstruct_lc/remove_structure/remove_pfam.py | remove_pfam.py | py | 7,821 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "deconstruct_lc.read_config.read_config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "deconstruct_lc.read_config",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"ap... |
22277300373 | #!/usr/bin/env python
"""
https://www.codewars.com/kata/520b9d2ad5c005041100000f/python
"""
import ipdb
import pytest
"""
pig_it('Pig latin is cool') # igPay atinlay siay oolcay
pig_it('Hello world !') # elloHay orldway !
"""
# from codewars solution
def pig_it(text):
lst = text.split()
return ' '.join( [word[1:] + word[:1] + 'ay' if word.isalpha() else word for word in lst])
@pytest.mark.parametrize("input_, expected",
[('Pig latin is cool', 'igPay atinlay siay oolcay'),
('Hello world !', 'elloHay orldway !')])
def test_solution(input_, expected):
assert move_first_letter_of_each_word_to_the_end(input_) == expected
def move_first_letter_of_each_word_to_the_end(text: str)-> str:
return ' '.join([f"{i[1:]}{i[0]}ay" if i not in ['!',',','?'] else i for i in text.split() ])
# out_str= ''
# for i in s.split():
# #print(i)
# out_str += i[1:]
# return out_str
if __name__ == '__main__':
print(move_first_letter_of_each_word_to_the_end('Pig latin is cool'))
print(move_first_letter_of_each_word_to_the_end('Hello world !')) | romantix74/codewars | move_first_letter_word_end.py | move_first_letter_word_end.py | py | 1,144 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.mark.parametrize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 20,
"usage_type": "attribute"
}
] |
74612434345 | '''
概念:一种保存数据的格式
作用:可以保存本地的json文件,也可以将json串进行传输,通常将json称为轻量级的传输方式
json文件组成
{} 代表对象(字典)
[] 代表列表
: 代表键值对
, 分隔两个部分
'''
import json
jsonStr = '''{
"rate": "8.0",
"cover_x": 1400,
"title": "我是余欢水",
"url": "https:\/\/movie.douban.com\/subject\/33442331\/",
"playable": true,
"cover": "https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2574916002.jpg",
"id": "33442331",
"cover_y": 2139,
"is_new": false
}
'''
#将json格式的字符串转为python数据类型的对象
#将json格式数据转换为字典,方便取值
jsonData = json.loads(jsonStr)
print(jsonData)
print(type(jsonData))
print(type(jsonStr))
jsonData2 = {
"rate": "8.0",
"cover_x": 1400,
"title": "我是余欢水",
"url": "https:\/\/movie.douban.com\/subject\/33442331\/",
"playable": True,
"cover": "https://img3.doubanio.com\/view\/photo\/s_ratio_poster\/public\/p2574916002.jpg",
"id": "33442331",
"cover_y": 2139,
"is_new": False
}
#将python数据类型的对象转换为json格式的字符串
jsonStr2 = json.dumps(jsonData2)
print(jsonStr2)
print(type(jsonData2))
print(type(jsonStr2)) | hanyb-sudo/hanyb | 正则表达式与爬虫/3、爬虫/7、json数据解析.py | 7、json数据解析.py | py | 1,279 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 50,
"usage_type": "call"
}
] |
23234426744 | from motor import Motor
import keyboard, time, sys
from threading import Thread
import tkinter as tk
'''
#PINOS DE COMUNICAÇÃO EM BINÁRIO
#[0,1,2,3, 4, 5, 6, 7] - BITS DA PLACA
#[1,2,4,8,16,32,64,128] - SINAL DE COMUNICAÇÃO CORRESPONDENTE
'''
mx = Motor(4, 8)
my = Motor(16, 32)
dx, dy = 2700, 270
while(my.pos <= 2700):
mx.andar(dx, 1)
if(mx.exit == True):
break
my.andar(dy, 1)
if(my.exit == True):
break
mx.andar(dx, 0)
if(mx.exit == True):
break
my.andar(dy, 1)
if(my.exit == True):
break
print(mx.pos, my.pos)
print('cabou') | eduardof-rabelo/IC | main.py | main.py | py | 642 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "motor.Motor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "motor.Motor",
"line_number": 13,
"usage_type": "call"
}
] |
17310575825 | # MIT License
#
# Copyright (c) 2023 Andrey Zhdanov (rivitna)
# https://github.com/rivitna
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import io
import struct
import zlib
MARKER = b'\xFE\x09\x00\x00\x8D'
def decompress_data(data):
"""Decompress data"""
decompress = zlib.decompressobj(-zlib.MAX_WBITS)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated
#
# Main
#
if len(sys.argv) != 2:
print('Usage: '+ sys.argv[0] + ' filename')
sys.exit(0)
filename = sys.argv[1]
with io.open(filename, 'rb') as f:
file_data = f.read()
pos = 0
# Find configuration data
while True:
pos = file_data.find(MARKER, pos)
if pos < 0:
break
pos += len(MARKER)
# stsfld
if file_data[pos + 4] != 0x80:
continue
cfg_data_token, = struct.unpack_from('<L', file_data, pos + 5)
# ldsfld
if file_data[pos + 9] != 0x7E:
continue
token, = struct.unpack_from('<L', file_data, pos + 10)
if token == cfg_data_token:
pos += 9
break
if pos < 0:
print('Error: Configuration data not found.')
sys.exit(1)
print('cfg data position: %08X' % pos)
print('cfg data token: 0x%08X' % cfg_data_token)
cfg_data_dict = {}
# Parse IL code
while pos + 16 <= len(file_data):
# ldsfld
if file_data[pos] != 0x7E:
break
pos += 1
token, = struct.unpack_from('<L', file_data, pos)
if token != cfg_data_token:
break
pos += 4
# ldc.i4
if file_data[pos] != 0x20:
break
pos += 1
idx, = struct.unpack_from('<L', file_data, pos)
if cfg_data_dict.get(idx) is not None:
break
pos += 4
# ldc.i4, stelem.i1
if (file_data[pos] != 0x20) or (file_data[pos + 5] != 0x9C):
break
pos += 1
val, = struct.unpack_from('<L', file_data, pos)
if val > 255:
break
pos += 5
cfg_data_dict[idx] = val
# skip nop
if file_data[pos] == 0:
pos += 1
pack_cfg_data_size = max(cfg_data_dict.keys()) + 1
print('compressed cfg data size: %d' % pack_cfg_data_size)
pack_cfg_data = b''
for i in range(pack_cfg_data_size):
val = cfg_data_dict.get(i)
if val is None:
break
pack_cfg_data += bytes([val])
cfg_data = decompress_data(pack_cfg_data)
print('cfg data size: %d' % len(cfg_data))
cfg_filename = filename + '.cfg'
with io.open(cfg_filename, 'wb') as f:
f.write(cfg_data)
| rivitna/Malware | HsHarada/hsharada_extract_cfg.py | hsharada_extract_cfg.py | py | 3,594 | python | en | code | 218 | github-code | 36 | [
{
"api_name": "zlib.decompressobj",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "zlib.MAX_WBITS",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"... |
12717941520 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
# ---
# title: hello,hikyson
# tags: [Default]
# category: [Default]
# comments: true
# date: 2014-04-20 22:18:43
# ---
#
# hello,hikyson
#
# <!-- more -->
#
# |Version|Codename|API|Distribution|
# |---|---|---|---|
# |111|222|333|444|
import os
from ScrapyForAndroidDashboard.git_pusher import post_title, local_time_str, post_name, push, post_file_dir
class ScrapyforandroiddashboardPipeline(object):
def process_item(self, item, spider):
# generate md file
divider = "---"
line_feed = "\r\n"
title = post_title
tags = "[android,spider,scrapy]"
category = "[scrapy]"
comments = "true"
date = local_time_str
more = "<!-- more -->"
head = "".join(
[divider, line_feed, "title: ", title, line_feed, "tags: ", tags, line_feed, "category: ", category,
line_feed, "comments: ", comments, line_feed, "date: ", date, line_feed, divider, line_feed])
summary = "This is a post generate by a spider , grab from url: [developer.android.google.cn](developer.android.google.cn)"
updatetime = "Update time: %s" % local_time_str
version_data_dict = json.loads(item["version_data"])
version_chart_url = "https:" + version_data_dict["chart"] + ".png"
# version text
text_version = "".join(
["" % version_chart_url, line_feed, line_feed, "|Codename|API|Distribution|",
line_feed, "|---|---|---|", line_feed])
version_items = version_data_dict["data"]
for version_item in version_items:
api = version_item["api"]
name = version_item["name"]
perc = version_item["perc"]
text_version = text_version + "|" + str(api) + "|" + name + "|" + str(perc) + "|" + line_feed
post = "".join(
[head, line_feed, line_feed, summary, line_feed, updatetime, line_feed, line_feed, more, line_feed,
line_feed, text_version])
for file_name in os.listdir(post_file_dir):
if file_name.find(post_title) >= 0:
os.remove(os.path.join(post_file_dir, file_name))
file_name = os.path.join(post_file_dir, post_name)
with open(file_name, 'wb') as f:
f.write(post)
push()
return item
| Kyson/ScrapyForAndroidDashboard | ScrapyForAndroidDashboard/ScrapyForAndroidDashboard/pipelines.py | pipelines.py | py | 2,522 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ScrapyForAndroidDashboard.git_pusher.post_title",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "ScrapyForAndroidDashboard.git_pusher.local_time_str",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "ScrapyForAndroidDashboard.git_pusher.local_ti... |
74772398185 | # https://cloud.google.com/pubsub/docs/create-topic#create_a_topic
# https://cloud.google.com/python/docs/reference/pubsub/latest
# %%
from google.cloud import pubsub_v1
# TODO(developer)
project_id = "podact-topic-extractor"
topic_id = "your-topic-id"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
topic = publisher.create_topic(request={"name": topic_path})
print(f"Created topic: {topic.name}")
# %%
# When you delete a topic, its subscriptions are not deleted.
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
publisher.delete_topic(request={"topic": topic_path})
print(f"Topic deleted: {topic_path}")
# %%
from google.cloud import pubsub_v1
publisher = pubsub_v1.PublisherClient()
project_path = f"projects/{project_id}"
for topic in publisher.list_topics(request={"project": project_path}):
print(topic)
# %%
"""Publishes multiple messages to a Pub/Sub topic with an error handler."""
from concurrent import futures
from typing import Callable
from google.cloud import pubsub_v1
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
publish_futures = []
def get_callback(
publish_future: pubsub_v1.publisher.futures.Future, data: str
) -> Callable[[pubsub_v1.publisher.futures.Future], None]:
def callback(publish_future: pubsub_v1.publisher.futures.Future) -> None:
try:
# Wait 60 seconds for the publish call to succeed.
print(publish_future.result(timeout=60))
except futures.TimeoutError:
print(f"Publishing {data} timed out.")
return callback
for i in range(10):
data = str(i)
# When you publish a message, the client returns a future.
publish_future = publisher.publish(topic_path, data.encode("utf-8"))
# Non-blocking. Publish failures are handled in the callback function.
publish_future.add_done_callback(get_callback(publish_future, data))
publish_futures.append(publish_future)
# Wait for all the publish futures to resolve before exiting.
futures.wait(publish_futures, return_when=futures.ALL_COMPLETED)
print(f"Published messages with error handler to {topic_path}.")
# %%
from google.cloud import pubsub_v1
# TODO(developer)
# project_id = "your-project-id"
# topic_id = "your-topic-id"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
for n in range(1, 10):
data_str = f"Message number {n}"
# Data must be a bytestring
data = data_str.encode("utf-8")
# Add two attributes, origin and username, to the message
future = publisher.publish(
topic_path, data, origin="python-sample", username="gcp"
)
print(future.result())
print(f"Published messages with custom attributes to {topic_path}.")
# %%
from google.cloud import pubsub_v1
# TODO(developer): Choose an existing topic.
# project_id = "your-project-id"
# topic_id = "your-topic-id"
publisher_options = pubsub_v1.types.PublisherOptions(enable_message_ordering=True)
# Sending messages to the same region ensures they are received in order
# even when multiple publishers are used.
client_options = {"api_endpoint": "us-east1-pubsub.googleapis.com:443"}
publisher = pubsub_v1.PublisherClient(
publisher_options=publisher_options, client_options=client_options
)
# The `topic_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/topics/{topic_id}`
topic_path = publisher.topic_path(project_id, topic_id)
for message in [
("message1", "key1"),
("message2", "key2"),
("message3", "key1"),
("message4", "key2"),
]:
# Data must be a bytestring
data = message[0].encode("utf-8")
ordering_key = message[1]
# When you publish a message, the client returns a future.
future = publisher.publish(topic_path, data=data, ordering_key=ordering_key)
print(future.result())
print(f"Published messages with ordering keys to {topic_path}.")
# %%
from concurrent import futures
from google.cloud import pubsub_v1
# TODO(developer)
# project_id = "your-project-id"
# topic_id = "your-topic-id"
# Configure the batch to publish as soon as there are 10 messages
# or 1 KiB of data, or 1 second has passed.
batch_settings = pubsub_v1.types.BatchSettings(
max_messages=10, # default 100
max_bytes=1024, # default 1 MB
max_latency=1, # default 10 ms
)
publisher = pubsub_v1.PublisherClient(batch_settings)
topic_path = publisher.topic_path(project_id, topic_id)
publish_futures = []
# Resolve the publish future in a separate thread.
def callback(future: pubsub_v1.publisher.futures.Future) -> None:
message_id = future.result()
print(message_id)
for n in range(1, 10):
data_str = f"Message number {n}"
# Data must be a bytestring
data = data_str.encode("utf-8")
publish_future = publisher.publish(topic_path, data)
# Non-blocking. Allow the publisher client to batch multiple messages.
publish_future.add_done_callback(callback)
publish_futures.append(publish_future)
futures.wait(publish_futures, return_when=futures.ALL_COMPLETED)
print(f"Published messages with batch settings to {topic_path}.")
# %%
# Sample: publish with a custom retry policy for transient server errors.
from google import api_core
from google.cloud import pubsub_v1
# %%
# TODO(developer)
# project_id = "your-project-id"
# topic_id = "your-topic-id"
# Configure the retry settings. Defaults shown in comments are values applied
# by the library by default, instead of default values in the Retry object.
custom_retry = api_core.retry.Retry(
    initial=0.250,  # seconds (default: 0.1)
    maximum=90.0,  # seconds (default: 60.0)
    multiplier=1.45,  # default: 1.3
    deadline=300.0,  # seconds (default: 60.0)
    # Only these (mostly transient) errors trigger a retry.
    # NOTE(review): newer google-api-core versions deprecate `deadline` in
    # favor of `timeout` — confirm against the installed library version.
    predicate=api_core.retry.if_exception_type(
        api_core.exceptions.Aborted,
        api_core.exceptions.DeadlineExceeded,
        api_core.exceptions.InternalServerError,
        api_core.exceptions.ResourceExhausted,
        api_core.exceptions.ServiceUnavailable,
        api_core.exceptions.Unknown,
        api_core.exceptions.Cancelled,
    ),
)
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
for n in range(1, 10):
    data_str = f"Message number {n}"
    # Data must be a bytestring
    data = data_str.encode("utf-8")
    # The per-call retry overrides the client's default retry policy.
    future = publisher.publish(topic=topic_path, data=data, retry=custom_retry)
    print(future.result())
print(f"Published messages with retry settings to {topic_path}.")
{
"api_name": "google.cloud.pubsub_v1.PublisherClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "google.cloud.pubsub_v1",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "google.cloud.pubsub_v1.PublisherClient",
"line_number": 20,
"usage_type": "... |
39303555840 | #!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: admin
@file: MultiHeadedAttention.py
@time: 2021/09/02
@desc:
"""
import copy
import torch
import math
from torch import nn
import torch.nn.functional as F
def clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*.

    The copies share no parameters with each other or with the original.
    """
    copies = []
    for _ in range(N):
        copies.append(copy.deepcopy(module))
    return nn.ModuleList(copies)
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention (Eq. 4 of "Attention Is All You Need").

    Returns the attended values and the attention weight matrix.
    """
    dim_k = query.size(-1)
    # Similarity of every query against every key, normalised by sqrt(d_k).
    scores = query.matmul(key.transpose(-2, -1)).div(math.sqrt(dim_k))
    if mask is not None:
        # Masked positions get -1e9 so softmax drives their weight to ~0.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    # Weighted sum of the values, plus the weights themselves for inspection.
    return weights.matmul(value), weights
class MultiHeadedAttention(nn.Module):
    """
    Multi-Head Attention (sub-layer (2) of the encoder).
    """
    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        """
        `h`: number of attention heads
        `d_model`: dimensionality of the token embeddings
        """
        # The head count must divide the model width evenly.
        assert d_model % h == 0
        # Per-head dimensionality of the q/k/v vectors.
        self.d_k = d_model // h
        # Number of heads.
        self.h = h
        # Four projections: W_Q, W_K, W_V plus the output projection W_O that
        # recombines the concatenated heads.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # attention weights cached from the last forward pass
        self.dropout = nn.Dropout(p=dropout)
    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # Broadcast the same mask across every head.
            mask = mask.unsqueeze(1)
        # Batch size.
        nbatches = query.size(0)
        # Project q/k/v with W_Q/W_K/W_V and split into h heads:
        # (batch, seq, d_model) -> (batch, h, seq, d_k).
        query, key, value = [
            l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            for l, x in zip(self.linears, (query, key, value))
        ]
        # Scaled dot-product attention over every head in parallel.
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # Concatenate heads back: (batch, h, seq, d_k) -> (batch, seq, h*d_k).
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        # Final linear projection W_O over the concatenated heads.
        return self.linears[-1](x)
{
"api_name": "torch.nn.ModuleList",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_n... |
2835263565 | import pandas as pd
import plotly.express as px
from ..sequence_info.sequences import group_2_coins
from ..utils import google_form_question_to_coin_sequence
DATA_FILENAME = "C:/Users/Crystal Wang/Downloads/9.660/9.660-final-project/data/data.csv"
def get_df():
    """Load the survey CSV, strip unused columns, normalise the labelled
    rating endpoints to plain integers, and index rows by respondent name."""
    frame = pd.read_csv(DATA_FILENAME)
    frame = frame.drop("Timestamp", axis=1)
    # The trailing 70 columns hold the music-survey answers; not needed here.
    frame = frame.drop(frame.columns[-70:], axis=1)
    # Google Forms labels the scale endpoints; map them back to 1 and 7.
    for label, score in (
        ("1 (least representative)", 1),
        ("7 (most representative)", 7),
    ):
        frame = frame.replace(label, score)
    return frame.set_index("Name")
def split_groups(df):
    """Split the combined responses into the two experimental groups,
    dropping the question columns that belong to the other group."""
    selector = "Who asked you to do this survey?"
    g1 = df[df[selector] == "Crystal (Group 1)"]
    # Group 1 never saw the final 37 questions.
    g1 = g1.drop(g1.columns[-37:], axis=1).drop(selector, axis=1)
    g2 = df[df[selector] == "Julia (Group 2)"]
    # Group 2 never saw the 37 questions preceding the final 36.
    g2 = g2.drop(g2.columns[-73:-36], axis=1).drop(selector, axis=1)
    return g1, g2
def get_control_data(g_df):
    """Return the five control-sequence ratings as ints, columns renamed 1-5."""
    control = g_df.drop(g_df.columns[5:], axis=1).astype(int)
    control.columns = [1, 2, 3, 4, 5]
    return control
def plot_line_data(df):
    """Plot each respondent's ratings as a line and return the figure.

    The frame is reshaped to long form (one row per Name/x/y triple) so
    plotly draws one trace per respondent.
    """
    long_df = df.stack().reset_index()
    long_df = long_df.rename(columns={"level_1": "x", 0: "y"})
    # BUG FIX: the figure used to be built and immediately discarded, so
    # nothing was ever displayed or returned. Return it so callers can
    # .show() or save it.
    return px.line(long_df, x="x", y="y", color="Name")
def significance_t_test(df1, df2):
    """Per-column two-sample z-test: True where |z| exceeds 1.96 (p < .05)."""
    n1, n2 = len(df1.index), len(df2.index)
    # Population variance (ddof=0) of each column, scaled down to the
    # variance of the sample mean.
    sem1_sq = (df1.std(axis=0, ddof=0) ** 2) / n1
    sem2_sq = (df2.std(axis=0, ddof=0) ** 2) / n2
    z = (df1.mean(axis=0) - df2.mean(axis=0)) / (sem1_sq + sem2_sq) ** 0.5
    return z.abs() > 1.96
def verify_control_significance(g1_df, g2_df, plot=False):
    """Raise if the two groups differ significantly on any control sequence."""
    controls = [get_control_data(g) for g in (g1_df, g2_df)]
    if plot:
        for control in controls:
            plot_line_data(control)
    # Per-sequence z-test between the two groups' control ratings.
    if significance_t_test(*controls).any():
        raise Exception("The samples are significantly different in control responses!")
    print("No significant difference between the control responses.\n")
def remove_control_data(g_df):
    """Drop the five control columns and cast the remaining ratings to int."""
    experimental = g_df.drop(g_df.columns[:5], axis=1)
    return experimental.astype(int)
def sort_columns(df):
    """Return *df* with its columns rearranged into sorted label order."""
    ordered = sorted(df.columns)
    return df.reindex(ordered, axis=1)
def clean_group_1(g1_df):
    """Rename group 1's question columns to coin sequences, then sort them."""
    renamed = [google_form_question_to_coin_sequence(col) for col in g1_df.columns]
    g1_df.columns = renamed
    return sort_columns(g1_df)
def clean_group_2(g2_df):
    """Rename group 2's question columns to coin sequences, then sort them.

    Group 2's raw questions lack the coin suffix, so it is re-attached from
    `group_2_coins` before translation.
    """
    renamed = []
    for column, coin in zip(g2_df.columns, group_2_coins):
        renamed.append(google_form_question_to_coin_sequence(f"{column} [Coin {coin}: ]"))
    g2_df.columns = renamed
    return sort_columns(g2_df)
def test_experiment_significance(g1_df, g2_df):
    """Report whether any experimental sequence differs between the groups."""
    print("---- GROUP 1 vs. GROUP 2 EXPERIMENT ----")
    is_significant = significance_t_test(g1_df, g2_df)
    if not is_significant.any():
        print("There is NO significant difference between group 1 and group 2\n")
        return
    significant_cols = list(is_significant.loc[is_significant].index)
    print(f"There is a significant difference between group 1 and group 2: {significant_cols}\n")
def get_groups():
    """Load, split, validate, and clean both groups' experimental data."""
    g1_df, g2_df = split_groups(get_df())
    # Abort early if the groups already disagree on the control sequences.
    verify_control_significance(g1_df, g2_df)
    g1_df = clean_group_1(remove_control_data(g1_df))
    g2_df = clean_group_2(remove_control_data(g2_df))
    test_experiment_significance(g1_df, g2_df)
    return g1_df, g2_df
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "plotly.express.line",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "utils.google_form_... |
26376325124 | #!/usr/bin/env python
# coding: utf-8
# In[12]:
# Question 1 c)
# Author: Ilyas Sharif
import numpy as np
import matplotlib.pyplot as plt
# Defining the parameters that didn't change (same as code for before)
# --- Simulation parameters ---
v_f = 0.1        # characteristic velocity in the friction term
omega_0 = 1      # natural angular frequency of the oscillator
tau = 1          # damping time constant
gamma = 0.5      # friction strength
a = 0.0          # integration start time
b = 100.0        # integration end time
N = 10000        # number of RK4 steps
h = (b-a)/N      # RK4 step size
tpoints = np.arange(a, b,h)
x_0 = 0          # initial position
y_0 = 0          # initial velocity
# Defining the xpoints and ypoints array.
vp = v_f * np.log((gamma*tau)/v_f)   # reference pulling speed
v_p = [0.1*vp,0.25*vp, 0.5*vp, 0.75*vp, 1*vp, 1.25*vp, 1.5*vp]
indexing = [0.1,0.25,0.5,0.75,1,1.25,1.5]   # multipliers, used only for legend labels
# Integrate and plot one trajectory per pulling speed.
for i in range(len(v_p)):
    C = v_p[i]   # NOTE(review): only used by the commented-out overlay below
    xpoints = []
    r = np.array([x_0, y_0], float)
    # Creating f(r,t) where r = (x, y = dx/dt)
    def f(r,t):
        # Damped oscillator whose spring anchor moves at speed v_p[i], with a
        # velocity-dependent friction term gamma*exp(-|y|/v_f).
        x = r[0]
        y = r[1]
        fx = y
        fy = -(omega_0**2)*((x) - v_p[i] * t) - (y)/tau - gamma*np.exp(-np.abs(y)/v_f)
        return np.array([fx, fy], float)
    # Creating r array and computing RK4 method (copied from Newman odesim.py)
    for t in tpoints:
        xpoints.append(r[0])
        k1 = h*f(r,t)
        k2 = h*f(r+0.5*k1,t+0.5*h)
        k3 = h*f(r+0.5*k2,t+0.5*h)
        k4 = h*f(r+k3,t+h)
        r += (k1+2*k2+2*k3+k4)/6
    plt.plot(tpoints, xpoints, label = '$v_p$ = ' + str(indexing[i]) + '$v_p$')
    # I'm going to comment this out, but if you want to see the constant velocity
    # solutions that each of them oscillate around, feel free to comment out the
    # 2 lines below :)
    #x0 = -(1/omega_0**2)*(C/tau + gamma*np.exp(-C/v_f)) + (v_p[i]*tpoints)
    #plt.plot(tpoints,x0, linestyle = ":", color = 'k')
plt.title("Comparison of Different Choices for $v_p$")
plt.xlabel(" time (seconds) ")
plt.ylabel(" position (meters) ")
plt.legend()
plt.xlim(0,100)
plt.show()
# In[ ]:
| SpencerKi/Computational-Methods | Differentiation and Differential Equations/Lab06_Q1_c.py | Lab06_Q1_c.py | py | 1,726 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 43,... |
2808401161 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = "0.1.2"
__author__ = "Abien Fred Agarap"
import argparse
from models.svm.svm import Svm
# Hyper-parameters
BATCH_SIZE = 256
LEARNING_RATE = 1e-5
N_CLASSES = 2
SEQUENCE_LENGTH = 21
def parse_args(args=None):
    """Parse command-line arguments for the SVM intrusion-detection tool.

    Args:
        args: Optional explicit argument list (defaults to ``sys.argv[1:]``),
            added for testability; passing nothing preserves the old behavior.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="SVM for Intrusion Detection")
    group = parser.add_argument_group("Arguments")
    group.add_argument(
        "-o",
        "--operation",
        required=True,
        type=str,
        help='the operation to perform: "train" or "test"',
    )
    group.add_argument(
        "-t",
        "--train_dataset",
        required=False,
        type=str,
        help="the NumPy array training dataset (*.npy) to be used",
    )
    group.add_argument(
        "-v",
        "--validation_dataset",
        required=True,
        type=str,
        help="the NumPy array validation dataset (*.npy) to be used",
    )
    group.add_argument(
        "-c",
        "--checkpoint_path",
        required=True,
        type=str,
        help="path where to save the trained model",
    )
    group.add_argument(
        "-l",
        "--log_path",
        required=False,
        type=str,
        help="path where to save the TensorBoard logs",
    )
    group.add_argument(
        "-m",
        "--model_name",
        required=False,
        type=str,
        help="filename for the trained model",
    )
    group.add_argument(
        "-r",
        "--result_path",
        required=True,
        type=str,
        help="path where to save the actual and predicted labels",
    )
    # BUG FIX: main() reads arguments.num_epochs and arguments.svm_c, but
    # neither option was defined, so training always crashed with an
    # AttributeError. Both now have sensible defaults.
    group.add_argument(
        "-e",
        "--num_epochs",
        required=False,
        type=int,
        default=3000,
        help="number of epochs to train the model (default: 3000)",
    )
    group.add_argument(
        "-s",
        "--svm_c",
        required=False,
        type=float,
        default=1.0,
        help="SVM soft-margin penalty parameter C (default: 1.0)",
    )
    arguments = parser.parse_args(args)
    return arguments
def main(arguments):
    """Train or evaluate the linear SVM according to ``arguments.operation``.

    NOTE(review): ``data`` is referenced below but never imported in this
    file chunk — presumably a project data-loading module; confirm the
    import before running. ``arguments`` must also carry ``svm_c`` and
    ``num_epochs`` attributes — confirm the argument parser defines them.
    """
    if arguments.operation == "train":
        # Load the train/validation splits from .npy files.
        train_features, train_labels = data.load_data(dataset=arguments.train_dataset)
        validation_features, validation_labels = data.load_data(
            dataset=arguments.validation_dataset
        )
        train_size = train_features.shape[0]
        validation_size = validation_features.shape[0]
        model = Svm(
            alpha=LEARNING_RATE,
            batch_size=BATCH_SIZE,
            svm_c=arguments.svm_c,
            num_classes=N_CLASSES,
            num_features=SEQUENCE_LENGTH,
        )
        model.train(
            checkpoint_path=arguments.checkpoint_path,
            log_path=arguments.log_path,
            model_name=arguments.model_name,
            epochs=arguments.num_epochs,
            result_path=arguments.result_path,
            train_data=[train_features, train_labels],
            train_size=train_size,
            validation_data=[validation_features, validation_labels],
            validation_size=validation_size,
        )
    elif arguments.operation == "test":
        test_features, test_labels = data.load_data(
            dataset=arguments.validation_dataset
        )
        test_size = test_features.shape[0]
        # Trim the test set so it divides evenly into full batches.
        test_features = test_features[: test_size - (test_size % BATCH_SIZE)]
        test_labels = test_labels[: test_size - (test_size % BATCH_SIZE)]
        test_size = test_features.shape[0]
        Svm.predict(
            batch_size=BATCH_SIZE,
            num_classes=N_CLASSES,
            test_data=[test_features, test_labels],
            test_size=test_size,
            checkpoint_path=arguments.checkpoint_path,
            result_path=arguments.result_path,
        )
if __name__ == "__main__":
    args = parse_args()
    main(args)
| AFAgarap/gru-svm | svm_main.py | svm_main.py | py | 3,519 | python | en | code | 136 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.svm.svm.Svm",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.svm.svm.Svm.predict",
"line_number": 116,
"usage_type": "call"
},
{
"api_name":... |
9924072616 | import os
import re
import sys
from lib.instruction import Instruction, AInstruction, CInstruction, LInstruction
from typing import Generator, Tuple
class Parser:
    """
    Parse the Xxx.asm into stream of instructions.
    - read source file
    - understand the format of input file
    - break each into different components
        - C: dest, comp, jump
        - A: value
    """

    def __init__(self, path: str):
        self._path = path

    def _get_clean_line(self, line):
        # Trim whitespace first so comment stripping sees a normalized line.
        return self.strip_comments(line.strip())

    def _parse_c_instruction(self, line):
        """Split a C-instruction into its dest/comp/jump components.

        The if/elif/else chain below is exhaustive, so the unreachable
        trailing ``raise ValueError`` from the old version was removed.
        """
        if line.count(";") > 1:
            raise ValueError('line format error, should not have more than one ";" ')
        if line.count("=") > 1:
            raise ValueError('line format error, should not have more than one "="')
        if "=" in line and ";" in line:
            # e.g. D=M; JMP
            dest, tail = line.split("=")
            comp, jump = tail.split(";")
            return CInstruction(dest=dest.strip(), comp=comp.strip(), jump=jump.strip())
        elif ";" in line:
            # e.g. M; JMP
            comp, jump = line.split(";")
            return CInstruction(dest=None, comp=comp.strip(), jump=jump.strip())
        elif "=" in line:
            # e.g. M=D
            dest, comp = line.split("=")
            return CInstruction(dest=dest.strip(), comp=comp.strip(), jump=None)
        else:
            # Bare computation, e.g. D
            return CInstruction(dest=None, comp=line.strip(), jump=None)

    def _parse(self, line) -> Instruction:
        """Dispatch a cleaned source line to the right instruction type."""
        if line.startswith("("):
            # (LABEL) pseudo-instruction
            inst = LInstruction(name=line[1:-1].strip())
        elif line.startswith("@"):
            # @value address instruction
            inst = AInstruction(value=line[1:])
        else:
            inst = self._parse_c_instruction(line)
        return inst

    def get_instruction(self) -> Generator[Instruction, None, None]:
        """Yield one parsed Instruction per non-empty source line.

        BUG FIX: the annotation previously claimed ``Tuple[int, Instruction]``
        pairs, but only the instruction itself is yielded.
        """
        with open(self._path, "r") as f:
            for line in f:
                line = self._get_clean_line(line)
                if not line:
                    continue
                yield self._parse(line)

    def strip_comments(self, text):
        """Remove //-line and /*...*/ block comments (and quoted literals,
        which the Hack assembly language never contains) from *text*."""
        return re.sub(
            "//.*?$|/\*.*?\*/|'(?:\\.|[^\\'])*'|\"(?:\\.|[^\\\"])*\"",
            "",
            text,
            flags=re.S,
        ).strip()
| mtx2d/nand2tetris | projects/06/src/lib/parser.py | parser.py | py | 2,398 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lib.instruction.CInstruction",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "lib.instruction.CInstruction",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "lib.instruction.CInstruction",
"line_number": 44,
"usage_type": "call"
},
{... |
36307745878 | import cv2
import numpy
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
import os
import random
from tqdm import tqdm
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry, SamPredictor
import argparse
from utils.utils import *
import time
def get_embedding(img, predictor):
    """Run SAM's image encoder on *img* and return the dense image embedding."""
    predictor.set_image(img)
    return predictor.get_image_embedding()
def train(args, predictor):
    """Fit a logistic-regression probe on SAM embeddings of args.k images.

    Each training mask is binarised and downsampled to the 64x64 grid of the
    SAM image embedding, so every 256-d embedding vector gets a per-cell
    foreground/background label. Returns the fitted sklearn model.
    """
    data_path = args.data_path
    assert os.path.exists(data_path), 'data path does not exist!'
    num_image = args.k
    fnames = os.listdir(os.path.join(data_path, 'images'))
    # keep num_image random file names
    random.shuffle(fnames)
    fnames = fnames[:num_image]
    image_embeddings = []
    labels = []
    # get the image embeddings
    print('Start training...')
    t1 = time.time()
    i = 0
    for fname in tqdm(fnames):
        # read data
        image = cv2.imread(os.path.join(data_path, 'images', fname))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(os.path.join(data_path, 'masks', fname))
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(mask, 128, 1, cv2.THRESH_BINARY) # threshold the mask to 0 and 1
        downsampled_mask = cv2.resize(mask, dsize=(64, 64), interpolation=cv2.INTER_NEAREST)
        img_emb = get_embedding(image, predictor)
        # Flatten the embedding to one 256-d row per 64x64 grid cell.
        img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
        image_embeddings.append(img_emb)
        labels.append(downsampled_mask.flatten())
        i += 1
        # NOTE(review): fnames was already truncated to num_image above, so
        # this early exit can never trigger.
        if i > num_image: break
    t2 = time.time()
    print("Time used: {}m {}s".format((t2 - t1) // 60, (t2 - t1) % 60))
    image_embeddings_cat = np.concatenate(image_embeddings)
    labels = np.concatenate(labels)
    # Fit the logistic-regression probe on (pixel-embedding, label) pairs.
    model = LogisticRegression(max_iter=1000)
    model.fit(image_embeddings_cat, labels)
    return model
def test_visualize(args, model, predictor):
    """Visualize probe masks vs. SAM prompted with point/box/both prompts.

    For each of the first args.visualize_num held-out images, predicts a mask
    with the linear probe, derives a foreground point and a jittered bounding
    box from it, prompts SAM three ways, plots all five masks side by side,
    and reports the mean Dice score of each variant.

    NOTE(review): this reshuffles fnames independently of train(), so
    fnames[num_image:] is not guaranteed to exclude the training images —
    confirm the intended train/test split.
    """
    data_path = args.data_path
    num_image = args.k
    fnames = os.listdir(os.path.join(data_path, 'images'))
    random.shuffle(fnames)
    fnames = fnames[num_image:]
    num_visualize = args.visualize_num
    dice_linear = []
    dice1 = []
    dice2 = []
    dice3 = []
    i = 0
    for fname in tqdm(fnames[:num_visualize]):
        # read data
        image = cv2.imread(os.path.join(data_path, 'images', fname))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(os.path.join(data_path, 'masks', fname))
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(mask, 128, 1, cv2.THRESH_BINARY)
        H, W, _ = image.shape
        # get the image embedding and flatten it
        img_emb = get_embedding(image, predictor)
        img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
        # get the mask predicted by the linear classifier
        y_pred = model.predict(img_emb)
        y_pred = y_pred.reshape((64, 64))
        # mask predicted by the linear classifier
        mask_pred_l = cv2.resize(y_pred, (mask.shape[1], mask.shape[0]), interpolation=cv2.INTER_NEAREST)
        # use distance transform to find a point inside the mask
        fg_point = get_max_dist_point(mask_pred_l)
        # Clean the probe mask: erosion removes speckle, dilation restores area.
        kernel = np.ones((5, 5), np.uint8)
        eroded_mask = cv2.erode(mask_pred_l, kernel, iterations=3)
        mask_pred_l = cv2.dilate(eroded_mask, kernel, iterations=5)
        # set the image to sam
        predictor.set_image(image)
        # prompt the sam with the point
        input_point = np.array([[fg_point[0], fg_point[1]]])
        input_label = np.array([1])
        masks_pred_sam_prompted1, _, _ = predictor.predict(
            point_coords=input_point,
            point_labels=input_label,
            box=None,
            multimask_output=False,
        )
        # prompt the sam with the bounding box (randomly jittered by up to 20 px)
        y_indices, x_indices = np.where(mask_pred_l > 0)
        if np.all(mask_pred_l == 0):
            bbox = np.array([0, 0, H, W])
        else:
            x_min, x_max = np.min(x_indices), np.max(x_indices)
            y_min, y_max = np.min(y_indices), np.max(y_indices)
            H, W = mask_pred_l.shape
            x_min = max(0, x_min - np.random.randint(0, 20))
            x_max = min(W, x_max + np.random.randint(0, 20))
            y_min = max(0, y_min - np.random.randint(0, 20))
            y_max = min(H, y_max + np.random.randint(0, 20))
            bbox = np.array([x_min, y_min, x_max, y_max])
        masks_pred_sam_prompted2, _, _ = predictor.predict(
            point_coords=None,
            point_labels=None,
            box=bbox[None, :],
            multimask_output=False,)
        # prompt the sam with both the point and bounding box
        masks_pred_sam_prompted3, _, _ = predictor.predict(
            point_coords=input_point,
            point_labels=input_label,
            box=bbox[None, :],
            multimask_output=False,)
        dice_l = dice_coef(mask, mask_pred_l)
        dice_p = dice_coef(mask, masks_pred_sam_prompted1[0])
        dice_b = dice_coef(mask, masks_pred_sam_prompted2[0])
        dice_i = dice_coef(mask, masks_pred_sam_prompted3[0])
        dice_linear.append(dice_l)
        dice1.append(dice_p)
        dice2.append(dice_b)
        dice3.append(dice_i)
        # plot the results: ground truth, probe mask, and the three SAM variants
        fig, ax = plt.subplots(1, 5, figsize=(15, 10))
        ax[0].set_title('Ground Truth')
        ax[0].imshow(mask)
        ax[1].set_title('Linear + e&d')
        ax[1].plot(fg_point[0], fg_point[1], 'r.')
        ax[1].imshow(mask_pred_l)
        ax[2].set_title('Point')
        ax[2].plot(fg_point[0], fg_point[1], 'r.')
        ax[2].imshow(masks_pred_sam_prompted1[0])
        ax[3].set_title('Box')
        show_box(bbox, ax[3])
        ax[3].imshow(masks_pred_sam_prompted2[0])
        ax[4].set_title('Point + Box')
        ax[4].plot(fg_point[0], fg_point[1], 'r.')
        show_box(bbox, ax[4])
        ax[4].imshow(masks_pred_sam_prompted3[0])
        [axi.set_axis_off() for axi in ax.ravel()]
        if os.path.exists(args.save_path) == False:
            os.mkdir(args.save_path)
        plt.savefig(os.path.join(args.save_path, fname.split('.')[0]+str(i)))
    mdice0 = round(sum(dice_linear)/float(len(dice_linear)), 5)
    mdice1 = round(sum(dice1)/float(len(dice1)), 5)
    mdice2 = round(sum(dice2)/float(len(dice2)), 5)
    mdice3 = round(sum(dice3)/float(len(dice3)), 5)
    print('For the first {} images: '.format(num_visualize))
    print('mdice(linear classifier: )', mdice0)
    print('mDice(point prompts): ', mdice1)
    print('mDice(bbox prompts): ', mdice2)
    print('mDice(points and boxes): ', mdice3)
def test(args, predictor):
    """5-fold cross-validation of the linear probe + SAM prompting pipeline.

    For each fold: fit a logistic-regression probe on SAM embeddings of
    args.k random training images, then score every held-out image with four
    mask sources — the probe itself, and SAM prompted with a point, a box,
    and both (prompts derived from the probe's mask). Prints mean Dice per
    variant per fold.
    """
    data_path = args.data_path
    images = []
    masks = []
    fnames = os.listdir(os.path.join(data_path, 'images'))
    print(f'loading images from {data_path}...')
    for fname in tqdm(fnames):
        # read data
        image = cv2.imread(os.path.join(data_path, 'images', fname))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(os.path.join(data_path, 'masks', fname))
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(mask, 128, 1, cv2.THRESH_BINARY)
        images.append(image)
        masks.append(mask)
    kf = KFold(n_splits=5, shuffle=True, random_state=42)
    for train_index, test_index in kf.split(images):
        train_images = [images[i] for i in train_index]
        train_masks = [masks[i] for i in train_index]
        test_images = [images[i] for i in test_index]
        test_masks = [masks[i] for i in test_index]
        # train the linear classifier on k random training images
        k = args.k
        random_indices = random.sample(range(len(train_images)), k)
        image_embeddings = []
        labels = []
        for idx in random_indices:
            image = train_images[idx]
            mask = train_masks[idx]
            downsampled_mask = cv2.resize(mask, dsize=(64, 64), interpolation=cv2.INTER_NEAREST)
            # BUG FIX: get_embedding requires the predictor argument; the old
            # call `get_embedding(image)` raised a TypeError at runtime.
            img_emb = get_embedding(image, predictor)
            img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
            image_embeddings.append(img_emb)
            labels.append(downsampled_mask.flatten())
        image_embeddings_cat = np.concatenate(image_embeddings)
        labels = np.concatenate(labels)
        model = LogisticRegression(max_iter=1000)
        model.fit(image_embeddings_cat, labels)
        # test
        dice_linear = []
        dice1 = []
        dice2 = []
        dice3 = []
        for idx in range(len(test_images)):
            image = test_images[idx]
            mask = test_masks[idx]
            H, W, _ = image.shape
            # BUG FIX: same missing-predictor call as above.
            img_emb = get_embedding(image, predictor)
            img_emb = img_emb.cpu().numpy().transpose((2, 3, 1, 0)).reshape((64, 64, 256)).reshape(-1, 256)
            # get the mask predicted by the linear classifier
            y_pred = model.predict(img_emb)
            y_pred = y_pred.reshape((64, 64))
            mask_pred_l = cv2.resize(y_pred, (mask.shape[1], mask.shape[0]), interpolation=cv2.INTER_NEAREST)
            # use distance transform to find a point inside the mask
            fg_point = get_max_dist_point(mask_pred_l)
            # clean the probe mask: erode away speckle, dilate to restore area
            kernel = np.ones((5, 5), np.uint8)
            eroded_mask = cv2.erode(mask_pred_l, kernel, iterations=3)
            mask_pred_l = cv2.dilate(eroded_mask, kernel, iterations=5)
            # set the image to sam
            predictor.set_image(image)
            # prompt sam with the point
            input_point = np.array([[fg_point[0], fg_point[1]]])
            input_label = np.array([1])
            masks_pred_sam_prompted1, _, _ = predictor.predict(
                point_coords=input_point,
                point_labels=input_label,
                box=None,
                multimask_output=False,)
            # prompt sam with a bounding box around the probe mask,
            # randomly jittered by up to 20 px on each side
            y_indices, x_indices = np.where(mask_pred_l > 0)
            if np.all(mask_pred_l == 0):
                bbox = np.array([0, 0, H, W])
            else:
                x_min, x_max = np.min(x_indices), np.max(x_indices)
                y_min, y_max = np.min(y_indices), np.max(y_indices)
                H, W = mask_pred_l.shape
                x_min = max(0, x_min - np.random.randint(0, 20))
                x_max = min(W, x_max + np.random.randint(0, 20))
                y_min = max(0, y_min - np.random.randint(0, 20))
                y_max = min(H, y_max + np.random.randint(0, 20))
                bbox = np.array([x_min, y_min, x_max, y_max])
            masks_pred_sam_prompted2, _, _ = predictor.predict(
                point_coords=None,
                point_labels=None,
                box=bbox[None, :],
                multimask_output=False,)
            # prompt sam with both the point and the box
            masks_pred_sam_prompted3, _, _ = predictor.predict(
                point_coords=input_point,
                point_labels=input_label,
                box=bbox[None, :],
                multimask_output=False,)
            dice_linear.append(dice_coef(mask, mask_pred_l))
            dice1.append(dice_coef(mask, masks_pred_sam_prompted1[0]))
            dice2.append(dice_coef(mask, masks_pred_sam_prompted2[0]))
            dice3.append(dice_coef(mask, masks_pred_sam_prompted3[0]))
        mdice0 = round(sum(dice_linear)/float(len(dice_linear)), 5)
        mdice1 = round(sum(dice1)/float(len(dice1)), 5)
        mdice2 = round(sum(dice2)/float(len(dice2)), 5)
        mdice3 = round(sum(dice3)/float(len(dice3)), 5)
        print('mdice(linear classifier: )', mdice0)
        print('mDice(point prompts): ', mdice1)
        print('mDice(bbox prompts): ', mdice2)
        print('mDice(points and boxes): ', mdice3)
        print('\n')
def main():
    """CLI entry point: load SAM, train the probe, then visualize or evaluate."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cuda:0', help='device')
    parser.add_argument('--k', type=int, default=10, help='number of pics')
    parser.add_argument('--data_path', type=str, default='./data/Kvasir-SEG', help='path to train data')
    parser.add_argument('--model_type', type=str, default='vit_b', help='SAM model type')
    parser.add_argument('--checkpoint', type=str, default='./checkpoints/sam_vit_b_01ec64.pth', help='SAM checkpoint')
    # NOTE(review): type=bool means any non-empty string (even "False")
    # parses as True — consider action='store_true' instead.
    parser.add_argument('--visualize', type=bool, default=True, help='visualize the results')
    parser.add_argument('--save_path', type=str, default='./results', help='path to save the results')
    parser.add_argument('--visualize_num', type=int, default=30, help='number of pics to visualize')
    args = parser.parse_args()
    # set random seed
    random.seed(42)
    # register the SAM model
    sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint).to(args.device)
    # NOTE(review): the global is redundant — predictor is always passed
    # explicitly to train/test below.
    global predictor
    predictor = SamPredictor(sam)
    print('SAM model loaded!', '\n')
    if args.visualize:
        model = train(args, predictor)
        test_visualize(args, model, predictor)
    else:
        test(args, predictor)
if __name__ == '__main__':
    main()
{
"api_name": "os.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
74605435944 | from model.contact import Contact
from datetime import datetime
import re
import csv
class Phonebook:
"""
The Phonebook class allows users to create, update, delete, search, and perform various operations on contacts.
Attributes:
contacts (list): A list of Contact objects representing the phonebook's contacts.
Methods:
search_contacts(): Searches for contacts based on user-defined criteria.
create_contact(): Creates a new contact and adds it to the phonebook.
validate_phone_number(phone_number): Validates a phone number's format.
validate_email_address(email_address): Validates an email address's format.
update_contact(): Updates an existing contact's information.
delete_contact(): Deletes an existing contact from the phonebook.
print_all_contacts(): Prints details of all contacts in the phonebook.
print_contact_history(): Prints the contact history for a specific contact.
sort_contacts(): Sorts the contacts in the phonebook.
group_contacts(): Groups contacts by the initial letter of their last names.
"""
def __init__(self):
self.contacts = [Contact]*0
print("Starting phonebook application...")
def search_contacts(self):
"""
Searches for contacts in the contact list based on user-defined criteria.
The method allows the user to choose between two search options:
- 0: Search by name or phone number. The user can enter characters and view matching results.
- 1: Search for contacts added within a specific time frame. The user enters start and end dates.
Depending on the user's choice, the method displays matching results or contacts
added within the specified time frame.
Returns:
None
"""
choice = input("Options: \n 0. Search with name or phone number \n " +
"1. Search for contacts added within specific time frame \n How do you want to search for the contact: ")
if choice == "0":
user_input = input("To search for contacts, start entering characters below and press enter to see results: \n")
counter = 0
print("Below is a list of matching results: \n")
for contact in self.contacts:
if (user_input in contact.get_first_name()
or user_input in contact.get_last_name()
or user_input in contact.get_phone_number()
or user_input in contact.get_email_address()
or user_input in contact.get_address()):
print("Contact id: ", counter)
contact.print_contact()
counter+=1
print("\n \n")
elif choice == "1":
start_date = input("Please enter start date in yyyy/MM/dd format: ")
end_date = input("Please enter end date in yyyy/MM/dd format: ")
while True:
try:
start_time=datetime(*[int(i) for i in start_date.split('/')])
end_time=datetime(*[int(i) for i in end_date.split('/')]).replace(hour=23,minute=59,second=59)
break
except:
print("Please enter a valid date")
print("Start time: ", start_time)
print("End Time: ", end_time)
filtered_contacts = [filtered_contact for filtered_contact in self.contacts if start_time <= filtered_contact.create_time <= end_time]
print("\nBelow is a list of matching results: \n")
counter = 0
for contact in filtered_contacts:
print("Contact id: ", counter)
contact.print_contact()
counter+=1
print("\n \n")
else:
print("Please enter a valid option")
def create_contact(self):
"""
Creates a new contact and adds it to the contact list.
This method provides two options to create a contact:
- Option 0: Manually enter individual contact details
- Option 1: Load contacts in batch from a CSV file.
Depending on the user's choice, the method either guides the user to enter
individual contact details or loads contacts from a CSV file.
It validates the phone number and email address format, checks for duplicate
contacts, and adds the new contacts to the contact list.
Returns:
None
"""
print("Creating contact...")
print("Options: \n 0. Enter individual contact manually \n 1. Load contacts in batch from csv file")
batch_load = input("How do you want to add contact: ")
if batch_load=="0":
first_name = input("Enter first name: ")
last_name = input("Enter last name: ")
while True:
phone_number = input("Enter phone number in (XXX) XXX-XXXX format : ")
if self.validate_phone_number(phone_number)==False:
print("Please enter a valid phone number. Make sure format is (XXX) XXX-XXXX")
continue
else:
break
while True:
email_address = input("Enter email address, press enter to skip: ")
if email_address=="": email_address=None
if self.validate_email_address(email_address)==False:
print("Please enter a valid email address.")
continue
else:
break
address = input("Enter address, press enter to skip: ")
if address=="": address=None
contact_exists=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
contact_exists=True
if contact_exists==True:
print("Contact already exists! Please check the contact details and delete or update it as per your need.")
else:
new_contact = Contact(first_name,last_name,phone_number,email_address,address)
self.contacts.append(new_contact)
print("Contact added successfully!")
self.print_all_contacts()
elif batch_load=="1":
print("\n We have sample_contacts.csv file already present in data folder. \n You can copy your required csv file to that path first.")
file_name = input("Now enter the file name you want to load from the data folder:")
csv_file_path = "data/"+file_name
try:
with open(csv_file_path, mode='r', newline='') as file:
csv_reader = csv.reader(file)
for contact in csv_reader:
first_name = contact[0]
last_name = contact[1]
phone_number = contact[2]
if self.validate_phone_number(phone_number)==False:
print("Phone number: ", phone_number, " is not valid format (XXX) XXX-XXXX, exiting csv file. Please try again after fixing the value in csv file.")
return
email_address = contact[3]
if email_address!="" and self.validate_email_address(email_address)==False:
print("Email address: ", email_address, " is not valid format, exiting csv file. Please try again after fixing the value in csv file.")
return
address = contact[4]
contact_exists=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
contact_exists=True
if contact_exists==True:
print("Contact with first name: ", first_name, " and last name: ", last_name +
" already exists! Please check the contact details and delete or update it as per your need.")
else:
new_contact = Contact(first_name,last_name,phone_number,email_address,address)
self.contacts.append(new_contact)
print("Contacts added successfully from csv file in batch")
self.print_all_contacts()
except:
print("Error opening the file, please check the file name.")
else:
print("Please enter a valid option!")
def validate_phone_number(self, phone_number):
"""
Validates a phone number to ensure it matches the format '(###) ###-####'.
Args:
phone_number (str): The phone number to be validated.
Returns:
bool: True if the phone number is in the correct format, False otherwise.
"""
pattern = r'^\(\d{3}\) \d{3}-\d{4}$'
if re.match(pattern,phone_number):
return True
else:
return False
def validate_email_address(self, email_address):
"""
Validates an email address to ensure it matches a standard email format.
Args:
email_address (str): The email address to be validated.
Returns:
bool: True if the email address is in a valid format, False otherwise.
"""
pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
if re.match(pattern, email_address):
return True
else:
return False
def update_contact(self):
"""
Updates an existing contact's information in the contact list.
This method prompts the user to enter the first name and last name of the contact to be updated.
If the contact is found, the user is presented with a menu to choose which field to update:
- 0: First Name
- 1: Last Name
- 2: Phone Number
- 3: Email Address
- 4: Address
After selecting a field to update, the user is prompted to enter the new value for that field.
The contact's information is then updated, and the updated contact list is displayed.
If the specified contact does not exist in the list, a message is displayed indicating that
the contact was not found.
"""
first_name = input("Enter first name of contact to be updated: ")
last_name = input("Enter last name of contact to be updated: ")
found_contact=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
found_contact=True
print("Fields: \n 0. First Name \n 1. Last Name \n 2. Phone Number \n 3. Email Address \n 4. Address")
user_input = input("Enter which field you want to update: ")
if user_input=="0":
updated_first_name=input("Enter the new first name: ")
contact.update_first_name(updated_first_name)
elif user_input=="1":
updated_last_name=input("Enter the new last name: ")
contact.update_last_name(updated_last_name)
elif user_input=="2":
updated_phone_number=input("Enter the new phone number: ")
contact.update_phone_number(updated_phone_number)
elif user_input=="3":
updated_email_address=input("Enter the new email address: ")
contact.update_email_address(updated_email_address)
elif user_input=="4":
updated_address=input("Enter the new address: ")
contact.update_address(updated_address)
else:
print("Please enter a valid option!")
self.print_all_contacts()
if found_contact==False:
print("Contact does not exist, please check the first and last name you entered.")
def delete_contact(self):
"""
Deletes an existing contact from the contact list.
This method prompts the user to enter the first name and last name of the contact to be deleted.
If the contact is found in the list, it is removed from the list, and a confirmation message
is displayed indicating that the contact has been deleted.
If the specified contact does not exist in the list, a message is displayed indicating that
the contact was not found.
Returns:
None
"""
first_name = input("Enter first name of contact to be deleted: ")
last_name = input("Enter last name of contact to be deleted: ")
found_contact=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
found_contact=True
self.contacts.remove(contact)
print("Contact deleted successfully!")
if found_contact==False:
print("Contact does not exist, please check the first and last name you entered.")
def print_all_contacts(self):
"""
Prints the details of all contacts in the contact list.
This method displays the details of each contact in the contact list using a counter
to keep track of contact ids displayed.
If the contact list is empty, it notifies the user to add new contacts.
Returns:
None
"""
counter = 0
if(self.contacts.count==0):
print("Contact list is empty, please add new contacts.")
else:
print("\nFull Contact List: ")
for contact in self.contacts:
print("Contact id: ", counter)
contact.print_contact()
counter+=1
print("\n \n")
def print_contact_history(self):
"""
Prints the contact history for a specific contact.
This method prompts the user to enter the first name and last name of a contact
to retrieve their contact history.
If the contact is found in the contact list, it displays their contact history,
which may include details of previous interactions or communications.
If the specified contact does not exist in the list, a message is displayed indicating
that the contact was not found.
Returns:
None
"""
first_name = input("Enter first name of contact: ")
last_name = input("Enter last name of contact: ")
found_contact=False
for contact in self.contacts:
if contact.get_first_name()==first_name and contact.get_last_name()==last_name:
found_contact=True
print("Contact History: ", contact.get_contact_history())
if found_contact==False:
print("Contact does not exist, please check the first and last name you entered.")
def sort_contacts(self):
"""
Sorts the contacts in the contact list based on the user's choice.
This method allows the user to choose between two sorting options:
- 0: Ascending order based on first names.
- 1: Descending order based on first names.
Depending on the user's choice, it sorts the contacts accordingly and provides feedback
to the user.
Returns:
None
"""
choice=input("\n\nOptions: \n0. Ascending order \n1. Descending order \n\nHow do you want to sort: ")
if choice=="0":
self.contacts.sort(key=lambda contact: contact.get_first_name())
print("Contacts sorted in ascending order. Press 4 to view all contacts.")
elif choice=="1":
self.contacts.sort(key=lambda contact: contact.get_first_name(), reverse=True)
print("Contacts sorted in descending order. Press 4 to view all contacts.")
else:
print("Please enter a valid option")
def group_contacts(self):
"""
This method sorts the contacts based on the initial letter of their last names,
effectively grouping them alphabetically.
Returns:
None
"""
print("Grouping contacts by initial letter of last name")
self.contacts.sort(key=lambda contact:contact.get_last_name()[0] )
print("Contacts successfully grouped. Press 4 to view all contacts.")
| Kartik-Nair/PhoneBook | phonebook.py | phonebook.py | py | 16,960 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "model.contact.Contact",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "model.conta... |
26540686747 | from random import randint
import xlrd
from datetime import datetime
import matplotlib.pyplot as plt
# this should be done with a database, so I should not put too much effort into making this program easy to use
PATH = "/home/yannick/git-repos/MyPython/math programs/investi.xls" # .xls only
DATA_RANGE = (15, 559) # tuple of (start_num, end_num) !starting from 0, subtract 1 from line number
COLUMNS = {"year": "B", "id": "C", "A_or_R": "E", "m_score": "F", "e_score": "G"} # dict with "data name": "column charachter (A-ZZ)"
MODULO_NUMBER = 10
# For blank values, I just set a score of 0
def make_data_list(sheet, data_range, columns):
    """Extract the requested rows/columns of *sheet* as a list of row lists.

    data_range is a (start_row, end_row) pair (end exclusive); columns maps
    data names to zero-based column indices — values are read in the dict's
    insertion order.
    """
    return [
        [sheet.cell_value(rowx=row, colx=col) for col in columns.values()]
        for row in range(data_range[0], data_range[1])
    ]
def column_to_number(col):
    """Convert a spreadsheet column label ('A'..'ZZ', any case) to a
    zero-based index; labels longer than two letters return -1."""
    col = col.upper()
    if len(col) == 1:
        # 'A' is ASCII 65 and maps to index 0.
        return ord(col) - 65
    if len(col) == 2:
        # The leading letter is worth 26 columns per step ('A' = 1 * 26);
        # the trailing letter is the offset within that group of 26.
        return 26 * (ord(col[0]) - 64) + (ord(col[1]) - 65)
    return -1
def convert_columns(columns):
    """Replace each column letter in *columns* with its numeric index.

    Mutates the mapping in place and also returns it for convenience.
    """
    for key, letter in list(columns.items()):
        columns[key] = column_to_number(letter)
    return columns
def replace_something(list, replace, replaceWith):
    """Replace every cell of a two-dimensional list equal to *replace* with
    *replaceWith*, in place, and return the modified list.

    BUG FIX: the original compared cells with ``is`` (identity), which only
    matches interned objects — strings read from a spreadsheet are generally
    not interned, so blank markers like '_' were never replaced. Equality
    (``==``) is the intended comparison. The unused replacement counter was
    dropped.
    NOTE(review): the first parameter shadows the ``list`` builtin; the name
    is kept for interface compatibility with existing callers.
    """
    for row in list:
        for index, value in enumerate(row):
            if value == replace:
                row[index] = replaceWith
    return list
def make_columns_for_list(columns):
    """Map each data name in *columns* to its position in the row lists
    produced by make_data_list (i.e. its insertion-order index)."""
    return {key: position for position, key in enumerate(columns)}
def get_average_scores(list, english):
    """Average one score column separately for A-students and R-students.

    *english* selects the "e_score" column when truthy, "m_score" otherwise.
    Returns a tuple (average_A, average_R), or None when either group is
    empty. Relies on the module-level ``columns_for_list`` index mapping.
    """
    score_key = "e_score" if english else "m_score"
    totals = {"A": 0, "R": 0}
    counts = {"A": 0, "R": 0}
    for student in list:
        group = student[columns_for_list["A_or_R"]]
        if group in totals:
            totals[group] += student[columns_for_list[score_key]]
            counts[group] += 1
        else:
            print("wrong a or r")
    if counts["R"] == 0 or counts["A"] == 0:
        return None
    return (totals["A"] / counts["A"], totals["R"] / counts["R"])
def get_error(real_value, value):
    """Return the signed percentage deviation of *value* from *real_value*."""
    deviation = value - real_value
    return deviation / real_value * 100
# ---- script body: load the sheet, sample every Nth student, report ----
start_time = datetime.now()

book = xlrd.open_workbook(PATH)
sh = book.sheet_by_index(0)

columns = convert_columns(COLUMNS)
columns_for_list = make_columns_for_list(columns)
list_of_students = make_data_list(sh, DATA_RANGE, columns)
# Blank cells were exported as '_'; score them as 0 so the averages still work.
list_of_students = replace_something(list_of_students, '_', 0)

# Keep every MODULO_NUMBER-th student as the sample.
new_list = [student for i, student in enumerate(list_of_students)
            if i % MODULO_NUMBER == 0]

# Compute each aggregate exactly once (the original recomputed the sample and
# real averages twice); a stray debug print of a random digit was removed.
average_real_scores = get_average_scores(list_of_students, True)
average_sample_scores = get_average_scores(new_list, True)
average_sample_scores_M = get_average_scores(new_list, False)

print("This took a time of {} ".format(datetime.now() - start_time))
print(len(list_of_students))
print(len(new_list))

error_A = get_error(average_real_scores[0], average_sample_scores[0])
error_R = get_error(average_real_scores[1], average_sample_scores[1])
print("The average sample English scores are {:.4} for A-students and {:.4} for R-students".format(average_sample_scores[0], average_sample_scores[1]))
print("The average sample Math scores are {:.4} for A-students and {:.4} for R-students".format(average_sample_scores_M[0], average_sample_scores_M[1]))
print("The sampled and real value differ by {:.2} percent for A-students and {:.2} percent for R-students".format(error_A, error_R))
| su595/MyPython | math programs/statistics.py | statistics.py | py | 4,798 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "xlrd.open_w... |
32882450678 | #!/usr/bin/python3
import os, os.path
import json
import subprocess
from flask import Flask, request, redirect, abort
from time import sleep
app = Flask(__name__)  # WSGI application object; routes below are registered on it
GITROOT = '/home/ubuntu/service/'  # base directory that holds the deployed repositories
@app.route('/')
def index():
    """Redirect plain GETs on the root to the project's GitHub page."""
    project_url = 'https://github.com/TauWu/spider_monitor_api'
    return redirect(project_url)
@app.route('/', methods=['POST'])
def commit():
    """Restart the spider_monitor_api service when a webhook POST arrives.

    Changes into the repo's ``bash`` directory under GITROOT, runs the
    restart script, waits for the service to come back, then acknowledges.
    """
    # NOTE(review): the payload is hard-coded rather than parsed from the
    # request body — presumably every hook targets the same repo; confirm.
    payload = {"repository": {"name": "spider_monitor_api"}}
    repo = payload['repository']['name']
    script_dir = os.path.join(GITROOT, "%s/bash" % repo)
    os.chdir(script_dir)
    os.system("./start_service.sh")
    sleep(10)  # give the restarted service time to come up before replying
    return 'success.'
application = app # For WSGI
# NOTE(review): debug=True enables the interactive Werkzeug debugger and code
# reloading — it must not stay enabled on an internet-facing deployment.
if __name__ == '__main__':
    app.run('0.0.0.0',port=9000,debug=True)
| TauWu/spider_monitor_api | extra/hook.py | hook.py | py | 732 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
30325551719 | # -*- coding: utf-8 -*-
import http.client
import csv
import json
# Persistent HTTPS connection to the InvoiceXpress API, reused for every POST.
conn = http.client.HTTPSConnection("empresa.app.invoicexpress.com")

# The request headers never change between rows, so build them once
# (the original rebuilt this dict on every iteration).
headers = {
    'accept': "application/json",
    'content-type': "application/json"
}

# Read the items from the semicolon-separated CSV and create one API item
# per data row.
with open("itens2.csv", newline="") as csvfile:
    csv_reader = csv.reader(csvfile, delimiter=";")
    next(csv_reader)  # skip the header row
    for row in csv_reader:
        name = row[0]
        description = row[1]
        unit_price = row[2]

        payload = {
            "item": {
                "name": name,
                "description": description,
                "unit_price": unit_price,
                "unit": "unit",
                "tax": {"name": "IVA23"}
            }
        }

        conn.request("POST", "/items.json?api_key=sua-api-key-aqui",
                     json.dumps(payload), headers)
        res = conn.getresponse()
        # The response must be fully read before the connection can be
        # reused for the next request.
        data = res.read()
        print(data.decode("utf-8"))
| wesleyy598/Consumindo-API-Python | InvoiceXpress/Importar Invoice/Importar Preços de Portugal.py | Importar Preços de Portugal.py | py | 1,145 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "http.client.client.HTTPSConnection",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "http.client.client",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "c... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.