seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42183818903 | """
Module for entities implemented using the
switch platform (https://www.home-assistant.io/integrations/switch/).
"""
from __future__ import annotations
import logging
from typing import Any
from hahomematic.const import TYPE_ACTION, HmPlatform
import hahomematic.device as hm_device
from hahomematic.entity import GenericEntity
_LOGGER = logging.getLogger(__name__)
class HmSwitch(GenericEntity[bool]):
    """
    Implementation of a switch.
    This is a default platform that gets automatically generated.
    """

    def __init__(
        self,
        device: hm_device.HmDevice,
        unique_id: str,
        channel_address: str,
        parameter: str,
        parameter_data: dict[str, Any],
    ):
        """Initialize the switch entity and register it on the SWITCH platform."""
        super().__init__(
            device=device,
            unique_id=unique_id,
            channel_address=channel_address,
            parameter=parameter,
            parameter_data=parameter_data,
            platform=HmPlatform.SWITCH,
        )

    @property
    def value(self) -> bool | None:
        """Get the value of the entity."""
        # ACTION parameters have no persistent state, so always report "off".
        return False if self._type == TYPE_ACTION else self._value

    async def turn_on(self) -> None:
        """Turn the switch on."""
        await self.send_value(True)

    async def turn_off(self) -> None:
        """Turn the switch off."""
        await self.send_value(False)
| Lemocin/hahomematic | hahomematic/platforms/switch.py | switch.py | py | 1,379 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "hahomematic.entity.GenericEntity",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "hahomematic.device.HmDevice",
"line_number": 25,
"usage_type": "attribute"
},
{
... |
73934624355 |
"""General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from --checkpoints_dir and save the results to --results_dir.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for --num_test images and save results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import util.util as util
import torch
import util.patchify as patchify
import time
from pytorch_lightning import metrics
import pytorch_fid
import matplotlib.pyplot as plt
import scipy.ndimage
import nibabel
import numpy as np
import numpy as np
if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 1   # test code only supports num_threads = 1
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to a HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    # train_dataset = create_dataset(util.copyconf(opt, phase="train"))
    model = create_model(opt)      # create a model given opt.model and other options
    # create a webpage for viewing the results
    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
    # print('creating web directory', web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
    # prepare metrics: dict keys of the generated / ground-truth side, e.g. 'fake_B' / 'real_B' for direction AtoB
    fake_key = 'fake_' + opt.direction[-1]
    real_key = 'real_' + opt.direction[-1]
    metricMAE = metrics.MeanAbsoluteError().to(torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu'))
    metricMSE = metrics.MeanSquaredError().to(torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu'))
    for i, data in enumerate(dataset):
        if i == 0:
            # one-time, data-dependent initialization on the first batch
            model.data_dependent_initialize(data)
            model.setup(opt)  # regular setup: load and print networks; create schedulers
            # model.parallelize()
            if opt.eval:
                model.eval()
        # if i >= opt.num_test:  # only apply our model to opt.num_test images.
        #     break
        real_A = data['A']
        real_B = data['B']
        print(data.keys())
        print('Input min', np.amin(real_A.numpy()))
        print('Input max', np.amax(real_A.numpy()))
        print('Input max', real_A.numpy().shape)  # NOTE(review): label says "max" but the shape is printed
        # split the input into 512x512 patches and run the generator on each patch
        patches = patchify.patchify(real_A.numpy(),1, 512)
        for p in range(len(patches)):
            patch = patches[p]
            # transposed = np.transpose(patch.patch, axes=(0,1,3,2))
            data['A'] = torch.tensor(patch.patch).type(torch.FloatTensor)
            model.set_input(data)  # unpack data from data loader
            model.test()           # run inference
            fake_B = model.fake_B.clone().detach()
            patch.patch = fake_B.cpu().numpy()  # get image results
            img_path = model.get_image_paths()  # get image paths
        # stitch the translated patches back into a single image
        prediction = patchify.unpatchify(patches, 0, 512)
        # map network output back to CT values -- assumes inputs were normalised as (HU + 1024) / 4095; TODO confirm
        new_CT = 4095*prediction - 1024
        # NOTE(review): np.int was removed in NumPy 1.24+; should be int or np.int64 -- confirm pinned NumPy version
        new_CT = np.array(new_CT, dtype=np.int)
        ni_image = nibabel.Nifti1Image(new_CT[0,0,:,:], np.eye(4))
        os.makedirs('content/results', exist_ok=True)
        nibabel.save(ni_image, 'content/results/'+img_path[0].split('/')[-1])
        # rescale from [0, 1] to [-1, 1] for visualisation/metrics
        prediction = (prediction-0.5)/0.5
        print(prediction)
        # prediction = np.array(scipy.ndimage.zoom(prediction, (1, 1, 2, 2), order=1),dtype=np.float)
        print('Input min', np.amin(prediction))
        print('Input max', np.amax(prediction))
        print(prediction.shape)
        real_A = (real_A-0.5)/0.5
        real_B = (real_B-0.5)/0.5
        # real_A = np.array(scipy.ndimage.zoom(real_A, (1, 1, 2, 2), order=1), dtype=np.float)
        # real_B = np.array(scipy.ndimage.zoom(real_B, (1, 1, 2, 2), order=1),dtype=np.float)
        visuals = {'real_A': real_A, 'fake_B': torch.tensor(prediction), 'real_B': real_B}
        # apply metrics
        # metricMAE(visuals[fake_key], visuals[real_key])
        # metricMSE(visuals[fake_key], visuals[real_key])
        if i % 5 == 0:  # save images to an HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, width=opt.display_winsize)
    webpage.save()  # save the HTML
    # compute metrics
    # mae = metricMAE.compute()
    # mse = metricMSE.compute()
    # print('MAE: ', mae)
    # print('MSE: ', mse)
    fid_paths = [os.path.join(web_dir, 'images', fake_key), os.path.join(web_dir, 'images', real_key)]
    # fid_value = fid_score.calculate_fid_given_paths(fid_paths,
    #                                                 batch_size=50,
    #                                                 device=None,
    #                                                 dims=2048)
    # print('FID: ', fid_value)
| deltahue/DL-Project-2020 | contrastive-unpaired-translation-master/test.py | test.py | py | 6,810 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "options.test_options.TestOptions",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "data.create_dataset",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "models.create_model",
"line_number": 66,
"usage_type": "call"
},
{
"api_name... |
74136555873 | import numpy as np
import copy
import cv2
import operator
from scipy.ndimage import zoom
from skimage import measure
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian
def CRF(img,anno,gt_prob):
    """Refine a coarse label map with a dense CRF over the RGB image.

    Args:
        img: RGB image array (H x W x 3) -- used for the colour-dependent term.
        anno: coarse per-pixel label map to refine.
        gt_prob: confidence assigned to the input labels in the unary term.

    Returns:
        Refined label map of shape (H, W) with original label values restored.
    """
    # gt: sorted unique labels of anno; labels: anno re-indexed to 0..len(gt)-1
    gt, labels = np.unique(anno, return_inverse=True)
    gt=gt[1:]  # drop the lowest label -- treated as the "unsure" class below
    n_labels = len(set(labels.flat)) - 1  # number of real classes (without the unsure one)
    NL = np.unique(labels)
    d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], n_labels)
    # unary potentials from the (noisy) labels; zero_unsure marks index 0 as unknown
    U = unary_from_labels(labels, n_labels, gt_prob=gt_prob, zero_unsure=True)
    d.setUnaryEnergy(U)
    # This adds the color-independent term, features are the locations only.
    d.addPairwiseGaussian(sxy=(3, 3), compat=3, kernel=dcrf.DIAG_KERNEL,
                          normalization=dcrf.NORMALIZE_SYMMETRIC)
    # This adds the color-dependent term, i.e. features are (x,y,r,g,b).
    d.addPairwiseBilateral(sxy=(80, 80), srgb=(5, 5, 5), rgbim=img,
                           compat=4,
                           kernel=dcrf.DIAG_KERNEL,
                           normalization=dcrf.NORMALIZE_SYMMETRIC)
    Q = d.inference(5)  # 5 mean-field iterations
    MAP = np.argmax(Q, axis=0)  # most probable class index per pixel
    FMAP = copy.deepcopy(MAP)
    # map CRF class indices back to the original label values
    for i in range(len(NL) - 1):
        ind = np.where(MAP == i)
        FMAP[ind] = gt[i]
    # label 21 appears to be an "uncertain" marker set by the caller; reset it to background -- TODO confirm
    index = np.where(FMAP == 21)
    FMAP[index] = 0
    return FMAP.reshape(img.shape[0],img.shape[1])
def sod(img_name,cam_img_fg,sup_cue_img,sod_img,orig_img,gt_prob):
    """Fuse CAM cues with a saliency (SOD) map into localization cues.

    Args:
        img_name: image identifier (unused in the body).
        cam_img_fg: CAM-derived foreground label map.
        sup_cue_img: superpixel cue map; value 0 = background, 22 presumably
            marks "unsure" pixels -- TODO confirm against the caller.
        sod_img: saliency map, thresholded and connected-component labelled.
        orig_img: original RGB image, used when a CRF refinement is needed.
        gt_prob: label confidence forwarded to CRF().

    Returns:
        Cue label map (same shape as sup_cue_img): 0 = background, 22 = unsure.
    """
    sup_cue_img_Nobg = copy.deepcopy(sup_cue_img)
    sup_cue_img_Nobg[sup_cue_img == 22] = 0  # clear the "unsure" marker
    # downsample the saliency map to the 41x41 cue resolution
    dss_img = zoom(sod_img, (41.0 / sod_img.shape[0], 41.0 / sod_img.shape[1]), order=1)
    dss_img_TV = cv2.threshold(dss_img, 150, 255, cv2.THRESH_BINARY)
    # connected components of the binarised saliency map ([1] is the thresholded image)
    dss_img_TV_labels = measure.label(dss_img_TV[1], connectivity=1)
    dss_c=np.delete(np.unique(dss_img_TV_labels),0)  # drop background component 0
    #out for fore obj
    out = np.zeros_like(sup_cue_img)
    for dss_c_i in dss_c:
        label_index_i=np.where(dss_img_TV_labels==dss_c_i)
        cue_c=np.unique(sup_cue_img_Nobg[label_index_i[0],label_index_i[1]])
        if len(cue_c)==2 and cue_c[0]==0:
            # exactly one foreground class inside this salient blob: assign it wholesale
            out[label_index_i[0],label_index_i[1]]=cue_c[1]
        elif len(cue_c) == 1 and cue_c[0] > 0:
            # blob entirely covered by a single foreground class
            out[label_index_i[0], label_index_i[1]] = cue_c[0]
        elif (cue_c[0] > 0 and len(cue_c) > 1) or len(cue_c) > 2:
            # several classes compete inside the blob: resolve per-pixel via dense CRF
            out1 = 21*np.ones_like(out)
            out1[label_index_i[0], label_index_i[1]] = sup_cue_img_Nobg[label_index_i[0], label_index_i[1]]
            img = zoom(orig_img, (41.0 / orig_img.shape[0], 41.0 / orig_img.shape[1], 1), order=1)
            crf_img = CRF(img, out1,gt_prob)
            out[label_index_i[0], label_index_i[1]] = crf_img[label_index_i[0], label_index_i[1]]
    # second pass: fill CAM foreground components that received no label above
    cue_img_TV = copy.deepcopy(cam_img_fg)
    cue_img_TV[cue_img_TV != 0] = 1
    cue_img_TV_labels = measure.label(cue_img_TV, connectivity=1)
    cue_img_TV_c = np.delete(np.unique(cue_img_TV_labels), 0)
    for cue_img_TV_c_i in cue_img_TV_c:
        label_index_i = np.where(cue_img_TV_labels == cue_img_TV_c_i)
        out_c = list(np.unique(out[label_index_i[0], label_index_i[1]]))
        if operator.eq(out_c, [0]):
            # component is still all-background in `out`: copy the CAM labels in
            out[label_index_i[0], label_index_i[1]] = cam_img_fg[label_index_i[0], label_index_i[1]]
    # out for bg obj: unlabeled pixels become "unsure" (22), known background stays 0
    out[out==0]=22
    sup_bg_indedx=np.where(sup_cue_img==0)
    out[sup_bg_indedx[0],sup_bg_indedx[1]]=0
    return out
| DQDH/Semantic_Image_Segmentation | make_localization_cues/generate_cues/tools/sod.py | sod.py | py | 3,447 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.unique",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pydensecrf.densecrf.DenseCRF2D",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pydensecrf.d... |
37820308114 | import os
import sys
import time
import shutil
import random
import socket
import winreg
from multiprocessing.spawn import freeze_support
import psutil
import logging
import hashlib
from contextlib import closing
from http_server import start_web
from configparser import ConfigParser
from concurrent.futures import ProcessPoolExecutor
def check_socket(port):
    """Return True when something is listening on 127.0.0.1:<port>."""
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(1)
        # connect_ex() returns 0 on a successful connection (port in use)
        return sock.connect_ex(("127.0.0.1", port)) == 0
def gen_http_port():
    """Pick a random free TCP port on localhost in [1024, 65535].

    Bug fix: the original recursed with ``gen_http_port()`` but did not
    return the recursive result, so whenever the first randomly chosen
    port was busy the function returned None. A loop avoids both the
    missing return and unbounded recursion depth.

    Returns:
        int: a port number on which nothing is currently listening.
    """
    while True:
        port = random.randint(1024, 65535)
        if not check_socket(port):
            return port
def find_nx():
    """Locate the Nexon Launcher install directory via the Windows registry.

    Reads the nxl:// protocol handler command under HKLM and strips the
    quotes and the trailing ``\\nexon_launcher.exe`` from it.

    Returns:
        The launcher directory path, or None when the lookup fails
        (launcher not installed, or not running on Windows).
    """
    try:
        local_reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        reg_key = winreg.OpenKey(local_reg, r"SOFTWARE\Classes\nxl\shell\open\command")
        # default value looks like '"C:\...\nexon_launcher.exe" "%1"'
        _nx_path = winreg.QueryValueEx(reg_key, "")[0]
        _nx_path = _nx_path.replace('"', "").split("\\nexon_launcher.exe")[0]
        return _nx_path
    except Exception as e:
        print(e)
        return None
def write_patch(save_path, data):
    """Write *data* to *save_path*, truncating any existing file."""
    with open(save_path, "w") as patch_file:
        patch_file.write(data)
def remove_data(remove_path):
    """Delete *remove_path*: unlink a regular file, recursively remove a
    directory tree, and silently do nothing when the path does not exist."""
    if os.path.isfile(remove_path):
        os.remove(remove_path)
    if os.path.isdir(remove_path):
        shutil.rmtree(remove_path)
def check_process_running(process_name_list: list):
    """Return True if any running process name contains one of the given
    names (case-insensitive substring match), else False."""
    for proc in psutil.process_iter():
        try:
            for process_name in process_name_list:
                if process_name.lower() in proc.name().lower():
                    return True
                else:
                    continue
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # the process vanished or is inaccessible while being inspected -- skip it
            pass
    return False
if __name__ == '__main__':
    freeze_support()
    # fix frozen exe: resolve the program directory for both frozen (exe) and script runs
    if getattr(sys, 'frozen', False):
        pwd = os.path.dirname(os.path.abspath(sys.executable))
    else:
        pwd = os.path.dirname(os.path.abspath(__file__))
    cfg = ConfigParser()
    # log to usage.log and to the console with a shared format
    logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] %(threadName)s: %(message)s")
    rootLogger = logging.getLogger()
    fileHandler = logging.FileHandler("usage.log")
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.DEBUG)
    # NOTE(review): log_level defaults to the int constant logging.INFO while the
    # config value is a string; only the string comparison below ever uses it.
    log_level = logging.INFO
    auto_start = True
    try:
        cfg.read(os.path.join(pwd, 'config.ini'), encoding="UTF-8")
        log_level = cfg.get('settings', 'log_level')
        auto_start = cfg.getboolean("settings", "auto_start")
        nexon_dir = cfg.get("settings", "nexon_dir")
        if log_level == "DEBUG":
            rootLogger.setLevel(logging.DEBUG)
        else:
            rootLogger.setLevel(logging.INFO)
    except Exception as e:
        logging.error("Cannot read config.ini")
        logging.debug(e)
        time.sleep(6)
        sys.exit(1)
    # pre init: refuse to run while any launcher process is alive
    nx_exe_list = ["nexon_launcher", "nexon_runtime", "nexon_updater", "nexon_agent", "nexon_client"]
    if check_process_running(nx_exe_list):
        logging.error("This tool cannot run when Nexon Launcher was running")
        time.sleep(6)
        sys.exit(1)
    # fuck kd
    kd_list = ["kdjsq"]
    # NOTE(review): this re-checks nx_exe_list; kd_list above is never used --
    # likely intended check_process_running(kd_list). Confirm before changing.
    if check_process_running(nx_exe_list):
        logging.warning('致KD用户: 这个工具会"损坏"NX登录器,请勿使用.')
        logging.warning('3秒后自动退出...')
        time.sleep(3)
        sys.exit(1)
    # Python source injected into the launcher's contenttools module: it
    # monkey-patches urllib3 so launcher downloads of download2.nexon.net are
    # redirected to the local HTTP server. (Indentation inside the string was
    # lost in extraction and is reconstructed here -- verify against upstream.)
    patch_data = '''
from urllib3 import request as _orig_request
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
    if "download2.nexon.net" in url.lower():
        url = url.replace("download2.nexon.net", "127.0.0.1:_HTTP_PORT_")
        # try to fix for ms2
        if url.lower().startswith("https://"):
            url = url.replace("https://", "http://")
    method = method.upper()
    urlopen_kw["request_url"] = url
    if method in self._encode_url_methods:
        return self.request_encode_url(method, url, fields=fields, headers=headers, **urlopen_kw)
    else:
        return self.request_encode_body(method, url, fields=fields, headers=headers, **urlopen_kw)
_orig_request.RequestMethods.request = request
'''
    logging.info("UUCCS UPUP, 一时手冲一时爽,一直手冲一直爽!")
    logging.info("Setting up ENV......")
    # gen http port
    http_port = gen_http_port()
    logging.info("HTTP Server Port: {}".format(http_port))
    # find nx dir: registry first, config.ini fallback second
    nx_reg_data = find_nx()
    if nx_reg_data:
        nx_path = os.path.join(nx_reg_data, r"bin\modules\contenttools")
        target_file = os.path.join(nx_path, "__init__.py")
        logging.info("Nexon Launcher: {}".format(nx_reg_data))
    else:
        logging.warning("Fail to find Nexon Launcher folder via registry.. trying config setting")
        if nexon_dir:
            nx_path = os.path.join(nexon_dir, r"bin\modules\contenttools")
            target_file = os.path.join(nx_path, "__init__.py")
            logging.info("Nexon Launcher: {}".format(nexon_dir))
        else:
            logging.error("Sorry, Could not found Nexon Launcher folder...")
            logging.error("Exit within 10s.")
            time.sleep(10)
            sys.exit(1)
    # write patch data
    if os.path.isdir(nx_path):
        if os.path.exists(target_file):
            logging.warning("Old Patch File found!...removing")
            remove_data(target_file)
            remove_data(os.path.join(nx_path, "__pycache__"))
        logging.warning("Trying patch Nexon Launcher...")
        patch_data = patch_data.replace("_HTTP_PORT_", str(http_port))
        # verify the write by comparing the MD5 of the string with the file on disk
        patch_data_md5 = hashlib.md5(patch_data.encode('utf-8')).hexdigest()
        logging.debug("Patch Data String md5: {}".format(patch_data_md5))
        write_patch(target_file, patch_data)
        file_md5 = hashlib.md5(open(target_file, 'r').read().encode()).hexdigest()
        if os.path.exists(target_file) and file_md5 == patch_data_md5:
            logging.info("Nexon Launcher Patched!")
        else:
            logging.error("Nexon Launcher Patch Failed, please run with administrator rights!\n\tor allow {} in "
                          "your Antivirus software!".format(sys.executable))
            logging.debug("String md5: {} --- File md5: {}".format(patch_data_md5, file_md5))
            logging.debug("File Status: {}".format(os.path.exists(target_file)))
            time.sleep(6)
            sys.exit(1)
    # start http: serve game files from ./Game when present
    file_path = os.path.join(pwd, "Game")
    if not os.path.isdir(file_path):
        file_path = pwd
        logging.error("Static folder not found! Set to current folder")
        logging.warning("THIS WILL BE VERY SLOW!!!")
    if auto_start:
        logging.info("Start Nexon Launcher Now...")
        os.system('cd "{}" & {}'.format(find_nx(), "nexon_launcher.exe"))
        # clean up the injected patch once the launcher has been started
        remove_data(target_file)
    with ProcessPoolExecutor(max_workers=1) as web_pool:
        web_pool.submit(start_web, file_path, http_port)
    sys.exit(0)
| nyacat/gms_auto | gms_patcher/main.py | main.py | py | 7,199 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "contextlib.closing",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STRE... |
18934781052 | import argparse
from typing import Any, Dict, List, Optional, Tuple, Union
from nerf_turret_utils.number_utils import map_range
def assert_in_int_range(value: int, min_val: int, max_val: int) -> int:
    """
    Check whether an input integer value is within a certain range.

    Parameters:
        value: The input value to check.
        min_val: The minimum allowed value (inclusive).
        max_val: The maximum allowed value (inclusive).

    Returns:
        int: The input value, if it is within the allowed range.

    Raises:
        argparse.ArgumentTypeError: If any argument is not an int (note:
            ``type(...) == int`` rejects bools too) or the value is outside
            the allowed range.
    """
    all_ints = type(value) == int and type(min_val) == int and type(max_val) == int
    if not all_ints or value < min_val or value > max_val:
        raise argparse.ArgumentTypeError(
            f"{value} is not within range [{min_val}, {max_val}] or is not an integer")
    return value
def slow_start_fast_end_smoothing(x: float, p: float, max_value: int) -> float:
    """
    Map *x* through a power curve that rises slowly near zero and quickly
    near *max_value*.

    Args:
        x: Input value, expected in the range 0..max_value (a negative x
            yields a negative output of the same magnitude).
        p: Exponent of the power curve; larger p means a steeper finish.
        max_value: Upper bound of the input/output range.

    Returns:
        float: The remapped value.
    """
    curved = (x / max_value) ** p * max_value
    if x >= 0:
        return curved
    # preserve the sign convention of the original mapping for negative input
    return -abs(curved)
def get_priority_target_index(targets: List[dict], type: str, target_ids: List[str]=[]) -> Optional[int]:
    """
    Return the index of the highest-priority target in *targets*.

    Priority rules:
      * When *target_ids* is non-empty (faces only): first target whose
        ``id`` is in the list.
      * Otherwise, when looking for a 'person': prefer any detected face,
        then the person itself (a face also counts as a person).
      * Otherwise: first target whose type matches *type*.

    Args:
        targets: list of target dicts with 'type' and optionally 'id' keys.
        type: target type to prioritize when no IDs match.
        target_ids: face IDs that take priority over type matching.
            (NOTE: mutable default is safe here -- it is never mutated.)

    Returns:
        Index of the chosen target, or None when nothing matches.
    """
    if target_ids:
        assert type == "face", "The `type` argument must be 'face' if the `ids` argument is not empty, as these are face IDs."
        for idx, candidate in enumerate(targets):
            if candidate.get("id", None) in target_ids:
                return idx
        return None

    for idx, candidate in enumerate(targets):
        if candidate["type"] == 'person' and type == 'person':
            # a person was found -- prefer a face anywhere in the list
            for face_idx, maybe_face in enumerate(targets):
                if maybe_face['type'] == 'face':
                    return face_idx
            return idx
        if candidate["type"] == 'face' and type == 'person':
            return idx  # a face is part of a person
        if candidate["type"] == type:
            return idx
    return None
def get_elevation_speed(args: Any, view_height:int, movement_vector:Tuple, target_box: Tuple[int,int,int,int]) -> int:
    """
    Calculates the elevation speed of a Nerf turret.

    Args:
        args: parsed CLI options; reads accuracy_threshold_y,
            max_elevation_speed, y_speed and elevation_dp.
        view_height: The height of the camera view in pixels.
        movement_vector: (x, y) vector from the turret to the center of the view.
        target_box: target bounding box as (left, top, right, bottom).

    Returns:
        int: The elevation speed of the turret.
    """
    top = target_box[1]
    max_elevation = (view_height/2)  # largest possible vertical offset from the view center
    abs_movement_vector = abs(movement_vector[1])
    if top == 0:
        return 1 # Do this of the target is to the edge of the camera view but filling it up
    # map the vertical error (beyond the accuracy dead-zone) onto [0, max_elevation_speed]
    elevation_speed_adjusted = map_range(abs_movement_vector - args.accuracy_threshold_y, 0, max_elevation, 0 , args.max_elevation_speed) * float(args.y_speed)
    # TODO: implement a smoothing function to smooth out the speed
    # smooth_elevation_speed_adjusted = min(0,slow_start_fast_end_smoothing(elevation_speed_adjusted, float(args.y_smoothing) + 1.0, 10))
    # halve and round to the configured number of decimal places before truncating to int
    final_speed = round(elevation_speed_adjusted / 2 , args.elevation_dp)
    return int(final_speed)
def get_elevation_clockwise(movement_vector: Tuple[float, float]) -> bool:
    """
    Determines whether the Nerf turret elevation stepper motor should rotate clockwise based on its movement vector.

    Args:
        movement_vector (Tuple[float, float]): (horizontal, vertical) movement of the turret.

    Returns:
        bool: True if the turret elevation should rotate clockwise, False otherwise.
    """
    # a negative vertical component means the motor must turn clockwise
    is_clockwise = movement_vector[1] < 0
return is_clockwise | anjrew/Autonomous-Nerf-Turret | components/ai_controller/ai_controller_utils.py | ai_controller_utils.py | py | 5,722 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typing.Any",
... |
36105481363 | import gzip
from tempfile import mkdtemp
import os
import numpy as np
from astropy.io import fits
def _make_file_for_testing(file_name='', **kwd):
    """Write a minimal FITS file at *file_name*: the primary HDU holds
    np.arange(100) as uint16, and every keyword argument becomes a header card."""
    img = np.uint16(np.arange(100))
    hdu = fits.PrimaryHDU(img)
    for k, v in kwd.items():
        hdu.header[k] = v
    hdu.writeto(file_name)
def directory_for_testing():
    """
    Set up directory with these contents:

    One file with imagetyp BIAS. It has an the keyword EXPOSURE in
    the header, but no others beyond IMAGETYP and the bare minimum
    created with the FITS file.

    File name(s)
    ------------
    no_filter_no_object_bias.fit

    Five (5) files with imagetyp LIGHT, including two compressed
    files.

    + One file for each compression type, currently .gz and .fz.
    + ALL of the files will have the keyword EXPOSURE
      in the header.
    + Only ONE of them will have the value EXPOSURE=15.0.
    + All of the files EXCEPT ONE will have the keyword
      FILTER with the value 'R'.
    + NONE of the files have the keyword OBJECT

    File names
    ----------
    test.fits.fz
    filter_no_object_light.fit
    filter_object_light.fit.gz
    filter_object_light.fit
    no_filter_no_object_light.fit   <---- this one has no filter

    Returns
    -------
    tuple: (dict of expected counts used by the tests, path of the directory)
    """
    # expected counts, validated by the tests that consume this directory
    n_test = {
        'files': 6,
        'missing_filter_value': 1,
        'bias': 1,
        'compressed': 2,
        'light': 5
    }
    test_dir = mkdtemp()
    # Directory is reset on teardown.
    original_dir = os.getcwd()
    os.chdir(test_dir)
    _make_file_for_testing(file_name='no_filter_no_object_bias.fit',
                           imagetyp='BIAS',
                           EXPOSURE=0.0)
    _make_file_for_testing(file_name='no_filter_no_object_light.fit',
                           imagetyp='LIGHT',
                           EXPOSURE=1.0)
    _make_file_for_testing(file_name='filter_no_object_light.fit',
                           imagetyp='LIGHT',
                           EXPOSURE=1.0,
                           filter='R')
    _make_file_for_testing(file_name='filter_object_light.fit',
                           imagetyp='LIGHT',
                           EXPOSURE=1.0,
                           filter='R')
    # gzip-compress a copy of one LIGHT file
    with open('filter_object_light.fit', 'rb') as f_in:
        with gzip.open('filter_object_light.fit.gz', 'wb') as f_out:
            f_out.write(f_in.read())
    # filter_object.writeto('filter_object_RA_keyword_light.fit')
    # NOTE(review): despite the .fz suffix this file is written as a plain
    # FITS file -- presumably it only needs the suffix for the tests; confirm.
    _make_file_for_testing(file_name='test.fits.fz',
                           imagetyp='LIGHT',
                           EXPOSURE=15.0,
                           filter='R')
    os.chdir(original_dir)
    return n_test, test_dir
def sample_directory_with_files():
    """
    Returns the path to the small sample directory used
    in the tests of ``ImageFileCollection``. Primarily intended
    for use in the doctests.
    """
    # the expected-count dict is irrelevant here; only the path is needed
    _counts, tmpdir = directory_for_testing()
    return tmpdir
| astropy/ccdproc | ccdproc/utils/sample_directory.py | sample_directory.py | py | 2,932 | python | en | code | 86 | github-code | 1 | [
{
"api_name": "numpy.uint16",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.PrimaryHDU",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits"... |
15674221296 | import sys
import googlemaps
from datetime import datetime,date
from datetime import timedelta
# Abort early with a clear message when the local config module (holding the
# Google Maps API key) is missing.
try:
    from config import config
except ImportError:
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should trigger this path.
    print("No existe el archivo config, stop")
    sys.exit()
from helpers import helpers

# Module-level helper instance shared by this script.
H = helpers()
def groute(start, end, timet, mode="walking"):
    """Query the Google Maps Directions API for a route.

    Args:
        start: origin as "lat,lng" (comma separated, no spaces).
        end: destination in the same format.
        timet: requested departure time (datetime). For transit it is
            overridden with the next Friday relative to today, because
            transit schedules need a concrete service day.
        mode: one of "driving", "walking", "bicycling", "transit".

    Returns:
        The raw directions result (a list of route dicts) from googlemaps.

    Fix: removed the large block of unreachable code (step extraction) that
    sat after the return statement in the original.
    """
    gmaps = googlemaps.Client(key=config.gmapsAPI)
    if mode == "transit":
        # next Friday (today itself when today is a Friday): weekday() == 4
        today = datetime.today()
        timet = today + timedelta((4 - today.weekday()) % 7)
    directions_result = gmaps.directions(start,  # string coords separated by comma without spaces
                                         end,    # string coords separated by comma without spaces
                                         # language="en",
                                         mode=mode,  # options: driving, walking, bicycling, transit
                                         departure_time=timet)
    return directions_result
if __name__ == "__main__":
    # Smoke test: request a transit route between two hard-coded coordinates.
    now = datetime.now()
    print(now)
    route=groute("41.4036521,2.1718907","41.3863801,2.1254988",now,"transit")
    print(route)
    #graphic representation:
    #https://stackoverflow.com/questions/16180104/get-a-polyline-from-google-maps-directions-v3
| carlitoselmago/streetPred | googledirections.py | googledirections.py | py | 2,186 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.exit",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "helpers.helpers",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "googlemaps.Client",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "config.config.gmapsAPI",
... |
27394255300 | import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
# Input data: one row per admission year.
X = np.array([[2022], [2021], [2020]])  # year
Y = np.array([442, 440, 458])  # admission score

best_degree = 1  # degree of the best polynomial found so far
best_mse = float('inf')  # lowest mean squared error seen so far

# Try polynomial degrees 1..5 and keep the one with the lowest training MSE.
# NOTE(review): with only 3 samples, any degree >= 2 interpolates the data
# exactly, so selecting on *training* MSE always favours higher degrees.
for degree in range(1, 6):
    poly_features = PolynomialFeatures(degree=degree)
    X_poly = poly_features.fit_transform(X)
    model = LinearRegression()
    model.fit(X_poly, Y)
    Y_pred = model.predict(X_poly)
    mse = mean_squared_error(Y, Y_pred)
    if mse < best_mse:
        best_mse = mse
        best_degree = degree

# Refit using the selected degree.
poly_features = PolynomialFeatures(degree=best_degree)
X_poly = poly_features.fit_transform(X)
model = LinearRegression()
model.fit(X_poly, Y)

# Predict the 2023 admission score.
# Bug fix: the original passed [[2020]] here, "predicting" a year already in
# the training data even though the variable, comments and output all say 2023.
year_2023 = np.array([[2023]])
year_2023_poly = poly_features.transform(year_2023)
predicted_score = model.predict(year_2023_poly)

# Build a human-readable expression of the fitted polynomial.
coefficients = model.coef_
intercept = model.intercept_
expression = f"{intercept:.2f}"
for i, coef in enumerate(coefficients[1:], start=1):
    expression += f" + {coef:.2f} * X^{i}"

# Report results (user-facing labels kept in the original Chinese).
print("最佳拟合方案的多项式次数:", best_degree)
print("最佳拟合方案的均方误差:", best_mse)
print("预测的2023年录取分数:", predicted_score[0])
print("拟合函数的表达式:", expression)
| lyscf/gaokao-analytics | predict/predict_demo.py | predict_demo.py | py | 1,771 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.PolynomialFeatures",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklear... |
22852644906 | from django.shortcuts import render, HttpResponse, redirect
from django.db.models import Max, Min, Count
from attendance.models import extemployeeatt, holiday, classlist, classes, classsolt, timesolt, checkinout
from hr.models import employee
from . import msslqdb
import datetime
import time
import threading
from xml.etree.ElementTree import parse
# Create your views here.
def index(request):
    """Redirect the app root to the attendance check-in page."""
    return redirect('/att/attendance/')
def attendance(request):
    """Render the daily check-in overview: for every punch-device PIN seen on
    the requested day, show the employee, first/last punch time, punch count
    and the elapsed time between first and last punch."""
    context = {}
    context['title'] = '签到'
    if request.method == "GET":
        # print('get')
        # requested date, defaulting to today
        if 'datecheck' in request.GET:
            datecheck = request.GET['datecheck']
            daytoday = datetime.datetime.strptime(datecheck, '%Y-%m-%d')
        else:
            daytoday = datetime.datetime.today()
        # NOTE(review): the name/department filters are read but never applied below
        if "namecheck" in request.GET:
            employeename = request.GET['namecheck']
            if employeename == '':
                pass
        if 'departmentcheck' in request.GET:
            department = request.GET['departmentcheck']
            if department == '':
                pass
        context['daytoday'] = daytoday
        d = []
        pindicts= {}
        # look up PINs: map each attendance-device PIN to employee name/department
        # emppins = employee.objects.all() \
        emppins = employee.objects.exclude(extemployeeatt__pin=None) \
            .order_by('employee_department__departmentid__name', 'name')\
            .values('name', 'extemployeeatt__pin', 'employee_department__departmentid__name')
        print(len(emppins))
        for emppin in emppins:
            pindicts[emppin['extemployeeatt__pin']] = emppin
            print(emppin)
        # # look up shifts
        # ks = classlist.objects.all() \
        #     .values('id',
        #             'employeeid',
        #             'classid__name',
        #             'employeeid__extemployeeatt__pin',
        #             'datestart',
        #             'dateend') \
        #     .order_by('datestart')
        # print(ks)
        # build the overview rows: per PIN, first/last punch and punch count for the day
        ps = checkinout.objects.filter(checktime__year=daytoday.year,
                                       checktime__month=daytoday.month,
                                       checktime__day=daytoday.day) \
            .values('pin') \
            .annotate(last=Max('checktime'), fast=Min('checktime'), count=Count('userid'))
        for p in ps:
            i = {}
            if p['pin'] in pindicts:
                i.update(pindicts[p['pin']])
            i.update(p)
            i['long'] = i['last'] - i['fast']  # time between first and last punch
            d.append(i)
            # print(i)
        context['context2'] = d
        # ps = checkinout.objects.filter(checktime__year=daytoday.year,
        #                                checktime__month=daytoday.month,
        #                                checktime__day=daytoday.day,
        #                                pin=32).values().order_by('-checktime')
        # emppins = employee.objects.exclude(extemployeeatt__pin=None)\
        #     .order_by('employee_department__departmentid__name', 'name')\
        #     .values('name', 'extemployeeatt__pin', 'employee_department__departmentid__name')
        # pinsdict = {}
        # for i in emppins:
        #     pinsdict[i['extemployeeatt__pin']] = i
        #     del pinsdict[i['extemployeeatt__pin']]['extemployeeatt__pin']
        # p = []
        # for k in ps:
        #     if k['pin']:
        #         if k['pin'] in pinsdict:
        #             k.update(pinsdict[k['pin']])
        #             p.append(k)
        # context['context'] = p
    return render(request, 'att_attendance_list.html', context)
def summary(request):
    """Render the attendance summary page."""
    ctx = {'title': '出勤'}
    return render(request, 'att_summary_list.html', ctx)
def att_collect_detail(request):
    """Render the attendance detail page."""
    ctx = {'title': '明细'}
    return render(request, 'att_collect_detailed.html', ctx)
def attclass(request):
    """Render the shift-assignment board for all employees."""
    shifts = (classlist.objects.all()
              .values('employeeid_id',
                      'employeeid__name',
                      'employeeid__employee_department__departmentid__name',
                      'employeeid__extemployeeatt__pin',
                      'classid__name',
                      'datestart',
                      'dateend',
                      'active')
              .order_by('employeeid_id', 'employeeid__employee_department__departmentid__name', 'datestart'))
    ctx = {'title': '班次', 'context': shifts}
    return render(request, 'att_class_board.html', ctx)
def attholiday(request):
    """Render the holiday board page."""
    ctx = {'title': '假期'}
    return render(request, 'att_holiday_board.html', ctx)
def configatt(request):
    """Render the attendance configuration page."""
    ctx = {'title': '设置'}
    return render(request, 'attendance_conf.html', ctx)
def institem(items=()):
    """Insert a batch of punch records into `checkinout`, skipping duplicates.

    Each item is an 11-tuple: the nine CHECKINOUT columns, a logid, and a
    running index appended by the caller (ignored here).  The default was
    changed from ``None`` to an empty tuple so that calling with no
    argument is a harmless no-op instead of a TypeError when iterating.
    """
    for userid, checktime, checktype, verifycode, sensorid, memoinfo, workcode, sn, userextfmt, logid, _index in items:
        # get_or_create keeps the import idempotent when the same punch
        # record is loaded twice.
        checkinout.objects.get_or_create(userid=userid,
                                         checktime=checktime,
                                         checktype=checktype,
                                         verifycode=verifycode,
                                         sensorid=sensorid,
                                         memoinfo=memoinfo,
                                         workcode=workcode,
                                         sn=sn,
                                         userextfmt=userextfmt)
def inputxml(request):
    """One-off import view: load punches from CHECKINOUT.XML using worker
    threads, then back-fill PINs and employee links from USERINFO.xml.
    """
    with open('CHECKINOUT.XML', 'r', encoding="UTF-8") as f:
        et = parse(f)
        root = et.getroot()
        checkinouts = []
        # Each child element is one punch record; collect its field texts in order.
        for childone in root:
            item = []
            for childtwo in childone:
                item.append(childtwo.text)
                # print(childtwo.tag, ":", childtwo.text)
            checkinouts.append(item)
            # print(item)
    i = 0
    threads = []
    itemgroup = []
    # Batch the records in groups of ~2000 and hand each batch to one
    # insert thread (institem).
    for item in checkinouts:
        item.append(i)
        itemgroup.append(item)
        if (i % 2000) == 0:
            thread = threading.Thread(target=institem, args=(tuple(itemgroup),))
            threads.append(thread)
            itemgroup = []
        i += 1
    # Remaining partial batch.
    if itemgroup:
        thread = threading.Thread(target=institem, args=(tuple(itemgroup),))
        threads.append(thread)
    for threadrun in threads:
        threadrun.start()
    # Poll every 5 s until only the main/housekeeping threads remain.
    while True:
        if len(threading.enumerate()) <= 3:
            print('='*24)
            break
        time.sleep(5)
        print(len(threading.enumerate()))
    print('all done')
    with open('USERINFO.xml', 'r', encoding="UTF-8") as f:
        et = parse(f)
        root = et.getroot()
        checkinouts = []
        for childone in root:
            item = []
            for childtwo in childone:
                item.append(childtwo.text)
            checkinouts.append(item)
    # Back-fill PINs onto the imported punches and create the employee
    # PIN link (extemployeeatt) when a matching employee name exists.
    # useritem layout (observed usage): [0]=userid, [1]=pin, [3]=name.
    for useritem in checkinouts:
        checkinouts = checkinout.objects.filter(userid=useritem[0])
        if checkinouts:
            checkinouts.update(pin=useritem[1])
        print(useritem[0], useritem[1], useritem[3], len(checkinouts))
        k = employee.objects.filter(name=useritem[3]).last()
        if k:
            l = extemployeeatt.objects.get_or_create(employeeid=k,
                                                     pin=useritem[1])
            print(l)
    # NOTE(review): checkinouts was rebound inside the loop, so this is
    # the size of the LAST user's queryset, not a total.
    return HttpResponse(len(checkinouts))
def checkeveryday(request):
    """Prototype/debug view: dump one user's punches over a hard-coded
    December 2015 window and sketch the planned attendance algorithm.
    """
    context = []
    body = 400  # hard-coded userid under inspection
    daystart = datetime.datetime.strptime('2015-12-1', '%Y-%m-%d')
    dayend = datetime.datetime.strptime('2015-12-31', '%Y-%m-%d')
    eitems = checkinout.objects.filter(checktime__range=(daystart, dayend), userid=body).order_by('userid', 'checktime')
    context = eitems.values_list()
    # for i in eitems.values_list():
    #     print(i)
    lenday = dayend - daystart
    daytag = daystart
    # Walk every day in the window (debug output only).
    while True:
        if daytag > dayend:
            break
        print(daytag)
        daytag += datetime.timedelta(days=1)
    print(lenday.days)
    print(daystart.year)
    print(daystart.month)
    print(daystart.day)
    print(len(eitems))
    # Approach (translated planning notes):
    # no punch -> 1
    # punched -> 1
    # combined -> 1
    # design shifts
    # daily scheduling, cycles
    # per-employee schedule within a cycle
    # employee table
    # compute each day's status
    # produce the full table
    # aggregate statistics
    return HttpResponse(context)
def getmssql(request):
    """Import recent punch records from the legacy MSSQL attendance DB.

    Reads ``days`` from POST (default 1) and copies every CHECKINOUT row
    newer than today minus ``days`` into the local table, resolving each
    user's PIN from the MSSQL ``userinfo`` table.  Responds with the
    number of newly created rows.

    Fix: the original only assigned ``days``/``dayfrom`` inside a POST
    check but used them unconditionally, so any non-POST request (or a
    POST without ``days``) crashed with NameError.
    """
    import pymssql
    days = 1
    if request.method == "POST" and "days" in request.POST:
        days = int(request.POST['days'])
    dayfrom = datetime.datetime.today() - datetime.timedelta(days=days)
    dbsql = msslqdb.getmssqldb()
    conn = pymssql.connect(dbsql['server'], dbsql['user'], dbsql['password'], database=dbsql['database'])
    try:
        cursor = conn.cursor()
        # The date literal is produced by strftime, not user input, so no
        # injection risk despite string concatenation.
        strsql = "select * from CHECKINOUT where CHECKTIME >='" + dayfrom.strftime("%Y-%m-%d") + "'"
        cursor.execute(strsql)
        crows = cursor.fetchall()
        cursor = conn.cursor()
        cursor.execute('select * from userinfo')
        prows = cursor.fetchall()
    finally:
        # Close the connection even if a query fails (original leaked it).
        conn.close()
    # Map MSSQL USERID -> badge PIN.
    uidpin = {prow[0]: prow[1] for prow in prows}
    r = 0
    for userid, checktime, checktype, verifycode, sensorid, memoinfo, workcode, sn, userextfmt in crows:
        # .get avoids a KeyError for punches whose user is missing from
        # userinfo; such rows are stored with pin=None.
        nitem = checkinout.objects.get_or_create(userid=userid,
                                                 checktime=checktime,
                                                 checktype=checktype,
                                                 verifycode=verifycode,
                                                 sensorid=sensorid,
                                                 memoinfo=memoinfo,
                                                 workcode=workcode,
                                                 sn=sn,
                                                 userextfmt=userextfmt,
                                                 pin=uidpin.get(userid))
        m, n = nitem
        if n:
            r += 1
    return HttpResponse(" 更新记录:" + str(r))
def getmssqlpin(request):
    """Sync employee PINs from the legacy MSSQL ``userinfo`` table.

    Matches MSSQL users to local employees by name and creates or updates
    the corresponding `extemployeeatt` PIN record.
    """
    import pymssql
    dbsql = msslqdb.getmssqldb()
    conn = pymssql.connect(dbsql['server'], dbsql['user'], dbsql['password'], database=dbsql['database'])
    cursor = conn.cursor()
    cursor.execute('select * from userinfo')
    row = cursor.fetchone()
    prows = []
    while row:
        prows.append(row)
        row = cursor.fetchone()
    conn.close()
    # prow layout (observed usage): [1]=badge PIN, [3]=employee name.
    for prow in prows:
        k = employee.objects.filter(name=prow[3])
        if k:
            l = extemployeeatt.objects.filter(employeeid=k.first())
            if l:
                # update existing PIN record
                l.update(pin=prow[1])
            else:
                # new PIN record
                ttt = extemployeeatt.objects.get_or_create(employeeid=k.first(), pin=prow[1])
        else:
            # No local employee with this name.
            print("+"*3, prow[3])
    # NOTE(review): r and u are never incremented inside the loop, so the
    # response always reports zero for both counters.
    r = u = 0
    return HttpResponse("已有记录: " + str(u) +" 更新记录:" + str(r))
def cmpcheck(request):
    """Prototype attendance computation for one employee over April 2019.

    Builds a per-day calendar (weekends + public holidays), overlays the
    employee's shift assignments, auto-schedules time slots from the class
    rules, and attaches the matching raw punches.  Largely exploratory /
    debug code; always returns "ok".
    """
    employeeid = 17
    checkday = datetime.datetime.strptime('2019-4-1', '%Y-%m-%d')  # NOTE(review): unused
    datestart = datetime.datetime.strptime('2019-4-1', '%Y-%m-%d')
    dateend = datetime.datetime.strptime('2019-4-30', '%Y-%m-%d')
    # Compute working days ==========================
    holidays = holiday.objects.filter(starttime__range=[datestart, dateend]).values().order_by('starttime')
    thisdate = datestart
    ppds=[]
    while thisdate <= dateend:
        ee = {}
        ee['employeeid'] = employeeid
        ee['daycheck'] = thisdate
        ee['weekday'] = thisdate.weekday()
        ee['autopb'] = True
        # weekend handling
        if (ee['weekday'] == 5) or (ee['weekday'] == 6):
            ee['workday'] = False
            ee['quot'] = 1
        else:
            ee['workday'] = True
            ee['quot'] = 0
        thisdate += datetime.timedelta(days=1)
        ppds.append(ee)
    # apply public holidays (quotient 0 forces a workday, otherwise a rest day)
    for hday in holidays:
        firsday = datetime.datetime.strptime(str(hday['starttime']), '%Y-%m-%d')
        adddays = hday['duration']
        quotient = hday['quotient']
        endday = firsday + datetime.timedelta(days=(adddays-1))
        for pb in ppds:
            if (pb['daycheck'] >= firsday) and (pb['daycheck'] <= endday):
                if quotient == 0:
                    pb['workday'] = True
                else:
                    pb['workday'] = False
                pb['quot'] = quotient
    # =======================
    # fill in shift assignments
    chcemployee = employee.objects.get(pk=employeeid)
    ks = classlist.objects.filter(employeeid=chcemployee) \
        .values('id',
                'classid',
                'classid__name',
                'employeeid__name',
                'employeeid__extemployeeatt__pin',
                'datestart',
                'dateend')\
        .order_by('employeeid', 'datestart')
    print(ks)
    for pb in ppds:
        thisdate = pb['daycheck']
        # NOTE(review): indentation reconstructed — the else below is
        # paired with the workday check (non-workdays get no class).
        if pb['workday'] and ks:
            for k in ks:
                # pb['pin'] = k['employeeid__extemployeeatt__pin']
                sr = datetime.datetime.strptime(str(k['datestart']), "%Y-%m-%d")
                sp = datetime.datetime.strptime(str(k['dateend']), "%Y-%m-%d")
                if (thisdate >= sr) and (thisdate <= sp):
                    pb['classes'] = k['classid']
                    pb['classname'] = k['classid__name']
        else:
            pb['classes'] = 0
            pb['classname'] = None
        print(pb)
    # =======================
    # computation steps
    # compute the schedule
    chcemployee = employee.objects.get(pk=employeeid)
    # k = classlist.objects.filter(employeeid=chcemployee)\
    k = classlist.objects.all() \
        .values('id',
                'employeeid',
                'employeeid__name',
                'employeeid__extemployeeatt__pin',
                'datestart',
                'dateend',
                'classid__classsolt__timesoltid_id',
                'classid__classsolt__timesoltid_id__name',
                'classid__classsolt__timesoltid_id__intime')\
        .order_by('employeeid', 'datestart')
    timequery = timesolt.objects.all().values()
    timedicts = {}
    for i in timequery:
        timedicts[i['id']] = i
    # auto-schedule every employee/day from the class rules
    emppbs = []
    for i in k:
        # print(i)
        e = i['datestart'].strftime("%Y-%m-%d")
        sr = datetime.datetime.strptime(e, "%Y-%m-%d")
        e = i['dateend'].strftime("%Y-%m-%d")
        sp = datetime.datetime.strptime(e, "%Y-%m-%d")
        thisdate = datestart
        while thisdate <= dateend:
            pb = {}
            pb['employeeid'] = i['employeeid']
            pb['daycheck'] = thisdate
            pb['pin'] = i['employeeid__extemployeeatt__pin']
            pb['weekday'] = thisdate.weekday()
            pb['autopb'] = True
            if (thisdate >= sr)and(thisdate <= sp):
                pb['timesoltid'] = i['classid__classsolt__timesoltid_id']
            else:
                pb['timesoltid'] = 0
            emppbs.append(pb)
            thisdate += datetime.timedelta(days=1)
    # correct weekend entries using the weekday
    # day types: workday = 0, public rest day = 1, holiday = 2
    for pb in emppbs:
        if (pb['weekday'] == 5) or (pb['weekday'] == 6):
            pb['timesoltid'] = 0
            pb['quot'] = 1
        else:
            # pb.update(timedicts[i['classid__classsolt__timesoltid_id']])
            pb['quot'] = 0
        # print(pb)
    # correct entries that fall on public holidays
    holidays = holiday.objects.filter(starttime__range=[datestart, dateend]).values().order_by('starttime')
    # print(len(holidays))
    for hday in holidays:
        firsday = datetime.datetime.strptime(str(hday['starttime']), '%Y-%m-%d')
        adddays = hday['duration']
        quotient = hday['quotient']
        endday = firsday + datetime.timedelta(days=(adddays-1))
        for pb in emppbs:
            if (pb['daycheck'] >= firsday) and (pb['daycheck'] <= endday):
                pb['timesoltid'] = 0
                pb['quot'] = quotient
    # print(len(emppbs))
    # correct punch rules / slot times from leave forms (not implemented)
    pass
    # pull the raw punches matching the schedule
    ps = checkinout.objects.filter(checktime__range=[datestart, dateend]) \
        .values_list('id', 'pin', 'checktime')\
        .order_by('checktime')
    # print(len(ps))
    for pb in emppbs:
        if pb['timesoltid'] != 0:
            pb['ckecktimes'] = []
            for checkt in ps.filter(checktime__year=pb['daycheck'].year, checktime__month=pb['daycheck'].month, checktime__day=pb['daycheck'].day, pin=pb['pin']):
                pb['ckecktimes'].append(checkt[2])
        # print(pb)
    # locate matching data and derive the source values the rules need
    # check the rules against the source data
    # compute results and persist them
    # kkk=llll
    return HttpResponse("ok")
| linuxsjun/assect | attendance/views.py | views.py | py | 16,883 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_n... |
40420201014 | from pathlib import Path
import re
from math import ceil
import copy
data_folder = Path(__file__).parent.resolve()
file = data_folder / "input.txt"
find_ingredients = re.compile(r"(\d+ \w+)+")
class Ingredient:
    """A chemical name paired with a required integer quantity."""

    def __init__(self, name, quantity):
        """Store `name` and coerce `quantity` to int (parser yields strings)."""
        self.name = name
        self.quantity = int(quantity)

    def __repr__(self):
        # Added for debuggability; callers do not rely on the default repr.
        return f"{type(self).__name__}(name={self.name!r}, quantity={self.quantity})"
class Reaction:
    """One parsed reaction: reactant name->quantity map plus its product."""

    def __init__(self, recipe):
        # Tokens look like "7 A"; build an Ingredient from each one.
        parsed = [
            Ingredient(chem_name, amount)
            for amount, chem_name in (token.split(" ")
                                      for token in find_ingredients.findall(recipe))
        ]
        # Everything but the last token is a reactant; the last is the product.
        *reactant_list, self.product = parsed
        self.reactants = {ing.name: ing.quantity for ing in reactant_list}
        self.n_reactants = len(self.reactants)
class Reactions:
    """Lookup table of reactions keyed by product name, with ORE-cost solvers."""

    def __init__(self, reactions):
        # Map each product name to the Reaction that produces it.
        self.reactions = dict(
            zip([reaction.product.name for reaction in reactions], reactions)
        )

    def total_reactants(self, chemical):
        """Return the set of all chemicals transitively needed to make `chemical`.

        NOTE(review): recomputed from scratch on every call with no
        memoization — costly on deep reaction chains.
        """
        total = set()
        if chemical == "ORE":
            return total
        for name in self.reactions[chemical].reactants:
            total = total.union(self.total_reactants(name)).union({name})
        return total

    def get_ore_cost(self, ingredient):
        """Return the ORE needed to produce `ingredient` (its name + quantity)."""
        if ingredient.name == "ORE":
            return ingredient.quantity
        reaction = self.reactions[ingredient.name]
        # Reactions only run in whole multiples of the recipe.
        multiple = ceil(ingredient.quantity / reaction.product.quantity)
        ingredients = copy.deepcopy(reaction.reactants)
        for name in ingredients:
            ingredients[name] *= multiple
        # Repeatedly expand pending chemicals until only ORE remains.
        while (len(ingredients.keys()) > 1) or (list(ingredients.keys())[0] != "ORE"):
            for curr_ingredient_name in ingredients:
                if curr_ingredient_name != "ORE":
                    total_reactants_other = set()
                    for name in ingredients:
                        if name != curr_ingredient_name:
                            total_reactants_other = total_reactants_other.union(
                                self.total_reactants(name)
                            )
                    # Only expand a chemical that no other pending ingredient
                    # still depends on, so demand is pooled before rounding up.
                    if curr_ingredient_name not in total_reactants_other:
                        ingredients = self._replace_ingredient_with_reactants(
                            curr_ingredient_name, ingredients
                        )
                        break
        return ingredients["ORE"]

    def _replace_ingredient_with_reactants(self, ingredient_name, ingredients):
        """Replace one pending ingredient with the reactants that produce it."""
        multiple = ceil(
            ingredients[ingredient_name]
            / self.reactions[ingredient_name].product.quantity
        )
        for reactant_name in self.reactions[ingredient_name].reactants:
            added_reactant = (
                multiple * self.reactions[ingredient_name].reactants[reactant_name]
            )
            if reactant_name in ingredients:
                ingredients[reactant_name] += added_reactant
            else:
                ingredients[reactant_name] = added_reactant
        del ingredients[ingredient_name]
        return ingredients

    def get_max_fuel(self, ore_reserve):
        """Return the most FUEL producible from `ore_reserve` ORE."""
        fuel = Ingredient("FUEL", 1)
        unit_cost = self.get_ore_cost(fuel)
        # Lower bound: reserve / unit cost (batching only makes FUEL cheaper).
        l = ore_reserve // unit_cost
        fuel.quantity = l
        if self.get_ore_cost(fuel) == ore_reserve:
            return fuel.quantity
        r = l * 2
        fuel.quantity = r
        # Grow the upper bound until it overshoots the reserve.
        while self.get_ore_cost(fuel) <= ore_reserve:
            r *= 2
            fuel.quantity = r
        # Binary search for the largest affordable quantity.
        while r - l > 1:
            mid = (r + l) // 2
            fuel.quantity = mid
            cost = self.get_ore_cost(fuel)
            if cost == ore_reserve:
                return mid
            elif cost < ore_reserve:
                l = mid
            else:
                r = mid
        return l
def main():
    """Solve both puzzle parts and print the answers."""
    recipe_lines = file.read_text().split("\n")
    reactions = Reactions([Reaction(line) for line in recipe_lines])
    one_fuel = Ingredient("FUEL", 1)
    print("Part 1")
    print(
        f"The minimum cost of producing one unit of FUEL is {reactions.get_ore_cost(one_fuel)} ORE"
    )
    print()
    print("Part 2")
    ore_reserve = 1000000000000
    print(
        f"The maximum amount of FUEL that can be produced for\n{ore_reserve} ORE is {reactions.get_max_fuel(ore_reserve)} FUEL"
    )
# Script entry point: run the solver when executed directly.
if __name__ == "__main__":
    main()
| eirikhoe/advent-of-code | 2019/14/sol.py | sol.py | py | 4,470 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 50... |
43027922069 | from django.urls import path
from . import views
# Route table for the dashboard app.  The `name=` values are used for
# reverse() lookups in views and templates; keep them stable.
urlpatterns = [
    path('',views.index,name='index'),
    path('transactions',views.transactions,name='transactions'),
    path('updatePage',views.updatePage,name='updatePage'),
    path('update',views.update,name='update'),
    path('delete', views.delete, name="delete"),
    path('deletePage', views.deletePage, name="deletePage"),
    path('export',views.export, name='export'),
]
| mathurtanmay02/Expensez | dashboard/urls.py | urls.py | py | 436 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
45493379784 | # -*- coding: utf-8 -*-
"""
DockWidget
-----------------
begin: 2016-08-26
last: 2019-11
"""
from pathlib import Path
import sys
from datetime import datetime
from qgis.PyQt import QtGui, QtWidgets, uic
from qgis.PyQt.QtCore import pyqtSignal
#from qgis.PyQt.QtWidgets import QFileDialog
# QWidget, QListWidget, QGridLayout, QPushButton
# not in 'classes', but directly in plugin folder:
plugin_dir = Path(__file__).resolve().parent.parent
image_dir = plugin_dir / 'img'
"""
Widget with the plugin functionality (DockWidget):
"""
WIDGET_FORM_CLASS, _ = uic.loadUiType(plugin_dir / 'dock_widget.ui')
def get_icon(img_basename):
    """Return a QIcon for an image file located in the plugin's image dir."""
    return QtGui.QIcon(str(image_dir / img_basename))
def get_image(img_basename):
    """Return a QPixmap for an image file located in the plugin's image dir."""
    return QtGui.QPixmap(str(image_dir / img_basename))
class DockWidget(QtWidgets.QDockWidget, WIDGET_FORM_CLASS):
    """Main plugin dock widget: hosts all function tabs and wires up the UI."""

    # Indices of tabs: so that someone can easily change the order of the tabs
    TAB_RADOLAN_LOADER = 0
    TAB_RADOLAN_ADDER = 1
    TAB_REGNIE = 2
    TAB_STATISTICS = 3
    TAB_SETTINGS = 4
    TAB_ABOUT = 5

    # Emitted from closeEvent so the plugin can react to the dock closing.
    closingPlugin = pyqtSignal()

    def __init__(self, parent=None):
        """ Constructor. """
        super(DockWidget, self).__init__(parent)
        # Set up the user interface from Designer.
        # After setupUI you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://doc.qt.io/qt-5/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
        self.setFloating(False) # prevent losing the widget
        # -> doesn't seem to have an effect -> deselected this property in the .ui-file
        # seems that is impossible to set this as initial property in QT Creator.
        """ If you want to prevent the user from moving it to a floating window
            you need to set the "features" of the widget. In the example below,
            the widget is movable and closable, but not floatable: """
        #self.setFeatures(QtWidgets.QDockWidget.DockWidgetClosable | QtWidgets.QDockWidget.DockWidgetMovable)
        self.btn_close.clicked.connect(self.close) # global close button
        self.btn_close.setIcon(get_icon('close.png'))
        self._tab_index = 0
        self.tabWidget.currentChanged.connect(self._tab_changed)
        ############################
        # Tab "RADOLAN single mode"
        ############################
        folder_icon = get_icon('folder.png')
        self.tabWidget.setTabIcon(DockWidget.TAB_RADOLAN_LOADER, get_icon('execute.png'))
        # set toolbar button icons:
        self.btn_load_project.setIcon(get_icon('new.png'))
        self.btn_load_radars.setIcon(get_icon('radar.png'))
        self.filedialog_input.setIcon(folder_icon)
        self.filedialog_mask.setIcon(folder_icon)
        self.filedialog_qml.setIcon(folder_icon)
        self.widget_symb.setVisible(False)
        # Only enabled for RX products (check at every load):
        self.check_rvp6tomm.setVisible(False)
        # connect functions:
        #self.btn_info.clicked.connect(self.open_about_dialog)
        # passing parameters to connected method only possible with keyword 'lambda':
        self.check_cut.stateChanged.connect(lambda: self._checkbox_state_changed(self.check_cut))
        self.check_symb.stateChanged.connect(lambda: self._checkbox_state_changed(self.check_symb))
        self.check_rvp6tomm.stateChanged.connect(lambda: self._checkbox_state_changed(self.check_rvp6tomm))
        #if not self.dock.inputpath.text():
        #    #self.dock.button_box.button(QDialogButtonBox.Cancel).setEnabled(True)
        #    self.dock.button_box.button(QDialogButtonBox.Apply).setEnabled(False)
        #    #self.out("OK button disabled -> please load a RADOLAN binary file first!")
        self.btn_action.setIcon(get_icon('execute.png'))
        self.btn_action.setEnabled(False) # initially disabled, need to load RADOLAN file
        # trigger deactivating clipping:
        self.check_cut.setChecked(False)
        self._checkbox_state_changed(self.check_cut)
        ############################
        # Tab Statistics
        ############################
        tab_no = DockWidget.TAB_STATISTICS
        self.tabWidget.setTabEnabled(tab_no, False) # second tab "statistics"
        self.tabWidget.setTabIcon(tab_no, get_icon('stats.png'))
        ############################
        # Tab TIF storage
        ############################
        self.tabWidget.setTabIcon(DockWidget.TAB_SETTINGS, get_icon('execute.png'))
        self.btn_select_storage_dir.setIcon(folder_icon)
        self.btn_save.setIcon(get_icon('save.png'))
        # save button is disabled by default
        self.btn_save.setEnabled(False)
        #self.setWindowIcon(get_icon('folder.png'))
        ############################
        # Tab REGNIE
        ############################
        self.tabWidget.setTabIcon(DockWidget.TAB_REGNIE, get_icon('regnie.png'))
        self.btn_select_regnie.setIcon(folder_icon)
        self.btn_load_regnie.setIcon(get_icon('regnie.png'))
        ############################
        # Tab "RADOLANAdder"
        ############################
        self.tabWidget.setTabIcon(DockWidget.TAB_RADOLAN_ADDER, get_icon('stack.png'))
        self.btn_select_dir_adder.setIcon(folder_icon)
        self.btn_scan.setIcon(get_icon('search.png'))
        self.btn_run_adder.setIcon(get_icon('execute.png'))
        ############################
        # Tab "about"
        ############################
        self.tabWidget.setTabIcon(DockWidget.TAB_ABOUT, get_icon('info.png'))
        # insert images:
        dt_today = datetime.today()
        # Christmas period?
        if dt_today.month == 12 and dt_today.day >= 20 and dt_today.day <= 31:
            # set QMovie as label:
            movie = QtGui.QMovie(str(image_dir / 'weihnachten.gif'))
            # set 'ScaledContents' in QtDesigner to False or self.label_logo.setScaledContents(False)
            self.label_logo.setMovie(movie)
            movie.start()
        else:
            self.label_logo.setPixmap(get_image('plugin_logo.png'))
        self.label_img_info.setPixmap(get_image('sw_info.png'))
        self.label_img_download.setPixmap(get_image('sw_download.png'))
        # fill text fields with metadata:
        self._fill_fields()
        ############################
        #self.list_widget = FileList()

    def __str__(self):
        """Class name, used as prefix in debug output."""
        return self.__class__.__name__

    def out(self, s, ok=True):
        """Print `s` prefixed with the class name; errors go to stderr."""
        if ok:
            print(f"{self}: {s}")
        else:
            print(f"{self}: {s}", file=sys.stderr)

    def _checkbox_state_changed(self, checkbox):
        """Enable/show the widgets that belong to the toggled checkbox."""
        name = checkbox.objectName()
        b = checkbox.isChecked()
        # Diag:
        #self.out("_checkbox_state_changed() from '{}': {}".format(name, b))
        if name == 'check_cut':
            self.inputmask.setEnabled(b)
            self.filedialog_mask.setEnabled(b)
        elif name == 'check_symb':
            self.widget_symb.setVisible(b)

    def _tab_changed(self):
        """Remember the last *function* tab the user selected."""
        index = self.tabWidget.currentIndex()
        # save index only, if it is a relevant function tab:
        if index != DockWidget.TAB_STATISTICS and index != DockWidget.TAB_ABOUT:
            self._tab_index = index
        #msg = "Tab index changed! Save current tab index: {}".format(index)
        #self.out(msg)

    '''
    def _show_list(self):
        if self.list_widget.isVisible():
            self.list_widget.close()
        else:
            self.list_widget.show()
    '''

    def closeEvent(self, event):
        """Notify the plugin (closingPlugin signal) and accept the close."""
        self.closingPlugin.emit()
        event.accept()

    def _fill_fields(self):
        """Read metadata.txt and fill the version / issue / mail labels."""
        metadata_file = plugin_dir / 'metadata.txt'
        version = "?"
        issue_tracker = "?"
        mail_link = "?"
        self.out("reading '{}'".format(metadata_file))
        with metadata_file.open() as f:
            for line in f:
                """ filter lines from metadata file:
                version=0.6
                email=radolan2map@e.mail.de
                tracker=https://gitlab.com/Weatherman_/radolan2map/issues
                """
                if line.startswith('version'):
                    version = self.__get_value(line)
                elif line.startswith('email'):
                    mailadress = self.__get_value(line)
                    mail_link = f'<a href="mailto:{mailadress}">{mailadress}</a>'
                elif line.startswith('tracker'):
                    issue_link = self.__get_value(line)
                    issue_tracker = f'<a href="{issue_link}">{issue_link}</a>'
            # for
        # with
        self.text_version.setText(version)
        self.text_issue.setText(issue_tracker)
        self.text_mailaddress.setText(mail_link)

    def __get_value(self, line):
        """Return the value part of a 'key=value' metadata line."""
        return line.strip().split('=')[1] # version=0.6

    @property
    def tab_index(self):
        """Index of the last selected function tab."""
        return self._tab_index

    @tab_index.setter
    def tab_index(self, i):
        self.tabWidget.setCurrentIndex(i)
'''
class FileList(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Select a file')
self.listWidget = QListWidget()
self.listWidget.clicked.connect(self.clicked)
self.btn_close = QPushButton("Close")
self.btn_close.clicked.connect(self.close)
layout = QGridLayout()
layout.addWidget(self.listWidget)
layout.addWidget(self.btn_close)
self.setLayout(layout)
def __str__(self):
return self.__class__.__name__
def out(self, s, ok=True):
if ok:
print("{}: {}".format(self, s))
else:
print("{}: {}".format(self, s), file=sys.stderr)
def clicked(self):
item = self.listWidget.currentItem()
print(item.text())
def add_items(self, l_items):
self.out("load {} files".format(len(l_items)))
self.listWidget.clear()
self.listWidget.addItems(l_items)
'''
"""
if __name__ == "__main__":
dlg = AboutDialog()
dlg.show()
"""
| Weathermann/radolan2map | classes/gui.py | gui.py | py | 10,897 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "qgis.PyQt.uic.loadUiType",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "qgis.PyQt.uic",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "qgis.PyQt.QtGui.Q... |
7639807209 |
import pandas as pd
import os
import numpy as np
import re
import time
import sys
from fuzzywuzzy import fuzz
import logging
from static import *
# To calculate: TF-IDF & Cosine Similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import csr_matrix
import sparse_dot_topn.sparse_dot_topn as ct
import warnings
warnings.filterwarnings("ignore")
import nltk.corpus
nltk.download('stopwords')
from nltk.corpus import stopwords
# DEFINITIONS
# parameters
country = ''
parent_chain = ''
item_column = 'item_name'
language_ = 'en'
# canonical file
canonical_file = 'canonical_catalog'
# hiperparameters
threshold_products = 85
threshold_package = 75
def read_and_select():
    """Load the chain's raw catalog and keep only the matching columns."""
    print('Reading canonical and applicants files..')
    raw = pd.read_csv(f'data/{country}/{country}_{parent_chain}_uuid_name.csv')
    # dict to map item_name with image_url:
    name_to_image = dict(zip(raw['item_name'], raw['image_url']))
    raw = raw.loc[:, ['item_uuid', 'item_name', 'number_sku_sold']]
    print(f'Initial dataframe shape: {raw.shape}')
    print(f'Initial unique products - messy: {len(raw["item_name"].unique())}')
    return raw, name_to_image
def nlp_regex_cleaning(language_, data):
    """Normalize item names via stopword removal plus price-mark stripping."""
    print('NLP + Regex product name cleaning..')
    if language_ == 'en':
        stop_words = stopwords.words('english')
    elif language_ == 'es':
        stop_words = stopwords.words('spanish')
    # strips UK-style price-mark tokens such as "pm 2.50", "3pmp", "pm99"
    price_mark_pattern = r'(pm \d+\w+)|(pm \d+\.\d+)|(pm\d+\.\d+)|(\d+ pmp)|(pm\d+)|( \.+)|(pmp\d+.\d+)|(\d+pmp)|(pmp \d+)|(\d+.\d+ pm)'
    cleaned = nlp_cleaning(data, stop_words, price_mark_pattern)
    print(f'Percentage of unique products after NLP: {round(len(cleaned.product_name.unique())/len(cleaned.item_name.unique()), 3)}')
    return cleaned.loc[:, ['item_uuid', 'item_name', 'product_name']]
def raw_vs_clean_name_mapping(df_nlp, item_name_image_dict):
    """Persist the raw-name -> clean-name mapping used for back propagation."""
    print('Saving file to back propagate matches..')
    back_prop = df_nlp.loc[:, ['item_uuid', 'item_name', 'product_name']]
    # carry the product image along for the review UI
    back_prop['image_url'] = back_prop['item_name'].map(item_name_image_dict)
    clean_to_image = dict(zip(back_prop['product_name'], back_prop['image_url']))
    back_prop.to_csv(f'back_propagation/{country}/raw_vs_clean_{country}_{parent_chain}_products_{threshold_products}_{threshold_package}.csv', index=False)
    return back_prop, clean_to_image
def pareto_products(data):
    """Return the products that make up the first 80% of units sold."""
    print(f'Identifying the products that represent the 80% of the sales..')
    sales = (data.loc[:, ['product_name', 'number_sku_sold']]
             .drop_duplicates()
             .reset_index(drop=True)
             .groupby('product_name')
             .agg({'number_sku_sold': sum})
             .reset_index()
             .sort_values(by='number_sku_sold', ascending=False)
             .reset_index(drop=True))
    # cumulative share of sales, best sellers first
    sales['cumulate'] = sales["number_sku_sold"].cumsum()
    sales["cum_percentage"] = (sales['cumulate'] / sales["number_sku_sold"].sum()) * 100
    pareto_set = list(set(sales[sales['cum_percentage'] <= 80]['product_name']))
    print(f'Number of products that represent Pareto 80/20: {len(pareto_set)}')
    print(f'Percentage of products that represent Pareto 80/20: {round(len(pareto_set)/len(sales["product_name"].unique()), 3)}')
    return pareto_set
def direct_matches(data_nlp):
    """Split applicants into direct canonical matches and the remainder.

    A direct match is a cleaned product name that already appears as a
    canonical member in the country's canonical-links CSV.  Returns the
    unmatched applicants, the full links table and the direct-match rows
    (empty DataFrame when there are none).
    """
    print('Identifying direct matches: member --> canonical_member')
    # reading file with links between raw items and canonical data (when we run bivariate, this file has been already created)
    canonical_links = pd.read_csv(f'canonical_data/{country}/{country}_canonical_links.csv')
    # lowercase both sides so matching is case-insensitive
    for col in ['canonical_leader', 'canonical_member']:
        canonical_links[col] = canonical_links[col].str.lower()
    canonical_members = list(set(canonical_links['canonical_member']))
    # dataframe with direct matches
    direct_df = data_nlp[data_nlp['product_name'].isin(canonical_members)].reset_index(drop=True)
    if direct_df.shape[0] > 0:
        direct_members = list(set(direct_df['product_name']))
        print(f'Number of direct matches: {len(direct_members)}')
        # removing products that don't have direct matches
        data_not_direct = data_nlp[~data_nlp['product_name'].isin(direct_members)].reset_index(drop=True)
        # save link between: member --> canonical_member
        canonical_links_direct = canonical_links.copy()
        canonical_links_direct.drop(['item_uuid', 'item_name'], axis=1, inplace=True)
        canonical_links_direct = canonical_links_direct.drop_duplicates().reset_index(drop=True)
        direct_df = direct_df.merge(canonical_links_direct, how='left', left_on='product_name', right_on='canonical_member')
        direct_matches_df = direct_df.loc[:, ['item_uuid', 'item_name', 'canonical_id', 'canonical_leader', 'canonical_member']]
        direct_matches_df = direct_matches_df.drop_duplicates().reset_index(drop=True)
        print(f'Validation - Number of direct matches: {len(direct_matches_df["canonical_member"].unique())}')
    else:
        print(f'Number of direct matches: 0')
        # just to keep structure
        data_not_direct = data_nlp.copy()
        direct_matches_df = pd.DataFrame() # --> empty DF (PROBABLY NOT USEFUL TO RETURN IT)
    # return case: no direct matches
    return data_not_direct, canonical_links, direct_matches_df
def validate_products(data_nlp, direct_matches_df, data_not_direct):
    """Report how many products were dropped while extracting direct matches."""
    print(f"Validating that we haven't lost products in the process..")
    if direct_matches_df.shape[0] == 0:
        return
    accounted = set(direct_matches_df['canonical_member']) | set(data_not_direct['product_name'])
    lost = set(data_nlp[~data_nlp['product_name'].isin(accounted)]['product_name'])
    print(f'Number of products lost after extracting direct matches: {len(lost)}')
def product_space_to_detect_similarities(data_not_direct, canonical_links):
    """Build the candidate set for TF-IDF/fuzzy matching."""
    print(f'Preparing set to identify similiarities by TF-IDF + Fuzzy..')
    applicants = set(data_not_direct['product_name'].unique())
    # leaders represent their members, which keeps the comparison space small
    leaders = set(canonical_links[~canonical_links['canonical_leader'].isna()]['canonical_leader'].unique())
    product_space = list(applicants | leaders)
    print(f'Number of products to match and group (not_direct + canonical_leaders): {len(product_space)}')
    return product_space
def leaders_lead(canonical_links, groups_df):
    """Force canonical leaders to lead any group that contains them."""
    print(f'Making sure leaders are leaders..')
    canonical_leaders = canonical_links['canonical_leader'].unique()
    # every (group, canonical leader) pair present in the grouping result
    leader_rows = (groups_df.loc[groups_df['member'].isin(canonical_leaders)]
                   [['group_id', 'member']]
                   .drop_duplicates()
                   .reset_index(drop=True))
    group_to_leader = dict(zip(leader_rows['group_id'], leader_rows['member']))
    # flag whether the group's leader may still be replaced downstream
    groups_df['modify_leader'] = 'Yes'
    # promote the canonical leader and lock the group's leadership
    for gid, leader_name in group_to_leader.items():
        groups_df.loc[groups_df['group_id'] == gid, ['leader', 'modify_leader']] = leader_name, 'No'
    # drop canonical leaders from the member column so they are not re-grouped
    groups_df = groups_df[~groups_df['member'].isin(canonical_leaders)].copy()
    return groups_df
def extracting_pareto_groups(groups_df, pareto_set):
    """Split groups into those touching a Pareto product and the rest."""
    print(f'Extracing groups where pareto members are assigned..')
    touches_pareto = groups_df['leader'].isin(pareto_set) | groups_df['member'].isin(pareto_set)
    pareto_group_ids = set(groups_df[touches_pareto]['group_id'])
    selector = groups_df['group_id'].isin(pareto_group_ids)
    pareto_groups_df = groups_df[selector].reset_index(drop=True)
    non_pareto_groups_df = groups_df[~selector].reset_index(drop=True)
    print(f'Pareto dataframe shape to be reviewed by agents: {pareto_groups_df.shape[0]}')
    return pareto_groups_df, non_pareto_groups_df
def validate_products_missing(data_nlp, pareto_groups_df, non_pareto_groups_df, direct_matches_df):
    """Report how many products fell out of the grouping pipeline entirely."""
    print(f"Validating that we haven't lost products in the process..")
    accounted = set(pareto_groups_df['member']) | set(non_pareto_groups_df['member'])
    if direct_matches_df.shape[0] > 0:
        accounted |= set(direct_matches_df['canonical_member'])
    lost = set(data_nlp[~data_nlp['product_name'].isin(accounted)]['product_name'])
    print(f'Number of products lost in the process: {len(lost)}')
def remove_duplication_for_uuid(data):
    """Collapse each item_uuid to a single item_name, keeping the best seller.

    Some UUIDs map to several item names; for each UUID we keep the name
    with the highest total `number_sku_sold`. Returns a dataframe with one
    row per UUID (item_uuid, item_name, number_sku_sold).
    """
    print(f"UUIDs may be assigned to more than a single product; Fixing this issue..")
    # diagnostic: how many UUIDs carry more than one item name
    names_per_uuid = data.groupby('item_uuid').agg({'item_name': 'count'}).reset_index()
    names_per_uuid = names_per_uuid.sort_values(by='item_name', ascending=False).reset_index(drop=True)
    multi_assigned = names_per_uuid[names_per_uuid['item_name'] > 1].drop_duplicates('item_uuid').reset_index(drop=True)
    number_uuids_more_than_1 = multi_assigned.shape[0]
    print(f"Number of UUIDs assigned to more than 1 product: {number_uuids_more_than_1}")
    # total sales per (uuid, name) pair, best-selling name first within each uuid
    sales_by_pair = data.groupby(['item_uuid', 'item_name']).agg({'number_sku_sold': sum}).reset_index()
    sales_by_pair = sales_by_pair.sort_values(by=['item_uuid', 'number_sku_sold'], ascending=False).reset_index(drop=True)
    # drop_duplicates keeps the first row per uuid, i.e. the top seller
    deduped_df = sales_by_pair.drop_duplicates('item_uuid').reset_index(drop=True)
    kept_uuids = list(set(deduped_df['item_uuid']))
    print(f'Missing UUIDs after removing duplicated assingments: {len(list(set(data[~data["item_uuid"].isin(kept_uuids)]["item_uuid"])))}')
    print(f'Dataframe shape at this stage of the process (remove duplicated uuids): {deduped_df.shape}')
    return deduped_df
def main():
    """Run the full bivariate product-deduplication pipeline.

    Reads the raw catalog, cleans product names (NLP + regex), resolves
    direct canonical matches, groups the remaining products by TF-IDF +
    cosine + fuzzy similarity, and writes pareto / non-pareto group CSVs.

    NOTE(review): relies on module-level globals defined elsewhere in the
    file (`language_`, `threshold_products`, `threshold_package`, `country`,
    `parent_chain`) and on `pd`/`os` imports — confirm they are set before
    calling. Returns (pareto_groups_df, non_pareto_groups_df,
    df_back_propagation).
    """
    # Initial time (wall clock, used only for the final runtime report)
    t_initial = gets_time()
    # reading CSV files: canonical & applicants
    data, item_name_image_dict = read_and_select()
    # fixing issue: existence of uuid's assigned to more than 1 item name
    data = remove_duplication_for_uuid(data)
    # NLP + regex product name cleaning --> new column: product_name
    data_nlp = nlp_regex_cleaning(language_, data)
    # saving raw product name - clean product name (post NLP + regex): mapping
    df_back_propagation, clean_product_image_dict = raw_vs_clean_name_mapping(data_nlp, item_name_image_dict)
    # Identifying direct matches: member --> canonical_member
    data_not_direct, canonical_links, direct_matches_df = direct_matches(data_nlp)
    validate_products(data_nlp, direct_matches_df, data_not_direct)
    # identifies the 20% of the products that represent the 80% of the sales
    pareto_set = pareto_products(data)
    # Preparing set to run grouping script
    product_space = product_space_to_detect_similarities(data_not_direct, canonical_links)
    df_product_space = pd.DataFrame(data={'product_name': product_space})
    # Applying TF-IDF method
    df_tf, tf_idf_matrix = tf_idf_method(df_product_space)
    # Applying cosine similarity to detect most similar products (potential group)
    matches_df = cosine_similarity_calculation(df_tf, tf_idf_matrix)
    # Calculating fuzzy ratios and keeping products with similarity above threshold_products
    df_similars = fuzzy_ratios(matches_df, threshold_products)
    # extending product similarities: A similar to B, and B similar to D; then A, B, and D are similars
    df_similars_ext = extends_similarities(df_similars)
    # calculating fuzzy ratios between product packages, keeping similarities above threshold_package
    df_clean = cleaning_by_package_similarity(df_similars_ext, threshold_package)
    # dictionaries to map product_name <--> integer index (cheaper comparisons)
    product_index_dict, index_product_dict = creating_product_index_name_mapping_dict(df_tf)
    # product names into integers --> easy to compare
    df_clean = product_name_replacement(df_clean, product_index_dict)
    # concatenating groups to global dataframe
    groups_df, track_df = groups_concatenation(df_clean, df_similars, index_product_dict)
    # leaders lead: force canonical leaders onto their groups
    groups_df = leaders_lead(canonical_links, groups_df)
    # re-organizing and removing non pareto products
    groups_df = groups_df.sort_values(by=['leader', 'member']).reset_index(drop=True)
    groups_df['image_url'] = groups_df['member'].map(clean_product_image_dict)
    pareto_groups_df, non_pareto_groups_df = extracting_pareto_groups(groups_df, pareto_set)
    # saving results (one folder per country/parent_chain)
    if not os.path.isdir(f'bivariate_outputs/{country}/{parent_chain}'):
        os.mkdir(f'bivariate_outputs/{country}/{parent_chain}')
    groups_df.to_csv(f'bivariate_outputs/{country}/{parent_chain}/bivariate_groups_{country}_{parent_chain}_{threshold_products}_{threshold_package}.csv', index=False)
    pareto_groups_df.to_csv(f'bivariate_outputs/{country}/{parent_chain}/bivariate_pareto_groups_{country}_{parent_chain}_{threshold_products}_{threshold_package}.csv', index=False)
    non_pareto_groups_df.to_csv(f'bivariate_outputs/{country}/{parent_chain}/bivariate_non_pareto_groups_{country}_{parent_chain}_{threshold_products}_{threshold_package}.csv', index=False)
    if direct_matches_df.shape[0] != 0:
        direct_matches_df.to_csv(f'bivariate_outputs/{country}/{parent_chain}/direct_matches_{country}_{parent_chain}_{threshold_products}_{threshold_package}.csv', index=False)
    # verifying if products were lost in the process
    validate_products_missing(data_nlp, pareto_groups_df, non_pareto_groups_df, direct_matches_df)
    # Complete run time
    t_complete = gets_time() - t_initial
    print(f'Time to run the script: {round(t_complete/60, 3)} minutes!')
    print('Success!')
    return pareto_groups_df, non_pareto_groups_df, df_back_propagation
# Script entry point: run the full grouping pipeline when executed directly.
if __name__ == "__main__":
    main()
| oportusgonzalo/product-deduplication | bivariate_comparison.py | bivariate_comparison.py | py | 14,706 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.download",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nltk.corpus",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pandas.read_... |
30906550732 | import numpy as np
import time
import pandas
from keras_retinanet.models import load_model
from keras_retinanet.utils.image import preprocess_image, resize_image
from osgeo import gdal
from helpers import sliding_window
from helpers import pixel2coord
from helpers import non_max_suppression_fast
# Sliding-window size and stride in pixels; 400-px step over a 500-px window
# leaves a 100-px overlap so objects on window borders are seen twice
# (duplicates are merged later by non-max suppression).
(winW, winH, stepSize) = (500, 500, 400)
scorethreshold = 0.5   # minimum detection confidence to keep a box
iouthreshold = 0.5     # IoU threshold for non-max suppression
# NOTE: `file` shadows the Python builtin; it is the raster path without extension.
file = "D:/Muaro Jambi/GOOGLE/Resize"
model_name = "GOOGLE05-101-all2"
# RetinaNet inference model (ResNet-101 backbone) exported by keras-retinanet
model = load_model('export/infer-' + model_name + '-model.h5', backbone_name='resnet101')
# Open the GeoTIFF raster; width/height are in pixels
ds = gdal.Open(file + ".tif")
width = ds.RasterXSize
height = ds.RasterYSize
bboxes = []   # accumulated detections in full-raster pixel coordinates
x_list = []   # geo X of each final detection centroid
y_list = []   # geo Y of each final detection centroid
# Slide a (winW x winH) window over the raster and collect detections.
for (x, y) in sliding_window(width, height, stepSize, windowSize=(winW, winH)):
    st = time.time()
    # Skip partial windows at the right/bottom edge of the raster.
    # BUG FIX: the original compared x against winH and y against winW;
    # harmless while the window is square, wrong otherwise.
    if x + winW > width or y + winH > height:
        continue
    # crop the window and stack the first three bands into an RGB array
    a_image = ds.ReadAsArray(x, y, winW, winH)
    crop = np.dstack((a_image[0], a_image[1], a_image[2]))
    # preprocess image for the network
    image = preprocess_image(crop)
    image, scale = resize_image(image)
    # run inference on a single-image batch
    boxes, scores, labels = model.predict(np.expand_dims(image, axis=0))
    # undo the resize so boxes are in window-pixel coordinates
    boxes /= scale
    # select indices which have a score above the threshold
    indices = np.where(scores[0, :] >= scorethreshold)[0]
    # select those scores and order them descending
    scores = scores[0][indices]
    scores_sort = np.argsort(-scores)
    # detections that survived the threshold, best score first
    image_boxes = boxes[0, indices[scores_sort], :]
    # BUG FIX: the original looped `for i in indices`, but `indices` holds
    # positions in the FULL detection axis while `image_boxes` only has
    # len(indices) rows — indexing it with those values reads the wrong
    # rows or raises IndexError. Iterate the selected rows directly.
    for i in range(image_boxes.shape[0]):
        b = np.array(image_boxes[i, :]).astype(int)
        # translate from window coordinates to full-raster coordinates
        x1 = b[0] + x
        y1 = b[1] + y
        x2 = b[2] + x
        y2 = b[3] + y
        bboxes.append([x1, y1, x2, y2])
    print('Elapsed time = {}'.format(time.time() - st))
# Merge duplicate detections from overlapping windows and export centroids.
bboxes = np.array(bboxes, dtype=np.float32)
print('Non max suppression all detected box')
# non max suppression on overlapping bboxes (keeps one box per object)
new_boxes = non_max_suppression_fast(bboxes, iouthreshold)
print('Creating point from bbox')
for jk in range(new_boxes.shape[0]):
    b = np.array(new_boxes[jk,:]).astype(int)
    x1 = b[0]
    y1 = b[1]
    x2 = b[2]
    y2 = b[3]
    # Centroid of the box in raster pixel coordinates
    xc = (x1 + x2) / 2
    yc = (y1 + y2) / 2
    # convert pixel centroid to the raster's geo coordinate system
    (coor_x, coor_y) = pixel2coord(ds, xc, yc)
    x_list.append(coor_x)
    y_list.append(coor_y)
# One row per detected object; filename encodes model and score threshold.
df = pandas.DataFrame(data={"x": x_list, "y": y_list})
df.to_csv(file + "_" + model_name + "_" + str(scorethreshold) +".csv", sep=',',index=False)
{
"api_name": "keras_retinanet.models.load_model",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "helpers.... |
37873244448 | import os
from flask import Flask
from flask import render_template
app = Flask(__name__)
locations = []
@app.route('/')
def home():
    """Render the map page with a fixed Berkeley walking route."""
    route = {
        'origin': '37.866197,+-122.252968',
        'destination': '37.876031,+-122.258791',
        'waypoints': 'International+House+Berkeley|Greek+Theater+Berkeley|GSPP+Berkeley',
    }
    return render_template('home.html', **route)
@app.route('/loc/<lat>/<lng>')
def echo_loc(lat, lng):
    """Record a lat/lng pair in the module-level list and echo all of them."""
    point = (lat, lng)
    locations.append(point)
    return str(locations)
if __name__ == '__main__':
    # Bind to all interfaces; Heroku-style deployments inject PORT.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ",
"lin... |
25362261753 | import pyclipper
import math
of = pyclipper.PyclipperOffset()
def anyValidPath(paths):
    ''' True when at least one path in the list is non-empty '''
    return any(len(candidate) > 0 for candidate in paths)
def closePath(path):
    ''' closes the path if open by adding the point, destructive - changes the existing path '''
    if not path:
        return path
    first = path[0]
    last = path[-1]
    # append a copy of the first point when the endpoints differ
    if last[0] != first[0] or last[1] != first[1]:
        path.append([first[0], first[1]])
    return path
def genToolGeometry(toolRadiusScaled):
    ''' returns circular geometry of a given radius in the clipper coord. space '''
    # Offsetting a degenerate single-point open path with a round join/end
    # produces a discretized circle of the given radius around the origin.
    # (local `of` deliberately shadows the module-level PyclipperOffset)
    of = pyclipper.PyclipperOffset()
    of.AddPath([[0, 0], [0, 0]], pyclipper.JT_ROUND, pyclipper.ET_OPENROUND)
    geo = of.Execute(toolRadiusScaled)[0]
    # CleanPolygon drops duplicate/near-collinear vertices from the circle
    return pyclipper.CleanPolygon(geo)
def translatePath(path, pt):
    ''' returns a copy of *path* with every point shifted by the 2d offset *pt* '''
    dx = pt[0]
    dy = pt[1]
    return [[p[0] + dx, p[1] + dy] for p in path]
def translatePaths(paths, pt):
    ''' returns copies of *paths*, each point shifted by the 2d offset *pt* '''
    dx = pt[0]
    dy = pt[1]
    return [[[p[0] + dx, p[1] + dy] for p in path] for path in paths]
def centroid(path):
    ''' finds the center coordinate of a polygon (signed-area / shoelace method).

    Closes the polygon in place (same side effect closePath() has) before
    accumulating. Raises ZeroDivisionError for degenerate/empty polygons,
    matching the original behavior.
    '''
    c = [0, 0]
    sa = 0
    cnt = len(path)
    # close the polygon in place when the endpoints differ
    if cnt > 0 and (path[-1][0] != path[0][0] or path[-1][1] != path[0][1]):
        path.append([path[0][0], path[0][1]])
    for i in range(0, cnt):
        x0 = path[i][0]
        y0 = path[i][1]
        x1 = path[(i + 1) % cnt][0]
        # BUG FIX: was path[(i+1 % cnt)][1], i.e. path[i+1][1] due to operator
        # precedence — that raises IndexError on an already-closed input path
        # and only worked by accident on open paths (the appended closing
        # point happened to equal path[0]).
        y1 = path[(i + 1) % cnt][1]
        a = x0 * y1 - x1 * y0
        sa = sa + a
        c[0] = c[0] + (x0 + x1) * a
        c[1] = c[1] + (y0 + y1) * a
    sa = 3 * sa
    c[0] = c[0] / sa
    c[1] = c[1] / sa
    return c
def getDirectionVAt(path, index):
    ''' direction vector for a point on path given by index, it averages 3 segments around the point '''
    # surrounding indices: previous point, the point, and the two after it
    p1i = index-1   # at index 0 this is -1, i.e. Python wraps to the LAST point
    p2i = index
    p3i = index+1
    p4i = index+2
    # manual forward wrap using len(path)-1 as the period — this assumes the
    # path is closed (path[0] == path[-1]); NOTE(review): confirm callers
    # only pass closed paths, otherwise the wrap skips a point
    if p3i > len(path)-1:
        p3i = p3i-(len(path)-1)
    if p4i > len(path)-1:
        p4i = p4i - (len(path)-1)
    #find delta vectors between points
    pt1 = path[p1i]
    pt2 = path[p2i]
    pt3 = path[p3i]
    pt4 = path[p4i]
    v1 = [pt2[0]-pt1[0], pt2[1]-pt1[1]]
    v2 = [pt3[0]-pt2[0], pt3[1]-pt2[1]]
    v3 = [pt4[0]-pt3[0], pt4[1]-pt3[1]]
    # sum the three segment vectors — smooths the direction at sharp corners
    v = [v1[0]+v2[0]+v3[0], v1[1]+v2[1]+v3[1]]
    # normalize to a unit vector (ZeroDivisionError if the sum is zero-length)
    d = math.sqrt(v[0]*v[0] + v[1]*v[1])
    return [v[0]/d, v[1]/d]
def normalize(v):
    ''' scale a 2d vector to unit length '''
    x = v[0]
    y = v[1]
    length = math.sqrt(x * x + y * y)
    return [x / length, y / length]
def getDirectionV(pt1, pt2):
    ''' unit direction vector pointing from pt1 towards pt2 '''
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    length = math.sqrt(dx * dx + dy * dy)
    return [dx / length, dy / length]
def getAngle(v):
    ''' polar angle of a 2d vector, in radians (-pi, pi] '''
    x = v[0]
    y = v[1]
    return math.atan2(y, x)
def magnitude(v):
    ''' euclidean length of a 2d vector '''
    x = v[0]
    y = v[1]
    return math.sqrt(x * x + y * y)
#get angle between two vectors
def getAngle2v(v1, v2):
''' angle between two 2d vectors '''
try:
d = (v1[0]*v2[0] + v1[1]*v2[1])
m = (math.sqrt(v1[0]*v1[0] + v1[1]*v1[1])) * \
(math.sqrt(v2[0]*v2[0] + v2[1]*v2[1]))
if m != 0:
return math.acos(d/m)
else:
return math.pi/32
except:
#print "matherror",v1,v2
return math.pi/4
def sub2v(v1, v2):
    ''' component-wise difference of two 2d vectors (v1 - v2) '''
    return [v1[i] - v2[i] for i in range(2)]
def sumv(path):
    ''' component-wise sum of an array of 2d vectors; [0, 0] for an empty list '''
    return [sum(pt[0] for pt in path), sum(pt[1] for pt in path)]
def getIntersectionPointLWP(lineSegment, paths):
    ''' finds first intersection point of the given line segment with given paths '''
    l1 = lineSegment # first line segment
    for pth in paths:
        if len(pth) > 1:
            # i starts at 0, so the first tested edge is pth[-1] -> pth[0],
            # i.e. each path is treated as closed.
            # NOTE(review): confirm this is intended for open polylines too.
            for i in range(0, len(pth)):
                l2 = [pth[i-1], pth[i]] # second line segment (path line)
                # cross product of the two direction vectors; zero => parallel
                d = (l1[1][1]-l1[0][1])*(l2[1][0]-l2[0][0]) - \
                    (l2[1][1]-l2[0][1])*(l1[1][0]-l1[0][0])
                if d == 0: # lines are parallel
                    continue
                # p1d/p2d are the intersection parameters along l1 and l2,
                # scaled by d (division is deferred until after the bounds check)
                p1d = (l2[1][1]-l2[0][1])*(l1[0][0]-l2[0][0]) - \
                    (l2[1][0]-l2[0][0])*(l1[0][1]-l2[0][1])
                p2d = (l1[1][0]-l1[0][0])*(l2[0][1]-l1[0][1]) - \
                    (l1[1][1]-l1[0][1])*(l2[0][0]-l1[0][0])
                # reject intersections outside either segment; the comparison
                # direction depends on the sign of d because p1d/p2d are scaled by it
                if d < 0:
                    if (p1d < d or p1d > 0):
                        continue # not inside segment
                    if (p2d < d or p2d > 0):
                        continue # not inside segment
                else:
                    if (p1d < 0 or p1d > d):
                        continue # not inside segment
                    if (p2d < 0 or p2d > d):
                        continue # not inside segment
                # interpolate the actual intersection point along l1
                return [l1[0][0] + (l1[1][0]-l1[0][0])*p1d/d, l1[0][1] + (l1[1][1]-l1[0][1])*p1d/d]
    # no edge of any path intersects the segment
    return None
def rotate(v, rad):
    ''' rotate a 2d vector counter-clockwise by the given angle in radians '''
    cos_a = math.cos(rad)
    sin_a = math.sin(rad)
    x = v[0]
    y = v[1]
    return [cos_a * x - sin_a * y, sin_a * x + cos_a * y]
def pointToLineSegmentDistanceSquared(p1, p2, pt, clamp=True):
    ''' closest point on the segment p1-p2 to pt, plus the squared distance.

    With clamp=True the projection is limited to the segment; with
    clamp=False the infinite line through p1-p2 is used.
    Returns (closest_point, squared_distance).
    '''
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    lsq = dx * dx + dy * dy
    if lsq == 0:
        # degenerate (zero-length) segment: the closest point is the endpoint
        ex = pt[0] - p1[0]
        ey = pt[1] - p1[1]
        return p1, ex * ex + ey * ey
    # projection parameter of pt onto the segment, scaled by lsq
    t = (pt[0] - p1[0]) * dx + (pt[1] - p1[1]) * dy
    if clamp:
        t = min(max(t, 0), lsq)
    clp = [p1[0] + t * dx / lsq, p1[1] + t * dy / lsq]
    ox = pt[0] - clp[0]
    oy = pt[1] - clp[1]
    return clp, ox * ox + oy * oy
def getClosestPointOnPaths(paths, pt):
    ''' closest point on any segment of the given paths to pt, plus its distance.

    Each path is treated as closed (the i-1 index wraps the last point back
    to the first). Returns ([], sqrt(sentinel)) when paths are empty.
    '''
    minDistSq = 100000000000000
    closestPt = []
    for path in paths:
        for i in range(0, len(path)):
            candidate, distSq = pointToLineSegmentDistanceSquared(path[i - 1], path[i], pt)
            if distSq < minDistSq:
                minDistSq = distSq
                closestPt = candidate
    return closestPt, math.sqrt(minDistSq)
def closeToOneOfPoints(pt, points, toleranceScaled):
    ''' True when pt lies within toleranceScaled of any point in the array '''
    return any(magnitude(sub2v(candidate, pt)) <= toleranceScaled for candidate in points)
def cleanPath(path, tolerance):
    ''' removes unnecessary points from path while keeping remaining segments within tolerance with original path '''
    output = []
    firstPoint = True
    for pt in path:
        if firstPoint:
            firstPoint=False
            output.append(pt)
        else:
            # NOTE(review): collinearity merging only starts once output has
            # more than 2 points; earlier points fall through to the
            # proximity check below — confirm `>2` (vs `>=2`) is intended
            if len(output)>2:
                # if the point is (within tolerance) on the same (last) line
                # segment, extend that segment to the point instead of adding one
                # (clamp=False: distance to the infinite supporting line)
                clp,distSq = pointToLineSegmentDistanceSquared(output[-2],output[-1],pt,False)
                if math.sqrt(distSq)<tolerance:
                    output.pop(-1) #remove last segment point
                    output.append(pt) #add new point
                else:
                    output.append(pt)
            elif magnitude(sub2v(pt,output[-1]))<tolerance: # point too close to the last point - replace it
                output.pop(-1) #remove last point
                output.append(pt) #add new point
            else:
                output.append(pt)
    return output
| kreso-t/FreeCAD_Mod_Adaptive_Path | Adaptive/GeomUtils.py | GeomUtils.py | py | 8,377 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "pyclipper.PyclipperOffset",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyclipper.PyclipperOffset",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyclipper.JT_ROUND",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_na... |
33420603124 | from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from cloudmailin.views import MailHandler
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from mailpost.views import create_post, FakeEmailView
from django.contrib.auth.decorators import login_required
import django_cron
django_cron.autodiscover()
from dajaxice.core import dajaxice_autodiscover
dajaxice_autodiscover()
# Root URL configuration (Django 1.x-style `patterns`).
urlpatterns = patterns('',
    url(r'^books/', include('books.urls',namespace='books',app_name='books')),
    url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='auth_logout'),
    # dajaxice endpoint is mounted under the configured media prefix
    url(r'^%s/' % settings.DAJAXICE_MEDIA_PREFIX, include('dajaxice.urls')),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include('bookere.api.urls')),
    url(r'^accounts/', include('registration_backend.urls',namespace="registration",app_name="registration")),
    # catch-all: frontend owns everything not matched above
    url(r'^', include('bookere.frontend.urls',namespace='frontend',app_name='frontend')),
)
# Inbound-email webhook (CloudMailin): posts to `my_address` are verified with
# `my_secret` and dispatched to mailpost.views.create_post.
# SECURITY(review): address and secret are committed to source control —
# rotate the secret and load both from settings/environment instead.
mail_handler = MailHandler()
my_address="39b5ef0e6660524333d3@cloudmailin.net"
my_secret= "9e6a1be956c7ef8aea57"
mail_handler.register_address(
    address = my_address,
    secret = my_secret,
    callback = create_post,
)
urlpatterns += patterns('',
    url(r'^cloudmailin/$', mail_handler, name='cloudmailin'),
    # dev helper: simulate an inbound CloudMailin email (login required)
    url(r'^fake_email_client/$', login_required(FakeEmailView.as_view(address=my_address, secret=my_secret))),
)
# Serve static files via Django itself (insecure — development only).
urlpatterns += patterns('django.contrib.staticfiles.views',
    url(r'^static/(?P<path>.*)$', 'serve', kwargs={"insecure": True}),
)
| sharnett/BookerE | bookere/urls.py | urls.py | py | 1,949 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django_cron.autodiscover",
"line_number": 10,
"usage_type": "call"
},
{
"api... |
19294746004 | #!/usr/bin/env python
# coding: utf-8
# In[76]:
import pyspark
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.conf import SparkConf
from pyspark.sql.session import SparkSession
# Need to make declaration in SparkContext() when submit pyspark job
# Spark session setup (notebook export: the SparkContext must be created
# explicitly when submitted as a pyspark job).
sc = SparkContext()
spark = SQLContext(sc)
type(spark)  # notebook leftover: no-op expression statement
# Load the first semicolon-delimited CSV export from GCS.
series_data_1 = spark.read.options(delimiter=';', header='True').csv("gs://bigdata-etl-2_flights/qoala-query-result-5")
series_data_1.registerTempTable("series_data_1")
# Row-count check (prints to the driver log).
spark.sql("select count(*) from series_data_1").show()
series_1 = spark.sql("select * from series_data_1")
# Second export, same treatment.
series_data_2 = spark.read.options(delimiter=';', header='True').csv("gs://bigdata-etl-2_flights/qoala-query-result-4")
series_data_2.registerTempTable("series_data_2")
spark.sql("select count(*) from series_data_2").show()
series_2 = spark.sql("select * from series_data_2")
# Build date-stamped output prefixes (mid-file import kept from the notebook).
from datetime import date
current_date = date.today()
file_name = str(current_date)
bucket_name = "gs://bigdata-etl-2_flights"
output_movies_ata_1 = bucket_name+"/series_data_output/"+file_name+"_datamart_1"
output_movies_ata_2 = bucket_name+"/series_data_output/"+file_name+"_datamart_2"
# coalesce(1): write each datamart as a single JSON part file.
series_1.coalesce(1).write.format("json").save(output_movies_ata_1)
series_2.coalesce(1).write.format("json").save(output_movies_ata_2)
# In[ ]:
| rauldatascience/semi-structured-dwh | series_spark_job.py | series_spark_job.py | py | 1,490 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyspark.SparkContext",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SQLContext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "datet... |
34842760480 | # Evaluate Division: https://leetcode.com/problems/evaluate-division/
# You are given an array of variable pairs equations and an array of real numbers values, where equations[i] = [Ai, Bi] and values[i] represent the equation Ai / Bi = values[i]. Each Ai or Bi is a string that represents a single variable.
# You are also given some queries, where queries[j] = [Cj, Dj] represents the jth query where you must find the answer for Cj / Dj = ?.
# Return the answers to all queries. If a single answer cannot be determined, return -1.0.
# Note: The input is always valid. You may assume that evaluating the queries will not result in division by zero and that there is no contradiction.
from collections import defaultdict
from types import List
# This problem is vary complicated at first but can be boiled down to a backtracking problem where we keep track of A -> B = val and B -> A = 1 / val
# This means we need to explore down every path and return the res if found or -1 if we cant find it
class Solution:
    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        """Answer division queries from known ratios via DFS over a ratio graph.

        Each equation A / B = v adds edges A->B (v) and B->A (1/v); a query
        C / D is the product of edge weights along any path C..D, or -1 when
        no path exists.

        NOTE(review): this module imports List from `types`, which fails at
        import time — it should come from `typing`. Confirm and fix the import.
        """
        graph = defaultdict(defaultdict)
        for (num, den), ratio in zip(equations, values):
            graph[num][den] = ratio
            graph[den][num] = 1 / ratio

        def dfs(node, target, seen, acc):
            """Product of ratios along a path node..target, or -1."""
            found = -1
            seen.add(node)
            edges = graph[node]
            if target in edges:
                # direct edge: multiply in the final ratio
                found = acc * edges[target]
            else:
                for nxt in edges:
                    if nxt not in seen:
                        found = dfs(nxt, target, seen, acc * edges[nxt])
                        if found != -1:
                            return found
            # unwind: make the node available to sibling branches
            seen.remove(node)
            return found

        answers = []
        for num, den in queries:
            if num not in graph or den not in graph:
                answers.append(-1)   # unknown variable
            elif num == den:
                answers.append(1)    # trivially x / x = 1
            else:
                answers.append(dfs(num, den, set(), 1))
        return answers
# This runs in O(M * N) as for every A we may have to look through all nodes
# and this only uses o(N) space for our map and stack space
# Score Card
# Did I need hints? N
# Did you finish within 30 min? 8
# Was the solution optimal? This is optimal
# Were there any bugs? No
# 5 5 5 5 = 5
| KevinKnott/Coding-Review | Month 03/Week 04/Day 02/d.py | d.py | py | 2,683 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "types.List",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 18,
"usage_type": "call"
}
] |
43849622923 | from django import forms
from apps.inicio.models import Actor
class ActorForm(forms.ModelForm):
    """ModelForm for Actor that tags every widget with Bootstrap's form-control class."""
    def __init__(self, *args, **kwargs):
        super(ActorForm, self).__init__(*args, **kwargs)
        # BUG FIX: dict.iteritems() is Python-2-only (AttributeError on
        # Python 3); .values() works on both, and the enumerate index /
        # field name were never used.
        for field in self.fields.values():
            field.widget.attrs["class"] = field.widget.attrs.get("class", "") + " form-control"

    class Meta:
        model = Actor
        fields = ["nombre", "nacimiento"]
| JhonnySA/Videoteca | proyectos/videoteca/apps/inicio/formstotal/actor.py | actor.py | py | 443 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "apps.inicio.models.Actor",
"line_number": 13,
"usage_type": "name"
}
] |
27287255173 | import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
import cleanvision
# To ensure backwards compatibility
from cleanvision.imagelab import Imagelab
class TestImagelabSaveLoad:
    # Round-trip tests for Imagelab.save / Imagelab.load.
    # `generate_local_dataset` and `tmp_path` are pytest fixtures supplied
    # by the test suite's conftest.

    def test_save(self, generate_local_dataset, tmp_path):
        """save() creates the pickle plus both CSV artifacts."""
        imagelab = Imagelab(data_path=generate_local_dataset)
        save_folder = tmp_path / "T_save_folder/"
        imagelab.save(save_folder)
        assert os.path.exists(save_folder / "imagelab.pkl")
        assert os.path.exists(save_folder / "issues.csv")
        assert os.path.exists(save_folder / "issue_summary.csv")

    @pytest.mark.parametrize(
        "force",
        [True, False],
        ids=["overwrite", "do not overwrite"],
    )
    def test_force_save(self, generate_local_dataset, tmp_path, force):
        """Saving into an existing folder succeeds only with force=True."""
        save_folder = tmp_path / "T_save_folder/"
        save_folder.mkdir()
        imagelab = Imagelab(data_path=generate_local_dataset)
        if force:
            imagelab.save(save_folder, force=force)
            assert os.path.exists(save_folder / "imagelab.pkl")
            assert os.path.exists(save_folder / "issues.csv")
            assert os.path.exists(save_folder / "issue_summary.csv")
        else:
            # folder already exists and force=False -> refuse to overwrite
            with pytest.raises(FileExistsError):
                imagelab.save(save_folder, force=force)

    def test_load(self, generate_local_dataset, tmp_path):
        """load() restores issues, issue_summary, and the info dict intact."""
        imagelab = Imagelab(data_path=generate_local_dataset)
        imagelab.find_issues()
        save_folder = tmp_path / "T_save_folder/"
        imagelab.save(save_folder)
        loaded_imagelab = Imagelab.load(save_folder)
        assert loaded_imagelab is not None
        assert_frame_equal(loaded_imagelab.issues, imagelab.issues)
        assert_frame_equal(loaded_imagelab.issue_summary, imagelab.issue_summary)
        self.compare_dict(loaded_imagelab.info, imagelab.info)

    def compare_dict(self, a, b):
        """Recursively assert two info dicts are equal, using pandas-aware
        comparisons for DataFrame/Series values."""
        assert len(a) == len(b)
        for k, v in a.items():
            print(k)  # aids debugging: shows the key being compared on failure
            assert k in b
            if isinstance(v, dict):
                self.compare_dict(v, b[k])
            elif isinstance(v, pd.DataFrame):
                assert_frame_equal(v, b[k])
            elif isinstance(v, pd.Series):
                assert_series_equal(v, b[k])
            else:
                assert v == b[k]

    def test_load_file_does_not_exist(self, generate_local_dataset, tmp_path):
        """Loading a folder that was never saved raises ValueError."""
        save_folder = tmp_path / "T_save_folder/"
        with pytest.raises(ValueError):
            Imagelab.load(save_folder)

    def test_warning_raised_on_diff_version(
        self, generate_local_dataset, tmp_path, monkeypatch
    ):
        """A version mismatch between save and load emits a UserWarning."""
        save_folder = tmp_path / "T_save_folder/"
        imagelab = Imagelab(data_path=generate_local_dataset)
        imagelab.save(save_folder)
        # pretend the running cleanvision is a different release
        monkeypatch.setattr(cleanvision, "__version__", "dummy")
        with pytest.warns(UserWarning) as record:
            imagelab.load(save_folder)
        warning_message = record[0].message.args[0]
        assert (
            "Saved Imagelab was created using different version of cleanvision"
            in warning_message
        )
| cleanlab/cleanvision | tests/test_save_load.py | test_save_load.py | py | 3,168 | python | en | code | 725 | github-code | 1 | [
{
"api_name": "cleanvision.imagelab.Imagelab",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.exi... |
32601342276 | #!/usr/bin/python3
from typing import List
import json
class Solution:
    def wordsTyping(self, sentence: List[str], rows: int, cols: int) -> int:
        """LeetCode 418 (Sentence Screen Fitting): how many times the sentence
        fits on a rows x cols screen.

        Strategy: simulate filling rows until one full sentence cycle
        completes, count the cells that cycle consumed, then divide the total
        screen area by it. NOTE(review): confirm this area-division shortcut
        handles sentences spanning multiple rows with leftover space — the
        per-cycle cost is assumed constant.
        """
        nums = [len(d) for d in sentence]  # word lengths, precomputed
        n = len(sentence)
        ans, row, col, j = 0, 0, 0, 0
        while row < rows:
            col = 0
            # pack words (plus a separating space) into the current row
            while col + nums[j % n] <= cols:
                col += nums[j % n]
                j += 1
                if (j % n) == 0:
                    break
            else:
                # row filled without completing the sentence: account for
                # the trailing space after the last placed word
                col += 1
            row += 1
            if (j % n) == 0:
                # a full sentence cycle just completed — stop simulating
                break
        # number of screen cells consumed by one full cycle of the sentence
        spaces = ((row-1) * cols) + col
        total = rows * cols
        ans = (total//(spaces+1)) if spaces != 0 else 0
        print(total, spaces)  # NOTE(review): leftover debug output — remove before reuse
        return ans
# Read test input from stdin: a JSON array of words, then rows and cols.
arr = json.loads(input())
rows = int(input())
cols = int(input())
sol = Solution()
print(sol.wordsTyping(arr, rows, cols))
| negibokken/sandbox | leetcode/418_sentence_screen_fitting/main.py | main.py | py | 1,007 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 34,
"usage_type": "call"
}
] |
4805265619 | import torch.nn as nn
import torch
from src.losses.loss_functions import (
DJSLoss,
ClassifLoss,
DiscriminatorLoss,
GeneratorLoss,
)
from src.utils.custom_typing import (
DiscriminatorOutputs,
ClassifierOutputs,
EDIMOutputs,
GenLosses,
DiscrLosses,
ClassifLosses,
)
class EDIMLoss(nn.Module):
    """Loss function to extract exclusive information from the image, see paper equation (8)

    Combines Jensen-Shannon mutual-information estimates (global + local),
    a GAN-style disentangling term, and auxiliary classifier losses; each
    coefficient weights its corresponding term.

    Args:
        local_mutual_loss_coeff (float): Coefficient of the local Jensen Shannon loss
        global_mutual_loss_coeff (float): Coefficient of the global Jensen Shannon loss
        disentangling_loss_coeff (float): Coefficient of the Gan loss
    """

    def __init__(
        self,
        local_mutual_loss_coeff: float,
        global_mutual_loss_coeff: float,
        disentangling_loss_coeff: float,
    ):
        super().__init__()
        self.local_mutual_loss_coeff = local_mutual_loss_coeff
        self.global_mutual_loss_coeff = global_mutual_loss_coeff
        self.disentangling_loss_coeff = disentangling_loss_coeff

        # sub-losses from src.losses.loss_functions
        self.djs_loss = DJSLoss()
        self.classif_loss = ClassifLoss()
        self.discriminator_loss = DiscriminatorLoss()
        self.generator_loss = GeneratorLoss()

    def compute_generator_loss(self, edim_outputs: EDIMOutputs) -> GenLosses:
        """Generator loss function

        Sums the weighted global JS, local JS, and generator GAN terms for
        both the x and y branches into a single encoder objective.

        Args:
            edim_outputs (EDIMOutputs): Output of the forward pass of the exclusive information model

        Returns:
            GenLosses: Generator losses
        """
        # Compute Global mutual loss (statistics T vs shuffled T')
        global_mutual_loss_x = self.djs_loss(
            T=edim_outputs.global_mutual_M_R_x,
            T_prime=edim_outputs.global_mutual_M_R_x_prime,
        )
        global_mutual_loss_y = self.djs_loss(
            T=edim_outputs.global_mutual_M_R_y,
            T_prime=edim_outputs.global_mutual_M_R_y_prime,
        )
        global_mutual_loss = (
            global_mutual_loss_x + global_mutual_loss_y
        ) * self.global_mutual_loss_coeff

        # Compute Local mutual loss (per-location statistics)
        local_mutual_loss_x = self.djs_loss(
            T=edim_outputs.local_mutual_M_R_x,
            T_prime=edim_outputs.local_mutual_M_R_x_prime,
        )
        local_mutual_loss_y = self.djs_loss(
            T=edim_outputs.local_mutual_M_R_y,
            T_prime=edim_outputs.local_mutual_M_R_y_prime,
        )
        local_mutual_loss = (
            local_mutual_loss_x + local_mutual_loss_y
        ) * self.local_mutual_loss_coeff

        # generator side of the disentangling GAN (fool the discriminators)
        gan_loss_x_g = self.generator_loss(fake_logits=edim_outputs.fake_x)
        gan_loss_y_g = self.generator_loss(fake_logits=edim_outputs.fake_y)

        gan_loss_g = (gan_loss_x_g + gan_loss_y_g) * self.disentangling_loss_coeff
        # Get classification error
        # For each network, we assign a loss objective
        encoder_loss = global_mutual_loss + local_mutual_loss + gan_loss_g

        return GenLosses(
            encoder_loss=encoder_loss,
            local_mutual_loss=local_mutual_loss,
            global_mutual_loss=global_mutual_loss,
            gan_loss_g=gan_loss_g,
        )

    def compute_discriminator_loss(
        self, discr_outputs: DiscriminatorOutputs
    ) -> DiscrLosses:
        """Discriminator loss see paper equation (9)

        Real/fake discrimination over the disentangling representations,
        summed across the x and y branches and scaled by the GAN coefficient.

        Args:
            discr_outputs (DiscriminatorOutputs): Output of the forward pass of the discriminators model

        Returns:
            DiscrLosses: Discriminator losses
        """
        gan_loss_x_d = self.discriminator_loss(
            real_logits=discr_outputs.disentangling_information_x_prime,
            fake_logits=discr_outputs.disentangling_information_x,
        )
        gan_loss_y_d = self.discriminator_loss(
            real_logits=discr_outputs.disentangling_information_y_prime,
            fake_logits=discr_outputs.disentangling_information_y,
        )
        gan_loss_d = (gan_loss_x_d + gan_loss_y_d) * self.disentangling_loss_coeff
        return DiscrLosses(gan_loss_d=gan_loss_d)

    def compute_classif_loss(
        self,
        classif_outputs: ClassifierOutputs,
        digit_labels: torch.Tensor,
        color_bg_labels: torch.Tensor,
        color_fg_labels: torch.Tensor,
    ) -> ClassifLosses:
        """Compute classifiers losses. The accuracy of the classifiers allow to quantify the representations level of disentanglement.

        Args:
            classif_outputs (ClassifierOutputs): Classifiers Outputs
            digit_labels (torch.Tensor): Label of the digit
            color_bg_labels (torch.Tensor): Background color of the images
            color_fg_labels (torch.Tensor): Foreground color of the images

        Returns:
            ClassifLosses: Classifiers losses
        """
        # digit predicted from the background representation (should be poor
        # if disentangled) vs from the foreground representation
        digit_bg_classif_loss, digit_bg_accuracy = self.classif_loss(
            y_pred=classif_outputs.digit_bg_logits,
            target=digit_labels,
        )
        digit_fg_classif_loss, digit_fg_accuracy = self.classif_loss(
            y_pred=classif_outputs.digit_fg_logits, target=digit_labels
        )
        # color heads on their corresponding representations
        color_bg_classif_loss, color_bg_accuracy = self.classif_loss(
            y_pred=classif_outputs.color_bg_logits,
            target=color_bg_labels,
        )
        color_fg_classif_loss, color_fg_accuracy = self.classif_loss(
            y_pred=classif_outputs.color_fg_logits, target=color_fg_labels
        )
        classif_loss = (
            digit_bg_classif_loss
            + digit_fg_classif_loss
            + color_bg_classif_loss
            + color_fg_classif_loss
        )
        return ClassifLosses(
            classif_loss=classif_loss,
            digit_bg_classif_loss=digit_bg_classif_loss,
            digit_fg_classif_loss=digit_fg_classif_loss,
            color_bg_classif_loss=color_bg_classif_loss,
            color_fg_classif_loss=color_fg_classif_loss,
            digit_bg_accuracy=digit_bg_accuracy,
            digit_fg_accuracy=digit_fg_accuracy,
            color_bg_accuracy=color_bg_accuracy,
            color_fg_accuracy=color_fg_accuracy,
        )
| MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation | src/losses/EDIM_loss.py | EDIM_loss.py | py | 6,090 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "src.losses.loss_functions.DJSLoss",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "src.l... |
20980695431 | #!/usr/bin/env python
#coding:utf-8
"""
task.py
~~~~~~~~~~~~~
:license: BSD, see LICENSE for more details.
"""
from flask import Blueprint,render_template,g,request
from application.models import *
from application.decorators import admin_required
import json,logging,urllib
from google.appengine.api import memcache
task=Blueprint('task',__name__,template_folder="../templates")
from google.appengine.api import search
@task.route('/dotask')
@admin_required
def dotask():
    """Delete one User entity from the datastore and report success."""
    # Fetch an arbitrary User entity and remove it.
    victim = User.all().get()
    victim.delete()
    return "update successfully"
@task.route('/addpost')
@admin_required
def addpost():
    """Seed the "Post" full-text search index with two sample HTML documents."""
    # Each sample is wrapped in an HtmlField named "content" and stored in the
    # GAE search index "Post" so that dosearch() below has data to query.
    document=search.Document(fields=[search.HtmlField(name="content",value=u"核心提示:到底是什么导致全球变暖?英国科学家们给出了一个“匪夷所思”的答案:生活在中生代的食草类恐龙排出大量甲烷气体,是全球变暖的重要因素。这一研究结果于近期发表在《当代生物学》杂志上")])
    search.Index(name="Post").put(document)
    document=search.Document(fields=[search.HtmlField(name="content",value=u"核心提示:据媒体调查称,2009年住建部曾对全国城市饮用水水质状况做普查,但至今未公布数据结果。多位业内人士称,此次检测结果实际合格率仅50%左右。调查显示全国城市供水管网质量普遍低劣,不符国标的灰口铸铁管占一半。目前,北京上海等大城市饮用水仍无法直饮。")])
    search.Index(name="Post").put(document)
    return "ok"
@task.route('/search/<searchtag>')
@admin_required
def dosearch(searchtag):
    """Run a full-text query against the "Post" search index."""
    logging.info(searchtag)
    # URL-decode the tag before querying (Python 2 / GAE urllib API).
    decoded = urllib.unquote(searchtag)
    index = search.Index(name="Post")
    return str(index.search(decoded))
#@task.route('/movetopost')
#def movetopost():
# allpost=SPost.all().fetch(1000)
# for one in allpost:
# newone=Post(
# title=one.title,
# content=one.content,
# create_date=one.create_date,
# update_time=one.update_time,
# tags=one.tags,
# saveonly=one.saveonly,
# allowcomment=one.allowcomment,
# num_lookup=one.num_lookup,
# post_id=one.post_id
# )
# logging.info('xxxxxx')
# logging.info(newone.title)
# newone.put_into()
# return "ok"
| itopidea/leepress | application/views/task.py | task.py | py | 2,198 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "application.decorators.admin_required",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "google.appengine.api.search.Document",
"line_number": 28,
"usage_type": "call"
},... |
3288527771 | from tkinter import Tk, RIGHT, BOTTOM, BOTH, TOP, StringVar, font, Text, DISABLED, Scrollbar, Y
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import frameCreator
def CreateWindow(window):
    """Build the whole calculator GUI onto *window* (attribute container).

    Creates the root Tk window, fonts, tkinter variables, all frames via
    frameCreator, the history text area with scrollbar, and an embedded empty
    matplotlib figure.  Everything is stored as attributes on *window* so the
    rest of the application can reach the widgets later.
    """
    #Main window and setting props
    window.mainWindow = Tk()
    window.mainWindow.title("Scientific Plotting Calculator v1.0")
    window.mainWindow.resizable(False, False)
    # Fonts
    window.mainWindowFont = font.Font(family = "Verdana", size = 10)
    window.secondaryWindowFont = font.Font(family = "Verdana", size = 10)
    window.buttonsFont = font.Font(family = "Verdana", size = 10, weight = "bold")
    window.amplifiedButtonsFont = font.Font(family = "Verdana", size = 12, weight = "bold")
    window.historyTextFont = font.Font(family = "Verdana", size = 14, weight = "bold")
    window.plotButtonFont = font.Font(family= "Verdana", size = 16, weight = "bold")
    # Booleans, StringVar and lists
    window.operationFilling = False
    window.parenthesisOpen = False
    window.actualHistoryOperation = StringVar()
    window.operationMade = False
    # Available plot colors offered to the user.
    window.a_list = ['red', 'blue', 'green', 'black', 'grey', 'yellow', 'cyan', 'magenta']
    window.colorSelected = StringVar()
    # Axis limits for the plot, entered as strings by the user.
    window.xminsize = StringVar()
    window.yminsize = StringVar()
    window.xmaxsize = StringVar()
    window.ymaxsize = StringVar()
    window.function = StringVar()
    # Button label variables (allow toggling e.g. DEG/RAD, SIN/ASIN labels).
    window.DEGButtonText = StringVar()
    window.ASINButtonText = StringVar()
    window.ACOSButtonText = StringVar()
    window.ATANButtonText = StringVar()
    window.SINButtonText = StringVar()
    window.COSButtonText = StringVar()
    window.TANButtonText = StringVar()
    window.firstScreenText = StringVar()
    window.secondScreenText = StringVar()
    # Frames
    window.calculatorFrame = frameCreator.CalculatorFrame(window.mainWindow)
    window.historyFrame = frameCreator.HistoryFrame(window.mainWindow)
    window.plottingFrame = frameCreator.PlottingFrame(window.mainWindow)
    window.functions2Plot = frameCreator.Functions2PlotFrame(window.plottingFrame)
    window.firstRowFunctions2Plot = frameCreator.FirstRowFunctions2PlotFrame(window.functions2Plot)
    window.secondRowFunctions2Plot = frameCreator.SecondRowFunctions2PlotFrame(window.functions2Plot)
    window.thirdRowFunctions2Plot = frameCreator.ThirdRowFunctions2PlotFrame(window.functions2Plot)
    window.screensFrame = frameCreator.ScreensFrame(window.calculatorFrame)
    window.buttonsFrame = frameCreator.ButtonsFrame(window.calculatorFrame)
    window.firstRowButtons = frameCreator.FirstRowButtonsFrame(window.buttonsFrame)
    window.secondRowButtons = frameCreator.SecondRowButtonsFrame(window.buttonsFrame)
    window.thirdRowButtons = frameCreator.ThirdRowButtonsFrame(window.buttonsFrame)
    window.fourthRowButtons = frameCreator.FourthRowButtonsFrame(window.buttonsFrame)
    window.fifthRowButtons = frameCreator.FifthRowButtonsFrame(window.buttonsFrame)
    window.sixthRowButtons = frameCreator.SixthRowButtonsFrame(window.buttonsFrame)
    #Text and scrollbar
    window.history = Text(window.historyFrame, width = "300" , height = "100", fg = "white", bg = "black", font = window.historyTextFont)
    window.history.config(state = DISABLED)
    # NOTE(review): grid() followed by pack() on the same widget inside one
    # master is normally a tkinter geometry-manager conflict -- confirm this
    # widget actually renders as intended.
    window.history.grid(row = 1, column = 0)
    window.textScrollbar = Scrollbar(window.historyFrame)
    window.textScrollbar.config(background = "#20221f", activebackground = "#20221f", activerelief = "sunken")
    window.textScrollbar.pack(side=RIGHT, fill=Y)
    # Wire the scrollbar and the text widget together (both directions).
    window.history.config(yscrollcommand=window.textScrollbar.set)
    window.textScrollbar.config(command=window.history.yview)
    window.history.pack()
    # Plotting an empty graph with matplotlib, into the tkinter window
    window.firstPlot = False
    window.myFigure = Figure(figsize=(5,5), dpi = 100)
    window.a = window.myFigure.add_subplot(111)
    window.a.plot([],[])
    window.canvas = FigureCanvasTkAgg(window.myFigure, window.plottingFrame)
    window.canvas.draw()
    window.canvas.get_tk_widget().pack(side = BOTTOM, fill = BOTH, expand = True)
    window.toolbar = NavigationToolbar2TkAgg(window.canvas, window.plottingFrame)
    window.canvas._tkcanvas.pack(side = TOP, fill = BOTH, expand = True)
{
"api_name": "tkinter.Tk",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tkinter.font.Font",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tkinter.font",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tkinter.font.Font",
"lin... |
20162168775 | import pygame
import random
from object import Object
class Enemy(Object):
    """Sprite that accelerates diagonally and bounces off the screen edges."""

    def __init__(self, pos):
        # Base Object loads the sprite image and starting position.
        super().__init__("cursor.png", pos)
        self.image = pygame.transform.scale(self.image, (70, 90))
        # Per-frame acceleration; accelspeed is applied to both axes in update().
        self.accel = pygame.math.Vector2(0, 0)
        self.accelspeed = 0.1

    def update(self):
        # Current display size, used as the bounce boundary.
        screeninfo = pygame.display.Info()
        screenwidth = screeninfo.current_w
        screenheight = screeninfo.current_h
        # NOTE(review): self.speed (presumably a Vector2 from Object) is
        # overwritten with the plain int 10 here; the later
        # `self.speed += self.accel` would then be int + Vector2 -- confirm
        # this clamp branch is reachable/intended.
        if (self.accel.length_squared() > 10):
            self.speed = 10
        self.accel.y = self.accelspeed
        self.accel.x = self.accelspeed
        self.speed += self.accel
        # NOTE(review): accel was just set to (accelspeed, accelspeed), so the
        # zero check below can only fire if accelspeed itself is 0 -- likely dead.
        if (self.accel.length_squared() == 0):
            self.speed *= 0.9
        # NOTE(review): same int-vs-Vector2 concern as the first clamp above.
        if (self.accel.length_squared() > 150):
            self.speed = 150
        # Bounce: flip the matching velocity component and the acceleration
        # sign whenever a screen edge is crossed.
        if self.rect.right > screenwidth:
            self.speed.x *= -1
            self.accelspeed *= -1
        if self.rect.left < 0:
            self.speed.x *= -1
            self.accelspeed *= -1
        if self.rect.bottom > screenheight:
            self.speed.y *= -1
            self.accelspeed *= -1
        if self.rect.top < 0:
            self.speed.y *= -1
            self.accelspeed *= -1
        super().update()
| mojd1234/ouch | enemy.py | enemy.py | py | 1,216 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "object.Object",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pygame.transform.scale",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.math.V... |
34521671321 | #!/usr/bin/python3
import os, tempfile, subprocess
# CTF challenge: accept at most 12 bytes from the player, write them to a
# throw-away file, mark it executable and run it -- the player must craft a
# tiny (<= 12 byte) executable payload.
try:
    data = input(">").strip()
    if len(data) > 12: raise Exception("too large")
    with tempfile.TemporaryDirectory() as dirname:
        name = os.path.join(dirname, "user")
        with open(name, "w") as f: f.write(data)
        # r-x for the owner only; the file is executed directly below.
        os.chmod(name, 0o500)
        print(subprocess.check_output(name))
except Exception as e:
    # Any failure (oversized input, exec error, non-zero exit) ends the run.
    print("FAIL:", e)
    exit(1)
| p4-team/ctf | 2018-12-08-hxp/misc_elves/tiny_elves_fake.py | tiny_elves_fake.py | py | 418 | python | en | code | 1,716 | github-code | 1 | [
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.chmod",
"li... |
29055176966 | import discord
import asyncio
from discord.ext import commands
from datetime import datetime
import math
from config import *
bot = commands.Bot(command_prefix=PREFIX, intents= discord.Intents.all())
bot.remove_command('help')
@bot.event
async def on_ready():
    """Set the bot's "watching" presence and log connection details."""
    watching = discord.Activity(type=discord.ActivityType.watching, name="you")
    await bot.change_presence(status=discord.Status.online, activity=watching)
    print('Connected to bot: {}'.format(bot.user.name))
    print('Bot ID: {}'.format(bot.user.id))
async def check_user_presence():
    """Poll one guild member's presence once a second and log transitions.

    Prints when the tracked member comes online, how long they were online
    once they go offline, and an "is offline" reminder every 60 seconds.
    Runs forever; intended to be scheduled with ``bot.loop.create_task``.
    """
    user_id = int(USER_ID)
    guild_id = int(GUILD_ID)
    guild = bot.get_guild(guild_id)
    user = guild.get_member(user_id)
    counter_offline = 0
    counter_online_time = 0      # seconds of the current online streak
    check_being_online = False   # True once the "online" message was printed
    check_online = False         # True if a streak is waiting to be summarized
    while True:
        now = datetime.now()
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
        if user.status == discord.Status.online or user.status == discord.Status.do_not_disturb or user.status == discord.Status.idle:
            if check_being_online == False:
                print(f"{user.display_name} is online right now. | {dt_string}")
                check_being_online = True
            counter_online_time += 1
            check_online = True
        else:
            check_being_online = False
            if check_online == True:
                # BUG FIX: the original subtracted `hours` / `minutes` (the
                # unit counts, not the seconds they represent) before the next
                # division, producing wrong durations.  divmod gives the
                # correct hour/minute/second decomposition.
                if counter_online_time > 3599:
                    hours, rem = divmod(counter_online_time, 3600)
                    minutes, seconds = divmod(rem, 60)
                    print(f"{user.display_name} was online for {hours} hours, {minutes} minutes and {seconds} seconds.")
                elif counter_online_time > 59:
                    minutes, seconds = divmod(counter_online_time, 60)
                    print(f"{user.display_name} was online for {minutes} minutes and {seconds} seconds.")
                else:
                    print(f"{user.display_name} was online for {counter_online_time} seconds.")
                check_online = False
            # Remind that the user is offline at most once per 60 polls.
            if counter_offline == 0:
                print(f"{user.display_name} is offline.")
                counter_offline = 60
            counter_offline -= 1
            counter_online_time = 0
        await asyncio.sleep(1)  # Wait for 1 seconds before checking again
@bot.command()
async def s(ctx):
    """Start the presence-polling loop as a background task on the bot's event loop."""
    bot.loop.create_task(check_user_presence())
bot.run(TOKEN) | respuNN/Discord-Presence-Tracker | log-bot.py | log-bot.py | py | 2,758 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Bot",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "discord.Intents.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "discor... |
32261344928 | import requests
from bs4 import BeautifulSoup
def get_article_body(url):
    """Fetch *url* and return the text of its article body, or None.

    Looks for a ``<div class="article ">`` element (adjust the selector to the
    target site's HTML structure) and returns its text content.  Returns None
    when the request fails or the element is absent.
    """
    # Send a GET request to the URL
    response = requests.get(url)
    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Parse the HTML content of the page
        soup = BeautifulSoup(response.text, 'html.parser')
        # BUG FIX: the original called soup.findall(...), which is not a
        # BeautifulSoup method (BS4 treats it as a lookup of a <findall> tag
        # and yields None), then fell through to find(...).get_text() with no
        # None check -- crashing whenever the element is missing.
        article_body_element = soup.find('div', class_='article ')
        if article_body_element is not None:
            # Extract and return the text content of the article body
            return article_body_element.get_text()
    # Request failed or the article body wasn't found.
    return None
# Example usage
# NOTE(review): `url` is an empty placeholder -- presumably requests.get('')
# raises before a body can be fetched; supply a real article URL here.
url=''
article_body = get_article_body(url)
if article_body:
    print(article_body)
else:
    print("Failed to fetch the article body.")
| AlexOutis/NewsBot | soup.py | soup.py | py | 1,242 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
75065185953 | __all__ = [
"process_epoch",
]
import logging
import sys
from typing import NoReturn, Optional
import torch
import tqdm
from torch import LongTensor, Tensor
from . import _settings as settings
from . import results
from . import utils
from .. import _config as config
from .. import influence_utils
from ..influence_utils import InfluenceMethod
from .. import types as parent_types
from .. import utils as parent_utils
def process_epoch(block32: parent_utils.ClassifierBlock,
                  dataset: parent_types.CustomTensorDataset,
                  id_map: dict, ep: int, ep_wd: float, ep_bs: int,
                  x_targ32: Tensor, y_targ: LongTensor,
                  tensors: utils.TracInTensors, ex_id: Optional[int]) -> NoReturn:
    r"""
    Performs TracIn on a single epoch (including subepochs if applicable) for the specified
    \p block
    :param block32: Block for use with floats (i.e., float32)
    :param dataset: Dataset object of interest
    :param id_map: Maps example ID to dataset index
    :param ep: Active epoch number
    :param ep_wd: Epoch weight decay value
    :param ep_bs: Epoch batch_size value
    :param x_targ32: Already transformed X-target
    :param y_targ: y of target example
    :param tensors: All results tensors
    :param ex_id: Optional target example ID number
    :return:
    """
    assert isinstance(dataset, parent_types.CustomTensorDataset), "Dataset class is not supported"
    influence_utils.check_duplicate_ds_ids(ds_ids=tensors.full_ds_ids)
    influence_utils.check_bd_ids_contents(bd_ids=tensors.full_bd_ids)
    def _load_blocks(_ep: int, _subep: Optional[int]):
        r""" Standardizes loading the block parameters """
        block32.restore_epoch_params(ep=_ep, subepoch=_subep)
        block32.eval()
    cur_subep = 0
    # Start from the parameters checkpointed at the end of the previous epoch.
    _load_blocks(_ep=ep - 1, _subep=None)
    # Continue learning rate from end of the last epoch
    lr = block32.get_prev_lr_val(ep=ep - 1, subepoch=None)
    # Epoch dataset IDs ordered by batches
    ep_ds_ids = block32.get_epoch_dataset_ids(ep=ep)
    subep_ends = block32.get_subepoch_ends(ep=ep)
    n = len(id_map)
    # Iterate through the subepochs
    n_subep = len(block32.get_subepoch_ends(ep=ep))
    # NOTE(review): tqdm_desc is never used (ex_desc below is used instead).
    tqdm_desc = f"Epoch {ep} Subepoch %d"
    for cur_subep in range(n_subep + 1):
        # Get subepoch IDs for TracIn
        start_rng = subep_ends[cur_subep - 1] if cur_subep > 0 else 0
        end_rng = subep_ends[cur_subep] if cur_subep < len(subep_ends) - 1 else n
        subep_ids = ep_ds_ids[start_rng:end_rng]
        # Subepoch used to load stored data
        subep_load = cur_subep if cur_subep < n_subep else None
        # Initialize the tensors storing the results from the subepoch
        tensors.subep.reset()
        # Get the loss gradient for the test (target) example
        loss_targ32, acts_targ32, grad_targ32 = utils.compute_grad(block32, ep_wd,
                                                                   x_targ32, y_targ,
                                                                   flatten=True)
        always_use_dbl = utils.is_grad_zero(grad=grad_targ32)
        if always_use_dbl:
            # NOTE(review): header is built but never used -- looks like a
            # leftover from a removed logging call; confirm before deleting.
            header = influence_utils.build_log_start_flds(block=block32, ep=ep, subepoch=cur_subep,
                                                          res_type=None, ex_id=ex_id)
        # Skip iter if target has zero gradient even at double precision
        # NOTE(review): this re-checks the same float32 gradient, so skip_iter
        # always equals always_use_dbl here -- a float64 path may have been
        # removed at some point.
        skip_iter = always_use_dbl and utils.is_grad_zero(grad=grad_targ32)
        ex_desc = f"Epoch {ep} Subepoch: {cur_subep}"
        ex_tqdm = tqdm.tqdm(tensors.full_ds_ids, total=tensors.full_ds_ids.shape[0],
                            file=sys.stdout, disable=config.QUIET, desc=ex_desc)
        with ex_tqdm as ex_bar:
            if not skip_iter:  # skip if targ grad is zero
                # Accumulate each training example's dot product with the
                # target gradient into tensors.subep.
                for cnt, id_val in enumerate(ex_bar):
                    utils.tracin_dot_product(block32=block32,
                                             grad_targ32=grad_targ32,
                                             subep_tensors=tensors.subep,
                                             ds=dataset, id_val=id_val, id_map=id_map,
                                             ep_wd=ep_wd)
            else:
                # Target gradient is identically zero: fill sentinel values so
                # downstream divisions stay well defined.
                loss_targ32.fill_(settings.MIN_LOSS)
                acts_targ32.fill_(settings.MIN_LOSS)
                # Prevent divide by zero errors in later calculations
                tensors.subep.grad_norms.fill_(settings.MIN_NORM)
                tensors.subep.loss_vals.fill_(settings.MIN_LOSS)
        # Perform normalization based on learning rate and batch size as specified by TracIn
        # Perform externally to make faster with CUDA
        tensors.subep.dot_vals *= lr / ep_bs
        # Perform the sqrt externally to use CUDA
        tensors.subep.grad_norms.sqrt_()
        _combine_and_log_results(block=block32, ep=ep, subep=cur_subep,
                                 grad_targ=grad_targ32, subep_ids=subep_ids,
                                 tensors=tensors, ex_id=ex_id)
        # Load parameters and learning rate for the next (sub)epoch
        _load_blocks(_ep=ep, _subep=subep_load)
        lr = block32.get_prev_lr_val(ep=ep, subepoch=subep_load)
def _combine_and_log_results(block: parent_utils.ClassifierBlock, ep: int, subep: int,
                             subep_ids: LongTensor,
                             grad_targ: Tensor,
                             tensors: utils.TracInTensors,
                             ex_id: Optional[int]) -> NoReturn:
    r""" Accumulates the subepoch dot products into the running influence totals """
    ds_ids, bd_ids = tensors.full_ds_ids, tensors.full_bd_ids
    subep_res = tensors.subep
    subep_res.dot_normed = subep_res.dot_vals / subep_res.grad_norms
    # TracInCP: raw dot products accumulated over every dataset id
    tensors.tracincp[ds_ids] += subep_res.dot_vals[ds_ids]
    # GAS: similarity additionally normalized by the target gradient's norm
    targ_norm = grad_targ.norm()
    targ_norm[targ_norm <= 0] = settings.MIN_NORM
    sim_base = subep_res.dot_normed / targ_norm.cpu()
    tensors.gas_sim[ds_ids] += sim_base[ds_ids]
    # TracIn: only the ids trained on in this subepoch contribute
    tensors.tracin_inf[subep_ids] += subep_res.dot_vals[subep_ids]
    # TracIn renormalized by the L2 cosine norm
    tensors.tracin_renorm[subep_ids] += sim_base[subep_ids]
def _log_ratio_stats(block: parent_utils.ClassifierBlock, ep: int, subep: int,
                     vals: Tensor, full_ds_ids: LongTensor,
                     full_bd_ids: LongTensor, ex_id: Optional[int],
                     is_grad_norm: bool) -> NoReturn:
    r""" Calculates and returns the adversarial and clean mean norms respectively """
    assert full_bd_ids.numel() == full_ds_ids.numel(), "Adversarial/dataset length mismatch"
    # Extract only the relevant cumulative IDs
    assert vals.numel() > torch.max(full_ds_ids).item(), "Some dataset ID not found"
    vals = vals[full_ds_ids]
    # Label whether each example is a backdoor or not
    is_bd = influence_utils.is_bd(bd_ids=full_bd_ids)
    # Split the statistics tensor into its adversarial and clean partitions.
    adv_vals, cl_vals = vals[is_bd], vals[~is_bd]
    if not is_grad_norm:
        # Loss mode: log the clean and adversarial distributions separately.
        res_types = (InfluenceMethod.LOSS_CLEAN_SPOT, InfluenceMethod.LOSS_ADV_SPOT)
        for vals, r_type in zip((cl_vals, adv_vals), res_types):
            utils.log_vals_stats(block=block, ep=ep, subep=subep, res_type=r_type, norms=vals,
                                 ex_id=ex_id)
    else:
        # Gradient-norm mode: log the adversarial/clean magnitude ratio.
        ratio_res_type = InfluenceMethod.GRAD_NORM_MAG_RATIO
        # Log mean and median ratios for clear documentation
        header = influence_utils.build_log_start_flds(block=block, ep=ep, subepoch=subep,
                                                      res_type=ratio_res_type, ex_id=ex_id)
        # mean_mag_ratio = (adv_vals.mean() / cl_vals.mean()).view([1])
        # logging.info(f"{header} Mean: {mean_mag_ratio.item():.3E}")
        median_mag_ratio = (adv_vals.median() / cl_vals.median()).view([1])
        logging.info(f"{header} Median: {median_mag_ratio.item():.3E}")
def _log_vals_split_adv_clean(block: parent_utils.ClassifierBlock, ep: int, subep: int,
                              clean_method: InfluenceMethod, adv_method: InfluenceMethod,
                              vals: Tensor, full_ds_ids: LongTensor, full_bd_ids: LongTensor,
                              ex_id: Optional[int]) -> NoReturn:
    r""" Logs the statistics of \p vals separately for the clean and adversarial examples """
    # Compute the backdoor mask once and reuse it for both splits (the
    # original recomputed influence_utils.is_bd for each split).
    is_bd = influence_utils.is_bd(bd_ids=full_bd_ids)
    # Log clean stats
    utils.log_vals_stats(block=block, res_type=clean_method, ep=ep, subep=subep,
                         norms=vals[full_ds_ids[~is_bd]], ex_id=ex_id)
    # Log Adversarial stats
    utils.log_vals_stats(block=block, res_type=adv_method, ep=ep, subep=subep,
                         norms=vals[full_ds_ids[is_bd]], ex_id=ex_id)
| ZaydH/target_identification | fig01_cifar_vs_mnist/poison/tracin_utils/main.py | main.py | py | 8,735 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "torch.Tensor",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.LongTensor",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "influence_utils.check_d... |
34520883151 | #!/usr/bin/env python3
import sys
import random
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
def ReadPrivateKey(filename):
    """Load a PEM-encoded, unencrypted private key from *filename*."""
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle returned by the bare open().read()).
    with open(filename, 'rb') as f:
        pem_data = f.read()
    return serialization.load_pem_private_key(
        pem_data, password=None, backend=default_backend())
def RsaDecrypt(private_key, ciphertext):
    """Textbook RSA decryption: return ciphertext^d mod n (no padding)."""
    public = private_key.public_key()
    max_len = public.key_size // 8
    assert len(ciphertext) <= max_len, 'Ciphertext too large'
    c = int.from_bytes(ciphertext, 'big')
    d = private_key.private_numbers().d
    n = public.public_numbers().n
    return pow(c, d, n)
def Challenge(private_key, reader, writer):
    """Serve the oracle: decrypt a client-chosen ciphertext, then leak 100
    randomly-masked parity bits of one of the two client-supplied messages.

    Returns 0 on success, 1 on any error (short read, bad ciphertext, ...).
    """
    try:
        # Two single-byte candidate messages supplied by the client.
        m0 = reader.read(1)
        m1 = reader.read(1)
        ciphertext = reader.read(private_key.public_key().key_size // 8)
        # Textbook RSA decryption of the client-chosen ciphertext.
        dice = RsaDecrypt(private_key, ciphertext)
        for rounds in range(100):
            # dice & 1 selects m0 or m1 -- constant across all 100 rounds.
            p = [m0, m1][dice & 1]
            # NOTE(review): k in {0, 1, 2} is not uniform mod 2 (two even
            # values vs one odd), so c is a biased parity of p -- presumably
            # the intended CTF weakness; do not "fix".
            k = random.randint(0, 2)
            c = (ord(p) + k) % 2
            writer.write(bytes((c,)))
            writer.flush()
        return 0
    except Exception as e:
        return 1
def main():
    """Load the key named on the command line and serve one challenge on stdio."""
    private_key = ReadPrivateKey(sys.argv[1])
    # Binary stdio streams: the protocol exchanges raw bytes, not text.
    return Challenge(private_key, sys.stdin.buffer, sys.stdout.buffer)
if __name__ == '__main__':
    sys.exit(main())
| p4-team/ctf | 2018-06-23-google-ctf/crypto_secrecy/challenge.py | challenge.py | py | 1,222 | python | en | code | 1,716 | github-code | 1 | [
{
"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.primitives.serialization",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "cryptography.hazmat.backends.defau... |
8658763067 | """:cvar
pip3 uninstall telebot
pip3 uninstall PyTelegramBotAPI
pip3 install pyTelegramBotAPI
pip3 install --upgrade pyTelegramBotAPI
"""
try:
import cv2
import PIL.Image as Image
import io
import base64
#from byte_array import byte_data
import telebot
import config
import random
from telebot import util, types
import Alg
import DiscretAlg
import Factorization
import primeTests
import time , steganography
bot = telebot.TeleBot(config.TOKEN)
#help(bot)
#print(getStickerSet())
print("Bot is started")
imagesPath = ''
LENGTH = 15
state = ''
msg2Hide = ''
hideInfo = False
HK = 1
#zzss = steganography.flatImage()
def buildMainMenu(buttons):
    """Build a reply keyboard with one button per row from {label: callback_data}."""
    # NOTE(review): resets the module-level stegoOp flag as a side effect.
    global stegoOp
    stegoOp = ''
    mainmn = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=1)
    for i,v in buttons.items():
        # NOTE(review): an InlineKeyboardButton inside a ReplyKeyboardMarkup
        # looks suspect -- reply keyboards normally take KeyboardButton/str;
        # confirm against the pyTelegramBotAPI docs (this helper may be unused,
        # the code below builds menus with buildInternalMenu instead).
        tmp = types.InlineKeyboardButton(i, callback_data=v)
        mainmn.add(tmp)
    return mainmn
def buildInternalMenu(buttons):
    """Build a one-button-per-row inline keyboard from {label: callback_data}."""
    rows = [[types.InlineKeyboardButton(label, callback_data=data)]
            for label, data in buttons.items()]
    return types.InlineKeyboardMarkup(rows)
def intt(x):
    """Convert every element of iterable *x* to int and return the list."""
    return [int(token) for token in x]
# Inline menus: label -> callback_data.  callback_inline() dispatches on the
# callback_data strings and lalala() on the matching `state` values.
mainMenu = buildInternalMenu(
    {"RSA": "1",
     "EL-Gamal": "2",
     "Inv mod": "3",
     "BinPow": "4",
     "Prime": "5",
     "DigitalSignature": "10",
     "Factorization p": "6",
     "Discrete alg(Shanks’ baby-step/giant-step)": "7",
     "Elliptic curve": "8",
     "Steganograph": "9",
     "About":"100"})
#buildMainMenu
RSAMenu = buildInternalMenu({"RSA Encryption":"20",
                             "RSA Decryption":"21",
                             "RSA Genrate keys":"20.1",
                             "Main menu":"00"})
GAMALMenu = buildInternalMenu({"GAMAL Encryption": "30",
                               "GAMAL Decryption":"31",
                               "Main menu": "00"})
primeMenu = buildInternalMenu({"is prime": "40",
                               "Generate prime": "41",
                               #"fractional": "52",
                               "Main menu": "00"})
primeCheckMenu = buildInternalMenu({"Trial Division Method": "50",#xxxxxxxxxxx
                                    "Chinese Test": "501",#xxxxx
                                    "Fermat Test": "502",
                                    "Miller Test": "51",#xxxxx
                                    "Miller-Rabin": "52",
                                    "Main menu": "00"})
primeFractionMenu = buildInternalMenu({"Fermat": "60",
                                       "Polard p-1": "61",
                                       "Monte carlo": "62",#xxxxxx
                                       "Main menu": "00"})
ellipticMenu = buildInternalMenu({"findPoints": "70",
                                  "isGenerator": "71",
                                  "Main menu": "00"})
DigSignMenu = buildInternalMenu({"RSA_DS": "80",
                                 "El-Gamal_DS": "81",
                                 "Elliptic-curve_DS": "82",#xxxxxxxxx
                                 "Main menu": "00"})
stegoMenu = buildInternalMenu({"Hide": "110",
                               "Extract": "111",
                               "Main menu": "00"})
@bot.message_handler(commands=['start'])
def welcome(message):
    """Handle /start: send the logo sticker, pause briefly, show the main menu."""
    # Use a context manager so the sticker file handle is closed after the
    # upload (the original leaked the handle returned by open()).
    with open(imagesPath + 'logo.tgs', 'rb') as sti:
        bot.send_sticker(message.chat.id, sti)
    time.sleep(1)
    bot.send_message(message.chat.id,
                     "Hello, {0.first_name}!\nWelcome - <b>{1.first_name}</b>, to help you.\n\n\n****please use space instead of comma****".format(
                         message.from_user, bot.get_me()),
                     parse_mode='html', reply_markup=mainMenu)#mainMenu
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    """Dispatch inline-keyboard presses: show a submenu or prompt for input,
    then remember the pressed button in the module-level `state` so the next
    text message (handled by lalala) knows which operation to run."""
    global state
    #if True:
    try:
        if call.message:
            if call.data == '00':
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text = "Select" , reply_markup=mainMenu)
            elif call.data == '1':
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text = "Select" , reply_markup=RSAMenu)
                #bot.send_message(call.message.chat.id, "Select", parse_mode='html', reply_markup=RSAMenu)
            elif call.data == '2':
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text = "Select" , reply_markup=GAMALMenu)
            elif call.data == '3':
                bot.send_message(call.message.chat.id, "Enter x,p (x^-1 mod p)", parse_mode='html', reply_markup=None)
            elif call.data == '4':
                bot.send_message(call.message.chat.id, "Enter x,y,p (x^y mod p)", parse_mode='html', reply_markup=None)
            elif call.data == '5':
                bot.send_message(call.message.chat.id, "Select", parse_mode='html', reply_markup=primeMenu)
            elif call.data == '6':
                bot.send_message(call.message.chat.id, "Select", parse_mode='html', reply_markup=primeFractionMenu)
            elif call.data == '7':
                bot.send_message(call.message.chat.id, "Enter h,g,p(h = g^x mod p)", parse_mode='html', reply_markup=None)
            elif call.data == '8':
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text = "Select" , reply_markup=ellipticMenu)
            elif call.data == '9':
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text = "Select" , reply_markup=stegoMenu)
            elif call.data == '10':
                bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text = "Select" , reply_markup=DigSignMenu)
            elif call.data == '20':
                bot.send_message(call.message.chat.id, "Enter m,e,p,q or m,e,n or m", parse_mode='html',reply_markup=None)
            elif call.data == '20.1':
                bot.send_message(call.message.chat.id, "Enter p q e or Any number", parse_mode='html',reply_markup=None)
            elif call.data == '21':
                bot.send_message(call.message.chat.id, "Enter c,d,n", parse_mode='html',reply_markup=None)
            elif call.data == '30':
                bot.send_message(call.message.chat.id, "Enter m,p,g,x,k or m", parse_mode='html',reply_markup=None)
            elif call.data == '31':
                bot.send_message(call.message.chat.id, "Enter a,b,p,g", parse_mode='html', reply_markup=None)
            elif call.data == '40':
                bot.send_message(call.message.chat.id, "Select", parse_mode='html', reply_markup=primeCheckMenu)
            elif call.data == '41':
                bot.send_message(call.message.chat.id, "Enter length(bits)", parse_mode='html',reply_markup=None)
            elif call.data == '50':
                bot.send_message(call.message.chat.id, "Enter x", parse_mode='html',reply_markup=None)
            # BUG FIX: the original condition was `call.data == '50' or
            # call.data == '501'` -- the '50' disjunct was unreachable because
            # the branch above already handles '50'.
            elif call.data == '501':
                bot.send_message(call.message.chat.id, "Enter p to test", parse_mode='html',reply_markup=None)
            elif call.data == '502':
                bot.send_message(call.message.chat.id, "Enter p,b", parse_mode='html',reply_markup=None)
            elif call.data == '51':
                bot.send_message(call.message.chat.id, "Under development", parse_mode='html',reply_markup=None)
                #bot.send_message(call.message.chat.id, "Enter x, (*)", parse_mode='html',reply_markup=None)
            elif call.data == '52':
                bot.send_message(call.message.chat.id, "Enter n,k", parse_mode='html',reply_markup=None)
            elif call.data == '60':
                bot.send_message(call.message.chat.id, "Enter n", parse_mode='html', reply_markup=None)
            elif call.data == '61':
                bot.send_message(call.message.chat.id, "Enter n(r0 =2)", parse_mode='html',reply_markup=None)
            elif call.data == '62':
                bot.send_message(call.message.chat.id, "Under development", parse_mode='html',reply_markup=None)
                #bot.send_message(call.message.chat.id, "Enter n, m0", parse_mode='html',reply_markup=None)
            elif call.data == '70':
                bot.send_message(call.message.chat.id, "Enter field, X (11 1 0 -8 -5)", parse_mode='html',reply_markup=None)
            elif call.data == '71':
                bot.send_message(call.message.chat.id, "Enter x,y,field,X (9 5 11 1 0 -8 -5)", parse_mode='html',reply_markup=None)
            elif call.data == '80':
                bot.send_message(call.message.chat.id, "Enter s msg |v msg c pub_key", parse_mode='html',reply_markup=None)
            elif call.data == '81':
                bot.send_message(call.message.chat.id, "Enter s msg| v msg,c,pub_key", parse_mode='html',reply_markup=None)
            elif call.data == '82':
                bot.send_message(call.message.chat.id, "Under development", parse_mode='html',reply_markup=None)
                #bot.send_message(call.message.chat.id, "xxxxxxxxxxxx", parse_mode='html',reply_markup=None)
            elif call.data == '100':
                bot.send_message(call.message.chat.id, "Mohammed Hammoud\nOla Haydar", parse_mode='html',reply_markup=None)
            elif call.data == '110':
                bot.send_message(call.message.chat.id, "Enter image and text in caption", parse_mode='html',reply_markup=None)
            elif call.data == '111':
                bot.send_message(call.message.chat.id, "Enter image and hk in caption", parse_mode='html',reply_markup=None)
            # Remember the selection for the next text/photo message handler.
            state = call.data
            print(state)
    except Exception as e:
        print(repr(e))
@bot.message_handler(content_types=['photo'])
def handle_file(message):
    """Receive a photo, save it locally and run the LSB steganography step.

    Expects the caption to be "message@hidekey" for hiding.  The stego image
    plus MSE/PSNR quality metrics are sent back to the chat.
    """
    global state, hideInfo, msg2Hide, HK
    print("*******************1")
    # NOTE(review): stegoOp is hardcoded to 'hide', so the 'extract' branch
    # below is currently unreachable -- confirm whether extraction was meant
    # to be selected via `state` ('111').
    stegoOp = 'hide'
    if False:# stegoOp == '':
        bot.reply_to(message, "*******************1")
    else:
        # Download the highest-resolution photo variant and store it on disk.
        file_info = bot.get_file(message.photo[-1].file_id)
        tmp = bot.download_file(file_info.file_path)
        #file_name= message.photo[-1].file_id +".png"
        file_name= "tempImg.png"
        src1 = file_name
        with open(src1,'wb') as new_file:
            new_file.write(tmp)
        coverImage = cv2.imread(src1)
        if stegoOp == 'hide':
            # Caption format: "<secret message>@<hide key>".
            msg2Hide, HK = message.caption.split('@')
            HK = int(HK)
            stegoImg,mse,psnr = steganography.hideLSB(msg2Hide,coverImage,0)
            file_name= "stego.png"
            src2 = file_name
            cv2.imwrite(src2, stegoImg)
            stegoImg = cv2.imread(src2)
            RESULTS = 'Cover Image \nmsg: {}\nhideKey: {}\nmse = {}\npsnr = {}'.format(msg2Hide, HK, mse, psnr)
            bot.send_photo(message.chat.id,photo = open(src2,'rb'),caption = RESULTS)
        elif stegoOp == 'extract':
            # NOTE(review): message.caption is a str -- `.text` here looks
            # wrong and would raise AttributeError if this branch ever ran.
            HK = int(message.caption.text)
            data, h, date = steganography.extractLSB(coverImage,HK)
            RESULTS = 'Hidekey: {}data\nh = {}\ndate = {}'.format(HK, date, h == steganography.calcHash(date) , date)
            bot.reply_to(message, RESULTS)
@bot.message_handler(content_types=['text'])
def lalala(message):
global state, hideInfo, msg2Hide, HK
staegoOp = ''
#if True:
try:
print(state)
if message.chat.type == 'private':
#I = intt(message.text.split())
if state == '3':
I = intt(message.text.split())
x, p = I
z = Alg.findModInverse(x, p)
if z != "Error":
bot.send_message(message.chat.id, str(z), parse_mode='html', reply_markup=None)
else:
bot.send_message(message.chat.id, "gcd(x,p) !=1", parse_mode='html', reply_markup=None)
elif state == '4':
I = intt(message.text.split())
x, y, p = I
z = Alg.BPower(x, y, p)
bot.send_message(message.chat.id, str(z), parse_mode='html', reply_markup=None)
elif state == '7':
I = intt(message.text.split())
h,g,p = I
z = DiscretAlg.bsgs(h,g,p)
bot.send_message(message.chat.id, "x = {}".format(z), parse_mode='html', reply_markup=None)
elif state == '9':
bot.send_message(call.message.chat.id, "Enter text, HideKey", parse_mode='html', reply_markup=None)
elif state == '20':
I = intt(message.text.split())
if len(I) == 1:
m = I
[(e, n),(d, n)] = Alg.RSA_genrate_keys(LENGTH)
z = Alg.RSA_enc(m,e,n)
elif len(I) == 3:
m, e, n = I
z = Alg.RSA_enc(m,e,n)
d = '?'
else:
m, e, p, q = I
n = p * q
z = Alg.RSA_enc(m,e,n)
d = Alg.findModInverse(e,(p-1)*(q-1))
bot.send_message(message.chat.id, "msg = {}\nCipher = {}\ne = {}\nd = {}\nn = {}".format(m,z, e, d, n), parse_mode='html', reply_markup=None)
elif state == '20.1':
I = intt(message.text.split())
if len(I) == 1:
z = Alg.RSA_genrate_keys(20)
else:
z = Alg.RSA_genrate_keys(20,(I[0],I[1],I[2]))
bot.send_message(message.chat.id, "global{}\nprivate{}".format(z[0],z[1]), parse_mode='html', reply_markup=None)
elif state == '21':
I = intt(message.text.split())
c, d, n = I
z = Alg.RSA_dec(c,d,n)
bot.send_message(message.chat.id, "msg {}".format(z), parse_mode='html', reply_markup=None)
elif state == '30':
I = intt(message.text.split())
if len(I) == 1:
m = I
((y, g, p),(x, g, p)) = Alg.AlGamal_Genkeys("",LENGTH)
(a, b) = Alg.AlGamal_Enc(m, (y, g, p))
elif len(I) == 5:
m, p, g, x, k = I
((y, g, p),(x, g, p)) = Alg.AlGamal_Genkeys((p,g,x),LENGTH)
(a, b) = Alg.AlGamal_Enc(m, (y, g, p), k = k)
bot.send_message(message.chat.id, "msg {}\nCipher {} \npublic {}\nprivate {}\n".format(m, (a,b),(y,g,p),(x,g,p)), parse_mode='html', reply_markup=None)
elif state == '41':
n = intt(message.text.split())
z = Alg.generate_prime_number(n[0])
bot.send_message(message.chat.id, "p = {}".format(z), parse_mode='html', reply_markup=None)
elif state == '50':
n = intt(message.text.split())
z = primeTests.trialDevision(n[0])
bot.send_message(message.chat.id, "p = {} primility is {} ".format(n[0],z), parse_mode='html', reply_markup=None)
elif state == '501':
n = intt(message.text.split())
z = primeTests.chinesTest(n[0])
bot.send_message(message.chat.id, "p = {} primility is {} ".format(n[0],z), parse_mode='html', reply_markup=None)
elif state == '502':
p, b = intt(message.text.split())
z = primeTests.fermatTest(p, b)
bot.send_message(message.chat.id, "p = {}, b = {} primility is {} ".format(p,b,z), parse_mode='html', reply_markup=None)
elif state == '51':
pass
#n = intt(message.text.split())
#z = primeTests.millerTest(n[0])
#bot.send_message(message.chat.id, "p = {} primility is {} ".format(n[0],z), parse_mode='html', reply_markup=None)
elif state == '52':
n, k = intt(message.text.split())
z = primeTests.isPrimeMillerRabin(n,k)
bot.send_message(message.chat.id, "{} prime \n {}".format(n,z), parse_mode='html', reply_markup=None)
elif state == '60':
n = intt(message.text.split())
z = Factorization.factorFermat(n[0])
bot.send_message(message.chat.id, "n = {} :\n {}".format(n[0],z), parse_mode='html', reply_markup=None)
elif state == '61':
n = intt(message.text.split())
z = Factorization.factoPollard(n[0],2)
bot.send_message(message.chat.id, "n = {}\nr0 = {} :\n {}".format(n,r0, z), parse_mode='html', reply_markup=None)
elif state == '62':
pass
#z = Factorization.doit(n)
elif state == '70':
I = intt(message.text.split())
ALL = I
field = ALL[0]
X = ALL[1:]
(L,points) = Alg.FindPonts(X, field)
bot.send_message(message.chat.id, "numberOfPoints = {}\n Points = {}".format(L,points), parse_mode='html', reply_markup=None)
elif state == '71':
I = intt(message.text.split())
ALL = I
point = ALL[0:2]
field = ALL[2]
X = ALL[3:]
print(point, field, X)
(Ggroup, allpoints, ISgenerated, logger) = Alg.isgenerator((point[0],point[1]), X, field)
bot.send_message(message.chat.id, "Ggroup {}\nall points = {}\n IS generater = {}\n logger {}".format(Ggroup, allpoints, ISgenerated, logger), parse_mode='html',reply_markup=None)
elif state == '80':
rec = message.text.split()
op = rec[0]
msg = rec[1]
if op == 's':
(h1, c, priv_key,pub_key) = Alg.DS_RSA(msg)
bot.send_message(message.chat.id,"msg = {}\nh( {} ) = {}\nC = {}\npriv_key(d,n) = {}\npub_key(e,n) = {}".format(msg, h1, c,priv_key,pub_key), parse_mode='html', reply_markup=None)
elif op == 'v':
c = rec[2]
pub_key = (rec[3], rec[4])
z = Alg.DS_RSA_verify(msg, c, pub_key)
bot.send_message(message.chat.id,"signature is {}".format(z), parse_mode='html', reply_markup=None)
elif state == '81':
rec = message.text.split()
op = rec[0]
msg = rec[1]
if op == 's':
(c, pub_key, priv_key) = Alg.DS_Algamal(msg)
bot.send_message(message.chat.id,"msg = {}\nC = {}\npub_key(y, g, p) = {}\npriv_key(x, g, p) = {}".format(msg, c, pub_key,priv_key), parse_mode='html', reply_markup=None)
elif op == 'v':
c = rec[2]
pub_key = (rec[3], rec[4],rec[5])
z = Alg.DS_Algamal_verify(msg, c, pub_key)
bot.send_message(message.chat.id,"signature is {}".format(z), parse_mode='html', reply_markup=None)
elif state == '110':
bot.send_message(message.chat.id, "Enter image and text in caption", parse_mode='html', reply_markup=None)
stegoOp = 'hide'
elif state == '111':
stegoOp = 'extract'
elif state == '82':
pass
elif state == '82':
pass
else:
bot.reply_to(message, 'Sorry what is this!!!')
except Exception as e:
bot.reply_to(message, 'Uncorrect input')
# RUN
bot.polling(none_stop=True)
except Exception as e:
bot.polling(none_stop=True)
| Hammoudmsh/Information-security-Bot | main.py | main.py | py | 21,191 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "telebot.TeleBot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "telebot.types.ReplyKeyboardMarkup",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "t... |
26807453323 | #!/usr/bin/env python3
import os
import shutil
import yt_dlp
import requests
from utils import logger
from downloaders import DL_DIRECTORY
log = logger.get_log(__name__)
class Downloader():
    """Downloads trailer files into a temporary directory and moves them
    into their final destination.

    YouTube/Vimeo links go through yt_dlp; Apple trailer links are streamed
    over plain HTTP with a QuickTime user agent.
    """

    def __init__(self):
        self._createTempDir()

    def cleanUp(self):
        """Remove everything inside the temporary download directory."""
        for filename in os.listdir(DL_DIRECTORY):
            file_path = os.path.join(DL_DIRECTORY, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except OSError as e:
                log.warning('Failed to remove: {} ERROR: {}'.format(filename, e))
                continue
            log.debug('Removed {}'.format(file_path))

    def _moveTo(self, source, destination):
        """Move *source* to *destination*. Return True on success."""
        log.info('Download Complete Moving {} to {}'.format(os.path.basename(source), os.path.dirname(destination)))
        # check for destination directory
        if not os.path.isdir(os.path.dirname(destination)):
            log.warning('Failed to move {} ERROR: {} does not exist.'.format(os.path.basename(source), os.path.dirname(destination)))
            return False

        # attempt to move the file
        try:
            shutil.move(source, destination)
        except OSError as e:
            log.warning('Failed to move {} ERROR: {}'.format(os.path.basename(source), e))
            return False
        return True

    def _createTempDir(self):
        """Create the temporary download directory if it does not exist."""
        if not os.path.isdir(DL_DIRECTORY):
            os.mkdir(DL_DIRECTORY)

    def downloadYouTube(self, fileName, destinationDirectory, link):
        """Download a video via yt_dlp and move it into destinationDirectory.

        Returns True on success, False otherwise.
        """
        tempFilePath = os.path.join(DL_DIRECTORY, fileName)
        destinationPath = os.path.join(destinationDirectory, fileName)
        options = {
            'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]',
            'default_search': 'auto',
            'restrictfilenames': True,
            'prefer_ffmpeg': True,
            'quiet': True,
            'no_warnings': True,
            'ignoreerrors': True,
            'noplaylist': True,
            'noprogress': True,
            'logger': logger.get_null_log('YouTube-DL'),
            'outtmpl': tempFilePath
        }
        log.info('Attempting to download video: {} from "{}". Please Wait...'.format(fileName, link))
        try:
            with yt_dlp.YoutubeDL(options) as youtube:
                youtube.extract_info(link, download=True)
        except Exception as e:
            log.warning('Something went wrong while getting trailer from {}. ERROR: {}'.format(link, e))
            return False

        if os.path.isfile(tempFilePath):
            self._moveTo(tempFilePath, destinationPath)
            return True
        log.warning('Failed to download from {}'.format(link))
        return False

    def downloadApple(self, fileName, destinationDirectory, link):
        """Stream an Apple trailer over HTTP to disk, then move it into place.

        Returns True on success, False otherwise.
        """
        log.info('Attempting to download video at "{}". Please Wait...'.format(link))
        tempPath = os.path.join(DL_DIRECTORY, fileName)
        destinationPath = os.path.join(destinationDirectory, fileName)
        headers = {'User-Agent': 'Quick_time/7.6.2'}
        try:
            with requests.get(link, stream=True, headers=headers, timeout=5) as response:
                response.raise_for_status()
                # Bug fix: a missing Content-Length header used to raise
                # TypeError (int(None)); treat it as 0 -> "too small".
                content_length = int(response.headers.get('Content-Length') or 0)
                if content_length < 1000000:
                    log.warning('File too small. URL: {} Content-Length: {}'.format(link, response.headers.get('Content-Length')))
                    return False
                with open(tempPath, 'wb') as tempFile:
                    for chunk in response.iter_content(chunk_size=1024 * 1024):
                        tempFile.write(chunk)
        except requests.exceptions.RequestException as e:
            # Bug fix: previously only HTTPError was caught, so timeouts and
            # connection failures propagated out of this method.
            log.warning('Encountered an HTTP error while downloading from: {} ERROR: {}'.format(link, e))
            return False
        except IOError as e:
            log.warning('Encountered an error while writing to disk. File: {} ERROR: {}'.format(tempPath, e))
            return False

        # Bug fix: previously returned None (falsy) when the move failed.
        if self._moveTo(tempPath, destinationPath):
            self.cleanUp()
            return True
        return False

    def download(self, fileName, destinationDirectory, link):
        """Dispatch to the appropriate downloader based on the link host."""
        if 'apple' in link.lower():
            return self.downloadApple(fileName, destinationDirectory, link)
        elif 'youtube' in link.lower() or 'vimeo' in link.lower():
            return self.downloadYouTube(fileName, destinationDirectory, link)
| jsaddiction/TrailerTech | downloaders/downloader.py | downloader.py | py | 4,543 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "utils.logger.get_log",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.logger",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "downloaders.DL_DIRECTORY... |
import pandas as pd
import matplotlib.pyplot as plt

# Load the CFPB housing dataset and plot loan amount against census tract.
housing = pd.read_csv('assets/CFPB Housing Data.csv')

census_tract = housing['Census Tract']
loan_amount = housing['Loan Amount']

plt.scatter(census_tract, loan_amount)
plt.xlabel("Census Tract")
plt.ylabel("Loan Amount")
plt.show()
| shacktemp/data_1_checks | kc_2.py | kc_2.py | py | 266 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.p... |
24598567986 | import hashlib
from collections import OrderedDict
class Perceiver:
    """Tracks a hashed snapshot of a perceptor's state across transitions.

    The state (a dict of key -> iterable of values) is canonicalised by
    sorting keys and values, then hashed, so logically-equal states always
    produce the same digest.
    """

    def __init__(self, perceptor):
        # Hash of the previously observed state; None until switch() runs.
        self.last_state = None
        self.perceptor = perceptor
        self.current_state = None
        self.keys = None
        self.sorted_keys(perceptor.get_state())
        self.perceive(perceptor.get_state())

    def perceive(self, state):
        """Hash *state* into a canonical SHA-1 hex digest (current_state)."""
        current_state = OrderedDict()
        for key in self.keys:
            # Sort values so the hash is independent of their input order.
            current_state[key] = sorted(state[key])
        # Bug fix: hashlib.sha1 requires bytes in Python 3, so encode the
        # deterministic str() of the ordered dict before hashing.
        val = hashlib.sha1(str(current_state).encode("utf-8"))
        self.current_state = val.hexdigest()

    def sorted_keys(self, state):
        """Cache the state's keys in sorted order.

        Bug fix: dict.keys() returns a view in Python 3 and has no .sort();
        use sorted() to get a proper list.
        """
        self.keys = sorted(state)

    def switch(self):
        """Remember the current state hash as the last observed one."""
        self.last_state = self.current_state
| DorAm1010/ReinforcementLearning_Q-Table | perceiver.py | perceiver.py | py | 857 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.OrderedDict",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 22,
"usage_type": "call"
}
] |
# coding: utf-8
from zfits import FactFits
from astropy.io import fits
import matplotlib.pyplot as plt

# Compare calibrated event data from zfits against the FACT-Tools reference.
fact_file = FactFits(
    "zfits/test_data/20160817_016.fits.fz", "zfits/test_data/20160817_030.drs.fits.gz"
)
reference = fits.open("zfits/test_data/20160817_016_calibrated.fits")

zfits_event = fact_file.get_data_calibrated(0)
reference_event = reference[1].data["DataCalibrated"][0].reshape((1440, -1))

# Top panel: both calibrated traces for pixel 0.
plt.subplot(2, 1, 1)
plt.plot(reference_event[0], label="FACT-Tools")
plt.plot(zfits_event[0], label="zfits")
plt.legend()

# Bottom panel: their ratio, with the expected 2000/4096 scale marked.
plt.subplot(2, 1, 2)
plt.axhline(2000 / 4096, color="C1")
plt.plot(zfits_event[0] / reference_event[0])
plt.ylabel("zfits / facttools")
plt.ylim(0.8, 1.2)

plt.savefig("comp2.png", dpi=300)
| fact-project/zfits | plot_comparison.py | plot_comparison.py | py | 645 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "zfits.FactFits",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.su... |
26253222037 | import os
from pathlib import Path
from mne_bids import BIDSPath
from seek_localize import read_dig_bids
from seek_localize.bids import write_dig_bids
# BIDS entities
subject = 'la02'
session = 'presurgery'
acquisition = 'seeg'
datatype = 'ieeg'
space = 'fs'

# paths to test files
cwd = os.getcwd()
bids_root = Path(cwd) / 'data'
deriv_root = bids_root / 'derivatives'
mri_dir = deriv_root / 'freesurfer' / f'sub-{subject}' / 'mri'
subjects_dir = deriv_root / 'freesurfer'
# FreeSurfer parcellation / anatomy volumes used by the localization tests
desikan_fname = mri_dir / 'aparc+aseg.mgz'
destrieux_fname = mri_dir / 'aparc.a2009s+aseg.mgz'
wmparc_fname = mri_dir / 'wmparc.mgz'
T1mgz = mri_dir / 'T1.mgz'

# path to BIDs electrodes tsv file in test dataset
# NOTE: should not be used directly, always copy to temp directory
_bids_path = BIDSPath(subject=subject, session=session,
                      acquisition=acquisition, datatype=datatype,
                      space=space, root=bids_root,
                      suffix='electrodes', extension='.tsv')
def test_bids_write(_temp_bids_root):
    """Round-trip electrodes.tsv / coordsystem.json through seek_localize.

    Reads the reference sensors, writes them back out under a new subject,
    reads them again, and checks everything except the file paths matches.
    """
    bids_path = _bids_path.copy().update(root=_temp_bids_root)

    sensors = read_dig_bids(bids_path, root=_temp_bids_root)

    elec_bids_path = BIDSPath(subject='02',
                              session=bids_path.session,
                              space=bids_path.space,
                              acquisition=bids_path.acquisition,
                              datatype=bids_path.datatype,
                              root=_temp_bids_root,
                              suffix='electrodes',
                              extension='.tsv')

    write_dig_bids(elec_bids_path, root=_temp_bids_root,
                   ch_names=sensors.ch_names,
                   ch_coords=sensors.get_coords(),
                   unit=sensors.coord_unit,
                   coord_system=sensors.coord_system,
                   intended_for=sensors.intended_for)

    new_sensors = read_dig_bids(elec_bids_path, root=_temp_bids_root)

    # The two Sensors objects should agree on everything except the
    # filenames, which necessarily differ between the two subjects.
    skipped = ('elecs_fname', 'coordsystem_fname')
    assert all(value == new_sensors.__dict__[key]
               for key, value in sensors.__dict__.items()
               if key not in skipped)
| adam2392/seek_localize | tests/test_bids.py | test_bids.py | py | 2,516 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mne_bids.BIDSPath",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "seek_localize.read_dig_bids"... |
12967072447 | import torch
from termcolor import colored
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import os
def visual_representation(ckpt_path=None, use_3d=False):
    """Load the pretrained 3D visual representation from a checkpoint.

    Returns the full (DataParallel-wrapped) 3D encoder when ``use_3d`` is
    True, otherwise just its 2D feature-extraction backbone.
    """
    if ckpt_path is None:
        ckpt_path = "checkpoints/videoae_co3d.tar"
    if not os.path.exists(ckpt_path):
        raise FileNotFoundError("Checkpoint path does not exist")

    encoder_3d = nn.DataParallel(Encoder3D())
    checkpoint = torch.load(ckpt_path)
    encoder_3d.load_state_dict(checkpoint['encoder_3d'])
    print(colored(">>pretrained 3D visual representation is loaded.", "red"))

    if use_3d:
        return encoder_3d
    return encoder_3d.module.feature_extraction
def get_resnet18():
    """Return an ImageNet-pretrained ResNet-18 trunk (conv stages only).

    The final avgpool/fc layers are dropped, and the last stage's first
    block has its stride reduced to 1 to keep a larger feature map.
    """
    backbone = torchvision.models.resnet18(pretrained=True)
    layers = list(backbone.children())[:-2]
    feature = nn.Sequential(*layers)
    feature[7][0].conv1.stride = (1, 1)
    feature[7][0].downsample[0].stride = (1, 1)
    return feature
class Encoder3D(nn.Module):
    """
    Encoder 3D v2.

    Wraps a 2D ResNet-18 feature extractor and can optionally lift its 2D
    feature map into a 3D feature volume via two transposed 3D convolutions.
    """
    def __init__(self, args=None):
        super(Encoder3D, self).__init__()
        self.backbone = "resnet18"
        if self.backbone=="resnet18":
            self.feature_extraction = get_resnet18()
            # Upsampling path for the 3D volume: channels 64 -> 48 -> 32.
            self.conv3d_1 = nn.ConvTranspose3d(64, 48, 4, stride=2, padding=1)
            self.conv3d_2 = nn.ConvTranspose3d(48, 32, 4, stride=2, padding=1)
        else:
            raise NotImplementedError

    def forward(self, img, use_3d=False):
        # 2D feature map from the ResNet trunk.
        z_2d = self.feature_extraction(img)
        if use_3d:
            B,C,H,W = z_2d.shape
            # Fold the channel dimension into (channels, depth) to form a
            # 3D volume before the transposed convolutions.
            # NOTE(review): the resnet50 branch below is unreachable —
            # __init__ only accepts the "resnet18" backbone.
            if self.backbone=="resnet18":
                z_3d = z_2d.reshape([-1, 64, 8, H, W])
            elif self.backbone=="resnet50":
                z_3d = z_2d.reshape([-1, 256, 8, H, W])
            z_3d = F.leaky_relu(self.conv3d_1(z_3d))
            z_3d = F.leaky_relu(self.conv3d_2(z_3d))
            return z_3d
        else:
            return z_2d
| YanjieZe/rl3d | load_3d.py | load_3d.py | py | 2,036 | python | en | code | 63 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.DataParallel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
37312132142 | from flask import render_template,redirect
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask import Blueprint, current_app, jsonify, make_response, request
# 将model添加成视图,并控制在前端的显示
from myapp.models.model_serving import Service,KfService
from myapp.models.model_team import Project,Project_User
from myapp.utils import core
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from flask_appbuilder.actions import action
from myapp import app, appbuilder,db,event_logger
import logging
import re
import uuid
import requests
from myapp.exceptions import MyappException
from flask_appbuilder.security.decorators import has_access
from myapp.models.model_job import Repository
from flask_wtf.file import FileAllowed, FileField, FileRequired
from werkzeug.datastructures import FileStorage
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from myapp import security_manager
import os,sys
from wtforms.validators import DataRequired, Length, NumberRange, Optional,Regexp
from wtforms import BooleanField, IntegerField, SelectField, StringField,FloatField,DateField,DateTimeField,SelectMultipleField,FormField,FieldList
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget,BS3PasswordFieldWidget,DatePickerWidget,DateTimePickerWidget,Select2ManyWidget,Select2Widget
from myapp.forms import MyBS3TextAreaFieldWidget,MySelect2Widget,MyCodeArea,MyLineSeparatedListField,MyJSONField,MyBS3TextFieldWidget,MySelectMultipleField
from myapp.utils.py import py_k8s
import os, zipfile
import shutil
from flask import (
current_app,
abort,
flash,
g,
Markup,
make_response,
redirect,
render_template,
request,
send_from_directory,
Response,
url_for,
)
from .base import (
DeleteMixin,
api,
BaseMyappView,
check_ownership,
CsvResponse,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
MyappFilter,
MyappModelView,
)
from sqlalchemy import and_, or_, select
from .baseApi import (
MyappModelRestApi
)
import kubernetes
from kfserving import KFServingClient
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from flask_appbuilder import CompactCRUDMixin, expose
import pysnooper,datetime,time,json
conf = app.config
class KfService_ModelView(MyappModelView):
    """CRUD view for KFServing ``InferenceService`` objects.

    Handles creation, deployment, canary rollout and deletion of the
    ``inferenceservice`` custom resource on the configured Kubernetes cluster.
    """
    datamodel = SQLAInterface(KfService)
    crd_name = 'inferenceservice'
    help_url = conf.get('HELP_URL', {}).get(datamodel.obj.__tablename__, '') if datamodel else ''
    show_columns = ['name', 'label','service_type','default_service','canary_service','canary_traffic_percent','k8s_yaml']
    add_columns = ['name', 'label', 'service_type','default_service','canary_service','canary_traffic_percent']
    list_columns = ['label_url','host','service','deploy','status','roll']
    edit_columns = add_columns
    base_order = ('id','desc')
    order_columns = ['id']

    @expose('/deploy1/<kfservice_id>',methods=['POST',"GET"])
    def deploy1(self,kfservice_id):
        # Deploy by building the CRD payload dict by hand and posting it
        # through the raw k8s client (legacy path; see deploy() below).
        mykfservice = db.session.query(KfService).filter_by(id=kfservice_id).first()

        from myapp.utils.py.py_k8s import K8s
        k8s = K8s(mykfservice.project.cluster.get('KUBECONFIG',''))
        namespace = conf.get('KFSERVING_NAMESPACE')
        crd_info = conf.get('CRD_INFO')['inferenceservice']

        # Delete any existing CRD with the same name before re-creating it.
        crd_list = k8s.get_crd(group=crd_info['group'], version=crd_info['version'], plural=crd_info['plural'],
                               namespace=namespace)
        for crd_obj in crd_list:
            if crd_obj['name'] == mykfservice.name:
                k8s.delete_crd(group=crd_info['group'], version=crd_info['version'], plural=crd_info['plural'],
                               namespace=namespace, name=mykfservice.name)

        def get_env(env_str):
            # Parse "KEY=VALUE" pairs, one per line, into [key, value] lists.
            if not env_str:
                return []
            envs = re.split('\r|\n', env_str)
            envs = [env.split('=') for env in envs if env and len(env.split('=')) == 2]
            return envs

        def get_kfjson(service,mykfservice):
            # Build the custom endpoint spec dict for one service record,
            # or None when the service is absent.
            if not service:
                return None

            # Image pull secrets: global config plus the user's repositories.
            image_secrets = conf.get('HUBSECRET', [])
            user_hubsecrets = db.session.query(Repository.hubsecret).filter(Repository.created_by_fk == g.user.id).all()
            if user_hubsecrets:
                for hubsecret in user_hubsecrets:
                    if hubsecret[0] not in image_secrets:
                        image_secrets.append(hubsecret[0])

            kfjson={
                "minReplicas": service.min_replicas,
                "maxReplicas": service.max_replicas,
                "custom": {
                    "affinity": {
                        "nodeAffinity": {
                            "requiredDuringSchedulingIgnoredDuringExecution": {
                                "nodeSelectorTerms": [
                                    {
                                        "matchExpressions": [
                                            {
                                                "key": "gpu" if core.get_gpu(service.resource_gpu)[0] else "cpu",
                                                "operator": "In",
                                                "values": [
                                                    "true"
                                                ]
                                            },
                                        ]
                                    }
                                ]
                            }
                        },
                    },
                    "imagePullSecrets": [{"name":hubsecret} for hubsecret in image_secrets],
                    "container": {
                        "image": service.images,
                        "imagePullPolicy": conf.get('IMAGE_PULL_POLICY','Always'),
                        "name": mykfservice.name+"-"+service.name,
                        "workingDir": service.working_dir if service.working_dir else None,
                        "command": ["sh", "-c",service.command] if service.command else None,
                        "resources": {
                            "requests": {
                                "cpu": service.resource_cpu,
                                "memory": service.resource_memory
                            }
                        },
                        "env":[{"name":env[0],"value":env[1]} for env in get_env(service.env)],
                        # "volumeMounts": [
                        #     {
                        #         "mountPath": "/mnt/%s" % service.created_by.username,
                        #         "name": "workspace",
                        #         "subPath": service.created_by.username
                        #     }
                        # ],
                        # "volumeDevices":[
                        #     {
                        #         "devicePath": "/data/home/",
                        #         "name": "workspace"
                        #     }
                        # ]
                    }
                    # "volumes": [
                    #     {
                    #         "name": "workspace",
                    #         "persistentVolumeClaim": {
                    #             "claimName": "kubeflow-user-workspace"
                    #         }
                    #     }
                    # ]
                }
            }
            return kfjson

        crd_json={
            "apiVersion": "serving.kubeflow.org/v1alpha2",
            "kind": "InferenceService",
            "metadata": {
                "labels": {
                    "app": mykfservice.name
                },
                "name": mykfservice.name,
                "namespace": namespace
            },
            "spec": {
                "canaryTrafficPercent": mykfservice.canary_traffic_percent,
                "default": {
                    mykfservice.service_type: get_kfjson(mykfservice.default_service,mykfservice)
                },
                "canary": {
                    mykfservice.service_type: get_kfjson(mykfservice.canary_service,mykfservice),
                } if mykfservice.canary_service else None,
            }
        }

        import yaml
        # Round-trip through JSON/YAML mainly to log a readable manifest.
        ya = yaml.load(json.dumps(crd_json))
        ya_str = yaml.safe_dump(ya, default_flow_style=False)
        logging.info(ya_str)
        crd_objects = k8s.create_crd(group=crd_info['group'],version=crd_info['version'],plural=crd_info['plural'],namespace=namespace,body=crd_json)
        flash(category='warning',message='部署启动,一分钟后部署完成')

        return redirect('/kfservice_modelview/list/')

    # Create the kfserving InferenceService through the official client.
    @expose('/deploy/<kfservice_id>', methods=['POST', "GET"])
    def deploy(self, kfservice_id):
        mykfservice = db.session.query(KfService).filter_by(id=kfservice_id).first()
        namespace = conf.get('KFSERVING_NAMESPACE')
        crd_info = conf.get('CRD_INFO')['inferenceservice']

        # Build a container spec from a service record.
        def make_container(service,mykfservice):
            from myapp.utils.py.py_k8s import K8s
            k8s = K8s()  # build-only: no cluster config needed here
            container = k8s.make_container(name=mykfservice.name + "-" + service.name,
                                           command=["sh", "-c",service.command] if service.command else None,
                                           args=None,
                                           volume_mount=None,
                                           image_pull_policy=conf.get('IMAGE_PULL_POLICY','Always'),
                                           image=service.images,
                                           working_dir=service.working_dir if service.working_dir else None,
                                           env=service.env,
                                           resource_memory=service.resource_memory,
                                           resource_cpu = service.resource_cpu,
                                           resource_gpu= service.resource_gpu,
                                           username = service.created_by.username
                                           )
            return container

        api_version = crd_info['group'] + '/' + crd_info['version']
        default_endpoint_spec = V1alpha2EndpointSpec(
            predictor=V1alpha2PredictorSpec(
                min_replicas= mykfservice.default_service.min_replicas,
                max_replicas=mykfservice.default_service.max_replicas,
                custom=V1alpha2CustomSpec(
                    container=make_container(mykfservice.default_service,mykfservice)
                )
            )
        ) if mykfservice.default_service else None
        canary_endpoint_spec = V1alpha2EndpointSpec(
            predictor= V1alpha2PredictorSpec(
                min_replicas=mykfservice.canary_service.min_replicas,
                max_replicas=mykfservice.canary_service.max_replicas,
                custom=V1alpha2CustomSpec(
                    container=make_container(mykfservice.canary_service,mykfservice)
                )
            )
        ) if mykfservice.canary_service else None

        metadata = kubernetes.client.V1ObjectMeta(
            name=mykfservice.name,
            labels={
                "app":mykfservice.name,
                "rtx-user":mykfservice.created_by.username
            },
            namespace=namespace
        )

        isvc = V1alpha2InferenceService(
            api_version=api_version,
            kind=crd_info['kind'],
            metadata=metadata,
            spec=V1alpha2InferenceServiceSpec(
                default=default_endpoint_spec,
                canary=canary_endpoint_spec,
                canary_traffic_percent=mykfservice.canary_traffic_percent
            )
        )

        KFServing = KFServingClient()
        try:
            # Delete any pre-existing service of the same name first.
            KFServing.delete(mykfservice.name, namespace=namespace,version=crd_info['version'])
        except Exception as e:
            print(e)

        KFServing.create(isvc,namespace=namespace,version=crd_info['version'])

        flash(category='warning', message='部署启动,一分钟后部署完成')
        return redirect('/kfservice_modelview/list/')

    # Canary rollout: shift the configured traffic share to the canary.
    @expose('/roll/<kfservice_id>', methods=['POST', "GET"])
    def roll(self, kfservice_id):
        mykfservice = db.session.query(KfService).filter_by(id=kfservice_id).first()
        namespace = conf.get('KFSERVING_NAMESPACE')
        crd_info = conf.get('CRD_INFO')['inferenceservice']

        # Build a container spec from a service record.
        def make_container(service, mykfservice):
            from myapp.utils.py.py_k8s import K8s
            k8s = K8s()  # build-only: no cluster config needed here
            container = k8s.make_container(name=mykfservice.name + "-" + service.name,
                                           command=["sh", "-c", service.command] if service.command else None,
                                           args=None,
                                           volume_mount=None,
                                           image_pull_policy=conf.get('IMAGE_PULL_POLICY','Always'),
                                           image=service.images,
                                           working_dir=service.working_dir if service.working_dir else None,
                                           env=service.env,
                                           resource_memory=service.resource_memory,
                                           resource_cpu=service.resource_cpu,
                                           resource_gpu=service.resource_gpu,
                                           username=service.created_by.username,
                                           ports = service.ports
                                           )
            return container

        canary_endpoint_spec = V1alpha2EndpointSpec(
            predictor=V1alpha2PredictorSpec(
                min_replicas=mykfservice.canary_service.min_replicas,
                max_replicas=mykfservice.canary_service.max_replicas,
                custom=V1alpha2CustomSpec(
                    container=make_container(mykfservice.canary_service, mykfservice)
                )
            )
        ) if mykfservice.canary_service else None

        KFServing = KFServingClient()
        KFServing.rollout_canary(mykfservice.name, canary=canary_endpoint_spec, percent=mykfservice.canary_traffic_percent,
                                 namespace=namespace, timeout_seconds=120,version=crd_info['version'])

        flash(category='warning', message='滚动升级已配置,刷新查看当前流量比例')
        return redirect('/kfservice_modelview/list/')

    # Base bulk delete: remove the cluster-side CRD for each item.
    # @pysnooper.snoop()
    def base_muldelete(self,items):
        if not items:
            abort(404)
        for item in items:
            try:
                k8s_client = py_k8s.K8s(item.project.cluster.get('KUBECONFIG',''))
                crd_info = conf.get("CRD_INFO", {}).get(self.crd_name, {})
                if crd_info:
                    k8s_client.delete_crd(group=crd_info['group'],version=crd_info['version'],plural=crd_info['plural'],namespace=conf.get('KFSERVING_NAMESPACE'),name=item.name)
            except Exception as e:
                flash(str(e), "danger")

    def pre_delete(self,item):
        # Clean up the cluster-side CRD before the DB row is deleted.
        self.base_muldelete([item])

    # @event_logger.log_this
    # @expose("/delete/<pk>")
    # @has_access
    # def delete(self, pk):
    #     pk = self._deserialize_pk_if_composite(pk)
    #     self.base_delete(pk)
    #     url = url_for(f"{self.endpoint}.list")
    #     return redirect(url)
# Register the KFServing CRUD view under the "服务化" (model serving) menu.
appbuilder.add_view(KfService_ModelView,"kfserving",icon = 'fa-tasks',category = '服务化')
| wujiapei/alldata | dataAI/mlops/myapp/views/view_kfserving.py | view_kfserving.py | py | 16,210 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "myapp.app.config",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "myapp.app",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "base.MyappModelView",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flask_appbuilder.... |
71446573473 | # 257. Binary Tree Paths
# Easy
#
# 2497
#
# 129
#
# Add to List
#
# Share
# Given the root of a binary tree, return all root-to-leaf paths in any order.
#
# A leaf is a node with no children.
#
#
#
# Example 1:
#
#
# Input: root = [1,2,3,null,5]
# Output: ["1->2->5","1->3"]
# Example 2:
#
# Input: root = [1]
# Output: ["1"]
# Definition for a binary tree node.
from typing import List
class TreeNode:
    """A binary tree node holding a value and optional child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    def binaryTreePaths(self, root: TreeNode) -> List[str]:
        """Return every root-to-leaf path rendered as "v1->v2->...->leaf"."""
        if not root:
            return []

        results = []
        # Iterative preorder walk; each stack entry carries the path so far.
        stack = [(root, str(root.val))]
        while stack:
            node, path = stack.pop()
            if not node.left and not node.right:
                results.append(path)
            # Push right before left so left subtrees are expanded first,
            # matching the recursive traversal order.
            if node.right:
                stack.append((node.right, path + "->" + str(node.right.val)))
            if node.left:
                stack.append((node.left, path + "->" + str(node.left.val)))
        return results
| laiqjafri/LeetCode | problems/00257_binary_tree_paths.py | 00257_binary_tree_paths.py | py | 1,200 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 40,
"usage_type": "name"
}
] |
29926890398 | import numpy as np
import astropy.units as u
from eventio import EventIOFile
from eventio.simtel import MCShower
from astropy.coordinates import SkyCoord, AltAz
from astropy.time import Time
__all__ = [
"find_nearest_bin",
"create_angular_area_scaling",
"poisson_likelihood_gaussian",
"tensor_poisson_likelihood",
"create_xmax_scaling",
"xmax_expectation",
"rotate_translate",
]
def find_nearest_bin(array, value):
    """
    Return the element of *array* closest to *value*.

    :param array: ndarray
        Array to search
    :param value: float
        Search value
    :return: float
        Nearest bin value
    """
    nearest_index = int(np.argmin(np.abs(array - value)))
    return array[nearest_index]
def rotate_translate(pixel_pos_x, pixel_pos_y, x_trans, y_trans, phi):
    """
    Rotate and translate arrays of pixel positions.

    Parameters
    ----------
    pixel_pos_x: ndarray
        Pixel x positions
    pixel_pos_y: ndarray
        Pixel y positions
    x_trans: float
        Translation applied in x
    y_trans: float
        Translation applied in y
    phi: ndarray
        Rotation angles of the pixels

    Returns
    -------
    ndarray, ndarray: transformed pixel x and y coordinates
    """
    cos_phi = np.cos(phi[..., np.newaxis])
    sin_phi = np.sin(phi[..., np.newaxis])

    delta_x = x_trans - pixel_pos_x
    delta_y = y_trans - pixel_pos_y

    pixel_pos_trans_x = delta_x * cos_phi - delta_y * sin_phi
    pixel_pos_trans_y = -(delta_x * sin_phi + delta_y * cos_phi)
    return pixel_pos_trans_x, pixel_pos_trans_y
def xmax_expectation(energy):
    """Expected shower Xmax for the given energy (log-linear parametrisation)."""
    intercept, slope = 300, 93
    return intercept + slope * np.log10(energy)
def create_angular_area_scaling(offset_bins, max_viewcone_radius):
    """Map each offset bin centre to the ratio of the full simulated
    viewcone area to that bin's annulus area.

    :param offset_bins: Quantity array of offset bin centres
    :param max_viewcone_radius: Quantity, outer radius of the simulated viewcone
    :return: dict mapping bin-centre value -> area scale factor
    """
    # Argh this code is horrible, but need to account for the angular area contained in each offset bin
    offset_area_scale = {}
    if len(offset_bins) == 1:
        offset_area_scale[offset_bins[0].value] = 1
    else:

        def angular_area(rmin, rmax):
            # Area of the annulus between radii rmin and rmax.
            return np.pi * rmax**2 - np.pi * rmin**2

        total_area = angular_area(0 * u.deg, max_viewcone_radius)

        i = 0
        imax = offset_bins.shape[0] - 1
        # NOTE(review): np.diff returns an array of length len(bins)-1, so
        # `offset + diff` below is array-valued; this appears to assume
        # uniformly spaced bins — confirm with callers.
        diff = np.diff(offset_bins) / 2
        for offset in offset_bins:
            upper_bound = offset + diff
            if i < imax:
                upper_bound = offset + diff
            lower_bound = 0
            if i > 0:
                lower_bound = offset - diff
            print(upper_bound, lower_bound)  # NOTE(review): debug print left in
            ring_area = angular_area(lower_bound, upper_bound)
            offset_area_scale[offset.value] = total_area / ring_area
            i += 1
    return offset_area_scale
def create_xmax_scaling(xmax_bins, offset_bins, array_pointing, filename):
    """Count simulated showers per (xmax-residual, offset) bin in an EventIO
    file and return per-bin weights (total showers / bin count).

    :param xmax_bins: ndarray of xmax-residual bin centres
    :param offset_bins: Quantity array of offset bin centres (deg)
    :param array_pointing: SkyCoord of the array pointing direction
    :param filename: path to the sim_telarray EventIO file
    :return: dict mapping (xmax_bin, offset_bin) -> scale factor
    """
    output_dict = {}
    shower_count = 0

    with EventIOFile(filename) as f:
        # Fixed dummy time: only the relative alt/az separation matters here.
        dummy_time = Time("2010-01-01T00:00:00", format="isot", scale="utc")

        for o in f:
            if isinstance(o, MCShower):
                mc_shower = o.parse()

                energy = mc_shower["energy"]
                xmax_exp = xmax_expectation(energy)
                zenith = (np.pi / 2) - mc_shower["altitude"]
                # Slant-depth correction for the shower zenith angle.
                xmax = mc_shower["xmax"] / np.cos(zenith)
                xmax_bin = find_nearest_bin(xmax_bins, xmax - xmax_exp)

                shower_direction = SkyCoord(
                    alt=mc_shower["altitude"] * u.rad,
                    az=mc_shower["azimuth"] * u.rad,
                    frame=AltAz(obstime=dummy_time),
                )
                offset = array_pointing.separation(shower_direction).to(u.deg).value
                offset_bin = find_nearest_bin(offset_bins.value, offset)
                # print(offset, offset_bin, xmax, xmax_exp, xmax-xmax_exp, xmax_bin, np.rad2deg(zenith))
                key = xmax_bin, offset_bin
                if key in output_dict.keys():
                    output_dict[key] += 1
                else:
                    output_dict[key] = 1
                shower_count += 1

    # Invert counts into weights relative to the total number of showers.
    for key in output_dict.keys():
        output_dict[key] = float(shower_count) / output_dict[key]

    return output_dict
def poisson_likelihood_gaussian(image, prediction, spe_width=0.5, ped=1):
    """Gaussian approximation to the per-pixel Poisson likelihood.

    Returns -2 ln(L) element-wise, where L is a Gaussian whose variance
    combines the pedestal width with the prediction scaled by the
    single-photoelectron width.
    """
    image = np.asarray(image)
    prediction = np.asarray(prediction)
    spe_width = np.asarray(spe_width)
    ped = np.asarray(ped)

    variance = np.power(ped, 2) + prediction * (1 + np.power(spe_width, 2))
    norm = 1.0 / np.sqrt(2 * np.pi * variance)
    gauss = np.asarray(np.exp(-np.power(image - prediction, 2.0) / (2 * variance)))

    # Clamp to the smallest positive float so the log never sees zero.
    min_prob = np.finfo(gauss.dtype).tiny
    gauss[gauss < min_prob] = min_prob

    return -2 * np.log(norm * gauss)
def tensor_poisson_likelihood(image, prediction, spe_width=0.5, ped=1):
    """Keras/TensorFlow mirror of ``poisson_likelihood_gaussian``:
    returns the mean -2 ln(likelihood) as a differentiable tensor, suitable
    for use as a training loss.
    """
    import keras.backend as K
    import tensorflow as tf

    # Clamp the prediction away from zero to keep sqrt/log finite.
    prediction = tf.clip_by_value(prediction, 1e-6, 1e9)

    sq = 1.0 / K.sqrt(
        2.0 * np.pi * (K.square(ped) + prediction * (1.0 + K.square(spe_width)))
    )
    diff = K.square(image - prediction)
    denom = 2.0 * (K.square(ped) + prediction * (1 + K.square(spe_width)))
    expo = K.exp(-1 * diff / denom)
    # Bound the exponential to avoid log(0) / overflow in the loss.
    expo = tf.clip_by_value(expo, 1e-20, 100)

    return K.mean(-2 * K.log(sq * expo))
| ParsonsRD/template_builder | template_builder/utilities.py | utilities.py | py | 5,442 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "numpy.abs",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number":... |
22737252153 | from os import link
from bs4 import BeautifulSoup
from command import Command
import requests
import discord
# Every module has to have a command list: the bot core reads it to discover
# this module's commands and their help text.
commandList = []
commandList.append(Command("!score", "score", "Used to see the score of live games or the betting odds of an upcoming game.\nNote: if trying to use this command on a team that is not in season it will not give their most recent final score.\nUsage: `!score <TEAM>`\nThe bot will then give either an upcoming game or a live game."))
async def score(client, message):
    """Handle `!score <TEAM>`.

    Locates the team's ESPN page via a Google search, follows it to the
    latest/upcoming game page, and replies with a Discord embed showing either
    the live score or the pre-game betting lines.
    """
    contents = message.content.split(" ")
    if len(contents) < 2:
        await message.channel.send("Please give an argument.")
        return  # bug fix: previously fell through and scraped with no team
    # Build the "a+b+c" search term from the remaining words.
    team = ""
    for i in range(1, len(contents)-1):
        team += contents[i] + "+"
    team += contents[len(contents)-1]
    searchURL = "https://www.google.com/search?q=espn+" + team
    html = requests.get(searchURL)
    soup = BeautifulSoup(html.content, 'html.parser')
    links = soup.find_all('a')
    # Now do a new soup with the espn team page.
    # NOTE(review): links[16] assumes a fixed Google results layout -- fragile.
    searchURL = links[16].get('href')[7:]
    if (searchURL.find("team") == -1):
        await message.channel.send("Invalid team, could not be found on ESPN.")
        return
    html = requests.get(searchURL)
    soup = BeautifulSoup(html.content, 'html.parser')
    # Now get the latest game page from the club schedule section.
    for p in soup.find_all("section", class_="club-schedule"):
        textList = p.find_all("a")
        searchURL = "https://www.espn.com" + textList[1].get('href')
    html = requests.get(searchURL)
    soup = BeautifulSoup(html.content, 'html.parser')
    scores = soup.find_all('div', class_='score-container')
    teamStr = []
    time = ""
    for t in soup.find_all('div', class_='team-container'):
        teams = t.find_all('span')
        teamStr.append(teams[0].text + " " + teams[1].text)
    # A non-empty betting "line" means the game has not started yet.
    line = ""
    for s in soup.find_all('div', class_='game-status'):
        span = s.find_all('span', class_='line')
        if len(span) != 0:
            line = span[0].text
    teamRecords = []
    teamLines = []
    teamMoneyLines = []
    over = ""
    if line != "":
        # Upcoming game: pull records, spreads, money lines and the over/under.
        for s in soup.find_all('div', class_='pick-center-content'):
            records = s.find_all('p', class_='record')
            teamRecords.append(records[0].text)
            teamRecords.append(records[1].text)
            rows = s.find_all('td', class_="score")
            teamLines.append(rows[3].text)
            teamLines.append(rows[8].text)
            teamMoneyLines.append(rows[4].text.strip())
            teamMoneyLines.append(rows[9].text.strip())
            over = rows[5].text
    else:
        # Live game: only the team records are available here.
        for t in soup.find_all('div', class_='team-container'):
            r = t.find_all('div', class_='record')
            teamRecords.append(r[0].text)
    # Work out a human readable game time.
    for s in soup.find_all('div', class_='game-status'):
        span = s.find_all('span', class_='game-time')
        if len(span) == 0 or span[0].text == "":
            span = s.find_all('span')
        else:
            time = span[0].text
            break
        for sp in span:
            if sp.has_attr('data-date'):
                time = sp.get('data-date')
                break
        if time != "":
            # Convert the UTC timestamp to a local, readable time via Google.
            newSearch = "https://www.google.com/search?q=What+time+is+" + time[11:16] + "+utc"
            html = requests.get(newSearch)
            soup = BeautifulSoup(html.content, 'html.parser')
            temp = soup.find_all('div', class_="BNeawe iBp4i AP7Wnd")
            newTime = temp[0].text.split(" ")[0] + " " + temp[0].text.split(" ")[1]
            date = time[5:10]
            time = newTime + " on " + date
            break
    # Handling baseball: insert a space before the trailing "out(s)" marker.
    if (time.find("outs") != -1):
        time = time[:-6] + " " + time[-6:]
    elif (time.find("out") != -1):
        time = time[:-5] + " " + time[-5:]
    embed = discord.Embed(title = "Gamecast", description=time, colour = discord.Colour.red(), url = searchURL)
    embed.set_author(name=message.author.display_name, icon_url=message.author.avatar_url)
    if line == "": # This means game is live
        embed.add_field(name="**" + teamStr[0] + " " + scores[0].text + "**", value=teamRecords[0], inline=True)
        embed.add_field(name="**" + teamStr[1] + " " + scores[1].text + "**", value=teamRecords[1], inline=True)
    else:
        embed.add_field(name="**" + teamStr[0] + "**", value=teamRecords[0] + "\nSpread: " + teamLines[0] + "\nMoney Line: " + teamMoneyLines[0], inline=True)
        # Bug fix: the second team's field was missing the "Spread:" and
        # "Money Line:" labels that the first team's field has.
        embed.add_field(name="**" + teamStr[1] + "**", value=teamRecords[1] + "\nSpread: " + teamLines[1] + "\nMoney Line: " + teamMoneyLines[1], inline=True)
        embed.add_field(name="Over/Under", value=over, inline=False)
    await message.channel.send(embed=embed)
# Register the !player command and its help text with the bot core.
commandList.append(Command("!player", "player", "Used to view a players season stats as well as their career stats.\nUsage: `!player <NAME>`."))
async def player(client, message):
    """Handle `!player <NAME>`.

    Finds the player's ESPN stats page via a Google search and replies with a
    Discord embed showing the most recent season stats and, when available,
    career stats.
    """
    contents = message.content.split(" ")
    if len(contents) < 2:
        await message.channel.send("Please give an argument.")
        return  # bug fix: previously fell through and searched with no name
    # Build the "first+last+stats" search term.
    player = ""
    for i in range(1, len(contents)):
        player += contents[i] + "+"
    player += "stats"
    searchURL = "https://www.google.com/search?q=espn+" + player
    html = requests.get(searchURL)
    soup = BeautifulSoup(html.content, 'html.parser')
    links = soup.find_all('a')
    # Now do a new soup with the espn player page.
    # NOTE(review): links[16] assumes a fixed Google results layout -- fragile.
    searchURL = links[16].get('href')
    if (searchURL[:5] != "https"):
        searchURL = searchURL[7:]
    index = searchURL.find("player")
    if (index == -1):
        await message.channel.send("Invalid player, could not be found on ESPN.")
        return
    # Force the "/stats/" variant of the player URL if we did not land on it.
    if (searchURL.find("stats") == -1):
        searchURL = searchURL[:index+6] + "/stats/" + searchURL[index+7:]
    html = requests.get(searchURL)
    soup = BeautifulSoup(html.content, 'html.parser')
    count = 0
    shift = 0  # set to 1 when the table has both a career and a season-average row
    # Bug fix: initialise these so a page with fewer tables than expected
    # cannot raise NameError below.
    statNames = []
    recentRow = []
    careerRow = []
    recentSeason = []
    for t in soup.find_all('table'):
        row = t.find_all('tr')
        if count == 1:
            statNames = row[0].find_all('th')
            if shift == 1:
                careerRow = row[len(row)-2].find_all('td')
                recentRow = row[len(row)-3].find_all('td')
            else :
                careerRow = row[len(row)-1].find_all('td')
                recentRow = row[len(row)-2].find_all('td')
        elif count == 0:
            recentSeason = row[len(row)-2].find_all('td')
            if recentSeason[0].text == "Career":
                recentSeason = row[len(row)-3].find_all('td')
                shift = 1
        else:
            break
        count += 1
    if not recentSeason or not statNames:
        # Robustness: the page layout did not match what we expect.
        await message.channel.send("Could not parse stats for that player.")
        return
    # Build the "Stat: value" lines for the most recent season.
    recent = ""
    for i in range(len(statNames)):
        recent += "" + statNames[i].text + ": " + recentRow[i].text + "\n"
    if (len(careerRow) != 0):
        career = ""
        for i in range(len(statNames)):
            career += "" + statNames[i].text + ": " + careerRow[i].text + "\n"
    playerName = ""
    for s in soup.find_all('h1', class_='PlayerHeader__Name flex flex-column ttu fw-bold pr4 h2'):
        spans = s.find_all('span')
        playerName += spans[0].text + " " + spans[1].text
    embed = discord.Embed(title = playerName, description="Shows most recent or current season stats and career stats for pro athletes.", colour = discord.Colour.purple(), url = searchURL)
    embed.set_author(name=message.author.display_name, icon_url=message.author.avatar_url)
    embed.add_field(name=recentSeason[0].text + " " + recentSeason[1].text, value = recent, inline = True)
    if (len(careerRow) != 0):
        embed.add_field(name="Career", value = career, inline = True)
    await message.channel.send(embed=embed)
{
"api_name": "command.Command",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"li... |
73033920353 | # -*- coding: utf-8 -*-
'''
Simple returner for Couchbase. Optional configuration
settings are listed below, along with sane defaults.
couchbase.host: 'salt'
couchbase.port: 8091
couchbase.bucket: 'salt'
couchbase.skip_verify_views: False
To use the couchbase returner, append '--return couchbase' to the salt command. ex:
salt '*' test.ping --return couchbase
All of the return data will be stored in documents as follows:
JID
===
load: load obj
tgt_minions: list of minions targeted
nocache: should we not cache the return data
JID/MINION_ID
=============
return: return_data
out: out_data
'''
from __future__ import absolute_import
import logging
try:
import couchbase
HAS_DEPS = True
except ImportError:
HAS_DEPS = False
# Import salt libs
import salt.utils
import salt.utils.jid
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'couchbase'
# some globals
COUCHBASE_CONN = None  # cached connection, created lazily by _get_connection()
DESIGN_NAME = 'couchbase_returner'  # design document holding our map views
VERIFIED_VIEWS = False  # set True once _verify_views() has confirmed the views
def __virtual__():
    '''
    Salt loader entry point: only expose this returner when the couchbase
    client library could be imported.
    '''
    if not HAS_DEPS:
        return False
    # try to load some faster json libraries. In order of fastest to slowest
    json = salt.utils.import_json()
    # Have the couchbase client (de)serialize with that json implementation.
    couchbase.set_json_converters(json.dumps, json.loads)
    return __virtualname__
def _get_options():
    '''
    Read the couchbase connection settings from the Salt configuration,
    falling back to sane defaults.
    '''
    defaults = {'host': 'salt', 'port': 8091, 'bucket': 'salt'}
    return {key: __opts__.get('couchbase.{0}'.format(key), default)
            for key, default in defaults.items()}
def _get_connection():
    '''
    Lazily create, cache and return the module-level couchbase connection.
    '''
    global COUCHBASE_CONN
    if COUCHBASE_CONN is not None:
        return COUCHBASE_CONN
    opts = _get_options()
    COUCHBASE_CONN = couchbase.Couchbase.connect(
        host=opts['host'], port=opts['port'], bucket=opts['bucket'])
    return COUCHBASE_CONN
def _verify_views():
    '''
    Verify that you have the views you need. This can be disabled by
    adding couchbase.skip_verify_views: True in config
    '''
    global VERIFIED_VIEWS

    if VERIFIED_VIEWS or __opts__.get('couchbase.skip_verify_views', False):
        return
    cb_ = _get_connection()
    # jids: emits one row per job document (keys without a "/" that have a
    #       load attribute).
    # jid_returns: emits (jid, minion_id) for every "<jid>/<minion>" return doc.
    ddoc = {'views': {'jids': {'map': "function (doc, meta) { if (meta.id.indexOf('/') === -1 && doc.load){ emit(meta.id, null) } }"},
                      'jid_returns': {'map': "function (doc, meta) { if (meta.id.indexOf('/') > -1){ key_parts = meta.id.split('/'); emit(key_parts[0], key_parts[1]); } }"}
                      }
            }

    try:
        curr_ddoc = cb_.design_get(DESIGN_NAME, use_devmode=False).value
        if curr_ddoc['views'] == ddoc['views']:
            # Design doc already matches; nothing to do.
            VERIFIED_VIEWS = True
            return
    except couchbase.exceptions.HTTPError:
        # Design doc does not exist yet; fall through and create it.
        pass

    cb_.design_create(DESIGN_NAME, ddoc, use_devmode=False)
    VERIFIED_VIEWS = True
def _get_ttl():
    '''
    TTL (in seconds) to store documents with, derived from the ``keep_jobs``
    setting, which is expressed in hours.
    '''
    hours = __opts__['keep_jobs']
    return hours * 3600
#TODO: add to returner docs-- this is a new one
def prep_jid(nocache=False, passed_jid=None):
    '''
    Return a job id and prepare the job id directory
    This is the function responsible for making sure jids don't collide (unless its passed a jid)
    So do what you have to do to make sure that stays the case
    '''
    if passed_jid is None:
        jid = salt.utils.jid.gen_jid()
    else:
        jid = passed_jid

    cb_ = _get_connection()

    try:
        # ``add`` fails when the key already exists, which is how a jid
        # collision is detected.
        cb_.add(str(jid),
                {'nocache': nocache},
                ttl=_get_ttl(),
                )
    except couchbase.exceptions.KeyExistsError:
        # TODO: some sort of sleep or something? Spinning is generally bad practice
        if passed_jid is None:
            # Generated jid collided -- retry with a freshly generated one.
            return prep_jid(nocache=nocache)
        # An explicitly passed jid is returned as-is even if it already exists.

    return jid
def returner(load):
    '''
    Return data to the local job cache
    '''
    cb_ = _get_connection()

    try:
        jid_doc = cb_.get(load['jid'])
        if jid_doc.value['nocache'] is True:
            # Job was prepped with nocache -- drop the return on the floor.
            return
    except couchbase.exceptions.NotFoundError:
        log.error(
            'An inconsistency occurred, a job was received with a job id '
            'that is not present in the local cache: {jid}'.format(**load)
        )
        return False

    # Per-minion returns are stored under the "<jid>/<minion_id>" key.
    hn_key = '{0}/{1}'.format(load['jid'], load['id'])
    try:
        ret_doc = {'return': load['return']}
        if 'out' in load:
            ret_doc['out'] = load['out']

        # ``add`` raises when the key exists, so a duplicate return from the
        # same minion is detected below.
        cb_.add(hn_key,
                ret_doc,
                ttl=_get_ttl(),
                )
    except couchbase.exceptions.KeyExistsError:
        log.error(
            'An extra return was detected from minion {0}, please verify '
            'the minion, this could be a replay attack'.format(
                load['id']
            )
        )
        return False
def save_load(jid, clear_load):
    '''
    Save the load to the specified jid
    '''
    cb_ = _get_connection()

    try:
        jid_doc = cb_.get(str(jid))
    except couchbase.exceptions.NotFoundError:
        log.warning('Could not write job cache file for jid: {0}'.format(jid))
        return False

    # if you have a tgt, save that for the UI etc
    if 'tgt' in clear_load:
        # NOTE(review): this relies on ``salt.utils.minions`` being importable
        # even though only ``salt.utils``/``salt.utils.jid`` are imported at
        # the top of the file -- confirm it is loaded as a side effect.
        ckminions = salt.utils.minions.CkMinions(__opts__)
        # Retrieve the minions list
        minions = ckminions.check_minions(
            clear_load['tgt'],
            clear_load.get('tgt_type', 'glob')
            )
        # save the minions to a cache so we can see in the UI
        jid_doc.value['minions'] = minions

    jid_doc.value['load'] = clear_load
    # CAS from the earlier get ensures we do not clobber a concurrent update.
    cb_.replace(str(jid),
                jid_doc.value,
                cas=jid_doc.cas,
                ttl=_get_ttl()
                )
def get_load(jid):
    '''
    Fetch the stored load for a jid, plus the targeted minions if recorded.
    '''
    cb_ = _get_connection()
    try:
        jid_doc = cb_.get(str(jid))
    except couchbase.exceptions.NotFoundError:
        return {}

    doc = jid_doc.value
    load = doc['load']
    if 'minions' in doc:
        load['Minions'] = doc['minions']
    return load
def get_jid(jid):
    '''
    Collect every minion return recorded for the given job id.
    '''
    cb_ = _get_connection()
    _verify_views()
    # The jid_returns view emits (jid, minion_id) with the return document.
    return {row.value: row.doc.value
            for row in cb_.query(DESIGN_NAME, 'jid_returns', key=str(jid), include_docs=True)}
def get_jids():
    '''
    Map every known job id to its formatted job description.
    '''
    cb_ = _get_connection()
    _verify_views()
    return {row.key: _format_jid_instance(row.key, row.doc.value['load'])
            for row in cb_.query(DESIGN_NAME, 'jids', include_docs=True)}
def _format_job_instance(job):
'''
Return a properly formatted job dict
'''
ret = {'Function': job.get('fun', 'unknown-function'),
'Arguments': list(job.get('arg', [])),
# unlikely but safeguard from invalid returns
'Target': job.get('tgt', 'unknown-target'),
'Target-type': job.get('tgt_type', []),
'User': job.get('user', 'root')}
if 'metadata' in job:
ret['Metadata'] = job.get('metadata', {})
else:
if 'kwargs' in job:
if 'metadata' in job['kwargs']:
ret['Metadata'] = job['kwargs'].get('metadata', {})
return ret
def _format_jid_instance(jid, job):
    '''
    Format a job dict and attach the start time derived from the jid.
    '''
    formatted = _format_job_instance(job)
    formatted['StartTime'] = salt.utils.jid.jid_to_time(jid)
    return formatted
| shineforever/ops | salt/salt/returners/couchbase_return.py | couchbase_return.py | py | 7,730 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "salt.utils.utils.import_json",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "salt.utils.utils",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "... |
42440455663 | import telebot
import sql_functions
import urllib.request
import json
import alice_vars
from alice_vars import bot
import bot_functions
def cat(message):
    """Reply to the chat with a random cat picture from thecatapi.com."""
    chat_id = message.chat.id
    # Admins get the admin keyboard, everybody else the default one.
    is_admin = sql_functions.check_user(alice_vars.db_name, 'Admins', chat_id)
    keyboard = alice_vars.keyboard_admin if is_admin else alice_vars.keyboard_default
    try:
        print(message.from_user.first_name, "requested for a kitty")
        photo = urllib.request.urlopen(
            'http://thecatapi.com/api/images/get')
        # chat_id < 0 -- presumably a group chat, sent without a reply
        # keyboard; chat_id > 0 -- a private chat with the keyboard attached.
        # TODO confirm the id-sign convention against the Telegram API docs.
        if chat_id < 0:
            bot.send_photo(chat_id, photo)
            print("Kitty launched.\n")
        elif chat_id > 0:
            bot.send_photo(chat_id, photo, reply_markup=keyboard)
            print("Kitty launched.\n")
    except Exception:
        bot.send_message(
            chat_id, "oops! something went wrong! try again.", reply_markup=keyboard)
def quote(message):
    """Reply to the chat with the quote of the day fetched from quotes.rest."""
    chat_id = message.chat.id
    if sql_functions.check_user(alice_vars.db_name, 'Admins', chat_id):
        keyboard = alice_vars.keyboard_admin
    else:
        keyboard = alice_vars.keyboard_default
    try:
        quote = urllib.request.urlopen(
            'http://quotes.rest/qod.json').read().decode('utf-8')
        quote_json = json.loads(quote)
        # Bug fix: send_message takes a single text argument; the original
        # passed the quote and author as extra positional arguments, which
        # were swallowed by unrelated parameters so only the header was sent.
        quote_obj = quote_json['contents']['quotes'][0]
        text = "Quote of the day:\n\n" + quote_obj['quote'] + "\n" + quote_obj['author']
        bot.send_message(chat_id, text, reply_markup=keyboard)
    except Exception as e:
        bot.send_message(
            chat_id, "oops! something went wrong! try again.", reply_markup=keyboard)
| adtya/the-alice-bot | easter_eggs.py | easter_eggs.py | py | 1,655 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sql_functions.check_user",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "alice_vars.db_name",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "alice_vars.keyboard_admin",
"line_number": 13,
"usage_type": "attribute"
},
{
"a... |
41827178704 | import torch
from torch import nn, einsum
import numpy as np
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
def pair(t):
return t if isinstance(t, tuple) else (t, t)
class AddPositionEmbs(nn.Module):
    """Adds a learnable positional embedding to a (batch, seq_len, emb_dim) input.

    Args:
        inputs_positions: optional position indices for packed sequences; when
            given, rows of the embedding table are gathered by index instead
            of being added one-to-one.
    """
    def __init__(self, inputs_positions=None):
        super(AddPositionEmbs, self).__init__()
        self.inputs_positions = inputs_positions
        # Created lazily on the first forward pass, once seq_len/emb_dim are known.
        self.pe = None

    def forward(self, inputs):
        # inputs.shape is (batch_size, seq_len/embeddings num, emb_dim).
        assert inputs.ndim == 3, ('Number of dimensions should be 3,'
                                  ' but it is: %d' % inputs.ndim)
        if self.pe is None:
            # Bug fix: the original sampled a brand-new random "parameter" on
            # every call, so the positional embedding was neither stable
            # across forward passes nor trainable. Create it once and assign
            # it so nn.Module registers it as a real parameter.
            self.pe = nn.Parameter(torch.randn(1, inputs.shape[1], inputs.shape[2]))
        if self.inputs_positions is None:
            # Normal unpacked case:
            return inputs + self.pe
        # Packed case: gather embedding rows by the known position indices
        # (the original called the non-existent ``nn.take`` here and crashed).
        return inputs + self.pe[0][self.inputs_positions]
class MlpBlock(nn.Module):
    """Transformer MLP / feed-forward block.

    Linear -> GELU -> Dropout -> Linear -> Dropout.

    Args:
        mlp_dim: hidden dimension of the first linear layer.
        Dim: input embedding dimension (the first linear expects this as the
            trailing dimension of its input).
        out_dim: output dimension; defaults to ``Dim`` when None.
        dropout_rate: dropout probability used after both linear layers.
    """
    def __init__(self, mlp_dim, Dim=1024, out_dim=None, dropout_rate=0.1):
        super(MlpBlock, self).__init__()
        self.out_dim = out_dim
        self.mlp_dim = mlp_dim
        self.Dim = Dim
        self.dropout_rate = dropout_rate
        # Bug fix: the original built this Sequential inside forward(), which
        # re-initialised the linear weights randomly on every call, so the
        # block could never be trained. Build the layers once here instead.
        # (The first Linear requires inputs with trailing dim == Dim, so when
        # out_dim is None the original's inputs.shape[-1] equals Dim.)
        actual_out_dim = Dim if out_dim is None else out_dim
        self.net = nn.Sequential(
            nn.Linear(Dim, mlp_dim),
            nn.GELU(),
            nn.Dropout(dropout_rate),
            nn.Linear(mlp_dim, actual_out_dim),
            nn.Dropout(dropout_rate)
        )

    def forward(self, inputs):
        return self.net(inputs)
class Attention(nn.Module):
    """Multi-head scaled dot-product self-attention."""
    def __init__(self, Dim = 1024, heads = 8, dim_head = 64, dropout = 0.1):
        super().__init__()
        inner_dim = dim_head * heads # total width across all heads, e.g. 64*16=1024
        project_out = not (heads == 1 and dim_head == Dim) # output projection needed when multi-head or dims differ
        self.heads = heads # number of attention heads
        self.scale = dim_head ** -0.5 # scaling factor from "Attention Is All You Need"
        self.attend = nn.Softmax(dim = -1) # softmax over the key axis
        self.to_qkv = nn.Linear(Dim, inner_dim * 3, bias = False) # single projection producing Q, K and V
        # Final projection back to Dim; identity when no projection is needed.
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, Dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()
    def forward(self, x):
        b, n, _, h = *x.shape, self.heads # batch size, token count, emb dim; h = head count
        qkv = self.to_qkv(x).chunk(3, dim = -1) # project once, then split into Q, K, V
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv) # split out the head dimension
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale # scaled Q.K similarity
        attn = self.attend(dots) # attention weights
        out = einsum('b h i j, b h j d -> b h i d', attn, v) # attention-weighted sum of values
        out = rearrange(out, 'b h n d -> b n (h d)') # merge heads back together
        return self.to_out(out) # final linear projection (or identity)
class Encoder1DBlock(nn.Module):
    """Single Transformer encoder layer.

    Pre-LayerNorm multi-head self-attention followed by a pre-LayerNorm MLP,
    each wrapped in a residual connection.

    Args:
        Dim: token embedding dimension.
        heads: number of attention heads.
        dim_head: per-head dimension.
        mlp_dim: hidden dimension of the feed-forward block.
        dropout_rate: dropout rate for the attention output and the MLP.
        attention_dropout_rate: dropout rate inside the attention block.

    forward() returns a tensor with the same (batch, seq_len, Dim) shape as
    its input.
    """
    def __init__(self,
                 Dim,
                 heads,
                 dim_head,
                 mlp_dim,
                 dropout_rate=0.1,
                 attention_dropout_rate=0.1,
                 **attention_kwargs
                 ):
        super(Encoder1DBlock, self).__init__()
        self.Dim = Dim
        self.heads = heads
        self.dim_head = dim_head
        self.mlp_dim = mlp_dim
        self.dropout_rate = dropout_rate
        self.attention_dropout_rate = attention_dropout_rate
        # Bug fix: the original constructed these modules inside forward(),
        # re-initialising their weights randomly on every call, so the layer
        # could never be trained. Build them once here instead.
        self.attn_norm = nn.LayerNorm(Dim)
        self.attention = Attention(Dim=Dim,
                                   heads=heads,
                                   dim_head=dim_head,
                                   dropout=attention_dropout_rate)
        self.dropout = nn.Dropout(dropout_rate)
        self.mlp_norm = nn.LayerNorm(Dim)
        self.mlp = MlpBlock(mlp_dim=mlp_dim,
                            Dim=Dim,
                            dropout_rate=dropout_rate)

    def forward(self, inputs):
        assert inputs.ndim == 3
        # Attention block with residual connection.
        x = self.attn_norm(inputs)
        x = self.attention(x)
        x = self.dropout(x)
        x = x + inputs
        # MLP block with residual connection.
        y = self.mlp_norm(x)
        y = self.mlp(y)
        return x + y
class Encoder(nn.Module):
    """Full Transformer encoder.

    Adds a positional embedding, applies dropout, runs ``num_layers`` stacked
    ``Encoder1DBlock`` layers, and finishes with a LayerNorm.

    Args:
        Dim: token embedding dimension.
        num_layers: number of encoder layers.
        mlp_dim: hidden dimension of each layer's feed-forward block.
        heads: number of attention heads.
        dim_head: per-head dimension.
        inputs_positions: input subsequence positions for packed examples.
        dropout_rate: dropout rate.

    forward() returns the encoded tensor of shape (batch, seq_len, Dim).
    """
    def __init__(self,
                 Dim,
                 num_layers,
                 mlp_dim,
                 heads,
                 dim_head,
                 inputs_positions=None,
                 dropout_rate=0.1,
                 **attention_kwargs
                 ):
        super(Encoder, self).__init__()
        self.Dim = Dim
        self.num_layers = num_layers
        self.mlp_dim = mlp_dim
        self.inputs_positions = inputs_positions
        self.dropout_rate = dropout_rate
        self.heads = heads
        self.dim_head = dim_head
        # Bug fix: the original instantiated every submodule inside forward(),
        # re-randomising their weights on each call; build them once here so
        # they are registered with the module and trainable.
        self.add_position_embs = AddPositionEmbs(inputs_positions=inputs_positions)
        self.dropout = nn.Dropout(dropout_rate)
        self.layers = nn.ModuleList([
            Encoder1DBlock(Dim=Dim,
                           mlp_dim=mlp_dim,
                           dropout_rate=dropout_rate,
                           heads=heads,
                           dim_head=dim_head)
            for _ in range(num_layers)
        ])
        self.norm = nn.LayerNorm(Dim)

    def forward(self, inputs):
        assert inputs.ndim == 3  # (batch, len, emb)
        x = self.add_position_embs(inputs)
        x = self.dropout(x)
        for layer in self.layers:
            x = layer(x)
        return self.norm(x)
class VisionTransformer(nn.Module):
    """Complete ViT architecture: patch embedding + class token + Transformer
    encoder + MLP classification head."""
    def __init__(self,
                 image_size,
                 patch_size,
                 num_classes,  # total number of target classes
                 Dim,
                 depth,
                 heads,
                 mlp_dim,
                 pool = 'cls',
                 channels = 3,
                 dim_head = 64,
                 dropout = 0.,
                 emb_dropout = 0.1):
        super(VisionTransformer, self).__init__()
        image_height, image_width = pair(image_size)  # e.g. image_size=256 -> (256, 256)
        patch_height, patch_width = pair(patch_size)  # e.g. patch_size=32 -> (32, 32)
        # The image size must be divisible by the patch size, otherwise fail fast.
        assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
        # Number of patches per image; e.g. 8*8 = 64 patches.
        num_patches = (image_height // patch_height) * (image_width // patch_width)
        patch_dim = channels * patch_height * patch_width  # flattened patch length, e.g. 3*32*32 = 3072
        #print(patch_dim)
        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
        # Turn the whole image into a sequence of patch embeddings.
        self.to_patch_embedding = nn.Sequential(
            # Rearrange (B, C, H, W) into (B, num_patches, patch_dim), i.e.
            # split into patches and flatten each patch to one vector.
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=patch_height, p2=patch_width),
            nn.Linear(patch_dim, Dim),  # linear projection of each patch down to Dim -> patch embeddings
        )
        self.cls_token = nn.Parameter(torch.randn(1, 1, Dim))  # learnable class token, shape (1, 1, Dim)
        #print(self.cls_token.shape)
        self.dropout = nn.Dropout(emb_dropout)
        self.transformer = Encoder(num_layers = depth,
                                   mlp_dim = mlp_dim,
                                   dropout_rate = dropout,
                                   heads = heads,
                                   dim_head = dim_head,
                                   Dim = Dim
                                   )
        self.pool = pool
        self.to_latent = nn.Identity()  # no-op placeholder before the head
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(Dim),
            nn.Linear(Dim, num_classes)  # (B, Dim) -> (B, num_classes)
        )
    def forward(self, inputs):
        x = self.to_patch_embedding(inputs)
        #print(x.shape)
        b, n, _ = x.shape  # (batch, num_patches, Dim)
        # Repeat the class token once per image in the batch.
        cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b)
        #print(cls_tokens.shape)
        x = torch.cat((cls_tokens, x), dim=1)  # prepend cls token: (B, num_patches + 1, Dim)
        #print(x.shape)
        x = self.dropout(x)
        # Encoder adds the position embedding, applies dropout and the stacked
        # encoder blocks; output keeps the (B, num_patches + 1, Dim) shape.
        x = self.transformer(x)
        # Either mean-pool all tokens or take the cls token -> (B, Dim).
        x = x.mean(dim=1) if self.pool == 'mean' else x[:, 0]
        #print(x.shape)
        x = self.to_latent(x)
        return self.mlp_head(x)  # (B, Dim) -> (B, num_classes)
| Lp-wu/ViT-by-pytorch | ViT.py | ViT.py | py | 12,536 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line... |
24021067933 | from cgitb import reset
from http import HTTPStatus
from flask import Flask, jsonify, request
from flask_restful import Api, Resource, abort
from flask_cors import CORS
from util.math import math
# Flask application wired with CORS and a flask-restful API wrapper.
app = Flask(__name__)
CORS(app)
api = Api(app)
# Create resource
class Operation(Resource):
    '''
    REST resource applying a binary arithmetic operation to two integers
    taken from the POSTed JSON body.
    '''
    def post(self):
        payload = request.get_json()
        outcome = math(operator=payload["operator"], int1=payload["int1"], int2=payload["int2"])
        if outcome is None:
            # The math helper signals failure by returning None.
            abort(404, message="Error processing request.")
        return {"result": outcome}, HTTPStatus.OK
# Add Resources
api.add_resource(Operation, "/calculate")  # POST /calculate -> Operation.post
# Normally dont keep debug=True if going to actual production.
if __name__ == "__main__":
    app.run(port=5000, debug=True)
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",... |
2599586486 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the click data: one row per impression, one column per ad; a cell is 1
# when that ad would have been clicked in that round.
veriler = pd.read_csv('Ads_CTR_Optimisation.csv')
import random
N = 10000  # number of rounds (impressions)
d = 10     # number of ads
toplam = 0  # total reward accumulated
secilenler = []  # ad chosen at each round
for n in range(0,N):
    # Baseline: pick an ad uniformly at random each round.
    ad = random.randrange(d)
    secilenler.append(ad)
    odul = veriler.values[n,ad]  # reward is 1 when row n of the data is 1 for this ad
    toplam = toplam + odul
# Histogram of how often each ad was selected.
plt.hist(secilenler)
plt.show()
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
74868111712 | import shodan
import json
import sys
from time import sleep
from parser import process_parser
def portCheck(info):
    """Log the host when it exposes a printer port (LPD 515 or JetDirect 9100)."""
    open_ports = info['ports']
    for printer_port in (515, 9100):
        if printer_port in open_ports:
            entry = str(info['ip_str']) + ":port " + str(printer_port) + " printer find"
            writeToFile('log/port.log', entry)
            print(entry)
def deviceCheck(info):
    """Record the device type advertised by each service banner, routing
    routers, printers and webcams into dedicated log files."""
    # First matching category wins, mirroring the original if/elif chain.
    category_logs = (('router', 'log/router.log'),
                     ('printer', 'log/printer.log'),
                     ('webcam', 'log/webcam.log'))
    # One entry in info['data'] is expected per open port.
    for i in range(len(info['ports'])):
        banner = info['data'][i]
        if 'devicetype' not in banner:
            continue
        devicetype = banner['devicetype']
        entry = str(banner['port']) + ":" + devicetype
        writeToFile("log/devicetype.log", entry)
        for needle, logfile in category_logs:
            if devicetype.find(needle) != -1:
                writeToFile(logfile, entry)
                break
        print(entry)
def writeToFile(name, content):
    """Append ``content`` as one line to the log file ``name``.

    Bug fix: entries were previously written back-to-back with no separator,
    turning every log file into a single run-on line; the file handle was also
    leaked if the write raised. Use a context manager and terminate each entry
    with a newline.
    """
    with open(name, "a+") as f:
        f.write(content + "\n")
def json_output(info):
    """Pretty-print a host record as indented JSON."""
    print(json.dumps(info, indent=4))
def shodan_engine(api_key, ip, count):
    """Query Shodan for one host, or sweep a /16 network.

    With count == 1 the single host ``ip`` is looked up and reported. For any
    other count, ``ip`` must be in CIDR form (e.g. "10.20.0.0/16") and up to
    ``count`` hosts are scanned.

    NOTE(review): only a "/16" mask is handled; any other mask silently does
    nothing, and an ip without "/" raises IndexError when count != 1.
    """
    api = shodan.Shodan(api_key)
    # search only 1 ip
    if count == 1:
        info = api.host(ip)
        json_output(info)
        portCheck(info)
        deviceCheck(info)
        return
    mask = ip.split('/')[1]
    if mask == "16":
        # Keep the first two octets, then enumerate the last two.
        ip_str = "".join([ip.split('.')[0],".",ip.split('.')[1]])
        for i in range (0 , 256 , 1):
            for j in range (0 , 256 , 1):
                # NOTE(review): this stops after ``count`` hosts in total, not
                # ``count`` per /24 -- confirm that is the intended limit.
                if j == count:
                    return
                ip = ip_str + "." + str(i) + "." + str(j)
                try:
                    info = api.host(ip)
                    content = "ip:" + str(info['ip_str'])
                    writeToFile("log/running.log",content)
                    portCheck(info)
                    deviceCheck(info)
                    # Throttle requests to stay under the API rate limit.
                    sleep(1.5)
                except shodan.APIError as e:
                    # Host unknown to Shodan (or rate limited): skip it.
                    print(e)
                    continue
if __name__ == "__main__":
    args = process_parser()
    ip = args.ip
    count = args.count
    if count == -1:
        # Bug fix: this previously read ``count == 1000`` -- a comparison
        # whose result was discarded -- so the -1 sentinel never became the
        # intended default of 1000 hosts.
        count = 1000
    # NOTE(review): hard-coded API key checked into source; move it to an
    # environment variable or config file.
    API_KEY = "839CrW4f3Omc9wYO9aMWeRq0Go4rEPfN"
    shodan_engine(API_KEY, ip, count)
    print("finish!!!")
{
"api_name": "json.dumps",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "shodan.Shodan",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "shodan.APIError",
"line_numbe... |
7958211750 | from django.conf.urls import url
from . import views
# Route table for this app: each pattern dispatches to a view in views.py.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^register', views.register),
    url(r'^login', views.login),
    url(r'^show', views.show),
    url(r'^add', views.add),
    url(r'^createtrip', views.createtrip),
    url(r'^logout', views.logout),
    # The patterns below capture a numeric trip id as ``tid``.
    url(r'^(?P<tid>\d+)/destroy', views.destroy),
    url(r'^(?P<tid>\d+)/granttrip', views.granttrip),
    url(r'^(?P<tid>\d+)/view', views.view),
    # url(r'^stats', views.stats)
]
| jbacos7/C- | secondDojo answers1/Python/Django/pexam2/pexam2 2/apps/pexam2/urls.py | urls.py | py | 519 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.co... |
29111524576 |
from datetime import datetime
from app.main.constants import (PLOT_SAVE_DIRECTORY, TIME_STAMP_FORMAT,
PlotStyle)
from app.main.models.wall import Wall
from matplotlib import pyplot as plt
'''
Description: This method contains helper functions for plotting and reporting.
'''
class ReportingHelper:
    """Static helpers for plot file naming and plot annotation."""

    @staticmethod
    def get_file_path(plot_type: str) -> str:
        '''
        Description: Builds a timestamped save path for a plot of the given type.
        Params: plot_type: str
        Return: file_path: str
        '''
        timestamp = datetime.now().strftime(TIME_STAMP_FORMAT)
        return "{0}{1}{2}.png".format(PLOT_SAVE_DIRECTORY, timestamp, plot_type)

    @staticmethod
    def legend_without_duplicate_labels(ax) -> list[tuple]:
        '''
        Description: Collects the axes' legend handles/labels, sorted by label,
        keeping only the first occurrence of each label.
        Params: ax: Axes
        Return: unique: list[tuple]
        Exception: None
        '''
        handles, labels = ax.get_legend_handles_labels()
        labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))
        seen = set()
        unique = []
        for handle, label in zip(handles, labels):
            if label not in seen:
                seen.add(label)
                unique.append((handle, label))
        return unique

    @staticmethod
    def plot_room_number_on_wall(wall: Wall, room_number) -> tuple[str, (float, float), (float, float), int, dict, dict]:
        '''
        Description: Builds the annotation arguments used to draw a room label
        anchored at a wall.
        Params: wall: Wall, room_number: str
        Return: message: str, xy: (float, float), xytext: (float, float), size: int, bbox: dict, arrowprops: dict
        Exception: None
        '''
        anchor_x = wall.start_point.x_coordinate
        anchor_y = wall.end_point.y_coordinate
        label = "{}".format(room_number)
        label_size = PlotStyle.ROOM_LABEL_SIZE.value
        box_style = dict(boxstyle="round", fc="w", lw=0.25, alpha=0.75)
        arrow_style = dict(arrowstyle="-|>", connectionstyle="arc3", facecolor="black", lw=0.25)
        text_position = (anchor_x, anchor_y - PlotStyle.ROOM_LABEL_Y_POS.value)
        return label, (anchor_x, anchor_y), text_position, label_size, box_style, arrow_style
| nchalimba/building-plan-processor | app/main/reporting/reporting_helper.py | reporting_helper.py | py | 2,197 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "app.main.constants.TIME_STAMP_FORMAT",
"line_number": 22,
"usage_type": "argument"
},
{
"... |
17213380192 | import argparse
from math import pi, sin, asin
import time
from time import sleep
import pvaccess as pva
class UnionTest:
def __init__(self, **kwargs):
"""
"""
self.dataStruct = {'ArrayId': pva.UINT,
'Time': [pva.DOUBLE],
'value': pva.PvUnion({'Sinusoid': [pva.FLOAT], 'Triangle': [pva.DOUBLE]}),
'Sinusoid': [pva.FLOAT],
'Triangle': [pva.FLOAT]}
self.counts = 0
step = kwargs.get("sample", 1000)
self.time_interval = 1. / step
self.pv = pva.PvObject(self.dataStruct)
self.pvaServer = pva.PvaServer('{}:Scope:Data'.format(kwargs.get("pv", "Test")), self.pv)
print("PV Name: {}:Union:Data".format(kwargs.get("pv", "Test")))
def update(self):
# sleep(0.1)
time0 = time.time()
ts = [time0 + self.time_interval * i for i in range(0, 100)]
sinusoid = [sin(2 * pi * 1.1 * t + pi / 2) for t in ts]
triangle = [(2 / pi) * asin(sin(2 * pi * 1.1 * t)) for t in ts]
pv = pva.PvObject(self.dataStruct, {'ArrayId': self.counts,
'Time': ts,
'value': {'Sinusoid': sinusoid},
'Sinusoid': sinusoid,
'Triangle': triangle})
self.pvaServer.update(pv)
self.counts = self.counts + 1
def main():
"""
Scope simulator main routine
:return:
"""
parser = argparse.ArgumentParser(
description='Example using EPICS7 Union with pvaPy to provide data via EPICS7 pvAccess')
parser.add_argument('--pv', type=str, default="Test",
help='EPICS PV name prefix. The full PV name will be {prefix}:Scope:Data'
'e.g. --pv=test, the full PV name will be "test:Scope:Data"')
parser.add_argument('--freq', type=int, default=10,
help='data update frequency')
parser.add_argument('--sample', type=int, default=1000,
help='data samples in 1 second')
args = parser.parse_args()
pvas = UnionTest(pv=args.pv, sample=args.sample)
while True:
pvas.update()
sleep(1./args.freq)
if __name__ == "__main__":
main()
| epics-extensions/c2dataviewer | example/union.py | union.py | py | 2,375 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "pvaccess.UINT",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pvaccess.DOUBLE",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pvaccess.PvUnion",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pvaccess.FLO... |
41035495644 | import typing
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import declared_attr
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
class Base(DeclarativeBase):
pass
class HasRelatedDataMixin:
@declared_attr
def related_data(cls) -> Mapped[str]:
return mapped_column(Text(), deferred=True)
class User(HasRelatedDataMixin, Base):
@declared_attr.directive
def __tablename__(cls) -> str:
return "user"
@declared_attr.directive
def __mapper_args__(cls) -> typing.Dict[str, typing.Any]:
return {}
id = mapped_column(Integer, primary_key=True)
class Foo(Base):
__tablename__ = "foo"
id = mapped_column(Integer, primary_key=True)
u1 = User()
if typing.TYPE_CHECKING:
# EXPECTED_TYPE: str
reveal_type(User.__tablename__)
# EXPECTED_TYPE: str
reveal_type(Foo.__tablename__)
# EXPECTED_TYPE: str
reveal_type(u1.related_data)
# EXPECTED_TYPE: InstrumentedAttribute[str]
reveal_type(User.related_data)
| sqlalchemy/sqlalchemy | test/typing/plain_files/orm/declared_attr_two.py | declared_attr_two.py | py | 1,104 | python | en | code | 8,024 | github-code | 1 | [
{
"api_name": "sqlalchemy.orm.DeclarativeBase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.mapped_column",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Text",
"line_number": 18,
"usage_type": "call"
},
{
"api_n... |
72389012194 | # coding: utf-8
"""Download and file clips."""
import logging
import hashlib
import os
import datetime
import ffmpy
import requests
from tinydb import Query
from config import db, SUPPORTED_TYPES, DATA_DIR
logger = logging.getLogger('oxo')
def download_clip(url, bot, update, content_type, fname=None):
"""Download clips."""
if not fname:
fname = hashlib.sha1(url.encode(encoding='UTF-8')).hexdigest()
author = update.message.from_user.first_name
if content_type not in SUPPORTED_TYPES:
logger.info("Link not supported: \n{}\nType{}".format(
url, content_type))
bot.sendMessage(chat_id=update.message.chat_id, text="👾 Link not supported. Only mp4, webm and gif links.")
elif duplicate(url):
logger.info("Detected duplicate {}".format(url))
update.message.reply_text("👾 Reposter!")
else:
fpath = os.path.join(DATA_DIR, "clips", fname)
logger.debug("Downloading clip to {}...".format(fpath))
with open(fpath, "wb+") as f:
r = requests.get(url, stream=True)
if r.ok:
for block in r.iter_content(1024):
f.write(block)
else:
logger.error("Download failed {}".format(r))
# Convert gif files using ffmpeg
if url[-3:] == "gif":
fpath = convert_gif(fpath)
fname = os.path.basename(fpath)
clip = {
"type": "clip",
"url": url,
"author": author,
"filename": fname,
"created": datetime.datetime.now().isoformat(),
"incoming": True
}
db.insert(clip)
bot.sendMessage(chat_id=update.message.chat_id, text="👾 Added video to database.")
logger.info("Saved new clip {} from {}".format(fname, author))
def duplicate(url):
"""Boolean, true if given filenam exists in clips."""
Duplicate = Query()
return len(db.search(Duplicate.url == url)) > 0
#
# Converting gifs
#
def convert_gif(fpath):
"""Convert gif at fpath using ffmpeg."""
logger.info("Converting gif to mp4...")
new_fpath = fpath + ".mp4"
ff = ffmpy.FFmpeg(
inputs={
fpath: None
},
outputs={
new_fpath: '-pix_fmt yuv420p -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2"'
}
)
ff.run()
return new_fpath
| cafca/displaybot | displaybot/conversion.py | conversion.py | py | 2,395 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "config.SUPPORTED_TYPES",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.path.join",
... |
39726595273 | """Delete disabled column
Revision ID: de3bdf5a9ff8
Revises: 2b7fcebfede1
Create Date: 2021-06-20 09:35:40.600021
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'de3bdf5a9ff8'
down_revision = '2b7fcebfede1'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_users_disabled', table_name='users')
op.drop_index('ix_users_email', table_name='users')
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.drop_column('users', 'disabled')
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('disabled', mysql.TINYINT(
display_width=1), autoincrement=False, nullable=False))
op.drop_index(op.f('ix_users_email'), table_name='users')
op.create_index('ix_users_email', 'users', ['email'], unique=False)
op.create_index('ix_users_disabled', 'users', ['disabled'], unique=False)
# ### end Alembic commands ###
| shin-hama/JunkNoteAPI | app/db/migrations/versions/de3bdf5a9ff8_delete_disabled_column.py | de3bdf5a9ff8_delete_disabled_column.py | py | 1,169 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "alembic.op.drop_index",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.drop_index",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
... |
25088785509 | from flask import Flask, request, jsonify
import sys
from elasticsearch_dsl import Search, A, Q
from elasticsearch import Elasticsearch
app = Flask(__name__)
from flask import render_template
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
es = Elasticsearch(['http://elastic:changeme@localhost:9200/'], use_ssl=False)
PAGESIZE = 8
@app.route('/')
@app.route('/movies')
def index():
page = 1
sort = request.args.get('sort')
search = request.args.get('search')
logger.debug(request.args)
s = Search(using=es)
s = s.index('imdb')
s = s.source(includes=['title', 'poster', '_id'])
s = s[(page-1)*PAGESIZE:page*PAGESIZE]
if search:
s = s.query(Q('multi_match', query=search, fields=['title', 'summary', 'casts'])).extra(size=8)
if sort:
s = s.sort(sort)
ret = s.execute()
logger.debug(ret.hits)
movies = get_movies(ret.hits)
genres = get_genre_agg()
return render_template('review.html', movies=movies, genres=genres)
@app.route('/movie/<string:mid>')
def movie_page(mid):
s = Search(using=es)
s = s.index('imdb')
s = s.filter('term', _id=mid)
ret = s.execute()
return render_template('single.html', movie=get_movie_detail(ret.hits[0].to_dict()))
def get_genre_agg():
s = Search(using=es)
s = s.index('imdb')
s.aggs.bucket('genres', A('terms', field='genres'))
ret = s.execute()
return [x['key'] for x in ret.to_dict()['aggregations']['genres']['buckets']]
def get_movie_detail(movie):
movie['genres'] = '/'.join(movie['genres'])
movie['creators'] = ', '.join(movie['creators'])
movie['casts'] = ', '.join(movie['casts'])
return movie
def get_movies(hits):
for r in hits:
r._d_['id'] = r.meta.id
return [x.to_dict() for x in hits]
@app.route('/suggest/<string:input>')
def get_suggest(input):
if not input:
return None
s = Search(using=es)
s = s.index('imdb')
s = s.suggest('suggestion', input, completion={'field': 'suggest'})
s = s.source(False)
ret = s.execute()
results = [x['text'] for x in ret.suggest.suggestion[0]['options']]
return jsonify(result=results)
| chaopli/movielib-incomplete | movielib.py | movielib.py | py | 2,280 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler"... |
22885261332 | from __future__ import print_function
import datetime
from tzlocal import get_localzone
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
def get_creds():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
def get_events(creds):
service = build('calendar', 'v3', credentials=creds)
#get today's date
today = datetime.date.today()
#tmrw = (today + datetime.timedelta(1))
#hack to get local timezone
'''
local_t = datetime.datetime.fromtimestamp(0)
utc_t = datetime.datetime.utcfromtimestamp(0)
local_tz = datetime.timezone(local_t - utc_t)
'''
local_tz = get_localzone()
#turn today's date into a datetime set at midnight in local timezone
today_dt = datetime.datetime.combine(today, datetime.time(0, tzinfo=local_tz))
#tmrw_dt = today_dt + datetime.timedelta(1)
day_after_tmrw_dt = today_dt + datetime.timedelta(2)
events_result = service.events().list(calendarId='primary', timeMin=today_dt.isoformat(), timeMax=day_after_tmrw_dt.isoformat(), maxResults=5, singleEvents=True, orderBy='startTime').execute()
events = events_result.get('items', [])
events_list = []
if not events:
pass
for event in events:
#if(event['summary'] == 'Work'):
start = event['start']['dateTime']
end = event['end']['dateTime']
events_list.append((event['summary'], start, end))
#print(event['summary'], start, end)
return events_list
'''
print('Getting Tomorrow\'s Work Schedule')
events_result = service.events().list(calendarId='primary', timeMin=tmrw_dt.isoformat(), timeMax=day_after_tmrw_dt.isoformat(), maxResults=5, singleEvents=True, orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No Work Tomorrow!')
for event in events:
if(event['summary'] == 'Work'):
start = event['start']['dateTime']
end = event['end']['dateTime']
events_list.append((event['summary'], start, end))
print(event['summary'], start, end)
'''
if __name__ == '__main__':
creds = get_creds()
print(get_events(creds)) | vwlau/eink-cal | g_cal.py | g_cal.py | py | 3,214 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "os.path.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line... |
3164290218 | #!/usr/bin/env python3
from time import sleep
from datetime import datetime
from tasks import primeira_task, segunda_task
q1 = 'filarq30_1'
q2 = 'filarq30_2'
s = 'INFORMACAO '
if __name__ == '__main__':
while True:
try:
print(f'*** {datetime.now()}')
_1 = primeira_task.apply_async(args=[s], queue=q1)
_1 = _1.get()
_2 = segunda_task.apply_async(args=[_1], queue=q2)
sleep(3)
except KeyboardInterrupt:
break
| htbrandao/tutorial-rabbit-and-celery | demo/app.py | app.py | py | 505 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tasks.primeira_task.apply_async",
"line_number": 17,
"usage_type": "call"
},
{
"api_name"... |
35847150078 | import ast
import asyncio
import threading
import time
from datetime import datetime, timedelta
from collector.main_collector import MainCollector
from data_base import DataBase
from keys import VIME_TEST_TOKEN
from keys import VIME_TOKEN, VIME_TEST_TOKEN
from utils import cls
from vime_api.vime import Vime
TEST_MODE = False
if TEST_MODE:
VIME_TOKEN = VIME_TEST_TOKEN
CONFIG_FILE = 'config.cfg'
class VimeArchive:
def __init__(self) -> None:
self.vime = Vime(VIME_TOKEN)
self.load_config()
self.make_db()
self.load_locale()
self.log_list = []
self.threads = []
self.main_collector = MainCollector(self, self.db, self.db_p)
self.run_async_thread(self.log_cycle())
async def log_cycle(self):
while True:
print_text = f'Threads'
print_text += f'\n Count: {len(self.threads)}'
print_text += f'\nCollectors'
print_text += f'\n Main Collector: {self.main_collector.get_status()}'
print_text += f'\n Matches Collector: {self.main_collector.matches_collector.get_status()}'
print_text += f'\n Leaderboards Collector: {self.main_collector.lb_collector.get_status()}'
print_text += f'\n Players Collector: {self.main_collector.players_collector.get_status()}'
print_text += f'\n Rank Collector: {self.main_collector.rank_collector.get_status()}'
print_text += f'\nVime'
print_text += f'\n Limit Remaining: {self.vime.limit_remaining}'
print_text += f'\nDataBase users.db'
print_text += f'\n Update: {len(self.users_db.update_list)}'
print_text += f'\n Insert: {len(self.users_db.insert_list)}'
print_text += f'\n Delete: {len(self.users_db.delete_list)}'
print_text += f'\n Get: {len(self.users_db.get_dict)}'
print_text += '\nLogs'
for m in self.log_list:
print_text += f'\n {m}'
print_text += '\nExceptions'
for e in self.main_collector.exceptions:
print_text += f'\n{e}'
cls()
print(print_text)
time.sleep(1)
def load_config(self):
self.activeUsers = []
self.last_match_id = 0
try:
f = open(CONFIG_FILE, 'r', encoding='utf-8')
data = f.read()
config = {}
if data != '':
config = ast.literal_eval(data)
try:
self.last_match_id = config['last_match_id']
except:
self.last_match_id = None
try:
self.activeUsers = config['active_users']
except:
self.activeUsers = []
f.close()
except FileNotFoundError:
open(CONFIG_FILE, 'a', encoding='utf-8')
def save_config(self):
open(CONFIG_FILE, 'a', encoding='utf-8')
f = open(CONFIG_FILE, 'w', encoding='utf-8')
f.write(str({'last_match_id': self.main_collector.matches_collector.last_match_id, 'active_users': self.main_collector.players_collector.activeUsers}))
f.close()
def make_db(self):
date = datetime.now()
self.db = DataBase(f'{date.day}.{date.month}.{date.year}')
date_p = datetime.now() - timedelta(days=1)
self.db_p = DataBase(f'{date_p.day}.{date_p.month}.{date_p.year}')
self.users_db = DataBase(date='', file_name='users.db')
def load_locale(self):
f = open('locale.txt', 'r', encoding='utf-8')
self.locale = ast.literal_eval(f.read())
f.close()
def log(self, message: str):
self.log_list.append(message)
def run_async_thread(self, target):
thread = threading.Thread(target=self.run_async, args=[target])
self.threads.append(thread)
thread.start()
def run_async(self, target):
asyncio.run(target)
vime_archive = VimeArchive() | FalmerF/VimeArchive | run.py | run.py | py | 4,035 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "keys.VIME_TOKEN",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "keys.VIME_TEST_TOKEN",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "vime_api.vime.Vime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keys.VIME_TOK... |
20604431851 | """djangodemo03 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from index import views
urlpatterns = [
# url(r'^index/', views.index),
url(r'^01-add-author/$', views.add_author),
url(r'^02-add-book/$', views.add_book),
url(r'^03-add-publisher/$', views.add_publisher),
url(r'^02-query/$', views.query),
url(r'^03-queryall/$', views.queryall),
url(r'^04-filter/$', views.filter_views),
url(r'^05-lookup/$', views.filed_lookup),
url(r'^06-exclude/$', views.exclud),
url(r'^05-update/(\d+)/$', views.update, name='upgrade'),
url(r'^06-aggregate/$', views.aggregate),
url(r'^07-annotate/$', views.annotate),
url(r'^07-annotate_book/$', views.annotate_book),
url(r'^08-update/$', views.update08),
url(r'^09-delete/(\d+)/$', views.delete),
url(r'^10-oto/$', views.oto_views),
url(r'^10-find_oto/$', views.oto_find),
url(r'^10-one_to_many/$', views.onetomany),
url(r'^11-find_otm/$', views.find_otm),
url(r'^12-mtm/$', views.add_mtm),
url(r'^13-find_mtm/$', views.find_mtm),
url(r'^13-objects/$', views.object_views),
url(r'^13-name_like/$', views.name_like_views),
url(r'^13-book_date/$', views.book_date_views),
]
| demo112/1809 | PythonWeb/Django/1809/djangoproject/djangodemo03/index/urls.py | urls.py | py | 1,823 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "index.views.add_author",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "index.views",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.c... |
7807856985 | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from .models import UserInfo
from .models import battledata
from .forms import UserForm,dataForm
# ユーザ情報を辞書に格納して、users.htmlに返す
def showUsers(request):
usefinfo = UserInfo.objects.all()
context = {
'msg': '現在の利用状況',
'userinfo': usefinfo,
'count':usefinfo.count,
}
return render(request, 'form/users.html',context)
def showDetail(request,user):
#URLのidをもとに、ユーザ情報を抽出
userinfoDetail = get_object_or_404(UserInfo,userName=user)
context = {
'userinfoDetail':userinfoDetail,
}
#detail.htmlへデータを渡す
return render(request, 'form/detail.html',context)
def showCreateUserForm(request):
#フォームを変数にセット
form = UserForm()
context = {
'userForm':form,
}
#detail.htmlへデータを渡す
return render(request, 'form/create.html',context)
def addUsers(request):
if request.method=='POST':
userForm = UserForm(request.POST)
if(userForm.is_valid()):
userForm.save()
userinfo = UserInfo.objects.all()
context = {
'msg': '現在の利用状況',
'userinfo': userinfo,
'count':userinfo.count,
}
#user.htmlへデータを渡す
return render(request, 'form/users.html',context)
def showEditUserForm(request,user):
#idをもとにユーザ情報を取得
userinfo = get_object_or_404(UserInfo,userName=user)
#フォームをオブジェクトを作成
userForm = UserForm(instance=userinfo)
#ユーザ情報をフォームに格納
context = {
'userinfo':userinfo,
'userForm':userForm,
}
#user.htmlへデータを渡す
return render(request, 'form/edit.html',context)
def updateUser(request,user):
if request.method=='POST':
userInfo = get_object_or_404(UserInfo,userName=user)
userForm = UserForm(request.POST,instance=userInfo)
if userForm.is_valid():
userForm.save()
usefinfo = UserInfo.objects.all()
context = {
'msg': '現在の利用状況',
'userinfo': usefinfo,
'count':usefinfo.count,
}
#detail.htmlへデータを渡す
return render(request, 'form/users.html',context)
def syouhaiForm(request):
battleData = battledata.objects.all()
context = {
'battledata' : battleData,
}
return render(request, 'form/syouhaikai.html',context)
def syouriData(request,Id):
if request.method == 'POST':
Battledata = get_object_or_404(battledata,battleID=Id)
if Battledata.winner != "none":
context = {
'battledata' : Battledata,
}
return render(request,'form/error.html',context)
elif 'button1' in request.POST:
Battledata.winner = Battledata.userID1
Battledata.save()
index="勝利"
elif 'button2' in request.POST:
Battledata.winner = Battledata.userID2
Battledata.save()
index="敗北"
context = {
'battledata' : Battledata,
'index' : index,
}
return render(request, 'form/syouhaikekka.html',context)
| pyTakuya/djongoform | form/views.py | views.py | py | 3,361 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.UserInfo.objects.all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.UserInfo.objects",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "models.UserInfo",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": ... |
43134615624 | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys, os, warnings
gpu = sys.argv[ sys.argv.index('-gpu') + 1 ] if '-gpu' in sys.argv else '0'
os.environ['PYTHONHASHSEED'] = '0'
#os.environ['CUDA_VISIBLE_DEVICES']=gpu
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disable Tensorflow CUDA load statements
#warnings.filterwarnings('ignore')
from keras import backend as K
import tensorflow as tf
import copy
import argparse
import numpy as np
gpus = tf.config.list_physical_devices('GPU')
print("Num GPUs Available: ", gpus)
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
tf.config.experimental.set_memory_growth(gpus[int(gpu)], True)
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
import utilArgparse
import utilConst
import utilIO
import util
import CNNmodel
#util.init()
K.set_image_data_format('channels_last')
# ----------------------------------------------------------------------------
def menu():
parser = argparse.ArgumentParser(description='Data augmentation on test')
parser.add_argument('-m', required=False, help='Pathfile for the model')
parser.add_argument('-db_train_src', required=True, help='Dataset path for training (src imags)')
parser.add_argument('-db_train_gt', required=True, help='Dataset path for training (gt images)')
parser.add_argument('-db_test_src', required=False, help='Dataset path to test (src imags)')
parser.add_argument('-db_test_gt', required=False, help='Dataset path to test (gt images)')
parser.add_argument('-aug', nargs='*',
choices=utilConst.AUGMENTATION_CHOICES,
default=[utilConst.AUGMENTATION_NONE],
help='Data augmentation modes')
parser.add_argument('-npatches', default=-1, dest='n_pa', type=int, help='Number of patches to be extracted from training data')
parser.add_argument('-n_annotated_patches', default=-1, dest='n_an', type=int, help='Number of patches to be extracted from training data')
parser.add_argument('-window_w', default=256, dest='win_w', type=int, help='width of window')
parser.add_argument('-window_h', default=256, dest='win_h', type=int, help='height of window')
parser.add_argument('-l', default=4, dest='n_la', type=int, help='Number of layers')
parser.add_argument('-f', default=64, dest='nb_fil', type=int, help='Number of filters')
parser.add_argument('-k', default=5, dest='ker', type=int, help='kernel size')
parser.add_argument('-drop', default=0.2, dest='drop', type=float, help='dropout value')
parser.add_argument('-pages_train', default=-1, type=int, help='Number of pages to be used for training. -1 to load all the training set.')
parser.add_argument('-e', default=200, dest='ep', type=int, help='nb_epoch')
parser.add_argument('-b', default=16, dest='ba', type=int, help='batch size')
parser.add_argument('-verbose', default=1, type=int, help='1=show batch increment, other=mute')
parser.add_argument('--test', action='store_true', help='Only run test')
parser.add_argument('-res', required=False, help='File where append the results.')
parser.add_argument('-gpu', default='0', type=str, help='GPU')
parser.add_argument('-no_mask', required=False, action='store_true', help='File where append the results.')
args = parser.parse_args()
print('CONFIG:\n -', str(args).replace('Namespace(','').replace(')','').replace(', ', '\n - '))
return args
def tpc_result(result):
return round(result*100,1)
def number_to_string(number):
return str(tpc_result(number)).replace(".",",")
if __name__ == "__main__":
config = menu()
print (config)
if config.m is None:
path_model = utilIO.getPathModel(config)
else:
path_model = config.m
utilIO.createParentDirectory(path_model)
input_shape = util.getInputShape(config)
list_src_train = utilIO.listFilesRecursive(config.db_train_src)
list_gt_train = utilIO.listFilesRecursive(config.db_train_gt)
assert(len(list_src_train) == len(list_gt_train))
train_data, val_data = util.create_Validation_and_Training_partitions(
list_src_train=list_src_train,
list_gt_train=list_gt_train,
pages_train=config.pages_train)
if config.test == False: # TRAINING MODE
print("Training and validation partitioned...")
print("\tTraining: %d" %(len(train_data)))
print("\tValidation: %d" %(len(val_data)))
augmentation_val = ["none"]
if utilConst.AUGMENTATION_RANDOM in config.aug:
augmentation_val = ["random"]
model = CNNmodel.get_model(input_shape, config.no_mask, config.n_la, config.nb_fil, config.ker, dropout=config.drop, stride=2)
train_generator = util.create_generator(train_data, config.no_mask, config.ba, input_shape, config.n_pa, config.n_an, config.aug)
val_generator = util.create_generator(val_data, config.no_mask, config.ba, input_shape, config.n_pa, config.n_an, augmentation_val)
nb_train_pages = len(train_data)
nb_val_pages = len(val_data)
epochs = config.ep
patience = 20
print("Number of effective epochs: " + str(epochs))
print("Effective patience: " + str(patience))
if utilConst.AUGMENTATION_RANDOM in config.aug:
assert(config.n_pa!=-1)
steps_per_epoch = int(np.ceil((config.n_pa*nb_train_pages)/ config.ba))
else:
number_annotated_patches = util.get_number_annotated_patches(train_data, input_shape[0], input_shape[1], config.n_pa)
print ("Number of annotated patches: " + str(number_annotated_patches))
steps_per_epoch = np.ceil(number_annotated_patches/config.ba)
steps_per_epoch = max(1, steps_per_epoch)
CNNmodel.train(model, path_model, train_generator, val_generator, steps_per_epoch, nb_val_pages, config.ba, epochs, patience=patience)
else: #TEST MODE
list_src_test = utilIO.listFilesRecursive(config.db_test_src)
list_gt_test = utilIO.listFilesRecursive(config.db_test_gt)
assert(len(list_src_test) == len(list_gt_test))
test_data = utilIO.match_SRC_GT_Images(list_src_test, list_gt_test)
print("Obtaining best threshold...(Validation partition)")
threshold=None
best_fm_val, best_th_val, prec_val, recall_val, dict_predictions = util.compute_best_threshold(path_model, val_data, config.ba, input_shape, nb_annotated_patches=config.n_an, threshold=threshold, with_masked_input=False)
print("Results of the test...")
with_mask = not config.no_mask
dict_results = util.test_model(config, path_model, test_data, input_shape, best_th_val, with_mask)
separator = ";"
print ("SUMMARY:")
str_result = "VAL"+separator+str(best_th_val) + separator + number_to_string(best_fm_val) + separator + number_to_string(prec_val) + separator + number_to_string(recall_val) + "\n" #number_to_string(best_fm_val) + separator + number_to_string(prec_val) + separator + number_to_string(recall_val) + separator + str(best_th_val).replace(".", ",") + separator
best_fm_test = dict_results[utilConst.KEY_RESULT][0][0]
prec_test = dict_results[utilConst.KEY_RESULT][0][1]
recall_test = dict_results[utilConst.KEY_RESULT][0][2]
print("Results: " + number_to_string(best_fm_test) + separator + number_to_string(prec_test) + separator + number_to_string(recall_test))
str_result += key + separator + separator + number_to_string(best_fm_test) + separator + number_to_string(prec_test) + separator + number_to_string(recall_test) + separator + "\n"
print(str_result)
if config.res is not None:
utilIO.appendString(str_result, config.res, True)
| fjcastellanos/FewShotLayoutAnalysisMusic | main.py | main.py | py | 8,374 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv.index",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.list_phys... |
74454066272 | # Write a Python program to count the number of characters (character frequency) in a string.
from collections import Counter
m = 'google.com'
count = Counter(m)
print(count)
#Prgram to reverse words the string
def reverse_string_words(text):
    """Reverse the order of the words on each line of *text*.

    Fixed: the original returned inside the first loop iteration, so every
    line after the first was silently dropped. Now each line is processed
    and the lines are rejoined with newlines (identical output for
    single-line input).

    Args:
        text: Input string; lines are separated by '\n'.

    Returns:
        The text with each line's words in reverse order.
    """
    reversed_lines = [' '.join(line.split()[::-1]) for line in text.split('\n')]
    return '\n'.join(reversed_lines)
print(reverse_string_words("The quick brown fox jumps over the lazy dog."))
print(reverse_string_words("Python Exercises.")) | IswaryaJ/Practice | String/Prog1.py | Prog1.py | py | 447 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 4,
"usage_type": "call"
}
] |
17384052453 | """
Create a Blockchain
"""
import hashlib
import datetime
class Block:
    """One chain element: a timestamp, a payload, and the previous block's hash."""

    def __init__(self, timestamp, data, previous_hash):
        self.timestamp = timestamp
        self.data = data
        self.previous_hash = previous_hash
        # The block's own hash is fixed at construction time.
        self.hash = self.calc_hash()

    def calc_hash(self):
        """Return the SHA-256 hex digest of timestamp + previous hash + data.

        The three fields are stringified and concatenated in that exact order,
        so any change to the block contents yields a different digest.
        """
        digest = hashlib.sha256()
        material = "".join((str(self.timestamp), str(self.previous_hash), str(self.data)))
        digest.update(material.encode('utf-8'))
        return digest.hexdigest()
class BlockChain:
    """Singly linked chain of Blocks; only the newest (head) block is stored.

    Fixes: `== None` replaced by the identity check `is None`; `size` was
    initialised but never updated — it now counts the blocks appended via
    add() (the genesis block keeps the original initial value of 0).
    """

    def __init__(self):
        # Create the arbitrary first block and remember it as the head.
        self.curr_block = self.genesis()
        self.size = 0

    def genesis(self):
        """Create the first block of the chain with a dummy previous hash."""
        time = datetime.datetime.now()
        return Block(time, "Genesis", 0)

    def add(self, data):
        """Append a new block carrying *data*, linked to the current head."""
        time = datetime.datetime.now()
        if self.curr_block is None:
            self.curr_block = Block(time, data, 0)
            self.size += 1
            return
        # Link the blocks: the new block stores the old head's hash.
        previous_hash = self.curr_block.hash
        block = Block(time, data, previous_hash)
        self.curr_block = block
        self.size += 1  # fixed: size was never incremented in the original

    def get_block(self):
        """Return the newest block of the chain."""
        return self.curr_block
| DomingoCast/DS-and-algorithms | Blockchain.py | Blockchain.py | py | 1,551 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "hashlib.sha256",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "datetime.... |
8415907355 | from django.db import models
from django.contrib.auth.models import User
class Project(models.Model):
    """A top-level research project (name must be unique)."""

    name = models.CharField(max_length=256, unique=True)
    description = models.TextField()
class Dataset(models.Model):
    """A dataset that work packages can reference (see WorkPackage.datasets)."""

    name = models.CharField(max_length=256)
    description = models.TextField()
class Participant(models.Model):
    """Membership of a User in a Project, carrying a role.

    Each (user, project) pair may exist only once; deleting either side
    cascades to the membership row.
    """

    # Role codes as stored in the database.
    PROJECT_MANAGER = "PM"
    RESEARCHER = "RE"
    ADMINISTRATOR = "AD"
    ROLE_CHOICES = [
        (PROJECT_MANAGER, "Project Manager"),
        (RESEARCHER, "Researcher"),
        (ADMINISTRATOR, "Administrator"),
    ]

    role = models.CharField(
        max_length=50,
        choices=ROLE_CHOICES,
        default=RESEARCHER,
    )
    user = models.ForeignKey(
        User, related_name="participants", on_delete=models.CASCADE
    )
    project = models.ForeignKey(
        Project, related_name="participants", on_delete=models.CASCADE
    )

    class Meta:  # fixed: `class Meta():` call-style parentheses are non-idiomatic
        unique_together = ("user", "project")
class WorkPackage(models.Model):
    """A unit of work within a Project, with a New/Underway/Completed lifecycle."""

    # Status codes as stored in the database.
    NEW = "N"
    UNDERWAY = "U"
    COMPLETED = "C"
    STATUS_CHOICES = [
        (NEW, "New"),
        (UNDERWAY, "Underway"),
        (COMPLETED, "Completed"),
    ]
    status = models.CharField(
        max_length=32,
        choices=STATUS_CHOICES,
        default=NEW,
    )
    # Owning project; deleting the project cascades to its work packages.
    project = models.ForeignKey(
        Project, on_delete=models.CASCADE, related_name="work_packages"
    )
    name = models.CharField(max_length=256)
    description = models.TextField()
    # Participants are linked through the explicit WorkPackageParticipant
    # model, which enforces uniqueness per (participant, work_package).
    participants = models.ManyToManyField(
        Participant,
        related_name="work_packages",
        through="WorkPackageParticipant",
        blank=True,
    )
    datasets = models.ManyToManyField(
        Dataset, related_name="work_packages", blank=True
    )
class WorkPackageParticipant(models.Model):
    """Through-model joining a Participant to a WorkPackage (unique per pair)."""

    work_package = models.ForeignKey(
        WorkPackage, related_name="work_package_participants", on_delete=models.CASCADE
    )
    # related_name="+" suppresses the reverse accessor on Participant.
    participant = models.ForeignKey(
        Participant, related_name="+", on_delete=models.CASCADE
    )

    class Meta:  # fixed: `class Meta():` call-style parentheses are non-idiomatic
        unique_together = ("participant", "work_package")
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
74935568354 | import numpy as np
from pdb import set_trace
# load text data
# Corpus and encoding setup for the character-level RNN.
# Fixed: removed two leftover pdb.set_trace() debugger breakpoints that
# halted the script on every run.
txt_data = "abcdefghijklmnopqrstuvwxyz abcdefghijklmnopqrstuvwxyz abcdefghijklmnopqrstuvwxyz " # input data
# txt_data = open('input.txt', 'r').read() # test external files
chars = list(set(txt_data)) # unique characters of the corpus (order is set-dependent)
num_chars = len(chars) # the number of unique characters
txt_data_size = len(txt_data)
# one hot encode: map each character to an integer index and back
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
# integer encode input data
integer_encoded = [char_to_int[i] for i in txt_data]
# One-hot matrix of the whole corpus (not used by the training loop below).
onehot_encoded = []
for ix in integer_encoded:
    letter = [0 for _ in range(len(chars))]
    letter[ix] = 1
    onehot_encoded.append(letter)
onehot_encoded = np.array(onehot_encoded)
# invert encoding: sanity check that a one-hot row maps back to a character
inverted = int_to_char[np.argmax(onehot_encoded[0])] # "argmax" returns the index of the largest value.
# hyperparameters
iteration = 5000
sequence_length = 10
batch_size = round((txt_data_size / sequence_length) + 0.5) # ceil-style chunk count
hidden_size = 100 # size of hidden layer of neurons.
learning_rate = 1e-1
# model parameters, small random init
W_xh = np.random.randn(hidden_size, num_chars) * 0.01 # weight input -> hidden.
W_hh = np.random.randn(hidden_size, hidden_size) * 0.01 # weight hidden -> hidden
W_hy = np.random.randn(num_chars, hidden_size) * 0.01 # weight hidden -> output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((num_chars, 1)) # output bias
h_prev = np.zeros((hidden_size,1)) # h_(t-1)
def forwardprop(inputs, targets, h_prev):
    """One forward pass over a sequence of character indices.

    Reads the weights W_xh/W_hh/W_hy, biases b_h/b_y and num_chars from
    module scope. Returns (loss, softmax probs per step, hidden states per
    step, one-hot inputs per step) keyed by time step.
    """
    # Since the RNN receives the sequence, the weights are not updated during one sequence.
    xs, hs, ys, ps = {}, {}, {}, {} # dictionary
    hs[-1] = np.copy(h_prev) # Copy previous hidden state vector to -1 key value.
    loss = 0 # loss initialization
    for t in range(len(inputs)): # t is a "time step" and is used as a key(dic).
        xs[t] = np.zeros((num_chars,1))
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state.
        ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars.
        # Softmax. -> The sum of probabilities is 1 even without the exp() function, but all of the elements are positive through the exp() function.
        loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss). Efficient and simple code
        # y_class = np.zeros((num_chars, 1))
        # y_class[targets[t]] =1
        # loss += np.sum(y_class*(-np.log(ps[t]))) # softmax (cross-entropy loss)
    return loss, ps, hs, xs
def backprop(ps, inputs, hs, xs):
    """Backpropagation through time; returns (dWxh, dWhh, dWhy, dbh, dby).

    NOTE(review): `targets` is read from module scope rather than passed as a
    parameter — this only works because the training loop assigns it just
    before calling; consider adding it to the signature.
    """
    dWxh, dWhh, dWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy) # make all zero matrices.
    dbh, dby = np.zeros_like(b_h), np.zeros_like(b_y)
    dhnext = np.zeros_like(hs[0]) # (hidden_size,1)
    # reversed: walk the time steps from last to first
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t]) # shape (num_chars,1). "dy" means "dloss/dy"
        dy[targets[t]] -= 1 # backprop into y. After taking the soft max in the input vector, subtract 1 from the value of the element corresponding to the correct label.
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(W_hy.T, dy) + dhnext # backprop into h.
        dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity #tanh'(x) = 1-tanh^2(x)
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t-1].T)
        dhnext = np.dot(W_hh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients.
    return dWxh, dWhh, dWhy, dbh, dby
data_pointer = 0
# memory variables for Adagrad (per-parameter accumulated squared gradients)
mWxh, mWhh, mWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mbh, mby = np.zeros_like(b_h), np.zeros_like(b_y)
# Training loop: `iteration` epochs over the corpus in sequence_length-sized chunks.
for i in range(iteration):
    h_prev = np.zeros((hidden_size,1)) # reset RNN memory
    data_pointer = 0 # go from start of data
    for b in range(batch_size):
        # Inputs are chars [p, p+L); targets are the same window shifted by one.
        inputs = [char_to_int[ch] for ch in txt_data[data_pointer:data_pointer+sequence_length]]
        targets = [char_to_int[ch] for ch in txt_data[data_pointer+1:data_pointer+sequence_length+1]] # t+1
        if (data_pointer+sequence_length+1 >= len(txt_data) and b == batch_size-1): # processing of the last part of the input data.
            # targets.append(char_to_int[txt_data[0]]) # When the data doesn't fit, add the first char to the back.
            targets.append(char_to_int[" "]) # When the data doesn't fit, add space(" ") to the back.
        # forward
        loss, ps, hs, xs = forwardprop(inputs, targets, h_prev)
        # print(loss)
        # backward
        dWxh, dWhh, dWhy, dbh, dby = backprop(ps, inputs, hs, xs)
        # perform parameter update with Adagrad (in-place += keeps the zip'd
        # references pointing at the real parameter arrays)
        for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
                                      [dWxh, dWhh, dWhy, dbh, dby],
                                      [mWxh, mWhh, mWhy, mbh, mby]):
            mem += dparam * dparam # elementwise
            param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
        data_pointer += sequence_length # move data pointer
    if i % 100 == 0:
        print ('iter %d, loss: %f' % (i, loss)) # print progress
def predict(test_char, length):
    """Sample and print `length` characters from the model, seeded with *test_char*.

    At each step the softmax distribution over the next character is sampled
    and the sampled character is fed back as the next input.
    """
    x = np.zeros((num_chars, 1))
    x[char_to_int[test_char]] = 1
    ixes = []
    h = np.zeros((hidden_size,1))
    for t in range(length):
        h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)
        y = np.dot(W_hy, h) + b_y
        p = np.exp(y) / np.sum(np.exp(y))
        ix = np.random.choice(range(num_chars), p=p.ravel()) # ravel -> rank0
        # "ix" is a list of indexes selected according to the soft max probability.
        x = np.zeros((num_chars, 1)) # init: one-hot of the sampled char becomes the next input
        x[ix] = 1
        ixes.append(ix) # list
    txt = ''.join(int_to_char[i] for i in ixes)
    print ('----\n %s \n----' % (txt, ))
predict('b', 30)
| gov-ind/char-rnn | p.py | p.py | py | 6,284 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pdb.set_trace",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pdb.set_trace",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_numbe... |
2744513023 | from setuptools import setup, find_packages
from pathlib import Path
this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()
def get_version(rel_path):
    """Return the __VERSION__ string found in *rel_path* (relative to setup.py).

    The version line may use either single or double quotes; the value between
    the first pair of quotes is returned. Raises RuntimeError when no
    __VERSION__ line exists.
    """
    for candidate in (this_directory / rel_path).read_text().splitlines():
        if candidate.startswith('__VERSION__'):
            quote = '"' if '"' in candidate else "'"
            return candidate.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
def requires_from_file(filename):
    """Return the lines of *filename* as a list (one requirement per line).

    Fixed: the original called open() without ever closing the handle; a
    context manager guarantees the file is closed.
    """
    with open(filename) as req_file:
        return req_file.read().splitlines()
# NOTE(review): LONG_DESCRIPTION duplicates `long_description` computed near
# the top of the file — consider reusing one of them.
LONG_DESCRIPTION = (this_directory / "README.md").read_text()
SHORT_DESCRIPTION = "Utility for web scraping."
setup(
    name="scrapinghelper",
    version=get_version('scrapinghelper/versions.py'),
    license="MIT",
    install_requires=requires_from_file('requirements.txt'),
    # Optional feature sets: scrapinghelper[socks] / scrapinghelper[converter]
    extras_require={
        "socks": ["PySocks>=1.5.6, !=1.5.7"],
        "converter": [ "multimethod>=1.8" ],
    },
    author="Goichi (Iisaka) Yukawa",
    author_email="iisaka51@gmail.com",
    url="https://github.com/iisaka51/scrapinghelper",
    description=SHORT_DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    # Ship bundled data files inside every package.
    package_data={'': [ 'data/*.csv', 'data/LICENSE' ]},
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
)
| iisaka51/scrapinghelper | setup.py | setup.py | py | 1,519 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 36,
"usage_type": "call"
}
] |
27037063648 | from rest_framework import serializers
class RegisterDnaSerializer(serializers.Serializer):
    """Input payload: a DNA matrix given as a list of row strings."""

    dna = serializers.ListField(
        child=serializers.CharField()
    )
class AllRegisterDnaSerializer(serializers.Serializer):
    """Read serializer for a stored DNA record and its mutant verdict."""

    id = serializers.IntegerField()
    dna = serializers.CharField()
    isMutant = serializers.BooleanField()
class Stats(object):
    """Plain value object: mutant/human DNA counters plus their ratio."""

    def __init__(self, count_mutant_dna, count_human_dna, ratio):
        # Group the three related fields in a single assignment.
        self.count_mutant_dna, self.count_human_dna, self.ratio = (
            count_mutant_dna,
            count_human_dna,
            ratio,
        )
class StatsSerializer(serializers.Serializer):
    """Serializes a Stats object: both counters and the ratio (2 decimals)."""

    count_mutant_dna = serializers.IntegerField()
    count_human_dna = serializers.IntegerField()
    ratio = serializers.DecimalField(max_digits=5, decimal_places=2)
| felipehoyos1110/Mutant-magneto | xMen/mutant/serializers.py | serializers.py | py | 771 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ListField",
"line_number": 5,
"usage_type"... |
24280152763 | import requests
import bs4 as bs
import pandas as pd
import pickle
import sys
from datetime import datetime
import math
# # Puxa o nome dos 1965 ativos que vieram do site da uol e estao no arquivo chamado ativos
# with open('ativos', 'rb') as f:
# ativos = pickle.load(f)
# ########
# Lista de acoes que o programa vai puxar
acoes = ['PETR4.SA', 'ABEV3.SA', 'B3SA3.SA', 'BBAS3.SA', 'BBDC3.SA', 'BBSE3.SA', 'VALE3.SA',
'WEGE3.SA', 'USIM5.SA', 'TAEE11.SA', 'ITUB4.SA', 'MGLU3.SA', 'KROT3.SA', 'FLRY3.SA',
'EGIE3.SA', 'CVCB3.SA', 'CIEL3.SA', 'BRFS3.SA', 'ITSA4.SA', 'LAME4.SA']
#########
# Nome base das colunas, na hora o codigo ja adiciona o ticker da acao no final
columns = ['data', 'cotacao', 'min', 'max', 'variacao', 'variacao_percent', 'volume']
#########
diainicial = input('Digite o dia de ínicio de sua série: ')
mesinicial = input('Digite o mês de ínicio de sua série: ')
anoinicial = input('Digite o ano de ínicio de sua série: ')
diafinal = input('Digite o dia do fim de sua série: ')
mesfinal= input('Digite o mês do fim de sua série: ')
anofinal = input('Digite o ano do fim de sua série: ')
# URL que vai puxar o payload base. Se for mudar as datas tem que mudar aqui
url = 'http://cotacoes.economia.uol.com.br/acao/cotacoes-historicas.html'
base_payload = {'codigo': '', 'beginDay': diainicial,
'beginMonth': mesinicial, 'beginYear': anoinicial,
'endDay': diafinal, 'endMonth': mesfinal,
'endYear': anofinal, 'page': 1, 'size': 200}
#########
# Funcao que recebe um payload e retorna um dataframe com os
# dados da acao passado no codigo do payload
def get_data(payload):
    """Scrape every result page for the stock in payload['codigo'] into a DataFrame.

    Column names get the ticker (minus its 3-char '.SA' suffix) appended.
    NOTE: mutates payload['page'] while paginating, so callers must reset it
    before reuse (the caller below does).
    """
    global url
    global columns
    stock_columns = [x + '_' + payload['codigo'][:-3] for x in columns]
    df = pd.DataFrame(columns=stock_columns)
    print('Pegando dados da acao ' + payload['codigo'] + ' (', end='', flush=True)
    while True:
        html = requests.get(url, params=payload)
        soup = bs.BeautifulSoup(html.text, 'html5lib')
        tables = soup.findAll('table', {'id': 'tblInterday', 'class': 'tblCotacoes'})
        # An empty result page means we paged past the data (or the ticker is unknown).
        if len(tables) == 0:
            if payload['page'] == 1:
                print('Acao nao encontrada')
            break
        for table in tables:
            table_values = table.findChildren('tbody')
            trs = table_values[0].findAll('tr')
            for tr in trs:
                # One quote row per <tr>; append at the end of the frame.
                values = [td.text for td in tr.findAll('td')]
                df.loc[len(df)] = values
        print('.', end='', flush=True)
        payload['page'] += 1
    print(') DONE')
    return df
# Rodando a funcao para todas as acoes da lista acoes e guarndando os resultados em dfs
dfs = []
for i in acoes:
base_payload['codigo'] = i
base_payload['page'] = 1
a = get_data(base_payload)
dfs.append(a)
# concatenando todos os dataframes em dfs e alvando em um arquivo chamado dataframe_acoes
result = pd.concat(dfs, axis=1)
print(result.head())
result.to_pickle('dataframe_acoes')
# O arquivo foi salvo no formato pickle pois nesse formato a leitura e escrita eh mais rapida
# se achar melhor pode salvar um csv tambem
# Pra ler do arquivo pickle basta usar
df = pd.read_pickle('dataframe_acoes')
# print(df)
# Clean up the imported price table: values arrive in Brazilian number format
# ("1.234,56"), so drop the thousands dots and turn decimal commas into dots.
# Fixed: regex=False makes the replacement literal — with the old pandas
# default (regex=True) the pattern "." matched every character.
for column in df:
    df[column] = df[column].str.replace(".", "", regex=False)
for column in df:
    df[column] = df[column].str.replace(",", ".", regex=False)
# Use the first column (dates) as the index.
# Fixed: DataFrame.ix was deprecated and removed from pandas; iloc is the
# positional equivalent.
lista = list(df.iloc[:, 0])
lista = lista[0:-1]
lista1 = []
for i in range(0, len(lista)):
    lista1.append(datetime.strptime(lista[i], '%d/%m/%Y'))
# The last row is dropped: 01/01/2017 is a holiday and carries no values.
df = df[:-1]
df['index'] = lista1
df = df.set_index('index')  # use the parsed dates as the index
df.sort_values(by=['index'], inplace=True, ascending=True)  # oldest first
print(df.head())
#Transformando em números
df = df.apply(pd.to_numeric,errors='coerce')
#Retirando as demais colunas de datas
for column in df:
if column.startswith('data'):
df.drop([column],axis=1,inplace=True)
print(df)
#Transformar o tempo em contínuo
datainicial_aux= diainicial+'/'+mesinicial+'/'+anoinicial
datainicial = datetime.strptime(datainicial_aux, '%d/%m/%Y')
datafinal_aux=diafinal+'/'+mesfinal+'/'+anofinal
datafinal = datetime.strptime(datafinal_aux, '%d/%m/%Y')
delta = (datafinal - datainicial).days
df1 = pd.DataFrame(index=pd.date_range(datainicial, periods=delta),columns=[c for c in df])
print(df1.head(20))
#Funcão para transformar a nova matriz para a data contínua
def dfajust(df_old, df_new):
    """Copy rows of df_old into df_new by matching dates, then forward-fill gaps.

    Mutates df_new in place and also returns it. NOTE(review): the index
    matching is O(len(df_old) * len(df_new)); a reindex/ffill would be far
    cheaper. Also, when the very first row is still NaN, j-1 is -1, so it is
    filled from the LAST row (wrap-around) — confirm this is intended.
    """
    for i in range(len(df_old)):
        for j in range(len(df_new)):
            if (df_new.index[j]==df_old.index[i]):
                df_new.iloc[j]=df_old.iloc[i]
    # Forward-fill: a row whose first cell is NaN takes the previous row's values.
    for j in range (len(df_new)):
        if math.isnan(df_new.iloc[j][0]):
            df_new.iloc[j]=df_new.iloc[j-1]
    return df_new
df1 = dfajust(df,df1)
print(df1.head(15))
| pedrocampeloa/UnB-LMF-Data-Science | algoritimo1.py | algoritimo1.py | py | 5,095 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"... |
31496879213 | import serial
import time
ser = serial.Serial('/dev/ttyACM0', baudrate = 9600, timeout = 1)
time.sleep(3)
numPoints = 17 #no.of values coming
dataList = [0]*numPoints
def getValues():
    """Read one ':'-separated line of numPoints fields from the serial port.

    Returns:
        A list of numPoints ascii-encoded byte strings, or None when the line
        did not split into exactly numPoints fields (partial/garbled read).
    """
    arduinoOutput = ser.readline().decode().split(':')
    if numPoints != len(arduinoOutput):
        return None  # malformed or partial line
    for i in range(0, numPoints - 1):
        arduinoOutput[i] = arduinoOutput[i].encode('ascii', 'ignore')
    # Strip the trailing '\r\n' from the last field BEFORE encoding.
    # Fixed: the original encoded first and then called bytes.split('\r\n')
    # with a str separator, which raises TypeError on Python 3.
    arduinoOutput[numPoints - 1] = arduinoOutput[numPoints - 1].split('\r\n')[0].encode('ascii', 'ignore')
    return arduinoOutput
while(1):
data = getValues()
#for i in range(0,numPoints):
#dataList[i] = data[i]
print(data)
| Niyas-A/auto | src/imu/src/imu_pub.py | imu_pub.py | py | 901 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 5,
"usage_type": "call"
}
] |
33827589363 | import redis
import json
import threading
import rospy
from movebase import MoveBase as AdmMove
r = redis.Redis()
p = r.pubsub()
p.subscribe('ros-panel')
map_metadata = None
map_filename = None
current_pos = None
def setupdone(minfo,mapfilename):
    """MoveBase setup callback: cache the map metadata and filename globally."""
    global map_metadata, map_filename
    map_metadata = minfo
    map_filename = mapfilename
    # r.publish('ros-web','setupdone-message')
mb = AdmMove()
mb.setup(setupdone)
def GetPosition(data):
    """Pose callback: republish position + orientation as JSON on 'ros-amcl'.

    NOTE(review): this assigns a LOCAL current_pos and never updates the
    module-level current_pos defined above — confirm the global is still needed.
    `data` is presumably a pose message with pose.pose (e.g. AMCL output) — verify.
    """
    current_pos = {
        "pos": {
            "x": data.pose.pose.position.x,
            "y": data.pose.pose.position.y
        },
        "orientation": {
            "x": data.pose.pose.orientation.x,
            "y": data.pose.pose.orientation.y,
            "z": data.pose.pose.orientation.z,
            "w": data.pose.pose.orientation.w
        }
    }
    r.publish('ros-amcl',json.dumps(current_pos))
mb.attach_pos(GetPosition)
def move_finished(status):
    """Goal-completion callback: forward the status to the 'ros-goalstatus' channel."""
    r.publish('ros-goalstatus',status)
def process_message(message):
    """Dispatch one pubsub message from the 'ros-panel' channel.

    'message' entries carry a JSON command payload (move / set_initial_pose);
    'mapdata' entries trigger a startup notification for the panel.
    """
    if message["type"] == 'message':
        data = json.loads(message["data"].decode('utf-8'))
        cmd = data["cmd"]
        if cmd == "move":
            location = data["location"]
            mb.run_to_point(location["x"], location["y"], location["z"], location["angle"], move_finished)
        if cmd == "set_initial_pose":
            mb.set_initialpose(data["pose"]["x"], data["pose"]["y"], 0.0, data["pose"]["angle"])
    if message["type"] == "mapdata":
        global map_metadata, map_filename
        # Fixed: was json.loads({metadata: map_metadata}) — `metadata` is an
        # undefined name and json.loads does not accept a dict; serialize instead.
        # NOTE(review): `data` is built but not published yet — confirm intent.
        data = json.dumps({"metadata": map_metadata})
        r.publish('ros-panel-startup', 'worker-test')
try:
while True:
message = p.get_message()
if (message):
process_message(message)
except KeyboardInterrupt:
print("quitting") | adm-sglm/ros-project | src/worker.py | worker.py | py | 1,913 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "redis.Redis",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "movebase.MoveBase",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number... |
70427984994 | import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import linregress
# Data
# Measured data: count rate observed at each detector voltage.
voltage = np.array([710, 730, 750, 770, 790, 810, 830, 850, 870, 890, 910, 930, 950, 970, 990])
count = np.array([91, 102, 90, 137, 142, 149, 154, 126, 160, 160, 156, 150, 181, 143, 168])
# Degree-2 least-squares fit; poly1d wraps the coefficients as a callable.
# NOTE(review): scipy.stats.linregress is imported above but never used here.
coefficients = np.polyfit(voltage, count, 2)
polynomial = np.poly1d(coefficients)
# Generate y-values for the quadratic line of best fit
line_of_best_fit = polynomial(voltage)
# Plotting Count vs. Voltage with the line of best fit
plt.figure(figsize=(10, 6))
plt.plot(voltage, count, marker='o', linestyle='', color='b', label='Observed Counts')
plt.plot(voltage, line_of_best_fit, linestyle='-', color='r', label='Line of Best Fit')
plt.title('Count Rate vs. Voltage')
plt.xlabel('Voltage (V)')
plt.ylabel('Count Rate')
plt.legend()
plt.grid(True)
plt.show()
| NolanTrem/phys1494 | experiment10/background.py | background.py | py | 845 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.poly1d",
"line_number": ... |
21753957627 | from config_data import config
import requests
import json
import re
from loader import logger
endpoint_search = 'locations/v2/search'
endpoint_hotels = 'properties/list'
endpoint_photo = 'properties/get-hotel-photos'
def json_mod(text, file_name):
    """Parse *text* as JSON, write it pretty-printed to *file_name*, and
    return the object re-read from that file.

    Fixed: the original kept `mod = json.dump(...)` (json.dump returns None,
    so the assignment was dead) and opened the files without guaranteeing
    closure between the write and the re-read.
    """
    data = json.loads(text)
    with open(file_name, 'w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False, indent=4)
    with open(file_name, 'r', encoding='utf-8') as file:
        return json.load(file)
def location_processing(endpoint, locale=None, currency=None, city=None, city_id=None, checkin=None, checkout=None, sort_order=None,
                        hotel_id=None, price_min=None, price_max=None,
                        number=None, user=None):  # helper for looking up a city id
    """Query the Hotels API and return the parsed JSON response.

    NOTE(review): api_requests returns False on request errors, and
    json.loads(False) raises TypeError — consider guarding the result.
    """
    search_city = api_requests(endpoint, locale, currency, city, city_id, checkin,
                               checkout, sort_order, hotel_id, price_min, price_max,
                               number, user)  # fetch all city information via the Hotels API
    mod_search_city = json.loads(search_city)  # parse the JSON payload into Python objects
    return mod_search_city
def api_requests(endpoint, locale=None, currency=None, city=None, city_id=None, checkin=None, checkout=None, sort_order=None,
                 hotel_id=None, price_min=None, price_max=None, number=None, user=None):
    """Perform a GET against the RapidAPI Hotels endpoint and return the raw text.

    The query string is shaped per endpoint (city search, hotel list sorted by
    stars, generic hotel list, hotel photos).

    Returns:
        The response body as text, or False when the request failed.
        NOTE(review): mixing str and False return types forces callers to
        type-check — consider raising or returning None instead.
    """
    url = f"https://hotels4.p.rapidapi.com/{endpoint}"
    querystring = {}
    if endpoint == 'locations/v2/search':
        querystring = {"query": city, "locale": locale, "currency": currency}
    elif endpoint == 'properties/list' and sort_order == 'STAR_RATING_HIGHEST_FIRST':
        querystring = {"destinationId": city_id, "pageNumber": f"{number}", "pageSize": "25", "checkIn": checkin,
                       "checkOut": checkout, "adults1": "1", "priceMin": price_min, "priceMax": price_max,
                       "sortOrder": sort_order, "locale": locale, "currency": currency, "landmarkIds": "Центр города"}
    elif endpoint == 'properties/list':
        querystring = {"destinationId": city_id, "pageNumber": "1", "pageSize": "25", "checkIn": checkin,
                       "checkOut": checkout, "adults1": "1", "sortOrder": sort_order, "locale": locale,
                       "currency": currency}
    elif endpoint == 'properties/get-hotel-photos':
        querystring = {"id": hotel_id}
    headers = {
        "X-RapidAPI-Key": config.RAPID_API_KEY,
        "X-RapidAPI-Host": "hotels4.p.rapidapi.com"
    }
    try:
        response = requests.request("GET", url, headers=headers, params=querystring, timeout=30)
        return response.text
    except requests.exceptions.RequestException as exc:
        logger.error("ID пользователя - {user} | Ошибка при обращении к API", user=user)
        return False
# t = location_processing(city='Нью-Йорк', endpoint=endpoint_search)
| russe19/telegram-bot-project | api_requests.py | api_requests.py | py | 3,060 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 31,
... |
4652979163 | from typing import Dict, List
from datamodel import OrderDepth, TradingState, Order
import numpy as np
class Trader:
    """Momentum trader for the BANANAS product.

    Keeps a rolling window of deviations of the current average quote price
    from the first observed average and trades in the direction of the mean
    deviation. (Cleanup: removed the large blocks of dead commented-out
    moving-average code from an earlier strategy.)
    """

    # Length of the rolling deviation window.
    period = 15

    def __init__(self) -> None:
        # price_log[0] is the most recent deviation; older entries rotate right.
        self.price_log = np.zeros(Trader.period)
        # First observed average price per product (set once, never updated).
        self.past_day = {}

    def run(self, state: TradingState) -> Dict[str, List[Order]]:
        """Process one market snapshot and return the orders to place per product."""
        print("\n\n New day/run at " + str(state.timestamp))
        print(str(state.toJSON()))
        result = {}
        for product in state.order_depths.keys():
            if (product == "BANANAS"):
                order_depth: OrderDepth = state.order_depths[product]
                orders: list[Order] = []
                # Plain average over every quoted price level on both sides.
                average = sum(order_depth.sell_orders.keys()) + sum(order_depth.buy_orders.keys())
                denominator_for_avg = len(order_depth.sell_orders) + len(order_depth.buy_orders)
                if (denominator_for_avg != 0):
                    average /= denominator_for_avg
                if (product not in self.past_day):
                    self.past_day[product] = average
                # NOTE(review): past_day is never updated after first sight, so
                # this logs drift from the FIRST average, not a one-step delta —
                # confirm this anchoring is intended.
                self.price_log = np.roll(self.price_log, 1)
                self.price_log[0] = average - self.past_day[product]
                print(f"MEAN IS: {self.price_log.mean()}")
                if len(order_depth.sell_orders) > 0:
                    best_ask = min(order_depth.sell_orders.keys())
                    best_ask_volume = order_depth.sell_orders[best_ask]
                    print(f"The current best price for buying {product} is: " + str(best_ask) + ". ")
                    # Positive mean drift -> lift the best ask.
                    if self.price_log.mean() > 0:
                        print("BUY", str(-best_ask_volume) + "x", best_ask)
                        orders.append(Order(product, best_ask, -best_ask_volume))
                if len(order_depth.buy_orders) > 0:
                    best_bid = max(order_depth.buy_orders.keys())
                    best_bid_volume = order_depth.buy_orders[best_bid]
                    print("The current best price for selling bananas is: " + str(best_bid))
                    # Negative mean drift -> hit the best bid.
                    if self.price_log.mean() < 0:
                        print("SELL", str(best_bid_volume) + "x", best_bid)
                        orders.append(Order(product, best_bid, -best_bid_volume))
                print(str(state.position))
                result[product] = orders
        return result
#trader = Trader()
#trader.run(example_state_1) | Samukat/IMC_Trading_game | Sam/momentum.py | momentum.py | py | 3,833 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datamodel.TradingState",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datamodel.OrderDepth",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "datamodel.Ord... |
38204015096 | import os,json
from .exceptions import NsValueIsNotDict, NsUuidDoesNotExist
from .nbiapi.identity import bearer_token
from .nbiapi.ns import Ns
from .nbiapi.vim import Vim
from .nbiapi.vnf import Vnf
def getToken():
    """Obtain an NBI bearer token from the OSM_ADMIN_CREDENTIALS env variable.

    Returns:
        The bearer token, or None when the env variable is unset/empty.
    """
    creds = os.environ.get('OSM_ADMIN_CREDENTIALS')
    token = None
    if creds:
        creds = json.loads(creds)
        # NOTE(review): the username is passed for BOTH arguments; if
        # bearer_token expects (username, password) this should read
        # creds.get('password') — confirm against the identity API.
        token = bearer_token(str(creds.get('username')), str(creds.get('username')))
    return token
def get_ns_uuid_from_message(message):
    """Extract the UUID of the NS to be instantiated/terminated from the message.

    In OSM r4 the Kafka 'ns' topic message is a dict containing (among other
    keys) 'nsInstanceId', e.g.::

        {"nsInstanceId": "af6de0c1-7279-427e-9b68-1fa0e493d31d",
         "lcmOperationType": "terminate", ...}

    Args:
        message (dict): The value of the message as published in the Kafka broker

    Returns:
        str: The UUID of the NS to be instantiated/terminated

    Raises:
        NsValueIsNotDict: when the message is not a dict
        NsUuidDoesNotExist: when the 'nsInstanceId' key is missing
    """
    if not isinstance(message, dict):
        # Fixed: the original message called .format(message) on a template with
        # no placeholder, so the offending value was silently dropped.
        raise NsValueIsNotDict(
            'The value of the message in the ns topic is not dict: {}'.format(message))
    if 'nsInstanceId' not in message:
        raise NsUuidDoesNotExist(
            'The nsInstanceId key is not included in the value of the message {} in the topic ns'.format(message))
    return str(message['nsInstanceId'])
def convert_byte_to_str(term):
    """Convert a term from bytes to str (UTF-8).

    Args:
        term (bytes): The term in bytes

    Returns:
        str: The term in str
    """
    return str(term, "utf-8")
def compose_redis_key(vim_name, vdu_uuid):
    """Compose the redis key "<vim_name_lowercased>:<vdu_uuid>".

    Args:
        vim_name (str): The VIM name
        vdu_uuid (str): The VDU uuid (NFVI based)

    Returns:
        str: the key for redis
    """
    return ":".join((vim_name.lower(), str(vdu_uuid)))
def get_vim_info(vim_uuid=None):
    """Get the VIM name, type and url for a given VIM uuid.

    Args:
        vim_uuid (str): The VIM uuid; when None, a dict of Nones is returned.

    Returns:
        dict: the VIM uuid, name, type and url
    """
    if vim_uuid is None:
        return {"uuid": vim_uuid, "name": None, "type": None, "url": None}
    # Consistency fix: reuse the shared getToken() helper instead of
    # duplicating the OSM_ADMIN_CREDENTIALS parsing logic inline.
    token = getToken()
    vim = Vim(token)
    response = vim.get(vim_uuid=vim_uuid)
    data = response.json()
    vim_info = {
        "uuid": vim_uuid,
        "name": data.get('name', None),
        "type": data.get('vim_type', None),
        "url": data.get('vim_url', None)
    }
    return vim_info
def get_vdus_info(ns_uuid=None):
    """Collect NS, VNF and VDU information for a given NS uuid.

    Args:
        ns_uuid (str): The NS uuid

    Returns:
        dict: ns, vnf and vdu info (empty list when no uuid/token)
    """
    collected = []
    # Guard clauses: nothing to collect without an NS uuid or a token.
    if ns_uuid is None:
        return collected
    token = getToken()
    if token is None:
        return collected
    nsr = Ns(token).get(ns_uuid=ns_uuid).json()
    # Resolve the VIM hosting this NS (shared by every VDU entry).
    vim_info = get_vim_info(vim_uuid=nsr.get('datacenter', None))
    vnf_client = Vnf(token)
    # Walk the VNF members of the NS and flatten their VDU records.
    for vnf_uuid in nsr.get('constituent-vnfr-ref', []):
        vnf_record = vnf_client.get(vnf_uuid=vnf_uuid).json()
        ns_part = {
            "id": ns_uuid,
            "name": nsr.get('name-ref', None),
            "nsd_id": nsr.get('nsdId', None),
            "nsd_name": nsr.get('nsd-name-ref', None)
        }
        vnf_part = {
            "id": vnf_record.get("id", None),
            "name": None,  # not provided in osm r4
            "short_name": None,  # not provided in osm r4
            "vnfd_id": vnf_record.get("vnfd-id", None),
            "vnfd_name": None  # not provided in osm r4
        }
        for vdu_record in vnf_record.get('vdur', []):
            vdu_part = {
                "id": vdu_record.get("vim-id", None),  # NFVI-based uuid
                "image_id": None,
                "flavor": {},
                "status": vdu_record.get("status", None),
                "ip_address": vdu_record.get("ip-address", None),
                "mgmt-interface": None  # future usage
            }
            # Fresh ns/vnf dicts per entry, mirroring the original output shape.
            collected.append({
                "vim": vim_info,
                "ns": dict(ns_part),
                "vnf": dict(vnf_part),
                "vdu": vdu_part
            })
    return collected
| sonata-nfv/son-monitor | vnv_manager/app/api/management/commands/osm/utils.py | utils.py | py | 5,464 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nbiapi.identity.bearer_toke... |
31234711983 | from itertools import permutations
def solution(k, dungeons):
    """Return the maximum number of dungeons that can be explored.

    Tries every visiting order and simulates the run: a dungeon is
    enterable while remaining fatigue is at least its required minimum,
    and exploring it consumes its fatigue cost.

    Args:
        k: starting fatigue.
        dungeons: sequence of (required_fatigue, consumed_fatigue) pairs.

    Returns:
        int: best count of explored dungeons over all orders.
    """
    answer = -1
    total = len(dungeons)
    # Iterate permutations lazily instead of materialising the full list.
    for order in permutations(dungeons, total):
        tired = k
        cnt = 0
        for need, cost in order:
            if tired < need:
                break
            tired -= cost
            cnt += 1
        answer = max(cnt, answer)
        # Early exit: no order can beat exploring every dungeon.
        if answer == total:
            return answer
    return answer
| earthssu/Programmers-Algorithm | Level2/피로도.py | 피로도.py | py | 401 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.permutations",
"line_number": 5,
"usage_type": "call"
}
] |
27493427622 | import pygame
import random
import tkinter as tk
# Set up the game window
WINDOW_WIDTH = 500
WINDOW_HEIGHT = 500
pygame.init()
WINDOW = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Snake Game")
# Set up the colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
# Set up the font for the score display
SCORE_FONT = ('times', 24)
# Set up the Snake class
class Snake:
    """The player-controlled snake: an ordered list of 10px segments,
    head first."""

    # Movement delta and forbidden (opposite) heading per direction.
    _DELTAS = {'right': (10, 0), 'left': (-10, 0), 'up': (0, -10), 'down': (0, 10)}
    _OPPOSITE = {'right': 'left', 'left': 'right', 'up': 'down', 'down': 'up'}

    def __init__(self, x=250, y=250):
        self.body = [(x, y)]
        self.direction = 'right'
        self.score = 0

    def _next_head(self):
        """Return the coordinates one step ahead of the current head."""
        head_x, head_y = self.body[0]
        dx, dy = self._DELTAS[self.direction]
        return head_x + dx, head_y + dy

    def move(self):
        """Advance one step: push a new head and drop the tail."""
        self.body.insert(0, self._next_head())
        self.body.pop()

    def change_direction(self, direction):
        """Turn the snake; a direct reversal of the heading is ignored."""
        if direction in self._DELTAS and self._OPPOSITE[direction] != self.direction:
            self.direction = direction

    def grow(self):
        """Extend the snake by one head segment and add 10 points."""
        self.body.insert(0, self._next_head())
        self.score += 10

    def draw(self):
        """Render every segment as a green 10x10 square."""
        for seg_x, seg_y in self.body:
            pygame.draw.rect(WINDOW, GREEN, (seg_x, seg_y, 10, 10))

    def check_collision(self):
        """Return True if the head left the window or ran into the body."""
        head_x, head_y = self.body[0]
        if (head_x < 0 or head_x > WINDOW_WIDTH - 10
                or head_y < 0 or head_y > WINDOW_HEIGHT - 10):
            return True
        for seg_x, seg_y in self.body[1:]:
            if (head_x, head_y) == (seg_x, seg_y):
                return True
        return False
# Set up the Food class
class Food:
    """A 10x10 food pellet placed at a random grid-aligned position."""

    def __init__(self):
        self.x, self.y = self.generate_position()

    def generate_position(self):
        """Return a random window position snapped to the 10px grid."""
        raw_x = random.randint(0, WINDOW_WIDTH - 10)
        raw_y = random.randint(0, WINDOW_HEIGHT - 10)
        return raw_x - raw_x % 10, raw_y - raw_y % 10

    def draw(self):
        """Render the pellet as a red 10x10 square."""
        pygame.draw.rect(WINDOW, RED, (self.x, self.y, 10, 10))

    def check_collision(self, snake):
        """Report whether the snake's head touches the pellet.

        On a hit the pellet is relocated to a fresh random position.
        """
        head_x, head_y = snake.body[0]
        hit = self.x <= head_x <= self.x + 10 and self.y <= head_y <= self.y + 10
        if hit:
            self.x, self.y = self.generate_position()
        return hit
# Set up the Tkinter window for the score and high score display
class ScoreBoard:
    """Tkinter window that displays the current score and the session high score."""
    def __init__(self):
        self.score = 0
        self.high_score = 0
        self.create_scoreboard()
    def create_scoreboard(self):
        """Build the Tk root window with one label per tracked value."""
        self.root = tk.Tk()
        self.root.title('Score')
        self.score_var = tk.StringVar()
        self.score_var.set('Score: 0')
        self.score_label = tk.Label(self.root, textvariable=self.score_var, font=SCORE_FONT)
        self.score_label.pack()
        self.high_score_var = tk.StringVar()
        self.high_score_var.set('High Score: 0')
        self.high_score_label = tk.Label(self.root, textvariable=self.high_score_var, font=SCORE_FONT)
        self.high_score_label.pack()
    def update_score(self, score):
        """Refresh the score label; promote to high score when beaten."""
        self.score = score
        self.score_var.set('Score: {}'.format(score))
        if score > self.high_score:
            self.update_high_score(score)
    def update_high_score(self, high_score):
        """Refresh the stored high score and its label."""
        self.high_score = high_score
        self.high_score_var.set('High Score: {}'.format(high_score))
    def show_scoreboard(self):
        """Enter the Tk main loop; blocks until the window is closed."""
        self.root.mainloop()
# Set up the game loop
def game_loop():
    """Run the snake game until the player quits or collides.

    Per frame: process input events, advance the snake, handle food and
    wall/self collisions, redraw, then cap at 10 FPS. On exit, pygame is
    shut down and the tkinter scoreboard is shown.
    """
    snake = Snake()
    food = Food()
    scoreboard = ScoreBoard()
    clock = pygame.time.Clock()
    game_over = False
    while not game_over:
        # Handle events (window close and arrow-key direction changes)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_over = True
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    snake.change_direction('right')
                elif event.key == pygame.K_LEFT:
                    snake.change_direction('left')
                elif event.key == pygame.K_UP:
                    snake.change_direction('up')
                elif event.key == pygame.K_DOWN:
                    snake.change_direction('down')
        # Move the snake one grid step in its current direction
        snake.move()
        # Check for collision with the food: grow and update the score
        if food.check_collision(snake):
            snake.grow()
            scoreboard.update_score(snake.score)
        # Check for collision with the walls or itself
        if snake.check_collision():
            game_over = True
        # Draw the objects (clear, snake, food, flip)
        WINDOW.fill(BLACK)
        snake.draw()
        food.draw()
        pygame.display.update()
        # Set the frame rate (10 frames per second)
        clock.tick(10)
    # Clean up the game and show the final score
    pygame.quit()
    scoreboard.show_scoreboard()
    print('Final Score:', snake.score)
# Entry point: start the game only when executed as a script (not on import).
if __name__ == '__main__':
    game_loop()
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.s... |
31920134745 | #!/home/knielbo/virtenvs/ndhl/bin/python
"""
Build signal(s) from information dynamics in model
@author: kln@cas.au.dk
"""
import os
import numpy as np
from numpy.matlib import repmat
import scipy as sp
from util import load_pcl
# vis and test
import matplotlib as mpl
import matplotlib.pyplot as plt
# Global matplotlib styling: TeX disabled, Times / Computer Modern fonts,
# and ASCII hyphen-minus for negative tick labels.
mpl.rcParams.update({"text.usetex": False,
                     "font.family": "Times New Roman",
                     "font.serif": "cmr10",
                     "mathtext.fontset": "cm",
                     "axes.unicode_minus": False
                     })
def kld(p, q):
    """Divergence between two probability distributions.

    Computes sum((p - q) * log10(p / q)) over entries where p != 0
    (a symmetrised, base-10 weighted variant of the KL divergence).

    Args:
        p, q: array-likes of probabilities with matching shapes.

    Returns:
        float: the divergence value (0.0 when p == q).
    """
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the equivalent (double precision) dtype.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    return np.sum(np.where(p != 0, (p-q) * np.log10(p / q), 0))
def jsd(p, q, base=np.e):
    """Pairwise Jensen-Shannon Divergence for two probability distributions.

    Unlike a plain KL divergence this stays finite when q has zero
    entries, because both inputs are compared against their mixture.
    """
    p_arr = np.asarray(p)
    q_arr = np.asarray(q)
    # Normalise both inputs so they each sum to one.
    p_arr = p_arr / p_arr.sum()
    q_arr = q_arr / q_arr.sum()
    mixture = (p_arr + q_arr) / 2.0
    half_p = sp.stats.entropy(p_arr, mixture, base=base) / 2.0
    half_q = sp.stats.entropy(q_arr, mixture, base=base) / 2.0
    return half_p + half_q
def normalize(x, lower=-1, upper=1):
    """Linearly rescale x so its values span the range [lower, upper]."""
    x_min = np.min(x)
    x_max = np.max(x)
    scaled = (x - x_min) / (x_max - x_min)
    return lower + (upper - lower) * scaled
"""
def build_signals(X, w=3):
m = len(X)
# Novelty
N_hat = np.zeros(m)
N_sd = np.zeros(m)
for i, x in enumerate(X):
submat = X[(i-w):i, ]
tmp = np.zeros(submat.shape)
for ii, xx in enumerate(submat):
tmp[ii] = kld(xx, x)
N_hat[i] = np.mean(tmp)
N_sd[i] = np.std(tmp)
# Transience
T_hat = np.zeros(m)
T_sd = np.zeros(m)
for i, x in enumerate(X):
submat = X[i:(i+w), ]
tmp = np.zeros(submat.shape)
for ii, xx in enumerate(submat):
tmp[ii] = kld(xx, x)
T_hat[i] = np.mean(tmp)
T_sd[i] = np.std(tmp)
# Resonance
R = N_hat - T_hat
R_sd = (N_sd + T_sd)/2
return [N_hat, N_sd], [T_hat, T_sd], [R, R_sd]
"""
def main():
    """Compute novelty / transience / resonance signals from LDA topics.

    Loads the theta matrix (documents x topics) from the trained model,
    computes windowed JSD-based novelty (backward window) and transience
    (forward window), their difference (resonance), rescales, imputes the
    undefined window edges with means, and saves a three-panel figure.
    """
    bow_mdl = load_pcl(os.path.join("..","mdl","bow_lda.pcl"))
    X = bow_mdl["theta"]
    #fname = os.path.join("..", "dat", "target.pcl")
    #db = load_pcl(fname)
    #content = db["content"]
    #metadata = db["metadata"]
    #print(metadata.head(25))
    # ASSERT: X is matrix
    # parameters P of function
    window = 25  # abstract time window because the data are not sampled on regular intervals (in sample)
    m = X.shape[0]
    weight = 0.0  # parameter to set initial window for novelty and final window for transience
    impute = True
    rescale = True  # -1:1 scaling
    # ASSERT: win < m
    # Novelty: mean/std JSD of each row against the preceding `window` rows.
    N_hat = np.zeros(m)
    N_sd = np.zeros(m)
    for i, x in enumerate(X):  # TODO: remove w+1 limit
        submat = X[(i-window):i,]
        tmp = np.zeros(submat.shape[0])
        if submat.any():
            for ii, xx in enumerate(submat):
                tmp[ii] = jsd(x, xx)
        else:
            tmp = np.zeros([window]) + weight  # set initial windows to 0.0
        N_hat[i] = np.mean(tmp)
        N_sd[i] = np.std(tmp)
    # Transience: mean/std JSD of each row against the following `window` rows.
    T_hat = np.zeros(m)
    T_sd = np.zeros(m)
    for i, x in enumerate(X):  # TODO: remove w+1 limit
        submat = X[i+1:(i+window+1),]
        tmp = np.zeros(submat.shape[0])
        if submat.any():
            for ii, xx in enumerate(submat):
                tmp[ii] = jsd(x, xx)
        else:
            tmp = np.zeros([window])
        T_hat[i] = np.mean(tmp)
        T_sd[i] = np.std(tmp)
    # Resonance: novelty minus transience (positive = novelty that persists).
    R = N_hat - T_hat
    R_sd = (N_sd + T_sd)/2  # NOTE(review): currently unused downstream
    # Zero out the window edges that have no full comparison window.
    T_hat[-window:] = np.zeros([window]) + weight
    R[:window] = np.zeros([window]) + weight
    R[-window:] = np.zeros([window]) + weight
    if rescale:
        print("rescaling")
        R = normalize(R)
        N_hat = normalize(N_hat, lower=0)
        T_hat = normalize(T_hat, lower=0)
    # Impute edge windows with the mean of the well-defined interior.
    N_hat[:window] = np.zeros([window]) + np.mean(N_hat[window:])
    T_hat[-window:] = np.zeros([window]) + np.mean(T_hat[:-window])
    R[:window] = np.zeros([window]) + np.mean(R[window:-window])
    R[-window:] = np.zeros([window]) + np.mean(R[window:-window])
    if impute:
        print("imputation initiated")
    # NOTE(review): source formatting was lost upstream; plotting is placed
    # at function level (not under `if impute:`) -- confirm against the repo.
    fig, ax = plt.subplots(1,3,figsize=(14,3))
    ax[0].plot(N_hat,c="k")
    ax[0].axhline(np.mean(N_hat[window:]),c="r",linestyle=":")
    ax[1].plot(T_hat,c="k")
    ax[1].axhline(np.mean(T_hat[:-window]),c="r",linestyle=":")
    ax[2].plot(R,c="k")
    ax[2].axhline(np.mean(R[window:-window]),c="r",linestyle=":")
    #ax[2].axhline(0.,c="g",linestyle=":")
    plt.tight_layout()
    plt.savefig("../fig/signal.png")
    plt.close()
main()
| centre-for-humanities-computing/NDHL-AHM20 | src/build_signal.py | build_signal.py | py | 5,392 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.rcParams.update",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "num... |
44068529651 | # Grocery Billing System
from datetime import datetime
# Interactive grocery billing flow: greet the customer, take the order,
# then print an itemised bill with 5% GST when requested.
print('------WELCOME TO OUR SUPER-MARKET-----------')
name = (input('Enter your Name: '))
# LISTS of items (menu shown to the customer)
lists = '''
Rice Rs 20/kg
Sugar Rs 40/kg
Salt Rs 35/kg
Oil Rs 70/kg
Panner Rs 80/kg
Maggi Rs 50/kg
Boost Rs 90/kg
Colgate Rs 20/kg
'''
# Declaration
price = 0
pricelist = []  # one (item, quantity, unit price, line total) tuple per purchase
totalprice = 0
finalamount = 0
ilist = []  # Item list
qlist = []  # quantity list
plist = []  # line-price list
# rates for items (keys must match the menu above)
items = {'Rice': 20,
         'Sugar': 40,
         'Salt': 35,
         'Oil': 70,
         'Panner': 80,
         'Maggi': 50,
         'Boost': 90,  # fixed: key was misspelled 'Boosrt', so the menu item "Boost" could never be bought
         'Colgate': 20}
# options
option = int(input('For List of Items press 1:'))
if option == 1:
    print(lists)
for i in range(len(items)):
    inp1 = int(input('If you want to buy please press 1 or Press 2 to exit:'))
    if inp1 == 2:
        break
    if inp1 == 1:
        item = input('Enter your items:')
        quantity = int(input('Enter quantity:'))
        if item in items.keys():
            price = quantity * (items[item])
            # fixed: record the unit price instead of the whole catalogue dict
            pricelist.append((item, quantity, items[item], price))  # as tuple
            totalprice = totalprice + price
            ilist.append(item)
            qlist.append(quantity)
            plist.append(price)
            gst = (totalprice * 5) / 100
            finalamount = gst + totalprice
        else:
            print('Sorry, the item you entered is not available')
    else:
        print('You press wrong number')
inp = input('Do you want bill yes/no:')
# fixed: the answer was read but ignored -- only print the bill on 'yes'
if inp == 'yes' and finalamount != 0:
    print(25 * '=', 'SUPER MARKET', 25 * '=')
    print('Name:', name, 30 * ' ', datetime.now())
    print(75 * '-')
    print('s-no:', 8 * ' ', 'items', 8 * ' ', 'quantity', 3 * ' ', 'price')
    for i in range(len(pricelist)):
        print(i, 8 * ' ', 5 * ' ', ilist[i], 3 * ' ', qlist[i], 8 * ' ', plist[i])
    print(75 * '-')
    print(50 * ' ', 'TotalAmount:', 'Rs', totalprice)
    print('GstAmount:', 40 * ' ', 'Rs', gst)
    print(75 * '-')
    print(50 * '-', 'FinalAmount:', 'Rs', finalamount)
    print(75 * '-')
    print(20 * '-', 'Thanks For Visiting Our Store')
    print(75 * '*')
{
"api_name": "datetime.datetime.now",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 70,
"usage_type": "name"
}
] |
70436483553 | import torch
from torch.utils.data import DataLoader, Subset
import torch.optim as optim
class AssignmentModel:
    """Optimal-transport-style GAN trainer that assigns generated samples
    to real samples via a critic-regularised cost, then trains critic and
    generator on those assignments.
    """
    def __init__(self, dataloader=None, latent=None, generator=None, critic=None, cost=None, device='cpu', A_couples=None, A_cost=None):
        """Wire up data, latent sampler, networks and the cost dispatch.

        cost must be one of 'ssim', 'psnr', 'square'; it selects which
        find_couples / gen_cost method pair is used.
        """
        ## Variables ##
        self.device = device
        self.cost = cost
        self.dataset = dataloader.dataset
        self.dataloader = dataloader
        self.latent = latent
        self.generator = generator.to(self.device)
        self.critic = critic.to(self.device)
        #self.A_couples = A_couples
        #self.A_cost = A_cost
        #self.power_factors = (0.0448, 0.2856)
        ## Architecture ##
        # generates a random latent batch on call, is function object
        self.gen_latent_batch = self.latent.sample
        # Dispatch the couple-finding and generator-cost functions by cost name.
        if self.cost == "ssim":
            self.find_couples = self.find_couples_unlimited_ssim
            self.gen_cost = self.assign_gen_cost_ssim
        elif self.cost == "psnr":
            self.find_couples = self.find_couples_unlimited_psnr
            self.gen_cost = self.assign_gen_cost_psnr
        elif self.cost == "square":
            self.find_couples = self.find_couples_unlimited_square
            self.gen_cost = self.assign_gen_cost_square
        else:
            raise ValueError("cost must be one of ['ssim', 'psnr', 'square']")
    def find_assignments_critic(self, assign_loops=100):
        """For each of `assign_loops` generated batches, find the best real
        sample per generated sample (scanning the dataset batch by batch and
        carrying the running winners forward).

        Returns:
            (assign_arr, latent_sample_list, real_idx_list): per-real-sample
            assignment counts, the latent batches used, and the chosen real
            indices per loop.
        """
        num_batches = len(self.dataset) // self.dataset.batch_size
        assign_arr = torch.zeros((len(self.dataset),))
        latent_sample_list, real_idx_list = [], []
        for _ in range(assign_loops):
            latent_points = self.gen_latent_batch(self.latent.batch_size)
            latent_points = latent_points.to(self.device)  # move to same device as generator
            generated_batch = self.generator(latent_points)
            '''
            ### New start ###
            for batch_idx, sample in enumerate(self.dataloader):
                # get batch from sample, sample is tuple of (batch, labels)
                real_batch, labels = sample
                indices = torch.arange(self.dataloader.batch_size) + batch_idx * self.dataloader.batch_size
                if batch_idx != 0:
                    #print(indices, curr_best)
                    indices = torch.cat((curr_best, indices), dim=0)
                # construct sub-dataset from given indices to combine previous best with current indices
                subset = Subset(self.dataloader.dataset, indices)
                subset_loader = DataLoader(subset, len(subset))
                real_batch = subset_loader.next()[0]
                print(real_batch.shape)
                real_batch = real_batch.reshape(-1, torch.prod(real_batch.shape[1:]))
                best = self.find_couples(real_batch, generated_batch)
                curr_best = indices[best]
                #assert len(curr_best.shape) == 1
            assign_c = torch.tensor(curr_best).unsqueeze(dim=1)
            latent_samples.append(latent_points)
            real_indcs.append(assign_c)
            idx_values = torch.unique(assign_c, return_counts=True)
            assign_arr[idx_values[0]] += idx_values[1]
            return assign_arr, latent_samples, real_indcs
            ### New end ###
            '''
            for b in range(num_batches):
                indices = torch.arange(b * self.dataloader.batch_size, (b+1)* self.dataloader.batch_size)
                # add the current best from previous batch(es) into comparison
                if b == 0:
                    all_idx = indices
                else:
                    all_idx = torch.cat([current_best, indices], dim=0)
                # returns indices of best couples from current batch
                best = self.find_couples(real_batch=self.dataset[all_idx].to(self.device), generated_batch=generated_batch)
                current_best = all_idx[best]
            assign_c = current_best.reshape(-1, 1)
            latent_sample_list.append(latent_points)
            real_idx_list.append(assign_c)
            # Accumulate how often each real sample was chosen.
            idx_value = torch.unique(assign_c, return_counts=True)
            assign_arr[idx_value[0]] += idx_value[1]
        return assign_arr, latent_sample_list, real_idx_list
    # A_w(X), X = real_samples
    def assign_critic_cost(self, assign_samples, n_assign):
        """Critic loss: negative gap between the assignment-weighted critic
        mean and the critic mean over the whole dataset."""
        crit_assign = self.critic(assign_samples).squeeze()
        crit_assign_weigthed = torch.mul(n_assign, crit_assign)
        assign_w_n_ratio = torch.sum(crit_assign_weigthed) / torch.sum(n_assign)
        dataset_mean = torch.mean(self.critic(self.dataset[:len(self.dataset)]))
        crit_cost = -(assign_w_n_ratio - dataset_mean)
        return crit_cost
    ### Square ###
    def find_couples_unlimited_square(self, real_batch, generated_batch):
        """Return, per generated sample, the index of the real sample that
        minimises critic(real) + 0.1 * ||generated - real||^2."""
        # use broadcasting to get a matrix with all fakes - each real
        z = torch.unsqueeze(generated_batch, dim=1) - real_batch
        # square distance for matrix
        norm_mat = 0.1 * torch.square(torch.linalg.norm(z, dim=2))
        dist = torch.transpose(self.critic(real_batch), 0, 1) + norm_mat
        # return real positions where distance is minimal
        couples = torch.argmin(dist, dim=1)
        return couples
    # cost(X, G(z)), X = real samples, G(z) = generated samples
    def assign_gen_cost_square(self, real_batch, generated_batch):
        """Generator loss: mean squared L2 distance to the assigned reals."""
        diff_batch = generated_batch - real_batch
        gen_cost = torch.mean(torch.square(torch.linalg.norm(diff_batch, dim=1)))
        return gen_cost
    ### SSIM ###
    def find_couples_unlimited_ssim(self, real_batch, generated_batch):
        # TODO: external code/pytorch ignite version?
        pass
    def assign_gen_cost_ssim(self, real_batch, generated_batch):
        #TODO
        pass
    ### PSNR ###
    def find_couples_unlimited_psnr(self, real_batch, generated_batch):
        # TODO: external code?
        pass
    def assign_gen_cost_psnr(self, real_batch, generated_batch):
        #TODO
        pass
    ### Train Critic/Generator ###
    def train_critic(self, assign_arr, optimizer=None):
        """One RMSprop step on the critic using the assignment counts."""
        self.critic.train()
        if optimizer is None:
            optimizer = optim.RMSprop(self.critic.parameters(), lr=self.critic.lr)
        # Only samples that were actually assigned contribute to the loss.
        assign_idx_local = torch.nonzero(assign_arr)
        assign_samples = self.dataset[assign_idx_local].to(self.device)
        n_assign = assign_arr[assign_idx_local].to(self.device)
        # train step
        crit_cost = self.assign_critic_cost(assign_samples, n_assign)
        optimizer.zero_grad()
        crit_cost.backward()
        optimizer.step()
    def train_generator(self, real_idcs, latent_samples, offset=2000, optimizer=None):
        """RMSprop steps on the generator over chunks of `offset` couples."""
        self.generator.train()
        if optimizer is None:
            optimizer = optim.RMSprop(self.generator.parameters(), lr=self.generator.lr)
        cost = []
        for c_idx in range(0, int(len(real_idcs) - offset + 1), int(offset)):
            real_batch = self.dataset.data[real_idcs[c_idx:c_idx+offset]]
            generated_batch = self.generator(latent_samples[c_idx:c_idx+offset])
            # train step
            gen_cost = self.gen_cost(real_batch, generated_batch)
            optimizer.zero_grad()
            gen_cost.backward()
            optimizer.step()
            cost.append(gen_cost.detach())
        print("The transportation distance is", torch.sqrt(torch.mean(torch.as_tensor(cost).detach())))
| devbflow/optimal-transport-gan | src/models/AssignmentModel.py | AssignmentModel.py | py | 7,497 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.unique",
"line_number": ... |
69861434594 | import aws_infrastructure.tasks.library.instance_helmfile
import scope.config
from invoke import Collection
from pathlib import Path
import tasks.terraform.ecr
# Configuration key for this task group.
CONFIG_KEY = "helmfile"
# Local/remote staging directories used when pushing helm/helmfile content.
STAGING_LOCAL_HELMFILE_DIR = "./.staging/helmfile"
STAGING_REMOTE_HELM_DIR = "./.staging/helm"
STAGING_REMOTE_HELMFILE_DIR = "./.staging/helmfile"
INSTANCE_TERRAFORM_DIR = "./terraform/instance"
INSTANCE_NAME = "instance"
# Helmfile to apply and its configuration.
HELMFILE_PATH = "./helmfile/uwscope/helmfile.yaml"
HELMFILE_CONFIG_PATH = "./helmfile/uwscope/helmfile_config.yaml"
SSH_CONFIG_PATH = Path(INSTANCE_TERRAFORM_DIR, INSTANCE_NAME, "ssh_config.yaml")
# Per-deployment Flask secret configuration files.
FLASK_DEMO_CONFIG_PATH = "./secrets/configuration/flask_demo.yaml"
FLASK_DEV_CONFIG_PATH = "./secrets/configuration/flask_dev.yaml"
FLASK_FREDHUTCH_CONFIG_PATH = "./secrets/configuration/flask_fredhutch.yaml"
FLASK_MULTICARE_CONFIG_PATH = "./secrets/configuration/flask_multicare.yaml"
FLASK_SCCA_CONFIG_PATH = "./secrets/configuration/flask_scca.yaml"
# Information for accessing the ECR
def ecr_helmfile_values_factory(*, context):
    """Produce helmfile values for authenticating against the ECR registry."""
    with tasks.terraform.ecr.ecr_read_only(context=context) as ecr_read_only:
        output = ecr_read_only.output
        return {
            "registryUrl": output.registry_url,
            "registryUser": output.registry_user,
            "registryPassword": output.registry_password,
        }
#
# Demo configuration
#
# Information for configuring server_flask
def flask_demo_values_factory(*, context):
    """Produce helmfile values for the demo server_flask configuration."""
    loaded_config = scope.config.FlaskConfig.load(FLASK_DEMO_CONFIG_PATH)
    return {"flaskConfig": loaded_config.encode()}
# Information for configuring web_patient
def web_patient_demo_values_factory(*, context):
    """Produce helmfile values for the demo web_patient deployment."""
    patient_config = {"flaskBaseUrl": "https://app.demo.uwscope.org/api/"}
    return {"webPatientConfig": patient_config}
# Information for configuring web_registry
def web_registry_demo_values_factory(*, context):
    """Produce helmfile values for the demo web_registry deployment."""
    registry_config = {"flaskBaseUrl": "https://registry.demo.uwscope.org/api/"}
    return {"webRegistryConfig": registry_config}
#
# Dev configuration
#
# Information for configuring server_flask
def flask_dev_values_factory(*, context):
    """Produce helmfile values for the dev server_flask configuration."""
    loaded_config = scope.config.FlaskConfig.load(FLASK_DEV_CONFIG_PATH)
    return {"flaskConfig": loaded_config.encode()}
# Information for configuring web_patient
def web_patient_dev_values_factory(*, context):
    """Produce helmfile values for the dev web_patient deployment."""
    patient_config = {"flaskBaseUrl": "https://app.dev.uwscope.org/api/"}
    return {"webPatientConfig": patient_config}
# Information for configuring web_registry
def web_registry_dev_values_factory(*, context):
    """Produce helmfile values for the dev web_registry deployment."""
    registry_config = {"flaskBaseUrl": "https://registry.dev.uwscope.org/api/"}
    return {"webRegistryConfig": registry_config}
#
# Fred Hutch configuration
#
# Information for configuring server_flask
def flask_fredhutch_values_factory(*, context):
    """Produce helmfile values for the Fred Hutch server_flask configuration."""
    loaded_config = scope.config.FlaskConfig.load(FLASK_FREDHUTCH_CONFIG_PATH)
    return {"flaskConfig": loaded_config.encode()}
# Information for configuring web_patient
def web_patient_fredhutch_values_factory(*, context):
    """Produce helmfile values for the Fred Hutch web_patient deployment."""
    patient_config = {"flaskBaseUrl": "https://app.fredhutch.uwscope.org/api/"}
    return {"webPatientConfig": patient_config}
# Information for configuring web_registry
def web_registry_fredhutch_values_factory(*, context):
    """Produce helmfile values for the Fred Hutch web_registry deployment."""
    registry_config = {"flaskBaseUrl": "https://registry.fredhutch.uwscope.org/api/"}
    return {"webRegistryConfig": registry_config}
#
# MultiCare configuration
#
# Information for configuring server_flask
def flask_multicare_values_factory(*, context):
    """Produce helmfile values for the MultiCare server_flask configuration."""
    loaded_config = scope.config.FlaskConfig.load(FLASK_MULTICARE_CONFIG_PATH)
    return {"flaskConfig": loaded_config.encode()}
# Information for configuring web_patient
def web_patient_multicare_values_factory(*, context):
    """Produce helmfile values for the MultiCare web_patient deployment."""
    patient_config = {"flaskBaseUrl": "https://app.multicare.uwscope.org/api/"}
    return {"webPatientConfig": patient_config}
# Information for configuring web_registry
def web_registry_multicare_values_factory(*, context):
    """Produce helmfile values for the MultiCare web_registry deployment."""
    registry_config = {"flaskBaseUrl": "https://registry.multicare.uwscope.org/api/"}
    return {"webRegistryConfig": registry_config}
#
# SCCA configuration
#
# Information for configuring server_flask
def flask_scca_values_factory(*, context):
    """Produce helmfile values for the SCCA server_flask configuration."""
    loaded_config = scope.config.FlaskConfig.load(FLASK_SCCA_CONFIG_PATH)
    return {"flaskConfig": loaded_config.encode()}
# Information for configuring web_patient
def web_patient_scca_values_factory(*, context):
    """Produce helmfile values for the SCCA web_patient deployment."""
    patient_config = {"flaskBaseUrl": "https://app.scca.uwscope.org/api/"}
    return {"webPatientConfig": patient_config}
# Information for configuring web_registry
def web_registry_scca_values_factory(*, context):
    """Produce helmfile values for the SCCA web_registry deployment."""
    registry_config = {"flaskBaseUrl": "https://registry.scca.uwscope.org/api/"}
    return {"webRegistryConfig": registry_config}
# Invoke task that stages helmfile/uwscope/helmfile.yaml on the instance
# (over the SSH config produced by terraform) and applies it, injecting
# per-deployment values from the factories above.
task_helmfile_apply = (
    aws_infrastructure.tasks.library.instance_helmfile.task_helmfile_apply(
        config_key=CONFIG_KEY,
        ssh_config_path=SSH_CONFIG_PATH,
        staging_local_dir=STAGING_LOCAL_HELMFILE_DIR,
        staging_remote_dir=STAGING_REMOTE_HELMFILE_DIR,
        helmfile_path=HELMFILE_PATH,
        helmfile_config_path=HELMFILE_CONFIG_PATH,
        helmfile_values_factories={
            "ecr_generated": ecr_helmfile_values_factory,
            # Dev Values
            "flask_dev_generated": flask_dev_values_factory,
            "web_patient_dev_generated": web_patient_dev_values_factory,
            "web_registry_dev_generated": web_registry_dev_values_factory,
            # Demo Values
            "flask_demo_generated": flask_demo_values_factory,
            "web_patient_demo_generated": web_patient_demo_values_factory,
            "web_registry_demo_generated": web_registry_demo_values_factory,
            # Fred Hutch Values
            "flask_fredhutch_generated": flask_fredhutch_values_factory,
            "web_patient_fredhutch_generated": web_patient_fredhutch_values_factory,
            "web_registry_fredhutch_generated": web_registry_fredhutch_values_factory,
            # MultiCare Values
            "flask_multicare_generated": flask_multicare_values_factory,
            "web_patient_multicare_generated": web_patient_multicare_values_factory,
            "web_registry_multicare_generated": web_registry_multicare_values_factory,
            # SCCA Values
            "flask_scca_generated": flask_scca_values_factory,
            "web_patient_scca_generated": web_patient_scca_values_factory,
            "web_registry_scca_generated": web_registry_scca_values_factory,
        },
    )
)
task_helmfile_apply.__doc__ = "Apply helmfile/uwscope/helmfile.yaml in the instance."
# Expose the task as `helmfile.apply` in the invoke namespace.
ns = Collection("helmfile")
ns.add_task(task_helmfile_apply, "apply")
| uwscope/scope-aws-infrastructure | tasks/helmfile.py | helmfile.py | py | 6,863 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tasks.terraform.ecr.terraform.ecr.ecr_read_only",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tasks.terraform.ecr.terraform",
"line_number": 28,
"usage_type": "attribute"... |
10581123244 | import requests
import time
# Read the list of URLs to probe (one per line).
with open('NEWURL.txt','r') as f:
    url = f.readlines()
# Fetch each URL, timing the request to estimate throughput.
for i in range(len(url)):
    url[i] = url[i].strip()
    try:
        t1 = time.time()
        r = requests.get(url[i])
        t2 = time.time()
        Time = t2 - t1  # elapsed wall-clock seconds for the request
        print(Time)
        print(len(r.content))
        Throughput = (len(r.content)/ (Time))  # bytes per second
        print(Throughput)
        print(type(Throughput))
        #f = open('F:\\UCL\\python\\throughput.txt', 'a+')
    except Exception as e:
        # Best-effort: report the failure and continue with the next URL.
        print(e)
| MieMieWangWang/SNS | throughput.py | throughput.py | py | 466 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
}
] |
21468516172 | import logging
import os
from motor.motor_asyncio import AsyncIOMotorClient
log = logging.getLogger(__file__)
class Database:
    """Lazy async wrapper around a Motor/MongoDB connection."""

    def __init__(self, connection_url: str | None = None):
        """Connect using the given URL, falling back to DB_CONNECTION_URL."""
        connection_url = connection_url or os.getenv('DB_CONNECTION_URL')
        assert connection_url, 'Connection URL to DB not set'
        self.__client = AsyncIOMotorClient(connection_url)
        self.__database = None
        log.info(f'Connected to DB\n{self.__client.topology_description}')

    @property
    async def database(self):
        """Resolve and cache the 'microblog' database handle on first use."""
        # Fix: compare against None explicitly -- pymongo/motor Database
        # objects do not implement truth-value testing and raise
        # NotImplementedError on bool(), so `if not self.__database`
        # would crash on every access after the first.
        if self.__database is None:
            self.__database = self.__client.get_database('microblog')
        return self.__database

    async def close(self):
        """Close the underlying client, if one was created."""
        # Fix: same rationale -- MongoClient objects raise on truth testing.
        if self.__client is not None:
            self.__client.close()
| kozlowskimaciej/microblog | backend/api/database/db.py | db.py | py | 758 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "motor.motor_asyncio.AsyncIOMotorClient",
"line_number": 15,
"usage_type": "call"
}
] |
74576595552 | import pandas as pd
from datetime import datetime, timedelta
def add_english_time_column(dataframe):
    """Append an 'English Time' column of half-hourly English timestamps.

    One formatted timestamp per row, starting at 2012-01-01 00:00 and
    advancing 30 minutes per row. The input frame is modified in place
    and also returned.
    """
    start = datetime(2012, 1, 1, 0, 0)
    step = timedelta(minutes=30)
    # One formatted value per row, in row order.
    formatted = [
        (start + i * step).strftime('%A, %B %d, %Y %I:%M %p')
        for i in range(len(dataframe))
    ]
    dataframe['English Time'] = formatted
    return dataframe
def df_to_xlsx(df):
    """Save the given DataFrame as 'new_data.xlsx', omitting the index."""
    target_file = 'new_data.xlsx'
    df.to_excel(target_file, index=False)
| kermia-ai/pythonProject | prepar_data.py | prepar_data.py | py | 1,400 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 21,
"usage_type": "call"
}
] |
42618756934 | import cirq
import sympy
import numpy as np
import tensorflow as tf
import tensorflow_quantum as tfq
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras.metrics import Precision, Recall, AUC
from tensorflow.keras.optimizers import Adam
class QConv(tf.keras.layers.Layer):
    def __init__(self, filter_size, depth, activation=None, name=None, kernel_regularizer=None, **kwangs):
        """Quantum convolution layer.

        Args:
            filter_size: side length of the square filter (pixels = filter_size**2,
                which must be a power of two -- enforced in QCNN_layer_gen).
            depth: number of output channels (filters).
            activation: optional Keras activation applied to the output.
            name: layer name passed through to the Keras base class.
            kernel_regularizer: optional regularizer for the trainable kernel.
        """
        super(QConv, self).__init__(name=name, **kwangs)
        self.filter_size = filter_size
        self.depth = depth
        # Sympy symbols for the trainable gate exponents, filled by QCNN_layer_gen.
        self.learning_params = []
        self.QCNN_layer_gen()
        # self.circuit_tensor = tfq.convert_to_tensor([self.circuit])
        self.activation = tf.keras.layers.Activation(activation)
        self.kernel_regularizer = kernel_regularizer
def get_config(self):
config = super().get_config()
config.update({
"filter_size": self.filter_size,
"depth": self.depth,
"activation": self.activation,
"name": self.name,
"kernel_regularizer": self.kernel_regularizer,
})
return config
def _next_qubit_set(self, original_size, next_size, qubits):
step = original_size // next_size
qubit_list = []
for i in range(0, original_size, step):
for j in range(0, original_size, step):
qubit_list.append(qubits[original_size * i + j])
return qubit_list
def _get_new_param(self):
"""
return new learnable parameter
all returned parameter saved in self.learning_params
"""
new_param = sympy.symbols("p" + str(len(self.learning_params)))
self.learning_params.append(new_param)
return new_param
    def _QConv(self, step, target, qubits):
        """Yield the learnable two-qubit gates for one convolution level.

        Applies a CZ-power and a CX-power gate (each with its own fresh
        trainable exponent) between qubits[target] and qubits[target + step].
        """
        yield cirq.CZPowGate(exponent=self._get_new_param())(qubits[target], qubits[target + step])
        yield cirq.CXPowGate(exponent=self._get_new_param())(qubits[target], qubits[target + step])
def QCNN_layer_gen(self):
"""
make quantum convolutional layer in QConv layer
"""
pixels = self.filter_size ** 2
# filter size: 2^n only
if np.log2(pixels) % 1 != 0:
raise NotImplementedError("filter size: 2^n only available")
cirq_qubits = cirq.GridQubit.rect(self.filter_size, self.filter_size)
# mapping input data to circuit
input_circuit = cirq.Circuit()
input_params = [sympy.symbols('a%d' % i) for i in range(pixels)]
for i, qubit in enumerate(cirq_qubits):
input_circuit.append(cirq.rx(np.pi * input_params[i])(qubit))
# apply learnable gate set to QCNN circuit
qcnn_circuit = cirq.Circuit()
step_size = [2 ** i for i in range(np.log2(pixels).astype(np.int32))]
for step in step_size:
for target in range(0, pixels, 2 * step):
qcnn_circuit.append(self._QConv(step, target, cirq_qubits))
# merge the circuits
full_circuit = cirq.Circuit()
full_circuit.append(input_circuit)
full_circuit.append(qcnn_circuit)
self.circuit = full_circuit # save circuit to the QCNN layer obj.
self.params = input_params + self.learning_params
self.op = cirq.Z(cirq_qubits[0])
def build(self, input_shape):
self.width = input_shape[1]
self.height = input_shape[2]
self.channel = input_shape[3]
self.num_x = self.width - self.filter_size + 1
self.num_y = self.height - self.filter_size + 1
self.kernel = self.add_weight(name="kernel",
shape=[self.depth,
self.channel,
len(self.learning_params)],
initializer=tf.keras.initializers.glorot_normal(),
regularizer=self.kernel_regularizer)
self.circuit_tensor = tfq.convert_to_tensor([self.circuit] * self.num_x * self.num_y * self.channel)
def call(self, inputs):
# input shape: [N, width, height, channel]
# slide and collect data
stack_set = None
for i in range(self.num_x):
for j in range(self.num_y):
slice_part = tf.slice(inputs, [0, i, j, 0], [-1, self.filter_size, self.filter_size, -1])
slice_part = tf.reshape(slice_part, shape=[-1, 1, self.filter_size, self.filter_size, self.channel])
if stack_set is None:
stack_set = slice_part
else:
stack_set = tf.concat([stack_set, slice_part], 1)
# -> shape: [N, num_x*num_y, filter_size, filter_size, channel]
stack_set = tf.transpose(stack_set, perm=[0, 1, 4, 2, 3])
# -> shape: [N, num_x*num_y, channel, filter_size, fiter_size]
stack_set = tf.reshape(stack_set, shape=[-1, self.filter_size ** 2])
# -> shape: [N*num_x*num_y*channel, filter_size^2]
# total input citcuits: N * num_x * num_y * channel
circuit_inputs = tf.tile([self.circuit_tensor], [tf.shape(inputs)[0], 1])
circuit_inputs = tf.reshape(circuit_inputs, shape=[-1])
tf.fill([tf.shape(inputs)[0] * self.num_x * self.num_y, 1], 1)
outputs = []
for i in range(self.depth):
controller = tf.tile(self.kernel[i], [tf.shape(inputs)[0] * self.num_x * self.num_y, 1])
outputs.append(self.single_depth_QCNN(stack_set, controller, circuit_inputs))
# shape: [N, num_x, num_y]
output_tensor = tf.stack(outputs, axis=3)
output_tensor = tf.math.acos(tf.clip_by_value(output_tensor, -1 + 1e-5, 1 - 1e-5)) / np.pi
# output_tensor = tf.clip_by_value(tf.math.acos(output_tensor)/np.pi, -1, 1)
return self.activation(output_tensor)
def single_depth_QCNN(self, input_data, controller, circuit_inputs):
"""
make QCNN for 1 channel only
"""
# input shape: [N*num_x*num_y*channel, filter_size^2]
# controller shape: [N*num_x*num_y*channel, len(learning_params)]
input_data = tf.concat([input_data, controller], 1)
# input_data shape: [N*num_x*num_y*channel, len(learning_params)]
qcnn_output = tfq.layers.Expectation()(circuit_inputs,
symbol_names=self.params,
symbol_values=input_data,
operators=self.op)
# qcnn_output shape: [N*num_x*num_y*channel]
qcnn_output = tf.reshape(qcnn_output, shape=[-1, self.num_x, self.num_y, self.channel])
return tf.math.reduce_sum(qcnn_output, 3)
class QCNNqconv:
def __init__(self, num_classes, img_size, channels, learning_rate=0.01, name="QCNN_QConv"):
self.name = name
self.learning_rate = learning_rate
self.num_classes = num_classes
self.input_width_height = img_size
self.channels = channels
self.input_type = 'images'
def build(self):
model = models.Sequential()
model.add(QConv(filter_size=2, depth=8, activation='relu', name='qconv1',
input_shape=(self.input_width_height, self.input_width_height, self.channels)))
model.add(layers.Conv2D(16, (2, 2), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(self.num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(self.learning_rate),
metrics=['acc', Precision(name="prec"), Recall(name="rec"), AUC(name='auc')])
return model
| Djack1010/tami | code_models/sota_code_models/QCNN_QConv.py | QCNN_QConv.py | py | 7,873 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "tensorflow.keras",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 20,
"usage_type": "attribute"
},
{
"ap... |
72046529635 | import smtplib
from email.mime.text import MIMEText
from email.utils import COMMASPACE # This is a just a fancy way of doing: COMMASPACE = ", "
def sendEmail():
recipient = ['email@gmail.com','email2@gmail.com']
pwd = 'your password'
sender = 'pyfeeds@gmail.com'
subject = '**** ALERT ****'
message = "ATTENTION: Someone has entered the home"
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = COMMASPACE.join(recipient) # COMMASPACE still works ok with just one recipient
#msg['CC'] = COMMASPACE.join(recipient)
#msg['BCC'] = COMMASPACE.join(recipient)
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
try:
server.login(sender,pwd)
except smtplib.SMTPAuthenticationError: # Check for authentication error
return "ERROR"
try:
server.sendmail(sender, recipient, msg.as_string())
except smtplib.SMTPRecipientsRefused: # Check if recipient's email was accepted by the server
return "ERROR"
server.quit()
| pybokeh/python | email/SendingEmail.py | SendingEmail.py | py | 1,057 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "email.mime.text.MIMEText",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "email.utils.COMMASPACE.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "email.utils.COMMASPACE",
"line_number": 16,
"usage_type": "name"
},
{
"api_n... |
27826917586 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 15 12:19:30 2023
@author: sharrm
"""
import fiona
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import rasterio
from rasterio.mask import mask
from scipy import spatial
from scipy import ndimage
from skimage import feature, filters
from skimage.morphology import binary_dilation, binary_erosion
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, HistGradientBoostingClassifier
from sklearn.metrics import accuracy_score#, confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import classification_report, f1_score, recall_score, precision_score
from sklearn.model_selection import train_test_split, learning_curve, StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler
# %% - morphology
def normalize(band):
band_min, band_max = (np.nanmin(band), np.nanmax(band))
return ((band-band_min)/((band_max - band_min)))
def near_land(input_blue, input_green, input_red, input_704, input_nir, shapefile, out_dir, write):
# Open the geotiff file
with rasterio.open(input_green) as green:
# Read the green band metadata
out_meta = green.meta
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_green, transform = mask(green, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(input_nir) as nir:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_nir, transform = mask(nir, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(input_704) as b704:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_704, transform = mask(b704, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(input_blue) as blue:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_blue, transform = mask(blue, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(input_red) as red:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_red, transform = mask(red, gdf.geometry, crop=True)
# compute ndwi
ndwi = (cropped_green - cropped_nir) / (cropped_green + cropped_nir)
cropped = np.moveaxis(ndwi, 0, -1)[:,:,0]
# compute pSDBr
pSDBr = np.log(cropped_blue * 1000) / np.log(cropped_red * 1000)
pSDBg = np.log(cropped_blue * 1000) / np.log(cropped_green * 1000)
# create binary array for land and water pixels
nan_vals = np.where(np.isnan(cropped))
cropped_land_water = np.where(cropped < 0.1, 1, 0)
# morphological operation to grow land pixels
morphed_land = binary_dilation(cropped_land_water) #.astype(cropped_land_water.dtype))
erode_land = binary_erosion(morphed_land) #.astype(cropped_land_water.dtype))
# pixels adjacent to land
zero_mask = np.logical_and(morphed_land, ~erode_land)
land_adjacent_ndwi = np.where(zero_mask, cropped, 0)
# land_adjacent_ndwi = np.where(land_adjacent_ndwi < 0.15, 0, land_adjacent_ndwi)
# land_adjacent_percentile = np.where(np.percentile(land_adjacent_ndwi, 90), land_adjacent_ndwi, 0)
percentile10 = np.nanpercentile(cropped[zero_mask == 1], 10)
print(f'Precentile 10: {percentile10:.3f}')
percentile10 = np.where(land_adjacent_ndwi < percentile10, land_adjacent_ndwi, 0)
percentile90 = np.nanpercentile(cropped[zero_mask == 1], 90)
print(f'Precentile 90: {percentile90:.3f}')
percentile90 = np.where(land_adjacent_ndwi > percentile90, land_adjacent_ndwi, 0)
# ndwi values for pixels adjacent to land for histogram
ndwi_adjacent = cropped[zero_mask == 1]
print(f'Average land adjacent NDWI value: {np.nanmean(ndwi_adjacent):.3f} ± {np.nanstd(ndwi_adjacent):.3f}')
print(f'Median land adjacent NDWI value: {np.nanmedian(ndwi_adjacent):.3f}')
land_adjacent_ndwi[nan_vals] = np.nan
percentile10[nan_vals] = np.nan
percentile90[nan_vals] = np.nan
red_n = normalize(cropped_red[0,:,:])
green_n = normalize(cropped_green[0,:,:])
blue_n = normalize(cropped_blue[0,:,:])
rgb_composite_n = np.dstack((red_n, green_n, blue_n))
# Stack the bands to create an RGB image
rgb_image = np.dstack((cropped_red[0,:,:], cropped_green[0,:,:], cropped_blue[0,:,:]))
brightened_image = np.clip(rgb_composite_n * 3, 0, 255)#.astype(np.uint8)
brightened_image[nan_vals] = 255
m = np.ma.masked_where(np.isnan(brightened_image),brightened_image)
# plt.figure(figsize=(10, 10))
f, ax = plt.subplots(2,2, figsize=(10, 6), dpi=200)
ax[0,0].imshow(brightened_image)
ax[0,0].set_title('RGB', fontsize=10)
ax[0,1].imshow(land_adjacent_ndwi, vmax=0.1, cmap='cividis')
ax[0,1].set_title('Land Adjacent Pixels', fontsize=10)
ax[1,0].imshow(percentile10, vmax=0.01, cmap='cividis')
ax[1,0].set_title('10th Percentile', fontsize=10)
ax[1,1].imshow(percentile90, vmax=0.05, cmap='cividis')
ax[1,1].set_title('90th Percentile', fontsize=10)
# plt.tight_layout()
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
plt.show()
# ndwi values for pixels adjacent to land for histogram
ndwi_adjacent = cropped[zero_mask == 1]
print(f'Average land adjacent NDWI value: {np.nanmean(ndwi_adjacent):.3f} ± {np.nanstd(ndwi_adjacent):.3f}')
print(f'Median land adjacent NDWI value: {np.nanmedian(ndwi_adjacent):.3f}')
land_adjacent_ndwi[nan_vals] = np.nan
training_data = np.vstack((cropped_blue.flatten(),
cropped_green.flatten(),
cropped_red.flatten(),
cropped_704.flatten(),
cropped_nir.flatten(),
ndwi.flatten(),
# pSDBg.flatten(),
pSDBr.flatten())).transpose()
training_data[np.isnan(training_data)] = 2
# Plot the masked image
plt.figure(figsize=(12, 6))
plt.subplot(2, 2, 1)
plt.imshow(cropped, cmap='gray', vmin=0.2)
plt.title('Land Adjacent Pixels')
plt.imshow(zero_mask, cmap='Reds', alpha=0.3, vmax=0.2)
plt.colorbar()
# Plot histogram of values
plt.subplot(2, 2, 2)
plt.hist(ndwi_adjacent, bins=50, edgecolor='k')
plt.xlabel('NDWI Value')
plt.ylabel('Frequency')
plt.title('Distribution of NDWI Values at Land Adjacent Pixels')
plt.tight_layout()
plt.show()
land_adjacent_ndwi[nan_vals] = 2
# raster meta
out_meta.update({"driver": "GTiff",
"height": cropped_nir.shape[1],
"width": cropped_nir.shape[2],
"count": cropped_nir.shape[0],
"nodata": 2,
"transform": transform})
# save rasters
if write:
morph_name = os.path.join(out_dir, 'morphed.tif')
with rasterio.open(morph_name, "w", **out_meta) as dest:
dest.write(morphed_land, 1)
dest = None
ndwi_name = os.path.join(out_dir, 'ndwi.tif')
with rasterio.open(ndwi_name, "w", **out_meta) as dest:
dest.write(cropped, 1)
dest = None
print(f'Wrote: {ndwi_name}')
water_name = os.path.join(out_dir, 'land_adjacent.tif')
with rasterio.open(water_name, "w", **out_meta) as dest:
dest.write(land_adjacent_ndwi, 1)
dest = None
percentile10_name = os.path.join(out_dir, 'percentile10.tif')
with rasterio.open(percentile10_name, "w", **out_meta) as dest:
dest.write(percentile10, 1)
dest = None
print(f'Wrote: {percentile10_name}')
percentile90_name = os.path.join(out_dir, 'percentile90.tif')
with rasterio.open(percentile90_name, "w", **out_meta) as dest:
dest.write(percentile90, 1)
dest = None
print(f'Wrote: {percentile90_name}')
return land_adjacent_ndwi, training_data
# %% - training
# plots the learning curve -- relationship between prediction accuracy and data size
def plotLearningCurve(train_size_abs, train_mean, train_std, test_mean, test_std, curve_title):
plt.plot(train_size_abs, train_mean, color='forestgreen', marker='o', markersize=5, label='Training Accuracy')
plt.fill_between(train_size_abs, train_mean + train_std, train_mean - train_std, alpha=0.3, color='forestgreen')
plt.plot(train_size_abs, test_mean, color='royalblue', marker='+', markersize=5, linestyle='--', label='Validation Accuracy')
plt.fill_between(train_size_abs, test_mean + test_std, test_mean - test_std, alpha=0.3, color='royalblue')
plt.title(curve_title)
plt.xlabel('Training Data Size')
plt.ylabel('Model accuracy (f1-score)')
plt.grid()
plt.legend(loc='lower right')
plt.show()
return None
# computes and plots learning curve
def compute_learning_curve(clf, x_train, y_train):
# start_time = time.time() # start time for process timing
cv = StratifiedKFold(n_splits=5)
print(f'\nComputing learning curve for {clf}.')
train_size_abs, train_scores, test_scores = learning_curve(
clf, x_train, y_train, cv=cv, scoring='f1_macro',
train_sizes=np.linspace(0.1, 1., 10), random_state=42)
# Calculate training and test mean and std
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Plot the learning curve
print(f'--Plotting learning curve for {clf}.')
plotLearningCurve(train_size_abs, train_mean, train_std, test_mean, test_std, curve_title=clf)
print(f'Test accuracy:\n{test_mean}')
print(f'Test standard deviation:\n{test_std}')
return None
def subsample(array1, array2, adjustment):
# get indices of rows containing 0 and 1
indices_with_zeros = np.where(array1 == 0)[0]
indices_with_ones = np.where(array1 == 1)[0]
# randomly select a subset of rows containing 0
num_rows_to_select = np.count_nonzero(array1 == 1) * adjustment # Adjust as needed
rng = np.random.default_rng(0)
selected_indices_zeros = rng.choice(indices_with_zeros, size=num_rows_to_select, replace=False)
# include the randomly selected rows
selected_rows_array1 = np.concatenate((array1[indices_with_ones], array1[selected_indices_zeros]))
selected_rows_array2 = np.vstack((array2[indices_with_ones], array2[selected_indices_zeros]))
return selected_rows_array1, selected_rows_array2
def train_model(water_vals, training_data):
labels = np.where((water_vals != 0) & (water_vals != 2), 1, water_vals)
water_vals_1d = training_data
labels_1d = labels.flatten()
print(f'\nTraining else values: {np.count_nonzero(labels_1d == 0)}')
print(f'Water labels: {np.count_nonzero(labels_1d == 1)}')
print(f'Nan labels: {np.count_nonzero(labels_1d == 2)}')
# water_vals_1d = np.delete(water_vals_1d, np.where(training_data == 2), axis = 0)
# labels_1d = np.delete(labels_1d, np.where(training_data == 2), axis=0)
# subsample()
print(f'\nTrainData Shape: {water_vals_1d.shape}\nLabels Shape: {labels_1d.shape}')
X_train, X_test, Y_train, Y_test = train_test_split(water_vals_1d, labels_1d,
test_size=0.3, random_state=40, stratify=labels_1d)
scaler = MinMaxScaler().fit(water_vals_1d)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print(f'\nX Train Shape: {X_train_scaled.shape}\nY_train Shape: {Y_train.shape}')
print(f'Water labels: {np.count_nonzero(Y_train == 1)}\n')
clf = RandomForestClassifier(random_state=42, n_jobs=4, n_estimators=50)
# clf = HistGradientBoostingClassifier(random_state=42, max_iter=500, learning_rate=0.1, max_depth=5)
# clf = MLPClassifier(random_state=42, max_iter=300, hidden_layer_sizes=(30,30,30))
# clf = svm.SVC(C=1.0, class_weight='balanced', random_state=42)
# X_learn_scaled = scaler.transform(water_vals_1d)
# compute_learning_curve(clf, X_learn_scaled, labels_1d)
print(f'Training {clf}')
model = clf.fit(X_train_scaled, Y_train)
# feature_list = ['blue', 'green', 'red', '704', 'nir', 'ndwi', 'pSDBr']
# feature_importance = pd.Series(model.feature_importances_,index=feature_list).sort_values(ascending=False).round(3)
# print(f'\nFeature Importance:\n{feature_importance}\n')
print('--Computing Precision, Recall, F1-Score...')
classification = classification_report(Y_test, model.predict(X_test_scaled), labels=model.classes_)
print(f'--Classification Report:\n{classification}')
return water_vals_1d, labels_1d, model
def save_model(model_dir, model_name, model):
model_name = os.path.join(model_dir, model_name)
pickle.dump(model, open(model_name, 'wb')) # save the trained Random Forest model
print(f'Saved model: {model_name}')
return None
# %% - prediction
def predict(test_blue, test_green, test_red, test_704, test_nir, shapefile, model):
# Open the geotiff file
with rasterio.open(test_green) as green:
# Read the green band metadata
prediction_meta = green.meta
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_green, transform = mask(green, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(test_nir) as nir:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_nir, transform = mask(nir, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(test_704) as b704:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_704, transform = mask(b704, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(test_blue) as blue:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_blue, transform = mask(blue, gdf.geometry, crop=True)
# Open the geotiff file
with rasterio.open(test_red) as red:
# Open the shapefile
gdf = gpd.read_file(shapefile)
# Crop the raster to the shapefile extent
cropped_red, out_transform = mask(red, gdf.geometry, crop=True)
# compute ndwi
ndwi = (cropped_green - cropped_nir) / (cropped_green + cropped_nir)
# compute pSDBr
pSDBr = np.log(cropped_blue * 1000) / np.log(cropped_red * 1000)
pSDBg = np.log(cropped_blue * 1000) / np.log(cropped_green * 1000)
# shape prediction data
test_data = np.vstack((cropped_blue.flatten(),
cropped_green.flatten(),
cropped_red.flatten(),
cropped_704.flatten(),
cropped_nir.flatten(),
ndwi.flatten(),
# pSDBg.flatten(),
pSDBr.flatten())).transpose()
scaler = MinMaxScaler().fit(test_data)
# scaler = StandardScaler().fit(test_data)
scaled = scaler.transform(test_data)
scaled[np.isnan(scaled)] = 2
prediction = model.predict(scaled)
prediction_shape = cropped_red.shape
print(f'\nPrediction (0) values: {np.count_nonzero(prediction == 0)}')
print(f'Prediction (1) values: {np.count_nonzero(prediction == 1)}')
return prediction_shape, prediction, prediction_meta, pSDBr, out_transform
def plot_prediction(prediction, prediction_shape, pSDBr):
# reshape
img = np.reshape(prediction, prediction_shape)
img = np.moveaxis(img, 0, -1)[:,:,0]
# pSDBr = np.moveaxis(pSDBr, 0, -1)[:,:,0]
# mask = np.ma.masked_where(img != 1, img)
img = np.where(img == 2, np.nan, img)
fig = plt.figure()
# plt.imshow(pSDBr, cmap='gray')
# plt.imshow(mask, cmap='hot', alpha=0.7)
plt.imshow(img, cmap='viridis')
plt.title('Prediction')
plt.colorbar()
plt.show()
return img.shape
def save_prediction(prediction, pSDBr, prediction_shape, prediction_meta, out_dir, out_transform):
prediction_name = os.path.join(out_dir, '_prediction.tif')
pSDBr_name = os.path.join(out_dir, '_pSDBr.tif')
img = np.reshape(prediction, prediction_shape)
# img = np.ma.masked_where(img == 1, img)
# raster meta
prediction_meta.update({"driver": "GTiff",
"height": prediction_shape[1],
"width": prediction_shape[2],
"count": prediction_shape[0],
"nodata": 2,
"transform": out_transform})
# save rasters
with rasterio.open(prediction_name, "w", **prediction_meta) as dest:
dest.write(img) # had to specify '1' here for some reason
dest = None
print(f'\nSaved prediction to: {prediction_name}')
# save rasters
with rasterio.open(pSDBr_name, "w", **prediction_meta) as dest:
dest.write(pSDBr) # had to specify '1' here for some reason
dest = None
print(f'Saved pSDBr to: {pSDBr_name}')
return None
# %% - main
if __name__ == '__main__':
out_dir = r"C:\_ZeroShoreline\Out\Testing"
model_dir = r'C:\_ZeroShoreline\Model'
model_name = 'RF_BGR7NWpR_StCroix.pkl'
# in492 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\NDWI\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\NDWI\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\Hatteras_Inlet_FocusedExtent.shp"
in492 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_492.tif"
in560 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_560.tif"
in665 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_665.tif"
in704 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_704.tif"
in833 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_833.tif"
land = r"C:\_ZeroShoreline\Extent\StCroix_Zero.shp"
# in492 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\Halfmoon_Zero.shp"
# in492 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\FL_Zero.shp"
water_vals, training_data = near_land(in492, in560, in665,
in704, in833, land, out_dir, write=False)
water_vals_1d, labels_1d, model = train_model(water_vals, training_data)
# save_model(model_dir, model_name, model)
# in492 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\NDWI\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\Hatteras_20230102\NDWI\S2A_MSI_2023_01_02_15_53_20_T18SVE_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\Hatteras_Inlet_FocusedExtent.shp"
# in492 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\StCroix_20220129\S2A_MSI_2022_01_29_14_58_03_T20QKE_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\StCroix_Zero.shp"
# in492 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\HalfMoonShoal_20221209\S2A_MSI_2022_12_09_16_16_30_T17RLH_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\Halfmoon_Zero.shp"
# in492 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_492.tif"
# in560 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_560.tif"
# in665 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_665.tif"
# in704 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_704.tif"
# in833 = r"C:\_ZeroShoreline\Imagery\FL_Keys_20230115\S2A_MSI_2023_01_15_16_06_24_T17RNH_L2R_rhos_833.tif"
# land = r"C:\_ZeroShoreline\Extent\FL_Zero.shp"
prediction_shape, prediction, prediction_meta, pSDBr, out_transform = predict(in492, in560, in665,
in704, in833, land, model)
img_shape = plot_prediction(prediction, prediction_shape, pSDBr)
# save_prediction(prediction, pSDBr, prediction_shape, prediction_meta, out_dir, out_transform) | sharrm/RSD | zero_shoreline_old.py | zero_shoreline_old.py | py | 24,259 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.nanmin",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.nanmax",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "rasterio.open",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"li... |
28517427312 | #coding=utf-8
from pyrogram import Client, filters
from pyrogram.errors import FloodWait
from pyrogram.types import ChatPermissions
import time
from time import sleep
import random
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
app = Client("my_account", api_id=config['pyrogram']['api_id'], api_hash=config['pyrogram']['api_hash'])
@app.on_message(filters.command("type", prefixes=".") & filters.me)
def type(_, msg):
orig_text = msg.text.split(".type ", maxsplit=1)[1]
text = orig_text
tbp = ""
typing_symbol = "▒"
while(tbp != orig_text):
try:
msg.edit(tbp + typing_symbol)
sleep(0.05)
tbp = tbp + text[0]
text = text[1:]
msg.edit(tbp)
sleep(0.05)
except FloodWait as e:
sleep(e.x)
@app.on_message(filters.command("smtype", prefixes=".") & filters.me)
def type(_, msg):
orig_text = msg.text.split(" ", maxsplit=2)[2]
text = orig_text
tbp = ""
typing_symbol = msg.text.split(" ", maxsplit=2)[1]
while(tbp != orig_text):
try:
msg.edit(tbp + typing_symbol)
sleep(0.05)
tbp = tbp + text[0]
text = text[1:]
msg.edit(tbp)
sleep(0.05)
except FloodWait as e:
sleep(e.x)
app.run() | Slay-of/dtx_userbot | main.py | main.py | py | 1,384 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pyrogram.Client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
26157049157 | from fastapi import APIRouter, Response, status, Depends, HTTPException
from psycopg.errors import UniqueViolation
from datetime import datetime
from pydantic import BaseModel
from fastapi.security import OAuth2PasswordBearer
import os
from jose import jwt
from user_db import pool
from .users import User
from profile_db import ProfileQueries
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
router = APIRouter()
SECRET_KEY = os.environ["SECRET_KEY"]
ALGORITHM = "HS256"
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
class ProfileIn(BaseModel):
budget: int
class ProfileOut(BaseModel):
id: int
budget: int
userid: int
firstname: str
lastname: str
username: str
class ErrorMessage(BaseModel):
message: str
from profile_db import ProfileQueries
@router.post("/api/profile/new", response_model = ProfileOut, responses={500: {"model": ErrorMessage},},)
def profile_post(
profile: ProfileIn,
response: Response,
bearer_token: str = Depends(oauth2_scheme),
):
if bearer_token is None:
raise credentials_exception
print(bearer_token)
## decode returns a dictionary (ie. payload is a dictionary dictionary)
payload = jwt.decode(bearer_token, SECRET_KEY, algorithms=[ALGORITHM])
print(payload)
password = payload.get("sub"),
print(password)
userid = payload.get("userid")
print(userid)
username= payload.get("username")
print(username)
firstname =payload.get("firstname")
print(firstname)
lastname= payload.get("lastname")
print(lastname)
with pool.connection() as conn:
with conn.cursor() as cur:
try:
cur.execute(
"""
INSERT INTO user_profile (budget, userid)
VALUES (%s, %s)
RETURNING id, budget, userid
""",
[profile.budget, userid],
)
except UniqueViolation:
response.status_code = status.HTTP_409_CONFLICT
return {
"message": "duplicate profile",
}
row = cur.fetchone()
record = {
"firstname": firstname,
"lastname": lastname,
"username": username,
"budget": profile.budget,
"userid": userid,
}
for i, column in enumerate(cur.description):
record[column.name] = row[i]
return record
@router.get(
"/api/profile/",
response_model = ProfileOut,
responses = {
200: {"model": ProfileOut},
400: {"model": ErrorMessage},
},
)
def profile_list(
response: Response,
query=Depends(ProfileQueries),
bearer_token: str = Depends(oauth2_scheme),
):
if bearer_token is None:
raise credentials_exception
payload = jwt.decode(bearer_token, SECRET_KEY, algorithms=[ALGORITHM])
user = payload.get("username")
id = payload.get("userid")
row = query.get_profile(id)
if row is None:
response.status_code = status.HTTP_404_NOT_FOUND
return {"message": "profile not found"}
return row
| MichaelEChristian/ProjectGamma | backend/accounts/api/routers/profile.py | profile.py | py | 3,353 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.security.OAuth2PasswordBearer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name"... |
15575848076 | # Based on data show cities with positive temperature. Additionally - calculate average temperature for all cities.
# Use map(), filter() and reduce() function for that
from functools import reduce

data_from_api = [
    {'city': 'Kraków', 'province_id': 8, 'current_temp': 3.5},
    {'city': 'Warszawa', 'province_id': 1, 'current_temp': 2.8},
    {'city': 'Suwałki', 'province_id': 9, 'current_temp': -0.5},
    {'city': 'Gdańsk', 'province_id': 3, 'current_temp': -0.1},
    {'city': 'Rzeszów', 'province_id': 7, 'current_temp': 3.9},
    {'city': 'Wrocław', 'province_id': 2, 'current_temp': 5.0},
]

# Names of the cities whose current temperature is not below zero.
mapped = list(map(lambda reading: reading['city'],
                  filter(lambda reading: reading['current_temp'] >= 0, data_from_api)))

# Average temperature over *all* cities (warm and cold alike).
total_temp = reduce(lambda acc, reading: acc + reading['current_temp'], data_from_api, 0)
avg_temp = total_temp / len(data_from_api)

print(f"Cities with positive temperature: {', '.join(mapped)}")
print (f'Average temperature for all cities is: {avg_temp:.3}°C') | deinoo/python | other/filter_map_reduce_for_temperature.py | filter_map_reduce_for_temperature.py | py | 1,113 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "functools.reduce",
"line_number": 17,
"usage_type": "call"
}
] |
9527606346 | from pytest import fixture
from core.db.models import db
from core.db.models.domain_attribute import DomainAttribute
@fixture
def domain_attribute(domain_category):
    """Persist a minimal DomainAttribute row (linked to the given
    domain_category fixture) and yield it to the test."""
    attribute_kwargs = dict(
        name='domain attribute',
        category_id=domain_category.id,
        has_taxonomy=False,
        taxonomy_is_scored=False,
        is_multivalue=False,
        is_required=False,
        is_runtime=False,
        should_extract_values=False,
        code='code',
        datatype='text',
        default_value=1,
        should_extract_from_name=False,
    )
    attribute = DomainAttribute(**attribute_kwargs)
    db.session.add(attribute)
    db.session.commit()
    yield attribute
| NLPDev/Wine_Project | tests/fixtures/domain_attribute.py | domain_attribute.py | py | 632 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "core.db.models.domain_attribute.DomainAttribute",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "core.db.models.db.session.add",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "core.db.models.db.session",
"line_number": 24,
"usage_type":... |
29890631794 | import requests
import random
import string
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Simple latency benchmark: fire `tries` GET requests against each endpoint
# variant and report total / average response times per endpoint.
url="http://127.0.0.1:5166"
routes=["/code","/linq","/rawsql"]
tries=50
# NOTE(review): `sum` shadows the builtin, but later reporting code relies
# on this name, so it is kept for compatibility.
sum = []
queries=[]
print("------------Starting Benchmark------------")
## generate one random 6-letter search query per request so results are not
## skewed by server-side caching of a single query
for i in range(tries):
    queries.append(''.join(random.choices(string.ascii_uppercase, k=6)))
for route in routes:
    print("Benchmarking route "+route)
    sumtemp = False
    for i in range(tries):
        # BUG FIX: the second query-string separator must be '&', not '?';
        # previously 'sortby' was swallowed into the 'search' value.
        response = requests.get(url+route+'?search='+queries[i]+'&sortby=zipcode', verify=False)
        print("Request {} completed in {}ms".format(i+1, response.elapsed.total_seconds() * 1000))
        if i == 0:
            sumtemp = response.elapsed
        else:
            sumtemp = sumtemp + response.elapsed
    sum.append(sumtemp)
print("-----------Benchmark Ended------------")
# Averages now divide by `tries` instead of a hard-coded 50.
print("Endpoint /code1 : a total of {:.2f}ms with an average of {:.2f}ms per request".format(sum[0].total_seconds() * 1000, (sum[0]/tries).total_seconds() * 1000))
print("Endpoint /linq : a total of {:.2f}ms with an average of {:.2f}ms per request".format(sum[1].total_seconds() * 1000, (sum[1]/tries).total_seconds() * 1000))
print("Endpoint /rawsql : a total of {:.2f}ms with an average of {:.2f}ms per request".format(sum[2].total_seconds() * 1000,(sum[2]/50).total_seconds() * 1000)) | amineamri3/SocialBrothersCase | benchmark.py | benchmark.py | py | 1,407 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "random.choices",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "string.... |
34436411858 | #!/usr/bin/env python3
import sys
import os
from threading import Thread
from queue import Queue, LifoQueue
from functools import cache
from operator import itemgetter
from collections import Counter
from rectangle import Rectangle
# Keep a count of each area a rectangle might have
rect_areas = Counter()
# LIFO so MOVE/RESIZE commands can grab the most recently added rectangle.
rect_queue = LifoQueue()
# Computed areas flow from the worker threads into this queue.
area_queue = Queue()
@cache
def create_rectangle(bottom, left, top, right):
    """Return a (memoized) Rectangle for the given coordinates.

    Creating a rectangle is expensive; @cache reuses the existing instance
    whenever the same (bottom, left, top, right) arguments appear again.
    """
    return Rectangle(bottom, left, top, right)
def read_rectangles():
    """Parse drawing commands from stdin and fill rect_queue.

    Commands:
      CREATE bottom left top right -- enqueue a new rectangle
      MOVE vertical horizontal     -- also enqueue a moved copy of the
                                      most recently queued rectangle
      RESIZE vertical horizontal   -- also enqueue a resized copy of the
                                      most recently queued rectangle
    """
    for line in sys.stdin:
        cmd, data = line.split(maxsplit=1)
        if cmd == "CREATE":
            bottom, left, top, right = [float(n) for n in data.split()]
            rect = create_rectangle(bottom, left, top, right)
            rect_queue.put(rect)
        elif cmd == "MOVE":
            # Add moved version of the previously read rectangle
            vertical, horizontal = [float(n) for n in data.split()]
            rect = rect_queue.get()
            # BUG FIX: balance this get() with task_done(); otherwise the
            # re-put below inflates the queue's unfinished-task counter and
            # rect_queue.join() in __main__ deadlocks forever.
            rect_queue.task_done()
            new_rect = rect.move(vertical, horizontal)
            rect_queue.put(rect)
            rect_queue.put(new_rect)
        elif cmd == "RESIZE":
            # Add resized version of the previously read rectangle
            vertical, horizontal = [float(n) for n in data.split()]
            rect = rect_queue.get()
            rect_queue.task_done()  # see BUG FIX note above
            new_rect = rect.resize(vertical, horizontal)
            rect_queue.put(rect)
            rect_queue.put(new_rect)
def rect_to_area():
    """Worker: drain rect_queue, pushing each rectangle's area onto area_queue.

    Uses get_nowait() instead of an empty()/get() pair: with several workers
    racing, a thread that lost the race would otherwise block forever inside
    a blocking get().
    """
    from queue import Empty  # top-of-file import only pulls Queue/LifoQueue
    while True:
        try:
            rect = rect_queue.get_nowait()
        except Empty:
            break
        area_queue.put(rect.area())
        rect_queue.task_done()
def area_to_counter():
    """Drain area_queue (single-threaded) and tally each area in rect_areas."""
    while not area_queue.empty():
        area = area_queue.get(timeout=1)
        rect_areas[area] += 1
if __name__ == '__main__':
    # Producer runs on the main thread: fills rect_queue from stdin.
    read_rectangles()
    # One daemon worker per CPU converts rectangles to areas.
    for _ in range(os.cpu_count()):
        Thread(target=rect_to_area, daemon=True).start()
    # Block until every queued rectangle has been marked task_done().
    rect_queue.join()
    # Tally the computed areas single-threaded once the workers are done.
    area_to_counter()
    print("Number of rectangles computed:", sum(rect_areas.values()))
    print("Most common rectangle areas:")
    # most_common() already orders by count; the explicit sort keeps the
    # ordering guaranteed and intent obvious.
    most_common = sorted(rect_areas.most_common(),
                         key=itemgetter(1), reverse=True)
    for area, count in most_common:
        print(" Area %s\t%d rectangles" % (area, count))
| atlantistechnology/thinking-about-debugging | queues/summarize.py | summarize.py | py | 2,262 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "queue.LifoQueue",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rectangle.Rectangle",... |
70320340513 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 9 17:05:27 2021
Author: James Dixon
Date: Summer 2021
Convex Optimisation program
's' is signal estimate
"""
import numpy as np
from scipy.optimize import minimize
def Opt(MeasMtx,y,Errorbars,MaxPhtnNum):
    """Estimate the photon-number signal s by convex optimisation.

    Minimises P_0 + P_1 subject to (a) the modelled measurements Theta*s
    matching the observed measurements y within the tightest error bar and
    (b) s being a normalised vector with entries in [0, 1].

    Parameters
    ----------
    MeasMtx : measurement matrix Theta.
    y : observed measurement vector.
    Errorbars : per-measurement error bars; the minimum is the bound used.
    MaxPhtnNum : length of the signal estimate s.
    """
    # Error bound: tightest of the supplied error bars.
    epsilon = min(Errorbars)

    def objective(s):
        "minimise sum of P_0 and P_1"
        return np.sum(s[:2])

    def IneqCons(s):
        """
        Inequality constraint (f(x) >= 0): the l2 norm of the difference
        between modelled measurements (Theta*s) and the observed ones must
        not exceed epsilon.
        """
        delta = y - np.dot(MeasMtx, s)
        return epsilon - np.linalg.norm(delta, ord=2)

    def EqCons(s):
        "Equality constraint: elements of s sum to 1 (normalisation)."
        return (1 - np.sum(s))*1000

    # Initial guess: random, normalised to sum to 1.
    s_initial = np.random.rand(MaxPhtnNum)
    s_initial = s_initial/np.sum(s_initial)
    # Each estimate element is bounded: 0 <= s_i <= 1.
    bnds = [(0., 1.0)] * MaxPhtnNum
    cons = [{'type': 'eq', 'fun': EqCons},
            {'type': 'ineq', 'fun': IneqCons}]
    # Optimise once from the random start, then re-run five more times from
    # the previous optimum (replaces six copy-pasted minimize() calls).
    solution = minimize(objective, s_initial, method='SLSQP',
                        bounds=bnds, constraints=cons)
    for _ in range(5):
        solution = minimize(objective, solution.x, method='SLSQP',
                            bounds=bnds, constraints=cons)
return solution.x | jmsdixon/Photon-Stats-ConVxOpt | ConvexOpt1.py | ConvexOpt1.py | py | 2,316 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.sum",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number... |
11648461448 | import os
import logging
from fastapi import FastAPI
from fastapi.responses import FileResponse
# Minimal FastAPI app that serves a single image from a mounted volume.
app = FastAPI()
# Informational only (used in the startup log line below); "NNNN" is a
# placeholder default when the PORT env var is unset.
PORT = os.environ.get('PORT', "NNNN")
# Reuse uvicorn's error logger so the message appears in server output.
logger = logging.getLogger("uvicorn.error")
logger.info(f"Server started in port {PORT}")
# Path where another process/container is expected to place the image.
IMAGE_PATH = "/volume/image.png"
@app.get("/")
async def root():
    # Serve the image when present; otherwise fall through to the JSON
    # message in the else branch below.
    if os.path.exists(IMAGE_PATH):
        # NOTE(review): could reuse IMAGE_PATH instead of repeating the literal.
        return FileResponse("/volume/image.png")
else:
return {"message": "Image is not downloaded."} | vinhng10/devops-with-kubernetes | part1/exercise-112-Project-v0.6/backend/app/main.py | main.py | py | 458 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"... |
20004381625 | import cv2
from os import listdir
from os.path import isfile, join
import os
import shutil
def rearrange(files):
    """Reorder a flat list of page files that arrive in groups of four.

    The first two entries of every 4-entry group are taken in forward group
    order, then the last two entries of every group in reverse group order.
    Returns False when the listing was already processed (first name
    contains "ordered") or the page count is not a multiple of four.
    """
    if "ordered" in files[0]:
        return False
    if len(files) % 4 != 0:
        return False
    reordered = []
    # First pair of each 4-entry group, groups in forward order.
    for start in range(0, len(files), 4):
        reordered.extend(files[start:start + 2])
    # Second pair of each group, groups in reverse order.
    for start in range(len(files) - 4, -1, -4):
        reordered.extend(files[start + 2:start + 4])
    return reordered
def save(files, dir_path):
    """Rename the given files in place to the 'ordered-<index>.<ext>' scheme.

    The extension is taken from the first file; files are renamed in the
    order they appear in the list.
    """
    ext = files[0].rsplit(".", 1)[-1]
    for index, source_path in enumerate(files):
        destination = join(dir_path, f"ordered-{index}.{ext}")
        shutil.move(source_path, destination)
def scout_dir(dir_path="dir_test"):
    """Walk *dir_path* and return every directory whose path has exactly
    three '/'-separated components (issue folders two levels below a
    single-component root)."""
    issue_dirs = []
    for current_dir, _dirs, _files in os.walk(dir_path):
        if len(current_dir.split("/")) == 3:
            issue_dirs.append(current_dir)
    return issue_dirs
def visualize(files):
    """Display each image in turn in a cv2 window; any key advances."""
    for file in files:
        image = cv2.imread(file)
        cv2.imshow("picture", image)
        cv2.waitKey(0)
def work_on_dir(dir_path="dir_test"):
    """Reorder and rename the page files of every issue directory under
    *dir_path*, printing progress after each issue."""
    issues = scout_dir(dir_path=dir_path)
    total = len(issues)
    for index, issue_path in enumerate(issues):
        pages = sorted(
            join(issue_path, name)
            for name in listdir(issue_path)
            if isfile(join(issue_path, name))
        )
        reordered = rearrange(pages)
        if reordered != False:
            save(reordered, issue_path)
        print(f"Processed {index+1} issues out of {total} | {((index/total) * 100):0.2f}%")
"""
TEST FUNCS
"""
def test():
a = [1, 2, 11, 12, 3, 4, 9, 10, 5, 6, 7, 8]
print(a)
a = rearrange(a)
print(a)
def test_with_files(dir_path="data"):
    """Manual test: reorder and rename the real files under *dir_path*.

    WARNING: destructive -- save() moves the files on disk.
    """
    files = [join(dir_path, f) for f in listdir(dir_path) if isfile(join(dir_path, f))]
    print(files)
    files = sorted(files, reverse=False)
    rearranged = rearrange(files)
    # NOTE(review): unlike work_on_dir(), a False return from rearrange()
    # is not checked here, so save() would crash on an invalid directory.
    save(rearranged, dir_path)
    print(rearranged)
if __name__ == "__main__":
    # Entry point: point dir_path at the top-level directory of issues.
    work_on_dir(dir_path="dir_test")
| dartmouth-review/reorganizer | rearrange.py | rearrange.py | py | 1,875 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "shutil.move",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 31,
... |
25694035511 | import os
from flask import Flask, request, jsonify, render_template
from keras.preprocessing import image
from bs4 import BeautifulSoup
from keras import backend as K
import keras
import requests
import re
# Flask app; template_folder is a hard-coded absolute path.
# NOTE(review): this breaks on any machine but the author's.
app = Flask(__name__,template_folder='/Users/iqbalsandhu/Desktop/finalproject-2')
# Uploaded photos are saved here before classification.
app.config['UPLOAD_FOLDER'] = 'uploads'
# Maps the classifier's output index to its food-class label.
idx2label = {0: 'apple_pie',1: 'caesar_salad',2: 'cannoli',3: 'cheesecake', 4: 'chicken_wings', 5: 'cup_cakes', 6: 'donuts',7: 'french_fries',8: 'grilled_cheese_sandwich',9: 'guacamole', 10: 'hamburger',
11: 'hot_and_sour_soup',
12: 'hot_dog',
13: 'ice_cream',
14: 'lasagna',
15: 'oysters',
16: 'pizza',
17: 'spaghetti_carbonara',
18: 'steak',
19: 'sushi',
20: 'tacos',
21: 'waffles'}
def load_model():
    """Load the trained food classifier and remember its TF session graph.

    Sets the module-level `model` and `graph`; `graph` is reused later in
    the request handler (graph.as_default()) when predicting.
    """
    global model
    global graph
    # NOTE(review): hard-coded absolute path -- breaks outside the author's machine.
    model = keras.models.load_model('/Users/iqbalsandhu/Desktop/finalproject-2/food_trained.h5')
    graph = K.get_session().graph
# Load once at import time so requests can use the model immediately.
load_model()
def prepare_image(img):
    """Convert a loaded image into the flat feature rows the model expects."""
    # Convert the image to a numpy array
    img = image.img_to_array(img)
    # Scale pixel values from [0, 255] down to [0, 1]
    img /= 255
    # Flatten into rows of 7*7*512 = 25088 features.
    # NOTE(review): a 224x224x3 image flattens to 150528 values = 6 rows of
    # 25088 -- confirm the model really expects raw pixels rather than
    # VGG-style 7x7x512 feature maps.
    image_resized = img.flatten().reshape(-1, 7*7*512)
    # Return the processed feature array
    return image_resized
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Classify an uploaded food photo and look up its calories.

    POST with a 'file' part: saves the upload, runs the classifier, scrapes
    the USDA NDB site for the predicted food's kcal value, and renders the
    result. GET simply renders the empty form.
    """
    data = {}
    if request.method == 'POST':
        print(request)
        if request.files.get('file'):
            # read the file
            file = request.files['file']
            # read the filename
            filename = file.filename
            # NOTE(review): filename comes straight from the client --
            # consider werkzeug.utils.secure_filename to avoid path traversal.
            # create a path to the uploads folder
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            # Save the file to the uploads folder
            file.save(filepath)
            # Load the saved image using Keras and resize it to the 224x224
            # input size expected by the model
            image_size = (224, 224)
            im = image.load_img(filepath, target_size=image_size)
            # Convert the 2D image to an array of pixel values
            image_array = prepare_image(im)
            print(image_array)
            # Get the tensorflow default graph and use it to make predictions
            global graph
            with graph.as_default():
                # Use the model to make a prediction
                predicted_digit = model.predict_classes(image_array)[0]
                # Translate the class index into its label.
                # NOTE(review): idx2label[predicted_digit] would do this directly.
                for k,v in idx2label.items():
                    if k == predicted_digit:
                        data['prediction'] = v
                # indicate that the request was a success
                data["success"] = True
                # Scrape the USDA NDB search results for the predicted food.
                # NOTE(review): brittle -- depends on the site's exact HTML.
                url_cal = 'https://ndb.nal.usda.gov/ndb/search/list?fgcd=&manu=&lfacet=&count=&max=25&sort=default&qlookup={}&offset=&format=Abridged&new=&measureby=&ds=SR&order=asc&qt=&qp=&qa=&qn=&q=&ing='.format(data['prediction'])
                response1 = requests.get(url_cal)
                soup = BeautifulSoup(response1.text, 'lxml')
                row = soup.findAll('td')[2]
                # Follow the first result's detail page.
                newurl = 'https://ndb.nal.usda.gov'+ row.find('a')['href']
                newresponse = requests.get(newurl)
                newsoup = BeautifulSoup(newresponse.text, 'lxml')
                # Locate the kcal cell; its sibling holds the numeric value.
                unit = newsoup.find('td', text = 'kcal')
                unitstr = str(unit)
                numkcal= str(unit.find_next_sibling("td"))
                # Strip the HTML tags, keeping only the text content.
                unit = re.sub("<.*?>", "", unitstr)
                numkcal = re.sub("<.*?>", "", numkcal)
                data['calories'] = numkcal
    return render_template('index.html', data = data)
if __name__ == "__main__":
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
| thusneem/CNN | finalproject-2/app.py | app.py | py | 3,736 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "keras.backend.ge... |
73564417634 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from main.models import *
from main.forms import *
from django.conf import settings
from django.core import serializers
from django.http import HttpResponse
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render_to_response
from PIL import Image
@csrf_exempt
def rank(request):
if request.method == "POST":
form = RankForm(request.POST)
if form.is_valid():
fb_shared = form.cleaned_data['facebook_share']
tw_shared = form.cleaned_data['twitter_share']
name = form.cleaned_data['name']
try:
art = Article.objects.get(name=name)
except:
art = Article()
art.name = name
art.save()
if fb_shared:
art.facebook_count = art.facebook_count + 1
art.total_count = art.total_count + 1
art.facebook_rate = (art.facebook_rate +
form.cleaned_data['facebook_rate']) / art.facebook_count
art.save()
elif tw_shared:
art.twitter_count = art.twitter_count + 1
art.total_count = art.total_count + 1
art.twitter_rate = (art.twitter_rate +
form.cleaned_data['twitter_rate']) / art.twitter_count
art.save()
else:
return HttpResponse("ERROR NOT SHARED")
return HttpResponse("OK")
else:
return HttpResponse("ERROR NOT VALID")
else:
return HttpResponse("ERROR METHOD")
@csrf_exempt
def best(request):
if request.method == "POST":
if 'cant' in request.POST:
cant = request.POST.get('cant')
else:
cant = 5
data = serializers.serialize('json',
Article.objects.all().order_by('total_count')[:cant],
fields=('name', 'total_count', 'facebook_count', 'twitter_count'))
return HttpResponse(data, mimetype='application/json')
else:
return HttpResponse("ERROR METHOD")
@csrf_exempt
def worst(request):
if request.method == "POST":
if 'cant' in request.POST:
cant = request.POST.get('cant')
else:
cant = 5
data = serializers.serialize('json',
Article.objects.all().order_by('-total_count')[:cant],
fields=('name', 'total_count', 'facebook_count', 'twitter_count'))
return HttpResponse(data, mimetype='application/json')
else:
return HttpResponse("ERROR METHOD")
@csrf_exempt
def sendImage(request):
if request.method == "POST":
form = ImgForm(request.POST, request.FILES)
if form.is_valid():
imgObject=form.save(commit=False)
imgObject.save()
resize(imgObject.img.path)
return HttpResponse("OK")
else:
return HttpResponse("ERROR NOT VALID")
else:
form = ImgForm()
return render_to_response("upload.html",
{'form': form}, context_instance=RequestContext(request))
#~ return HttpResponse("ERROR")
@csrf_exempt
def voteImage(request):
if request.method == "POST":
form = VoteImageForm(request.POST)
if form.is_valid():
imageID = form.cleaned_data['img_id']
try:
img = Imagen.objects.get(id=imageID)
img.votes = img.votes + 1
img.save()
return HttpResponse("OK")
except:
return HttpResponse("ERROR NOT EXIST")
else:
return HttpResponse("ERROR NOT VALID")
else:
return HttpResponse("ERROR METHOD")
@csrf_exempt
def getImages(request):
if 'elem' in request.GET:
name = request.GET['elem']
iList = serializers.serialize('json',
Imagen.objects.filter(article_name=name).order_by('-votes'),
fields=('img'))
return HttpResponse(iList, mimetype='application/json')
else:
return HttpResponse("ERROR NOT EXIST")
def resize(filename):
img = Image.open(filename)
width, height = img.size
if width>=height:
nWidth = settings.MAX_IMG_SIZE
nHeight = settings.MAX_IMG_SIZE*height/width
else:
nHeight = settings.MAX_IMG_SIZE
nWidth = settings.MAX_IMG_SIZE*width/height
thumb = img.resize((nWidth, nHeight), Image.ANTIALIAS)
justName = filename.rsplit("/",1)[1]
thumb.save(filename.replace(justName, "thumb_" + justName))
#def extra(request):
#iList = Imagen.objects.all()
#for i in iList:
#resize(i.img.path)
#return HttpResponse("OK") | gruizmir/valporanking | main/views.py | views.py | py | 4,771 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 44,
"usage_type": "call"
},
{
"api_na... |
25167180092 | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
from apps import database, measurement, home, posttest, config
import pandas
import glob
# Top-level layout: a URL component (drives routing) plus a container that
# the callback below fills with the current page's layout.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content')
])
#callback sets the layout for the main menu
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Return the page layout matching the requested URL path.

    Unknown paths fall through to a plain '404' string.
    """
    if pathname == '/':
        return home.layout
    elif pathname == '/measurement':
        return measurement.layout
    elif pathname == '/database':
        return database.layout
    elif pathname == '/posttest':
        return posttest.layout
    else:
        return '404'
if __name__ == '__main__':
    # Host/port chosen so the dashboard is reachable from the LAN.
    app.run_server(port = 8077, host='192.168.0.103')
#get the ip above by running hostname -I on RBP. Call sudo python3 index.py to run the program. | donovan97/IAP_Measurement | src/index.py | index.py | py | 977 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.app.layout",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "app.app",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dash_core_compon... |
12889468637 | import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader
from torchvision import transforms
from functions import*
import os
'''
abbvi without any extension
'''
# ---- Hyper-parameters ----
num_epochs=1
batchSize=500
num_S=5  # number of Monte-Carlo samples drawn during training
dim=1000000+1
num_St=100  # number of Monte-Carlo samples drawn for evaluation
#eta=0.05  # eta, k, w and c correspond to the parameters in the paper
k=1
w=5e13
c=1.3e9
M=10
# NOTE(review): duplicate assignment -- num_St is already set above.
num_St=100  # number of Monte-Carlo samples drawn for evaluation
interval=20
# ---- Load the data ----
train_index=np.linspace(0,999999,1000000)
with open('./dataset/criteo-train-sub1000000.txt','r') as f:
    train_datas=f.readlines()
train_loader=DataLoader(train_index,batch_size=batchSize,shuffle=True)
# ---- Variational distribution parameters ----
para=torch.zeros(dim*2,requires_grad=True)
#para[dim:]=torch.ones(dim)*(-1)
scale=1000000/batchSize
G=torch.zeros(dim*2)
# Containers for results that need to be stored.
elbo_list=[]
para_list=[]
# Working variables for the iteration below.
G_pow2=None
grad_d=None
para_last=None
# ---- Main optimisation loop ----
for epoch in range(num_epochs):
    for i ,data_index in enumerate(train_loader):
        labels,images=data_preprocess(train_datas,data_index,dim)
        revise=batchSize/len(images)
        #ELBO evaluate & record para
        if i==len(train_loader)-1:
            para_list.append(para.clone().detach().numpy())
        if (epoch*len(train_loader)+i)%interval==0:
            elbo_list.append(elbo_evaluate(images,labels,para,dim,scale,revise,num_St).item())
        # First step: initialise the gradient estimate, then continue.
        if(epoch==0 and i==0):
            grad_d,G_pow2=nabla_F_Calc(images,labels,para,dim,num_S,scale,revise)
            continue
        # Step size
        rho=k/(w+G_pow2)**(1/3)
        # Parameter update
        para_last=para.clone().detach()
        update=rho*grad_d
        para.data+=update
        # Mixing coefficient b_t (clamped to 1)
        b=c*rho*rho
        if b>1: b=1
        # Gradient estimate nabla_F and accumulated squared-norm term
        nabla_F,temp=nabla_F_Calc(images,labels,para,dim,num_S,scale,revise)
        G_pow2+=temp
        # Delta: Hessian-vector correction averaged over M random points
        # between para_last and para.
        Delta_temp=torch.zeros(dim*2)
        delta=(para-para_last).clone().detach().requires_grad_(False)
        A=torch.rand(M)
        for j in range(M):
            para_a=((1-A[j])*para_last+A[j]*para).clone().detach()
            Delta_temp+=hessian_F_Calc(images,labels,para_a,delta,dim,num_S,scale,revise)
        Delta=Delta_temp/M
        # Recursive gradient estimate update
        grad_d=(1-b)*(grad_d+Delta)+b*nabla_F
        print(b,torch.median(update.abs()),torch.max(update.abs()))
        #print information
        if (epoch*len(train_loader)+i)%interval==0:
            print('Epoch[{}/{}], step[{}/{}]'.format(\
                epoch+1,
                num_epochs,
                i+1,len(train_loader)))
            print('ELBO: {:.3f}\n'.format(\
                elbo_list[len(elbo_list)-1]))
# ---- Persist the recorded ELBO values and parameter snapshots ----
if not os.path.exists('./result_elbo'):
    os.makedirs('./result_elbo')
result=np.array(elbo_list)
np.save('./result_elbo/abbvi_basic.npy',result)
if not os.path.exists('./result_para'):
    os.makedirs('./result_para')
result=np.array(para_list)
np.save('./result_para/abbvi_basic.npy',result)
| allenzhangzju/Black_Box_Variational_Inference | bbvi_criteo4000000/abbvi_basic.py | abbvi_basic.py | py | 3,145 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.dataloader.DataLoader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tor... |
30896401362 | from data import get_data
from experiments import INVASETrainer
from config import get_decode_args
import torch
from models.decoder import LinearDecoder
import os
from tqdm import tqdm
from robustness.tools.helpers import AverageMeter
from argparse import Namespace
from torch import optim
import torch.nn as nn
import matplotlib.pyplot as plt
import json
import numpy as np
N_IMAGES = 2
def load_ckpt(trainer, ckpt_path):
    """Restore model/optimizer weights and the epoch counter onto *trainer*.

    Returns the same trainer instance for call chaining.
    """
    checkpoint = torch.load(ckpt_path)
    trainer.model.load_state_dict(checkpoint["model"])
    trainer.optimizer.load_state_dict(checkpoint["optimizer"])
    trainer.epoch = checkpoint["epoch"]
    return trainer
def main(args):
    """Fit a linear decoder on a trained INVASE selector and report MSEs.

    Loads the trained model from ``args.trained_path``, trains a decoder to
    reconstruct inputs from the selector's output, periodically saves
    reconstruction visualisations, and writes final train/test MSEs to
    ``decode_results.json``.
    """
    path = args.trained_path
    ckpt_path = os.path.join(path, "checkpoint")
    config_path = os.path.join(path, "config.json")
    decode_result_path = os.path.join(path, "decode_results.json")
    # Reload the experiment configurations
    with open(config_path, "r") as fp:
        trainer_args_dict = json.load(fp)
    trainer_args = Namespace(**trainer_args_dict)
    # Get the data
    dim, label_dim, train_loader, test_loader = get_data(trainer_args)
    dim = train_loader.dataset.input_size
    label_dim = train_loader.dataset.output_size
    # Load from the checkpoint
    trainer = INVASETrainer(dim, label_dim, trainer_args, path)
    trainer = load_ckpt(trainer, ckpt_path)
    # Construct the decoder
    decoder = LinearDecoder(dim)
    optimizer = optim.Adam(decoder.parameters(), 0.1, weight_decay=1e-4)
    loss_fn = nn.MSELoss()
    # Obtain these parameters to undo normalization
    mean = torch.tensor(train_loader.dataset.means)
    std = torch.tensor(train_loader.dataset.stds)
    # Tuning the decoder
    for i in range(args.decoder_epochs):
        MSE = AverageMeter()
        b_loader = tqdm(train_loader)
        trainer.model.eval()
        for x_batch, y_batch, _ in b_loader:
            b_loader.set_description(
                f"EpochProvision: DecodingMSE: {MSE.avg}")
            x_batch, y_batch = x_batch.to(args.device), y_batch.to(args.device)
            optimizer.zero_grad()
            # Generate a batch of selections
            selection_probability = trainer.model(x_batch, fw_module="selector")
            # Predictor objective
            used, reconstruction = decoder(selection_probability, x_batch)
            # Convert to pixels space
            reconstruction = reconstruction * std + mean
            x_batch = x_batch * std + mean
            loss = loss_fn(reconstruction, x_batch)
            MSE.update(loss.detach().item(), y_batch.shape[0])
            loss.backward()
            optimizer.step()
        if (i+1) % args.eval_freq == 0:
            # Halve the learning rate at every evaluation point.
            for param_group in optimizer.param_groups:
                param_group['lr'] = param_group['lr']/2
            # Visualise original / reconstruction / selected pixels for a
            # few samples from the last processed batch.
            fig, axs = plt.subplots(N_IMAGES, 3, figsize=(10, 5))
            flat_shape = x_batch.shape[1]
            # assumes inputs are square images flattened to 1-D -- TODO confirm
            img_dim = int(np.sqrt(flat_shape))
            # NOTE(review): this inner loop shadows the epoch variable `i`;
            # harmless here (the outer for reassigns it) but worth renaming.
            for i in range(N_IMAGES):
                im = x_batch[i].detach().numpy().reshape((img_dim, img_dim))
                im_rec = reconstruction[i].detach().numpy().reshape((img_dim, img_dim))
                im_chosen = used[i].detach().numpy().reshape((img_dim, img_dim))
                axs[i][0].imshow(im)
                axs[i][1].imshow(im_rec)
                axs[i][2].imshow(im_chosen)
                axs[i][0].set_axis_off()
                axs[i][1].set_axis_off()
                axs[i][2].set_axis_off()
            axs[0][0].set_title("Original Image", fontsize=18)
            axs[0][1].set_title("Reconstructed Image", fontsize=18)
            axs[0][2].set_title("Chosen Pixels", fontsize=18)
            fig.savefig(os.path.join(path, "reconstruction_viz.pdf"))
            fig.savefig(os.path.join(path, "reconstruction_viz.png"))
            plt.close(fig)
    # Final evaluation over train and test splits.
    # NOTE(review): the MSE meter is NOT reset between the "Train" and
    # "Test" modes, so the reported "Test" value also averages the train
    # batches -- confirm whether that is intended.
    MSE = AverageMeter()
    modes = [("Train", train_loader), ("Test", test_loader)]
    decoder.eval()
    trainer.model.eval()
    result = dict()
    for mode, loader in modes:
        b_loader = tqdm(loader)
        for x_batch, y_batch, _ in b_loader:
            b_loader.set_description(
                f"EpochProvision: DecodingMSE: {MSE.avg}")
            x_batch, y_batch = x_batch.to(args.device), y_batch.to(args.device)
            selection_probability = trainer.model(x_batch, fw_module="selector")
            used, reconstruction = decoder(selection_probability, x_batch)
            reconstruction = reconstruction * std + mean
            x_batch = x_batch * std + mean
            loss = loss_fn(reconstruction, x_batch)
            MSE.update(loss.detach().item(), y_batch.shape[0])
        print(f"{mode} Final: ", MSE.avg)
        result[mode] = MSE.avg
    with open(decode_result_path, "w") as fp:
        json.dump(result, fp)
if __name__ == '__main__':
    # Parse CLI flags (trained_path, decoder_epochs, eval_freq, device, ...).
    args = get_decode_args()
    main(args)
| choheeee22/invase-pytorch | decode_analysis.py | decode_analysis.py | py | 4,778 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.