text string | size int64 | token_count int64 |
|---|---|---|
from tkinter import font
class Config:
    """Shared UI configuration (currently just the editor font)."""

    def __init__(self, master, *args, **kwargs):
        """
        :param master: the Tk root (or parent widget) this config belongs to.
        """
        self.master = master
        # Monospaced editor font; note font.Font requires a running Tk root.
        self.font = font.Font(family="Consolas", size=11, weight="normal")
| 219 | 64 |
import json
import logging
from hooks.vivareal_hook import VivarealHook
from airflow.utils.decorators import apply_defaults
from airflow.models.baseoperator import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# NOTE(review): basicConfig at import time configures the process-wide root
# logger; inside Airflow, logging is normally configured by the scheduler /
# worker already -- confirm this line is actually needed.
logging.basicConfig(level=logging.INFO)
class VivarealOperator(BaseOperator):
    """
    Airflow operator that pulls listing data via :class:`VivarealHook`,
    writes it to a local ``vivareal.json`` file and uploads that file to S3.

    :param s3_key: Destination object key in the bucket (templated).
    :param s3_bucket_name: Destination S3 bucket name (templated).
    """

    template_fields = [
        "s3_key",
        "s3_bucket_name"
    ]

    @apply_defaults
    def __init__(self, s3_key, s3_bucket_name, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.s3_key = s3_key
        self.s3_bucket_name = s3_bucket_name

    def execute(self, context):
        """Fetch all data, dump it to 'vivareal.json', then upload to S3."""
        hook = VivarealHook()
        s3_hook = S3Hook(aws_conn_id="s3_connection")
        logger.info("Getting data")
        with open("vivareal.json", "w") as fp:
            for blocks in hook.run():
                for ap in blocks:
                    # NOTE(review): this writes comma-separated JSON objects,
                    # which is not one valid JSON document -- confirm the
                    # downstream consumer expects this format.
                    json.dump(ap, fp, ensure_ascii=False)
                    fp.write(",\n")
        # Upload only after the 'with' block has flushed and closed the file;
        # uploading while the handle is still open risks shipping a truncated
        # object to S3.
        logger.info("Uploading object in S3 %s", self.s3_bucket_name)
        s3_hook.load_file(
            filename="vivareal.json",
            key=self.s3_key,
            bucket_name=self.s3_bucket_name
        )
if __name__ == "__main__":
    # Local smoke test. The original passed an unsupported 'file_path' kwarg
    # (TypeError via BaseOperator) and called execute() without the required
    # 'context' argument; both are fixed here.
    operator = VivarealOperator(
        task_id="vivareal_to_s3",
        s3_key="vivareal.json",
        s3_bucket_name="some-bucket",
    )
    operator.execute(context={})
#!/usr/bin/python
import numpy as np
import argparse
import time
import cv2 as cv
import os
def runYOLODetection(args):
    """Run YOLO fish detection on one image and draw the surviving boxes.

    :param args: dict with keys:
        "image" - path to the input image,
        "yolo" - base path of the YOLO model directory,
        "confidence" - minimum probability to keep a detection,
        "threshold" - NMS overlap threshold.
    :return: the BGR image annotated with bounding boxes and class labels.
    """
    # load my fish class labels that my YOLO model was trained on
    labelsPath = os.path.sep.join([args["yolo"], "fish.names"])
    # use a context manager so the label file handle is not leaked
    with open(labelsPath) as labelsFile:
        LABELS = labelsFile.read().strip().split("\n")
    # initialize a list of colors to represent each possible class label
    # (seeded so the colors are stable across runs)
    np.random.seed(0)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([args["yolo"], "fish.weights"])
    configPath = os.path.sep.join([args["yolo"], "fish_test.cfg"])
    # load my YOLO object detector trained on my fish dataset (1 class)
    print("[INFO] loading YOLO from disk ...")
    net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
    # load input image and grab its spatial dimensions
    image = cv.imread(args["image"])
    (H, W) = image.shape[:2]
    # determine only the *output* layer names that we need from YOLO.
    # getUnconnectedOutLayers() returns an Nx1 array on OpenCV < 4.5.4 but a
    # flat 1-D array on newer versions, where the old `i[0]` indexing crashes.
    # Flattening handles both layouts.
    ln = net.getLayerNames()
    outIdxs = np.asarray(net.getUnconnectedOutLayers()).flatten()
    ln = [ln[int(i) - 1] for i in outIdxs]
    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities.
    # NOTE: (608, 608) is my YOLO input image size. However, using
    # (416, 416) results in a much more accurate result. Pretty interesting.
    blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # show execution time information of YOLO
    print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
    # class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []
    # loop over each of the layer outputs and each detection therein
    for output in layerOutputs:
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # YOLO returns the box center plus width/height relative to
                # the network input; scale back to the image dimensions
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # derive the top-left corner from the center coordinates
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # apply non-maxima suppression to suppress weak and overlapping boxes
    idxs = cv.dnn.NMSBoxes(boxes, confidences, args["confidence"],
                           args["threshold"])
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping (flatten copes with both the
        # Nx1 and flat return layouts of NMSBoxes across OpenCV versions)
        for i in np.asarray(idxs).flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
            cv.putText(image, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX,
                       0.5, color, 2)
    return image
def runYOLOBoundingBoxes(args):
    """Run YOLO fish detection on one image, returning raw boxes + NMS picks.

    :param args: dict with keys "image", "yolo", "confidence", "threshold"
                 (same meaning as in runYOLODetection).
    :return: (image, boxes, idxs) -- the unannotated BGR image, the list of
             [x, y, w, h] candidate boxes, and the NMS-surviving indexes.
    """
    # load my fish class labels that my YOLO model was trained on
    labelsPath = os.path.sep.join([args["yolo"], "fish.names"])
    # use a context manager so the label file handle is not leaked
    with open(labelsPath) as labelsFile:
        LABELS = labelsFile.read().strip().split("\n")
    # initialize a list of colors to represent each possible class label
    # (seeded so the colors are stable across runs)
    np.random.seed(0)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([args["yolo"], "fish.weights"])
    configPath = os.path.sep.join([args["yolo"], "fish_test.cfg"])
    # load my YOLO object detector trained on my fish dataset (1 class)
    print("[INFO] loading YOLO from disk ...")
    net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
    # load input image and grab its spatial dimensions
    image = cv.imread(args["image"])
    (H, W) = image.shape[:2]
    # determine only the *output* layer names that we need from YOLO.
    # getUnconnectedOutLayers() returns an Nx1 array on OpenCV < 4.5.4 but a
    # flat 1-D array on newer versions, where the old `i[0]` indexing crashes.
    # Flattening handles both layouts.
    ln = net.getLayerNames()
    outIdxs = np.asarray(net.getUnconnectedOutLayers()).flatten()
    ln = [ln[int(i) - 1] for i in outIdxs]
    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities.
    # NOTE: (608, 608) is my YOLO input image size. However, using
    # (416, 416) results in a much more accurate result. Pretty interesting.
    blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # show execution time information of YOLO
    print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
    # class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []
    # loop over each of the layer outputs and each detection therein
    for output in layerOutputs:
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # YOLO returns the box center plus width/height relative to
                # the network input; scale back to the image dimensions
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # derive the top-left corner from the center coordinates
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # apply non-maxima suppression to suppress weak and overlapping boxes
    idxs = cv.dnn.NMSBoxes(boxes, confidences, args["confidence"],
                           args["threshold"])
    return image, boxes, idxs
def runYOLOBoundingBoxes_streamlit(image, yolopath, _confidence, _threshold):
    """Streamlit-friendly variant: detect on an in-memory image.

    :param image: BGR image array (already loaded by the caller).
    :param yolopath: base path of the YOLO model directory.
    :param _confidence: minimum probability to keep a detection.
    :param _threshold: NMS overlap threshold.
    :return: (boxes, idxs) -- candidate [x, y, w, h] boxes and the
             NMS-surviving indexes into them.
    """
    # load my fish class labels that my YOLO model was trained on
    labelsPath = os.path.sep.join([yolopath, "fish.names"])
    # use a context manager so the label file handle is not leaked
    with open(labelsPath) as labelsFile:
        LABELS = labelsFile.read().strip().split("\n")
    # initialize a list of colors to represent each possible class label
    # (seeded so the colors are stable across runs)
    np.random.seed(0)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([yolopath, "fish.weights"])
    configPath = os.path.sep.join([yolopath, "fish_test.cfg"])
    # load my YOLO object detector trained on my fish dataset (1 class)
    print("[INFO] loading YOLO model ...")
    net = cv.dnn.readNetFromDarknet(configPath, weightsPath)
    # grab input image's spatial dimensions
    (H, W) = image.shape[:2]
    # determine only the *output* layer names that we need from YOLO.
    # getUnconnectedOutLayers() returns an Nx1 array on OpenCV < 4.5.4 but a
    # flat 1-D array on newer versions, where the old `i[0]` indexing crashes.
    # Flattening handles both layouts.
    ln = net.getLayerNames()
    outIdxs = np.asarray(net.getUnconnectedOutLayers()).flatten()
    ln = [ln[int(i) - 1] for i in outIdxs]
    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities.
    # NOTE: (608, 608) is my YOLO input image size. However, using
    # (416, 416) results in a much more accurate result. Pretty interesting.
    blob = cv.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
                                swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # show execution time information of YOLO
    print("[INFO] YOLO took {:.6f} seconds.".format(end - start))
    # initialize our lists of detected bounding boxes, confidences, and
    # class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []
    # loop over each of the layer outputs and each detection therein
    for output in layerOutputs:
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > _confidence:
                # YOLO returns the box center plus width/height relative to
                # the network input; scale back to the image dimensions
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # derive the top-left corner from the center coordinates
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # apply non-maxima suppression to suppress weak and overlapping boxes
    idxs = cv.dnn.NMSBoxes(boxes, confidences, _confidence,
                           _threshold)
    return boxes, idxs
if __name__ == '__main__':
    # Command-line interface: parse the arguments, run detection, display.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image", required=True,
                        help="path to input image")
    parser.add_argument("-y", "--yolo", required=True,
                        help="base path to YOLO directory")
    parser.add_argument("-c", "--confidence", type=float, default=0.25,
                        help="minimum probability to filter weak detections")
    parser.add_argument("-t", "--threshold", type=float, default=0.45,
                        help="threshold when applying non-maxima suppression")
    cli_args = vars(parser.parse_args())

    annotated = runYOLODetection(cli_args)
    # show the output image until a key is pressed
    cv.imshow("Image", annotated)
    cv.waitKey(0)
| 12,676 | 3,919 |
# Data Preprocessing
# Importing the libraries
import numpy as np               # mathematical tools
import matplotlib.pyplot as plt  # chart plotting
import pandas as pd              # dataset import and management

# 1. Import the dataset
df = pd.read_csv('Data.csv')

# 2. Split into the feature matrix (independent variables) and the
# dependent-variable vector. Using -1 instead of a hard-coded column index
# generalizes this to any number of feature columns in Data.csv.
X_feature = df.iloc[:, :-1].values   # all rows, every column except the last
Y_dependent = df.iloc[:, -1].values  # all rows, last column only

# Taking care of missing data.
# Tip: 'NaN' has to be replaced by 'np.nan' due to an update in sklearn.
# sklearn.impute helps process missing data.
from sklearn.impute import SimpleImputer

imp = SimpleImputer(missing_values=np.nan, strategy='mean')
# Fit the imputer on the numeric columns (upper bound excluded, so columns
# with index 1 and 2 are included).
imp = imp.fit(X_feature[:, 1:3])
# Replace the missing entries of X_feature with the mean of their column.
X_feature[:, 1:3] = imp.transform(X_feature[:, 1:3])

# Train/test splitting and feature scaling were removed from here:
# sklearn.cross_validation no longer exists; when those steps are needed,
# use sklearn.model_selection.train_test_split and
# sklearn.preprocessing.StandardScaler.
"""
This module provides the observations and rewards for testing and
for training the RL agent to vocalize.
"""
import collections
import logging
import math
import numpy as np
import random
import output.voice.synthesizer as synth # pylint: disable=locally-disabled, import-error
import warnings
# A single (state, action) pair recorded by an agent.
Step = collections.namedtuple("Step", "state action")
# Gym-style description of an observation space: element dtype plus the
# per-dimension high/low bounds and the observation shape.
ObservationSpace = collections.namedtuple("ObservationSpace", "dtype high low shape")
class TestEnvironment:
    """
    The test environment.

    A minimal, deterministic environment whose per-step dynamics are
    delegated to a user-supplied `behavior` callable; episodes can
    optionally be truncated after `nsteps` steps.
    """

    def __init__(self, behavior, nsteps, first_obs, action_shape, observation_space):
        """
        :param behavior: A callable of signature fn(obs, action) -> (obs, reward, done).
                         This function is what our step() function actually calls under the hood.
        :param nsteps: The number of steps before we return 'done' for step(). If this parameter
                       is None, an episode will only terminate if the behavior yields a done.
        :param first_obs: The first observation that should be returned by calling reset(). This may be a callable.
        :param action_shape: The shape of an action in this environment.
        :param observation_space: An ObservationSpace.
        """
        self.behavior = behavior
        self.nsteps = nsteps
        self.nsteps_so_far_taken = 0
        self.first_obs = first_obs
        self.most_recent_obs = first_obs
        self.action_shape = action_shape
        self.observation_space = observation_space

    def reset(self):
        """
        Reset the environment and give the first observation.

        :returns: The first observation of the environment.
        """
        self.nsteps_so_far_taken = 0
        obs = self.first_obs() if callable(self.first_obs) else self.first_obs
        self.most_recent_obs = obs
        return obs

    def step(self, action):
        """
        :param action: The action to take at the current step to derive the next
        :returns: ob, rew, done, info; where:
                  - ob: The observation of the step that we are at in response to
                        taking `action`.
                  - rew: The reward we got for taking `action`.
                  - done: True or False, depending on if the episode is over.
                  - info: Dict of random crap. Currently always empty.
        """
        if self.nsteps is not None and self.nsteps_so_far_taken >= self.nsteps:
            # Terminate the episode
            self.nsteps_so_far_taken = 0
            return self.most_recent_obs, 0, True, {}
        # Increment the number of steps
        self.nsteps_so_far_taken += 1
        # BUG FIX: `behavior` is documented as fn(obs, action); the previous
        # code passed most_recent_obs a second time as a third argument,
        # which raised TypeError for any behavior implementing the documented
        # two-argument signature.
        obs, reward, done = self.behavior(self.most_recent_obs, action)
        # Update the most recent observation
        self.most_recent_obs = obs
        # Return the observation, reward, done flag, and the info dict
        return obs, reward, done, {}
class SomEnvironment:
    """
    This class provides the Environment for learning to vocalize and to produce sounds that are 'phoneme'-like.
    The behavior of this environment is as follows. Each episode is exactly one step. The first (and only)
    observation that is given is a random (uniform) scalar that represents the cluster index of a sound,
    as clustered by: Sound -> Preprocessing -> VAE -> Mean Shift Clustering over all latent vectors produced during VAE training.
    The action space is len(articularizers) (continuous). The reward function depends on if this environment
    is in phase 0 or phase 1 of training. During phase 0, a reward is given based on whether or not an
    audible sound is produced via the chosen action, as fed into the articulatory synthesizer controller.
    During phase 1, the reward is conditioned on the resulting sound sounding like the prototype of the
    cluster observed, probably via a correlation DSP function.
    """
    def __init__(self, nclusters, articulation_duration_ms, time_points_ms, cluster_prototypes):
        """
        :param nclusters: The number of clusters.
        :param articulation_duration_ms: The total number of ms of each articulation. Currently we only support
                                         making all articulations the same duration.
        :param time_points_ms: The discrete time points of the articulation. This parameter
                               indicates the number of times we will move the articularizers and when.
                               The action space is of shape (narticularizers, ntime_points).
        :param cluster_prototypes: A list or dict of the form [cluster_index => cluster_prototype]. Each cluster prototype
                                   should be an AudioSegment object.
        :raises ValueError: If any time point is negative or exceeds the articulation duration.
        """
        # Validate that every time point lies inside [0, articulation_duration_ms].
        for idx, tp in enumerate(time_points_ms):
            if tp < 0:
                raise ValueError("Time points cannot be negative. Got", tp, "at index", idx, "in 'time_points' parameter.")
            elif tp > articulation_duration_ms:
                raise ValueError("Time point", tp, "at index", idx, "is greater than the duration of the articulation:", articulation_duration_ms)
        self.nclusters = nclusters
        self._phase = 0                      # training phase: 0 (vocalize at all) or 1 (match prototype)
        self._retain_audio = False           # whether produced AudioSegments are buffered
        self._audio_buffer = []              # retained AudioSegments (see retain_audio)
        self.observed_cluster_index = None   # cluster index sampled at the last reset()
        self.articulation_duration_ms = articulation_duration_ms
        self.time_points_ms = time_points_ms
        self.cluster_prototypes = cluster_prototypes
        # One row per articularizer, one column per time point.
        self.action_shape = (len(synth.articularizers), len(time_points_ms))
        self.observation_space = ObservationSpace(dtype=np.int32,
                                                  high=np.array([(self.nclusters - 1)], dtype=np.int32),
                                                  low=np.array([0], dtype=np.int32),
                                                  shape=(1,))
        self._inference_mode = False
        self._previous_inferred_index = -1   # last cluster index served in inference mode
        self._xcor_component_max = 0.0
        self._step = 0                       # total number of step() calls, used by debug export
    @property
    def inference_mode(self):
        """Inference mode = True means that we cycle through the observations rather than sampling them randomly"""
        return self._inference_mode
    @inference_mode.setter
    def inference_mode(self, v):
        """Inference mode = True means that we cycle through the observations rather than sampling them randomly"""
        self._inference_mode = v
    @property
    def phase(self):
        """The current training phase: 0 or 1 (see the class docstring)."""
        return self._phase
    @phase.setter
    def phase(self, p):
        """Set the phase. Phase is zero or one. Any nonzero value will set phase to one."""
        if p != 0:
            self._phase = 1
        else:
            self._phase = 0
    @property
    def retain_audio(self):
        """
        Set to True if you want to keep the audio that is generated by the Agent.
        The audio will be retained in self.produced_audio. You may dump the audio to disk
        with `env.dump_audio()` or you may clear the audio buffer with `env.clear_audio()`.
        """
        return self._retain_audio
    @retain_audio.setter
    def retain_audio(self, retain):
        self._retain_audio = retain
    def clear_audio(self):
        """
        Clear the audio buffer. The audio buffer is the audio generated by the agent if
        this environment's `retain_audio` is set to True.
        """
        self._audio_buffer.clear()
    def dump_audio(self, basefname=None):
        """
        Dumps each audio segment from the audio buffer to disk. Does not clear the buffer.

        :param basefname: If not None, the base filename, which will have the audio segment
                          indexes appended starting from 0. Can be a file path. If None,
                          the default name of 'produced_sound' is used as the base.
        """
        base = basefname if basefname is not None else "produced_sound"
        for i, seg in enumerate(self._audio_buffer):
            seg.export("{}{}.wav".format(base, i), format='wav')
    def reset(self):
        """
        Reset the environment and give the first observation.
        Will NOT clear the audio buffer as well.

        :returns: The first observation of the environment, a uniform random scalar
                  from the distribution [0, self.nclusters]. If we are in inference mode
                  however, we will return 0 first, then 1, ..., self.nclusters - 1, then 0, etc.
        """
        if self.inference_mode:
            self.observed_cluster_index = (self._previous_inferred_index + 1) % self.nclusters
        else:
            self.observed_cluster_index = random.choice([n for n in range(0, self.nclusters)])
        self._previous_inferred_index = self.observed_cluster_index
        return np.array([self.observed_cluster_index], dtype=self.observation_space.dtype)
    def step(self, action):
        """
        :param action: The action to take at the current step to derive the next
        :returns: ob, rew, done, info; where:
                  - ob: The observation of the step that we are at in response to
                        taking `action`.
                  - rew: The reward we got for taking `action`.
                  - done: True or False, depending on if the episode is over.
                  - info: Dict of random crap. Currently always empty.
        """
        action = np.reshape(action, self.action_shape)
        # Just return the cluster index we generated at reset as the observation
        obs = np.array([self.observed_cluster_index], dtype=self.observation_space.dtype)
        done = True  # We are always done after the first step in this environment
        info = {}  # Info dict is just an empty dict, kept for compliance with OpenAI Gym
        # NOTE(review): this mutates the process-wide warnings filter on every
        # step -- confirm it shouldn't be done once at module import instead.
        warnings.simplefilter(action='ignore', category=ResourceWarning)
        # Synthesize the articulation (time arguments converted from ms to seconds).
        seg = synth.make_seg_from_synthmat(action, self.articulation_duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])
        if self.retain_audio:
            self._audio_buffer.append(seg)
        ##################### Used to Debug. Should not be retained while actually using this ########################
        if self._step % 50 == 0:
            seg.export("mimicry-{}-{}-debug-delete-me.wav".format(self.observed_cluster_index, self._step), format='wav')
        ##############################################################################################################
        if self.phase == 0:
            # During phase 0, the reward is based on whether or not we vocalized at all:
            # the reward is the RMS amplitude of the produced waveform (scaled down).
            arr = seg.to_numpy_array()
            assert len(arr) > 0
            squares = np.square(arr)
            assert len(squares) == len(arr)
            # Only sum non-negative squares; guards against integer-overflow wraparound.
            sum_of_squares = np.sum(squares[squares >= 0], axis=0)
            assert sum_of_squares >= 0.0, "Len: {}, Sum of squares: {}".format(len(arr), np.sum(squares, axis=0))
            mean_square = sum_of_squares / len(arr)
            assert mean_square > 0.0
            rms = np.sqrt(mean_square)
            rew = rms
            if math.isnan(rew):
                rew = 0.0
            rew /= 100.0  # Get it into a more reasonable domain
        else:
            # During phase 1, the reward is based on how well we match the prototype sound
            # for the given cluster index.
            # Shift both wave forms up by their most negative value so they are non-negative.
            ours = seg.to_numpy_array().astype(float)
            most_neg_val = min(ours)
            ours += abs(most_neg_val)
            prototype = self.cluster_prototypes[int(self.observed_cluster_index)].to_numpy_array().astype(float)
            most_neg_val = min(prototype)
            prototype += abs(most_neg_val)
            assert sum(ours[ours < 0]) == 0
            assert sum(prototype[prototype < 0]) == 0
            # Divide by the amplitude so both signals lie in [0, 1].
            if max(ours) != min(ours):
                ours /= max(ours) - min(ours)
            if max(prototype) != min(prototype):
                prototype /= max(prototype) - min(prototype)
            # Cross-correlate with zero extension; the peak of the correlation
            # is where the two waves align best, and its value is the reward.
            xcor = np.correlate(ours, prototype, mode='full')
            rew = max(xcor)
        logging.debug("Observation: {} -> Action: {} -> Reward: {}".format(obs, action, rew))
        self._step += 1
        return obs, rew, done, info
| 13,073 | 3,454 |
# coding: utf-8
from flask import Flask
import mysql,os,re
from mysql import Pool
import properties

# Create the WEB application. JSON_AS_ASCII=False stops Flask from
# ASCII-escaping non-ASCII characters in JSON responses.
app=Flask(__name__)
app.config['JSON_AS_ASCII'] = False

# Handles auto-wiring inside each module and assembles the blueprints.
# dir_path is the blueprint package path: e.g. if all blueprints live in the
# 'routes' folder, pass '/routes'.
def map_apps(dir_path):
    """Import every blueprint module under *dir_path* (relative to the CWD)
    and register each module's ``app`` blueprint on the Flask application.

    :param dir_path: blueprint package path, e.g. '/routes'.
    """
    path = os.getcwd() + dir_path
    entries = os.listdir(path)  # renamed: do not shadow the builtin 'list'
    print('蓝图文件夹:', '.', dir_path)
    for file in entries:
        # Skip dunder entries such as __init__.py and __pycache__.
        if file.startswith('__') and file.endswith('__'):
            continue
        print('加载蓝图模块:', file)
        try:
            # Raw string for the regex so '\.py' matches a literal dot;
            # fromlist must be a non-empty sequence (True only worked by
            # accident of truthiness) to get the submodule itself back.
            module_name = re.sub('/', '', dir_path) + '.' + re.sub(r'\.py', '', file)
            f_model = __import__(module_name, fromlist=['app'])
            app.register_blueprint(f_model.app)
        except Exception as exc:
            # The original bare 'except: pass' silently hid every import and
            # registration failure (including KeyboardInterrupt/SystemExit);
            # report the failure and keep loading the remaining modules.
            print('加载蓝图模块失败:', file, exc)
def get_app():
    """Return the module-level Flask application instance."""
    return app
print('加载数据库模块')
# Create the shared MySQL connection pool used by all blueprint modules.
mysql.pool = Pool.Pool()
print('蓝图初始化')
# Register every blueprint package listed in the project configuration.
for path in properties.blueprint_path:
    map_apps(path)
| 934 | 420 |
from collections import defaultdict
import logging
from typing import Dict, List
from .predict import predict
from chemprop.data import MoleculeDataLoader, StandardScaler
from chemprop.utils.metrics import get_metric_func
from chemprop.models import MoleculeModel, PB_MoleculeModel
def evaluate_predictions(preds : List[List[float]],
                         targets : List[List[float]],
                         num_tasks : int,
                         metrics : List[str],
                         dataset_type : str,
                         logger : logging.Logger = None) -> Dict[str, List[float]]:
    """Score per-task predictions against targets with every requested metric.

    :param preds: Model predictions, one inner list per molecule.
    :param targets: Ground-truth values, one inner list per molecule; an entry
                    of None marks a missing target and is skipped.
    :param num_tasks: Number of prediction tasks.
    :param metrics: Names of the metrics to compute.
    :param dataset_type: 'classification', 'multiclass' or a regression type.
    :param logger: Optional logger for warnings (falls back to print).
    :return: Mapping of metric name to a list of per-task scores.
    """
    log = logger.info if logger is not None else print
    funcs = {name: get_metric_func(name) for name in metrics}

    if not preds:
        return {name: [float('nan')] * num_tasks for name in metrics}

    # Gather, per task, only the (pred, target) pairs whose target is present.
    task_preds = [[] for _ in range(num_tasks)]
    task_targets = [[] for _ in range(num_tasks)]
    for task in range(num_tasks):
        for row in range(len(preds)):
            if targets[row][task] is not None:  # skip molecules without this target
                task_preds[task].append(preds[row][task])
                task_targets[task].append(targets[row][task])

    results = defaultdict(list)
    for task in range(num_tasks):
        cur_preds, cur_targets = task_preds[task], task_targets[task]
        if dataset_type == 'classification':
            # Degenerate tasks (single-class targets or predictions) make most
            # classification metrics undefined, so score them as NaN.
            degenerate = False
            if all(t == 0 for t in cur_targets) or all(t == 1 for t in cur_targets):
                degenerate = True
                log('Warning: Found a task with targets all 0s or all 1s')
            if all(p == 0 for p in cur_preds) or all(p == 1 for p in cur_preds):
                degenerate = True
                log('Warning: Found a task with predictions all 0s or all 1s')
            if degenerate:
                for name in metrics:
                    results[name].append(float('nan'))
                continue
        if not cur_targets:
            continue
        for name, func in funcs.items():
            if dataset_type == 'multiclass':
                results[name].append(func(cur_targets,
                                          cur_preds,
                                          labels=list(range(len(cur_preds[0])))))
            else:
                results[name].append(func(cur_targets, cur_preds))
    return dict(results)
def evaluate(model : MoleculeModel,
             data_loader : MoleculeDataLoader,
             num_tasks : int,
             metrics : List[str],
             dataset_type : str,
             scaler : StandardScaler = None,
             logger : logging.Logger = None) -> Dict[str, List[float]]:
    """Run the model over *data_loader* and score its predictions.

    :param model: The model to evaluate.
    :param data_loader: Loader providing the molecules and their targets.
    :param num_tasks: Number of prediction tasks.
    :param metrics: Names of the metrics to compute.
    :param dataset_type: 'classification', 'multiclass' or a regression type.
    :param scaler: Optional scaler used to un-transform the predictions.
    :param logger: Optional logger forwarded to evaluate_predictions.
    :return: Mapping of metric name to a list of per-task scores.
    """
    model_preds, _ = predict(model=model, data_loader=data_loader, scaler=scaler)
    return evaluate_predictions(
        preds=model_preds,
        targets=data_loader.targets,
        num_tasks=num_tasks,
        metrics=metrics,
        dataset_type=dataset_type,
        logger=logger,
    )
| 3,213 | 905 |
"""
test_detect_transparent_proxy.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.plugins.tests.helper import PluginTest, PluginConfig
class TestDetectTransparentProxy(PluginTest):
    """Integration test for the 'detect_transparent_proxy' infrastructure plugin."""

    target_url = 'http://moth/'

    _run_config = {
        'cfg': {
            'target': target_url,
            'plugins': {'infrastructure': (PluginConfig('detect_transparent_proxy'),)}
        }
    }

    @attr('ci_fails')
    def test_transparent_proxy(self):
        """Run a scan against the target and verify the plugin doesn't crash."""
        run_cfg = self._run_config['cfg']
        self._scan(run_cfg['target'], run_cfg['plugins'])
        # TODO: For now I just check that it doesn't crash on me,
        self.assertTrue(True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Enumerate every user-defined phrase stored in the libchewing user database
# and print each phrase together with its bopomofo (zhuyin) key sequence.
import chewing
import ctypes

# Declare the C argument types for the enumeration entry points; the
# python-chewing wrapper does not set argtypes for these two itself.
chewing._libchewing.chewing_userphrase_has_next.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)]
chewing._libchewing.chewing_userphrase_get.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_uint]

# Out-parameters: chewing_userphrase_has_next fills these with the buffer
# sizes needed to hold the next phrase / bopomofo pair.
phrase_len = ctypes.c_uint(0)
bopomofo_len = ctypes.c_uint(0)

#chewing.Init('/usr/share/chewing', '/tmp')
ctx = chewing.ChewingContext()

## https://github.com/chewing/libchewing/blob/v0.4.0/include/chewingio.h#L523
rtn = ctx.userphrase_enumerate()
## print(rtn)

## https://github.com/chewing/libchewing/blob/v0.4.0/include/chewingio.h#L525
while ctx.userphrase_has_next(phrase_len, bopomofo_len):
    print('')
    # Allocate buffers of exactly the size the library just reported.
    phrase = ctypes.create_string_buffer(phrase_len.value)
    bopomofo = ctypes.create_string_buffer(bopomofo_len.value)
    ctx.userphrase_get(phrase, phrase_len, bopomofo, bopomofo_len)
    # NOTE(review): .value is bytes under Python 3, so this prints b'...';
    # decode('utf-8') if human-readable output is wanted -- confirm intent.
    print('phrase: %s' % phrase.value)
    print('bopomofo: %s' % bopomofo.value)
| 1,026 | 476 |
# -*- coding: utf-8 -*-
# Python 2 script: bulk-load student records from an Excel workbook into the
# `instructor_evaluate_student` MySQL table, one INSERT per spreadsheet row.
import xlrd
import MySQLdb
import datetime

xlsfile = r'li.xlsx'
book = xlrd.open_workbook(xlsfile)
# Number of sheets in the workbook (only the first sheet is imported below).
count = len(book.sheets())
sheet = book.sheets()[0]

# Open the database connection.
# NOTE(review): credentials are hard-coded here; move them into a config
# file or environment variables before sharing or deploying this script.
database = MySQLdb.connect(host="yb.upc.edu.cn", port=3306, user="yibantest", passwd="yibantest",
                           db="yibantest_integrate")
# Force the connection character set to UTF-8 so Chinese text round-trips.
database.set_character_set('utf8')
cursor = database.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')

starttime = datetime.datetime.now()
print '开始时间:%s' % (starttime)

# Parameterized INSERT statement (safe against SQL injection); one execution
# per data row. Spreadsheet column 0 is skipped; columns 1-13 map in order.
query = """INSERT INTO instructor_evaluate_student ( number, name, sex, password,nation , birthday ,college, grade,major,school_class,hometown, instructor_name,second_instructor) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s)"""
for r in range(1, sheet.nrows):  # row 0 is assumed to be the header row
    number = sheet.cell(r, 1).value
    name = sheet.cell(r, 2).value
    sex = sheet.cell(r, 3).value
    password = sheet.cell(r, 4).value
    nation = sheet.cell(r, 5).value
    birthday = sheet.cell(r, 6).value
    college = sheet.cell(r, 7).value
    grade = sheet.cell(r, 8).value
    major = sheet.cell(r, 9).value
    school_class = sheet.cell(r, 10).value
    hometown = sheet.cell(r, 11).value
    instructor_name = sheet.cell(r, 12).value
    second_instructor = sheet.cell(r, 13).value
    values = (
        number, name, sex, password, nation, birthday, college, grade, major, school_class, hometown,
        instructor_name,
        second_instructor)
    cursor.execute(query, values)

# Commit once after all rows are inserted, then release the resources.
cursor.close()
database.commit()
database.close()
endtime = datetime.datetime.now()
print (endtime)
print (endtime - starttime)
| 1,752 | 681 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rule_engine/types.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import collections.abc
import datetime
import decimal
import math
from . import errors
# Public API of this module.
__all__ = (
    'DataType',
    'NoneType',
    'coerce_value',
    'is_integer_number',
    'is_natural_number',
    'is_numeric',
    'is_real_number',
    'iterable_member_value_type'
)
# The type of None; backs the NULL data type defined below.
NoneType = type(None)
def _to_decimal(value):
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(repr(value))
def coerce_value(value, verify_type=True):
    """
    Take a native Python *value* and convert it to a value of a data type which can be represented by a Rule Engine
    :py:class:`~.DataType`. This function is useful for converting native Python values at the engine boundaries such as
    when resolving a symbol from an object external to the engine.
    .. versionadded:: 2.0.0
    :param value: The value to convert.
    :param bool verify_type: Whether or not to verify the converted value's type.
    :return: The converted value.
    """
    # ARRAY: members are coerced recursively and the container normalized to a tuple
    if isinstance(value, (list, range, tuple)):
        value = tuple(coerce_value(v, verify_type=verify_type) for v in value)
    # DATETIME: plain dates are widened to midnight datetimes
    elif isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
        value = datetime.datetime(value.year, value.month, value.day)
    # FLOAT: bool is excluded because it is an int subclass
    elif isinstance(value, (float, int)) and not isinstance(value, bool):
        value = _to_decimal(value)
    # SET: coerce members recursively, mirroring the ARRAY handling
    # (fix: previously set members were passed through uncoerced, so e.g.
    # float members stayed float instead of becoming Decimal)
    elif isinstance(value, set):
        value = set(coerce_value(v, verify_type=verify_type) for v in value)
    # MAPPING: both keys and values are coerced (the dict check already covers OrderedDict)
    elif isinstance(value, (dict, collections.OrderedDict)):
        value = collections.OrderedDict(
            (coerce_value(k, verify_type=verify_type), coerce_value(v, verify_type=verify_type)) for k, v in value.items()
        )
    if verify_type:
        DataType.from_value(value)  # use this to raise a TypeError, if the type is incompatible
    return value
def is_integer_number(value):
    """
    Check whether *value* is an integer number (i.e. a whole number). This can, for example, be used to check if a
    floating point number such as ``3.0`` can safely be converted to an integer without loss of information.
    .. versionadded:: 2.1.0
    :param value: The value to check. This value is a native Python type.
    :return: Whether or not the value is an integer number.
    :rtype: bool
    """
    # must first be a finite real number, then have no fractional part
    return is_real_number(value) and math.floor(value) == value
def is_natural_number(value):
    """
    Check whether *value* is a natural number (i.e. a whole, non-negative number). This can, for example, be used to
    check if a floating point number such as ``3.0`` can safely be converted to an integer without loss of information.
    :param value: The value to check. This value is a native Python type.
    :return: Whether or not the value is a natural number.
    :rtype: bool
    """
    # a natural number is an integer number that is not negative
    return is_integer_number(value) and value >= 0
def is_real_number(value):
    """
    Check whether *value* is a real number (i.e. capable of being represented as a floating point value without loss of
    information as well as being finite). Despite being able to be represented as a float, ``NaN`` is not considered a
    real number for the purposes of this function.
    :param value: The value to check. This value is a native Python type.
    :return: Whether or not the value is a real number.
    :rtype: bool
    """
    # NaN and the infinities are numeric but not finite, so they are rejected here
    return is_numeric(value) and math.isfinite(value)
def is_numeric(value):
    """
    Check whether *value* is a numeric value (i.e. capable of being represented as a floating point value without loss
    of information).
    :param value: The value to check. This value is a native Python type.
    :return: Whether or not the value is numeric.
    :rtype: bool
    """
    # bool is excluded explicitly because it subclasses int
    if isinstance(value, bool):
        return False
    return isinstance(value, (decimal.Decimal, float, int))
def iterable_member_value_type(python_value):
    """
    Take a native *python_value* and return the corresponding data type of each of its members if the types are either
    the same or NULL. NULL is considered a special case to allow nullable-values. This by extension means that an
    iterable may not be defined as only capable of containing NULL values.
    :return: The data type of the sequence members. This will never be NULL, because that is considered a special case.
    It will either be UNSPECIFIED or one of the other types.
    """
    member_types = set()
    for member in python_value:
        # members may be either values or data type definitions themselves
        if DataType.is_definition(member):
            member_types.add(member)
        else:
            member_types.add(DataType.from_value(member))
    # NULL is special-cased so a typed iterable may also contain NULL members;
    # an iterable can therefore never be typed as containing *only* NULLs
    member_types.discard(DataType.NULL)
    if len(member_types) == 1:
        return member_types.pop()
    return DataType.UNDEFINED
class _DataTypeDef(object):
__slots__ = ('name', 'python_type', 'is_scalar', 'iterable_type')
def __init__(self, name, python_type):
self.name = name
self.python_type = python_type
self.is_scalar = True
@property
def is_iterable(self):
return getattr(self, 'iterable_type', None) is not None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.name == other.name
def __hash__(self):
return hash((self.python_type, self.is_scalar))
def __repr__(self):
return "<{} name={} python_type={} >".format(self.__class__.__name__, self.name, self.python_type.__name__)
@property
def is_compound(self):
return not self.is_scalar
# Shared singleton for the UNDEFINED type; also used as the default member
# type for the collection / mapping definitions below.
_DATA_TYPE_UNDEFINED = _DataTypeDef('UNDEFINED', errors.UNDEFINED)
class _CollectionDataTypeDef(_DataTypeDef):
    """Definition of a compound collection type (ARRAY, SET) whose members share one value type."""
    __slots__ = ('value_type', 'value_type_nullable')
    def __init__(self, name, python_type, value_type=_DATA_TYPE_UNDEFINED, value_type_nullable=True):
        # check these three classes individually instead of using Collection which isn't available before Python v3.6
        if not issubclass(python_type, collections.abc.Container):
            raise TypeError('the specified python_type is not a container')
        if not issubclass(python_type, collections.abc.Iterable):
            raise TypeError('the specified python_type is not an iterable')
        if not issubclass(python_type, collections.abc.Sized):
            raise TypeError('the specified python_type is not a sized')
        super(_CollectionDataTypeDef, self).__init__(name, python_type)
        self.is_scalar = False
        self.value_type = value_type
        self.value_type_nullable = value_type_nullable
    @property
    def iterable_type(self):
        # iterating a collection yields its members, so expose the member type
        return self.value_type
    def __call__(self, value_type, value_type_nullable=True):
        # calling a definition (e.g. DataType.ARRAY(...)) specializes its member type
        return self.__class__(
            self.name,
            self.python_type,
            value_type=value_type,
            value_type_nullable=value_type_nullable
        )
    def __repr__(self):
        return "<{} name={} python_type={} value_type={} >".format(
            self.__class__.__name__,
            self.name,
            self.python_type.__name__,
            self.value_type.name
        )
    def __eq__(self, other):
        # equal only when the member type (and its nullability) also matches
        if not super().__eq__(other):
            return False
        return all((
            self.value_type == other.value_type,
            self.value_type_nullable == other.value_type_nullable
        ))
    def __hash__(self):
        return hash((self.python_type, self.is_scalar, hash((self.value_type, self.value_type_nullable))))
class _ArrayDataTypeDef(_CollectionDataTypeDef):
    # ARRAY shares all collection behavior; the distinct class enables isinstance checks.
    pass
class _SetDataTypeDef(_CollectionDataTypeDef):
    # SET shares all collection behavior; the distinct class enables isinstance checks.
    pass
class _MappingDataTypeDef(_DataTypeDef):
    """Definition of the MAPPING compound type, with separate key and value member types."""
    __slots__ = ('key_type', 'value_type', 'value_type_nullable')
    def __init__(self, name, python_type, key_type=_DATA_TYPE_UNDEFINED, value_type=_DATA_TYPE_UNDEFINED, value_type_nullable=True):
        if not issubclass(python_type, collections.abc.Mapping):
            raise TypeError('the specified python_type is not a mapping')
        super(_MappingDataTypeDef, self).__init__(name, python_type)
        self.is_scalar = False
        # ARRAY is the only compound data type that can be used as a mapping key, this is because ARRAY's are backed by
        # Python tuple's while SET and MAPPING objects are set and dict instances, respectively which are not hashable.
        if key_type.is_compound and not isinstance(key_type, DataType.ARRAY.__class__):
            raise errors.EngineError("the {} data type may not be used for mapping keys".format(key_type.name))
        self.key_type = key_type
        self.value_type = value_type
        self.value_type_nullable = value_type_nullable
    @property
    def iterable_type(self):
        # iterating a mapping yields its keys
        return self.key_type
    def __call__(self, key_type, value_type=_DATA_TYPE_UNDEFINED, value_type_nullable=True):
        # calling the definition specializes its key / value types
        return self.__class__(
            self.name,
            self.python_type,
            key_type=key_type,
            value_type=value_type,
            value_type_nullable=value_type_nullable
        )
    def __repr__(self):
        return "<{} name={} python_type={} key_type={} value_type={} >".format(
            self.__class__.__name__,
            self.name,
            self.python_type.__name__,
            self.key_type.name,
            self.value_type.name
        )
    def __eq__(self, other):
        # equal only when key type, value type and nullability all match
        if not super().__eq__(other):
            return False
        return all((
            self.key_type == other.key_type,
            self.value_type == other.value_type,
            self.value_type_nullable == other.value_type_nullable
        ))
    def __hash__(self):
        return hash((self.python_type, self.is_scalar, hash((self.key_type, self.value_type, self.value_type_nullable))))
class DataTypeMeta(type):
    """Metaclass that collects every ``_DataTypeDef`` class attribute into an
    ordered member map, making the class itself behave like a read-only
    mapping of type names (``in``, ``[]``, iteration and ``len``)."""
    def __new__(metacls, cls, bases, classdict):
        data_type = super().__new__(metacls, cls, bases, classdict)
        data_type._member_map_ = collections.OrderedDict()
        for key, value in classdict.items():
            # only data type definitions are registered as members
            if not isinstance(value, _DataTypeDef):
                continue
            data_type._member_map_[key] = value
        return data_type
    def __contains__(self, item):
        return item in self._member_map_
    def __getitem__(cls, item):
        return cls._member_map_[item]
    def __iter__(cls):
        yield from cls._member_map_
    def __len__(cls):
        return len(cls._member_map_)
class DataType(metaclass=DataTypeMeta):
    """
    A collection of constants representing the different supported data types. There are three ways to compare data
    types. All three are effectively the same when dealing with scalars.
    Equality checking
    .. code-block::
        dt == DataType.TYPE
    This is the most explicit form of testing and when dealing with compound data types, it recursively checks that
    all of the member types are also equal.
    Class checking
    .. code-block::
        isinstance(dt, DataType.TYPE.__class__)
    This checks that the data types are the same but when dealing with compound data types, the member types are
    ignored.
    Compatibility checking
    .. code-block::
        DataType.is_compatible(dt, DataType.TYPE)
    This checks that the types are compatible without any kind of conversion. When dealing with compound data types,
    this ensures that the member types are either the same or :py:attr:`~.UNDEFINED`.
    """
    ARRAY = _ArrayDataTypeDef('ARRAY', tuple)
    """
    .. py:function:: __call__(value_type, value_type_nullable=True)
    :param value_type: The type of the array members.
    :param bool value_type_nullable: Whether or not array members are allowed to be :py:attr:`.NULL`.
    """
    BOOLEAN = _DataTypeDef('BOOLEAN', bool)
    DATETIME = _DataTypeDef('DATETIME', datetime.datetime)
    FLOAT = _DataTypeDef('FLOAT', decimal.Decimal)
    MAPPING = _MappingDataTypeDef('MAPPING', dict)
    """
    .. py:function:: __call__(key_type, value_type, value_type_nullable=True)
    :param key_type: The type of the mapping keys.
    :param value_type: The type of the mapping values.
    :param bool value_type_nullable: Whether or not mapping values are allowed to be :py:attr:`.NULL`.
    """
    NULL = _DataTypeDef('NULL', NoneType)
    SET = _SetDataTypeDef('SET', set)
    """
    .. py:function:: __call__(value_type, value_type_nullable=True)
    :param value_type: The type of the set members.
    :param bool value_type_nullable: Whether or not set members are allowed to be :py:attr:`.NULL`.
    """
    STRING = _DataTypeDef('STRING', str)
    UNDEFINED = _DATA_TYPE_UNDEFINED
    """
    Undefined values. This constant can be used to indicate that a particular symbol is valid, but it's data type is
    currently unknown.
    """
    @classmethod
    def from_name(cls, name):
        """
        Get the data type from its name.
        .. versionadded:: 2.0.0
        :param str name: The name of the data type to retrieve.
        :return: One of the constants.
        """
        if not isinstance(name, str):
            raise TypeError('from_name argument 1 must be str, not ' + type(name).__name__)
        dt = getattr(cls, name, None)
        if not isinstance(dt, _DataTypeDef):
            raise ValueError("can not map name {0!r} to a compatible data type".format(name))
        return dt
    @classmethod
    def from_type(cls, python_type):
        """
        Get the supported data type constant for the specified Python type. If the type can not be mapped to a supported
        data type, then a :py:exc:`ValueError` exception will be raised. This function will not return
        :py:attr:`.UNDEFINED`.
        :param type python_type: The native Python type to retrieve the corresponding type constant for.
        :return: One of the constants.
        """
        if not isinstance(python_type, type):
            raise TypeError('from_type argument 1 must be type, not ' + type(python_type).__name__)
        if python_type in (list, range, tuple):
            return cls.ARRAY
        # bool must be checked before the FLOAT branch since bool subclasses int
        elif python_type is bool:
            return cls.BOOLEAN
        elif python_type is datetime.date or python_type is datetime.datetime:
            return cls.DATETIME
        elif python_type in (decimal.Decimal, float, int):
            return cls.FLOAT
        elif python_type is dict:
            return cls.MAPPING
        elif python_type is NoneType:
            return cls.NULL
        elif python_type is set:
            return cls.SET
        elif python_type is str:
            return cls.STRING
        raise ValueError("can not map python type {0!r} to a compatible data type".format(python_type.__name__))
    @classmethod
    def from_value(cls, python_value):
        """
        Get the supported data type constant for the specified Python value. If the value can not be mapped to a
        supported data type, then a :py:exc:`TypeError` exception will be raised. This function will not return
        :py:attr:`.UNDEFINED`.
        :param python_value: The native Python value to retrieve the corresponding data type constant for.
        :return: One of the constants.
        """
        # bool is tested first because bool subclasses int (FLOAT branch below)
        if isinstance(python_value, bool):
            return cls.BOOLEAN
        elif isinstance(python_value, (datetime.date, datetime.datetime)):
            return cls.DATETIME
        elif isinstance(python_value, (decimal.Decimal, float, int)):
            return cls.FLOAT
        elif python_value is None:
            return cls.NULL
        elif isinstance(python_value, (set,)):
            # compound values are specialized with their inferred member type
            return cls.SET(value_type=iterable_member_value_type(python_value))
        elif isinstance(python_value, (str,)):
            return cls.STRING
        elif isinstance(python_value, collections.abc.Mapping):
            return cls.MAPPING(
                key_type=iterable_member_value_type(python_value.keys()),
                value_type=iterable_member_value_type(python_value.values())
            )
        elif isinstance(python_value, collections.abc.Sequence):
            return cls.ARRAY(value_type=iterable_member_value_type(python_value))
        raise TypeError("can not map python type {0!r} to a compatible data type".format(type(python_value).__name__))
    @classmethod
    def is_compatible(cls, dt1, dt2):
        """
        Check if two data type definitions are compatible without any kind of conversion. This evaluates to ``True``
        when one or both are :py:attr:`.UNDEFINED` or both types are the same. In the case of compound data types (such
        as :py:attr:`.ARRAY`) the member types are checked recursively in the same manner.
        .. versionadded:: 2.1.0
        :param dt1: The first data type to compare.
        :param dt2: The second data type to compare.
        :return: Whether or not the two types are compatible.
        :rtype: bool
        """
        if not (cls.is_definition(dt1) and cls.is_definition(dt2)):
            raise TypeError('argument is not a data type definition')
        # UNDEFINED is compatible with everything
        if dt1 is _DATA_TYPE_UNDEFINED or dt2 is _DATA_TYPE_UNDEFINED:
            return True
        if dt1.is_scalar and dt2.is_scalar:
            return dt1 == dt2
        elif dt1.is_compound and dt2.is_compound:
            # compound types recurse into their member types
            if isinstance(dt1, DataType.ARRAY.__class__) and isinstance(dt2, DataType.ARRAY.__class__):
                return cls.is_compatible(dt1.value_type, dt2.value_type)
            elif isinstance(dt1, DataType.MAPPING.__class__) and isinstance(dt2, DataType.MAPPING.__class__):
                if not cls.is_compatible(dt1.key_type, dt2.key_type):
                    return False
                if not cls.is_compatible(dt1.value_type, dt2.value_type):
                    return False
                return True
            elif isinstance(dt1, DataType.SET.__class__) and isinstance(dt2, DataType.SET.__class__):
                return cls.is_compatible(dt1.value_type, dt2.value_type)
        return False
    @classmethod
    def is_definition(cls, value):
        """
        Check if *value* is a data type definition.
        .. versionadded:: 2.1.0
        :param value: The value to check.
        :return: ``True`` if *value* is a data type definition.
        :rtype: bool
        """
        return isinstance(value, _DataTypeDef)
| 18,276 | 6,304 |
"""Convert a number to its text representation."""
from number2text.cli.cli import cli
# Script entry point: delegate to the package's command-line interface.
if __name__ == '__main__':
    cli()
| 125 | 39 |
from datetime import datetime
import json
import logging
from mimetypes import guess_extension
import os
import requests
from slugify import slugify
# Log line format: timestamp followed by the message.
FORMAT = '%(asctime)-15s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('data_pr')
# All downloaded files land under this directory.
BASE_DATA_DIR = 'data_files'
# Catalog snapshot path; the filename is timestamped at import time.
# NOTE(review): str(datetime.now()) contains spaces and colons, which is
# awkward on some filesystems -- confirm this naming is intended.
DATA_PR_CATALOG_PATH = f'{BASE_DATA_DIR}/data_pr_catalog_{datetime.now()}.json'
DATA_PR_CATALOG_URL = 'https://data.pr.gov/data.json'
def get_new_data_pr_catalog(url, timeout=60):
    """Download the open-data catalog at *url* and write it to DATA_PR_CATALOG_PATH.

    :param str url: URL of the data.json catalog to fetch.
    :param float timeout: Seconds to wait for the request; without a timeout
        ``requests.get`` can block forever (fix for the original call).
    """
    response = requests.get(url, timeout=timeout)
    # fail fast on HTTP errors instead of silently persisting an error payload
    response.raise_for_status()
    with open(DATA_PR_CATALOG_PATH, 'w') as data_pr_catalog_json:
        json.dump(response.json(), data_pr_catalog_json)
def get_datasets(data_pr_catalog, amount_to_download=None):
    """Download every distribution of every dataset listed in a catalog file.

    Each dataset gets a slugified folder under BASE_DATA_DIR; each distribution
    is streamed into ``data<ext>`` inside that folder.

    :param str data_pr_catalog: Path to the data.json catalog file.
    :param amount_to_download: Maximum number of datasets to process, or None
        for all of them. (Fix: this parameter was previously accepted but
        silently ignored.)
    """
    with open(f'{data_pr_catalog}') as data_catalog:
        data_pr_json_meta = json.load(data_catalog)
    datasets = data_pr_json_meta['dataset']
    if amount_to_download is not None:
        datasets = datasets[:amount_to_download]
    for dataset in datasets:
        folder_name = slugify(dataset['title'])
        data_file_path = f'{BASE_DATA_DIR}/{folder_name}'
        logger.info(f"Start download of {dataset['title']} to {data_file_path}")
        os.makedirs(data_file_path, exist_ok=True)
        for distribution in dataset['distribution']:
            # guess_extension returns None for unknown media types; fall back to
            # no extension instead of producing a file literally named "dataNone"
            file_extension = guess_extension(distribution['mediaType']) or ''
            try:
                response = requests.get(distribution['downloadURL'], stream=True, timeout=60)
                response.raise_for_status()
            except Exception as e:
                logger.error('Error requesting data: %s', e)
                continue
            logger.debug(f"Downloading distribution: {distribution['mediaType']}")
            # close the response when done; stream in larger chunks than the
            # original 100 bytes to avoid excessive syscall overhead
            with response, open(f'{data_file_path}/data{file_extension}', 'wb') as dataset_file:
                for data in response.iter_content(chunk_size=65536):
                    dataset_file.write(data)
            logger.debug(f"Done downloading distribution: {distribution['mediaType']}")
        logger.info(f"Finished download of {dataset['title']} to {data_file_path}")
def main():
    """Fetch a fresh catalog, then download the datasets it describes.

    Each stage is attempted independently; failures are logged rather than
    propagated so one bad stage does not abort the script.
    """
    stages = (
        (get_new_data_pr_catalog, DATA_PR_CATALOG_URL, 'Error at get_new_data_pr_catalog: %s'),
        (get_datasets, DATA_PR_CATALOG_PATH, 'Error at get_datasets: %s'),
    )
    for stage, argument, failure_message in stages:
        try:
            stage(argument)
        except Exception as e:
            logger.error(failure_message, e)
if __name__ == '__main__':
    # verbose logging when run directly as a script
    logger.setLevel(logging.DEBUG)
    logger.info(f'process started: {datetime.now()}')
    main()
    logger.info(f'process finished: {datetime.now()}')
| 2,437 | 783 |
import gzip
import glob
import os
import json
import numpy as np
from more_itertools import chunked
# Root of the CodeSearchNet download (expects <lang>/train/<lang>_train_*.jsonl.gz shards).
DATA_DIR = '/mnt/wanyao/zsj/codesearchnet'
# Destination for the flattened <CODESPLIT>-delimited training text.
DEST_DIR = '/mnt/wanyao/zsj/CodeBERT/data/codesearch/train_valid'
def format_str(string):
    """Replace every line-break sequence ('\\r\\n', '\\r', '\\n') in *string* with a space."""
    # handle the two-character Windows break first so it maps to a single space
    result = string.replace('\r\n', ' ')
    result = result.replace('\r', ' ')
    return result.replace('\n', ' ')
def read_tsv(input_file, quotechar=None):
    """Parse a <CODESPLIT>-delimited file, keeping only rows with exactly 5 fields.

    :param str input_file: Path of the file to read.
    :param quotechar: Unused; retained for API compatibility.
    :return: List of 5-element field lists.
    """
    rows = []
    with open(input_file, "r", encoding='utf-8') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('<CODESPLIT>')
            # silently skip malformed rows, matching the original behavior
            if len(fields) == 5:
                rows.append(fields)
    return rows
# preprocess the training data but not generate negative sample
def preprocess_train_data(lang):
    """Flatten the gzipped CodeSearchNet train shards for *lang* into one
    <CODESPLIT>-delimited text file (one example per line, all labelled "1").
    Negative samples are NOT generated here.

    :param str lang: Language subfolder name, e.g. 'python'.
    """
    path_list = glob.glob(os.path.join(DATA_DIR, '{}/train'.format(lang), '{}_train_*.jsonl.gz'.format(lang)))
    # shard files are numbered; sort numerically so the output order is stable
    path_list.sort(key=lambda t: int(t.split('_')[-1].split('.')[0]))
    examples = []
    for path in path_list:
        print(path)
        with gzip.open(path, 'r') as pf:
            raw_lines = pf.readlines()
        # fix: the original reused the name `data` for both the shard contents
        # and the loop variable, shadowing the list it iterated
        for raw_line in raw_lines:
            line = json.loads(raw_line.decode('utf-8'))
            doc_token = ' '.join(line['docstring_tokens'])
            code_token = ' '.join(format_str(token) for token in line['code_tokens'])
            example = '<CODESPLIT>'.join((str(1), line['url'], line['func_name'], doc_token, code_token))
            examples.append(example)
    dest_file = os.path.join(DEST_DIR, lang, 'raw_train.txt')
    print(dest_file)
    # robustness: make sure the destination directory exists before writing
    os.makedirs(os.path.dirname(dest_file), exist_ok=True)
    with open(dest_file, 'w', encoding='utf-8') as f:
        f.write('\n'.join(examples))
if __name__ == '__main__':
    # build the raw training file for the Python subset
    preprocess_train_data('python')
| 1,735 | 578 |
# Read n integers; print the minimum possible difference between the largest
# and smallest of any k of them (the classic "minimum spread" problem).
n = int(input())
k = int(input())
values = [int(input()) for _ in range(n)]
values.sort(reverse=True)
# After sorting, the optimal choice of k items is always a contiguous window;
# scan every window and keep the smallest (max - min) spread.
# (Cleanup: the original seeded `m` twice -- once with a dead `m = 0` and once
# with the first window -- before the loop re-examined that same window.)
print(min(values[i] - values[i + k - 1] for i in range(n - k + 1)))
#!/usr/bin/python3
"""
https://golem.ph.utexas.edu/category/2019/03/how_much_work_can_it_be_to_add.html#c055688
see also entropy.py
"""
import sys
from functools import reduce
from operator import add
from math import log, log2
from random import shuffle, choice, randint, seed
#import numpy
#from matplotlib import pyplot
#from bruhat.gelim import row_reduce, shortstr, kernel
#from qupy.dev import linalg
from bruhat.argv import argv
EPSILON = 1e-8  # tolerance used for floating-point comparisons
def is_close(a, b):
    """Return True when *a* and *b* agree to within EPSILON."""
    difference = a - b
    return abs(difference) < EPSILON
def entropy(items, base=2):
    # NOTE(review): dead code -- this definition is immediately shadowed by the
    # base-2 `entropy` defined directly below; kept as-is, consider removing.
    "un-normalized entropy"
    k = log(base)  # change-of-base factor
    sitems = sum(items)
    r = 0.
    for n in items:
        r += n * log(n) / k
    return -1*(r - sitems*log(sitems) / k)
def entropy(items):
    """Un-normalized Shannon entropy (base 2) of the integer counts in *items*."""
    total = sum(items)
    weighted = sum(count * log2(count) for count in items)
    return -(weighted - total * log2(total))
class Multiset(object):
    """An un-normalized probability distribution: a multiset of hashable items,
    stored internally as a mapping ``item -> count``. Zero and negative counts
    are dropped on construction."""
    def __init__(self, cs={}):
        # NOTE: the mutable default is safe here because `cs` is only read.
        items = [(k, v) for (k, v) in cs.items() if v>0]
        cs = dict(items)
        self.cs = dict(cs) # map item -> count
        self._len = sum(self.cs.values(), 0)  # total cardinality (with multiplicity)
        keys = list(cs.keys())
        keys.sort() # canonicalize
        self.keys = keys
    def __str__(self):
        # e.g. "{a,b,b}" -- each item repeated by its count
        cs = self.cs
        keys = self.keys
        items = reduce(add, [(str(key),)*cs[key] for key in keys], ())
        items = ','.join(items)
        return '{%s}'%items
    __repr__ = __str__
    def get_str(self):
        # compact form, e.g. "a+2b" instead of "{a,b,b}"
        cs = self.cs
        keys = self.keys
        items = [(key if cs[key]==1 else "%d%s"%(cs[key], key)) for key in keys]
        items = '+'.join(items)
        return items
    def __eq__(self, other):
        return self.cs == other.cs
    def __ne__(self, other):
        return self.cs != other.cs
    def __mul__(X, Y):
        "cartesian product of multisets"
        if isinstance(Y, Multiset):
            xcs, ycs = X.cs, Y.cs
            # item keys concatenate (assumed strings); counts multiply
            cs = dict((x+y, xcs[x]*ycs[y]) for x in xcs for y in ycs)
            return Multiset(cs)
        return NotImplemented
    def __rmul__(self, r):
        "left multiplication by a number"
        assert int(r) == r
        assert r >= 0
        cs = dict((k, r*v) for (k, v) in self.cs.items())
        return Multiset(cs)
    def __add__(X, Y):
        # WARNING: not disjoint union (coproduct)
        xcs, ycs = X.cs, Y.cs
        cs = dict(xcs)
        for k, v in ycs.items():
            cs[k] = cs.get(k, 0) + v
        return Multiset(cs)
    def terms(self):
        # split into single-key multisets, one per distinct item
        cs = self.cs
        return [Multiset({k:cs[k]}) for k in self.keys]
    def disjoint(X, Y):
        # We only keep non-zero keys, so this works
        lhs = set(X.cs.keys())
        rhs = set(Y.cs.keys())
        return not bool(lhs.intersection(rhs))
    def contains(self, other):
        "self contains other"
        cs = self.cs
        for k,v in other.cs.items():
            if v > cs.get(k, 0):
                return False
        return True
    def __len__(self):
        return self._len
    def isomorphic(self, other):
        # same cardinality and same *set* of counts; item identity is ignored
        if self._len != other._len:
            return False
        lhs = set(self.cs.values())
        rhs = set(other.cs.values())
        return lhs == rhs
    def entropy(self):
        "un-normalized entropy"
        cs = self.cs
        items = [n for n in cs.values() if n>0]
        return entropy(items)
    def huffman(self, sort=False):
        """Build a Huffman-style tree over this multiset by greedily merging
        the two cheapest nodes. With ``sort=True`` node order is deterministic;
        otherwise ties are broken by shuffling."""
        cs = self.cs
        keys = list(cs.keys())
        keys.sort()
        if not keys:
            return Node(self) # the empty tree
        # build a tree, start with the leaves:
        nodes = [Node(Multiset({key : cs[key]})) for key in keys]
        while len(nodes) > 1:
            if not sort:
                shuffle(nodes)
            else:
                nodes.sort(key = str)
            n = len(nodes)
            # find the pair of nodes with the smallest combined cost
            best = (0, 1)
            value = nodes[0].cost() + nodes[1].cost()
            for i in range(n):
                for j in range(i+1, n):
                    w = nodes[i].cost() + nodes[j].cost()
                    if w < value:
                        best = (i, j)
                        value = w
            i, j = best
            assert i < j, (i, j)
            right = nodes.pop(j)
            left = nodes.pop(i)
            node = Node(left.X + right.X, left, right)
            nodes.append(node)
        return nodes[0]
    def product_tree(self):
        # NOTE(review): unfinished -- only asserts every key has length 2, then
        # falls through (see the "fail.." remark below); returns None.
        cs = self.cs
        keys = list(cs.keys())
        keys.sort()
        for k in keys:
            assert len(k) == 2
        # fail..
    def total_length(self):
        # total symbols: sum of len(item) * count
        n = sum([len(k)*v for (k, v) in self.cs.items()], 0)
        return n
    def W(self): # brain fart the name
        return self.huffman().encode().total_length()
def W(item):
    """Module-level convenience wrapper: delegate to ``item.W()``."""
    result = item.W()
    return result
class Node(object):
    "A binary tree whose nodes are labelled by Multisets; a leaf's multiset is its data"
    "mutable !!"
    def __init__(self, X, left=None, right=None):
        # X: the Multiset at this node; left/right: child Nodes (both or neither)
        self.X = X
        self._cost = len(X)  # cached cardinality, used by huffman()
        self.left = left
        self.right = right
        assert self.check(), str(self)
    def cost(self):
        return self._cost
    def check(self):
        # invariant: children are disjoint and their sum equals this node's multiset
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            return True
        if not left.X.disjoint(right.X):
            return False
        return X == left.X + right.X and left.check() and right.check()
    def __eq__(self, other):
        # equality ignores child order (left/right may be swapped)
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            return self.X == other.X
        return self.X == other.X and (
            self.left == other.left and self.right == other.right or
            self.right == other.left and self.left == other.right)
    def __ne__(self, other):
        return not (self==other)
    def clone(self):
        # deep copy of the tree structure (multisets are shared)
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            return Node(self.X) # X is immutable.... for now..
        return Node(self.X, left.clone(), right.clone())
    def __getitem__(self, idx):
        # index by 0/1 for a child, or by a tuple of 0/1's for a path
        if type(idx) is int:
            assert idx==0 or idx==1
            node = [self.left, self.right][idx]
        elif type(idx) is tuple:
            node = self
            for i in idx:
                node = node[i] # recurse
        else:
            raise TypeError
        return node
    def __setitem__(self, idx, node):
        # replace the subtree at index/path `idx`; the replacement must carry
        # the same multiset so the tree invariant is preserved
        assert isinstance(node, Node), node
        if type(idx) is tuple and len(idx)==1:
            idx = idx[0]
        if type(idx) is int:
            assert idx==0 or idx==1
            assert self.has_children
            child = [self.left, self.right][idx]
            assert node.X == child.X
            if idx==0:
                self.left = node
            else:
                self.right = node
        elif type(idx) is tuple:
            assert len(idx)>1
            child = self
            for i in idx[:-1]:
                child = child[i]
            child[idx[-1]] = node # recurse
        else:
            raise TypeError
        assert self.check()
    @property
    def has_children(self):
        return self.left is not None and self.right is not None
    def all_isos(self, other):
        # yield a sentinel (1) once per tree isomorphism between self and other
        #if len(self.X) != len(other.X):
        if not self.X.isomorphic(other.X):
            return
        if not self.has_children and not other.has_children:
            yield 1
            return
        elif not self.has_children:
            return
        elif not other.has_children:
            return
        # children may match directly or crossed over
        for l_isos in self.left.all_isos(other.left):
            for r_isos in self.right.all_isos(other.right):
                yield 1
        for l_isos in self.right.all_isos(other.left):
            for r_isos in self.left.all_isos(other.right):
                yield 1
    def isomorphic(self, other):
        "up to multiset isomorphism.."
        for iso in self.all_isos(other):
            return True
        return False
    def _subtrees(self):
        # may yield duplicates; subtrees() de-duplicates
        yield self
        yield Node(self.X)
        if not self.has_children:
            return
        X = self.X
        left = self.left
        right = self.right
        lsubs = list(left.subtrees())
        rsubs = list(right.subtrees())
        for sub in lsubs:
            yield sub
        for sub in rsubs:
            yield sub
        for l in lsubs:
            for r in rsubs:
                if l.X + r.X == X:
                    yield Node(X, l, r)
    def subtrees(self):
        # de-duplicate _subtrees() output, keyed on the string rendering
        found = set()
        for sub in self._subtrees():
            key = str(sub)
            if key in found:
                continue
            found.add(key)
            yield sub
    def encode(self):
        " the (un-normalized) distribution of encoded words "
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            return Multiset({'' : len(X)})
        # prefix '0' for the left branch, '1' for the right (Huffman-style codes)
        left = left.encode()
        right = right.encode()
        left = Multiset(dict(('0'+k, v) for (k, v) in left.cs.items()))
        right = Multiset(dict(('1'+k, v) for (k, v) in right.cs.items()))
        return left + right
    def W(self):
        # total encoded length of this tree's code distribution
        return self.encode().total_length()
    def __str__(self):
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            s = str(X)
            assert s[0]=="{"
            assert s[-1]=="}"
            return "(%s)"%s[1:-1]
        assert left and right
        return "(%s : %s)" % (left, right)
    __repr__ = __str__
    def idxs(self): # dict .keys()
        # NOTE(review): broken for trees with children -- left.idxs() yields bare
        # index tuples, so the `idx, X` unpacking below raises ValueError.
        # Compare items() further down, which yields (idx, X) pairs. Appears unused.
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            yield ()
        else:
            for idx, X in left.idxs():
                yield (0,)+idx
            for idx, X in right.idxs():
                yield (1,)+idx
    def leaves(self): # dict .values()
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            yield X
        else:
            for X in left.leaves():
                yield X
            for X in right.leaves():
                yield X
    def items(self): # dict .items()
        # yields (path, leaf multiset) pairs, paths as tuples of 0/1
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            yield ((), X)
        else:
            for idx, X in left.items():
                yield ((0,)+idx, X)
            for idx, X in right.items():
                yield ((1,)+idx, X)
    def __rmul__(self, r):
        " left multiplication by a Multiset "
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            return Node(r*X)
        return Node(r*X, r*left, r*right)
    def __lmul__(self, r):
        " right multiplication by a Multiset "
        X = self.X
        left = self.left
        right = self.right
        if left is None and right is None:
            return Node(X*r)
        return Node(X*r, left*r, right*r)
    def __mul__(TX, TY):
        # tree product: start from X*TY, then graft a copy of TX at each leaf of TY
        if not isinstance(TY, Node):
            return TX.__lmul__(TY)
        X = TX.X
        Y = TY.X
        #print("__mul__", TX, TY)
        XTY = X * TY
        #print("__mul__", XTY)
        #TXY = TX * Y
        top = XTY
        for (idx, r) in TY.items():
            # glue
            #print("glue", idx)
            if not idx:
                top = TX*r
            else:
                top[idx] = TX*r # recurse
        return top
    def __add__(self, other):
        # graft `other` onto the (unique) leaf of self whose multiset it matches/extends
        assert self.X.contains(other.X)
        X = self.X
        left = self.left
        right = self.right
        if not self.has_children:
            assert self.X == other.X
            return other
        elif left.X == other.X:
            assert not left.has_children
            left = other
        elif right.X == other.X:
            assert not right.has_children
            right = other
        elif left.X.contains(other.X):
            left = left+other # recurse
        elif right.X.contains(other.X):
            right = right+other # recurse
        else:
            assert 0, (self, other)
        return Node(X, left, right)
    # ------------- rendering ----------------------
    def get_bbox(self, R=1.0):
        "find (width,height) of bounding box for render"
        X = self.X
        left = self.left
        right = self.right
        if not self.has_children:
            s = X.get_str()
            W = (1 + 0.1*len(s)) * R # fudge this
            return (W, R) # square
        lbb = left.get_bbox(R)
        rbb = right.get_bbox(R)
        W = lbb[0] + rbb[0]
        H = max(lbb[1], rbb[1]) + R
        return (W, H)
    def render(self, x=0, y=0, R=1.0, r=0.2, cvs=None, name=None):
        "(x, y) is top center of this tree"
        # NOTE(review): `pyx`, `path`, `white`, `black`, `st_Thick` and `south`
        # are not imported in the visible code (the pyx import is commented out
        # near the top of the file) -- confirm they are provided elsewhere.
        if cvs is None:
            cvs = pyx.canvas.canvas()
        X = self.X
        left = self.left
        right = self.right
        cvs.fill(path.circle(x, y, r), [white])
        cvs.stroke(path.circle(x, y, r))
        if not self.has_children:
            cvs.text(x, y-2.8*r, X.get_str(), south)
        else:
            w0, h0 = left.get_bbox(R)
            w1, h1 = right.get_bbox(R)
            w, h = self.get_bbox(R)
            x0 = x-0.5*w+0.5*w0
            x1 = x+0.5*w-0.5*w1
            y0 = y1 = y-R
            # render self...
            cvs.fill(path.circle(x, y, 0.4*r), [black])
            cvs.stroke(path.line(x0, y0, x, y), st_Thick)
            cvs.stroke(path.line(x1, y1, x, y), st_Thick)
            left.render(x0, y0, R=R, r=r, cvs=cvs)
            right.render(x1, y1, R=R, r=r, cvs=cvs)
        if name is not None:
            cvs.writePDFfile(name)
        return cvs
class Box(object):
    # Abstract marker base for renderable layout boxes (see HBox, TextBox).
    pass
class HBox(Box):
    """Horizontal layout box: renders its child boxes side by side on one row."""
    def __init__(self, items, align="center"):
        self.items = list(items)
        self.align = align  # only "center" triggers rendering below
    def render(self, x=0., y=0., cvs=None, name=None, **kw):
        "(x, y) is top center of this box"
        # NOTE(review): `pyx` is not imported in the visible code (its import is
        # commented out near the top of the file) -- confirm it is available.
        if cvs is None:
            cvs = pyx.canvas.canvas()
        items = self.items
        boxs = [item.get_bbox(**kw) for item in items]
        w = sum(b[0] for b in boxs) # sum widths
        h = max([b[1] for b in boxs]) # max of heights
        x0 = x - 0.5*w
        y0 = y - 0.5*h
        align = self.align
        for i, item in enumerate(self.items):
            b = boxs[i]
            if align == "center":
                item.render(x0 + 0.5*b[0], y0+0.5*b[1], cvs=cvs, **kw)
            x0 += b[0]  # advance the cursor by this child's width
        if name is not None:
            cvs.writePDFfile(name)
class TextBox(Box):
    """A fixed-size box that renders a text label at its bottom edge."""
    def __init__(self, s, w=1, h=1):
        self.s = s  # the (LaTeX) text to render
        self.w = w
        self.h = h
    def get_bbox(self):
        # fixed dimensions given at construction
        return self.w, self.h
    def render(self, x=0., y=0., cvs=None, name=None, **kw):
        # NOTE(review): `south` is not defined in the visible code -- presumably
        # a pyx text-alignment constant; confirm it is provided elsewhere.
        cvs.text(x, y-self.h, self.s, south)
def render():
    """Produce the example PDF figures (Huffman trees, sums, products) under tmp/."""
    # head = "transversal2018/"
    head = "tmp/"
    seed(0)  # deterministic figures
    a = Multiset({"a" : 1})
    b = Multiset({"b" : 1})
    c = Multiset({"c" : 1})
    d = Multiset({"d" : 1})
    # NOTE(review): duplicate assignment of `d` below -- harmless but redundant.
    d = Multiset({"d" : 1})
    e = Multiset({"e" : 1})
    f = Multiset({"f" : 1})
    g = Multiset({"g" : 1})
    def mkrand(items, a=1, b=3):
        # random multiset with between `a` and `b` copies of each item
        Z = Multiset()
        for A in items:
            Z = Z + randint(a, b)*A
        return Z
    T = Node(a+b+2*c, Node(a+b, Node(a), Node(b)), Node(2*c))
    #T.render(name="pic_a_b_2c.pdf")
    S = ((a+b)*T)
    S.render(name=head+"pic_left_a_b_2c.pdf")
    #T = T*T
    U = Node(a*a+b*a, Node(a*a), Node(b*a))
    SU = S+U
    box = HBox([S, TextBox("$+$"), U, TextBox("$=$"), SU])
    box.render(name=head+"pic_add.pdf")
    S = Node(a+b, Node(a), Node(b))
    #(S*T).render(name=head+"pic_prod.pdf")
    box = HBox([S, TextBox(r"$\times$"), T, TextBox("$=$"), S*T])
    box.render(name=head+"pic_prod.pdf")
    box = HBox([S, TextBox(r"$\otimes$"), T, TextBox("$=$"), S*T])
    box.render(name=head+"pic_tensor.pdf")
    X = mkrand([a,b,c,d,e,f,g])
    TX = X.huffman(sort=True)
    #print(W(TX))
    if 0:
        # disabled variant of the huffman figure
        box = HBox([TX])
        box.render(name=head+"pic_huffman.pdf")
    X = 5*a+5*b+4*c+3*d+3*e
    TX = X.huffman(sort=True)
    print(W(TX))
    box = HBox([TX])
    box.render(name=head+"pic_huffman.pdf")
    X = a + b + 2*c + 4*d
    TX = X.huffman(sort=True)
    box = HBox([TX])
    box.render(name=head+"pic_dyadic.pdf")
    print("OK")
def randmonomial(X):
    """Build a random monomial (full binary) tree over the terms of X.

    Repeatedly picks two random subtrees from the working forest and joins
    them under a new internal node until one tree remains.
    """
    forest = [Node(term) for term in X.terms()]
    if not forest:
        # Empty multiset: a single node carrying X itself.
        return Node(X)
    while len(forest) > 1:
        lft = forest.pop(randint(0, len(forest)-1))
        rgt = forest.pop(randint(0, len(forest)-1))
        forest.append(Node(lft.X + rgt.X, lft, rgt))
    return forest[0]
def randtree(X, monomial=True):
    """Build a random tree over the terms of X.

    With monomial=True this is just randmonomial(X); otherwise each random
    merge may either create an internal node with two children, or collapse
    the pair into a single leaf carrying the combined multiset (coin flip).
    """
    if monomial:
        return randmonomial(X)
    forest = [Node(term) for term in X.terms()]
    if not forest:
        return Node(X)
    while len(forest) > 1:
        lft = forest.pop(randint(0, len(forest)-1))
        rgt = forest.pop(randint(0, len(forest)-1))
        if randint(0, 1):
            merged = Node(lft.X + rgt.X, lft, rgt)
        else:
            # Collapse: keep the combined multiset but drop the children.
            merged = Node(lft.X + rgt.X)
        forest.append(merged)
    return forest[0]
def main():
    """Self-test / experiment driver for the Multiset, Node and W machinery.

    The function is organised as a sequence of experiment sections separated
    by early `return` statements: everything after the first active `return`
    is disabled scratch work kept for reference.
    """
    # --- basic Multiset algebra -----------------------------------------
    X = Multiset({"a":3, "b":1})
    assert (X+X) == Multiset({"a":6, "b":2})
    assert (X+X) == 2*X
    #print(X, X.entropy())
    XX = X*X
    Y = Multiset({"a":2, "b":2})
    #print(Y, Y.entropy())
    assert str(Y) == "{a,a,b,b}"
    # Singleton multisets used as generators throughout.
    A = Multiset({"a" : 1})
    B = Multiset({"b" : 1})
    C = Multiset({"c" : 1})
    D = Multiset({"d" : 1})
    E = Multiset({"e" : 1})
    F = Multiset({"f" : 1})
    G = Multiset({"g" : 1})
    assert A.disjoint(B)
    assert not (A+B).disjoint(B)
    assert (A+2*B).terms() == [A, 2*B]
    assert not A.contains(B)
    assert (A+B).contains(B)
    assert not (A+B).contains(2*B)
    # ---------------------------------------------------------------------
    # Node (tree) equality, isomorphism, subtrees, indexing and cloning.
    assert Node(A+B, Node(A), Node(B)) == Node(B+A, Node(B), Node(A))
    lhs, rhs = (Node(A+B+C, Node(A+B, Node(A), Node(B)), Node(C)),
        Node(A+B+C, Node(A), Node(B+C, Node(B), Node(C))))
    assert lhs.isomorphic(rhs)
    T = Node(A+B+C, Node(A+B, Node(A), Node(B)), Node(C))
    subs = list(T.subtrees())
    assert len(subs) == 8
    # test left multiplication
    for r in [2, A, A+B]:
        assert r*T == Node(r*A+r*B+r*C, Node(r*A+r*B, Node(r*A), Node(r*B)), Node(r*C))
    T = Node(A+B+C+D, Node(A+B, Node(A), Node(B)), Node(C+D, Node(C), Node(D)))
    subs = list(T.subtrees())
    assert len(subs) == 13, len(subs)
    S, T = Node(A+B, Node(A), Node(B)), Node(B)
    assert S[0] != T
    assert S[1] == T
    U = S.clone()
    assert U==S
    U[1] = T
    assert U[0] != T
    assert U[1] == T
    assert U==S
    T = Node(A+B+C+D, Node(A+B, Node(A), Node(B)), Node(C+D, Node(C), Node(D)))
    assert T[0] == S
    T[0] = Node(A+B)
    T = Node(2*A+B+C+D+E+F, Node(2*A+B+E), Node(C+D+F))
    U = T.clone()
    U[0] = Node(2*A+B+E, Node(2*A), Node(B+E))
    # Tuple indexing assigns into a nested child.
    U[0, 1] = Node(B+E, Node(B), Node(E))
    assert U.clone() == U
    T = Node(A+B, Node(A), Node(B))
    S = Node(A+B+2*C, Node(A+B, Node(A), Node(B)), Node(2*C))
    assert str(T*S) == "((((aa) : (ba)) : ((ab) : (bb))) : ((ac,ac) : (bc,bc)))"
    def randmultiset(a=0, b=4):
        # Random multiset with between a and b copies of each of A..E.
        Z = randint(a, b)*A + randint(a, b)*B + randint(a, b)*C + randint(a, b)*D + randint(a, b)*E
        return Z
    #seed(1)
    # --- W is a derivation on monomial trees -----------------------------
    for trial in range(1000):
        X = randmultiset()
        if not(len(X)):
            continue
        TX = randtree(X)
        assert X.entropy() <= W(X) <= W(TX)
        Y = randmultiset()
        TY = randtree(Y)
        # W is a derivation on monomial trees
        assert W(TX*TY) == len(X)*W(TY) + W(TX)*len(Y)
        HX = X.huffman()
        HY = Y.huffman()
        print(W(X*Y), W(HX*HY))
    for trial in range(100):
        X = randmultiset()
        TX = randtree(X, False)
        Y = randmultiset()
        TY = randtree(Y, False)
        assert W(Y*TX) == len(Y)*W(TX)
        T = TX
        for n in range(1, 4):
            # Powers of a tree scale W like a derivation would predict.
            assert W(T) == n*len(X)**(n-1) * W(TX)
            T = TX*T
        #print("TX=%s, TY=%s"%(TX, TY))
        # W is a derivation on non-monomial trees
        assert W(TX*TY) == len(X)*W(TY) + W(TX)*len(Y)
    # Early return: everything below is disabled scratch work kept for
    # reference (further experiments on entropy / huffman / W bounds).
    return
    # ---------------------------------------------------------------------
    #print( ((X*Y).entropy(), len(X)*Y.entropy() + len(Y)*X.entropy()))
    assert is_close((X*Y).entropy(), len(X)*Y.entropy() + len(Y)*X.entropy())
    tree = X.huffman()
    assert tree.X == X
    #assert str(tree) == "({B} : {A,A,A})", repr(str(tree))
    #assert str(tree.encode()) == "{0,1,1,1}"
    tree = XX.huffman()
    #assert str(tree.encode()) == "{0,0,0,0,0,0,0,0,0,10,10,10,110,110,110,111}"
    assert XX.W() == 27
    def mkrand(a=1, b=3):
        Z = randint(a, b)*A + randint(a, b)*B + randint(a, b)*C #+ randint(a, b)*D + randint(a, b)*E
        return Z
    #seed(0)
    for trial in range(1000):
        X = mkrand(1, 5)
        lhs = X.huffman()
        rhs = X.huffman()
        assert lhs.isomorphic(rhs) # huffman is unique up to isomorphism ? this can't be right..
    for trial in range(100):
        X = mkrand(1, 3)
        T = X.huffman()
        for S in T.subtrees():
            assert S.check()
    assert W(Multiset()) == 0
    for trial in range(100):
        a = randint(1, 3)
        b = randint(1, 3)
        c = randint(1, 3)
        X = a*A + b*B + c*C
        lhs = W(X*X)
        rhs = 2*len(X)*W(X)
        assert lhs <= rhs
        #if lhs==rhs: # no nice characterization of this
        #    print(X)
        #else:
        #    print("*")
    for trial in range(100):
        X = mkrand()
        Y = mkrand()
        S = X.huffman()
        T = Y.huffman()
        ST = (X*Y).huffman()
        lhs = W(ST)
        rhs = len(X)*W(T) + W(S)*len(Y)
        #print(lhs, rhs)
        assert lhs<=rhs
    def mkdyadic(a=0, b=4, terms=[A, B, C, D, E]):
        # Random multiset whose coefficients are powers of two and whose
        # total size is itself a power of two.
        # NOTE(review): mutable default for `terms` — never mutated here,
        # but worth keeping in mind.
        while 1:
            cs = [2**randint(a, b) for t in terms]
            c = sum(cs)
            if bin(c).count('1')==1: # is power of 2
                break
        Z = reduce(add, [c*term for (c, term) in zip(cs, terms)])
        return Z
    for trial in range(100):
        X = mkdyadic()
        Y = mkdyadic()
        #print(X, Y)
        S = X.huffman()
        T = Y.huffman()
        ST = (X*Y).huffman()
        lhs = W(ST)
        rhs = len(X)*W(T) + W(S)*len(Y)
        #print(lhs, rhs)
        # Equality (not just <=) holds in the dyadic case.
        assert lhs==rhs
        assert X.entropy() == W(X)
        assert Y.entropy() == W(Y)
        assert (X*Y).entropy() == lhs
    return
    for trial in range(1000):
        a = randint(1, 3)
        b = randint(1, 3)
        c = randint(1, 3)
        X = a*A + b*B + c*C
        lhs = W(X)
        # Split X into Y + XY over all possible coefficient splits.
        for aa in range(a+1):
            for bb in range(b+1):
                for cc in range(c+1):
                    Y = aa*A + bb*B + cc*C
                    XY = (a-aa)*A + (b-bb)*B + (c-cc)*C
                    assert XY + Y == X
                    rhs = W(XY + len(Y)*D) + W(Y)
                    assert lhs <= rhs
                    if len(Y)==0:
                        assert XY == X
                        assert XY + len(Y)*D == X
                        assert lhs == rhs
    return
    for trial in range(100):
        X = mkrand()
        n = randint(2, 5)
        assert n*X.W() == (n*X).W()
        print(X)
        lhs, rhs = n*X.huffman(), (n*X).huffman()
        print(lhs, rhs)
        print()
        #assert n*X.huffman() == (n*X).huffman()
        assert lhs.isomorphic(rhs)
        assert X.huffman().check()
    # print(Z.entropy(), Z.W())
    X = 3*A + B
    #print(X.huffman())
    lhs, rhs = (X*X).W(), len(X)*X.W() + len(X)*X.W()
    #print(lhs, rhs)
    assert lhs < rhs
    assert lhs == 27
    assert rhs == 32
    for trial in range(100):
        X = mkrand(1, 3)
        Y = mkrand(1, 3)
        #assert (X*Y) == (Y*X) # nope ( not on the nose.. )
        assert (X*Y).W() == (Y*X).W()
        #assert (X*Y).huffman() == (Y*X).huffman() # nope ( not on the nose.. )
        lhs, rhs = (X*Y).W(), len(X)*Y.W() + len(Y)*X.W()
        assert lhs<=rhs
        lhs, rhs = (X*Y).entropy(), len(X)*Y.entropy() + len(Y)*X.entropy()
        assert is_close(lhs, rhs)
    # Z = 25*A + 25*B + 20*C + 15*D + 15*E
    def mkrand(items, a=1, b=3):
        # Redefines the earlier mkrand with an explicit item list.
        # NOTE(review): the loop variable shadows the outer A.
        Z = Multiset()
        for A in items:
            Z = Z + randint(a, b)*A
        return Z
    for trial in range(100):
        X = mkrand([A, B, C])
        Y = mkrand([D, E, F])
        #print(X, Y)
        #print(X+Y)
        lhs = W(X+Y)
        rhs = W(X + len(Y)*D) + W(Y)
        #print(lhs, rhs)
        assert lhs <= rhs
        lhs = (X+Y).entropy()
        rhs = (X + len(Y)*D).entropy() + (Y).entropy()
        assert is_close(lhs, rhs)
        #print(lhs, rhs)
        #print()
        #break
    for trial in range(100):
        X0 = mkrand([A, B, C], 1, 3)
        Y = mkrand([D, E, F], 1, 3)
        print(X, Y)
        for a in range(1, 10):
            X = a*X0
            lhs = W(X+Y)
            rhs = W(X + len(Y)*G) + W(Y)
            print(lhs, rhs)
            assert lhs <= rhs
            if lhs==rhs:
                break
        else:
            # NOTE(review): `fail` is an undefined name; hitting this
            # for-else raises NameError — apparently a deliberate abort.
            fail
        print()
    return
    seed(0)
    while 1:
        X = mkrand([A, B, C], 1, 3)
        Y = mkrand([D, E, F], 1, 3)
        print(X, Y)
        a = 1
        while 1:
            print("[%s]"%a, end="", flush=True)
            #aX = a*X
            #lhs, rhs = W(aX*Y), len(aX)*W(Y) + W(aX)*len(Y)
            aY = a*Y
            lhs, rhs = W(X*aY), len(X)*W(aY) + W(X)*len(aY)
            if lhs==rhs:
                break
            print(lhs, rhs)
            assert lhs == a*W(X*Y)
            assert rhs == a*(len(X)*W(Y) + W(X)*len(Y))
            a += 1
            #assert a<10
            if a>10:break
        print(".", end="", flush=True)
    return
    found = set()
    #for trial in range(100):
    while 1:
        # Search for power-of-two coefficient triples where entropy == W.
        i = 2**randint(0, 3)
        j = 2**randint(0, 3)
        k = 2**randint(0, 3)
        X = i*A + j*B + k*C
        #X = mkrand([A, B, C], 1, 8)
        lhs, rhs = X.entropy(), X.W()
        if is_close(lhs, rhs):
            #vals = list(X.cs.values())
            vals = [i, j, k]
            vals.sort()
            vals = tuple(vals)
            print(vals)
            if vals not in found:
                print(vals)
                found.add(vals)
    return
    #X = 5*A + 1*B + C + 1*D
    X = 1*A + 1*B + 1*C
    X1 = 1
    h = X.entropy()
    print(h)
    n = 1
    while 1:
        # Track W/entropy of successive powers of X, normalised by
        # r = n * len(X)**(n-1).
        X1 = X1*X
        lhs, rhs = (X1.W(), X1.entropy())
        r = n * (len(X)**(n-1))
        assert is_close(rhs/r, h)
        print('\t', lhs / r)
        # if len(X1) > 10000000:
        #     break
        n += 1
    print(len(X1))
try:
    # yay globals...
    # Drawing support is optional: when pyx is missing, the module still
    # imports (render() will then fail with NameError if called).
    import pyx
    from pyx import path, deco, trafo, style, text, color, deformer
    from pyx.color import rgb, cmyk
    from pyx.color import rgbfromhexstring as rgbhex
    # Basic color palette used by the figure-rendering code.
    black = rgb(0., 0., 0.)
    blue = rgb(0., 0., 0.8)
    lred = rgb(1., 0.4, 0.4)
    red = rgb(1., 0.0, 0.0)
    white = rgb(1., 1., 1.)
    grey = rgb(0.75, 0.75, 0.75)
    shade = grey
    shade0 = rgb(0.25, 0.25, 0.25)
    shade1 = rgb(0.80, 0.80, 0.80)
    shade2 = rgb(0.85, 0.85, 0.85)
    light_shade = rgb(0.85, 0.65, 0.1)
    light_shade = rgb(0.9, 0.75, 0.4)
    # Text anchor constants (halign/valign pairs) for cvs.text(...).
    north = [text.halign.boxcenter, text.valign.top]
    northeast = [text.halign.boxright, text.valign.top]
    northwest = [text.halign.boxleft, text.valign.top]
    south = [text.halign.boxcenter, text.valign.bottom]
    southeast = [text.halign.boxright, text.valign.bottom]
    southwest = [text.halign.boxleft, text.valign.bottom]
    east = [text.halign.boxright, text.valign.middle]
    west = [text.halign.boxleft, text.valign.middle]
    center = [text.halign.boxcenter, text.valign.middle]
    # Line style shortcuts.
    st_dashed = [style.linestyle.dashed]
    st_dotted = [style.linestyle.dotted]
    st_round = [style.linecap.round]
    #st_mitre = [style.linecap.square]
    st_thick = [style.linewidth.thick]
    st_Thick = [style.linewidth.Thick]
    st_THick = [style.linewidth.THick]
    st_THIck = [style.linewidth.THIck]
    st_THICk = [style.linewidth.THICk]
    st_THICK = [style.linewidth.THICK]
except ImportError:
    pass
if __name__ == "__main__":
    # argv is parsed elsewhere in this module; --render selects figure output
    # instead of the self-test driver.
    entry = render if argv.render else main
    entry()
| 28,559 | 10,835 |
'''
- Leetcode problem: 384
- Difficulty: Medium
- Brief problem description:
Shuffle a set of numbers without duplicates.
Example:
// Init an array with set 1, 2, and 3.
int[] nums = {1,2,3};
Solution solution = new Solution(nums);
// Shuffle the array [1,2,3] and return its result. Any permutation of [1,2,3] must equally likely to be returned.
solution.shuffle();
// Resets the array back to its original configuration [1,2,3].
solution.reset();
// Returns the random shuffling of array [1,2,3].
solution.shuffle();
- Solution Summary:
- Used Resources:
--- Bo Zhou
'''
class Solution:
    """LeetCode 384 — Shuffle an Array.

    Keeps a pristine copy of the input for reset() and shuffles the working
    array in place.
    """

    def __init__(self, nums: List[int]):
        # origin: untouched snapshot; arr: working array (aliases the caller's
        # list, matching the original behavior).
        self.origin = nums[:]
        self.arr = nums

    def reset(self) -> List[int]:
        """Reset the array to its original configuration and return it.

        Bug fix: the previous version returned the pristine copy but left
        self.arr shuffled, so subsequent shuffles were not performed on the
        original configuration.
        """
        self.arr = self.origin[:]
        return self.arr

    def shuffle(self) -> List[int]:
        """Return a uniformly random shuffling of the array.

        Uses the Fisher-Yates algorithm, which makes each of the n!
        permutations equally likely — the previous offset/direction scheme
        did not pick swap partners uniformly.
        """
        for i in range(len(self.arr) - 1, 0, -1):
            j = random.randint(0, i)
            self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
        return self.arr
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle() | 1,759 | 558 |
# python
# Python 2 release/packaging helper for the mecco_dara kit collection.
import sys, traceback
import requests, pprint, os, zipfile, shutil, glob, xml.etree.ElementTree, getpass
import json
from os.path import expanduser
#from git import Repo

# GitHub REST endpoint for the latest release of a pixelfondue repository.
BASE_URL = "https://api.github.com/repos/pixelfondue/%s/releases/latest"
# All paths below are rooted at the directory containing this script.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
print "base path:", BASE_PATH
# Plain-JSON cache of GitHub credentials (written by set_github_credentials).
config_file_path = os.path.join(BASE_PATH, "_github_credentials")
print "config file path:", config_file_path
KIT_NAME = "mecco_dara"
DARA_PATH = os.path.join(BASE_PATH, KIT_NAME)
print "dara path:", DARA_PATH
DARA_KITS_PATH = os.path.join(DARA_PATH, "Kits")
print "dara kits path:", DARA_KITS_PATH
DARA_WIP_PATH = os.path.join(BASE_PATH, "wip")
print "dara WIP path:", DARA_WIP_PATH
DARA_RELEASES_PATH = os.path.join(BASE_PATH, "releases")
print "dara releases path:", DARA_RELEASES_PATH

# Kit repositories to download releases for / clone into the wip folder.
KITS = [
    'mecco_neatFreak',
    'mecco_solo',
    'mecco_tabbyCat',
    'mecco_bling',
    'mecco_cropper',
    'mc_noodles',
    'mc_lifesaver',
    'mecco_flipper',
    'mecco_ignition',
    'mecco_kelvin',
    'mecco_metermade',
    'mecco_passify',
    'mecco_renderMonkey',
    'mecco_replay',
    'mecco_snap',
    'mecco_tagger',
    'mecco_wheely',
    'mecco_Zen'
]
def set_github_credentials():
global USERNAME
global PASSWORD
try:
config_file = open(config_file_path)
config = json.load(config_file)
USERNAME = config['GITHUB_USERNAME']
PASSWORD = config['GITHUB_PASSWORD']
except:
print "Username:"
USERNAME = raw_input()
if 'PYCHARM' in os.environ:
PASSWORD = raw_input()
else:
PASSWORD = getpass.getpass('Password: ')
config = {'GITHUB_USERNAME':USERNAME, 'GITHUB_PASSWORD':PASSWORD}
config_file = open(config_file_path, 'w')
json.dump(config, config_file)
finally:
print "username:", USERNAME
print "password:", PASSWORD[0] + "..."
def download_file(kit, url):
    """Stream *url* (a release zipball) into the Kits folder.

    Downloads to a "<kit>_<basename>_partial" temp file, then renames it to
    the final ".zip" name (replacing any stale copy) and returns that path.

    Fix: a non-200 response previously fell through silently; now it raises
    requests.exceptions.HTTPError so the failure is reported instead of
    crashing later on a missing temp file.
    """
    tmp_filename = os.path.join(DARA_KITS_PATH, kit + "_" + os.path.basename(url) + "_partial")
    r = requests.get(url, stream=True, auth=(USERNAME, PASSWORD))
    # Fail loudly on 4xx/5xx instead of continuing with no data on disk.
    r.raise_for_status()
    with open(tmp_filename, 'wb') as f:
        for chunk in r:
            f.write(chunk)
    complete_filename = tmp_filename.replace("_partial", ".zip")
    if os.path.exists(complete_filename):
        os.remove(complete_filename)
    os.rename(tmp_filename, complete_filename)
    return complete_filename
def delete_dir_contents(directory):
    """Remove every file and subdirectory inside *directory*.

    The directory itself is kept; failures on individual entries are printed
    and skipped.
    """
    for entry in os.listdir(directory):
        entry_path = os.path.join(directory, entry)
        try:
            if os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
            elif os.path.isfile(entry_path):
                os.unlink(entry_path)
        except Exception as e:
            print(e)
def extract_zip_file(src, dest):
    """Extract the zip at *src* into *dest*.

    Returns the archive's first entry name (for GitHub zipballs this is the
    top-level folder the contents were extracted into).
    """
    with zipfile.ZipFile(src, 'r') as archive:
        first_entry = archive.namelist()[0]
        archive.extractall(dest)
    return first_entry
def make_dirs():
    """Ensure the Kits and releases working directories exist."""
    for needed in (DARA_KITS_PATH, DARA_RELEASES_PATH):
        if not os.path.exists(needed):
            os.makedirs(needed)
# download and extract
def download_releases():
    """For each kit: fetch its latest GitHub release into the Kits folder
    (skipping kits already at the released version), then package the whole
    dara folder into a versioned release zip under DARA_RELEASES_PATH.
    """
    for kit in KITS:
        try:
            rest_api_response = requests.get(BASE_URL % kit, auth=(USERNAME, PASSWORD))
            rest_api_response.raise_for_status()
        except requests.exceptions.HTTPError as err:
            print err
            sys.exit(1)
        data = rest_api_response.json()
        target_path = os.path.join(DARA_KITS_PATH, kit)
        target_cfg = os.path.join(target_path, "index.cfg")
        if os.path.exists(target_cfg) and os.path.isfile(target_cfg):
            # Compare the locally installed version with the release tag;
            # skip the download when they already match.
            repo_version = data['tag_name']
            config_xml = xml.etree.ElementTree.parse(target_cfg).getroot()
            local_version = config_xml.attrib["version"]
            if local_version == repo_version:
                print "up to date %s..." % data['zipball_url']
                continue
        if os.path.exists(target_path):
            shutil.rmtree(target_path)
        print "downloading %s..." % data['zipball_url']
        zip_file_path = download_file(kit, data['zipball_url'])
        extracted_folder_name = extract_zip_file(zip_file_path, DARA_KITS_PATH)
        extracted_folder_name = os.path.join(DARA_KITS_PATH, extracted_folder_name)
        # retrieve actual kit name (just in case it's not the same as the github repo name)
        index_file = os.path.join(extracted_folder_name, "index.cfg")
        index_xml = xml.etree.ElementTree.parse(index_file).getroot()
        real_kit_name = index_xml.attrib["kit"]
        os.rename(extracted_folder_name, target_path)
        os.remove(zip_file_path)
    # duplicate dara folder
    temp_directory = os.path.join(DARA_RELEASES_PATH, "tmp")
    shutil.copytree(DARA_PATH, temp_directory)
    # # delete cruft
    for directory,subdirs,files in os.walk(temp_directory):
        # NOTE(review): `directory` is already an absolute path beneath
        # temp_directory, so os.path.join(temp_directory, directory, ...)
        # collapses to `directory/...` — it works, but only by accident.
        if '.gitignore' in files:
            os.unlink(os.path.join(temp_directory, directory, '.gitignore'))
        if '.gitmodules' in files:
            os.unlink(os.path.join(temp_directory, directory, '.gitmodules'))
        if '.gitattributes' in files:
            os.unlink(os.path.join(temp_directory, directory, '.gitattributes'))
        if '.git' in subdirs:
            shutil.rmtree(os.path.join(temp_directory, directory, '.git'))
        for pyc_file in [f for f in files if f.lower().endswith('.pyc')]:
            try:
                # NOTE(review): pyc_file is a bare filename, so this unlink
                # targets the current working directory, not `directory` —
                # confirm intent; failures are swallowed below either way.
                os.unlink(pyc_file)
            except:
                print traceback.print_exc()
    # retrieve dara version
    index_file = os.path.join(temp_directory, "index.cfg")
    index_xml = xml.etree.ElementTree.parse(index_file).getroot()
    dara_version = index_xml.attrib["version"]
    release_dirname = os.path.join(DARA_RELEASES_PATH, KIT_NAME + "_" + str(dara_version))
    if os.path.isdir(release_dirname):
        shutil.rmtree(release_dirname)
    os.rename(temp_directory, release_dirname)
    # zip release directory
    release_zipname = release_dirname + ".zip"
    temp_file = os.path.join(DARA_RELEASES_PATH, "tmp")
    shutil.make_archive(temp_file, 'zip', release_dirname)
    if os.path.isfile(release_zipname):
        os.unlink(release_zipname)
    os.rename(temp_file + ".zip", release_zipname)
    shutil.rmtree(release_dirname)
# update/clone wip
def update_wip():
try:
os.mkdir(DARA_WIP_PATH)
except:
pass
for kit in KITS:
try:
repo_url = 'https://%s:%s@github.com/adamohern/%s' % (USERNAME, PASSWORD, kit)
dest_path = os.path.join(DARA_WIP_PATH, kit)
if os.path.exists(dest_path):
print 'Update', kit
# repo = Repo(dest_path)
# origin = repo.remotes.origin
# origin.pull(rebase=True)
# origin.push()
os.chdir(dest_path)
os.system("git pull --rebase")
os.system("git push")
else:
print "Cloning", kit
#Repo.clone_from(repo_url, os.path.join(DARA_WIP_PATH, kit))
os.system("git clone {0} {1}".format(repo_url, dest_path))
except requests.exceptions.HTTPError as err:
print err
sys.exit(1)
continue
data = rest_api_response.json()
target_path = os.path.join(DARA_KITS_PATH, kit)
target_cfg = os.path.join(target_path, "index.cfg")
if os.path.exists(target_cfg) and os.path.isfile(target_cfg):
repo_version = data['tag_name']
config_xml = xml.etree.ElementTree.parse(target_cfg).getroot()
local_version = config_xml.attrib["version"]
if local_version == repo_version:
print "up to date %s..." % data['zipball_url']
continue
if os.path.exists(target_path):
shutil.rmtree(target_path)
print "downloading %s..." % data['zipball_url']
zip_file_path = download_file(kit, data['zipball_url'])
extracted_folder_name = extract_zip_file(zip_file_path, DARA_KITS_PATH)
extracted_folder_name = os.path.join(DARA_KITS_PATH, extracted_folder_name)
# retrieve actual kit name (just in case it's not the same as the github repo name)
index_file = os.path.join(extracted_folder_name, "index.cfg")
index_xml = xml.etree.ElementTree.parse(index_file).getroot()
real_kit_name = index_xml.attrib["kit"]
os.rename(extracted_folder_name, target_path)
os.remove(zip_file_path)
# duplicate dara folder
temp_directory = os.path.join(DARA_RELEASES_PATH, "tmp")
shutil.copytree(DARA_PATH, temp_directory)
# # delete cruft
for directory, subdirs, files in os.walk(temp_directory):
if '.gitignore' in files:
os.unlink(os.path.join(temp_directory, directory, '.gitignore'))
if '.gitmodules' in files:
os.unlink(os.path.join(temp_directory, directory, '.gitmodules'))
if '.gitattributes' in files:
os.unlink(os.path.join(temp_directory, directory, '.gitattributes'))
if '.git' in subdirs:
shutil.rmtree(os.path.join(temp_directory, directory, '.git'))
for pyc_file in [f for f in files if f.lower().endswith('.pyc')]:
os.unlink(pyc_file)
# retrieve dara version
index_file = os.path.join(temp_directory, "index.cfg")
index_xml = xml.etree.ElementTree.parse(index_file).getroot()
dara_version = index_xml.attrib["version"]
release_dirname = os.path.join(DARA_RELEASES_PATH, KIT_NAME + "_" + str(dara_version))
if os.path.isdir(release_dirname):
shutil.rmtree(release_dirname)
os.rename(temp_directory, release_dirname)
# zip release directory
release_zipname = release_dirname + ".zip"
temp_file = os.path.join(DARA_RELEASES_PATH, "tmp")
shutil.make_archive(temp_file, 'zip', release_dirname)
if os.path.isfile(release_zipname):
os.unlink(release_zipname)
os.rename(temp_file + ".zip", release_zipname)
shutil.rmtree(release_dirname)
if __name__ == '__main__':
    # Full pipeline: credentials -> working dirs -> releases -> wip clones.
    try:
        set_github_credentials()
        make_dirs()
        download_releases()
        update_wip()
    except BaseException as e:
        # traceback.print_exc() writes the traceback to stderr and returns
        # None, so this also prints "None"; NOTE(review): a bare
        # traceback.print_exc() was probably intended.
        print traceback.print_exc()
        raise
    finally:
        # Keep the console window open (script is double-clicked on Windows).
        raw_input('(Press <Enter> to close)')
| 10,613 | 3,610 |
from pathlib import Path
import pandas as pd
def load_excavators(cleaned=False):
    """Load the excavator toy dataset that ships next to this module.

    Hodkiewicz, M., and Ho, M. (2016)
    "Cleaning historical maintenance work order data for reliability analysis"
    in Journal of Quality in Maintenance Engineering, Vol 22 (2), pp. 146-163.

    Columns: BscStartDate (MWO initialization, parsed to datetime), Asset
    (which excavator A-E), OriginalShorttext (free-text MWO description),
    PMType (PM01 repair / PM02 replacement), Cost (MWO expense, AUD).

    Args:
        cleaned (bool): whether to return the original dataset (False) or the
            dataset with keyword extraction rules applied (True), as described
            in Hodkiewicz and Ho (2016)

    Returns:
        pandas.DataFrame: raw data for use in testing nestor and subsequent
        workflows
    """
    data_dir = Path(__file__).parent
    filename = "excavators-cleaned.csv" if cleaned else "excavators.csv"
    frame = pd.read_csv(data_dir / filename)
    frame["BscStartDate"] = pd.to_datetime(frame.BscStartDate)
    return frame
| 1,210 | 387 |
import unittest
from game import Game
import server
REDIS_HOST = '192.168.20.50'
class TestServer(unittest.TestCase):
    """Integration tests for the Battleship server.

    Please note that the tests in this suite only work if a Redis host is
    available (see REDIS_HOST above).
    """

    def test_redis_connection(self):
        """The Battleship server can locate and ping its Redis instance."""
        with server.Battleship(REDIS_HOST, db=1) as bs:
            self.assertTrue(bs.ping_redis())

    def test_redis_add_open_game(self):
        """An open game can be registered in Redis and closed again."""
        with server.Battleship(REDIS_HOST, db=1) as bs:
            conn = bs.redis_conn
            conn.flushdb()
            # Fresh database: no open-games key yet.
            self.assertIsNone(conn.get(bs.OpenGames))
            new_game = Game('New game!')
            self.assertTrue(bs.add_open_game(new_game))
            open_ids = conn.lrange(bs.OpenGames, 0, -1)
            self.assertEqual(len(open_ids), 1)
            self.assertEqual(open_ids[0].decode('utf-8'), new_game.id)
            # Closing removes the entry again.
            self.assertTrue(bs.close_open_game(new_game))
            open_ids = conn.lrange(bs.OpenGames, 0, -1)
            self.assertEqual(len(open_ids), 0)

    def test_find_game(self):
        """find_game_or_create makes a new game when none is open, and finds
        an existing open game when one is registered in Redis."""
        with server.Battleship(REDIS_HOST, db=1) as bs:
            conn = bs.redis_conn
            conn.flushdb()
            _, is_new = bs.find_game_or_create()
            self.assertTrue(is_new)
            pending = Game('new game')
            self.assertTrue(bs.add_open_game(pending))
            found_game, is_new = bs.find_game_or_create()
            self.assertFalse(is_new)
            self.assertEqual(pending.id, found_game.id)
| 2,094 | 651 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from optparse import make_option
from apiclient import errors
from apiclient.discovery import build
from dateutil import parser
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
import pytz
from provisioning import models
class Command(BaseCommand):
    """Management command that records LastSeenEvent rows for a tenant by
    querying Okta, the Google Admin SDK and AirWatch.
    """

    # Check
    # https://developers.google.com/admin-sdk/directory/v1/guides/authorizing
    # for all available scopes
    OAUTH_SCOPE = ['https://www.googleapis.com/auth/admin.directory.user',
                   'https://www.googleapis.com/auth/admin.directory.device.chromeos']

    help = 'Updates the last seen timestamp for provisioned services.'

    option_list = BaseCommand.option_list + (
        make_option('--skip-okta',
                    action='store_true',
                    dest='skip-okta',
                    default=False,
                    help='Do not query Okta. Default=False'),
        make_option('--skip-google',
                    action='store_true',
                    dest='skip-google',
                    default=False,
                    help='Do not query Google. Default=False'),
        make_option('--skip-airwatch',
                    action='store_true',
                    dest='skip-airwatch',
                    default=False,
                    help='Do not query AirWatch. Default=False'),
        make_option('--tenant',
                    dest='tenant',
                    default=1,
                    help='Tenant id to do this for. Default=1'),
    )

    def _parseDateTime(self, stamp):
        """Parse *stamp* and return it as a naive datetime in UTC."""
        parsed = parser.parse(stamp)
        utc = parsed.astimezone(pytz.utc)
        stripped = utc.replace(tzinfo=None)
        return stripped

    def handle(self, *args, **options):
        """Collect last-seen data for every user of the selected tenant."""
        tenant = models.Tenant.objects.get(pk=options['tenant'])
        okta_item = models.Okta.objects.get(tenant=tenant)
        users = models.User.objects.filter(services=okta_item)
        software_contenttype = ContentType.objects.get_for_model(
            models.Software)
        google_software = models.Software.objects.get(name='Google Account')
        device_contenttype = ContentType.objects.get_for_model(
            models.Device)
        self.stdout.write("Okta users in database.")
        # Map "user@domain" -> {'username', 'user'}; augmented with
        # 'okta_id' while processing the Okta section below.
        user_dict = {}
        for user in users:
            username = '%s@%s' % (user.username, tenant.email_domain)
            user_dict[username] = {'username': user.username, 'user': user}
            self.stdout.write(username)
        if not options['skip-okta']:
            self.stdout.write("")
            self.stdout.write("Get Okta user logins.")
            okta_users = okta_item.get_users()
            okta_item_type = ContentType.objects.get_for_model(okta_item)
            for okta_user in okta_users:
                okta_username = okta_user['profile']['login']
                if okta_username in user_dict:
                    user_dict[okta_username].update(
                        {'okta_id': okta_user['id']})
                    if okta_user['lastLogin']:
                        models.LastSeenEvent.objects.create(
                            user=user_dict[okta_username]['user'],
                            item_type=okta_item_type,
                            object_id=okta_item.id,
                            last_seen=self._parseDateTime(okta_user['lastLogin']))
                    self.stdout.write(
                        '%s - %s' % (okta_username, okta_user['lastLogin']))
            # Get Okta application SSO events
            self.stdout.write("")
            self.stdout.write("Get Okta SSO events.")
            okta_client = okta_item.get_client()
            usersoftwares = models.UserProvisionable.objects.filter(
                user__tenant=tenant,
                item_type=software_contenttype,
                service=okta_item).exclude(
                    object_id=google_software.id)
            # Google account login is done below directly from google
            for usersoftware in usersoftwares:
                oktatenantservice = usersoftware.item.tenantserviceasset_set.get(
                    service=okta_item)
                event = okta_client.last_sso_event(
                    user_dict[usersoftware.user.tenant_email]['okta_id'],
                    oktatenantservice.get('application_id'))
                if event:
                    models.LastSeenEvent.objects.create(
                        user=usersoftware.user,
                        item_type=software_contenttype,
                        object_id=usersoftware.object_id,
                        last_seen=self._parseDateTime(event['published']))
                self.stdout.write(
                    '%s - %s -> %s' % (usersoftware.user.tenant_email,
                                       usersoftware.item.name,
                                       event and event['published'] or "never"))
        if not options['skip-google']:
            # Get Google lastseen
            google_tenant_asset = tenant.tenantasset_set.get(
                asset__name='Google Account')
            # Run through the OAuth flow and retrieve credentials
            certificate_file_path = os.path.join(
                settings.CERTIFICATES_DIR, google_tenant_asset.get('CERTIFICATE_FILE_NAME')
            )
            with open(certificate_file_path) as f:
                private_key = f.read()
            credentials = SignedJwtAssertionCredentials(
                google_tenant_asset.get('CLIENT_EMAIL'),
                private_key,
                scope=self.OAUTH_SCOPE,
                sub=google_tenant_asset.get('ADMINISTRATOR')
            )
            # Create an httplib2.Http object and authorize it with our
            # credentials
            http = httplib2.Http()
            http = credentials.authorize(http)
            directory_service = build('admin', 'directory_v1', http=http)
            # Get Google Account lastseen information
            all_users = []
            page_token = None
            params = {'customer': 'my_customer'}
            self.stdout.write("")
            self.stdout.write("Get Google Account users")
            # Page through the Directory API until nextPageToken runs out.
            while True:
                try:
                    if page_token:
                        params['pageToken'] = page_token
                    current_page = directory_service.users().list(
                        **params).execute()
                    all_users.extend(current_page['users'])
                    page_token = current_page.get('nextPageToken')
                    if not page_token:
                        break
                except errors.HttpError as error:
                    self.stderr.write('An error occurred: %s' % error)
                    break
            for user in all_users:
                # The 1970 epoch timestamp is Google's "never logged in" marker.
                if user['lastLoginTime'] == '1970-01-01T00:00:00.000Z':
                    continue
                if models.UserProvisionable.objects.filter(
                        user__username=user['primaryEmail'].split('@')[0],
                        user__tenant=tenant,
                        item_type=software_contenttype,
                        object_id=google_software.id).exists():
                    models.LastSeenEvent.objects.create(
                        user=user_dict[user['primaryEmail']]['user'],
                        item_type=software_contenttype,
                        object_id=google_software.id,
                        last_seen=self._parseDateTime(user['lastLoginTime']))
                    self.stdout.write(
                        user['primaryEmail'] + " - " + user['lastLoginTime'])
            # Get Google Device lastseen information
            all_devices = []
            page_token = None
            params = {'customerId': 'my_customer'}
            while True:
                try:
                    if page_token:
                        params['pageToken'] = page_token
                    current_page = directory_service.chromeosdevices().list(
                        **params).execute()
                    all_devices.extend(current_page['chromeosdevices'])
                    page_token = current_page.get('nextPageToken')
                    if not page_token:
                        break
                except errors.HttpError as error:
                    self.stderr.write('An error occurred: %s' % error)
                    break
            self.stdout.write("")
            self.stdout.write("Get Google Devices")
            chromebook_device = models.Device.objects.get(name='Chromebook')
            for device in all_devices:
                if models.UserProvisionable.objects.filter(
                        user__username=device['annotatedUser'].split('@')[0],
                        user__tenant=tenant,
                        item_type=device_contenttype,
                        object_id=chromebook_device.id).exists():
                    models.LastSeenEvent.objects.create(
                        user=user_dict[device['annotatedUser']]['user'],
                        item_type=device_contenttype,
                        object_id=chromebook_device.id,
                        last_seen=self._parseDateTime(device['lastSync']))
                    self.stdout.write('%s - %s -> %s' % (device['annotatedUser'],
                                                         device['serialNumber'],
                                                         device['lastSync']))
        if not options['skip-airwatch']:
            self.stdout.write("")
            self.stdout.write("Get AirWatch Devices & Platform usage")
            airwatch_item = models.AirWatch.objects.get(tenant=tenant)
            airwatch_client = airwatch_item.get_client()
            endpoint = 'mdm/devices/search'
            iPad_device = models.Device.objects.get(name='iPad')
            iPhone_device = models.Device.objects.get(name='iPhone')
            airwatch_item_type = ContentType.objects.get_for_model(airwatch_item)
            airwatch_users = models.User.objects.filter(services=airwatch_item)
            for user in airwatch_users:
                response = airwatch_client.call_api(
                    'GET', endpoint, params={'user': user.username})
                response.raise_for_status()
                if response.status_code == 200:
                    devices = response.json().get('Devices')
                    # NOTE(review): raises IndexError (or TypeError) when
                    # 'Devices' is empty or missing — confirm the API always
                    # returns at least one device for a provisioned user.
                    newest_seen = parser.parse(devices[0]['LastSeen'])
                    for device in devices:
                        seen = parser.parse(device['LastSeen'])
                        if seen > newest_seen:
                            newest_seen = seen
                        if device['Model'].startswith(iPad_device.name):
                            device_item = iPad_device
                        elif device['Model'].startswith(iPhone_device.name):
                            device_item = iPhone_device
                        else:
                            # NOTE(review): device_item stays None here, so the
                            # create() below fails on device_item.id — assumes
                            # all models start with 'iPad'/'iPhone'; confirm.
                            device_item = None
                        models.LastSeenEvent.objects.create(
                            user=user,
                            item_type=device_contenttype,
                            object_id=device_item.id,
                            last_seen=seen)
                        self.stdout.write(
                            "%s - %s -> %s" % (user, device['SerialNumber'],
                                               device['LastSeen']))
                    self.stdout.write("%s -> %s" % (user, newest_seen))
                    # Record the most recent sync as the AirWatch-service event.
                    models.LastSeenEvent.objects.create(
                        user=user,
                        item_type=airwatch_item_type,
                        object_id=airwatch_item.id,
                        last_seen=newest_seen)
| 12,643 | 3,243 |
# games module
# Demo entry point: imports and immediately runs the goalkeeper-name routine
# from the Football package.
from Football.goalkeeper import name_goalkeeper

name_goalkeeper()
| 83 | 25 |
import csv
import sys


def _longest_run(sequence, sub):
    """Return the length of the longest run of consecutive, back-to-back
    repeats of *sub* inside *sequence*."""
    longest = 0
    step = len(sub)
    for start in range(len(sequence)):
        count = 0
        while sequence[start + count * step:start + (count + 1) * step] == sub:
            count += 1
        if count > longest:
            longest = count
    return longest


# error checking (fix: the usage message previously said "Usage: Usage:")
if len(sys.argv) != 3:
    print("ERROR. Usage: python dna.py data.csv sequence.txt")
    sys.exit(1)

# opening csv file and sequence text; 'with' guarantees the handles are
# closed (the originals were never closed)
with open(sys.argv[1]) as csv_file:
    people = list(csv.DictReader(csv_file))
with open(sys.argv[2]) as seq_file:
    dna_sequence = seq_file.read()

# Short Tandem Repeats (STRs). An STR is a short sequence of DNA bases that
# tends to repeat consecutively numerous times at specific locations inside
# of a person's DNA. The STR names are every CSV column except "name".
str_names = [key for key in people[0] if key != "name"]

# CSV values are strings, so store every count as a string for a direct dict
# comparison (the original mixed int 0 with str counts, which could make a
# legitimate zero-count match fail silently)
str_counts = {s: str(_longest_run(dna_sequence, s)) for s in str_names}

for person in people:
    profile = {k: v for k, v in person.items() if k != "name"}
    if profile == str_counts:
        print(person["name"])
        sys.exit(0)

print("No match")
| 1,470 | 441 |
import os, sys, re, natsort

# Directory to list (argv[1]) and output file (argv[2]), resolved
# relative to the current working directory via os.path.join instead
# of manual '/' concatenation.
src_dir = os.path.join(os.getcwd(), str(sys.argv[1]))
# os.listdir already returns a list; the old copying comprehension was a no-op.
ls = natsort.natsorted(os.listdir(src_dir), reverse=False)
with open(os.path.join(os.getcwd(), str(sys.argv[2])), 'w') as f:
    for item in ls:
        f.write("%s\n" % item)
| 311 | 136 |
import numpy as np
import cv2
import random
def int_(x):
    """Round x up to the next integer when it has a fractional part."""
    whole = int(x)
    if whole == x:
        return whole
    return int(x + 1)
def rdint(x):
    """Round x to the nearest integer (banker's rounding) and return an int."""
    nearest = round(x)
    return int(nearest)
def IoU(box1, box2):
    """Intersection-over-Union of two (x, y, w, h) boxes.

    Returns 0 when the boxes do not overlap.
    """
    # Corners of the intersection rectangle.
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[0]+box1[2], box2[0]+box2[2])
    y2 = min(box1[1]+box1[3], box2[1]+box2[3])
    if x1 >= x2 or y1 >= y2:
        return 0
    inter = (x2 - x1) * (y2 - y1)
    # Fix: both areas were computed as box1[2]*box2[3] (copy-paste bug),
    # and the denominator omitted the intersection, so IoU(b, b) != 1.
    a1 = box1[2] * box1[3]
    a2 = box2[2] * box2[3]
    return inter * 1.0 / (a1 + a2 - inter)
def transform(bbox, delta=(0,0,-0,-0)):
    """Apply a (dx, dy, dw, dh) delta to an (x, y, w, h) box in the
    inverse direction (divide by exp, subtract offsets) and return the
    result rounded to ints.

    Fix: exp() was called without being imported; use np.exp (numpy is
    imported at the top of this file).
    """
    (x, y, w, h) = bbox
    cx = x + 0.5 * w
    cy = y + 0.5 * h
    w = w / np.exp(delta[2])
    h = h / np.exp(delta[3])
    cx -= delta[0] * w
    cy -= delta[1] * h
    x = cx - 0.5 * w
    y = cy - 0.5 * h
    return (int(round(x)), int(round(y)), int(round(w)), int(round(h)))
def transform_inv(bbox, delta):
    """Apply a (dx, dy, dw, dh) delta to an (x, y, w, h) box in the
    forward direction (add offsets, multiply by exp) and return the
    result rounded to ints.

    Fix: exp() was called without being imported; use np.exp (numpy is
    imported at the top of this file).
    """
    (x, y, w, h) = bbox
    cx = x + 0.5 * w
    cy = y + 0.5 * h
    cx += delta[0] * w
    cy += delta[1] * h
    w = w * np.exp(delta[2])
    h = h * np.exp(delta[3])
    x = cx - 0.5 * w
    y = cy - 0.5 * h
    return (int(round(x)), int(round(y)), int(round(w)), int(round(h)))
def padding(bbox, scale, minpad):
    """Grow bbox on every side by max(scale * shorter_side, minpad) pixels."""
    x, y, w, h = bbox
    pad = max(int(scale * min(w, h)), minpad)
    return (x - pad, y - pad, w + 2 * pad, h + 2 * pad)
def scaleBox(bbox, scale):
    """Scale every component of an (x, y, w, h) box and round to ints."""
    return tuple(rdint(scale * v) for v in bbox)
| 1,281 | 696 |
# Public submodules re-exported by this package.
__all__ = ['ais', 'fid', 'hmc']
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM, Conv1D, GRU
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.models import Model
class ConvLstm:
    """Builds a TimeDistributed Conv1D + LSTM binary classifier."""

    def __init__(self, **kwargs):
        # Length of each sub-sequence and number of features per timestep.
        self.n_length = 100
        self.n_features = 1

    def get_model(self) -> Model:
        """Assemble and compile the Conv-LSTM model.

        Returns a compiled Sequential model ending in a sigmoid unit for
        binary classification.
        """
        model = Sequential()
        # input_shape is only meaningful on the first layer of a Sequential
        # model; it was redundantly repeated on the second and third Conv1D
        # layers and has been dropped there (Keras ignored it anyway).
        model.add(TimeDistributed(Conv1D(filters=8, kernel_size=5, activation='relu'),
                                  input_shape=(None, self.n_length, self.n_features)))
        model.add(TimeDistributed(Conv1D(filters=4, kernel_size=5, activation='relu')))
        model.add(TimeDistributed(Conv1D(filters=2, kernel_size=5, activation='relu')))
        model.add(Dropout(0.5))
        model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
        model.add(TimeDistributed(Flatten()))
        model.add(LSTM(512))
        model.add(Dropout(0.25))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model
if __name__ == '__main__':
    # Quick smoke check: build the model and print its architecture.
    ConvLstm().get_model().summary()
| 1,651 | 521 |
#############################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
############################################################################
"""
Specify the brief gpu_predictor.py
Author: map(wushilei@baidu.com)
Date: 2019/07/21 20:09:58
Brief:
This class can cover single-node multi-gpus and multi-node multi-gpus
The corresponding platform is gpu and slurm.
"""
from __future__ import print_function
import os
import sys
import argparse
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet
#from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
#from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
from paddle.fluid.incubate.fleet.base import role_maker
from utils.object_transform import ObjectTransform
from cpu_predictor import CPUPredictor
from gpu_mixin import GPUMixin
class GPUPredictor(GPUMixin, CPUPredictor):
    """
    GPU predictor class.
    Covers single-node multi-GPU and multi-node multi-GPU setups
    (gpu and slurm platforms).
    """

    def __init__(self):
        super(GPUPredictor, self).__init__()

    def split_filelist(self, FLAGS):
        """
        Partition FLAGS.file_list across trainers when running on
        multiple GPUs; single-GPU runs are left untouched.
        """
        if not self.is_multi_gpu(FLAGS):
            return
        files = FLAGS.file_list.split(',')
        current_id = int(os.getenv("PADDLE_TRAINER_ID"))
        # Read (and validate as int) even though unused, matching the
        # original behaviour of failing fast when the env var is missing.
        num_trainers = int(os.getenv("PADDLE_TRAINERS_NUM"))
        endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',')
        # Register this worker with the collective fleet before splitting.
        role = role_maker.UserDefinedCollectiveRoleMaker(
            current_id=current_id, worker_endpoints=endpoints)
        fleet.init(role)
        FLAGS.file_list = ','.join(fleet.split_files(files))
if __name__ == '__main__':
    # Run prediction; exit non-zero on failure so callers can detect it.
    predictor = GPUPredictor()
    if not predictor.start(sys.argv):
        sys.exit(-1)
| 2,112 | 698 |
"""SPARQL package.
Modules:
dataset_queries
dataservice_queries
"""
| 77 | 30 |
from django.http import request
from django.shortcuts import render,HttpResponse
import cv2
import numpy as np
import base64
import joblib
from numpy.core.defchararray import join
import sklearn
import pywt
def w2d(img, mode='haar', level=1):
    """Wavelet feature image: decompose, zero the approximation
    coefficients, and reconstruct, keeping only detail (edge) content.
    """
    # Grayscale, then float scaled to [0, 1].
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gray = np.float32(gray) / 255
    # Decompose and drop the low-frequency approximation band.
    coeffs = list(pywt.wavedec2(gray, mode, level=level))
    coeffs[0] *= 0
    # Reconstruct and rescale back to uint8.
    reconstructed = pywt.waverec2(coeffs, mode) * 255
    return np.uint8(reconstructed)
def get_cv2_image_from_base64_string(b64str):
    """Decode a data-URL style base64 string ("header,payload") into a
    BGR OpenCV image."""
    payload = b64str.split(',')[1]
    raw = np.frombuffer(base64.b64decode(payload), np.uint8)
    return cv2.imdecode(raw, cv2.IMREAD_COLOR)
def clf(img_base64):
    """Classify a base64-encoded X-ray image with the pickled SVM.

    Returns a one-element list holding the predicted class index and the
    per-class probabilities (rounded to 2 decimals).
    """
    img = get_cv2_image_from_base64_string(img_base64)
    # Raw pixels plus wavelet (edge) features, both resized to 32x32.
    raw_small = cv2.resize(img, (32, 32))
    har_small = cv2.resize(w2d(img, 'db1', 5), (32, 32))
    stacked = np.vstack((raw_small.reshape(32 * 32 * 3, 1),
                         har_small.reshape(32 * 32 * 1, 1)))
    feature_len = 32 * 32 * 3 + 32 * 32
    features = stacked.reshape(1, feature_len).astype(float)
    # Model is loaded on every call; presumably acceptable for this app's
    # traffic — TODO consider caching at module level.
    with open('static/model_svm.pkl', 'rb') as f:
        model_ = joblib.load(f)
    return [{
        'prediction': model_.predict(features)[0],
        'probability': np.round(model_.predict_proba(features), 2).tolist()[0],
    }]
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request,'home.html')
def classify_xray(request):
    """Handle the X-ray classification form.

    POST: run the classifier on the submitted base64 image and render the
    result. Any other method previously crashed with an UnboundLocalError
    because ``context`` was only assigned inside the POST branch while the
    final render statement sat outside the ``if``; render with an empty
    context instead.
    """
    context = {}
    if request.method == "POST":
        print('post')
        img_string = request.POST.get('img_string', '')
        pred = clf(img_string)
        keys_ = ['COVID-19', 'Other Lung Infection', 'Normal', 'Viral Pneumonia']
        print(pred)
        pred_no = pred[0]['prediction']
        context = {
            'prediction': keys_[pred_no],
            # Probability of the predicted class as an integer percentage.
            'probability': int(float(pred[0]['probability'][pred_no]) * 100),
        }
    return render(request, 'classify.html', context)
# def test_(request):
# return render(request,'test.html')
| 2,368 | 904 |
import string
import os
import requests
from tube_dl.extras import Output
# Browser-like headers for YouTube requests.  Fix: the referer contained a
# stray space ("https: //youtube.com"), which is not a valid URL.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36', 'referer': 'https://youtube.com'}
class Format:
    """One downloadable stream of a video.

    Wraps a single stream_data dict, exposes its fields as attributes,
    and provides a download() helper.
    """

    def __init__(self, category, description, title, stream_data: dict):
        self.data = stream_data
        self.category = category
        self.description = description
        self.title = title
        self.itag = self.data['itag']
        self.mime = self.data['mimeType']
        self.acodec = self.data['acodec']
        self.vcodec = self.data['vcodec']
        self.size = self.data['size']
        self.fps = self.data['fps']
        self.quality = self.data['quality']
        self.abr = self.data['abr']
        self.url = self.data['url']
        self.adaptive = self.data['adaptive']
        self.progressive = self.data['progressive']

    def safe_filename(self, name: str):
        """Drop every character that is not safe in a file name."""
        valid_chars = "-_() %s%s" % (string.ascii_letters, string.digits)
        filename = ''.join(char for char in name if char in valid_chars)
        return filename

    def download(self, force_filename=False, onprogress=None, path=None, file_name=None, skip_existing=False):
        '''
        Download the stream selected by the user.

        Params:
            force_filename: Bool - When True, use file_name verbatim instead of sanitising it.
            onprogress: Function - If defined, called after each chunk with keyword args:
                bytes_done - count of bytes downloaded so far
                total_bytes - total file size, taken from the content-length header
            path: Str - Directory to save the file in (defaults to the current directory).
            file_name: Str - File name to use; unless force_filename is set it is passed
                through safe_filename to strip unsafe characters.
            skip_existing: Bool - When True, do not re-download an already existing file.

        Returns an Output(description, final_path) record.
        '''
        url = self.url
        # Guard against calling download on a list of formats.
        if not isinstance(url, str):
            raise Exception('Download should be a single Format. Not List(Format)')
        if file_name is None:
            file_name = self.title
        if force_filename is False:
            file_name = self.safe_filename(file_name)
        _, extension = self.mime.split('/')
        if path is None:
            path = os.getcwd()
        final_path = f'{path}{os.path.sep}{file_name}.{extension}'

        def start():
            # Retry until the download completes.  The original retried by
            # recursing from the except block, which leaked the open file
            # handle and could overflow the stack on a flaky connection.
            while True:
                response = requests.get(url, stream=True, headers=headers)
                total_size_in_bytes = int(response.headers.get('content-length', 0))
                block_size = 1024
                bytes_done = 0
                try:
                    with open(final_path, 'wb') as f:
                        for data in response.iter_content(block_size):
                            f.write(data)
                            bytes_done += block_size
                            if onprogress is not None:
                                onprogress(bytes_done=bytes_done,
                                           total_bytes=total_size_in_bytes)
                    return
                except Exception:
                    continue

        if skip_existing is False:
            start()
        elif os.path.exists(final_path) is False:
            start()
        else:
            print('Skipping Files : Existing check is True')
        return Output(self.description, final_path)

    def __repr__(self):
        return f'<Format : itag={self.itag}, mimeType={self.mime}, size={self.size}, acodec={self.acodec}, vcodec={self.vcodec}, fps={self.fps}, quality={self.quality}, abr={self.abr}, progressive={self.progressive}, adaptive={self.adaptive} >'
class list_streams:
    """Thin wrapper around a list of Format objects with filtering helpers."""

    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return f'{self.data}'

    def first(self):
        """Return the first stream."""
        return self.data[0]

    def last(self):
        """Return the last stream."""
        return self.data[-1]

    def filter_by(self, progressive=False, only_audio=False, adaptive=False,
                  itag=None, fps=None, quality=None, no_audio=None):
        """Collect streams matching any of the enabled criteria.

        NOTE(review): a stream satisfying several criteria is appended once
        per matching criterion, mirroring the original behaviour.  The
        result replaces self.data and is also returned.
        """
        matched = []
        for stream in self.data:
            if no_audio is True and stream.acodec is None:
                matched.append(stream)
            if only_audio is True and stream.mime.split('/')[0].lower() == 'audio':
                matched.append(stream)
            if quality is not None and stream.quality.lower() == quality.lower():
                matched.append(stream)
            if fps is not None and stream.fps == fps:
                matched.append(stream)
            if itag is not None and stream.itag == itag:
                matched.append(stream)
            if adaptive is True and stream.adaptive is True:
                matched.append(stream)
            if progressive is True and stream.progressive is True:
                matched.append(stream)
        self.data = matched
        return matched
| 5,060 | 1,490 |
# coding=utf-8
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns(
    '',
    url(r'^allauth/', include('allauth.urls')),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('main.urls')),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

if settings.DEBUG:
    import debug_toolbar
    # Fix: patterns() requires the view-prefix string as its first
    # argument; it was missing here, so the url() entry was silently
    # consumed as the prefix and the route was never registered.
    urlpatterns += patterns(
        '',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
| 619 | 206 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 27 19:10:03 2017
@author: Sergio Cristauro Manzano
"""
class Constantes:
    """Application-wide constants: month names, database credentials and
    outlier-detection tuning."""
    # Month names in Spanish, indexed January..December.
    Meses = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio','Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']
    DB_Name = 'DatabaseTFG'  # database name
    IP_BD = '127.0.0.1'  # database host
    UsuarioBD = 'root'  # database user
    ContaminacionEllipticEnvelope = 0.46161  # contamination rate for EllipticEnvelope outlier detection
import pytest
import requests
import json
from issuer import Issuer
class TestJWT:
    """Integration tests for the JWT-VC protected endpoints."""

    def test_valid_authorization_get(self):
        # A freshly issued credential must be accepted on GET.
        vc = Issuer().issue_valid_vc()
        hdrs = {'Authorization': 'Bearer ' + vc, 'Accept': 'application/json'}
        resp = requests.get("http://localhost:9000/secure/jwt-vc", headers=hdrs)
        print(resp.text)
        assert resp.status_code == 200

    def test_valid_authorization_post(self):
        # A freshly issued credential must be accepted on POST as well.
        vc = Issuer().issue_valid_vc()
        hdrs = {'Authorization': 'Bearer ' + vc,
                'Accept': 'application/json',
                'Content-Type': 'application/json'}
        payload = {'on': False}
        resp = requests.post("http://localhost:9000/secure/jwt-vc",
                             headers=hdrs, data=json.dumps(payload))
        print(resp.text)
        assert resp.status_code == 200

    def test_valid_authorization_get_with_exp(self):
        # A credential carrying a future exp claim is still valid.
        vc = Issuer().issue_valid_vc_with_exp()
        hdrs = {'Authorization': 'Bearer ' + vc, 'Accept': 'application/json'}
        resp = requests.get("http://localhost:9000/secure/jwt-vc", headers=hdrs)
        print(resp.text)
        assert resp.status_code == 200

    def test_expired(self):
        # An expired credential must be rejected with 401.
        vc = Issuer().issue_expired()
        hdrs = {'Authorization': 'Bearer ' + vc, 'Accept': 'application/json'}
        resp = requests.get("http://localhost:9000/secure/jwt-vc", headers=hdrs)
        print(resp.text)
        assert resp.status_code == 401
| 1,520 | 468 |
# Classifying athletes: read name and birth year, compute age, print category.
from datetime import date
from time import sleep
# Prompts and output stay in Portuguese (user-facing runtime strings).
n = str(input('\033[1;30mDigite o seu nome completo: ')).strip().title()
a = int(input('Digite o seu ano de nascimento: '))
anoatual = date.today().year  # current year
i = anoatual - a  # age in years (ignores whether the birthday has passed this year)
print('')
sleep(1.75)
print('ANALISANDO...')
sleep(2)
print('')
print('=-=' * 15)
print(f'Nome Completo: {n}')
print(f'Idade: {i} anos.\033[m')
# Category thresholds: <=9 MIRIM, 10-14 INFANTIL, 15-19 JUNIOR, 20-25 SENIOR, else MASTER.
if i <= 9:
    print(f'\033[1;31mCategoria: MIRIM')
elif 9 < i <= 14:
    print('\033[1;32mCategoria: INFANTIL')
elif 14 < i <= 19:
    print('\033[1;33mCategoria: JUNIOR')
elif 19 < i <= 25:
    print('\033[1;34mCategoria: SÊNIOR')
else:
    print('\033[1;35mCategoria: MASTER')
print('\033[1;30m=-=' * 15)
| 695 | 334 |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 12, 2021
Developed for UIF to more easily handle the growing number of alumni they have,
and to track interactions with said alumni.
Final Project for CCAC DAT-281
@author: BKG
"""
import os
import sqlite3
from sqlite3 import Error
import pandas as pd
def main(location):
    """
    From the db, pulls the following columns from the listed tables, formats
    the dataframe, then saves it to a .csv file. (see: 'query_read')
    Specifically, this gives the user a list of all alumni with their ID nums.
    Parameters
    ----------
    location : STR
        String of the path to the folder the user previously selected.
    Returns
    -------
    None.
    """
    query = ''' SELECT Alumni_ID.ID_number, first_name, last_name,
            graduation_year, CORE_student, birthday
            FROM Alumni_ID
            INNER JOIN Basic_Info on Basic_Info.ID_number = Alumni_ID.ID_number
            ORDER BY last_name ASC
            '''
    connection = _db_connection()
    output = pd.read_sql(query, con=connection)
    connection.close()
    col_names = ['ID Number',  # print-friendly column names
                 'First Name',
                 'Last Name',
                 'Graduation Year',
                 'CORE?',
                 'Birthday']
    # Fix: this was "output.cloumns = col_names", which silently set a
    # stray attribute instead of renaming the DataFrame columns.
    output.columns = col_names
    file_name = 'Alumni List.csv'
    os.chdir(location)
    output.to_csv(file_name, index=False, encoding='utf-8')
def _db_connection():
    '''
    Connects to the .db file.

    Returns
    -------
    connection : sqlite db connection

    Raises
    ------
    sqlite3.Error
        Re-raised after printing.  The original printed the Error *class*
        (not the caught exception) and then crashed with an
        UnboundLocalError because `connection` was never assigned.
    '''
    try:
        return sqlite3.connect('Data\\UIF_Alumni_DB.db')
    except Error as err:
        print(err)
        raise
if __name__ == "__main__":
main()
| 1,828 | 548 |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django import forms
from django.core import validators
from datetime import date
# Create your models here.
class UserProfileManager(BaseUserManager):
    """Manager for user profiles"""

    def create_user(self, email, name, password=None):
        """Create and persist a regular user profile."""
        if not email:
            raise ValueError('Users Must have an email address')
        normalized = self.normalize_email(email)
        user = self.model(email=normalized, name=name)
        user.set_password(password)  # stores a hash, never the raw password
        user.save(using=self._db)
        return user

    def create_superuser(self, email, name, password):
        """Create and persist a superuser with staff privileges."""
        superuser = self.create_user(email, name, password)
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save(using=self._db)
        return superuser
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database Model for users in system"""
    # Unique e-mail address; doubles as the login identifier (USERNAME_FIELD).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # Custom Manager To handle users with email field as the main field
    # instead of username
    objects = UserProfileManager()
    # Done to handle Django Admin
    USERNAME_FIELD = 'email'
    # Required Field can include more fields
    REQUIRED_FIELDS = ['name']
    def get_full_name(self):
        """Retrieve Full Name"""
        return self.name
    def get_short_name(self):
        """Return the short display name (same as the full name here)."""
        return self.name
    def __str__(self):
        """Return String representation of user"""
        return self.name
# Create your models here.
class Score(models.Model):
    """Per-user quiz score."""
    # One score row per user; deleting the user deletes the score.
    user = models.OneToOneField(UserProfile, on_delete=models.CASCADE)
    score = models.PositiveIntegerField(default=0)
    # Timestamp of the most recent answer (auto-updated on every save).
    last_answer = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Return the owning user's email."""
        return self.user.email
class Answer(models.Model):
    """A submitted answer (free text only)."""
    answer = models.CharField(max_length=255)
class AnswerChecker(models.Model):
    """Ordered reference answers used to check submissions."""
    # Position of this answer in the sequence; unique, so the order is total.
    index = models.PositiveIntegerField(default=0, unique=True)
    answer = models.CharField(max_length=255)
    def __str__(self):
        return self.answer
    def ans_value(self):
        # NOTE(review): duplicates __str__; kept for callers that use it.
        return self.answer
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Data Source
import yfinance as yf
import time, datetime, math
from datetime import datetime
import sqlite3
#Interval required 5 minutes
con = sqlite3.connect("DB/stocks.db")
#con.row_factory = sqlite3.Row
stock = 'UBER'
# Fix: use a parameterized query instead of string concatenation — safer
# if `stock` ever becomes user input, and avoids quoting bugs.
data = pd.read_sql_query("SELECT * FROM stocks_hist WHERE symbol=? AND Datetime >= '2021-04-22' ORDER BY Datetime DESC limit 10000 ", con, index_col='Datetime', params=(stock,))
data.index = pd.to_datetime(data.index)
data= data.sort_index()
print(data)
#RSI CALC
data['Return'] = np.log(data['Close'] / data['Close'].shift(1) )
data['Movement'] = data['Close'] - data['Close'].shift(1)
data['up'] = np.where((data['Movement'] > 0) ,data['Movement'],0)
data['down'] = np.where((data['Movement'] < 0) ,data['Movement'],0)
window_length = 14
#calculate moving average of the last 14 days gains
up = data['up'].rolling(window_length).mean()
#calculate moving average of the last 14 days losses
down = data['down'].abs().rolling(window_length).mean()
RS = up / down
#Bollinger bands, 1 std and 2 std
data['MA20'] = data['Close'].rolling(window=20).mean()
data['20dSTD'] = data['Close'].rolling(window=20).std()
data['Upper'] = data['MA20'] + (data['20dSTD'] * 2)
data['Lower'] = data['MA20'] - (data['20dSTD'] * 2)
'''data['Upper1s'] = data['MA20'] + (data['20dSTD'] * 1)
data['Lower1s'] = data['MA20'] - (data['20dSTD'] * 1)
data['LBPer']=(data['Close']/data['Lower'])-1
data['UBPer']=(data['Upper']/data['Close'])-1
data['UBPer1s']=(data['Close']/data['Upper1s'])-1'''
data['AD'] = 0
#ADL Line
data['CMFV'] = (((data['Close']-data['Low'])-(data['High']-data['Close']))/(data['High']-data['Low']))*data['Volume']
data['AD'] = data['CMFV'].rolling(14, min_periods=14).sum()
data['AD'] = data['AD'].shift(1)
data['RSI'] = 100.0 - (100.0 / (1.0 + RS))
#data = data[data.index.strftime('%Y-%m-%d') == '2021-02-27']
#Print data
print(data)
'''data[['Close','AD']].plot(figsize=(10,4))
plt.grid(True)
plt.title(stock + ' AD')
plt.axis('tight')
plt.ylabel('Price')
plt.show()'''
| 2,071 | 847 |
# proxy module
from __future__ import absolute_import
from blockcanvas.class_tools.search_package import *
| 107 | 29 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
# Acupoint metadata for LU10 (yuji) on the lung channel.
SPELL=u'yújì'  # pinyin spelling
CN=u'鱼际'  # Chinese name
NAME=u'yuji24'  # internal identifier
CHANNEL='lung'
CHANNEL_FULLNAME='LungChannelofHand-Taiyin'
SEQ='LU10'  # standard acupoint code
if __name__ == '__main__':
    pass
| 216 | 125 |
import csv
import datetime
import os
import gym
from gym.spaces import space
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
from gym.spaces.multi_discrete import MultiDiscrete
from definitions import ROOT_DIR
from stable_baselines_model_based_rl.utils.configuration import Configuration
from stable_baselines_model_based_rl.utils.spaces.base import SpaceType
from stable_baselines_model_based_rl.utils.spaces.factory import space_value_from_gym
def __update_action_input_config(config: Configuration, action_space: space, action_col_names):
    """Write the action-space description (type, bounds, column names) into config.

    Raises NotImplementedError for MultiDiscrete spaces (not supported yet)
    and ValueError for any other unknown space type; previously an unknown
    type fell through and crashed with an UnboundLocalError on action_type.
    """
    if isinstance(action_space, Discrete):
        action_type = 'DISCRETE'
    elif isinstance(action_space, MultiDiscrete):
        action_type = 'MULTI_DISCRETE'
        raise NotImplementedError('Not yet supported!')  # TODO
    elif isinstance(action_space, Box):
        action_type = 'BOX'
        box_bounds = {
            'low': [float(x) for x in list(action_space.low)],
            'high': [float(x) for x in list(action_space.high)],
        }
        config.set('input_config.action_box_bounds', box_bounds)
    else:
        raise ValueError(f'Unsupported action space type: {type(action_space)!r}')
    config.set('input_config.action_type', action_type)
    config.set('input_config.action_cols', action_col_names)
def __update_observation_input_config(config: Configuration, observation_cols, obs_space: Box):
    """Write observation column names and (for Box spaces) bounds into config."""
    config.set('input_config.observation_cols', observation_cols)
    if not isinstance(obs_space, Box):
        return
    lows = [float(v) for v in list(obs_space.low)]
    highs = [float(v) for v in list(obs_space.high)]
    config.set('input_config.observation_bounds.low', lows)
    config.set('input_config.observation_bounds.high', highs)
def __sample_gym_environment(gym_environment_name: str, data_file: str, episode_count=20,
                             max_steps=100):
    """Sample given gym environment, create proper config and store generated data in data_file."""
    config = Configuration(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                        '../../example_usage/sample_config.yaml'))
    env = gym.make(gym_environment_name)
    # Seed both the environment and its action space for reproducible runs.
    env.np_random.seed(0)
    env.action_space.np_random.seed(0)
    action_col_names = space_value_from_gym(env.action_space, env.action_space.sample(),
                                            SpaceType.ACTION).column_names
    observation_col_names = space_value_from_gym(env.observation_space,
                                                 env.observation_space.sample()).column_names
    __update_action_input_config(config, env.action_space, action_col_names)
    __update_observation_input_config(config, observation_col_names, env.observation_space)
    config.set('gym_sampling.gym_environment_name', gym_environment_name)
    # Fix: use a context manager so the CSV handle is closed even when
    # sampling raises (the original leaked the handle on errors).
    with open(data_file, mode='w', encoding='UTF-8', newline='') as data_file_handle:
        csv_writer = csv.writer(data_file_handle, delimiter=',')
        # CSV Header
        csv_writer.writerow(['EPISODE', 'STEP', *action_col_names, *observation_col_names])
        # SAMPLE DATA
        for episode in range(episode_count):
            print('Start of episode %d' % episode)
            obs = env.reset()
            step = 0
            done = False
            while step < max_steps and not done:
                step += 1
                action = env.action_space.sample()
                action_sv = space_value_from_gym(env.action_space, action, SpaceType.ACTION)
                obs_sv = space_value_from_gym(env.observation_space, obs)
                # Append row to CSV file
                csv_writer.writerow([int(episode), int(step), *action_sv.to_value_list(),
                                     *obs_sv.to_value_list()])
                obs, reward, done, _ = env.step(action)
            print(' --> finished after %d steps' % step)
    return config
def sample_gym_environment(gym_environment_name: str, episode_count=20, max_steps=100,
                           output_path=os.path.join(ROOT_DIR, 'sample_output'),
                           debug: bool = False):
    """
    Sample the given gym environment with the given amount of episodes and maximum
    steps per episode.
    Two files are created:
    - A CSV file, containing the sampled data.
    - A YAML file, containing the configuration that results from the sampled gym
    environment, based on the sample_config.yaml file.
    Both files are stored within the output_path directory. They will be subfolders of directories
    containing the gym environment name and the current time. E.g. the following folder structure
    will be created within output_path: "CartPole-v1/sample_data/2021-05-01-10-00-30/data.csv".
    Args:
        gym_environment_name: Name of the Gym-Environment to sample.
        episode_count: Amount of episodes to use for the sampling.
        max_steps: Maximum steps per episode allowed during sampling.
        output_path: The directory the generated files are stored in.
        debug: Flag whether to enable debugging features, such as naming the output folder based
            on the amount of episodes and max steps.
    Returns:
        data_file: Path to the created data (csv) file.
        config: Configuration object created for the sampled environment.
    """
    # Timestamped destination folder; debug mode encodes the run parameters too.
    time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    dest_dir_name = time if not debug else f'{time}_episodes={episode_count}_max-step={max_steps}'
    final_dir_path = os.path.join(output_path, gym_environment_name, 'sample_data', dest_dir_name)
    os.makedirs(final_dir_path)
    data_file = f'{final_dir_path}/data.csv'
    config = __sample_gym_environment(gym_environment_name, data_file, episode_count=episode_count,
                                      max_steps=max_steps)
    config.set('input_config.input_file_name', os.path.abspath(data_file))
    config.set('model_wrapping.reset.data_file', os.path.abspath(data_file))
    config.set('model_wrapping.reset.type', 'EPISODE_START')
    config.save_config(file=f'{final_dir_path}/config.yaml')
    print(f'Data and config saved in: {final_dir_path}')
    return data_file, config
| 6,044 | 1,835 |
# -*- coding: utf-8 -*-
"""ShapeletTransform test code."""
import numpy as np
from numpy import testing
from sktime.datasets import load_basic_motions, load_unit_test
from sktime.transformations.panel.shapelet_transform import RandomShapeletTransform
def test_st_on_unit_test():
    """Test of ShapeletTransform on unit test data."""
    # Load the unit-test data and pick a fixed random subset of instances.
    X_train, y_train = load_unit_test(split="train")
    idx = np.random.RandomState(0).choice(len(y_train), 5, replace=False)
    # Fit the shapelet transform with a fixed seed.
    transformer = RandomShapeletTransform(
        max_shapelets=10, n_shapelet_samples=500, random_state=0
    )
    transformer.fit(X_train.iloc[idx], y_train[idx])
    # The transformed values must match the stored reference output.
    transformed = transformer.transform(X_train.iloc[idx])
    testing.assert_array_almost_equal(transformed, shapelet_transform_unit_test_data)
def test_st_on_basic_motions():
    """Test of ShapeletTransform on basic motions data."""
    # Load the basic-motions data and pick a fixed random subset of instances.
    X_train, y_train = load_basic_motions(split="train", return_X_y=True)
    idx = np.random.RandomState(4).choice(len(y_train), 5, replace=False)
    # Fit the shapelet transform with a fixed seed.
    transformer = RandomShapeletTransform(
        max_shapelets=10, n_shapelet_samples=500, random_state=0
    )
    transformer.fit(X_train.iloc[idx], y_train[idx])
    # The transformed values must match the stored reference output.
    transformed = transformer.transform(X_train.iloc[idx])
    testing.assert_array_almost_equal(transformed, shapelet_transform_basic_motions_data)
# Reference output of RandomShapeletTransform on the unit-test subset
# (5 instances x 3 shapelets); regenerate if the transform changes.
shapelet_transform_unit_test_data = np.array(
    [
        [
            0.0844948936238554,
            0.15355295821521212,
            0.181181653406588,
        ],
        [
            0.1005739706371287,
            0.13728264572793214,
            0.14232864157873387,
        ],
        [
            0.15574477738923456,
            0.24572051577166446,
            0.28114769465706674,
        ],
        [
            0.14368755013218146,
            0.11173267717634065,
            0.0832337710268795,
        ],
        [
            0.05901578181579171,
            0.11702300016584308,
            0.14612594209368096,
        ],
    ]
)
# Reference output of RandomShapeletTransform on the basic-motions subset
# (5 instances x 8 shapelets); regenerate if the transform changes.
shapelet_transform_basic_motions_data = np.array(
    [
        [
            1.0891026731161,
            0.9869567751155376,
            1.500478686384502,
            1.9604066805556999,
            1.9300459325831565,
            1.6290470525017764,
            1.2492444105003735,
            1.0060446077996184,
        ],
        [
            1.1700758360488173,
            1.0555356143008514,
            0.6147335984845409,
            0.9762741759423724,
            0.5589265732729417,
            1.032742062232156,
            1.083409283764176,
            1.111697276658204,
        ],
        [
            1.6798705292742746,
            1.9684063044201972,
            2.453685502926318,
            1.9677105642494732,
            2.029428399113479,
            1.3483536658952058,
            1.9877554408696116,
            0.5488432707540976,
        ],
        [
            1.1079276425471314,
            1.0065997349055864,
            1.0618258792202282,
            0.32297427738972406,
            1.1450380706584913,
            1.0387357068111138,
            0.676941814847556,
            1.0156811721014811,
        ],
        [
            0.33414369747055067,
            0.2870956468054047,
            1.734401894586686,
            1.9064659364611127,
            1.7299782521480092,
            1.6297951854173116,
            1.5659881867675363,
            1.1203189560668823,
        ],
    ]
)
| 3,590 | 1,772 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2013 Paul Norman
# <penorman@mac.com>
# Released under the MIT license: http://opensource.org/licenses/mit-license.php
# Classes
class Geometry(object):
    """Base class for OSM-style geometry elements.

    Maintains a class-wide registry of live geometries and a shared,
    decrementing id counter (negative ids mark new elements).
    """
    elementIdCounter = 0
    elementIdCounterIncr = -1
    geometries = []

    def __init__(self):
        # Set of features/ways/relations that reference this geometry.
        self.parents = set()
        Geometry.geometries.append(self)

    def replacejwithi(self, i, j):
        """Replace child j with i; a no-op for the base class."""
        pass

    def addparent(self, parent):
        self.parents.add(parent)

    def removeparent(self, parent, shoulddestroy=True):
        """Detach parent; drop self from the registry once orphaned."""
        self.parents.discard(parent)
        if shoulddestroy and not self.parents:
            Geometry.geometries.remove(self)

    def getNewID(self):
        """Return the next (decremented) shared element id."""
        Geometry.elementIdCounter += Geometry.elementIdCounterIncr
        return Geometry.elementIdCounter
## Helper function to get a new ID
#def getNewID():
# Geometry.elementIdCounter += Geometry.elementIdCounterIncr
# return Geometry.elementIdCounter
class Point(Geometry):
    """A node with x/y coordinates and an optional class-specific id counter."""
    idCounter = None

    def __init__(self, x, y):
        Geometry.__init__(self)
        self.id = self.getNewID()
        self.x = x
        self.y = y

    def replacejwithi(self, i, j):
        """Points have no children to replace."""
        pass

    def getNewID(self):
        # Fall back to the shared counter until a Point-specific one is set.
        if Point.idCounter is None:
            return super(Point, self).getNewID()
        Point.idCounter += Geometry.elementIdCounterIncr
        return Point.idCounter
class Way(Geometry):
    """An ordered sequence of points, with an optional class-specific id counter."""
    idCounter = None

    def __init__(self):
        Geometry.__init__(self)
        self.id = self.getNewID()
        self.points = []

    def replacejwithi(self, i, j):
        """Swap every occurrence of point j for point i and fix parent links."""
        self.points = [i if p == j else p for p in self.points]
        j.removeparent(self)
        i.addparent(self)

    def getNewID(self):
        # Fall back to the shared counter until a Way-specific one is set.
        if Way.idCounter is None:
            return super(Way, self).getNewID()
        Way.idCounter += Geometry.elementIdCounterIncr
        return Way.idCounter
class Relation(Geometry):
    """A collection of (member, role) pairs, with an optional class-specific id counter."""
    idCounter = None

    def __init__(self):
        Geometry.__init__(self)
        self.id = self.getNewID()
        self.members = []

    def replacejwithi(self, i, j):
        """Swap member j for i (keeping roles) and fix parent links."""
        self.members = [(i, m[1]) if m[0] == j else m for m in self.members]
        j.removeparent(self)
        i.addparent(self)

    def getNewID(self):
        # Fall back to the shared counter until a Relation-specific one is set.
        if Relation.idCounter is None:
            return super(Relation, self).getNewID()
        Relation.idCounter += Geometry.elementIdCounterIncr
        return Relation.idCounter
class Feature(object):
    """A tagged feature pointing at one geometry; all instances are registered."""
    features = []

    def __init__(self):
        self.geometry = None
        self.tags = {}
        Feature.features.append(self)

    def replacejwithi(self, i, j):
        """Point this feature at i wherever it referenced j, fixing parent links."""
        if self.geometry == j:
            self.geometry = i
        j.removeparent(self)
        i.addparent(self)
| 2,830 | 891 |
import hashlib
from rest_framework import generics, permissions, mixins, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import *
from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import render_to_response, render, get_object_or_404, redirect
from django.db.models import Q
from django.urls import reverse
from django.conf import settings
from coreExtend.models import Account as User
from replica.pulse.models import Entry, Draft, Media, Topic, Channel, EntryLink
from replica.cms.models import MenuPosition, MenuItem, SiteSettings
from replica.contrib.micro.models import Timeline, Note
from replica.api.serializers import *
from replica.api.permissions import IsOwner, IsOwnerOrReadOnly
from replica import settings as replicaSettings
class TopicDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a public Topic, addressed by slug."""
    lookup_field = 'slug'
    queryset = Topic.objects.public()
    serializer_class = TopicSerializer
    permission_classes = (IsOwnerOrReadOnly,)
class ChannelDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a Channel, addressed by slug."""
    lookup_field = 'slug'
    queryset = Channel.objects.all()
    serializer_class = ChannelSerializer
    permission_classes = (IsOwnerOrReadOnly,)
class TopicList(generics.ListAPIView):
    """List topics: public ones for everyone, plus the requester's own."""
    lookup_field = 'slug'
    serializer_class = TopicSerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def get_queryset(self):
        # NOTE(review): is_authenticated is invoked as a method here; on
        # Django >= 1.10 it is a property -- confirm the pinned Django version.
        if not self.request.user.is_authenticated():
            return Topic.objects.filter(is_public=True)
        visible = Q(is_public=True) | Q(user__username=self.request.user.username)
        return Topic.objects.filter(visible)
class TopicEntryList(generics.ListAPIView):
    """List entries under the topic identified by the `slug` URL kwarg."""
    model = Entry
    serializer_class = EntrySerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def get_queryset(self):
        slug = self.kwargs.get('slug')
        if not self.request.user.is_authenticated():
            return Entry.objects.published().filter(topic__slug=slug)
        # Authenticated callers also see their own inactive entries.
        visible = Q(is_active=True) | Q(user__username=self.request.user.username)
        return Entry.objects.filter(visible).filter(topic__slug=slug)
class EntryList(generics.ListAPIView):
    """List post entries; anonymous callers see only published ones."""
    model = Entry
    serializer_class = EntrySerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def get_queryset(self):
        if not self.request.user.is_authenticated():
            return Entry.objects.published()
        visible = Q(is_active=True) | Q(user__username=self.request.user.username)
        return Entry.objects.posts().filter(visible)
class ChannelList(generics.ListAPIView):
    """List all channels."""
    lookup_field = 'slug'
    serializer_class = ChannelSerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def get_queryset(self):
        return Channel.objects.all()
class ChannelEntryList(generics.ListAPIView):
    """List entries under the channel identified by the `slug` URL kwarg."""
    model = Entry
    serializer_class = EntrySerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def get_queryset(self):
        slug = self.kwargs.get('slug')
        if not self.request.user.is_authenticated():
            return Entry.objects.published().filter(channel__slug=slug)
        # Authenticated callers also see their own inactive entries.
        visible = Q(is_active=True) | Q(user__username=self.request.user.username)
        return Entry.objects.filter(visible).filter(channel__slug=slug)
class EntryDraftList(generics.ListAPIView):
    """List the requesting user's idea/draft entries (owner-only)."""
    model = Entry
    serializer_class = EntrySerializer
    permission_classes = (permissions.IsAuthenticated, IsOwner)

    def get_queryset(self):
        return Entry.objects.ideas().filter(
            user__username=self.request.user.username
        )
class EntryUpcomingList(generics.ListAPIView):
    """List the requesting user's scheduled (upcoming) entries (owner-only)."""
    model = Entry
    serializer_class = EntrySerializer
    permission_classes = (permissions.IsAuthenticated, IsOwner)

    def get_queryset(self):
        return Entry.objects.upcoming().filter(
            user__username=self.request.user.username
        )
class EntryDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Entry by id."""
    lookup_field = 'id'
    queryset = Entry.objects.all()
    serializer_class = EntrySerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def pre_save(self, obj):
        """Force user to the current user on save"""
        # NOTE(review): pre_save was removed from DRF generic views in 3.0 --
        # confirm the pinned DRF version still invokes this hook.
        obj.user = self.request.user
        return super(EntryDetail, self).pre_save(obj)
class EntryCreate(generics.CreateAPIView):
    """Create a new Entry owned by the requesting user."""
    lookup_field = 'id'
    queryset = Entry.objects.all()
    serializer_class = EntrySerializer

    def perform_create(self, serializer):
        # Stamp the authenticated user onto the new entry before saving.
        serializer.validated_data['user'] = self.request.user
        return super(EntryCreate, self).perform_create(serializer)
class PageList(generics.ListAPIView):
    """List page entries; anonymous callers see only published pages."""
    model = Entry
    serializer_class = EntrySerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def get_queryset(self):
        if not self.request.user.is_authenticated():
            return Entry.objects.pages_published()
        visible = Q(is_active=True) | Q(user__username=self.request.user.username)
        return Entry.objects.pages().filter(visible)
class PageDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a published page entry by id."""
    lookup_field = 'id'
    queryset = Entry.objects.pages_published()
    serializer_class = EntrySerializer
    permission_classes = (IsOwnerOrReadOnly,)

    def pre_save(self, obj):
        """Force user to the current user on save"""
        obj.user = self.request.user
        # BUG FIX: super() previously referenced the non-existent name
        # `PagesDetail`, raising NameError whenever pre_save ran.
        return super(PageDetail, self).pre_save(obj)
class CurrentSite(APIView):
    """Return site-level and requesting-user settings for the admin UI."""
    permission_classes = (permissions.IsAdminUser,)

    def get(self, request, format=None):
        user = get_object_or_404(User, username=request.user.username)
        # Gravatar addresses avatars by the MD5 of the account email.
        email_hash = hashlib.md5(user.email.encode('utf-8')).hexdigest()
        usereplicaSettings = {
            "current_user": user.username,
            "current_user_hash": email_hash,
            "current_avatar": "https://secure.gravatar.com/avatar/%s.jpg" % email_hash,
            'AccountSettings': reverse('CoreExtend:AccountSettings'),
            'password_change': reverse('CoreExtend:password_change'),
            'logout': reverse('CoreExtend:Logout'),
        }
        site_settings = {
            "site_name": replicaSettings.SITE_NAME,
            "site_url": replicaSettings.SITE_URL,
        }
        return Response({
            'site_settings': site_settings,
            'usereplicaSettings': usereplicaSettings,
        })
class CurrentSiteStats(APIView):
    """Return object counts, both site-wide and for the requesting user."""
    permission_classes = (permissions.IsAdminUser,)

    def get(self, request, format=None):
        # Querysets counted twice: once in total, once filtered by owner.
        owned_querysets = {
            'published': Entry.objects.published(),
            'upcoming': Entry.objects.upcoming(),
            'ideas': Entry.objects.ideas(),
            'pages': Entry.objects.pages(),
            'media': Media.objects.all(),
            'notes': Note.objects.all(),
        }
        total_counts = {
            'topics': Topic.objects.all().count(),
            'channels': Channel.objects.all().count(),
        }
        total_counts.update(
            {name: qs.count() for name, qs in owned_querysets.items()}
        )
        user_counts = {
            name: qs.filter(user=request.user).count()
            for name, qs in owned_querysets.items()
        }
        return Response({
            'total_counts': total_counts,
            'user_counts': user_counts,
        })
class CurrentSiteSettings(generics.RetrieveUpdateAPIView):
    """Retrieve and update the SiteSettings row for the current SITE_ID."""
    model = SiteSettings
    lookup_field = 'id'
    permission_classes = (permissions.IsAdminUser,)
    serializer_class = SiteSettingsSerializer

    def get(self, request, format=None):
        site = SiteSettings.objects.get(id=settings.SITE_ID)
        serializer = SiteSettingsSerializer(site)
        return Response(serializer.data)

    def put(self, request, format=None):
        site = SiteSettings.objects.get(id=settings.SITE_ID)
        serializer = SiteSettingsSerializer(site, data=request.data)
        if serializer.is_valid():
            serializer.save()
            # NOTE: 201 kept for backward compatibility, though 200 is the
            # conventional status for a successful update.
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # BUG FIX: previously returned serializer.data with HTTP 200 on
        # validation failure, silently discarding the validation errors.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class DashboardView(APIView):
    """Aggregate recent objects and counts for the admin dashboard."""
    permission_classes = (permissions.IsAdminUser,)

    def get(self, request, format=None):
        limit = 10

        def summarize(queryset, serializer_cls):
            """Serialize the first `limit` items plus mine/total counts."""
            serialized = serializer_cls(
                queryset[:limit], many=True, context={'request': request}
            )
            return {
                'count': {
                    'mine': queryset.filter(user=request.user).count(),
                    'total': queryset.count(),
                },
                'results': serialized.data,
            }

        return Response({
            'topics': summarize(Topic.objects.all(), TopicSerializer),
            'channels': summarize(Channel.objects.all(), ChannelSerializer),
            'media': summarize(Media.objects.all(), MediaSerializer),
            'published_entries': summarize(Entry.objects.published(), EntrySerializer),
            'upcoming_entries': summarize(Entry.objects.upcoming(), EntrySerializer),
            'ideas': summarize(Entry.objects.ideas(), EntrySerializer),
            'pages': summarize(Entry.objects.pages(), EntrySerializer),
        })
| 10,302 | 2,902 |
import json
from functools import wraps
from typing import Callable
from urllib.parse import parse_qsl
from rivr.http import Request, Response
from palaverapi.responses import ProblemResponse
def requires_body(func: Callable[..., Response]):
    """Decorator: parse the request body and hand it to *func* as an extra
    positional `payload` argument.

    JSON and form-encoded bodies are supported; other media types yield a
    415 response, undecodable/unparsable bodies a 400. When the request
    carries no content type, an empty dict is passed instead.
    """
    @wraps(func)
    def wrapper(self, request: Request, *args, **kwargs) -> Response:
        if not request.content_type:
            return func(self, request, *args, {}, **kwargs)

        raw = request.body.read()
        media_type = request.content_type.split(';')[0]
        if media_type not in (
            'application/json',
            'application/x-www-form-urlencoded',
        ):
            return ProblemResponse(415, 'Unsupported Media Type')

        try:
            text = raw.decode('utf-8')
            if media_type == 'application/json':
                payload = json.loads(text)
            else:
                # keep_blank_values=True so "a=&b=1" round-trips.
                payload = dict(parse_qsl(text, True))
        except (UnicodeDecodeError, ValueError):
            return ProblemResponse(400, 'Invalid request body')

        return func(self, request, *args, payload, **kwargs)

    return wrapper
| 1,238 | 332 |
from __future__ import annotations
from pexpect import spawn
import logging
from typing import List, Tuple
from datetime import datetime
import hashlib
import os
from shutil import rmtree
import tarfile
import yaml
from pathlib import Path
from colorama import Fore, Style
import re
import medperf.config as config
from medperf.ui import UI
def storage_path(subpath: str) -> str:
    """Helper function that converts a path to a storage-related path."""
    return os.path.join(config.storage, subpath)
def get_file_sha1(path: str) -> str:
    """Calculates the sha1 hash for a given file.

    Args:
        path (str): Location of the file of interest.

    Returns:
        str: Calculated hash
    """
    chunk_size = 65536  # read in 64 KiB chunks to bound memory use
    digest = hashlib.sha1()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def init_storage():
    """Builds the general medperf folder structure."""
    # Parent first: os.mkdir cannot create intermediate directories.
    required_dirs = [
        config.storage,
        storage_path(config.data_storage),
        storage_path(config.cubes_storage),
        storage_path(config.results_storage),
        storage_path(config.tmp_storage),
    ]
    for directory in required_dirs:
        if not os.path.isdir(directory):
            logging.info(f"Creating {directory} directory")
            os.mkdir(directory)
def cleanup():
    """Removes clutter and unused files from the medperf folder structure."""
    tmp_dir = storage_path(config.tmp_storage)
    if os.path.exists(tmp_dir):
        logging.info("Removing temporary data storage")
        rmtree(tmp_dir, ignore_errors=True)

    # Dataset folders still carrying the temporary-registration prefix were
    # never registered and can be discarded.
    for dset in get_dsets():
        if not dset.startswith(config.tmp_reg_prefix):
            continue
        logging.info("Removing unregistered dataset")
        dset_path = os.path.join(storage_path(config.data_storage), dset)
        if os.path.exists(dset_path):
            rmtree(dset_path, ignore_errors=True)
def get_dsets() -> List[str]:
    """Retrieves the UID of all the datasets stored locally.

    Returns:
        List[str]: UIDs of prepared datasets.
    """
    # Each dataset lives in its own subdirectory of the data storage root;
    # os.walk's first yield gives (root, dirnames, filenames).
    dsets = next(os.walk(storage_path(config.data_storage)))[1]
    return dsets
def pretty_error(msg: str, ui: "UI", clean: bool = True, add_instructions=True):
    """Prints an error message with typer protocol and exits the script.

    Args:
        msg (str): Error message to show to the user
        ui (UI): UI implementation used to display the error.
        clean (bool, optional):
            Run the cleanup process before exiting. Defaults to True.
        add_instructions (bool, optional):
            Show additional instructions to the user. Defaults to True.
    """
    logging.warning(
        "MedPerf had to stop execution. See logs above for more information"
    )
    if msg[-1] != ".":
        msg += "."
    if add_instructions:
        msg += f" See logs at {config.log_file} for more information"
    ui.print_error(msg)
    if clean:
        cleanup()
    exit(1)
def cube_path(uid: int) -> str:
    """Gets the path for a given cube.

    Args:
        uid (int): Cube UID.

    Returns:
        str: Location of the cube folder structure.
    """
    # Cubes are stored in per-UID subfolders of the cube storage root.
    return os.path.join(storage_path(config.cubes_storage), str(uid))
def generate_tmp_datapath() -> Tuple[str, str]:
    """Builds a temporary folder for prepared but yet-to-register datasets.

    Returns:
        str: General temporary folder location
        str: Specific data path for the temporary dataset
    """
    # A UTC-epoch timestamp keeps temporary folder names unique.
    timestamp = str(int(datetime.timestamp(datetime.utcnow())))
    tmp_name = config.tmp_reg_prefix + timestamp
    out_path = os.path.abspath(
        os.path.join(storage_path(config.data_storage), tmp_name)
    )
    out_datapath = os.path.join(out_path, "data")
    if not os.path.isdir(out_datapath):
        logging.info(f"Creating temporary dataset path: {out_datapath}")
        os.makedirs(out_datapath)
    return out_path, out_datapath
def check_cube_validity(cube: "Cube", ui: "UI"):
    """Helper function for pretty printing the cube validity process.

    Exits via pretty_error when the cube's hash check fails.

    Args:
        cube (Cube): Cube to check for validity
        ui (UI): Instance of an UI implementation
    """
    logging.info(f"Checking cube {cube.name} validity")
    ui.text = "Checking cube MD5 hash..."
    if not cube.is_valid():
        # BUG FIX: pretty_error requires the ui argument; it was previously
        # called without it, raising TypeError instead of reporting the error.
        pretty_error("MD5 hash doesn't match", ui)
    logging.info(f"Cube {cube.name} is valid")
    ui.print(f"> {cube.name} MD5 hash check complete")
def untar_additional(add_filepath: str) -> str:
    """Untars and removes the additional_files.tar.gz file

    Args:
        add_filepath (str): Path where the additional_files.tar.gz file can be found.

    Returns:
        str: location where the untarred files can be found.
    """
    logging.info(f"Uncompressing additional_files.tar.gz at {add_filepath}")
    addpath = str(Path(add_filepath).parent)
    # Context manager ensures the tar handle is closed even if extraction fails
    # (previously the handle leaked on an extraction error).
    with tarfile.open(add_filepath) as tar:
        tar.extractall(addpath)
    os.remove(add_filepath)
    return addpath
def approval_prompt(msg: str, ui: "UI") -> bool:
    """Helper function for prompting the user for things they have to explicitly approve.

    Args:
        msg (str): What message to ask the user for approval.
        ui (UI): UI implementation used for prompting.

    Returns:
        bool: Whether the user explicitly approved or not.
    """
    logging.info("Prompting for user's approval")
    approval = None
    # BUG FIX: the old check was `approval not in "yn"`, a substring test that
    # wrongly accepted the empty string (and "yn") as an answer.
    while approval not in ("y", "n"):
        approval = ui.prompt(msg.strip() + " ").lower()
    logging.info(f"User answered approval with {approval}")
    return approval == "y"
def dict_pretty_print(in_dict: dict, ui: "UI"):
    """Helper function for distinctively printing dictionaries with yaml format.

    Args:
        in_dict (dict): dictionary to print
        ui (UI): UI implementation used for output.
    """
    logging.debug(f"Printing dictionary to the user: {in_dict}")
    separator = "=" * 20
    # Drop None values so the YAML dump only shows meaningful entries.
    printable = {key: val for (key, val) in in_dict.items() if val is not None}
    ui.print()
    ui.print(separator)
    ui.print(yaml.dump(printable))
    ui.print(separator)
def combine_proc_sp_text(proc: spawn, ui: "UI") -> str:
    """Combines the output of a process and the spinner.

    Joins any string captured from the process with the
    spinner current text. Any strings ending with any other
    character from the subprocess will be returned later.

    Args:
        proc (spawn): a pexpect spawned child
        ui (UI): An instance of an UI implementation

    Returns:
        str: all non-carriage-return-ending string captured from proc
    """
    static_text = ui.text
    proc_out = ""
    while proc.isalive():
        # Read one byte at a time, accumulating until a CR/LF terminates
        # the current line (or the stream runs dry).
        line = byte = proc.read(1)
        while byte and not re.match(b"[\r\n]", byte):
            byte = proc.read(1)
            line += byte
            if not byte:
                break
        line = line.decode("utf-8", "ignore")
        if line:
            # add to proc_out list for logging
            proc_out += line
            # Show the live subprocess line next to the spinner's caption.
            ui.text = (
                f"{static_text} {Fore.WHITE}{Style.DIM}{line.strip()}{Style.RESET_ALL}"
            )
    return proc_out
def get_folder_sha1(path: str) -> str:
    """Generates a hash for all the contents of the folder. This procedure
    hashes all of the files in the folder, sorts them and then hashes that list.

    Args:
        path (str): Folder to hash

    Returns:
        str: sha1 hash of the whole folder
    """
    # Sorting makes the result independent of os.walk's traversal order.
    file_hashes = sorted(
        get_file_sha1(os.path.join(root, name))
        for root, _, files in os.walk(path, topdown=False)
        for name in files
    )
    folder_digest = hashlib.sha1()
    for file_hash in file_hashes:
        folder_digest.update(file_hash.encode("utf-8"))
    return folder_digest.hexdigest()
def results_path(benchmark_uid, model_uid, data_uid):
    """Build the on-disk path of the results file for the given
    benchmark/model/dataset triplet."""
    return os.path.join(
        storage_path(config.results_storage),
        str(benchmark_uid),
        str(model_uid),
        str(data_uid),
        config.results_filename,
    )
def results_ids(ui: UI):
    """Walk the results storage tree and collect every
    (benchmark_uid, model_uid, data_uid) triplet found on disk.

    Args:
        ui (UI): UI implementation used to report a fatal walk error.

    Returns:
        list: (benchmark, model, dataset) uid triplets.
    """
    results_storage = storage_path(config.results_storage)
    results_ids = []
    try:
        # On-disk layout is results/<benchmark>/<model>/<dataset>/...
        bmk_uids = next(os.walk(results_storage))[1]
        for bmk_uid in bmk_uids:
            bmk_storage = os.path.join(results_storage, bmk_uid)
            model_uids = next(os.walk(bmk_storage))[1]
            for model_uid in model_uids:
                bmk_model_storage = os.path.join(bmk_storage, model_uid)
                data_uids = next(os.walk(bmk_model_storage))[1]
                bmk_model_data_list = [
                    (bmk_uid, model_uid, data_uid) for data_uid in data_uids
                ]
                results_ids += bmk_model_data_list
    except StopIteration:
        # os.walk yields nothing when the directory doesn't exist.
        msg = "Couldn't iterate over the results directory"
        logging.warning(msg)
        pretty_error(msg, ui)
    return results_ids
def setup_logger(logger, log_lvl):
    """Attach a file handler at the given level to *logger*.

    Args:
        logger: Logger instance to configure.
        log_lvl: Logging level for the new file handler.
    """
    # BUG FIX: config is a module, not a mapping; config["log_file"] raised
    # TypeError. Use attribute access like the rest of this module.
    fh = logging.FileHandler(config.log_file)
    fh.setLevel(log_lvl)
    logger.addHandler(fh)
def list_files(startpath):
    """Return an indented, tree-like listing of *startpath* as a string."""
    lines = []
    for root, _, files in os.walk(startpath):
        # Depth relative to startpath drives the 4-space indentation.
        depth = root.replace(startpath, "").count(os.sep)
        pad = " " * 4 * depth
        lines.append("{}{}/\n".format(pad, os.path.basename(root)))
        child_pad = " " * 4 * (depth + 1)
        lines.extend("{}{}\n".format(child_pad, name) for name in files)
    return "".join(lines)
| 9,489 | 3,067 |
from gwh import *
from tests.utils import *
# Module-level app shared by both webhook tests: one handler keyed on the
# repository, one keyed on the event type.
app = GitWebhook()
app.add_handler(lambda: None, repository=KNOWN_REPO)
app.add_handler(lambda: None, type=KNOWN_TYPE)
def test_bitbucket():
    """A Bitbucket webhook hits when either the repo or the type is known."""
    check_bitbucket_webhook(app, KNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
    check_bitbucket_webhook(app, UNKNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
    check_bitbucket_webhook(app, KNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=True)
    check_bitbucket_webhook(app, UNKNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=False)
def test_gitlab():
    """A GitLab webhook hits when either the repo or the type is known."""
    check_gitlab_webhook(app, KNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
    check_gitlab_webhook(app, UNKNOWN_TYPE, KNOWN_REPO, "master", hit_expected=True)
    check_gitlab_webhook(app, KNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=True)
    check_gitlab_webhook(app, UNKNOWN_TYPE, UNKNOWN_REPO, "master", hit_expected=False)
| 919 | 367 |
from faker import Faker
from datetime import datetime
import random
import time
# Template for one nginx "combined"-style access-log line.
LINE = """\
{remote_addr} - - [{time_local} +0000] "{request_type} {request_path} HTTP/1.1" {status} {body_bytes_sent} "{http_referer}" "{http_user_agent}"\
"""

# The generator alternates between two log files.
LOG_FILE_A = "log_a.txt"
LOG_FILE_B = "log_b.txt"
# Number of lines written before rotating (and truncating) files.
LOG_MAX = 100
def generate_log_line():
    """Build one fake nginx-style access-log line using Faker data."""
    faker = Faker()
    timestamp = datetime.now().strftime('%d/%b/%Y:%H:%M:%S')
    fields = {
        "remote_addr": faker.ipv4(),
        "time_local": timestamp,
        "request_type": random.choice(["GET", "POST", "PUT"]),
        "request_path": "/" + faker.uri_path(),
        "status": random.choice([200, 401, 404]),
        "body_bytes_sent": random.choice(range(5, 1000, 1)),
        "http_referer": faker.uri(),
        "http_user_agent": faker.user_agent(),
    }
    return LINE.format(**fields)
def write_log_line(log_file, line):
    """Append *line* plus a trailing newline to *log_file*."""
    with open(log_file, "a") as handle:
        handle.write(line + "\n")
def clear_log_file(log_file):
    """Truncate *log_file*, creating it if it does not exist."""
    with open(log_file, "w+") as handle:
        handle.write("")
if __name__ == "__main__":
    # Continuously emit fake log lines, rotating between the two files every
    # LOG_MAX lines; the file being rotated to is truncated on each switch.
    current_log_file = LOG_FILE_A
    lines_written = 0
    clear_log_file(LOG_FILE_A)
    clear_log_file(LOG_FILE_B)
    while True:
        line = generate_log_line()
        write_log_line(current_log_file, line)
        lines_written += 1
        if lines_written % LOG_MAX == 0:
            new_log_file = LOG_FILE_B
            if current_log_file == LOG_FILE_B:
                new_log_file = LOG_FILE_A
            clear_log_file(new_log_file)
            current_log_file = new_log_file
        # Random 1-4 second pause between lines to mimic organic traffic.
        sleep_time = random.choice(range(1, 5, 1))
        time.sleep(sleep_time)
| 1,873 | 703 |
import numpy as np
# Invested principal amounts (valor_aplicado) and per-element multipliers
# (juros = interest factors); the output is their element-wise product.
valor_aplicado = np.array([5000, 6000, 7000, 8000])
juros = np.array([1, 2, 3, 4])
# NOTE(review): multiplying principal directly by 1..4 reads like a demo of
# element-wise numpy ops rather than a real interest computation -- confirm.
saida = valor_aplicado * juros
print(saida)
| 150 | 82 |
"""Classical AMG"""
from __future__ import absolute_import
from .classical import *
from .split import *
from .interpolate import *
from .cr import *
# Export every public (non-underscore) name pulled in by the star imports above.
__all__ = [s for s in dir() if not s.startswith('_')]

# NOTE(review): numpy.testing.Tester was deprecated and later removed from
# NumPy; this test hook only works against old NumPy releases -- confirm the
# pinned version.
from numpy.testing import Tester
test = Tester().test
| 260 | 85 |
# Upper bound used to effectively disable Vega-Lite label/symbol truncation.
MAX_LIMIT = 9999


class StyleEmbedding:
    """Style constants and Vega-Lite config for embedding plots."""
    colorscheme = "tableau20"
    filled = True
    sizeDefault = 7
    sizeMin = 5
    sizeMax = 25
    opacityDefault = 0.05
    opacityMin = 0.05
    opacityMax = 0.5
    tickMinStep = 5

    # BUG FIX: this was a plain function in the class body, so calling it on
    # an instance raised TypeError (unexpected `self`). @staticmethod keeps
    # class-level calls working and also allows instance-level calls.
    @staticmethod
    def get_embedding_style():
        """Return the Vega-Lite `config` block for embedding charts."""
        return {
            "config": {
                "axis": {
                    "labelFontSize": 20,
                    "labelSeparation": 10,
                    "titleFontSize": 24,
                },
                "mark": {"smooth": True},
                "legend": {
                    "titleFontSize": 24,
                    "labelFontSize": 20,
                    "titleLimit": MAX_LIMIT,
                    "labelLimit": MAX_LIMIT,
                    "symbolLimit": MAX_LIMIT,
                    "orient": "right",
                    # "orient": "top",
                    # "columns": 3,
                    # "direction": "horizontal",
                    "titleAnchor": "middle",
                    "labelOpacity": 1,
                    "symbolOpacity": 1,
                },
                "title": {"anchor": "start", "color": "gray", "fontSize": 25},
            }
        }
class StyleEthogram:
    """Style constants and Vega-Lite config for ethogram plots."""
    colorscheme = "tableau20"

    # BUG FIX: this was a plain function in the class body, so calling it on
    # an instance raised TypeError (unexpected `self`). @staticmethod keeps
    # class-level calls working and also allows instance-level calls.
    @staticmethod
    def get_ethogram_style():
        """Return the Vega-Lite `config` block for ethogram charts."""
        return {
            "config": {
                "view": {"continuousWidth": 400, "continuousHeight": 300},
                "axis": {
                    "labelFontSize": 20,
                    "labelSeparation": 10,
                    "titleFontSize": 24,
                },
                "legend": {
                    "labelFontSize": 20,
                    "labelLimit": MAX_LIMIT,
                    "labelOpacity": 1,
                    "orient": "right",
                    "symbolLimit": MAX_LIMIT,
                    "symbolOpacity": 1,
                    "titleFontSize": 24,
                    "titleLimit": MAX_LIMIT,
                },
                "title": {"anchor": "start", "color": "gray", "fontSize": 25},
            }
        }
| 2,027 | 590 |
#!/usr/bin/env python
# coding: utf8
'''Script to parse aggregated annotation responses into a CSV file of labels.
Example
-------
$ ./scripts/parse_aggregated_responses.py \
"path/to/dir/*.csv" \
openmic-2018-aggregated-labels.csv
'''
from __future__ import print_function
import argparse
import glob
import os
import pandas as pd
import sys
import tqdm
# Sign applied to confidence when mapping yes/no answers to a probability.
YN_MAP = {'no': -1, 'yes': 1}
# Output column order for the aggregated-label CSV.
COLUMNS = ['sample_key', 'instrument', 'relevance', 'num_responses']
# Templates for the per-instrument response columns.
CONF_COL = 'does_this_recording_contain_{}:confidence'
CONTAIN_COL = 'does_this_recording_contain_{}'


def parse_one(row):
    """Convert one aggregated-response row into a sparse-label record.

    Relevance lands in [0, 1]: 0.5 +/- confidence/2 depending on the
    yes/no answer for the row's instrument.
    """
    instrument = row.instrument
    direction = YN_MAP[row[CONTAIN_COL.format(instrument)]]
    half_confidence = row[CONF_COL.format(instrument)] / 2.0
    return dict(
        sample_key=row.sample_key,
        instrument=instrument,
        relevance=0.5 + direction * half_confidence,
        num_responses=row._trusted_judgments,
    )
def main(csv_files, output_filename):
    """Parse every CSV in *csv_files* into label records and write one
    sorted CSV; returns True when the output file exists afterwards."""
    records = []
    for csv_file in tqdm.tqdm(csv_files):
        parsed = pd.read_csv(csv_file).apply(parse_one, axis=1)
        records.extend(parsed.values.tolist())
    frame = pd.DataFrame.from_records(records)
    print('Loaded {} records'.format(len(frame)))
    frame.sort_values(by='sample_key', inplace=True)
    frame.to_csv(output_filename, columns=COLUMNS, index=None)
    return os.path.exists(output_filename)
def process_args(args):
    """Build the CLI parser and parse *args* (an argv-style token list)."""
    parser = argparse.ArgumentParser(
        description='Aggregated annotation results parser')
    positionals = [
        ('csv_pattern', 'Glob-style file pattern for picking up CSV files.'),
        ('output_filename', 'Output filename for writing the sparse label CSV.'),
    ]
    for name, help_text in positionals:
        parser.add_argument(name, type=str, action='store', help=help_text)
    return parser.parse_args(args)
if __name__ == '__main__':
    # Expand the glob ourselves so the shell quoting in the docstring example
    # ("path/to/dir/*.csv") works on all platforms.
    args = process_args(sys.argv[1:])
    csv_files = glob.glob(args.csv_pattern)
    success = main(csv_files, args.output_filename)
    sys.exit(0 if success else 1)
| 1,955 | 662 |
"""Test cases for abstract looseserver client."""
import uuid
from urllib.parse import urljoin
from looseserver.client.abstract import AbstractClient
def test_create_rule(client_rule_factory, client_response_factory, registered_rule):
    """Check request data that client uses to create a rule.

    1. Create a subclass of the abstract client.
    2. Implement send request so that it checks the request parameters.
    3. Invoke the create_rule method.
    4. Check the rule, returned by the method call.
    """
    rule_id = str(uuid.uuid4())

    class _Client(AbstractClient):
        def _send_request(self, url, method="GET", json=None):
            # Creating a rule must POST the serialized rule to "rules".
            serialized_rule = self._rule_factory.serialize_rule(rule=registered_rule)
            assert url == "rules", "Wrong url"
            assert method == "POST", "Wrong method"
            assert json == serialized_rule, "Wrong rule data"
            # Echo the rule back with a server-assigned id, as the server would.
            response_json = {"rule_id": rule_id}
            response_json.update(serialized_rule)
            return response_json

    client = _Client(
        configuration_url="/",
        rule_factory=client_rule_factory,
        response_factory=client_response_factory,
    )
    created_rule = client.create_rule(rule=registered_rule)
    assert created_rule.rule_id == rule_id, "Rule ID has not been set"
def test_get_rule(client_rule_factory, client_response_factory, registered_rule):
    """Check request data that client uses to get a rule.

    1. Create a subclass of the abstract client.
    2. Implement send request so that it checks the request parameters.
    3. Invoke the get_rule method.
    4. Check the rule, returned by the method call.
    """
    rule_id = str(uuid.uuid4())

    class _Client(AbstractClient):
        def _send_request(self, url, method="GET", json=None):
            # Fetching a rule must be a body-less GET to rule/<id>.
            assert url == "rule/{0}".format(rule_id), "Wrong url"
            assert method == "GET", "Wrong method"
            assert json is None, "Data has been specified"
            response_json = {"rule_id": rule_id}
            response_json.update(self._rule_factory.serialize_rule(rule=registered_rule))
            return response_json

    client = _Client(
        configuration_url="/",
        rule_factory=client_rule_factory,
        response_factory=client_response_factory,
    )
    obtained_rule = client.get_rule(rule_id=rule_id)
    assert obtained_rule.rule_id == rule_id, "Rule ID has not been set"
def test_delete_rule(client_rule_factory, client_response_factory):
    """Check request data that client uses to remove a rule.

    1. Create a subclass of the abstract client.
    2. Implement send request so that it checks the request parameters.
    3. Invoke the remove_rule method.
    """
    rule_id = str(uuid.uuid4())

    class _Client(AbstractClient):
        def _send_request(self, url, method="GET", json=None):
            # Removal must be a body-less DELETE to rule/<id>.
            assert url == "rule/{0}".format(rule_id), "Wrong url"
            assert method == "DELETE", "Wrong method"
            assert json is None, "Data has been specified"

    client = _Client(
        configuration_url="/",
        rule_factory=client_rule_factory,
        response_factory=client_response_factory,
    )
    client.remove_rule(rule_id=rule_id)
def test_set_response(client_rule_factory, client_response_factory, registered_response):
    """Check request data that client uses to set a response.

    1. Create a subclass of the abstract client.
    2. Implement send request so that it checks the request parameters.
    3. Invoke the set_response method.
    4. Check the response, returned by the method call.
    """
    rule_id = str(uuid.uuid4())

    class _Client(AbstractClient):
        def _send_request(self, url, method="GET", json=None):
            # Setting a response must POST the serialized response to response/<id>.
            serialized_response = self._response_factory.serialize_response(registered_response)
            assert url == "response/{0}".format(rule_id), "Wrong url"
            assert method == "POST", "Wrong method"
            assert json == serialized_response, "Wrong response data"
            return serialized_response

    client = _Client(
        configuration_url="/",
        rule_factory=client_rule_factory,
        response_factory=client_response_factory,
    )
    response = client.set_response(rule_id=rule_id, response=registered_response)
    assert response.response_type == registered_response.response_type, "Wrong response is returned"
def test_build_url(client_rule_factory, client_response_factory):
    """Check method to build url.

    1. Create a subclass of the abstract client.
    2. Build url.
    3. Check the built url.
    """
    class _Client(AbstractClient):
        def _send_request(self, url, method="GET", json=None):
            pass

        def exposed_build_url(self, relative_url):
            """Expose 'protected' _build_url method."""
            return self._build_url(relative_url=relative_url)

    configuration_endpoint = "/config/"
    client = _Client(
        configuration_url=configuration_endpoint,
        rule_factory=client_rule_factory,
        response_factory=client_response_factory,
    )
    relative_path = "test"
    # _build_url must resolve relative paths against the configuration url.
    expected_url = urljoin(configuration_endpoint, relative_path)
    assert client.exposed_build_url(relative_path) == expected_url, "Wrong url"
| 5,303 | 1,487 |
import time
import pytest
import gitlab
import gitlab.v4.objects
def test_merge_requests(project):
    """Seed the project with a master commit plus a branch and open an MR."""
    project.files.create(
        {
            "file_path": "README.rst",
            "branch": "master",
            "content": "Initial content",
            "commit_message": "Initial commit",
        }
    )
    source_branch = "branch1"
    project.branches.create({"branch": source_branch, "ref": "master"})
    project.files.create(
        {
            "file_path": "README2.rst",
            "branch": source_branch,
            "content": "Initial content",
            "commit_message": "New commit in new branch",
        }
    )
    project.mergerequests.create(
        {"source_branch": "branch1", "target_branch": "master", "title": "MR readme2"}
    )
def test_merge_request_discussion(project):
    """Exercise the discussion/notes create-update-delete cycle on an MR."""
    mr = project.mergerequests.list()[0]
    size = len(mr.discussions.list())

    discussion = mr.discussions.create({"body": "Discussion body"})
    assert len(mr.discussions.list()) == size + 1

    note = discussion.notes.create({"body": "first note"})
    note_from_get = discussion.notes.get(note.id)
    note_from_get.body = "updated body"
    note_from_get.save()
    # Re-fetch to confirm the update round-tripped through the API.
    discussion = mr.discussions.get(discussion.id)
    assert discussion.attributes["notes"][-1]["body"] == "updated body"

    note_from_get.delete()
    discussion = mr.discussions.get(discussion.id)
    assert len(discussion.attributes["notes"]) == 1
def test_merge_request_labels(project):
    """Setting labels on an MR must produce resource label events."""
    mr = project.mergerequests.list()[0]
    mr.labels = ["label2"]
    mr.save()
    events = mr.resourcelabelevents.list()
    assert events
    event = mr.resourcelabelevents.get(events[0].id)
    assert event
def test_merge_request_milestone_events(project, milestone):
    """Assigning a milestone to an MR must produce milestone events."""
    mr = project.mergerequests.list()[0]
    mr.milestone_id = milestone.id
    mr.save()
    milestones = mr.resourcemilestoneevents.list()
    assert milestones
    milestone = mr.resourcemilestoneevents.get(milestones[0].id)
    assert milestone
def test_merge_request_basic(project):
    """Smoke-test MR helper endpoints."""
    mr = project.mergerequests.list()[0]
    # basic testing: only make sure that the methods exist
    mr.commits()
    mr.changes()
    assert mr.participants()
def test_merge_request_rebase(project):
    """The rebase call must be accepted by the server."""
    mr = project.mergerequests.list()[0]
    assert mr.rebase()
@pytest.mark.skip(reason="flaky test")
def test_merge_request_merge(project):
    """Merging an MR twice must raise GitlabMRClosedError the second time."""
    mr = project.mergerequests.list()[0]
    mr.merge()
    project.branches.delete(mr.source_branch)
    with pytest.raises(gitlab.GitlabMRClosedError):
        # Two merge attempts should raise GitlabMRClosedError
        mr.merge()
def test_merge_request_should_remove_source_branch(
    project, merge_request, wait_for_sidekiq
) -> None:
    """Test to ensure
    https://github.com/python-gitlab/python-gitlab/issues/1120 is fixed.

    Bug reported that they could not use 'should_remove_source_branch' in
    mr.merge() call"""
    source_branch = "remove_source_branch"
    mr = merge_request(source_branch=source_branch)

    mr.merge(should_remove_source_branch=True)
    result = wait_for_sidekiq(timeout=60)
    assert result is True, "sidekiq process should have terminated but did not"

    # Wait until it is merged
    mr_iid = mr.iid
    for _ in range(60):
        mr = project.mergerequests.get(mr_iid)
        if mr.merged_at is not None:
            break
        time.sleep(0.5)
    assert mr.merged_at is not None
    time.sleep(0.5)

    # Ensure we can NOT get the MR branch
    with pytest.raises(gitlab.exceptions.GitlabGetError):
        project.branches.get(source_branch)
def test_merge_request_large_commit_message(
    project, merge_request, wait_for_sidekiq
) -> None:
    """Test to ensure https://github.com/python-gitlab/python-gitlab/issues/1452
    is fixed.

    Bug reported that very long 'merge_commit_message' in mr.merge() would
    cause an error: 414 Request too large
    """
    source_branch = "large_commit_message"
    mr = merge_request(source_branch=source_branch)

    # Build a >10KB commit message to trip any URL-length limits.
    merge_commit_message = "large_message\r\n" * 1_000
    assert len(merge_commit_message) > 10_000

    mr.merge(merge_commit_message=merge_commit_message)
    result = wait_for_sidekiq(timeout=60)
    assert result is True, "sidekiq process should have terminated but did not"

    # Wait until it is merged
    mr_iid = mr.iid
    for _ in range(60):
        mr = project.mergerequests.get(mr_iid)
        if mr.merged_at is not None:
            break
        time.sleep(0.5)
    assert mr.merged_at is not None
    time.sleep(0.5)

    # Ensure we can get the MR branch
    project.branches.get(source_branch)
def test_merge_request_merge_ref(merge_request) -> None:
    """merge_ref must expose the commit id of the merge result."""
    source_branch = "merge_ref_test"
    mr = merge_request(source_branch=source_branch)
    response = mr.merge_ref()
    assert response and "commit_id" in response
def test_merge_request_merge_ref_should_fail(
    project, merge_request, wait_for_sidekiq
) -> None:
    """merge_ref must 4xx when the MR has unresolved conflicts."""
    source_branch = "merge_ref_test2"
    mr = merge_request(source_branch=source_branch)

    # Create conflict
    project.files.create(
        {
            "file_path": f"README.{source_branch}",
            "branch": project.default_branch,
            "content": "Different initial content",
            "commit_message": "Another commit in main branch",
        }
    )
    result = wait_for_sidekiq(timeout=60)
    assert result is True, "sidekiq process should have terminated but did not"

    # Check for non-existing merge_ref for MR with conflicts
    with pytest.raises(gitlab.exceptions.GitlabGetError):
        response = mr.merge_ref()
        assert "commit_id" not in response
| 5,683 | 1,922 |
import numpy as np

# Exercises every calling convention of the ndarray shape-manipulation API;
# each statement is expected to run (and type-check) without error.
nd1 = np.array([[1, 2], [3, 4]])
# reshape: new array/view; flat size, separate dims, a shape tuple, and an
# explicit memory ``order`` keyword.
nd1.reshape(4)
nd1.reshape(2, 2)
nd1.reshape((2, 2))
nd1.reshape((2, 2), order="C")
nd1.reshape(4, order="C")
# resize: in-place counterpart of reshape; same shape spellings plus the
# ``refcheck`` flag that controls the reference-count safety check.
nd1.resize()
nd1.resize(4)
nd1.resize(2, 2)
nd1.resize((2, 2))
nd1.resize((2, 2), refcheck=True)
nd1.resize(4, refcheck=True)
nd2 = np.array([[1, 2], [3, 4]])
# transpose: no arguments, separate axis numbers, or an axes tuple.
nd2.transpose()
nd2.transpose(1, 0)
nd2.transpose((1, 0))
# swapaxes: exchange one pair of axes.
nd2.swapaxes(0, 1)
# flatten: 1-D copy, optionally with a memory order.
nd2.flatten()
nd2.flatten("C")
# ravel: like flatten but may return a view.
nd2.ravel()
nd2.ravel("C")
# squeeze: drop length-1 axes -- all of them, one by index, or a tuple.
nd2.squeeze()
nd3 = np.array([[1, 2]])
nd3.squeeze(0)
nd4 = np.array([[[1, 2]]])
nd4.squeeze((0, 1))
| 687 | 388 |
import torch
def mask_conv2d_spatial(mask_type, height, width):
    """Build a (1, 1, height, width) mask enforcing spatial autoregression.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network (the centre
            pixel itself is hidden), 'B' for all others (centre is visible).
        height: int
            Kernel height for layer.
        width: int
            Kernel width for layer.
    Output:
        mask: torch.FloatTensor
            Shape (1, 1, height, width); 0 marks masked elements.
    """
    mask = torch.ones([1, 1, height, width])
    centre_row = height // 2
    centre_col = width // 2
    # 'B' layers may look at the centre pixel, 'A' layers may not.
    first_masked_col = centre_col + 1 if mask_type == 'B' else centre_col
    mask[:, :, centre_row, first_masked_col:] = 0
    # Everything strictly below the centre row is hidden in both variants.
    mask[:, :, centre_row + 1:] = 0
    return mask
def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
"""
Creates an autoregressive channel mask.
Input:
mask_type: str
Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
in_channels: int
Number of input channels to layer.
out_channels: int
Number of output channels of layer.
data_channels: int
Number of channels in the input data, e.g. 3 for RGB images. (default = 3).
Output:
mask: torch.FloatTensor
Shape (out_channels, in_channels).
A mask with 0 in places for masked elements.
"""
in_factor = in_channels // data_channels + 1
out_factor = out_channels // data_channels + 1
base_mask = torch.ones([data_channels,data_channels])
if mask_type == 'A':
base_mask = base_mask.tril(-1)
else:
base_mask = base_mask.tril(0)
mask_p1 = torch.cat([base_mask]*in_factor, dim=1)
mask_p2 = torch.cat([mask_p1]*out_factor, dim=0)
mask = mask_p2[0:out_channels,0:in_channels]
return mask
def mask_conv2d(mask_type, in_channels, out_channels, height, width, data_channels=3):
    r"""Build a Conv2d mask autoregressive in both space and channels.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all
            others.
        in_channels: int
            Number of input channels to layer.
        out_channels: int
            Number of output channels of layer.
        height: int
            Kernel height for layer.
        width: int
            Kernel width for layer.
        data_channels: int
            Number of channels in the input data, e.g. 3 for RGB (default 3).
    Output:
        mask: torch.FloatTensor
            Shape (out_channels, in_channels, height, width); 0 marks masked
            elements.
    """
    mask = torch.ones([out_channels, in_channels, height, width])
    centre_row = height // 2
    centre_col = width // 2
    # The central tap carries the between-channel (e.g. RGB) ordering.
    mask[:, :, centre_row, centre_col] = mask_channels(
        mask_type, in_channels, out_channels, data_channels)
    # Pixels right of the centre on the same row, and every row below,
    # are hidden entirely.
    mask[:, :, centre_row, centre_col + 1:] = 0
    mask[:, :, centre_row + 1:] = 0
    return mask
| 3,128 | 1,012 |
import json
import os
import pathlib
import sys
from loguru import logger as log
class Config:
    """Retrieves configuration from environment variables or files or fails fast otherwise"""

    def __init__(self):
        self.keys = {}

        # The location of the JSON config file itself comes from the env.
        self.from_env("CONFIG_PATH", cast=pathlib.Path)
        self._json_config = self._read_config(self.config_path)

        # Environment-backed settings.
        self.from_env("REDIS_HOST", default="localhost")
        self.from_env("REDIS_PORT", default=6379, cast=int)
        # Config-file-backed settings.
        self.from_cfg("app_influx_host", default="localhost")
        self.from_cfg("app_influx_port", default=8086, cast=int)
        self.from_cfg("app_voltage_reference", default=228, cast=float)

    def from_env(self, key, default=None, cast=None):
        """Register *key* (lower-cased) from the process environment."""
        self.keys[key.lower()] = self._get_key(
            key, os.environ, "env variable", default, cast)

    def from_cfg(self, key, default=None, cast=None):
        """Register *key* (lower-cased) from the loaded JSON config."""
        self.keys[key.lower()] = self._get_key(
            key, self._json_config, "config value", default, cast)

    def _get_key(self, key, source, source_name, default=None, cast=None):
        """Look *key* up in *source*, fall back to *default*, or exit(1)."""
        if key in source:
            resolved = source[key]
        elif default is not None:
            log.debug(f"{source_name} '{key}' not set, using default '{default}'")
            resolved = default
        else:
            log.error(f"required {source_name} '{key}' not set ")
            sys.exit(1)
        assert resolved is not None
        # potentially cast retrieved value
        return cast(resolved) if cast is not None else resolved

    def _read_config(self, config_path):
        """Load and return the JSON document at *config_path*."""
        with open(config_path, "r") as fh:
            document = json.load(fh)
        log.debug(f"config file '{config_path}' loaded: {document}")
        return document

    def __getattr__(self, name: str):
        """Enables the retrieval of configuration keys as instance fields"""
        try:
            return self.keys[name]
        except KeyError:
            raise AttributeError(f"no configuration key '{name}'") from None
| 2,130 | 631 |
import os
from pathlib import Path
from types import SimpleNamespace
import pytest
import bentoctl.operator.utils as operator_utils
# import bentoctl.operator.utils.git
# from bentoctl.operator import utils as operator_utils
def test_get_bentoctl_home(tmp_path):
    """BENTOCTL_HOME controls the home dir and seeds an 'operators' subdir."""
    os.environ["BENTOCTL_HOME"] = str(tmp_path)
    home = operator_utils._get_bentoctl_home()
    assert home == tmp_path
    assert (tmp_path / "operators").exists()
@pytest.mark.parametrize(
    "official_op, truth", [("aws-lambda", True), ("testop", False)]
)
def test_is_official_operator(official_op, truth):
    """Only names on the official operator list are recognised."""
    assert operator_utils._is_official_operator(official_op) is truth
def test_get_operator_dir_path(tmp_path):
    """Operator dirs resolve to $BENTOCTL_HOME/operators/<name>."""
    os.environ["BENTOCTL_HOME"] = str(tmp_path)
    expected = str(tmp_path / "operators" / "test_operator")
    assert operator_utils._get_operator_dir_path(operator_name="test_operator") == expected
class PatchedRepo:
    """Minimal stand-in for ``git.Repo`` that fakes clone/checkout calls."""

    def __init__(self, repo_path):
        self.repo_path = Path(repo_path)
        # Mimic GitPython's ``repo.git.checkout(...)`` attribute shape.
        self.git = SimpleNamespace(checkout=self.checkout)

    @classmethod
    def clone_from(cls, _, repo_path):
        # The remote URL is ignored; "cloning" just wraps the target path.
        return cls(repo_path)

    def checkout(self, branch):
        # Checking a branch out is simulated by touching a marker file.
        (self.repo_path / branch).touch()
| 1,256 | 427 |
import unittest
from test.utils import dedent
from bogi.parser.tail import TailParser
from bogi.parser.tail_transformer import MessageBody, ContentLine, InputFileRef, MultipartField, Header, ResponseHandler, ResponseReference
class TailParserTests(unittest.TestCase):
    """Unit tests for TailParser: request bodies, multipart sections,
    response handlers and response references."""

    def test_content_lines(self):
        """Plain text lines parse into ContentLine entries."""
        body = dedent('''
            {
              "foo": "bar",
              "param2": "value2"
            }''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.message_body, MessageBody([
            ContentLine('{'),
            ContentLine('  "foo": "bar",'),
            ContentLine('  "param2": "value2"'),
            ContentLine('}')
        ]))

    def test_file_refs(self):
        """'< path' lines parse into InputFileRef entries."""
        body = dedent('''
            < body.json
            < /home/file''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.message_body, MessageBody([
            InputFileRef('body.json'),
            InputFileRef('/home/file')
        ]))

    def test_message_body(self):
        """Content lines and file refs may be interleaved in one body."""
        body = dedent('''
            {
              "key": "val"
            }
            < body.json
            < /home/file
            testing''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.message_body, MessageBody([
            ContentLine('{'),
            ContentLine('  "key": "val"'),
            ContentLine('}'),
            InputFileRef('body.json'),
            InputFileRef('/home/file'),
            ContentLine('testing')
        ]))

    def test_multipart(self):
        """Boundary-delimited sections parse into MultipartField entries."""
        body = dedent('''
            --abcd
            Content-Disposition: form-data; name="text"
            Text
            --abcd
            Content-Disposition: form-data; name="file_to_send"; filename="input.txt"
            < ./input.txt
            --abcd--''')
        tail = TailParser(multipart_boundary='abcd').parse(body)
        self.assertEqual(tail.message_body, MessageBody([
            MultipartField(
                headers=[Header(field='Content-Disposition', value='form-data; name="text"')],
                messages=[ContentLine(content='Text')]),
            MultipartField(
                headers=[Header(field='Content-Disposition', value='form-data; name="file_to_send"; filename="input.txt"')],
                messages=[InputFileRef(path='./input.txt')])
        ]))

    def test_response_handler_script(self):
        """'> {% ... %}' parses into an inline-script ResponseHandler."""
        script = dedent('''
            console.log('Multiline script');
            client.global.set("auth", response.body.token);
        ''')
        body = dedent('''
            > {% ''' + script + ''' %}''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.response_handler, ResponseHandler(script=script.strip(), path=None, expected_status_code=None))

    def test_response_handler_path(self):
        """'> path' parses into a file-path ResponseHandler."""
        body = dedent('''
            > ./script.js''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.response_handler, ResponseHandler(script=None, path='./script.js', expected_status_code=None))

    def test_response_status_code(self):
        """'>STATUS nnn' parses into an expected-status ResponseHandler."""
        body = dedent('''
            >STATUS 301''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.response_handler, ResponseHandler(script=None, path=None, expected_status_code=301))

    def test_response_ref(self):
        """'<> path' parses into a ResponseReference."""
        body = dedent('''
            <> ./previous-response.200.json''')
        tail = TailParser().parse(body)
        self.assertEqual(tail.response_ref, ResponseReference(path='./previous-response.200.json'))
| 3,445 | 1,008 |
#!/usr/bin/env python3
import subprocess
import re
import xml.etree.ElementTree as et
import datetime as dt
import pipes
import os
import configparser
import sys
from typing import Iterable, Optional, Dict
class TmuxError(Exception):
    """Base class for tmux-related failures in this script."""
    pass
class TmuxCommandError(TmuxError):
    """Raised when invoking the tmux binary fails."""
    pass
class TmuxParseError(TmuxError):
    """Raised when tmux output does not match the expected line format."""
    pass
class ConfigError(Exception):
    """Raised when no usable attach-command template can be determined."""
    pass
def list_sessions_cmd() -> str:
    """Run ``tmux list-sessions`` and return its raw stdout.

    Returns an empty string when no tmux server is running; raises
    TmuxCommandError for any other failure.
    """
    command = [
        'tmux',
        'list-sessions',
        '-F',
        '#{session_attached} #{session_created} #{session_name}',
    ]
    try:
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        raw_out, raw_err = process.communicate()
        out = raw_out.decode('utf-8')
        err = raw_err.decode('utf-8')
    except Exception as ex:
        raise TmuxCommandError(repr(ex).strip())

    if process.returncode == 0:
        return out  # type: ignore
    # tmux reports "no server" differently depending on its version;
    # both spellings mean "zero sessions", not an error.
    if 'no server running' in err:
        return ''
    if re.search(r'^error connecting to .+ \(No such file or directory\)$', err):
        return ''
    raise TmuxCommandError(err.strip())
def parse_sessions(text: str) -> Iterable[Dict[str, str]]:
    """Parse every line of ``list_sessions_cmd`` output into a field dict."""
    return [parse_session_line(raw) for raw in text.splitlines()]
def parse_session_line(line: str) -> Dict[str, str]:
    """Split one '<attached> <timestamp> <name>' line into a dict."""
    pattern = '^(?P<attached>[0-9]+) (?P<timestamp>[0-9]+) (?P<name>.*)$'
    match = re.search(pattern, line)
    if match is None:
        raise TmuxParseError('parse error: ' + line)
    return match.groupdict()
def session_list_to_xml(sessions: Iterable[dict]) -> bytes:
    """Render the session dicts as an openbox pipe-menu XML document."""
    if not sessions:
        return error_message_to_xml('no sessions')

    root = et.Element('openbox_pipe_menu')
    cmd_template = reattach_cmd_template()
    for session in sessions:
        entry = et.SubElement(root, 'item', {'label': session_label(session)})
        action = et.SubElement(entry, 'action', {'name': 'Execute'})
        command = et.SubElement(action, 'command')
        # the command is parsed with the g_shell_parse_argv funcion
        # and therefore must have shell quoting (even though it does
        # not spawn a shell)
        command.text = cmd_template % pipes.quote(session['name'])
    return et.tostring(root)  # type: ignore
def session_label(session: Dict[str, str]) -> str:
    """Build a human-readable menu label for one parsed session dict."""
    created = dt.datetime.fromtimestamp(float(session['timestamp']))
    suffix = ' (attached)' if int(session['attached']) else ''
    return session['name'] + ' started at ' + created.isoformat() + suffix
def reattach_cmd_template() -> str:
    """Return a '%s'-style shell command template that reattaches a session.

    Prefers the template from ~/.config/openbox/tmux.ini; falls back to the
    first terminal emulator found on $PATH.
    """
    config = configparser.RawConfigParser()
    config.read(os.path.expanduser('~/.config/openbox/tmux.ini'))
    try:
        return config.get('pipe-menu', 'attach-command-template')
    except (configparser.NoSectionError, configparser.NoOptionError):
        pass
    terminal = find_executable(['urxvt', 'xterm'])
    if terminal is None:
        raise ConfigError("can't find terminal emulator")
    return terminal + ' -e tmux attach -d -t %s'
def error_message_to_xml(message: str) -> bytes:
    """Wrap *message* as the single item of an openbox pipe-menu document."""
    root = et.Element('openbox_pipe_menu')
    et.SubElement(root, 'item', {'label': message})
    return et.tostring(root)  # type: ignore
def find_executable(names: Iterable[str]) -> Optional[str]:
    """Return the first of *names* found on $PATH, or None."""
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep)
    for candidate in names:
        for directory in search_dirs:
            full_path = os.path.join(directory, candidate)
            if os.path.exists(full_path):
                return full_path
    return None
def main() -> None:
    """Entry point: write the pipe-menu XML document to stdout."""
    try:
        sessions = parse_sessions(list_sessions_cmd())
        output = session_list_to_xml(sessions)
    except (TmuxError, ConfigError) as err:
        # Degrade gracefully to a one-item menu showing the failure.
        output = error_message_to_xml(repr(err))
    sys.stdout.buffer.write(output)


if __name__ == '__main__':
    main()
| 3,830 | 1,219 |
"""
Tutorial reference: https://www.kdnuggets.com/2020/09/implementing-deep-learning-library-scratch-python.html
Original Library: https://github.com/parmeet/dll_numpy
Author: DrakenWan 2020
"""
import core as DL
import utilities
import numpy as np
if __name__ == "__main__":
    batch_size = 20
    num_epochs = 200
    samples_per_class = 100
    num_classes = 3
    hidden_units = 100

    # Three-class spiral toy data set.
    data, target = utilities.genSpiralData(samples_per_class, num_classes)

    # 2 -> hidden -> num_classes MLP with a single ReLU nonlinearity.
    model = utilities.Model()
    for layer in (DL.Linear(2, hidden_units),
                  DL.ReLU(),
                  DL.Linear(hidden_units, num_classes)):
        model.add(layer)

    optim = DL.SGD(model.parameters, lr=1.0, weight_decay=0.001, momentum=.9)
    loss_fn = DL.SoftmaxWithLoss()
    model.fit(data, target, batch_size, num_epochs, optim, loss_fn)

    # Training-set accuracy over the hardmax predictions.
    predicted_labels = np.argmax(model.predict(data), axis=1)
    accuracy = np.sum(predicted_labels == target) / len(target)
    print("Model Accuracy = {}".format(accuracy))

    utilities.plot2DDataWithDecisionBoundary(data, target, model)
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.decorator`` module contains the the @srpc decorator and its
helper methods. The @srpc decorator is responsible for tagging methods as remote
procedure calls extracting method's input and output types.
It's possible to create custom decorators that wrap the @srpc decorator in order
to have a more elegant way of passing frequently-used parameter values. The @rpc
decorator is a simple example of this.
"""
from spyne import MethodDescriptor
from spyne._base import BODY_STYLE_EMPTY
from spyne._base import BODY_STYLE_WRAPPED
from spyne._base import BODY_STYLE_BARE
from spyne.model.complex import ComplexModel
from spyne.model.complex import TypeInfo
from spyne.const import add_request_suffix
from spyne.const import RESPONSE_SUFFIX
from spyne.const import RESULT_SUFFIX
from spyne.const.xml_ns import DEFAULT_NS
def _produce_input_message(f, params, kparams, _in_message_name,
                           _in_variable_names, no_ctx, args):
    # Build the ComplexModel subclass describing the wire format of the
    # decorated callable's input arguments.
    _body_style = _validate_body_style(kparams)
    # Skip the implicit first argument (ctx) unless @srpc / _no_ctx asked
    # for a context-free callable.
    if no_ctx is True:
        arg_start=0
    else:
        arg_start=1
    if args is None:
        try:
            # Introspect the callable for its positional parameter names.
            # (func_code / func_name: this module targets Python 2.)
            argcount = f.func_code.co_argcount
            param_names = f.func_code.co_varnames[arg_start:argcount]
        except AttributeError,e:
            raise TypeError(
                "It's not possible to instrospect builtins. You must pass a "
                "sequence of argument names as the '_args' argument to the "
                "rpc decorator to manually denote the arguments that this "
                "function accepts."
            )
    else:
        # Argument names were given explicitly via '_args'.
        argcount = len(args)
        param_names = args
    in_params = TypeInfo()
    try:
        # Pair each (possibly publicly renamed) argument with its type.
        for i, n in enumerate(param_names):
            if args is None or n in args:
                e0 = _in_variable_names.get(n,n)
                e1 = params[i]
                in_params[e0] = e1
    except IndexError, e:
        raise Exception("The parameter numbers of the %r function and its "
                        "decorator mismatch." % f.func_name)
    # A "{ns}name" message name carries its own namespace.
    ns = DEFAULT_NS
    if _in_message_name.startswith("{"):
        ns, _, in_message_name = _in_message_name[1:].partition("}")
    message = None
    if _body_style == 'bare':
        if len(in_params) > 1:
            raise Exception("body_style='bare' can handle at most one function "
                            "argument.")
        # NOTE(review): in_param is assigned but never used.
        in_param = None
        if len(in_params) == 1:
            message, = in_params.values()
            message = message.customize(sub_name=_in_message_name, sub_ns=ns)
            assert message.Attributes.sub_name is not None
        # This dates from a time when body_style='bare' could support more
        # than one parameter. Maybe one day someone will bring that back.
        else:
            message = ComplexModel.produce(type_name=_in_message_name,
                                namespace=ns, members=in_params)
    else:
        # 'wrapped' style: arguments go inside a generated wrapper class.
        message = ComplexModel.produce(type_name=_in_message_name,
                            namespace=ns, members=in_params)
    message.__namespace__ = ns
    return message
def _validate_body_style(kparams):
_body_style = kparams.get('_body_style')
_soap_body_style = kparams.get('_soap_body_style')
if _body_style is None:
_body_style = 'wrapped'
elif not (_body_style in ('wrapped', 'bare')):
raise ValueError("body_style must be one of ('wrapped', 'bare')")
elif _soap_body_style == 'document':
_body_style = 'bare'
elif _soap_body_style == 'rpc':
_body_style = 'wrapped'
elif _soap_body_style is None:
pass
else:
raise ValueError("soap_body_style must be one of ('rpc', 'document')")
assert _body_style in ('wrapped', 'bare')
return _body_style
def _produce_output_message(func_name, kparams):
    """Generate an output message for "rpc"-style API methods.

    This message is a wrapper to the declared return type.
    """
    _returns = kparams.get('_returns')
    _body_style = _validate_body_style(kparams)
    _out_message_name = kparams.get(
        '_out_message', '%s%s' % (func_name, RESPONSE_SUFFIX))

    out_params = TypeInfo()
    if _returns and _body_style == 'wrapped':
        if isinstance(_returns, (list, tuple)):
            # One output variable per member of the returned sequence.
            default_names = ['%s%s%d' % (func_name, RESULT_SUFFIX, i)
                                            for i in range(len(_returns))]
            _out_variable_names = kparams.get(
                '_out_variable_names', default_names)
            assert (len(_returns) == len(_out_variable_names))
            out_params = TypeInfo(zip(_out_variable_names, _returns))
        else:
            _out_variable_name = kparams.get(
                '_out_variable_name', '%s%s' % (func_name, RESULT_SUFFIX))
            out_params[_out_variable_name] = _returns

    # A "{ns}name" message name carries its own namespace.
    ns = DEFAULT_NS
    if _out_message_name.startswith("{"):
        ns = _out_message_name[1:].partition("}")[0]

    if _body_style == 'bare' and _returns is not None:
        message = _returns.customize(sub_name=_out_message_name, sub_ns=ns)
    else:
        message = ComplexModel.produce(type_name=_out_message_name,
                                       namespace=ns,
                                       members=out_params)
        message.Attributes._wrapper = True

    message.__namespace__ = ns  # FIXME: is this necessary?
    return message
def srpc(*params, **kparams):
    '''Static variant of :func:`spyne.decorator.rpc`; see it for details.

    The initial "s" stands for "static". In Spyne terms that means no
    implicit first argument is passed to the user callable -- i.e. the
    method is "stateless" rather than static. Meant for existing functions
    whose signatures can't be changed.
    '''
    kparams["_no_ctx"] = True
    return rpc(*params, **kparams)
def rpc(*params, **kparams):
    '''Method decorator to tag a method as a remote procedure call in a
    :class:`spyne.service.ServiceBase` subclass.
    You should use the :class:`spyne.server.null.NullServer` transport if you
    want to call the methods directly. You can also use the 'function' attribute
    of the returned object to call the function itself.
    ```_operation_name``` vs ```_in_message_name```:
    Soap clients(SoapUI, Savon, suds) will use the operation name as the function name.
    The name of the input message(_in_message_name) is irrelevant when interfacing in this
    manner; this is because the clients mostly wrap around it. However, the soap xml request
    only uses the input message when posting with the soap server; the other protocols only
    use the input message as well. ```_operation_name``` cannot be used with ```_in_message_name```
    :param _returns: Denotes The return type of the function. It can be a type or
        a sequence of types for functions that have multiple return values.
    :param _in_header: A type or an iterable of types that that this method
        accepts as incoming header.
    :param _out_header: A type or an iterable of types that that this method
        sends as outgoing header.
    :param _operation_name: The function's soap operation name. The operation and
        SoapAction names will equal the value of ```_operation_name```.
    :param _in_message_name: The public name of the function's input message. If not set
        explicitly in @srpc, the input message will equal the value of
        ```_operation_name + REQUEST_SUFFIX```.
    :param _in_variable_names: The public names of the function arguments. It's
        a dict that maps argument names in the code to public ones.
    :param _out_variable_name: The public name of the function response object.
        It's a string. Ignored when ``_body_style != 'wrapped'`` or ``_returns``
        is a sequence.
    :param _out_variable_names: The public name of the function response object.
        It's a sequence of strings. Ignored when ``_body_style != 'wrapped'`` or
        or ``_returns`` is not a sequence. Must be the same length as
        ``_returns``.
    :param _body_style: One of ``('bare', 'wrapped')``. Default: ``'wrapped'``.
        In wrapped mode, wraps response objects in an additional class.
    :param _soap_body_style: One of ('rpc', 'document'). Default ``'document'``.
        ``_soap_body_style='document'`` is an alias for ``_body_style='wrapped'``.
        ``_soap_body_style='rpc'`` is an alias for ``_body_style='bare'``.
    :param _port_type: Soap port type string.
    :param _no_ctx: Don't pass implicit ctx object to the user method.
    :param _udp: Short for UserDefinedProperties, you can use this to mark the
        method with arbitrary metadata.
    :param _aux: The auxiliary backend to run this method. ``None`` if primary.
    :param _throws: A sequence of exceptions that this function can throw. No
        real functionality besides publishing this information in interface
        documents.
    :param _args: the name of the arguments to expose.
    '''
    # The decoration is deferred: @rpc(...) returns `explain`, which wraps
    # the user callable in `explain_method`; the MethodDescriptor is only
    # built when the service metaclass calls it with _default_function_name.
    def explain(f):
        def explain_method(*args, **kwargs):
            retval = None
            function_name = kwargs['_default_function_name']
            _is_callback = kparams.get('_is_callback', False)
            _is_async = kparams.get('_is_async', False)
            _mtom = kparams.get('_mtom', False)
            _in_header = kparams.get('_in_header', None)
            _out_header = kparams.get('_out_header', None)
            _port_type = kparams.get('_soap_port_type', None)
            _no_ctx = kparams.get('_no_ctx', False)
            _udp = kparams.get('_udp', None)
            _aux = kparams.get('_aux', None)
            _pattern = kparams.get("_pattern",None)
            _patterns = kparams.get("_patterns",[])
            _args = kparams.get("_args",None)
            # '_throws' and '_faults' are synonyms; exactly one may be given.
            # NOTE(review): the two string literals below concatenate without
            # a separating space ("argumentsshould"); unfixable in a
            # comment-only change.
            _faults = None
            if ('_faults' in kparams) and ('_throws' in kparams):
                raise ValueError("only one of '_throws ' or '_faults' arguments"
                                 "should be given, as they're synonyms.")
            elif '_faults' in kparams:
                _faults = kparams.get('_faults', None)
            elif '_throws' in kparams:
                _faults = kparams.get('_throws', None)
            # '_operation_name' and '_in_message_name' are mutually exclusive
            # (see the docstring); the default input message name is the
            # operation name plus the request suffix.
            _in_message_name = kparams.get('_in_message_name', function_name)
            _operation_name = kparams.get('_operation_name', function_name)
            if _operation_name != function_name and _in_message_name != function_name:
                raise ValueError("only one of '_operation_name' and '_in_message_name' "
                                 "arguments should be given")
            if _in_message_name == function_name:
                _in_message_name = add_request_suffix(_operation_name)
            _in_variable_names = kparams.get('_in_variable_names', {})
            # Build the input/output message classes from the declared types.
            in_message = _produce_input_message(f, params, kparams,
                _in_message_name, _in_variable_names, _no_ctx, _args)
            out_message = _produce_output_message(function_name, kparams)
            doc = getattr(f, '__doc__')
            if _pattern is not None and _patterns != []:
                raise ValueError("only one of '_pattern' and '__patterns' "
                                 "arguments should be given")
            if _pattern is not None:
                _patterns = [_pattern]
            # A 'bare' input message with no members at all downgrades the
            # body style to "empty".
            body_style = BODY_STYLE_WRAPPED
            if _validate_body_style(kparams) == 'bare':
                body_style = BODY_STYLE_BARE
                t = in_message
                from spyne.model import ComplexModelBase
                if issubclass(t, ComplexModelBase) and len(t._type_info) == 0:
                    body_style = BODY_STYLE_EMPTY
            retval = MethodDescriptor(f,
                in_message, out_message, doc, _is_callback, _is_async,
                _mtom, _in_header, _out_header, _faults,
                port_type=_port_type, no_ctx=_no_ctx, udp=_udp,
                class_key=function_name, aux=_aux, patterns=_patterns,
                body_style=body_style, args=_args,
                operation_name=_operation_name)
            return retval
        explain_method.__doc__ = f.__doc__
        explain_method._is_rpc = True
        return explain_method
    return explain
| 13,454 | 3,884 |
from . import dataRoute
from flask import render_template, request
from utils import dataManagment
import dateutil
import datetime
@dataRoute.route('/head', methods=['POST'])
def create_head():
    """Render the first N rows of the data set as an HTML table."""
    row_count = request.form['number']
    head_df = dataManagment.data_frame_head(row_count)
    return render_template('data.html', tables=[head_df.to_html()], isCentered=True)
@dataRoute.route('/sex/country', methods=['POST'])
def create_second_request():
    """Render deaths filtered by selected sex and country."""
    selected_sex = request.form.get('selection-sex')
    selected_country = request.form.get('selection-country')
    deaths_df = dataManagment.get_sex_country_deaths(selected_country, selected_sex)
    return render_template('data.html', tables=[deaths_df.to_html()], isCentered=True)
@dataRoute.route('/country/dates', methods=['POST'])
def create_third_request():
    """Render data for a country between two form-supplied dates."""
    # Bug fix: the module-level ``import dateutil`` does NOT expose the
    # ``parser`` submodule (python-dateutil's package __init__ does not
    # import it), so ``dateutil.parser.parse`` raised AttributeError at
    # request time. Import the submodule explicitly here.
    from dateutil import parser as date_parser

    # ``.date()`` replaces the manual datetime.date(y, m, d) reconstruction.
    firstDate = date_parser.parse(request.form.get(
        'firstDate'), dayfirst=False).date()
    secondDate = date_parser.parse(
        request.form.get('secondDate'), dayfirst=False).date()
    pais = request.form.get('selection-country')
    df = dataManagment.get_country_dates(pais, firstDate, secondDate)
    return render_template('data.html', tables=[df.to_html()], isCentered=True)
@dataRoute.route('/count/country', methods=['POST'])
def create_fourth_request():
    """Render infection counts per country for the selected sex."""
    selected_sex = request.form.get('selection-sex')
    counts_df = dataManagment.get_contagios_por_pais(selected_sex)
    return render_template('data.html', tables=[counts_df.to_html()], isCentered=False)
@dataRoute.route('/count/state', methods=['POST'])
def create_fifth_request():
    """Render counts for the selected state."""
    selected_state = request.form.get('selection-state')
    state_df = dataManagment.get_estado_por_pais(selected_state)
    return render_template('data.html', tables=[state_df.to_html()], isCentered=False)
@dataRoute.route('/resumen/departamento', methods=['POST'])
def create_sixth_request():
    """Render the per-department summary for the given country."""
    selected_country = request.form.get('country')
    summary_df = dataManagment.get_resumen(selected_country)
    return render_template('data.html', tables=[summary_df.to_html()], isCentered=False)
@dataRoute.route('/muertes/ciudad', methods=['POST'])
def create_seventh_request():
    """Render deaths for the selected city."""
    selected_city = request.form.get('selection-city')
    city_df = dataManagment.get_muertes_por_ciudad(selected_city)
    return render_template('data.html', tables=[city_df.to_html()], isCentered=False)
@dataRoute.route('/test', methods=['GET'])
def create_test():
    """Smoke-test route rendering the sample data frame."""
    sample_df = dataManagment.test()
    return render_template('data.html', tables=[sample_df.to_html()], isCentered=True)
# File Storage related class's
from .CacheFile import CacheFile
from .ScriptBlock import ScriptBlock
from .ToolchainFile import ToolchainFile
| 144 | 38 |
"""Auto-generated file, do not edit by hand. MY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MY = PhoneMetadata(id='MY', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[1369]\\d{2,4}', possible_length=(3, 4, 5)),
toll_free=PhoneNumberDesc(national_number_pattern='112|999', example_number='112', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='112|999', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[01348]|1(?:[02]|1[128]|311)|2(?:0[125]|[13-6]|2\\d{0,2})|(?:3[1-35-79]|7[45])\\d\\d?|5(?:454|5\\d\\d?|77|888|999?)|8(?:18?|2|8[18])|9(?:[124]\\d?|68|71|9[0679]))|66628|99[1-469]|13[5-7]|(?:1(?:0[569]|309|5[12]|7[136-9]|9[03])|3[23679]\\d\\d)\\d', example_number='100', possible_length=(3, 4, 5)),
standard_rate=PhoneNumberDesc(national_number_pattern='666\\d\\d', example_number='66600', possible_length=(5,)),
sms_services=PhoneNumberDesc(national_number_pattern='(?:3[23679]\\d|666)\\d\\d', example_number='32000', possible_length=(5,)),
short_data=True)
| 1,177 | 553 |
import OpenGL.GL as gl
from OpenGL.GL import shaders
class lazy_class_attribute(object):
    """Descriptor that computes a value once and caches it on the class."""

    def __init__(self, function):
        self.fget = function

    def __get__(self, obj, cls):
        computed = self.fget(obj or cls)
        # Note: store on the class object, not the instance, so the
        # descriptor is shadowed after the first access regardless of
        # whether access was class-level or instance-level.
        setattr(cls, self.fget.__name__, computed)
        return computed
class ObjectUsingShaderProgram(object):
    # A typical pass-through vertex shader implementation.
    VERTEX_CODE = """
    #version 330 core
    layout(location = 0) in vec4 vertexPosition;
    void main(){
        gl_Position = vertexPosition;
    }
    """
    # A typical fragment shader.
    # Draws every fragment in white.
    FRAGMENT_CODE = """
    #version 330 core
    out lowp vec4 out_color;
    void main(){
        out_color = vec4(1, 1, 1, 1);
    }
    """
    # Compiled lazily on first access; the lazy_class_attribute descriptor
    # then caches the program object on the class, so "compiling!" prints
    # at most once per process.
    @lazy_class_attribute
    def shader_program(self):
        print("compiling!")
        return shaders.compileProgram(
            shaders.compileShader(self.VERTEX_CODE, gl.GL_VERTEX_SHADER),
            shaders.compileShader(self.FRAGMENT_CODE, gl.GL_FRAGMENT_SHADER),
        )
| 1,151 | 518 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import sys
import time
from itertools import islice
from uuid import uuid4
import emojis
from github import Github
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram import InlineQueryResultArticle, ParseMode
from telegram import InputTextMessageContent
from telegram.ext import Updater, InlineQueryHandler, CommandHandler
logging.basicConfig(format='%(asctime)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
access_token = os.environ.get("access_token")
g = Github(access_token)
def start(update, context):
    """/start command handler: reply with a greeting."""
    update.message.reply_text('Hi!')
def help(update, context):
    """/help command handler: reply with the help text."""
    update.message.reply_text('Help!')
def fetch_url(query_term, query_type):
    """Dispatch a query to the user or repo formatter by type code.

    Returns the formatted summary, or the sentinel string "NIL" for an
    unrecognised query type.
    """
    if query_type in ("u", "user"):
        return get_user(query_term)
    if query_type in ("r", "repo"):
        return get_repo(query_term)
    return "NIL"
def get_repo(query):
    """Format a GitHub repository as a Markdown/emoji summary string."""
    repo = g.get_repo(query)
    owner = repo.owner
    summary = (
        f"🗄 [{repo.name}]({repo.html_url}) by [{owner.name}]({owner.html_url})"
        f" in #{repo.language}\n⭐️ {repo.stargazers_count} Stars\n📥 [Clone]({repo.clone_url})"
    )
    return summary
def get_user(query):
    """Format a GitHub user profile as a Markdown/emoji summary.

    Fix: ``user.name``, ``user.location`` and ``user.bio`` are optional on
    GitHub profiles and come back as ``None`` when unset, which previously
    made the string concatenation raise ``TypeError``. Fall back to the
    login / placeholder text instead.
    """
    user = g.get_user(query)
    name = "👥 " + (user.name or query)
    location = "📌 " + (user.location or "unknown")
    bio = "🎭 " + (user.bio or "")
    response = "{}\n{}\n{}".format(name, location, bio)
    response += "\n🔗 https://github.com/{}".format(query)
    return response
def search_callback(update, context):
    # /search handler: "/search <u|r> <term>" looks a user or repository up
    # and replies with a summary plus profile/repository link buttons.
    user_says = context.args
    if len(user_says):
        chat_id = update.message.chat.id
        query_type = str(user_says[0])
        query_term = str(user_says[1:][0])
        result = fetch_url(query_term, query_type)
        # NOTE(review): the clone URL is recovered by string-splitting the
        # rendered Markdown -- fragile. For a user query ("u") there is no
        # "[Clone](" marker, so this yields the whole text minus its last
        # character; it is overwritten in the "u" branch below.
        link = result.split("[Clone](")[-1][:-1]
        # NOTE(review): `data` is computed but unused (only the commented-out
        # repo_name below ever referenced it).
        data = result.split(".")[1].split("/")
        base = "https://github.com/"
        username = query_term
        # repo_name = data[2]
        url = base + username
        if query_type == "u":
            button_text = "🗄 repositories"
            link = url + "?tab=repositories"
        else:
            button_text = "🗄 repository"
        markup = InlineKeyboardMarkup(
            [[InlineKeyboardButton("👤 profile", url=url), InlineKeyboardButton(button_text, url=link)]])
        context.bot.send_message(chat_id=chat_id, text="{}".format(result), reply_markup=markup,
                                 parse_mode=ParseMode.MARKDOWN)
    else:
        return
def download(update, context):
    """/download handler: send a repository's master-branch zip archive."""
    repo_slug = str(context.args[0])
    archive_url = f"https://github.com/{repo_slug}/archive/master.zip"
    caption = f"✅ download successful for repository: {repo_slug}"
    # TODO: the previously-sketched "repository not found" error path is
    # not implemented; send_document will raise on a bad URL.
    context.bot.send_document(chat_id=update.message.chat.id,
                              document=archive_url, caption=caption)
def emoji_callback(update, context):
    """/emoji handler: stream every GitHub emoji shortcode as a message."""
    chat_id = update.message.chat.id
    for shortcode in g.get_emojis():
        rendered = emojis.encode(f":{shortcode}:")
        context.bot.send_message(chat_id=chat_id, text=rendered)
        time.sleep(0.1)  # crude rate limit to avoid flooding Telegram
def inlinequery(update, context):
    """Answer inline queries with up to 10 repositories matching the keywords.

    The query text is a comma-separated keyword list; keywords are searched
    in repository readmes and descriptions, ordered by stars descending.
    """
    try:
        query = update.inline_query.query
        keywords = [keyword.strip() for keyword in query.split(',')]
    except Exception:
        # Malformed or absent inline query; nothing to answer.
        # (Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return
    query = '+'.join(keywords) + '+in:readme+in:description'
    result = g.search_repositories(query, 'stars', 'desc')
    print(f'Found {result.totalCount} repo(s)')
    results = list()
    if result.totalCount == 0:
        title = "No results found."
        content = "No results found."
        results.append(
            InlineQueryResultArticle(
                id=uuid4(),
                title=title,
                input_message_content=InputTextMessageContent(
                    "{}".format(content),
                    parse_mode=ParseMode.MARKDOWN)))
        update.inline_query.answer(results, cache_time=3)
        # Answer once and stop: the original fell through and called
        # answer() a second time with the same result set.
        return
    stop = 10
    for repo in islice(result, 0, stop):
        name = repo.name
        repo_url = repo.html_url
        clone_url = repo.clone_url
        description = repo.description
        stars = repo.stargazers_count
        language = repo.language
        owner_name = repo.owner.name
        owner_url = repo.owner.html_url
        response = f"""🗄 [{name}]({repo_url}) by [{owner_name}]({owner_url})"""
        response += f""" in #{language}\n⭐️ {stars} Stars\n📥 [Clone]({clone_url})"""
        results.append(
            InlineQueryResultArticle(
                id=uuid4(),
                title=name,
                description=description,
                input_message_content=InputTextMessageContent(
                    "{}".format(response),
                    parse_mode=ParseMode.MARKDOWN)))
    update.inline_query.answer(results, cache_time=3)
def error(update, context):
    """Error handler: log any exception raised while processing *update*."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
    """Wire up the Telegram bot handlers and poll until interrupted."""
    # Token from argv takes precedence; fall back to the environment.
    try:
        token = sys.argv[1]
    except IndexError:
        token = os.environ.get("telegram_token")
    updater = Updater(token, use_context=True)
    dispatcher = updater.dispatcher
    # Command handlers first (same registration order as before), then the
    # inline handler and finally the error handler.
    for command, callback in (
            ("start", start),
            ("help", help),
            ("search", search_callback),
            ("emoji", emoji_callback),
            ("download", download)):
        dispatcher.add_handler(CommandHandler(command, callback))
    dispatcher.add_handler(InlineQueryHandler(inlinequery))
    dispatcher.add_error_handler(error)
    updater.start_polling()
    logger.info("Ready to rock..!")
    updater.idle()


if __name__ == '__main__':
    main()
| 6,134 | 2,027 |
from flask import Flask
from celery import Celery
import os
# Flask application shared by the web process and the Celery workers.
app = Flask(__name__)
# Broker/backend endpoints and the session secret come from the
# environment so the same code can run in any deployment.
app.config['CELERY_BROKER_URL'] = os.getenv("CELERY_BROKER_URL")
app.config['RESULT_BACKEND'] = os.getenv("CELERY_RESULT_BACKEND")
app.config['SECRET_KEY'] = os.getenv("SECRET_KEY")
# Celery app named after the Flask app so tasks register under the same
# import path.
celery = Celery(app.import_name,
                backend=app.config['RESULT_BACKEND'],
                broker=app.config['CELERY_BROKER_URL'])
# Mirror the remaining Flask configuration into the Celery configuration.
celery.conf.update(app.config)
| 440 | 173 |
import pygame
import random
pygame.init()
# RGB color palette used by the game.
white = (255, 255, 255)
black = (0, 0, 0)
yellow = (255, 255, 102)
red = (250, 0, 0)  # Other nice red color: 213, 50, 80
green = (152, 251, 152)
blue = (30, 144, 255)  # other nice combo: 50, 151, 213
# Display surface dimensions in pixels.
dis_width = 800
dis_height = 600
dis = pygame.display.set_mode((dis_width, dis_height))
pygame.display.set_caption('Snake Game')
clock = pygame.time.Clock()
snake_block = 10  # side length of one snake segment / food square, in pixels
snake_speed = 15  # frames per second (one snake step per frame)
# Fonts
font_style = pygame.font.SysFont("roboto", 30)
score_font = pygame.font.SysFont("chango", 55)
level_font = pygame.font.SysFont("chango", 55)
def score(score):
    """Render the current score in the top-left corner of the display."""
    rendered = score_font.render("Score: " + str(score), True, blue)
    dis.blit(rendered, [0, 0])
def our_snake(snake_block, snake_list):
    """Draw every segment of the snake as a filled black square."""
    for segment in snake_list:
        pygame.draw.rect(
            dis, black, [segment[0], segment[1], snake_block, snake_block])
def message(msg, color):
    """Render *msg* in *color*, offset toward the display centre."""
    rendered = font_style.render(msg, True, color)
    dis.blit(rendered, [dis_width / 6, dis_height / 3])
def gameLoop():
    """Run the main Snake game loop until the player quits.

    Note: restarting after game over re-enters gameLoop() recursively,
    matching the original control flow.
    """
    end_game = False
    close_game = False
    # Snake head starts at the centre of the display.
    x1 = dis_width / 2
    y1 = dis_height / 2
    x1_change = 0
    y1_change = 0
    snake_List = []
    Snake_Length = 1
    # Food spawns on a 10px grid inside the display bounds.
    # Bug fix: the y coordinate was drawn from dis_width (800) instead of
    # dis_height (600), so food could spawn below the visible area.
    foodx = round(random.randrange(0, dis_width - snake_block) / 10) * 10
    foody = round(random.randrange(0, dis_height - snake_block) / 10) * 10
    while not end_game:
        # Game-over screen: wait for SPACE (restart) or ESC (quit).
        while close_game:
            dis.fill(green)
            message("GAME OVER! Press SPACE to play again or ESC to quit.", red)
            score(Snake_Length - 1)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        end_game = True
                        close_game = False
                    if event.key == pygame.K_SPACE:
                        gameLoop()
        # Input handling: arrow keys steer, closing the window quits.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                end_game = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x1_change = -snake_block
                    y1_change = 0
                elif event.key == pygame.K_RIGHT:
                    x1_change = snake_block
                    y1_change = 0
                elif event.key == pygame.K_UP:
                    x1_change = 0
                    y1_change = -snake_block
                elif event.key == pygame.K_DOWN:
                    x1_change = 0
                    y1_change = snake_block
        # Hitting a wall ends the round.
        if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:
            close_game = True
        x1 += x1_change
        y1 += y1_change
        dis.fill(green)
        pygame.draw.rect(dis, red, [foodx, foody, snake_block, snake_block])
        # Advance the snake: append the new head, trim the tail.
        snake_Head = []
        snake_Head.append(x1)
        snake_Head.append(y1)
        snake_List.append(snake_Head)
        if len(snake_List) > Snake_Length:
            del snake_List[0]
        # Running into your own body ends the round.
        for x in snake_List[:-1]:
            if x == snake_Head:
                close_game = True
        our_snake(snake_block, snake_List)
        score(Snake_Length - 1)
        pygame.display.update()
        # Eating food grows the snake and respawns the food.
        if x1 == foodx and y1 == foody:
            foodx = round(random.randrange(0, dis_width - snake_block) / 10) * 10
            foody = round(random.randrange(0, dis_height - snake_block) / 10) * 10
            Snake_Length += 1
        clock.tick(snake_speed)
    pygame.quit()
    quit()
gameLoop()
| 4,132 | 1,540 |
#C:\Python27\python.exe
#!/usr/bin/env python
# encoding: utf-8
import os
# import subprocess
import SupportFunc as supp
import ReadIndexesFunc as rind
import CollectBcMutFunc as colb
import WriteFunc as wrt
import param
import picks
from TripMain_0_2 import Pdump
def main():
    """Per-index pipeline: split the input FASTQ by index, optionally
    subsample reads, collect barcode dictionaries, and dump the results
    as pickles and CSV files.

    All configuration is read from the ``param`` and ``picks`` modules;
    progress is reported through the SupportFunc logging helpers.
    """
    supp.setup_logging()
    for name in param.indexList:
        index = param.indexList[name]
        # readsStat = {}
        # One working subdirectory per index name.
        if not os.path.exists(os.path.join(picks.workdir, name)): os.makedirs(os.path.join(picks.workdir, name))
        indexFile = os.path.join(picks.workdir, name, "index_{}.fastq".format(index.upper()))
        # indexFiltFile = os.path.join(picks.workdir, name, "filt_index_{}.fastq".format(index.upper()))
        # Reuse an existing non-empty split file; otherwise split now.
        if not os.path.exists(indexFile) or os.stat(indexFile).st_size == 0:
            rind.SplitFastqByIndexes(picks.input_file, indexFile, index.upper(), param.indexError, param.const_1.upper(), param.const_1Error, param.regExpIndex, picks.no_trim_index)
        # Optional random subsampling of the split reads.
        if picks.random_read:
            indexFileRand = os.path.join(picks.workdir, name, "random_index_{}.fastq".format(index.upper()))
            rind.RandomReadIndexes(indexFile, indexFileRand, param.probability)
            indexFile = indexFileRand
        supp.LogInfo("\n\nEnd splitting.\n\n#####################################\n")
        # readsStat[name] = rind.filterShadyReads(indexFile, param.reFilter, indexFiltFile)
        # indexFile = indexFiltFile
        # supp.LogInfo("Filter before: {}, after: {}\n indexFile - {}, indexFiltFile - {}".format(readsStat[name][0], readsStat[name][1], indexFile, indexFiltFile))
        supp.LogInfo('''Processing on: '{}'.\n
Total reads in file '{}': {} reads.\n
Generate dictionary of barcodes.\n'''.format(os.path.basename(indexFile), os.path.basename(indexFile), supp.GetTotalSeqRecords(indexFile)))
        # Barcode dictionary keyed by pI; pickled for later pipeline stages.
        bcDictPI = colb.CollectBarcodeGenome(indexFile, param.barcodeLength, param.readsValue, param.barcodeError, param.const_2.upper(), param.const_2Error, param.regExpBc, picks.merge_indexes, picks.reverse_barcode, param.pmi, param.pmiLength, param.pmiSubst)
        Pdump(bcDictPI, name + "_bcDictPI", picks.PdumpDir)
        # Pdump(readsStat, name + "_readsStat", picks.PdumpDir)
        for pI in bcDictPI:
            csvFile = wrt.WriteBcDictToFile(bcDictPI[pI], os.path.join(picks.workdir, name), indexFile, pI)
            # csvFile_R = wrt.SimpleCsvWriter(None, bcDictPI[pI], os.path.join(picks.workdir, name), indexFile, pI)
            supp.LogInfo(''' I had select the {} unique barcodes.\n
Results writing to file '{}'
in your working directory: '{}'\n'''.format(len(bcDictPI[pI]), csvFile, os.path.join(picks.workdir, name)))
            # Optional R statistics step retained (disabled) from the original:
            # if os.path.exists(param.rscript):
            #     pathToScript = os.path.join(os.getcwd(), "trip_Rstat.R")
            #     option = [csvFile_R, os.path.dirname(csvFile_R), index]
            #     cmd = [param.rscript, pathToScript] + option
            #     subprocess.call(cmd)
            # else:
            #     print("You do not have installed R-session, or you incorrectly specified the path to the Rscript.\nStatistics on barcodes will not be displayed.")
        supp.LogInfo("End processing with: '{}'.\n\n".format(os.path.basename(indexFile)))


if __name__ == "__main__":
    main()
class UdbStorage(object):
    """Abstract persistence interface for a UDB database.

    Subclasses implement the storage primitives; the ``on_*`` event hooks
    are no-ops by default and return ``self`` so calls can be chained.
    """

    def is_available(self):
        """Return True when a persisted snapshot can be loaded."""
        raise NotImplementedError

    def is_capture_events(self):
        """Return True when this backend wants record-level events."""
        return False

    def drop(self):
        """Discard any persisted state."""
        raise NotImplementedError

    def load(self):
        """Load and return the persisted state."""
        raise NotImplementedError

    def save(self, indexes, revision, data):
        """Persist indexes, revision and record data."""
        raise NotImplementedError

    def save_meta(self, indexes, revision):
        """Persist only the metadata (indexes and revision)."""
        raise NotImplementedError

    def on_delete(self, rid):
        """Record-deleted hook; no-op by default."""
        return self

    def on_insert(self, rid, record):
        """Record-inserted hook; no-op by default."""
        return self

    def on_update(self, rid, record, values):
        """Record-updated hook; no-op by default."""
        return self
| 589 | 173 |
"""
This file handles the input of PolyArt.
"""
# if this is the origin file (not imported)
if __name__ == "__main__":
    # print the documentation
    print(__doc__)
    # add sysmessages module location to path
    from sys import path
    path.append('..')
    # run sysmessages (imported purely for its side effects)
    import common.sysmessages
# import parent package
import polyart
# import used functions from math
from math import cos, atan, sin, radians
# ------------------------------------------------------------------------ #
# rotation #
# ------------------------------------------------------------------------ #
# region rotation
def rotate_left(event):
    """Rotate the model anti-clockwise: 22.5° when snapped, else 1°."""
    # Ignore the shortcut while an entry widget has keyboard focus.
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    rotate(-22.5 if polyart.snapped else -1)
def rotate_right(event):
    """Rotate the model clockwise: 22.5° when snapped, else 1°."""
    # Ignore the shortcut while an entry widget has keyboard focus.
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    rotate(22.5 if polyart.snapped else 1)
def rotate(angle):  # assumes anti-clockwise
    """
    Rotates the model by *angle* degrees about polyart.CENTER.

    Positive input is anti-clockwise; the sign flip below converts it to
    the clockwise convention used by the math that follows.
    """
    angle *= -1  # makes it clockwise
    if angle < 0:  # this cleanses the number to ensure it is between 0 and 360
        angle = -(abs(angle) % 360)
    else:
        angle = angle % 360
    new_points = []
    for point in polyart.model_data:
        # Coordinates relative to the centre, with y flipped so that "up"
        # is positive (screen y grows downward).
        x = point[0] - polyart.CENTER[0]
        y = -(point[1] - polyart.CENTER[1])
        # point_rotation = the angle from the center of the model to the point
        if x == 0 and y == 0:  # ignore this point, it is in the centre
            new_points.append((polyart.CENTER[0] + x, polyart.CENTER[1] - y))
        else:
            if x == 0:
                if y > 0:  # it is directly up
                    point_rotation = radians(0 - 90)
                else:  # it is directly down
                    point_rotation = radians(180 - 90)
            elif y == 0:
                if x > 0:  # it is directly right
                    point_rotation = radians(90 - 90)
                else:  # it is directly left
                    point_rotation = radians(270 - 90)
            else:
                # atan(x / y) gives the angle within a quadrant; the added
                # offset selects the quadrant.
                if x > 0 and y > 0:
                    point_rotation = atan(x / y) + radians(0 - 90)
                elif x > 0 and y < 0:
                    point_rotation = atan(x / y) + radians(180 - 90)
                elif x < 0 and y > 0:
                    point_rotation = atan(x / y) + radians(360 - 90)
                else:  # x < 0 and y < 0:
                    point_rotation = atan(x / y) + radians(180 - 90)
            theta = radians(
                angle) - point_rotation  # theta is equal to the rotation of the object added to the angle, minus the model rotation of the point
            radius = polyart.cached_hypot(x, y)  # get distance from the point to the center of the object
            new_xdiff = radius * cos(theta)
            new_ydiff = radius * sin(theta)
            new_points.append((polyart.CENTER[0] + new_xdiff, polyart.CENTER[1] - new_ydiff))
    # update model_data
    polyart.model_data = new_points
    # refresh the canvas
    polyart.refresh()
# endregion
# ------------------------------------------------------------------------ #
# mouse #
# ------------------------------------------------------------------------ #
# region mouse
def left_click(event):
    """
    Either creates a new point, or chooses the point to move (index_moving).
    """
    # if the focus is on an entry widget, return
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    # get mouse position
    mouse = (event.x, event.y)
    # First, see whether the click selects an existing point to drag.
    for i in range(len(polyart.model_data)):
        point = polyart.model_data[i]
        if polyart.distance(mouse, point) <= polyart.POINTSELECTDISTANCE:
            polyart.index_moving = i
            break
    # Identity comparison with None (PEP 8) instead of ``== None``.
    if polyart.index_moving is None:
        # No point selected: the user may be adding a new point on an edge.
        for i in range(len(polyart.model_data)):
            # Parent points of the edge, wrapping around at the last point.
            a = polyart.model_data[i % len(polyart.model_data)]
            c = polyart.model_data[(i + 1) % len(polyart.model_data)]
            if polyart.is_between(a, mouse, c):
                # Insert the new point between the edge's endpoints.
                index = (i + 1) % len(polyart.model_data)
                if polyart.snapped:
                    # position is snapped to the grid
                    position = (polyart.snap(mouse[0]), polyart.snap(mouse[1]))
                else:
                    position = mouse
                polyart.model_data.insert(index, position)
                # refresh the canvas
                polyart.refresh()
                # force only one point to be made
                break
def left_release(event):
    """Deselect the currently dragged point (if any)."""
    # Releases inside entry widgets must not affect the canvas.
    try:
        focused = polyart.ui.root.focus_get()
        if focused.winfo_class() == "Entry":
            return
    except Exception:
        pass
    polyart.index_moving = None
def right_click(event):
    """
    Deletes the point hovered over.
    """
    # if the focus is on an entry widget, return
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    # A polygon needs at least 3 points, and deleting while a point is
    # being dragged would corrupt index_moving. Identity comparison with
    # None (PEP 8) instead of ``== None``.
    if len(polyart.model_data) > 3 and polyart.index_moving is None:
        # get the mouse position
        mouse = (event.x, event.y)
        for point in polyart.model_data:
            if polyart.distance(mouse, point) <= polyart.POINTSELECTDISTANCE:
                # remove the point; force only one deletion per click
                polyart.model_data.remove(point)
                break
        # refresh the canvas
        polyart.refresh()
def motion(event):
    """Drag the selected point (if any) to follow the mouse."""
    cursor = (event.x, event.y)
    if polyart.index_moving is None:
        return
    if polyart.snapped:
        # Snap the dragged point to the grid.
        target = (polyart.snap(cursor[0]), polyart.snap(cursor[1]))
    else:
        target = cursor
    polyart.model_data[polyart.index_moving] = target
    polyart.refresh()
# endregion
# ------------------------------------------------------------------------ #
# movement #
# ------------------------------------------------------------------------ #
# region movement
def left(event):
    """Move the whole model one step to the left."""
    # Keystrokes in entry widgets must not move the model.
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    # A whole grid cell when snapping, otherwise a single pixel.
    step = polyart.GRIDSIZE if polyart.snapped else 1
    polyart.model_data = [(x - step, y) for x, y in polyart.model_data]
    polyart.refresh()
def right(event):
    """Move the whole model one step to the right."""
    # Keystrokes in entry widgets must not move the model.
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    # A whole grid cell when snapping, otherwise a single pixel.
    step = polyart.GRIDSIZE if polyart.snapped else 1
    polyart.model_data = [(x + step, y) for x, y in polyart.model_data]
    polyart.refresh()
def up(event):
    """Move the whole model one step up (screen y decreases)."""
    # Keystrokes in entry widgets must not move the model.
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    # A whole grid cell when snapping, otherwise a single pixel.
    step = polyart.GRIDSIZE if polyart.snapped else 1
    polyart.model_data = [(x, y - step) for x, y in polyart.model_data]
    polyart.refresh()
def down(event):
    """Move the whole model one step down (screen y increases)."""
    # Keystrokes in entry widgets must not move the model.
    try:
        if polyart.ui.root.focus_get().winfo_class() == "Entry":
            return
    except Exception:
        pass
    # A whole grid cell when snapping, otherwise a single pixel.
    step = polyart.GRIDSIZE if polyart.snapped else 1
    polyart.model_data = [(x, y + step) for x, y in polyart.model_data]
    polyart.refresh()
# endregion
# ------------------------------------------------------------------------ #
# binding #
# ------------------------------------------------------------------------ #
# region binding
def bind_inputs():
    """
    Binds the inputs to each function in this file.
    """
    root = polyart.ui.root
    # Mouse: motion tracks drags on the canvas; the buttons add, select
    # and delete points.
    polyart.ui.canvas.bind("<Motion>", motion)
    root.bind("<ButtonPress-1>", left_click)
    root.bind("<ButtonRelease-1>", left_release)
    root.bind("<ButtonPress-3>", right_click)
    # Rotation on E (clockwise) and Q (anti-clockwise).
    root.bind("e", rotate_right)
    root.bind("q", rotate_left)
    # Movement on both the arrow keys and WASD.
    for sequence, handler in (
            ("<Key-Left>", left), ("<Key-Right>", right),
            ("<Key-Up>", up), ("<Key-Down>", down),
            ("<a>", left), ("<d>", right),
            ("<w>", up), ("<s>", down)):
        root.bind(sequence, handler)
| 12,036 | 3,557 |
"""Alphabet-related methods."""
import logging
import numpy
ALPHABETS = {
'protein': '-ACDEFGHIKLMNPQRSTVWY',
'dna': '-ACGT',
'rna': '-ACGU',
'protein_u': '-ACDEFGHIKLMNPQRSTVWYBZX',
'dna_u': '-ACGTRYMKWSBDHVN',
'rna_u': '-ACGURYMKWSBDHVN',
}
logger = logging.getLogger(__name__)
def check_alphabet(alphabet):
    """Return *alphabet* deduplicated and sorted into a canonical string."""
    unique_symbols = sorted(set(alphabet))
    return ''.join(unique_symbols)
def check_alphabet_records(records, alphabet):
    """Filter out records not consistent with alphabet.

    Each record is a (label, sequence) pair; yields only records whose
    sequence uses symbols from *alphabet* exclusively.
    """
    allowed = set(alphabet)
    return (record for record in records if set(record[1]) <= allowed)
def score_alphabet(alphabet, counts):
    """Score for alphabet given counts.

    The score is the total count mass of the alphabet's real symbols
    (gap '-' and stop '*' excluded) divided by log(len(alphabet)), so
    larger alphabets are penalised.
    """
    import math
    symbols = set(alphabet) - set('*-')
    total = sum(counts.get(symbol, 0) for symbol in symbols)
    score = total / math.log(len(alphabet))
    logging.getLogger(__name__).debug('alphabet %r score %r', alphabet, score)
    return score
def guess_alphabet(records):
    """Guess the most plausible alphabet from an iterable of records.

    Flattens all sequences into a per-symbol count, scores every candidate
    in the module-level ALPHABETS table and returns the best-scoring one.
    """
    from collections import Counter
    # Use the module-level ALPHABETS constant directly; the original
    # re-imported it from the ``biodada`` package, which is redundant and
    # risks a circular import.
    data = numpy.array([list(record[1]) for record in records],
                       dtype='U1').flatten()
    counts = Counter(data)
    max_score = float('-inf')
    guess = None  # stays None only if ALPHABETS is empty
    for key, alphabet in ALPHABETS.items():
        score = score_alphabet(alphabet, counts)
        if score > max_score:
            max_score = score
            guess = key
    logger.info('Alphabet guess: %r', guess)
    return ALPHABETS[guess]
| 1,486 | 520 |
import os
import gevent
import redis
from flask import Flask, render_template, send_from_directory
from flask_cors import CORS
from flask_restplus import Api
from flask_sockets import Sockets
from geventwebsocket.exceptions import WebSocketError
from .image_resource import api as images_api
from .thumbnails import SMALL_THUMBNAIL_DIR
# Redis pub/sub channel fanning sensor readings out to websocket clients.
REDIS_CHAN = "sensor_data"

app = Flask(__name__)
# When set, small thumbnails are served from the local filesystem.
app.config["SERVE_LOCAL_IMAGES"] = os.environ.get("SERVE_LOCAL_IMAGES")
CORS(app)
sockets = Sockets(app)

# Redis is optional; the sensor-data websocket is only registered when a
# REDIS_URL is configured.
REDIS_URL = os.environ.get("REDIS_URL")
if REDIS_URL:
    redis_conn = redis.StrictRedis.from_url(REDIS_URL)


@app.route("/")
def splash():
    """Landing page."""
    return render_template("splash.html")


if app.config["SERVE_LOCAL_IMAGES"]:
    @app.route("/images/small-thumbnail/<path:filename>")
    def thumbnail(filename):
        """Serve a locally generated small thumbnail file."""
        return send_from_directory(str(SMALL_THUMBNAIL_DIR), str(filename))

if REDIS_URL:
    @sockets.route("/sensor_data")
    def sensor_data_route(ws):
        """Bidirectional sensor-data websocket backed by Redis pub/sub."""
        def publish():
            # Forward every frame received from this client to Redis.
            while not ws.closed:
                data = ws.receive()
                if data:
                    # print(ws, "publish", data, type(data))
                    redis_conn.publish(REDIS_CHAN, data)

        def subscribe():
            # Relay every published message back down this websocket.
            pubsub = redis_conn.pubsub()
            pubsub.subscribe(REDIS_CHAN)
            for message in pubsub.listen():
                if message["type"] == "message":
                    data = message.get("data")
                    # print(ws, "send", data, type(data))
                    try:
                        ws.send(data.decode())
                    except WebSocketError:
                        # Client disconnected; stop relaying.
                        return

        # The subscriber runs in a greenlet; publishing occupies this
        # handler until the socket closes.
        gevent.spawn(subscribe)
        publish()


api = Api(app, doc="/docs/", title="Matrix Image Gallery API", version="0.1")
api.add_namespace(images_api)
| 1,813 | 574 |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
class multi_head_network(tf.keras.Model):
    """The convolutional network used to compute the agent's Q-values.

    Standard DQN trunk (three conv layers plus one dense layer) followed by
    a dense layer producing ``num_actions * num_heads`` values that are
    reshaped into per-head Q-value slices.
    """

    def __init__(self, num_actions, num_heads, network_type, name=None, **kwargs):
        """Creates the layers used for calculating Q-values.

        Args:
            num_actions: int, number of actions per head.
            num_heads: int, number of Q-value heads.
            network_type: namedtuple type used to wrap the call() outputs.
            name: str, optional Keras model name.
            **kwargs: unused; accepted for interface compatibility.
        """
        super(multi_head_network, self).__init__(name=name)
        self.num_actions = num_actions
        self.network_type = network_type
        self.num_heads = num_heads
        # Defining layers.
        activation_fn = tf.keras.activations.relu
        # Setting names of the layers manually to make variable names more similar
        # with tf.slim variable names/checkpoints.
        self.conv1 = tf.keras.layers.Conv2D(32, [8, 8], strides=4, padding='same',
                                            activation=activation_fn, name='Conv')
        self.conv2 = tf.keras.layers.Conv2D(64, [4, 4], strides=2, padding='same',
                                            activation=activation_fn, name='Conv')
        self.conv3 = tf.keras.layers.Conv2D(64, [3, 3], strides=1, padding='same',
                                            activation=activation_fn, name='Conv')
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(512, activation=activation_fn,
                                            name='fully_connected')
        self.dense2 = tf.keras.layers.Dense(num_actions*num_heads,
                                            activation=None,
                                            name='fully_connected_q_heads')

    def call(self, state):
        """Creates the output tensor/op given the state tensor as input.

        See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
        information on this. Note that tf.keras.Model implements `call` which is
        wrapped by `__call__` function by tf.keras.Model.

        Parameters created here will have scope according to the `name` argument
        given at `.__init__()` call.

        Args:
            state: Tensor, input tensor.

        Returns:
            collections.namedtuple, output ops (graph mode) or output tensors (eager).
        """
        # Observations arrive as uint8 pixels; normalise to [0, 1].
        x = tf.cast(state, tf.float32)
        x = tf.div(x, 255.)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.dense2(x)
        # Split the flat output into (batch, num_actions, num_heads).
        q_heads = tf.reshape(x, [-1, self.num_actions, self.num_heads])
        unordered_q_heads = q_heads
        # Ensemble Q-values: mean over the heads.
        q_values = tf.reduce_mean(q_heads, axis=-1)
        return self.network_type(q_heads, unordered_q_heads, q_values)
| 3,045 | 1,004 |
from time import time
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
import numpy as np
import scipy as sp
import pandas as pd
import math
# Heart-disease dataset: "num" is the diagnosis label; "id" is dropped below.
df = pd.read_csv("parsed_heart.csv")
y1 = df["num"].values
cols = list(df)
# Candidate classifiers to compare.
mlp = MLPClassifier(hidden_layer_sizes=(100,100,100))
clf1 = BaggingClassifier(n_estimators=10)
clf2 = BaggingClassifier(n_estimators=100)
clf3 = RandomForestClassifier(n_estimators=10,criterion='gini', min_samples_split=2,max_features=None)
clf4 = AdaBoostClassifier(n_estimators=100)
# Soft-voting ensemble over the stronger candidates.
clf5 = VotingClassifier(estimators=[("rf",clf3),('bg',clf2),('ml',mlp),('ada',clf4)],voting='soft')
# Feature matrix: every column except the label and the identifier.
dropped = set(['num','id'])
columns2 = [z for z in cols if z not in dropped]
X2 = df[columns2].values
# NOTE(review): test_size=0.90 trains on only 10% of the data — confirm
# this split is intentional.
X_train, X_test, y_train, y_test = train_test_split(X2,y1,test_size=0.90)
# Standardise features using statistics from the training split only.
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Fit each model and report its classification metrics on the test split.
mlp.fit(X_train,y_train)
predictions2 = mlp.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
kfold = KFold(n_splits=3,shuffle=True)
print(cross_val_score(mlp,X_test,y_test,cv=kfold).mean())
clf2.fit(X_train,y_train)
predictions = clf2.predict(X_test)
print(classification_report(y_test, predictions))
print(accuracy_score(y_test, predictions))
clf3.fit(X_train,y_train)
predictions2 = clf3.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
clf4.fit(X_train,y_train)
predictions2 = clf4.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
clf5.fit(X_train,y_train)
predictions2 = clf5.predict(X_test)
print(classification_report(y_test, predictions2))
print(accuracy_score(y_test, predictions2))
print(cross_val_score(clf5,X_test,y_test,cv=kfold).mean())
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mock
from infra_libs.ts_mon.common import interface
from infra_libs.ts_mon.common import metric_store
from infra_libs.ts_mon.common import monitors
from infra_libs.ts_mon.common import targets
class MockState(interface.State):  # pragma: no cover
    """In-memory stand-in for the global ts_mon State, for use in tests."""

    def __init__(self, store_ctor=None):
        # Default to the in-process store unless a factory is injected.
        if store_ctor is None:
            store_ctor = metric_store.InProcessMetricStore
        self.global_monitor = None
        self.target = None
        self.flush_mode = None
        self.flush_thread = None
        self.metrics = {}
        self.store = store_ctor(self)
def MockMonitor():  # pragma: no cover
    """Return a MagicMock speccing the Monitor interface."""
    return mock.MagicMock(monitors.Monitor)
def MockTarget():  # pragma: no cover
    """Return a MagicMock speccing the Target interface."""
    return mock.MagicMock(targets.Target)
| 870 | 287 |
# MIT License
# Copyright 2021 Ryan Hausen and contributers
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import string
from functools import reduce
from itertools import chain, filterfalse
from typing import Iterable, List, Tuple
from astropy.io import fits
from tqdm import tqdm
from PIL import Image
import fitsmap
def digit_to_string(digit: int) -> str:
    """Converts an integer into its word representation"""
    # Lookup table replaces the original if/elif ladder.
    words = {
        0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
        5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine",
    }
    if digit in words:
        return words[digit]
    raise ValueError("Only digits 0-9 are supported")
def make_fname_js_safe(fname: str) -> str:
    """Converts a string filename to a javascript safe identifier."""
    # Identifiers can't start with a digit; spell the leading digit out.
    first = fname[0]
    if first in string.digits:
        safe = digit_to_string(int(first)) + fname[1:]
    else:
        safe = fname
    return safe.replace(".", "_dot_").replace("-", "_")
def get_fits_image_size(fits_file: str) -> Tuple[int, int]:
    """Returns image size (x, y)

    Args:
        fits_file (str): fits file path

    Returns:
        Tuple[int, int]: returns the x and y dims of the input file
    """
    # Only the header is read; the data array is never loaded.
    header = fits.getheader(fits_file)
    return header["NAXIS1"], header["NAXIS2"]
def get_standard_image_size(image_file: str) -> Tuple[int, int]:
    """Returns image size (x, y)

    Args:
        image_file (str): image file path

    Returns:
        Tuple[int, int]: returns the x and y dims of the input file
    """
    with Image.open(image_file) as img:
        return img.size
def peek_image_info(img_file_names: List[str]) -> Tuple[int, int]:
    """Gets image size values given passed image file names

    Args:
        img_file_names (List[str]): Input image files that are being tiled

    Returns:
        Tuple[int, int]: The `max x`, and `max y`
    """
    # FITS files carry their dimensions in the header; everything else is
    # opened with PIL.
    fits_sizes = list(
        map(get_fits_image_size, filter(lambda f: f.endswith("fits"), img_file_names),)
    )
    standard_sizes = list(
        map(
            get_standard_image_size,
            filterfalse(lambda f: f.endswith("fits"), img_file_names),
        )
    )
    # Element-wise maximum over all (x, y) pairs, starting from (0, 0).
    max_x, max_y = reduce(
        lambda x, y: (max(x[0], y[0]), max(x[1], y[1])),
        chain.from_iterable([fits_sizes, standard_sizes]),
        (0, 0),
    )
    return max_x, max_y
def get_version():
    """Read the package version string from fitsmap/__version__.py."""
    version_path = os.path.join(fitsmap.__path__[0], "__version__.py")
    with open(version_path, "r") as fin:
        # The first line holds the quoted version; strip the quotes.
        return fin.readline().strip().replace('"', "")
class MockQueue:
    """Queue lookalike that forwards put() counts to a tqdm progress bar."""

    def __init__(self, bar):
        # Anything exposing ``update(n=...)`` works (e.g. a tqdm instance).
        self.bar = bar

    def put(self, n):
        self.bar.update(n=n)
| 3,925 | 1,336 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 10:49:22 2017
@author: marks
"""
# File: tree.py
# References:
# http://hg.python.org/cpython/file/4e32c450f438/Lib/tkinter/ttk.py
# http://www.tcl.tk/man/tcl8.5/TkCmd/ttk_treeview.htm#M79
# http://svn.python.org/projects/python/branches/pep-0384/Demo/tkinter/ttk/dirbrowser.py
import os
from tkinter import *
from tkinter import ttk #@Reimport
from demopanels import MsgPanel, SeeDismissPanel
# Constants for formatting file sizes
KB = 1024.0
MB = KB * KB
GB = MB * KB
class TreeDemo(ttk.Frame):
    def __init__(self, isapp=True, name='treedemo'):
        # isapp: when True, show the demo description and See/Dismiss
        # panels around the tree itself.
        ttk.Frame.__init__(self, name=name)
        self.pack(expand=Y, fill=BOTH)
        self.master.title('Tree Demo')
        self.isapp = isapp
        self._create_widgets()
    def _create_widgets(self):
        # When run as the standalone demo app, wrap the tree in the
        # standard description and See/Dismiss chrome.
        if self.isapp:
            MsgPanel(self, ["One of the new Tk themed widgets is a tree widget, which allows ",
                            "the user to browse a hierarchical data-set such as a file system. ",
                            "The tree widget not only allows for the tree part itself, but it ",
                            "also supports an arbitrary number of additional columns which can ",
                            "show additional data (in this case, the size of the files found ",
                            "on your file system). You can also change the width of the columns ",
                            "by dragging the boundary between them."])
            SeeDismissPanel(self)
        self._create_demo_panel()
def _create_demo_panel(self):
demoPanel = Frame(self)
demoPanel.pack(side=TOP, fill=BOTH, expand=Y)
self._create_treeview(demoPanel)
self._populate_root()
def _create_treeview(self, parent):
f = ttk.Frame(parent)
f.pack(side=TOP, fill=BOTH, expand=Y)
# create the tree and scrollbars
self.dataCols = ('fullpath', 'type', 'size')
self.tree = ttk.Treeview(columns=self.dataCols,
displaycolumns='size')
ysb = ttk.Scrollbar(orient=VERTICAL, command= self.tree.yview)
xsb = ttk.Scrollbar(orient=HORIZONTAL, command= self.tree.xview)
self.tree['yscroll'] = ysb.set
self.tree['xscroll'] = xsb.set
# setup column headings
self.tree.heading('#0', text='Directory Structure', anchor=W)
self.tree.heading('size', text='File Size', anchor=W)
self.tree.column('size', stretch=0, width=70)
# add tree and scrollbars to frame
self.tree.grid(in_=f, row=0, column=0, sticky=NSEW)
ysb.grid(in_=f, row=0, column=1, sticky=NS)
xsb.grid(in_=f, row=1, column=0, sticky=EW)
# set frame resizing priorities
f.rowconfigure(0, weight=1)
f.columnconfigure(0, weight=1)
# action to perform when a node is expanded
self.tree.bind('<<TreeviewOpen>>', self._update_tree)
def _populate_root(self):
# use current directory as root node
self.path = os.getcwd()
# insert current directory at top of tree
# 'values' = column values: fullpath, type, size
# if a column value is omitted, assumed empty
parent = self.tree.insert('', END, text=self.path,
values=[self.path, 'directory'])
# add the files and sub-directories
self._populate_tree(parent, self.path, os.listdir(self.path))
def _populate_tree(self, parent, fullpath, children):
# parent - id of node acting as parent
# fullpath - the parent node's full path
# children - list of files and sub-directories
# belonging to the 'parent' node
for child in children:
# build child's fullpath
cpath = os.path.join(fullpath, child).replace('\\', '/')
if os.path.isdir(cpath):
# directory - only populate when expanded
# (see _create_treeview() 'bind')
cid =self.tree.insert(parent, END, text=child,
values=[cpath, 'directory'])
# add 'dummy' child to force node as expandable
self.tree.insert(cid, END, text='dummy')
else:
# must be a 'file'
size = self._format_size(os.stat(cpath).st_size)
self.tree.insert(parent, END, text=child,
values=[cpath, 'file', size])
def _format_size(self, size):
if size >= GB:
return '{:,.1f} GB'.format(size/GB)
if size >= MB:
return '{:,.1f} MB'.format(size/MB)
if size >= KB:
return '{:,.1f} KB'.format(size/KB)
return '{} bytes'.format(size)
def _update_tree(self, event): #@UnusedVariable
# user expanded a node - build the related directory
nodeId = self.tree.focus() # the id of the expanded node
if self.tree.parent(nodeId): # not at root
topChild = self.tree.get_children(nodeId)[0]
# if the node only has a 'dummy' child, remove it and
# build new directory; skip if the node is already
# populated
if self.tree.item(topChild, option='text') == 'dummy':
self.tree.delete(topChild)
path = self.tree.set(nodeId, 'fullpath')
self._populate_tree(nodeId, path, os.listdir(path))
# Run the tree demo as a standalone Tk application.
if __name__ == '__main__':
    TreeDemo().mainloop()
from __future__ import print_function
import sys
import os
import copy
import unittest
import logging
import random
# from unittest.main import main
import torch
from torch import nn
import torch_mlu.core.mlu_model as ct
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0413, C0411
logging.basicConfig(level=logging.DEBUG)
class buildMultiAdd(nn.Module): # pylint: disable=W0223
    r"""Network that exercises many view/copy-free tensor ops (expand, permute,
    transpose, slice, squeeze, split, unbind, select, narrow, chunk) in a
    multi-way graph, branching on the input's rank:
    graph:
    fc --> split --> squeeze --> transpose --> \
    \ --> squeeze --> slice --> batch_dot
    """
    def __init__(self, shape):
        super(buildMultiAdd, self).__init__()
        # Only the rank of the input is kept; per-run random sizes and
        # dimension indices make every instantiation exercise a different
        # combination of ops.
        self.shape = len(shape)
        self.s1_dim0_1 = random.randint(5, 25)
        self.s1_dim0_2 = random.randint(5,25)
        self.s2_dim2 = random.randint(5, 25)
        self.split_dim_0_2 = random.randint(0, 2)
        self.split_dim_0_3 = random.randint(0, 3)
        self.split_dim_0_4 = random.randint(0, 4)
        self.split_dim_0_5 = random.randint(0, 5)
        self.unbind_dim_0_2 = random.randint(0, 2)
        self.unbind_dim_0_3 = random.randint(0, 3)
        self.unbind_dim_0_4 = random.randint(0, 4)
        self.unbind_dim_0_5 = random.randint(0, 5)
        self.select_dim_0_3 = random.randint(0, 3)
        self.select_dim_0_4 = random.randint(0, 4)
        self.narrow_dim_0_2 = random.randint(0, 2)
    def forward(self, x):
        # Each branch reshapes x to 3-D (or 4-D/5-D then reduces) and ends by
        # splitting/unbinding it into `tensors` for the common tail loop below.
        if self.shape == 1:
            # 1-D input: unsqueeze + expand up to 3-D, then shuffle dims.
            dim1 = x.size()[0]
            x = x.unsqueeze(1)
            dim2 = x.size()[1]
            dim0 = self.s1_dim0_1
            x = x.expand(dim0, dim1, dim2)
            x = x.permute(2, 0, 1)
            dim0, dim1, dim2 = dim2, dim0, dim1
            x = x.add(x)
            x = x[:, :, :dim2-1]
            x = x.transpose(0, 1)
            dim0, dim2 = self.s1_dim0_2, dim2-1
            x = x.expand(dim0, dim1, 1, dim2)
            x = x.squeeze()
            tensors = x.split(2, self.split_dim_0_2)
        elif self.shape == 2:
            # 2-D input: expand with a random leading dim, then squeeze back.
            dim0, dim1 = x.size()
            x = x.unsqueeze(1)
            x = x.permute(0, 2, 1)
            dim2 = self.s2_dim2
            x = x.expand(dim2, dim0, dim1, 1)
            x = x.squeeze()
            dim0, dim1, dim2 = dim2, dim0, dim1
            x = x.add(x)
            x = x[:, :, :dim2-1]
            tensors = x.split(2, self.split_dim_0_2)
        elif self.shape == 3:
            dim0, dim1, dim2 = x.size()
            x = x.permute(2, 0, 1)
            x = x.transpose(1, 2)
            x = x.add(x)
            x = x.unsqueeze(2)
            dim0, dim2 = dim2, dim0
            x = x[:, :, :, :dim2-1]
            x = x.squeeze()
            tensors = x.split(2, self.split_dim_0_2)
        elif self.shape == 4:
            x = x.permute(0, 1, 3, 2)
            x = x.transpose(0, 1)
            x = x.add(x)
            dim0, dim1, dim2, dim3 = x.size()
            x = x[:, :, :, :dim3-1]
            x = x.split(2, self.split_dim_0_3)[0]
            tensors = x.unbind(self.unbind_dim_0_3)
        elif self.shape == 5:
            x = x.permute(3, 2, 0, 4, 1)
            x = x.transpose(0, 3)
            x = x.add(x)
            dim0, dim1, dim2, dim3, _ = x.size()
            x = x[:, :dim1-1, :, :dim3-1, :]
            x = x.split(2, self.split_dim_0_4)[0]
            x = x.select(self.select_dim_0_4, 1)
            tensors = x.unbind(self.unbind_dim_0_3)
        else:
            # Rank >= 6: reduce down via split/unbind/select before the tail.
            x = x.permute(0, 3, 4, 1, 2, 5)
            x = x.transpose(0, 5)
            x = x.add(x)
            dim0, dim1, dim2, dim3, _, dim5 = x.size()
            x = x[:, :dim1-1, :, :dim3-1, :, :dim5-1]
            x = x.split(2, self.split_dim_0_5)[0]
            x = x.unbind(self.unbind_dim_0_5)[0]
            x = x.select(self.select_dim_0_4, 1)
            tensors = x.unbind(self.unbind_dim_0_2)
        # Common tail: transform each piece (except the last) and accumulate
        # the results into a single tensor by elementwise addition.
        y = None
        for idx in range(len(tensors)-1):
            tensor = tensors[idx]
            tensor = tensor.transpose(0, 1)
            tensor = tensor.permute(2, 1, 0)
            tensor = tensor.add(tensor)
            tensor = tensor.narrow(self.narrow_dim_0_2, 0, 1)
            tensor = tensor.chunk(2, 1)[0]
            tensor = tensor.squeeze()
            y = y.add(tensor) if y is not None else tensor
        return y
class TestMultiWayNetOp(TestCase):
    """Checks that buildMultiAdd produces matching results on CPU and MLU."""
    # @unittest.skip("not test")
    @testinfo()
    def test_multi_way(self):
        #print('----Multi-way structure----')
        # Exercise input ranks 1 through 6 with random side lengths in [5, 25].
        for d in range(6):
            dim = d + 1
            shape = ()
            for _ in range(1, dim+1):
                ran_d = random.randint(5, 25)
                shape = shape + (ran_d,)
            data = torch.randn(shape, dtype=torch.float)
            in_cpu = copy.deepcopy(data)
            in_mlu = self.to_mlu(data)
            net_cpu = buildMultiAdd(shape)
            # Same module instance runs on both devices so the random dims match.
            out_cpu = net_cpu(in_cpu)
            out_mlu = net_cpu(in_mlu)
            # 3% MSE tolerance between CPU and MLU results.
            self.assertTensorsEqual(out_cpu,
                                    out_mlu.contiguous().cpu().float(),
                                    0.03,
                                    use_MSE=True)
    # @unittest.skip("not test")
    @testinfo()
    def test_multi_way_channel_last(self):
        #print('----Multi-way structure----')
        # Same check with a channels-last (NHWC) memory layout input.
        shape = (3,4,5,6)
        data = torch.randn(shape).to(memory_format=torch.channels_last)
        in_cpu = copy.deepcopy(data)
        in_mlu = self.to_mlu(data)
        net_cpu = buildMultiAdd(shape)
        out_cpu = net_cpu(in_cpu)
        out_mlu = net_cpu(in_mlu)
        self.assertTensorsEqual(out_cpu,
                                out_mlu.contiguous().cpu().float(),
                                0.03,
                                use_MSE=True)
# Run the MLU multi-way structure tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| 5,803 | 2,229 |
from django.contrib import admin
from .models import *
# Register your models here.
# Expose the shopping-list models in the Django admin with the default
# ModelAdmin configuration.
admin.site.register(Users)
admin.site.register(ShoppingList)
admin.site.register(ShoppingItem)
| 180 | 55 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Question:
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
Example:
given the array [-2,1,-3,4,-1,2,1,-5,4],
the contiguous subarray [4,-1,2,1] has the largest sum = 6.
'''
class Solution:
    def maxSubArray(self, nums):
        """Return the largest sum over all contiguous subarrays of nums.

        Kadane's algorithm: for every position keep the best subarray sum
        that ends there, and track the best sum seen overall.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        best_ending_here = best_overall = nums[0]
        for value in nums[1:]:
            # Either extend the previous subarray or start fresh at `value`.
            extended = best_ending_here + value
            best_ending_here = value if value > extended else extended
            if best_ending_here > best_overall:
                best_overall = best_ending_here
        return best_overall
return max_sum | 645 | 242 |
"""Unit tests for Docstring."""
import docconvert
class TestDocstring(object):
    """Unit tests for docconvert.parser.Docstring element accumulation."""
    def test_element_ordering(self):
        # Elements must be recorded in insertion order, with field-bearing
        # sections (return/raises/args) collapsing to marker tuples.
        docstring = docconvert.parser.Docstring()
        docstring.add_element(("raw", "Docstring."))
        docstring.add_return(kind="int")
        docstring.add_raises(kind="ValueError")
        docstring.add_arg("arg", kind="str")
        docstring.add_element(("note", ["First note.", "Second Note."]))
        assert docstring.elements == [
            ("raw", "Docstring."),
            ("return",),
            ("raises",),
            ("args",),
            ("note", ["First note.", "Second Note."]),
        ]
    def test_args(self):
        # add_arg_type may come before or after add_arg and must merge into
        # the same ordered arg_fields entry.
        docstring = docconvert.parser.Docstring()
        docstring.add_arg_type("arg1", "Object")
        docstring.add_arg("arg2", kind="str")
        docstring.add_arg("arg3", desc=["Description."], optional=True)
        docstring.add_arg_type("arg3", "int")
        assert docstring.elements == [("args",)]
        first_arg = docstring.arg_fields.popitem(last=False)
        assert first_arg[0] == "arg1"
        assert first_arg[1].kind == "Object"
        assert docstring.arg_fields["arg2"].kind == "str"
        assert docstring.arg_fields["arg2"].optional == False
        assert docstring.arg_fields["arg3"].kind == "int"
        assert docstring.arg_fields["arg3"].desc == ["Description."]
        assert docstring.arg_fields["arg3"].optional == True
    def test_attributes(self):
        # Attributes behave like args: type and description merge by name.
        docstring = docconvert.parser.Docstring()
        docstring.add_attribute_type("attr1", "Object")
        docstring.add_attribute("attr2", kind="str")
        docstring.add_attribute("attr3", desc=["Description."])
        docstring.add_attribute_type("attr3", "int")
        assert docstring.elements == [("attributes",)]
        first_attribute = docstring.attribute_fields.popitem(last=False)
        assert first_attribute[0] == "attr1"
        assert first_attribute[1].kind == "Object"
        assert docstring.attribute_fields["attr2"].kind == "str"
        assert docstring.attribute_fields["attr2"].optional == False
        assert docstring.attribute_fields["attr3"].kind == "int"
        assert docstring.attribute_fields["attr3"].desc == ["Description."]
    def test_raises(self):
        # Raises are positional (a list), not keyed by exception name.
        docstring = docconvert.parser.Docstring()
        docstring.add_raises("ValueError")
        docstring.add_raises("RuntimeError", desc=["Description."])
        assert docstring.elements == [("raises",)]
        assert docstring.raise_fields[0].kind == "ValueError"
        assert docstring.raise_fields[0].desc == []
        assert docstring.raise_fields[1].kind == "RuntimeError"
        assert docstring.raise_fields[1].desc == ["Description."]
    def test_returns(self):
        # There is a single return field; a later add_return_type overrides
        # the previously recorded kind.
        docstring = docconvert.parser.Docstring()
        docstring.add_return_type("int")
        docstring.add_return(desc=["Description."])
        assert docstring.elements == [("return",)]
        assert docstring.return_field.kind == "int"
        assert docstring.return_field.desc == ["Description."]
        docstring.add_return_type("str")
        assert docstring.return_field.kind == "str"
| 3,153 | 914 |
""" The following script is used to preprocess text once and cache it to a csv file. Currently, this means obtaining
the UPOS tags and universal features + renaming columns to a common format.
This is done because it's quite a long process and we do not want to do it every time we make a change. """
import pandas as pd
import os
import argparse
import json
import stanza
from conllu import parse
from tqdm import tqdm
from utils import PAD
parser = argparse.ArgumentParser()
parser.add_argument("--lang", type=str, default="de",
help="2-letter code (ISO 639-1) of used language")
parser.add_argument("--package", type=str, default="default",
help="Name of the used processor for POS/ufeats tagging")
parser.add_argument("--data_path", type=str, default="/home/matej/Documents/embeddia/morphological-additions/morphological-comment-filtering/data/GER/test.csv",
help="PATH to your data")
parser.add_argument("--data_column", type=str, default="content",
help="Column of csv in which the text to be processed is stored")
parser.add_argument("--target_column", type=str, default="target",
help="Column of csv in which the target label is stored")
parser.add_argument("--target_dir", type=str, default="preprocessed/GER",
help="DIRECTORY where processed data should be stored")
def process_conllu(conllu_data):
    """Accepts a conllu string, containing a processed sequence, and returns a
    list[list[dict]] of token properties grouped by sentence: index [i][j] of
    the returned list holds the features of the j-th token in the i-th
    sentence."""
    processed = []
    for sentence in parse(conllu_data):
        converted = []
        for token in sentence:
            features = {"form": token["form"]}
            # Some tokens (e.g. punctuation) carry no universal features.
            universal = token["feats"]
            if universal is not None:
                features.update(universal)
            features["upostag"] = token.get("upostag", PAD)
            converted.append(features)
        processed.append(converted)
    return processed
def extract_features(stanza_output):
    """Filter the result returned by a stanza Pipeline, keeping only 'form'
    (the raw word), 'upostag' and the universal features (when present).

    Returns a list of sentences, each a list of per-token feature dicts.
    """
    relevant_features = []
    for sentence in stanza_output.sentences:
        sentence_features = []
        for word in sentence.words:
            token_features = {"form": word.text}
            # Universal features come as a "Feat=Val|Feat=Val" string; when a
            # token has none, nothing is added. A missing POS tag is replaced
            # by the generic PAD marker.
            if word.feats is not None:
                token_features.update(
                    pair.split("=") for pair in word.feats.split("|")
                )
            token_features["upostag"] = word.upos if word.upos is not None else PAD
            sentence_features.append(token_features)
        relevant_features.append(sentence_features)
    return relevant_features
if __name__ == "__main__":
    import torch
    args = parser.parse_args()
    df = pd.read_csv(args.data_path)
    # hr - ftb, en - ewt
    nlp = stanza.Pipeline(lang=args.lang, processors='tokenize,pos', package=args.package,
                          use_gpu=torch.cuda.is_available())
    features = []
    # take_mask marks which rows survived tagging; used to filter df below.
    take_mask = []
    for idx_ex in tqdm(range(df.shape[0])):
        curr_ex = df.iloc[idx_ex][args.data_column]
        try:
            output = nlp(curr_ex)
        except RuntimeError:
            # Undiagnosed stanza error
            print(f"Skipping example #{idx_ex}: '{curr_ex}'")
            take_mask.append(False)
            continue
        ex_features = extract_features(output)
        take_mask.append(True)
        # Features are stored as a JSON string per row so they fit in one
        # csv column.
        features.append(json.dumps(ex_features))
    if not os.path.exists(args.target_dir):
        print("Warning: creating directory to store processed data")
        os.makedirs(args.target_dir)
    # Extract file name from given source path
    file_name = args.data_path.split(os.sep)[-1]
    # NOTE(review): target_path is computed but unused; the to_csv call below
    # rebuilds the same path inline.
    target_path = os.path.join(args.target_dir, file_name)
    df = df.loc[take_mask].reset_index(drop=True)
    df["features"] = features
    # Rename source columns to the common "content"/"target" schema.
    df = df.rename({args.data_column: "content", args.target_column: "target"}, axis=1)
    df.to_csv(os.path.join(args.target_dir, file_name), index=False)
| 4,890 | 1,426 |
import os
import abc
import json
import numpy as np
class Callback(abc.ABC):
    """Base class for per-batch callbacks invoked with (predictions, labels)."""

    def __init__(self):
        pass

    @abc.abstractmethod
    def __call__(self, predictions, labels):
        pass


class Evaluation(Callback):
    """Accumulates ranking metrics (NDCG@k and Hit@k) over batches.

    Metrics are always computed over the full item vocabulary; when a
    negative-samples file is supplied, they are additionally computed over
    the per-candidate sampled subset ("sampled_*" keys).
    """

    def __init__(self, ks=(1, 5, 10), ignore_index: int = -100, n_samples_file: str = None):
        """
        Args:
            ks: cutoff ranks at which NDCG/Hit are accumulated.
            ignore_index: label value marking positions to skip.
            n_samples_file: optional path to a JSON file mapping each item id
                to its list of negative-sample ids.
        """
        super().__init__()
        self.ks = ks
        self.ignore_index = ignore_index
        self.evaluation = None
        self.use_neg_sampling = False
        if n_samples_file:
            self.use_neg_sampling = True
            with open(n_samples_file, "r") as file:
                negative_samples = json.load(file)
            # JSON object keys are strings; convert them back to int item ids.
            self.negative_samples = {int(k): v for k, v in negative_samples.items()}
        self.reset()

    def __call__(self, predictions, labels):
        # predictions: [batch, seq, vocab] scores; labels: [batch, seq] item ids.
        for i in range(labels.shape[0]):
            for j in range(labels.shape[1]):
                if labels[i, j] == self.ignore_index:
                    continue
                candidate = labels[i, j].item()
                self.evaluation["n"] += 1
                # Bug fix: the original dereferenced self.negative_samples
                # unconditionally, raising AttributeError whenever no
                # n_samples_file was configured despite the use_neg_sampling
                # flag existing for exactly that case.
                if self.use_neg_sampling:
                    samples = self.negative_samples[candidate] + [candidate]
                    sample_predictions = predictions[i, j][samples].tolist()
                    ranked = sorted(zip(samples, sample_predictions),
                                    key=lambda x: x[1], reverse=True)
                    self._accumulate("sampled_ndcg", "sampled_hit",
                                     self._rank_of(ranked, candidate))
                # Full-vocabulary metrics.
                all_predictions = predictions[i, j].tolist()
                all_samples = np.arange(len(all_predictions))
                ranked = sorted(zip(all_samples, all_predictions),
                                key=lambda x: x[1], reverse=True)
                self._accumulate("ndcg", "hit", self._rank_of(ranked, candidate))

    @staticmethod
    def _rank_of(ranked, candidate):
        # Position of `candidate` in the descending (id, score) ranking;
        # falls back to 0 if absent (matches the original behavior).
        for index, (sample_id, _) in enumerate(ranked):
            if sample_id == candidate:
                return index
        return 0

    def _accumulate(self, ndcg_key, hit_key, rank):
        # Credit every cutoff the rank falls under (rank is 0-based).
        for k in self.ks:
            if rank < k:
                self.evaluation[ndcg_key][k] += 1 / np.log2(rank + 2)
                self.evaluation[hit_key][k] += 1

    def __str__(self):
        """Render every accumulated metric as 'metric@k=value'."""
        return " ".join(
            f"{key}@{k}={self.evaluation[key][k] / self.evaluation['n']:.5f}" for key in ("sampled_ndcg", "sampled_hit", "ndcg", "hit") for k in
            self.evaluation[key])

    def reset(self):
        """Zero all counters."""
        self.evaluation = {"sampled_ndcg": {k: 0 for k in self.ks},
                           "sampled_hit": {k: 0 for k in self.ks},
                           "ndcg": {k: 0 for k in self.ks},
                           "hit": {k: 0 for k in self.ks},
                           "n": 0}

    def get_metric(self, metric: str):
        """Return [(k, averaged value)] for `metric`, or None if unknown."""
        if metric in self.evaluation:
            return [(k, self.evaluation[metric][k] / self.evaluation['n']) for k in self.evaluation[metric]]
class PredictionSerializer(Callback):
    """Callback that streams prediction-id lists and gold labels to a TSV file."""
    def __init__(self,
                 file_name: str,
                 ignore_index: int = -100):
        # file_name: output TSV path; the parent directory is created if needed.
        # ignore_index: label value marking positions to skip.
        super().__init__()
        self.predictions = []
        self.labels = []
        self.ignore_index = ignore_index
        parent_dir = os.path.dirname(file_name)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
        # NOTE(review): this handle is never closed or flushed explicitly, so
        # trailing rows may be lost if the process exits abruptly.
        self.file = open(file_name, "w")
        self.file.write("Prediction\tLabel\n")
    def __call__(self, predictions, labels):
        for i in range(labels.shape[0]):
            for j in range(labels.shape[1]):
                if labels[i, j] != self.ignore_index:
                    # NOTE(review): np.argsort sorts ascending, so [:100] keeps
                    # the 100 LOWEST-scoring ids; if "top predictions" are
                    # intended a descending sort ([::-1]) is needed -- confirm
                    # with downstream consumers before changing.
                    self.predictions.append(np.argsort(predictions[i, j].cpu()).tolist()[:100])
                    self.labels.append(labels[i, j].item())
        # Flush the buffered rows for this batch to the TSV and clear them.
        for p, l in zip(self.predictions, self.labels):
            self.file.write(",".join([str(x) for x in p]) + "\t" + str(l) + "\n")
        self.predictions, self.labels = [], []
    def serialize(self, file_path: str):
        """Dump buffered predictions/labels to `file_path` as JSON and clear the buffers.

        Note: __call__ clears the buffers after each batch, so this only sees
        data accumulated since the last write."""
        parent_dir = os.path.dirname(file_path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)
        with open(file_path, "w") as file:
            json.dump({
                "predictions": self.predictions,
                "labels": self.labels,
            }, file)
        self.predictions, self.labels = [], []
| 4,685 | 1,413 |
"""
# -*- coding: utf-8 -*-
@author: techwiz
Created on Sun May 27 14:47:20 2018
"""
import pandas as pd
# End-to-end Titanic survival pipeline: load, clean, encode, scale, fit a
# RandomForest and write a Kaggle submission file.
train_set = pd.read_csv("train.csv")
test_set = pd.read_csv("test.csv")
""" Exploratory Data Analysis """
# Quick looks at class balance and missing values (results are discarded;
# these lines are for interactive inspection only).
train_set['Sex'].value_counts()
train_set['Age'].value_counts()
train_set['Embarked'].value_counts()
train_set.isnull().values.any()
train_set.isnull().sum().sum()
train_set.describe()
# Selecting required features from training dataset
train_set.drop('PassengerId', axis=1, inplace= True)
train_set.drop('Name' , axis=1,inplace=True)
train_set.drop('Cabin' , axis =1 , inplace=True)
train_set.drop('Ticket',axis=1, inplace = True)
test_set.drop(['PassengerId','Name','Cabin','Ticket'],axis=1,inplace=True)
#Encoding Categorial Data
train_set['Age'].hist(bins=30)
train_set['Fare'].hist(bins=30)
# impute missing values
"""
Losing Data Distribution by imputing through mean and median
train_set.fillna(train_set.mean(),inplace=True)
train_set.isnull().values.any()
test_set.fillna(train_set.mean(),inplace=True)
test_set.isnull().values.any()
"""
# imputing data with outliners
train_set['Age'].fillna(-1,inplace=True)
train_set['Fare'].fillna(-1,inplace=True)
train_set['Embarked'].fillna('Q',inplace=True)
test_set['Age'].fillna(-1,inplace=True)
test_set['Fare'].fillna(-1,inplace=True)
test_set['Embarked'].fillna('Q',inplace=True)
#LabelEncoder
from sklearn.preprocessing import LabelEncoder
lb = LabelEncoder()
train_set['Sex'] = lb.fit_transform(train_set['Sex'])
test_set['Sex'] = lb.fit_transform(test_set['Sex'])
lb_t = LabelEncoder()
train_set['Embarked'] = lb_t.fit_transform(train_set['Embarked'])
test_set['Embarked'] = lb_t.fit_transform(test_set['Embarked'])
"""
train_set = pd.get_dummies(data= train_set , dummy_na = True,columns =['Sex' , 'Embarked'])
test_set = pd.get_dummies(data= test_set , dummy_na = True,columns =['Sex' , 'Embarked'])
train_set.drop('Sex_nan',axis=1,inplace=True)
test_set.drop('Sex_nan',axis=1,inplace=True)
"""
# Selecting Features and target
X = train_set.iloc[:,1:13].values
y = train_set.iloc[:,0].values
X_test = test_set.iloc[:,:].values
"""
#Validating Model for Parameter tuning
from sklearn.model_selection import train_test_split
X_train , X_validate , y_train , y_validate = train_test_split(X,y,test_size=0.18,random_state=42)
#Now Appling Various ML Models For Classification
#Feature Scaling , testing differnt scalers and their effect on data distibution
#Using Min Max Scalar
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0.5,0.95))
train_set = scaler.fit_transform(train_set)
test_set = scaler.fit_transform(test_set)
train_set['Age'].hist(bins=30)
#testing differnt scalers
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
train_set = sc_X.fit_transform(train_set)
test_set = sc_X.fit_transform(test_set)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=1000,min_samples_split=30,min_samples_leaf=4,random_state=42,warm_start=True)
clf.fit(X_train,y_train)
y_pred = clf.predict(X_validate)
import xgboost as xg
classifier = xg.XGBClassifier()
classifier.fit(X_train,y_train)
y_predict_xg = classifier.predict(X_validate)
#metrics
from sklearn.metrics import confusion_matrix
cnf = confusion_matrix(y_validate,y_pred)
cnf1 = confusion_matrix(y_validate,y_predict_xg)
"""
#Feature Scaling , testing differnt scalers and their effect on data distibution
#Using Min Max Scalar
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0.5,0.95))
X = scaler.fit_transform(X)
X_test= scaler.transform(X_test)
train_set['Age'].hist(bins=30)
"""
#testing differnt scalers
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X = sc_X.fit_transform(X)
X_test = sc_X.transform(X_test)
"""
#using various ml models
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=1000,min_samples_split=30,min_samples_leaf=4,random_state=42,warm_start=True)
clf.fit(X,y)
"""
import xgboost as xg
classifier = xg.XGBClassifier()
classifier.fit(X,y)
y_pred_xg = classifier.predict(X_test)
"""
y_predict = clf.predict(X_test)
sub = pd.read_csv('gender_submission.csv')
print(sub['Survived'].value_counts())
#submission
# Overwrite the sample submission's labels with our predictions and verify
# the class distribution of the written file by reading it back.
sub['Survived']=y_predict
sub.to_csv('submissions1.csv',index=False)
final = pd.read_csv('submissions1.csv')
print(final['Survived'].value_counts())
import os
import heapq
import json
class SortFiles:
    """Ranks spell sub-directories by how many .png files each contains.

    The root directory is read from the "spellsDirectory" key of config.json
    in the working directory; results are printed smallest count first
    (min-heap order).
    """

    def __init__(self):
        self._spell_dir = self._read_json()
        self._heap = []

    def execute(self):
        """Scan the spell directory and print one '<count>: <name>' line per sub-directory."""
        self._heap = []
        with os.scandir(self._spell_dir) as entries:
            for entry in entries:
                if entry.is_dir():
                    heapq.heappush(self._heap, (self._count_png_in_dir(entry), entry.name))
        self._iterate_heap()

    ###########################################################################
    # PRIVATE FUNCTIONS
    ###########################################################################
    def _count_png_in_dir(self, entry: os.DirEntry):
        """Return how many regular .png files sit directly inside `entry`."""
        with os.scandir(entry) as children:
            return sum(
                1
                for child in children
                if child.is_file() and child.name.endswith(".png")
            )

    def _iterate_heap(self):
        # heappop yields (count, name) tuples in ascending order.
        while self._heap:
            count, name = heapq.heappop(self._heap)
            print("{}: {}".format(count, name))

    def _read_json(self):
        """Load config.json and return its spellsDirectory entry."""
        with open("config.json", "r") as file:
            return json.load(file)["spellsDirectory"]
# Script entry point: rank spell directories by their PNG counts.
if __name__ == "__main__":
    sort_files = SortFiles()
    sort_files.execute()
| 1,301 | 392 |
# Generated by Django 3.0.8 on 2020-09-20 20:04
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the GameRequest model, which ties
    # a set of players (M2M to the configured user model) to a GameVariant
    # plus free-form JSON parameters.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('durak', '0013_auto_20200909_0124'),
    ]
    operations = [
        migrations.CreateModel(
            name='GameRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('parameters', django.contrib.postgres.fields.jsonb.JSONField()),
                ('players', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
                ('variant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='durak.GameVariant')),
            ],
        ),
    ]
| 914 | 296 |
# fmt: off
import cProfile
import os
import ray
from rlo import analytics
from rlo.config_utils import config_for_repetition, kwargs_from_config
from rlo.factory import seed_from_config, simul_search_curriculum_from_config, get_train_and_eval_exprs
from rlo.flags import make_config_for_scenario, make_parser, check_save_config, ray_run_arguments
from rlo.ray_worker import RayWorkerPool
def main():
    """Head-node entry point: initialize Ray, schedule every repetition of the
    simultaneous-search curriculum on a worker pool, then summarize the logs
    and optionally dump Ray timelines / assert worker-kill test invariants."""
    from rlo.summarize_logs import summarize_logs
    run_parser = make_parser(ray_run_arguments)
    run_args, _ = run_parser.parse_known_args()
    if run_args.workers_per_gpu > 1 and (
        run_args.gpu_memory_fraction is None or run_args.gpu_memory_fraction * run_args.workers_per_gpu > 1.0):
        # In fact it seems there may need to be some margin of extra space on the GPU after allocating each worker
        # but we haven't identified how much, or good defaults for gpu_memory_fraction, yet.
        raise ValueError("Must have --gpu_memory_fraction <= 1/workers_per_gpu")
    config = make_config_for_scenario(run_args.scenario, ray_run_arguments)
    ray.init(config['address'], **kwargs_from_config(config,
        required_keys=("log_to_driver", "num_cpus", "num_gpus"),
        optional_keys=(),
        renames=(("redis_token", "redis_password"),)))
    train_set, eval_set = get_train_and_eval_exprs(config)
    check_save_config(config, train_set.named_exprenvs(), eval_set.named_exprenvs())
    pool = RayWorkerPool(config, remote_timeout=config["ray_timeout"], local_task_limit=run_args.profile_local or 0)
    # All head-node analytics events are written under <result_save_path>/head/.
    with analytics.log_events_to_files(os.path.join(config["result_save_path"], "head" + os.path.sep)):
        analytics.event("expression_summary", num_train_expr = len(train_set.named_exprenvs()), num_test_expr = len(eval_set.named_exprenvs()))
        # Either run the single configured repetition, or fan out one config
        # per repetition index.
        for rep_config in ([config] if config.get("repetition") is not None
                else [config_for_repetition(config, repetition) for repetition in range(config["num_repetitions"])]):
            with analytics.Scope(repetition=rep_config['repetition']):
                curriculum = simul_search_curriculum_from_config(rep_config, train_set, eval_set)
                pool.schedule_work_requests_from(
                    curriculum.request_initial(seed_from_config(rep_config)))
        if (run_args.profile_local is None) or (run_args.profile_local > 0):
            # None means --profile_local was specified without a time limit
            cProfile.runctx("pool.run()", {}, {"pool": pool}, os.path.join(config["result_save_path"], "head", "prof.pstats"))
        else:
            pool.run()
        print("Run finished, {} live weights".format(len(pool._weight_id_map)))
    if run_args.timeline:
        ray.timeline(filename=os.path.join(config['result_save_path'], "ray_timeline.json"))
        ray.object_transfer_timeline(filename=os.path.join(config['result_save_path'], "ray_object_transfers.json"))
    ray.shutdown() # Reduce memory use of Ray while this headnode machine does all the plotting
    events = summarize_logs(config, eval_set, ray=True)
    if config["test_kill_worker_after_tasks"] >= 0:
        # Test mode - check the logs were sensible; otherwise, fail the run (after producing plots).
        # Note that these asserts are not guaranteed or even expected to hold for all parameter values.
        # Rather they are intended to allow writing useful tests via sensible choices of parameters.
        # First, check that at least one worker was killed. This is only guaranteed if the total number
        # of tasks is at least (num_workers * (test_kill_worker_after_tasks-1))+1.
        assert any(e["event"] == "worker_died" for e in events)
        # Second, check that at least one worker joined after the start.
        # Note that this doesn't check that the joining worker was one that had been killed (e.g. from
        # the same IP address); instead, another node might have connected for the first time instead.
        # Only if num_workers == num_repetitions (the number of workers required before we start),
        # can we be sure that the new-joiner was a reconnection.
        # Conversely, failing does not necessarily imply that such a worker cannot reconnect, merely that
        # it didn't (before the run finished). Only if the total number of tasks is greater than
        # (num_workers * test_kill_worker_after_tasks) can we be sure that at least one worker would *have* to
        # reconnect for the run to get this far.
        assert any(e["event"] == "worker_joined" for e in events)
if __name__ == "__main__": main()
| 4,585 | 1,373 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from binascii import crc32
from six import with_metaclass
WORDLISTS = {}
_log = logging.getLogger(__name__)
class WordlistType(type):
    """Metaclass that validates and registers mnemonic wordlist classes.

    Every concrete subclass (anything created with a non-empty bases tuple)
    must declare `language_name`, `unique_prefix_length` and a 1626-entry
    `word_list`; missing `english_language_name` falls back to
    `language_name` with a warning. Valid classes are registered in the
    module-level WORDLISTS map keyed by their English language name.

    Raises:
        TypeError: if a required attribute is missing or the word list does
            not have exactly 1626 entries.
    """
    def __new__(cls, name, bases, attrs):
        if bases:
            if 'language_name' not in attrs:
                raise TypeError("Missing language_name for {0}".format(name))
            if 'unique_prefix_length' not in attrs:
                raise TypeError("Missing 'unique_prefix_length' for {0}".format(name))
            if 'word_list' not in attrs:
                raise TypeError("Missing 'word_list' for {0}".format(name))
            if 'english_language_name' not in attrs:
                # Bug fix: the original referenced the undefined bare name
                # `language_name` here, so this fallback path raised NameError
                # instead of logging. Also use warning() (warn() is a
                # deprecated alias).
                _log.warning("No 'english_language_name' for {0} using '{1}'".format(
                    name, attrs['language_name']))
                attrs['english_language_name'] = attrs['language_name']
            if len(attrs['word_list']) != 1626:
                raise TypeError("Wrong word list length for {0}".format(name))
        new_cls = super(WordlistType, cls).__new__(cls, name, bases, attrs)
        if bases:
            WORDLISTS[new_cls.english_language_name] = new_cls
        return new_cls
class Wordlist(with_metaclass(WordlistType)):
    """Base class for Monero-style mnemonic wordlists.

    Subclasses supply `word_list` (1626 words), `language_name` and
    `unique_prefix_length`; this base provides hex<->mnemonic conversion and
    checksum computation. Every 32-bit little-endian chunk of the hex string
    is encoded as three base-1626 word indices.
    """
    # Word list length; also the base of the index encoding below.
    n = 1626
    @classmethod
    def encode(cls, hex):
        """Convert hexadecimal string to mnemonic word representation with checksum.
        """
        out = []
        for i in range(len(hex) // 8):
            # Each 8 hex chars (32 bits, byte-swapped to little-endian) map
            # to three word indices w1, w2, w3 in base n with carry offsets.
            word = endian_swap(hex[8*i:8*i+8])
            x = int(word, 16)
            w1 = x % cls.n
            w2 = (x // cls.n + w1) % cls.n
            w3 = (x // cls.n // cls.n + w2) % cls.n
            out += [cls.word_list[w1], cls.word_list[w2], cls.word_list[w3]]
        checksum = cls.get_checksum(" ".join(out))
        out.append(checksum)
        return " ".join(out)
    @classmethod
    def decode(cls, phrase):
        """Calculate hexadecimal representation of the phrase.
        """
        phrase = phrase.split(" ")
        out = ""
        for i in range(len(phrase) // 3):
            # Invert encode(): recover x from the three word indices, then
            # swap bytes back to the original order.
            word1, word2, word3 = phrase[3*i:3*i+3]
            w1 = cls.word_list.index(word1)
            w2 = cls.word_list.index(word2) % cls.n
            w3 = cls.word_list.index(word3) % cls.n
            x = w1 + cls.n *((w2 - w1) % cls.n) + cls.n * cls.n * ((w3 - w2) % cls.n)
            out += endian_swap("%08x" % x)
        return out
    @classmethod
    def get_checksum(cls, phrase):
        """Given a mnemonic word string, return a string of the computed checksum.

        The CRC32 of the concatenated unique word prefixes, taken modulo the
        phrase length, selects which existing word doubles as the checksum.
        :rtype: str
        """
        phrase_split = phrase.split(" ")
        if len(phrase_split) < 12:
            raise ValueError("Invalid mnemonic phrase")
        if len(phrase_split) > 13:
            # Standard format
            phrase = phrase_split[:24]
        else:
            # MyMonero format
            phrase = phrase_split[:12]
        wstr = "".join(word[:cls.unique_prefix_length] for word in phrase)
        wstr = bytearray(wstr.encode('utf-8'))
        z = ((crc32(wstr) & 0xffffffff) ^ 0xffffffff ) >> 0
        z2 = ((z ^ 0xffffffff) >> 0) % len(phrase)
        return phrase_split[z2]
def get_wordlist(name):
    """Look up a registered wordlist class by its English language name.

    Raises ValueError when no wordlist is registered under that name.
    """
    if name not in WORDLISTS:
        raise ValueError("No such word list")
    return WORDLISTS[name]
def list_wordlists():
    """Return a view over the English names of every registered wordlist."""
    return WORDLISTS.keys()
def endian_swap(word):
    """Reverse the byte order of an 8-character hex string.

    Hex-digit pairs are taken from positions 6, 4, 2 and 0, so "12345678"
    becomes "78563412".
    :rtype: str
    """
    pairs = [word[i:i + 2] for i in (6, 4, 2, 0)]
    return "".join(pairs)
| 3,490 | 1,205 |
from aliexpress_api_client import AliExpress
import PIL
from PIL import Image, ImageChops
import urllib2 as urllib
import io
from itertools import izip
from libImgComp import comp_imgs
def comp_images(i1, i2):
    """Compare two PIL images and return the similarity score from comp_imgs.

    Both images are scaled to a common size and converted to RGB first so
    images with different sizes or modes can be compared.
    """
    maxsize = (500, 500)
    # Bug fix: Image.resize returns a NEW image; the original code discarded
    # the return values, so the comparison silently ran on the full-size
    # images.
    i1 = i1.resize(maxsize)
    i2 = i2.resize(maxsize)
    i1 = i1.convert('RGB')
    i2 = i2.convert('RGB')
    return comp_imgs(i1, i2)
import math, operator
def process_str(s):
    """Strip markup tags, punctuation and noise tokens from a product title.

    Removal order matches the original chained ``replace`` calls; the final
    step collapses double spaces (one pass only).
    """
    for token in ('(', ')', '<b>', '</b>', '<font>', '</font>', 'Generic',
                  ',', '.', '-', '/', '\\'):
        s = s.replace(token, '')
    return s.replace('  ', ' ')
def rmsdiff(im1, im2):
    """Calculate the root-mean-square difference between two images."""
    delta = ImageChops.difference(im1, im2)
    histogram = delta.histogram()
    # Histogram bins repeat every 256 entries (one run per band); weight each
    # count by the squared pixel value of its bin.
    sum_of_squares = sum(count * ((bin_idx % 256) ** 2)
                         for bin_idx, count in enumerate(histogram))
    return math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
def strdiff(s1, s2):
    """Count how many space-separated words of s1 occur as substrings of s2.

    Comparison is case-insensitive.
    """
    lowered_target = s2.lower()
    return sum(1 for word in s1.lower().split(' ') if word in lowered_target)
def get_perc(mi, ma, va):
    """Yield each value of va rescaled to 0-100 between mi and ma.

    If mi == ma, every value maps to 100.0.
    """
    if mi == ma:
        for _ in va:
            yield 100.0
    else:
        span = ma - mi
        for value in va:
            yield ((value - mi) / span) * 100
def get_perc_w(mi, ma, va):
    """Yield each value of va as a percentage of the maximum ma.

    ``mi`` is only consulted for the degenerate mi == ma case, where every
    value maps to 100.0.
    """
    if mi == ma:
        for _ in va:
            yield 100.0
    else:
        for value in va:
            yield (value / ma) * 100
def get_max_ind(a):
    """Yield every index whose value is within 15 of the maximum of a."""
    cutoff = max(a) - 15
    for idx, value in enumerate(a):
        if value >= cutoff:
            yield idx
def get_min_ind(a):
    """Return the first index whose value is within 15 of the minimum of a.

    The minimum itself always qualifies, so this never falls through for a
    non-empty sequence.
    """
    cutoff = min(a) + 15
    for idx, value in enumerate(a):
        if value <= cutoff:
            return idx
def get_m_i(a):
    """Return the index of the first occurrence of the maximum of a."""
    target = max(a)
    for idx, value in enumerate(a):
        if value == target:
            return idx
def get_avg(st, img):
    """Weighted mean favouring the string score (x1.7) over the image score (x0.3)."""
    weighted_sum = 1.7 * st + 0.3 * img
    return weighted_sum / 2.0
def price_float(s):
    """Parse a price string by dropping its 4-character currency prefix.

    # assumes a fixed 4-character prefix such as "US $" — TODO confirm
    # against the AliExpress salePrice format.
    """
    numeric_part = s[4:]
    return float(numeric_part)
def eval_p(prices, or_p):
for price in prices:
print str(price) + ' <- PRICE'
print str(or_p) + ' <- OR_PRICE'
if (5*price > (or_p - price) and price >= 0.45*or_p):
print 'GOT ' + str(price)
yield price
def get_pairs(li):
    """Yield each overlapping pair of adjacent items joined by a space."""
    for left, right in zip(li, li[1:]):
        yield left + ' ' + right
def get_pairs_strict(li):
    """Yield non-overlapping pairs (items 0+1, 2+3, ...) joined by a space.

    A trailing unpaired item is dropped.
    """
    # `//` keeps integer semantics under both Python 2 and Python 3; the
    # original `len(li) / 2` produces a float under Python 3 and makes
    # range() raise TypeError.  Behaviour under Python 2 is unchanged.
    for i in range(len(li) // 2):
        yield li[2*i] + ' ' + li[2*i + 1]
def get_all_maxs(li):
    """Yield every index holding the maximum value of li."""
    peak = max(li)
    for idx, value in enumerate(li):
        if value == peak:
            yield idx
def get_all_mins(li):
    """Yield every index holding the smallest strictly-positive value of li.

    Raises ValueError if li contains no positive values.
    """
    floor_val = min(n for n in li if n > 0)
    for idx, value in enumerate(li):
        if value == floor_val:
            yield idx
def get_all_maxs_mild(li):
    """Yield indices whose value is within 10 of the maximum of li."""
    cutoff = max(li) - 10
    for idx, value in enumerate(li):
        if value >= cutoff:
            yield idx
def get_all_mins_mild(li):
    """Yield indices whose value is within 10 of the smallest positive value.

    Note the tolerance means zero/negative entries can also qualify.
    """
    cutoff = min(n for n in li if n > 0) + 10
    for idx, value in enumerate(li):
        if value <= cutoff:
            yield idx
def process_pr(li, t):
    """Yield each price, replacing anything above threshold t with -1."""
    for price in li:
        yield -1 if price > t else price
def calc_result(s_item, or_price, or_img):
    """Search AliExpress for the best match to ``s_item``.

    :param s_item: raw product title (markup is stripped via process_str).
    :param or_price: original item price, used to filter and rank offers.
    :param or_img: URL of the original product image.
    :return: (link, price, diff) of the chosen candidate offer.
    """
    # print 'starting Daniils part'
    COEFF = 0.7
    s_item = process_str(s_item)
    item_copy = s_item
    # NOTE(review): placeholder API key — must be configured before use.
    aliexpress = AliExpress('YOUR_CODE_HERE')
    '''while (not_working):
        try:
            print ' '.join(s_item.split(' ')[:-count])
            products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], ' '.join(s_item.split(' ')[0:-count]))['products']
            cur_len = len(products)
            print cur_len
            if ((cur_len < old_len or cur_len >= 15) and count >= 3):
                if (cur_len < old_len):
                    products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], ' '.join(s_item.split(' ')[0:-(count - 1)]))['products']
                print 'disabling'
                not_working = False
            else:
                raise ValueError(' fff ')
        except:
            count += 1;
        old_len = cur_len
        if (count + 1 == len(item_copy.split(' '))):
            break
    #print ' '.join(s_item.split(' ')[:count])'''
    done = False
    old_len = 0
    cur_len = 0
    products = {}
    le_s = len(item_copy.split(' '))
    search_query = s_item.split(' ')
    previous_max = 20
    #a = raw_input()
    # Iteratively collapse the query (words -> adjacent word pairs -> ...),
    # keeping the variant(s) that return the most products.
    # NOTE(review): sort="orignalPriceUp" is (sic) — verify whether the
    # AliExpress API expects this exact misspelling before changing it.
    while (not done):
        count = 0
        print "Going into the next lap"
        print search_query
        lens_titles = []
        lens_values = []
        if (len(search_query) != 1):
            search_query = list(get_pairs(search_query))
        max_count = len(search_query)
        while (count < max_count):
            products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], search_query[count],
                originalPriceFrom=str(or_price*COEFF), sort="orignalPriceUp")['products']
            lens_titles.append(search_query[count])
            lens_values.append(len(products))
            count += 1
        maxs_i = list(get_all_maxs(lens_values))
        print '--------------------------------'
        #print maxs_i
        # NOTE(review): `final_search_query` is only bound in the
        # len(maxs_i) >= 2 branch below; the two branches reading it can
        # raise NameError if hit on the first lap — confirm intended flow.
        if (len(maxs_i) == 0 or lens_values[maxs_i[0]] == 0):
            #print maxs_i
            #print lens_values[maxs_i[0]]
            search_query = list(get_pairs_strict(final_search_query))
            print 'Shutting down'
            done = True
        elif (len(maxs_i) == 1 and lens_values[maxs_i[0]] >= 2):
            search_query = [lens_titles[maxs_i[0]]]
            #print maxs_i
            #print lens_values
            print 'Shutting down - one good result'
            done = True
        elif (len(maxs_i) == 1 and lens_values[maxs_i[0]] < 2):
            search_query = list(get_pairs_strict(final_search_query))
            #print maxs_i
            #print lens_values
            print 'Shutting down - one bad result'
            done = True
        else:
            search_query = []
            #print maxs_i
            print 'Keeping on'
            if (len(maxs_i) >= 2 and lens_values[maxs_i[0]] != 0):
                final_search_query = []
                for item in maxs_i:
                    k = len(lens_titles[item].split(' '))
                    final_search_query.append(' '.join(lens_titles[item].split(' ')[:k/2]))
                final_search_query.append(' '.join(lens_titles[-1].split(' ')[k/2+1:]))
                search_query = list(get_pairs_strict(final_search_query))
    #printing the result
    '''
    for item in search_query:
        products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], item)['products']
        print '----------------------------------------------------------------------'
        print item
        print len(products)
        for i in products:
            print i['productTitle']
    '''
    links = []
    prices = []
    perc = []
    diffs = []
    print search_query
    print 'STARTING CHECK FOR EACH POS ...'
    # Query each surviving search phrase and score candidates via search().
    for s in search_query:
        print 'INPUT:'
        print s
        products = aliexpress.get_product_list(['productTitle', 'salePrice', 'imageUrl', 'productUrl'], s,
            originalPriceFrom=str(or_price*COEFF), sort="orignalPriceUp")['products']
        print len(products)
        #a = raw_input()
        l, p, perct, diff = search(products, item_copy, or_price, or_img)
        links.extend(l)
        prices.extend(p)
        perc.extend(perct)
        diffs.extend(diff)
    max_perc = list(get_all_maxs_mild(perc))
    min_prices = list(get_all_mins_mild(prices))
    print 'ORIG PR : ' + str(or_price)
    # Candidates that are simultaneously high-scoring and low-priced.
    result = list(set(max_perc).intersection(min_prices))
    print 'MAX PERC:'
    print max_perc
    print 'MIN PRC:'
    print min_prices
    prices = list(process_pr(prices, or_price))
    print prices
    print 'RES:'
    print result
    result_perc = []
    for item in result:
        print links[item]
        print prices[item]
        print perc[item]
        result_perc.append(perc[item])
    if (len(result) != 0):
        final_ind = get_m_i(result_perc)
        fin = result[final_ind]
    #a = raw_input()
    if (len(result) != 0):
        return links[fin], prices[fin], diffs[fin]
    else:
        return links[min_prices[0]], prices[min_prices[0]], diffs[min_prices[0]]
def search(products, s_item, or_price, or_img):
    # Score each candidate product against the original item by combining a
    # title word-match score with an image-similarity score, then return the
    # URLs/prices/scores/price-diffs of the best-scoring candidates.
    print 'Starting search...'
    #print len(products)
    #try:
    #print or_img
    # Download the reference image once, up front.
    fd = urllib.urlopen(or_img)
    orig_img_link = io.BytesIO(fd.read())
    orig_img = Image.open(orig_img_link)
    #except:
    #orig_img_link = cStringIO.StringIO(urllib.urlopen('http://cs617219.vk.me/v617219415/c9c4/KUCX_V8m7CQ.jpg').read())
    #orig_img = Image.open(orig_img_link)
    titles = []
    image_diffs = []
    img_data = []
    #i = 0;
    for item in products:
        #i += 1;
        #img.show()
        #print process_str(item['productTitle'])
        titles.append(process_str(item['productTitle']))
        try:
            #print item['productTitle'] + item['salePrice'] + '\n' + item['imageUrl'] + '\n'
            fd = urllib.urlopen(item['imageUrl'])
            img_link = io.BytesIO(fd.read())
            img = Image.open(img_link)
            #image_diffs.append(rmsdiff(img, orig_img))
            #print comp_images(orig_img, img)
            img_data.append(comp_images(orig_img, img))
            #a = raw_input();
            #print i
            #print '___________________________________________________________________________'
        except:
            # NOTE(review): bare except hides download/decode errors; a
            # neutral score of 50 is substituted for unfetchable images.
            img_data.append(50)
    string_diffs = map(strdiff, titles, [s_item]*len(titles))
    max_strdiff = float(max(string_diffs))
    # max_imgdiff = float(max(image_diffs))
    min_strdiff = float(min(string_diffs))
    # min_imgdiff = float(min(image_diffs))
    #print 'CHECK IMG DATA'
    #print img_data
    #print 'MIN'
    #print min(img_data)
    #print 'MAX'
    #print max(img_data)
    # Normalise both signals to 0-100 and blend them (string-weighted).
    str_data = list(get_perc_w(min_strdiff, max_strdiff, string_diffs))
    img_data = list(get_perc(min(img_data), max(img_data), img_data))
    comp_data = map(get_avg, str_data, img_data)
    #print "word matches: "
    #print str_data
    #print "images:"
    #print img_data
    #print "comp:"
    #print comp_data
    ids = list(get_max_ind(comp_data))
    #print 'IDs'
    #print ids
    urls = []
    prices = []
    percs = []
    diffs = []
    for item in ids:
        urls.append(products[item]['productUrl'])
        prices.append(price_float(products[item]['salePrice']))
        percs.append(comp_data[item])
        diffs.append(or_price - price_float(products[item]['salePrice']))
    print urls
    print prices
    print percs
    print diffs
    #'''or (or_price - new_price > 5*new_price) or comp_data[ids[get_min_ind(prices)]] < 50'''
    return urls, prices, percs, diffs
| 11,174 | 4,126 |
from django.shortcuts import render
# Create your views here.
def entreprise_home(request):
    """Render the static enterprise landing page (no template context)."""
    return render(
        request,
        'pages/entreprise/entreprise_home.html',
        None,
    )
| 224 | 65 |
""" module doc """
__revision__ = None
def somegen():
    """this kind of mix is OK"""
    # A generator may legally contain a bare `return` after a `yield`; this
    # appears to be a lint-checker fixture demonstrating exactly that —
    # confirm before restructuring.
    yield 1
    return
| 111 | 38 |
import os
import argparse
import bpy
# Constants
FBX_EXTENSION = ".fbx"
BLENDER_ACTION_SELECT = "SELECT"
BLENDER_TYPE_MESH = "MESH"
BLENDER_MODIFIER_BEVEL = "BEVEL"
def get_args():
    """
    A method to obtain the arguments that came with the triggered Python file - from the .bat file.
    :rtype: object
    :return: An object containing the arguments as properties.
    """
    parser_double_dash = "--"
    parser_path_short_argument = "-p"
    parser_path_long_argument = "--path"
    parser_path_help = "asset path"
    parser = argparse.ArgumentParser()
    # Blender forwards everything after a literal "--" to the script; split
    # sys.argv there and only parse the script's own portion.
    _, all_arguments = parser.parse_known_args()
    # NOTE(review): .index() raises ValueError if "--" is absent (and
    # argparse's handling of a bare "--" varies across Python versions) —
    # confirm the invoking .bat always supplies the separator.
    double_dash_index = all_arguments.index(parser_double_dash)
    script_args = all_arguments[double_dash_index + 1:]
    # Only now declare the script's own flags and parse the tail.
    parser.add_argument(parser_path_short_argument, parser_path_long_argument, help=parser_path_help)
    parsed_script_args, _ = parser.parse_known_args(script_args)
    return parsed_script_args
def setup_and_run_mesh_process():
    """
    Initialize the arguments and run the mesh process.
    """
    parsed_args = get_args()
    process_mesh(parsed_args.path)
def process_mesh(asset_path):
    """
    Process the mesh at the given asset_path.
    In this sample, processing = beveling and exporting the beveled mesh to the same path, with an added
    suffix to the name.
    :param string asset_path: The absolute asset path.
    """
    processed_mesh_suffix = "_processed"
    asset_name = os.path.splitext(os.path.basename(asset_path))[0]
    source_asset_directory = os.path.dirname(asset_path)
    # Determine new naming and paths for the processed mesh
    export_asset_name = asset_name + processed_mesh_suffix
    export_asset_path = os.path.join(source_asset_directory, export_asset_name + FBX_EXTENSION)
    print("The source asset path is: " + asset_path)
    print("The source asset name is: " + asset_name)
    print("The source directory path is: " + source_asset_directory)
    # Clear the default Blender scene
    bpy.ops.object.select_all(action=BLENDER_ACTION_SELECT)
    bpy.ops.object.delete()
    # Import the asset in the Blender scene
    try:
        bpy.ops.import_scene.fbx(filepath=asset_path)
    except Exception as e:
        print("Could not import asset at : " + asset_path)
        print(e)
        # FIX: the original set a flag but still ran the processing stage
        # after a failed import; there is nothing to process, so stop here.
        return
    # Process the asset
    # In this sample, I'm bevelling the asset and exporting the new mesh right next to the old one.
    # You can add your custom processing here and replace the sample.
    try:
        imported_assets = bpy.context.selected_objects
        for asset in imported_assets:
            if asset.type != BLENDER_TYPE_MESH:
                continue
            # Apply a bevel modifier on the mesh
            bevel_modifier_name = "Bevel Modifier"
            asset.modifiers.new(name=bevel_modifier_name, type=BLENDER_MODIFIER_BEVEL)
    except Exception as e:
        print("Could not process asset.")
        print(e)
        # Skip the export when processing failed (replaces the original
        # `processing_failed` flag).
        return
    # Export the asset from Blender back to Unity, next to the original asset
    try:
        bpy.ops.export_scene.fbx(
            filepath=export_asset_path,
            use_selection=True)
    except Exception as e:
        print("Could not export to path: " + export_asset_path)
        print(e)
# Triggering the mesh process
# NOTE(review): executed at import/run time with no __main__ guard —
# Blender invokes this file directly, so that appears intentional.
setup_and_run_mesh_process()
| 3,165 | 1,124 |
from django_inlinecss.tests.test_templatetags import *
| 55 | 19 |
import os
from absl import app, flags
import dataset
import networks
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
layers = tf.keras.layers
# Experiment configuration flags for the discrete-VAE training script.
flags.DEFINE_enum('dataset', 'static_mnist', ['static_mnist', 'dynamic_mnist', 'fashion_mnist', 'omniglot'], 'Dataset to use.')
# Learning rates for the three trainable parts of the model.
flags.DEFINE_float('genmo_lr', 1e-4, 'Learning rate for decoder, Generation network.')
flags.DEFINE_float('infnet_lr', 1e-4, 'Learning rate for encoder, Inference network.')
flags.DEFINE_float('prior_lr', 1e-2, 'Learning rate for prior variables.')
flags.DEFINE_integer('batch_size', 50, 'Training batch size.')
# Help-text fix: original read 'used gradient estimators'.
flags.DEFINE_integer('num_pairs', 1, ('Number of sample pairs used by gradient estimators.'))
flags.DEFINE_integer('num_steps', int(1e6), 'Number of training steps.')
flags.DEFINE_string('encoder_type', 'linear', 'Choice supported: linear, nonlinear')
flags.DEFINE_string('grad_type', 'arm', 'Choice supported: arm, disarm, reinforce')
flags.DEFINE_string('logdir', 'logs/tmp', 'Directory for storing logs.')
flags.DEFINE_bool('verbose', False, 'Whether to turn on training result logging.')
flags.DEFINE_integer('repeat_idx', 0, 'Dummy flag to label the experiments in repeats.')
flags.DEFINE_bool('half_p_trick', False, 'Enforce the p range is [0., 0.5]')
flags.DEFINE_float('epsilon', 0., 'Additive float to prevent numerical underflow in log(x).')
# RELAX-specific hyperparameters; None means "learn them".
flags.DEFINE_float('temperature', None, 'Temperature for RELAX estimator.')
flags.DEFINE_float('scaling_factor', None, 'Scaling factor for RELAX estimator.')
flags.DEFINE_bool('eager', False, 'Enable eager execution.')
flags.DEFINE_bool('bias_check', False, 'Carry out bias check for RELAX and baseline')
flags.DEFINE_bool('demean_input', False, 'Demean for encoder and decoder inputs.')
flags.DEFINE_bool('initialize_with_bias', False, 'Initialize the final layer bias of decoder with dataset mean.')
flags.DEFINE_integer('seed', 1, 'Global random seed.')
flags.DEFINE_integer('num_eval_samples', None, 'Number of samples for evaluation, default to num_pairs.')
# Help-text fix: this flag controls samples for evaluating on the TRAINING
# set (see main()); the original help said plain 'evaluation' (copy-paste).
flags.DEFINE_integer('num_train_samples', None, 'Number of samples for evaluation on the training set, default to num_pairs.')
flags.DEFINE_bool('debug', False, 'Turn on debugging mode.')
FLAGS = flags.FLAGS
def process_batch_input(input_batch):
    """Flatten each example to a rank-1 vector and cast the batch to float32."""
    batch_size = tf.shape(input_batch)[0]
    flattened = tf.reshape(input_batch, [batch_size, -1])
    return tf.cast(flattened, tf.float32)
def initialize_grad_variables(target_variable_list):
    """Return zero-initialised tf.Variables shaped like each given variable."""
    accumulators = []
    for template in target_variable_list:
        accumulators.append(tf.Variable(tf.zeros(shape=template.shape)))
    return accumulators
def estimate_gradients(input_batch, bvae_model, gradient_type, sample_size=1):
    """Estimate decoder/prior/encoder gradients for one batch.

    :param input_batch: flattened float32 batch (see process_batch_input).
    :param bvae_model: model exposing decoder_vars/prior_vars/encoder_vars.
    :param gradient_type: 'relax' uses the RELAX estimator; anything else
        uses the model's per-layer score-function-style estimator.
    :param sample_size: number of samples drawn by the estimator.
    :return: (genmo_grads, prior_grads, infnet_grads, genmo_loss).
    """
    if gradient_type == 'relax':
        with tf.GradientTape(persistent=True) as tape:
            genmo_loss, reparam_loss, learning_signal, log_q = (
                bvae_model.get_relax_loss(input_batch, temperature=FLAGS.temperature,
                    scaling_factor=FLAGS.scaling_factor, num_samples=sample_size))
        genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
        prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
        infnet_vars = bvae_model.encoder_vars
        # Encoder gradient = score-function term (learning signal weighting
        # grad log q) plus the reparameterised control-variate correction.
        infnet_grads_1 = tape.gradient(log_q, infnet_vars, output_gradients=learning_signal)
        infnet_grads_2 = tape.gradient(reparam_loss, infnet_vars)
        infnet_grads = [infnet_grads_1[i] + infnet_grads_2[i] for i in range(len(infnet_vars))]
    else:
        with tf.GradientTape(persistent=True) as tape:
            elbo, _, infnet_logits, _ = bvae_model(input_batch)
            genmo_loss = -1. * tf.reduce_mean(elbo)
        genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
        prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
        # Chain rule through the encoder logits with a model-supplied
        # multiplier (per-layer discrete gradient estimate).
        infnet_grad_multiplier = -1. * bvae_model.get_layer_grad_estimation(input_batch, num_samples=sample_size)
        infnet_grads = tape.gradient(infnet_logits, bvae_model.encoder_vars, output_gradients=infnet_grad_multiplier)
    # The tape is persistent; release it explicitly.
    del tape
    return (genmo_grads, prior_grads, infnet_grads, genmo_loss)
@tf.function
def train_one_step(
    train_batch_i,
    bvae_model,
    genmo_optimizer,
    infnet_optimizer,
    prior_optimizer,
    theta_optimizer,
    encoder_grad_variable,
    encoder_grad_sq_variable):
  """Train Discrete VAE for 1 step."""
  metrics = {}
  input_batch = process_batch_input(train_batch_i)
  # Pair-based estimators consume two samples per pair.
  if FLAGS.grad_type in ['loorf', 'arms', 'arms_normal']:
    num_samples = 2 * FLAGS.num_pairs
  else:
    num_samples = FLAGS.num_pairs
  if FLAGS.grad_type == 'relax':
    # RELAX: the control-variate parameters (temperature/scaling and the
    # control network) are trained to minimise the encoder gradient
    # variance, so wrap gradient estimation in a second tape.
    with tf.GradientTape(persistent=True) as theta_tape:
      (genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
          input_batch, bvae_model, FLAGS.grad_type, num_samples)
      genmo_vars = bvae_model.decoder_vars
      genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
      prior_vars = bvae_model.prior_vars
      prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
      infnet_vars = bvae_model.encoder_vars
      infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
      # Squared encoder gradients = the variance surrogate minimised below.
      infnet_grads_sq = [tf.square(grad_i) for grad_i in infnet_grads]
    theta_vars = []
    if bvae_model.control_nn:
      theta_vars.extend(bvae_model.control_nn.trainable_variables)
    if FLAGS.temperature is None:
      theta_vars.append(bvae_model.log_temperature_variable)
    if FLAGS.scaling_factor is None:
      theta_vars.append(bvae_model.scaling_variable)
    theta_grads = theta_tape.gradient(infnet_grads_sq, theta_vars)
    theta_optimizer.apply_gradients(zip(theta_grads, theta_vars))
    del theta_tape
    metrics['learning_signal'] = bvae_model.mean_learning_signal
  else:
    (genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
        input_batch, bvae_model, FLAGS.grad_type, num_samples)
    genmo_vars = bvae_model.decoder_vars
    genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
    prior_vars = bvae_model.prior_vars
    prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
    infnet_vars = bvae_model.encoder_vars
    infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
  batch_size_sq = tf.cast(FLAGS.batch_size * FLAGS.batch_size, tf.float32)
  # Running per-parameter gradient-variance estimate, normalised by
  # batch_size^2.
  encoder_grad_var = bvae_model.compute_grad_variance(
      encoder_grad_variable, encoder_grad_sq_variable,
      infnet_grads) / batch_size_sq
  # Second element (variance_dict) is always None in this code path.
  return (encoder_grad_var, None, genmo_loss, metrics)
@tf.function
def evaluate(model, tf_dataset, max_step=1000, num_eval_samples=None):
  """Evaluate the model."""
  # Sample-count priority: explicit argument > --num_eval_samples flag >
  # estimator-specific default derived from --num_pairs.
  if num_eval_samples:
    num_samples = num_eval_samples
  elif FLAGS.num_eval_samples:
    num_samples = FLAGS.num_eval_samples
  elif FLAGS.grad_type in ['vimco', 'local-disarm', 'local-arms']:
    num_samples = FLAGS.num_pairs * 2
  elif FLAGS.grad_type in ['loorf', 'arms', 'arms_normal']:
    num_samples = 2 * FLAGS.num_pairs
  else:
    num_samples = FLAGS.num_pairs
  loss = 0.
  n = 0.
  for batch in tf_dataset.map(process_batch_input):
    if n >= max_step:  # used for train_ds, which is a `repeat` dataset.
      break
    if num_samples > 1:
      # Multi-sample bound: log-mean-exp over ELBO samples (IWAE-style).
      batch_size = tf.shape(batch)[0]
      input_batch = tf.tile(batch, [num_samples, 1])
      elbo = tf.reshape(model(input_batch)[0], [num_samples, batch_size])
      objectives = (tf.reduce_logsumexp(elbo, axis=0, keepdims=False) -
                    tf.math.log(tf.cast(tf.shape(elbo)[0], tf.float32)))
    else:
      objectives = model(batch)[0]
    loss -= tf.reduce_mean(objectives)
    n += 1.
  # Mean negative objective over the evaluated batches.
  return loss / n
def main(_):
  """Build the discrete VAE per FLAGS and run the training loop."""
  tf.random.set_seed(FLAGS.seed)
  logdir = FLAGS.logdir
  if not os.path.exists(logdir):
    os.makedirs(logdir)
  if FLAGS.eager:
    # NOTE(review): experimental_run_functions_eagerly is deprecated in
    # newer TF in favour of tf.config.run_functions_eagerly.
    tf.config.experimental_run_functions_eagerly(FLAGS.eager)
  genmo_lr = tf.constant(FLAGS.genmo_lr)
  infnet_lr = tf.constant(FLAGS.infnet_lr)
  prior_lr = tf.constant(FLAGS.prior_lr)
  genmo_optimizer = tf.keras.optimizers.Adam(learning_rate=genmo_lr)
  infnet_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr)
  prior_optimizer = tf.keras.optimizers.SGD(learning_rate=prior_lr)
  theta_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr,
                                             beta_1=0.999)
  batch_size = FLAGS.batch_size
  # Dataset selection; train_size mirrors the known training-split sizes.
  # NOTE(review): an unmatched --dataset value leaves train_size unbound
  # (NameError below); the enum flag normally prevents this.
  if FLAGS.dataset == 'static_mnist':
    train_ds, valid_ds, test_ds = dataset.get_static_mnist_batch(batch_size)
    train_size = 50000
  elif FLAGS.dataset == 'dynamic_mnist':
    train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(batch_size)
    train_size = 50000
  elif FLAGS.dataset == 'fashion_mnist':
    train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(
        batch_size, fashion_mnist=True)
    train_size = 50000
  elif FLAGS.dataset == 'omniglot':
    train_ds, valid_ds, test_ds = dataset.get_omniglot_batch(batch_size)
    train_size = 23000
  num_steps_per_epoch = int(train_size / batch_size)
  train_ds_mean = dataset.get_mean_from_iterator(
      train_ds, dataset_size=train_size, batch_size=batch_size)
  if FLAGS.initialize_with_bias:
    # Start the decoder output bias at the logit of the per-pixel mean.
    bias_value = -tf.math.log(
        1./tf.clip_by_value(train_ds_mean, 0.001, 0.999) - 1.).numpy()
    bias_initializer = tf.keras.initializers.Constant(bias_value)
  else:
    bias_initializer = 'zeros'
  if FLAGS.encoder_type == 'linear':
    encoder_hidden_sizes = [200]
    encoder_activations = ['linear']
    decoder_hidden_sizes = [784]
    decoder_activations = ['linear']
  elif FLAGS.encoder_type == 'nonlinear':
    encoder_hidden_sizes = [200, 200, 200]
    encoder_activations = [
        layers.LeakyReLU(alpha=0.3),
        layers.LeakyReLU(alpha=0.3),
        'linear']
    decoder_hidden_sizes = [200, 200, 784]
    decoder_activations = [
        layers.LeakyReLU(alpha=0.3),
        layers.LeakyReLU(alpha=0.3),
        'linear']
  else:
    raise NotImplementedError
  encoder = networks.BinaryNetwork(
      encoder_hidden_sizes,
      encoder_activations,
      mean_xs=train_ds_mean,
      demean_input=FLAGS.demean_input,
      name='bvae_encoder')
  decoder = networks.BinaryNetwork(
      decoder_hidden_sizes,
      decoder_activations,
      demean_input=FLAGS.demean_input,
      final_layer_bias_initializer=bias_initializer,
      name='bvae_decoder')
  prior_logit = tf.Variable(tf.zeros([200], tf.float32))
  if FLAGS.grad_type == 'relax':
    # Learned control-variate network used only by the RELAX estimator.
    control_network = tf.keras.Sequential()
    control_network.add(
        layers.Dense(137, activation=layers.LeakyReLU(alpha=0.3)))
    control_network.add(
        layers.Dense(1))
  else:
    control_network = None
  bvae_model = networks.SingleLayerDiscreteVAE(
      encoder,
      decoder,
      prior_logit,
      grad_type=FLAGS.grad_type,
      half_p_trick=FLAGS.half_p_trick,
      epsilon=FLAGS.epsilon,
      control_nn=control_network)
  bvae_model.build(input_shape=(None, 784))
  tensorboard_file_writer = tf.summary.create_file_writer(logdir)
  encoder_grad_variable = initialize_grad_variables(bvae_model.encoder_vars)
  encoder_grad_sq_variable = initialize_grad_variables(bvae_model.encoder_vars)
  # Resuming support: the optimizer's iteration count is the step counter.
  start_step = infnet_optimizer.iterations.numpy()
  train_iter = train_ds.__iter__()
  for step_i in range(start_step, FLAGS.num_steps):
    (encoder_grad_var, variance_dict, genmo_loss, metrics) = train_one_step(
        train_iter.next(),
        bvae_model,
        genmo_optimizer,
        infnet_optimizer,
        prior_optimizer,
        theta_optimizer,
        encoder_grad_variable,
        encoder_grad_sq_variable)
    train_loss = tf.reduce_mean(genmo_loss)
    # Periodic evaluation + TensorBoard logging.
    if step_i % 1000 == 0:
      metrics.update({
          'train_objective': train_loss,
          'eval_metric/train': evaluate(bvae_model, train_ds, max_step=num_steps_per_epoch, num_eval_samples=FLAGS.num_train_samples),
          'eval_metric/valid': evaluate(bvae_model, valid_ds, num_eval_samples=FLAGS.num_eval_samples),
          'eval_metric/test': evaluate(bvae_model, test_ds, num_eval_samples=FLAGS.num_eval_samples),
          'var/grad': encoder_grad_var
      })
      if FLAGS.grad_type == 'relax':
        if FLAGS.temperature is None:
          metrics['relax/temperature'] = tf.math.exp(bvae_model.log_temperature_variable)
        if FLAGS.scaling_factor is None:
          metrics['relax/scaling'] = bvae_model.scaling_variable
      tf.print(step_i, metrics)
      with tensorboard_file_writer.as_default():
        for k, v in metrics.items():
          tf.summary.scalar(k, v, step=step_i)
        if variance_dict is not None:
          tf.print(variance_dict)
          for k, v in variance_dict.items():
            tf.summary.scalar(k, v, step=step_i)
# Hand control to absl so flags are parsed before `main` runs.
if __name__ == '__main__':
  app.run(main)
| 12,562 | 4,681 |
import warnings
import iso639
import pytest
import wikipron
from data.src.codes import _get_language_categories, _get_language_sizes
from wikipron.languagecodes import LANGUAGE_CODES
from . import can_connect_to_wiktionary
# We handle languages with at least this number of pronunciation entries.
_MIN_LANGUAGE_SIZE = 100
@pytest.mark.skipif(not can_connect_to_wiktionary(), reason="need Internet")
def test_language_coverage():
    """Check if WikiPron covers languages with a sufficient amount of data.
    If any warnings are raised, they should be suppressed by expanding
    the LANGUAGE_CODES dict to handle the relevant languages.
    """
    categories = _get_language_categories()
    sizes = _get_language_sizes(categories)
    for language, size in sizes.items():
        # Languages below the entry threshold are out of scope.
        if size < _MIN_LANGUAGE_SIZE:
            continue
        if language in ("Mon", "Translingual"):
            # "mon" is the ISO 639 code for Mongolian, but there is also
            # the Mon language (ISO 639 code: "mnw").
            continue
        try:
            language_code = iso639.to_iso639_2(language)
        except iso639.NonExistentLanguageError:
            # Check if WikiPron can handle `language` directly.
            language_code = language
        try:
            language_inferred = wikipron.Config(key=language_code).language
        except iso639.NonExistentLanguageError:
            warnings.warn(f'WikiPron cannot handle "{language}".')
            continue
        # The resolved name must match the Wiktionary category name exactly.
        if language_inferred != language:
            warnings.warn(
                f'WikiPron resolves the key "{language_code}" to '
                f'"{language_inferred}", '
                f'which is not "{language}" on Wiktionary.'
            )
def test_language_codes_dict_keys():
    """LANGUAGE_CODES keys must be in lowercase for Config._get_language."""
    assert all(key == key.lower() for key in LANGUAGE_CODES)
| 1,916 | 565 |
import sys
def plen():
    """Print the number of command-line arguments (program name included)."""
    arg_count = len(sys.argv)
    print(arg_count)
# Script entry point: report argv length whenever the module is executed.
plen()
| 59 | 27 |
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Unit tests for model_reader.
import pytest
from ethosu.vela import model_reader
from ethosu.vela.errors import InputFileError
def test_read_model_incorrect_extension(tmpdir):
    # read_model must reject any file name that does not end with .tflite.
    options = model_reader.ModelReaderOptions()
    with pytest.raises(InputFileError):
        model_reader.read_model("no_tflite_file.txt", options)
def test_read_model_file_not_found(tmpdir):
    # A missing .tflite path must surface as FileNotFoundError.
    options = model_reader.ModelReaderOptions()
    with pytest.raises(FileNotFoundError):
        model_reader.read_model("non_existing.tflite", options)
| 1,281 | 392 |
# -*- coding: utf-8 -*-
"""
Degradation models.
Created on Thu May 24 11:00:00 2018
Author: Prasun Roy | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)
GitHub: https://github.com/prasunroy/cnn-on-degraded-images
"""
# imports
import cv2
import numpy
import random
# apply a degradation model on an image
def imdegrade(image, model, mu=0, sigma=0, density=0, gb_ksize=(1, 1),
              mb_kernel=numpy.zeros((1, 1), dtype='uint8'), quality=100,
              seed=None):
    """Apply the named degradation model to a copy of `image`.

    :param image: numpy array, grayscale (h, w) or color (h, w, c);
        presumably uint8 in [0, 255] — confirm with callers.
    :param model: one of 'gaussian_white', 'gaussian_color',
        'salt_and_pepper', 'motion_blur', 'gaussian_blur',
        'jpeg_compression' (case-insensitive); anything else is a no-op.
    :param mu: Gaussian noise mean (noise is added in [0, 1] space).
    :param sigma: Gaussian noise standard deviation; <= 0 disables noise.
    :param density: fraction of pixels hit by salt-and-pepper noise.
    :param gb_ksize: Gaussian blur kernel size.
    :param mb_kernel: motion blur kernel.  NOTE: default is a shared
        mutable numpy array — callers must not modify it in place.
    :param quality: JPEG quality, clamped to [0, 100].
    :param seed: optional RNG seed for reproducibility.
    :return: degraded image with the same layout as the input.
    """
    # setup seeds for random number generators
    # (only required for reproducibility)
    numpy.random.seed(seed)
    random.seed(seed)
    # create a copy of the input image to prevent direct modification
    # on the original input image
    image = image.copy()
    # add an extra dimension for color channel
    # (only required for grayscale images)
    if len(image.shape) == 2:
        image = numpy.expand_dims(image, 2)
    # get dimension of the image
    h, w, c = image.shape
    # apply a degradation model
    model = model.lower()
    if model == 'gaussian_white' and sigma > 0:
        # Same noise plane replicated across channels (luminance noise).
        image = image / 255.0
        noise = numpy.random.normal(mu, sigma, (h, w))
        noise = numpy.dstack([noise]*c)
        image += noise
        image = numpy.clip(image, 0, 1)
        image = (image * 255.0).astype('uint8')
    elif model == 'gaussian_color' and sigma > 0:
        # Independent noise per channel (chromatic noise).
        image = image / 255.0
        noise = numpy.random.normal(mu, sigma, (h, w, c))
        image += noise
        image = numpy.clip(image, 0, 1)
        image = (image * 255.0).astype('uint8')
    elif model == 'salt_and_pepper':
        if density < 0:
            density = 0
        elif density > 1:
            density = 1
        # Shuffled coordinate grid; a random subset of pixels is forced to
        # pure white or pure black with equal probability.
        x = random.sample(range(w), w)
        y = random.sample(range(h), h)
        x, y = numpy.meshgrid(x, y)
        xy = numpy.c_[x.reshape(-1), y.reshape(-1)]
        n = int(w * h * density)
        n = random.sample(range(w*h), n)
        for i in n:
            if random.random() > 0.5:
                image[xy[i][1], xy[i][0], :] = 255
            else:
                image[xy[i][1], xy[i][0], :] = 0
    elif model == 'motion_blur':
        image = cv2.filter2D(image, -1, mb_kernel,
                             borderType=cv2.BORDER_CONSTANT)
    elif model == 'gaussian_blur':
        image = cv2.GaussianBlur(image, gb_ksize, 0,
                                 borderType=cv2.BORDER_CONSTANT)
    elif model == 'jpeg_compression':
        if quality < 0:
            quality = 0
        elif quality > 100:
            quality = 100
        # Round-trip through an in-memory JPEG encode/decode.
        image = cv2.imencode('.jpg', image,
                             [int(cv2.IMWRITE_JPEG_QUALITY), quality])[-1]
        image = cv2.imdecode(image, -1)
    # remove the extra dimension for color channel
    # (only required for grayscale images)
    if image.shape[-1] == 1:
        image = numpy.squeeze(image, 2)
    return image
| 2,947 | 1,040 |