id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11236887 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import double
from numba.decorators import jit as jit
#@autojit
def sum2d(arr):
    """Return the sum of all entries of a 2-D array using explicit loops.

    Written as plain Python (rather than arr.sum()) so the same function
    can be compiled with numba's jit below for a speed comparison.

    :param arr: a 2-D numpy array
    :returns: float sum of all elements
    """
    rows, cols = arr.shape
    total = 0.0
    for row in range(rows):
        for col in range(cols):
            total += arr[row, col]
    return total
# Compile sum2d with numba's legacy decorator API: float64 result,
# a single 2-D float64 argument.
csum2d = jit(restype=double, argtypes=[double[:,:]])(sum2d)
from numpy import random
arr = random.randn(100,100)
import time
# Time the interpreted Python version.
start = time.time()
res = sum2d(arr)
duration = time.time() - start
print("Result from python is %s in %s (msec)" % (res, duration*1000))
# Time the numba-compiled version.
# NOTE(review): no warm-up call is made, so this timing may include
# one-time JIT compilation cost — confirm whether that is intended.
start = time.time()
res = csum2d(arr)
duration2 = time.time() - start
print("Result from compiled is %s in %s (msec)" % (res, duration2*1000))
print("Speed up is %s" % (duration / duration2))
| StarcoderdataPython |
3267898 | <reponame>NathanKr/python-modules-and-packages-playground<gh_stars>0
from setuptools import find_packages, setup
# Package metadata for the `myutils` example package.
setup(
    name='myutils',
    packages=find_packages(),  # auto-discover all packages under this directory
    version='0.1.0',
    description='first python package',
    author='<NAME>',
    author_email = '<EMAIL>',
    license='MIT',
    install_requires=[] # can be omitted if empty
)
| StarcoderdataPython |
307227 | <filename>power_renamer/__init__.py
from fman import DirectoryPaneCommand, DirectoryPane, ApplicationCommand, show_alert, FMAN_VERSION, DirectoryPaneListener, load_json, save_json, show_prompt, YES, NO, ABORT
from fman.fs import copy, move, exists
from fman.url import as_human_readable, basename
from subprocess import Popen
import os.path
import re
class PowerRename(DirectoryPaneCommand):
    """fman command: rename all selected files to a numbered pattern.

    The user enters a base name containing one or more '#' characters;
    each '#' becomes one digit of a zero-padded counter (e.g. "img_###"
    -> img_001, img_002, ...). Prefixing the pattern with '$' shows a
    preview dialog before any file is touched.
    """

    def __call__(self):
        url = self.pane.get_path()
        paths = self.pane.get_selected_files()
        if len(paths) < 1:
            show_alert("Please select at least 1 file.")
            return
        basefilename, ok = show_prompt('Please enter the base name like "filename_####"')
        # Bug fix: bail out when the dialog was cancelled OR nothing was
        # entered. The original used `and`, so cancelling a dialog that
        # still contained text fell through and renamed files anyway.
        if not ok or not basefilename:
            return
        # The number of '#' characters determines the counter width.
        digitLength = basefilename.count('#')
        if digitLength < 1:
            show_alert('Please enter a valid base name with at least 1 #')
            return
        # With d digits we can number at most 10**d files.
        maxNumber = pow(10, digitLength)
        showPreview = shouldShowPreview(basefilename)
        if showPreview:
            basefilename = basefilename[1:]  # strip the leading '$'
        # Everything before the first '#' is the fixed prefix.
        name_split = basefilename.split("#")
        baseFilename = name_split[0]
        if len(paths) > maxNumber:
            show_alert('You added ' + str(digitLength) + ' #s. With that you can rename ' + str(maxNumber) + ' files. But you selected ' + str(len(paths)) + ' files.')
            return
        if showPreview:
            # Dry run first, then ask for confirmation.
            preview = iteateFilesForRename(paths, baseFilename, digitLength, url, False)
            choice = show_alert(
                preview,
                buttons=YES | ABORT,
            )
            if choice == ABORT:
                return
        iteateFilesForRename(paths, baseFilename, digitLength, url, True)
        self.pane.clear_selection()
class PowerReplace(DirectoryPaneCommand):
    """fman command: substring search-and-replace across selected file names.

    Prefixing the search string with '$' shows a preview dialog before
    any file is touched. Replacing with an empty string deletes the
    matched substring.
    """

    def __call__(self):
        url = self.pane.get_path()
        paths = self.pane.get_selected_files()
        if len(paths) < 1:
            show_alert("Please select at least 1 file.")
            return
        # Pre-fill the prompt with the first selected file's stem.
        name = os.path.basename(paths[0])
        name, ext = os.path.splitext(name)
        replaceString, ok = show_prompt('Please enter the string you want to replace', name)
        # Bug fix: abort on cancel OR empty input. The original used `and`,
        # so cancelling a dialog that still contained text fell through.
        if not ok or not replaceString:
            return
        newString, ok = show_prompt('Please enter the new string you want replace ' + replaceString + ' with')
        # Abort only on cancel here: an empty replacement (with OK) is a
        # legitimate request to delete the matched substring.
        if not ok:
            return
        showPreview = shouldShowPreview(replaceString)
        if showPreview:
            replaceString = replaceString[1:]  # strip the leading '$'
            # Dry run first, then ask for confirmation.
            preview = iteateFilesForReplace(paths, replaceString, newString, url, False)
            choice = show_alert(
                preview,
                buttons=YES | ABORT,
            )
            if choice == ABORT:
                return
        iteateFilesForReplace(paths, replaceString, newString, url, True)
        self.pane.clear_selection()
def shouldShowPreview(basefilename):
    """Return True if the user input requests a preview (leading '$').

    Bug fix: the original indexed basefilename[0] unconditionally and
    raised IndexError for an empty string; an empty input now simply
    means "no preview".

    :param basefilename: the raw string entered in the prompt
    :returns: True when the string starts with '$', False otherwise
    """
    return bool(basefilename) and basefilename[0] == '$'
def iteateFilesForReplace(paths, replaceString, newString, url, replace):
    """Replace `replaceString` with `newString` in each selected file name.

    :param paths: file URLs/paths selected in the pane
    :param replaceString: substring to search for in each base name
    :param newString: replacement substring
    :param url: directory URL used to build destination paths
    :param replace: when False, only build the preview text (dry run);
        when True, actually move the files
    :returns: a human-readable preview string, one "old > new" line per hit
    """
    preview = ''
    for path in paths:
        # Operate on the file name only (extension included).
        # Fix: removed the original's dead `name, ext = os.path.splitext(path)`
        # whose result was immediately overwritten and whose ext was unused.
        name = os.path.basename(path)
        if name.find(replaceString) > -1:
            newFilename = name.replace(replaceString, newString)
            newPath = url + "/" + newFilename
            preview += name + ' > ' + newFilename + '\n'
            if exists(newPath):
                # Never overwrite an existing file.
                preview += "File already exists, will be skipped.\n"
                continue
            if replace:
                move(path, newPath)
    return preview
def iteateFilesForRename(paths, baseFilename, digitLength, url, rename):
    """Rename each selected file to baseFilename + zero-padded counter + ext.

    :param paths: file URLs/paths selected in the pane
    :param baseFilename: fixed prefix for the new names
    :param digitLength: width of the zero-padded counter
    :param url: directory URL used to build destination paths
    :param rename: when False, only build the preview text (dry run);
        when True, actually move the files
    :returns: a human-readable preview string, one "old > new" line per file

    Note: the counter is only advanced for files that are not skipped, so
    a skipped target's number is reused for the next file.
    """
    preview = ''
    index = 1
    for path in paths:
        # The extension is taken from the full path; the display name keeps
        # its extension too (original's splitext `name` was dead code).
        ext = os.path.splitext(path)[1]
        name = os.path.basename(path)
        newFilename = baseFilename + str(index).zfill(digitLength) + ext
        newPath = url + "/" + newFilename
        preview += name + ' > ' + newFilename + '\n'
        if exists(newPath):
            # Never overwrite an existing file; keep this counter value.
            preview += "File already exists, will be skipped.\n"
            continue
        if rename:
            move(path, newPath)
        index += 1
    return preview
| StarcoderdataPython |
4908058 | import os
import math
import torch
import ujson
import traceback
from itertools import accumulate
from colbert.parameters import DEVICE
from colbert.utils.utils import print_message, dotdict, flatten
# Number of query--passage pairs scored per batch in batch_rank (16384).
BSIZE = 1 << 14
class IndexRanker():
    """Scores (query, passage) pairs with a MaxSim reduction over a flat
    tensor of document token embeddings (ColBERT-style late interaction).

    `tensor` holds the token embeddings of every document concatenated
    along dim 0; `doclens` gives per-document token counts, so each
    document can be recovered as a strided window into the big tensor.
    Documents are bucketed into a few stride sizes (90th percentile and
    max length) to pad as little as possible.
    """

    def __init__(self, tensor, doclens):
        """
        :param tensor: 2-D tensor [total_tokens, dim] of token embeddings
        :param doclens: per-document token counts (list of ints)
        """
        self.tensor = tensor # Big document words tensor
        print('tensor.shape', tensor.shape) # torch.Size([4469627, 128])
        self.doclens = doclens # of length 69905
        self.maxsim_dtype = torch.float32
        # Prefix sums: doclens_pfxsum[i] is the row in `tensor` where
        # document i's tokens begin.
        self.doclens_pfxsum = [0] + list(accumulate(self.doclens)) # CDF?
        self.doclens = torch.tensor(self.doclens) # tensorize
        self.doclens_pfxsum = torch.tensor(self.doclens_pfxsum) # tensorize
        self.dim = self.tensor.size(-1) # 128
        # Bucket boundaries: 90th-percentile doclen plus the maximum doclen.
        self.strides = [torch_percentile(self.doclens, p) for p in [90]]
        self.strides.append(self.doclens.max().item())
        self.strides = sorted(list(set(self.strides))) # [99, 180]
        print_message(f"#> Using strides {self.strides}..")
        self.views = self._create_views(self.tensor)
        #self.buffers = self._create_buffers(BSIZE, self.tensor.dtype, {'cpu', 'cuda:0'})

    def _create_views(self, tensor):
        """Build one strided (zero-copy) view per stride size.

        views[i][r] is the window of `stride` consecutive token rows that
        starts at row r, so indexing a view at a document's prefix-sum
        offset yields that document's (padded) token block.
        """
        views = []
        for stride in self.strides: # 99, 180
            outdim = tensor.size(0) - stride + 1 # 4469627 - 99/180 + 1
            print('outdim', outdim) # 4469529, 4469448
            view = torch.as_strided(tensor, # [4469627, 128]
                                    (outdim, stride, self.dim), (self.dim, self.dim, 1))
            #4469529/.., 99/180, 128, 128, 128, 1
            print(f'view.shape[i]', view.shape) # [4469529/.., 99/180, 128]
            views.append(view)
        # Example
        # tensor([[ 1.8278, -1.8511],
        #         [ 1.2551,  1.7123],
        #         [-0.4915,  0.6947],
        #         [ 2.3282,  1.8772]])
        # dim = 2
        # stride = 2 # group size. Try to adjust it and see what will happen...
        # outdim = tensor.size(0) - stride + 1 # which equals to 3
        # view = torch.as_strided(tensor, (outdim, stride, dim), (dim, dim, 1))
        #
        # tensor([[[ 1.8278, -1.8511],
        #          [ 1.2551,  1.7123]],
        #         [[ 1.2551,  1.7123],
        #          [-0.4915,  0.6947]],
        #         [[-0.4915,  0.6947],
        #          [ 2.3282,  1.8772]]])
        return views

    #def _create_buffers(self, max_bsize, dtype, devices):
    #    buffers = {}
    #    for device in devices:
    #        buffers[device] = [torch.zeros(max_bsize, stride, self.dim, dtype=dtype,
    #                                       device=device, pin_memory=(device == 'cpu'))
    #                           for stride in self.strides]
    #    return buffers

    def rank(self, Q, pids, views=None, shift=0):
        """Score query embeddings Q against the passages in `pids`.

        :param Q: query embeddings; size(0) must be 1 (broadcast to all
            pids) or len(pids) (one query per pid)
        :param pids: passage ids (list or 1-D tensor)
        :param views: optional strided views to use instead of self.views
            (batch_rank passes views over a tensor slice)
        :param shift: row offset subtracted from prefix sums when `views`
            cover a slice of the full tensor
        :returns: list of MaxSim scores, aligned with the input pid order
        """
        assert len(pids) > 0
        assert Q.size(0) in [1, len(pids)]
        Q = Q.contiguous().to(DEVICE).to(dtype=self.maxsim_dtype)
        # views is None
        views = self.views if views is None else views
        VIEWS_DEVICE = views[0].device
        #D_buffers = self.buffers[str(VIEWS_DEVICE)]
        raw_pids = pids if type(pids) is list else pids.tolist()
        pids = torch.tensor(pids) if type(pids) is list else pids
        # length == 6249
        doclens, offsets = self.doclens[pids], self.doclens_pfxsum[pids]
        a = doclens.unsqueeze(1) # torch.Size([6249, 1])
        b = torch.tensor(self.strides).unsqueeze(0) # torch.Size([1, 2])
        # assignments[i] = index of the smallest stride that fits doc i.
        assignments = (a > b + 1e-6) # [6249, 2]
        assignments = assignments.sum(-1) # [6249]
        print('assignments', assignments) # how many docs exceeds 90 percentile
        one_to_n = torch.arange(len(raw_pids))
        output_pids, output_scores, output_permutation = [], [], []
        # divde by doclen, from [99, 180]
        for group_idx, stride in enumerate(self.strides):
            locator = (assignments == group_idx)
            if locator.sum() < 1e-5:
                continue
            # when group_idx == 0, locator are docs less than 90-percentile
            # when group_idx == 1, locator are docs more than 90-percentile
            group_pids, group_doclens, group_offsets = pids[locator], doclens[locator], offsets[locator]
            group_Q = Q if Q.size(0) == 1 else Q[locator]
            #print('0', group_Q.shape) # torch.Size([1, 128, 32])
            #print('0', group_doclens.shape) # torch.Size([5510])
            group_offsets = group_offsets.to(VIEWS_DEVICE) - shift
            # output, inverse_indices (where elements in the original input map to in the output)
            #print(pids)
            #print(group_pids) # subset of pids
            #print(group_doclens)
            #print(group_offsets) # subset of offsets, same size as group_pids
            group_offsets_uniq, group_offsets_expand = torch.unique_consecutive(group_offsets, return_inverse=True)
            #print(group_offsets_uniq) # equal pfxsum means same document
            #print(group_offsets_expand) # inverted indices for group_offsets_uniq
            #print()
            D_size = group_offsets_uniq.size(0)
            #print('1', views[group_idx].shape) # torch.Size([4469529, 99, 128]
            # view[1] = [4469529, 99, 128], dim selects
            # view[2] = [4469448, 180, 128], dim selects
            # What is in view? words' embeddings.
            #D = torch.index_select(views[group_idx], 0, group_offsets_uniq, out=D_buffers[group_idx][:D_size])
            D = torch.index_select(views[group_idx], 0, group_offsets_uniq)
            #print('2', D.shape) # torch.Size([5510, 99, 128])
            D = D.to(DEVICE)
            D = D[group_offsets_expand.to(DEVICE)].to(dtype=self.maxsim_dtype)
            # in rare cases, D contains some identical offsets
            #print('3', D.shape) # torch.Size([5510, 99, 128])
            # mask marks the real (non-padding) token positions of each doc.
            mask_ = torch.arange(stride, device=DEVICE) + 1
            # mask_ = tensor([1, 2, 3 ... 99/180])
            # mask_.unsqueeze(0) = tensor([[1, 2, 3 ... 99/180]])
            mask = mask_.unsqueeze(0) <= group_doclens.to(DEVICE).unsqueeze(-1)
            #print('mask.shape', mask.shape) # torch.Size([5510, 99])
            # [5510, 99, 128] @ [1, 128, 32]
            scores = D @ group_Q
            #print(mask_)
            #print(group_doclens)
            #print(mask.unsqueeze(-1)) # each row is identical
            #print(scores)
            #print(scores * mask.unsqueeze(-1))
            #print()
            #print('4', scores.shape) # torch.Size([5510, 99, 32])
            scores = scores * mask.unsqueeze(-1)
            # MaxSim: max over document tokens, then sum over query tokens.
            scores = scores.max(1).values.sum(-1).cpu()
            #print('5', scores.shape) # torch.Size([5510])
            output_pids.append(group_pids)
            output_scores.append(scores)
            output_permutation.append(one_to_n[locator])
        # Undo the bucketing so scores line up with the input pid order.
        output_permutation = torch.cat(output_permutation).sort().indices
        output_pids = torch.cat(output_pids)[output_permutation].tolist()
        output_scores = torch.cat(output_scores)[output_permutation].tolist()
        assert len(raw_pids) == len(output_pids)
        assert len(raw_pids) == len(output_scores)
        assert raw_pids == output_pids
        return output_scores

    def batch_rank(self, all_query_embeddings, all_query_indexes, all_pids, sorted_pids):
        """Score many query--passage pairs by slicing the big tensor into
        50k-document sub-ranges and ranking each sub-range in BSIZE batches.

        :param all_query_embeddings: embeddings for all queries
        :param all_query_indexes: per-pair index into all_query_embeddings
        :param all_pids: per-pair passage id; must be sorted ascending
        :param sorted_pids: must be True (sortedness is assumed below)
        :returns: list of scores, one per (query, pid) pair in input order
        """
        assert sorted_pids is True

        ######

        scores = []
        range_start, range_end = 0, 0
        for pid_offset in range(0, len(self.doclens), 50_000):
            pid_endpos = min(pid_offset + 50_000, len(self.doclens))
            # Advance the [range_start, range_end) window over the sorted
            # all_pids to cover exactly the pids in this sub-range.
            range_start = range_start + (all_pids[range_start:] < pid_offset).sum()
            range_end = range_end + (all_pids[range_end:] < pid_endpos).sum()
            pids = all_pids[range_start:range_end]
            query_indexes = all_query_indexes[range_start:range_end]
            print_message(f"###--> Got {len(pids)} query--passage pairs in this sub-range {(pid_offset, pid_endpos)}.")
            if len(pids) == 0:
                continue
            print_message(f"###--> Ranking in batches the pairs #{range_start} through #{range_end} in this sub-range.")
            tensor_offset = self.doclens_pfxsum[pid_offset].item()
            # +512 slack rows so the last document's strided window fits.
            tensor_endpos = self.doclens_pfxsum[pid_endpos].item() + 512
            collection = self.tensor[tensor_offset:tensor_endpos].to(DEVICE)
            views = self._create_views(collection)
            print_message(f"#> Ranking in batches of {BSIZE} query--passage pairs...")
            for batch_idx, offset in enumerate(range(0, len(pids), BSIZE)):
                if batch_idx % 100 == 0:
                    print_message("#> Processing batch #{}..".format(batch_idx))
                endpos = offset + BSIZE
                batch_query_index, batch_pids = query_indexes[offset:endpos], pids[offset:endpos]
                Q = all_query_embeddings[batch_query_index]
                scores.extend(self.rank(Q, batch_pids, views, shift=tensor_offset))
        return scores
def torch_percentile(tensor, p):
    """Return the p-th percentile value of a 1-D tensor.

    Implemented via kthvalue with k = floor(p * n / 100). Bug fix: k is
    clamped to at least 1, because kthvalue requires k >= 1 and the
    original crashed whenever p * n / 100 < 1 (e.g. tiny tensors).

    :param tensor: 1-D tensor of values
    :param p: percentile in [1, 100]
    :returns: the percentile value as a Python number
    """
    assert p in range(1, 100 + 1)
    assert tensor.dim() == 1
    k = max(1, int(p * tensor.size(0) / 100.0))
    # kthvalue returns the k-th smallest element.
    return tensor.kthvalue(k).values.item()
| StarcoderdataPython |
5137251 | <reponame>FlyreelAI/sesemi
#
# Copyright 2021, Flyreel. All Rights Reserved.
# =============================================#
"""Configuration structures and utilities."""
| StarcoderdataPython |
9726229 | """
Defines a reinforcement learning agent which uses a
simple version of proximal policy optimization, without an
actor-critic style baseline value function.
"""
import tensorflow as tf
import numpy as np
import gym
import roboschool
class Sample:
    """A single (state, action, value) training example.

    Wrapping the triple in an object avoids NumPy's habit of collapsing
    a list of tuples into a two-dimensional array when sampling batches.
    """

    def __init__(self, state, action, value):
        """Store the state, the action taken in it, and the sample's value.

        :param state: the state
        :param action: the action taken
        :param value: the value of the sample
        """
        self.state, self.action, self.value = state, action, value
class Trajectory:
    """An episode's recorded sequence of states, actions and rewards.

    The three parallel lists always have equal length; each step starts
    with a reward of 0.0 which later `reward()` calls accumulate onto.
    """

    def __init__(self):
        self._states = []
        self._actions = []
        self._rewards = []

    def __len__(self):
        """Number of steps recorded so far."""
        return len(self._states)

    def step(self, state, action):
        """Record one (state, action) step with an initial reward of zero.

        :param state: the state
        :param action: the action
        """
        self._states.append(state)
        self._actions.append(action)
        self._rewards.append(0.0)

    def reward(self, reward):
        """Accumulate `reward` onto the most recent step (no-op if empty).

        :param reward: the immediate reward
        """
        if not self._rewards:
            return
        self._rewards[-1] += reward

    @property
    def states(self):
        return self._states

    @property
    def actions(self):
        return self._actions

    @property
    def rewards(self):
        return self._rewards
class Agent:
    """
    An online RL agent which updates its policy using PPO's clipped
    surrogate objective, without an actor-critic value baseline
    (discounted returns are used directly as advantages).
    """

    def __init__(self, graph, session, **kwargs):
        """
        Initializes a new PPO agent.

        :param graph: the Tensorflow graph to be used
        :param session: the Tensorflow session to be used
        :param kwargs: the configuration options for the agent
        """
        # Get the state and action spaces
        state_space = kwargs['state_space']
        action_space = kwargs['action_space']

        # Capture the configuration parameters needed
        self._discrete_action = action_space.discrete
        self._discount = kwargs['discount']
        self._batch_size = kwargs['batch_size']
        self._num_batches = kwargs['num_batches']
        self._num_episodes = kwargs['num_episodes']

        # Capture session
        self._session = session

        # Build the policy network and learning update graph.
        # 'policy' is the behavior network used for acting; 'hypothesis' is
        # the candidate network that gradient updates are applied to, and
        # its weights are copied back into 'policy' after each update round.
        with graph.as_default():
            self._state_input = tf.placeholder(dtype=tf.float32, shape=[None] + list(state_space.shape))
            with tf.variable_scope("policy"):
                policy_output = kwargs['actor_fn'](self._state_input)
            with tf.variable_scope("hypothesis"):
                hypothesis_output = kwargs['actor_fn'](self._state_input)
            if action_space.discrete:
                # Softmax probabilities of the taken actions under each network.
                self._action_input = tf.placeholder(dtype=tf.int32, shape=[None])
                one_hot = tf.one_hot(self._action_input, action_space.size)
                policy = tf.exp(policy_output)
                policy = tf.reduce_sum(one_hot * policy, axis=1) / tf.reduce_sum(policy, axis=1)
                hypothesis = tf.exp(hypothesis_output)
                hypothesis = tf.reduce_sum(one_hot * hypothesis, axis=1) / tf.reduce_sum(hypothesis, axis=1)
                ratio = hypothesis / tf.stop_gradient(policy)
                # Bug fix: act() reads self._action, but the original stored
                # the sampling op as self._action_output here, so discrete
                # action spaces crashed with AttributeError.
                self._action = tf.multinomial(policy_output, 1)
            else:
                # Diagonal Gaussian policy: outputs are mean and log-stddev.
                self._action_input = tf.placeholder(dtype=tf.float32, shape=[None] + list(action_space.shape))
                policy_mean = policy_output[:, 0]
                policy_deviation = policy_output[:, 1]
                hypothesis_mean = hypothesis_output[:, 0]
                hypothesis_deviation = hypothesis_output[:, 1]
                # Negative log-likelihood terms (up to an additive constant).
                policy = tf.square((self._action_input - policy_mean) / tf.exp(policy_deviation))
                policy = tf.reduce_sum((0.5 * policy) + policy_deviation, axis=1)
                hypothesis = tf.square((self._action_input - hypothesis_mean) / tf.exp(hypothesis_deviation))
                hypothesis = tf.reduce_sum((0.5 * hypothesis) + hypothesis_deviation, axis=1)
                # exp(nll_old - nll_new) == pi_new / pi_old
                ratio = tf.exp(tf.stop_gradient(policy) - hypothesis)
                noise = tf.random_normal(tf.shape(policy_mean))
                self._action = policy_mean + (noise * tf.exp(policy_deviation))
            # PPO clipped surrogate objective.
            self._advantage_input = tf.placeholder(dtype=tf.float32, shape=[None])
            clipped_ratio = tf.clip_by_value(ratio, 1.0 - kwargs['clip_epsilon'], 1.0 + kwargs['clip_epsilon'])
            loss = -tf.reduce_mean(tf.minimum(ratio * self._advantage_input, clipped_ratio * self._advantage_input))
            self._update_hypothesis = tf.train.AdamOptimizer(learning_rate=kwargs['learning_rate']).minimize(loss)
            # Ops that copy hypothesis weights into the policy network,
            # matching variables by their name suffix after the scope prefix.
            policy_variables = tf.trainable_variables(scope='policy')
            policy_variables = dict(map(lambda x: (x.name[len('policy'):], x), policy_variables))
            hypothesis_variables = tf.trainable_variables(scope='hypothesis')
            hypothesis_variables = dict(map(lambda x: (x.name[len('hypothesis'):], x), hypothesis_variables))
            self._transfer_hypothesis = []
            for key, var in hypothesis_variables.items():
                self._transfer_hypothesis.append(tf.assign(policy_variables[key], var))
            session.run(tf.variables_initializer(tf.global_variables()))

        self._trajectories = []
        self._trajectory = None
        self._episode_count = 0
        self.reset()

    def _update(self):
        """
        Updates the agent's policy based on recent experience.
        """
        # Compute discounted returns, used directly as advantages.
        samples = []
        for trajectory in self._trajectories:
            advantage = 0.0
            for t in reversed(range(len(trajectory))):
                # Bug fix: the discounted return is r_t + gamma * G_{t+1}.
                # The original used `advantage +=`, which computed
                # advantage + r_t + gamma * advantage and blew up the values.
                advantage = trajectory.rewards[t] + (self._discount * advantage)
                samples.append(Sample(trajectory.states[t], trajectory.actions[t], advantage))

        # Perform updates
        for _ in range(self._num_batches):
            # Construct batch by sampling with replacement.
            batch = np.random.choice(samples, self._batch_size, replace=True)
            states = []
            actions = []
            advantages = []
            for sample in batch:
                states.append(sample.state)
                actions.append(sample.action)
                advantages.append(sample.value)

            # Run update
            self._session.run(self._update_hypothesis, feed_dict={
                self._state_input: states,
                self._action_input: actions,
                self._advantage_input: advantages
            })

        # Transfer parameters from hypothesis to the behavior policy.
        self._session.run(self._transfer_hypothesis)

    def reset(self):
        """
        Tells the agent that a new episode has been started; once
        `num_episodes` trajectories have accumulated, runs a policy update
        and clears the buffer.
        """
        if self._trajectory is not None and len(self._trajectory) != 0:
            self._trajectories.append(self._trajectory)
            self._episode_count += 1
            if self._episode_count == self._num_episodes:
                self._update()
                self._trajectories = []
                self._episode_count = 0
        self._trajectory = Trajectory()

    def act(self, state, evaluation=False):
        """
        Records the current state, and selects the agent's action

        :param state: a representation of the current state
        :param evaluation: if true, this indicates that this action should not be recorded
        :return: a representation of the next action
        """
        action = self._session.run(self._action, feed_dict={self._state_input: [state]})
        # Discrete sampling returns shape [1, 1]; continuous returns [1, dims].
        action = action[0, 0] if self._discrete_action else action[0]
        if not evaluation:
            self._trajectory.step(state, action)
        return action

    def reward(self, reward):
        """
        Gives an immediate reward to the agent for the most recent step.

        :param reward: the reward value
        """
        self._trajectory.reward(reward)
def manager(actor_fn, state_space, action_space,
            discount=0.99,
            learning_rate=0.0005,
            clip_epsilon=0.05,
            batch_size=50,
            num_batches=20,
            num_episodes=10):
    """
    Returns a context manager which instantiates and cleans up a PPO
    reinforcement learning agent with the given configuration.

    :param actor_fn: the function used to build the policy graphs
    :param state_space: the number of state features
    :param action_space: the number of actions or action features
    :param discount: the discount factor of the MDP
    :param learning_rate: the learning rate used for training the policies
    :param clip_epsilon: the clipping radius for the policy ratio
    :param batch_size: the batch size used for training the policies
    :param num_batches: the number of gradient steps to do per update
    :param num_episodes: the number of episodes performed between updates
    :return: a context manager which creates a new PPO agent
    """
    # Freeze the full agent configuration once; __enter__ just unpacks it.
    agent_config = dict(
        actor_fn=actor_fn,
        state_space=state_space,
        action_space=action_space,
        discount=discount,
        learning_rate=learning_rate,
        clip_epsilon=clip_epsilon,
        batch_size=batch_size,
        num_batches=num_batches,
        num_episodes=num_episodes,
    )

    class Manager:
        def __enter__(self):
            # The agent owns a private graph and session for its lifetime.
            self._graph = tf.Graph()
            self._session = tf.Session(graph=self._graph)
            return Agent(self._graph, self._session, **agent_config)

        def __exit__(self, exc_type, exc_val, exc_tb):
            """
            Closes the session associated with the current agent.

            :return: always False, never suppress exceptions
            """
            self._session.close()
            self._session = None
            self._graph = None
            return False

    return Manager()
class Solution:
    def countArrangement(self, n: int) -> int:
        """Count the "beautiful arrangements" of 1..n: permutations where,
        at every 1-indexed position i, perm[i] % i == 0 or i % perm[i] == 0.

        Positions are filled from n down to 1; the memo is keyed on the
        tuple of values not yet placed.
        """
        memo = {}

        def count(remaining):
            # The position currently being filled equals len(remaining).
            pos = len(remaining)
            if pos == 1:
                # The last value always divides or is divided by position 1.
                return 1
            if remaining not in memo:
                placed = 0
                for idx, value in enumerate(remaining):
                    if value % pos == 0 or pos % value == 0:
                        placed += count(remaining[:idx] + remaining[idx + 1:])
                memo[remaining] = placed
            return memo[remaining]

        return count(tuple(range(1, n + 1)))
| StarcoderdataPython |
9727738 | <filename>camera.py
import cv2
from model import FacialExpressionModel
import numpy as np
# App-wide resources: Haar face detector, trained expression model, label font.
# NOTE(review): `facec` is not referenced below — VideoCamera.get_frame builds
# its own CascadeClassifier; confirm whether this module-level one is needed.
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.h5")
font = cv2.FONT_HERSHEY_DUPLEX
class VideoCamera(object):
    """Webcam wrapper that yields JPEG frames annotated with the predicted
    facial expression for each detected face."""

    def __init__(self):
        self.video = cv2.VideoCapture(0)
        # Performance fix: load the Haar cascade once here. The original
        # rebuilt the CascadeClassifier from disk on every get_frame call.
        self.face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    def __del__(self):
        # Release the capture device when the camera object goes away.
        self.video.release()

    def get_frame(self):
        """Grab one frame, draw a box and emotion label per detected face,
        and return the frame resized to 800x480 as JPEG bytes."""
        ret, frame = self.video.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            # Model expects a (1, 48, 48, 1) grayscale crop.
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = model.predict_emotion(cropped_img)
            cv2.putText(frame, prediction, (x+20, y-60), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
        _, jpeg = cv2.imencode('.jpg', cv2.resize(frame, (800, 480), interpolation=cv2.INTER_CUBIC))
        return jpeg.tobytes()
| StarcoderdataPython |
4990216 | <gh_stars>0
from pybedtools import BedTool
import numpy as np
import os
from functools import reduce
import timeit
def run_intersect(file_list, overlap_min=1, gap_min=0, multi_bed=2, multi_frag=3):
    """Read the first two BED files in `file_list` and return their
    intersection dictionary (see intersect_bed for the structure).

    :param file_list: paths to BED files; only the first two are used
    :param overlap_min: minimum overlap length forwarded to intersect_bed
    :param gap_min: gap offset forwarded to intersect_bed
    :param multi_bed: unused (kept for interface compatibility)
    :param multi_frag: unused (kept for interface compatibility)
    """
    parsed = []
    for path in file_list[:2]:
        lines = get_file_lines(abs_file_name=path)
        parsed.append(get_file_bed_dict(lines=lines))
    return intersect_bed(parsed[0], parsed[1], overlap_min, gap_min)
def get_file_lines(abs_file_name=None):
    """Return every line of the file at `abs_file_name`, newlines included.

    :param abs_file_name: path to the file to read
    :returns: list of line strings
    """
    with open(abs_file_name, 'r') as handle:
        return handle.readlines()
def get_file_range_set(lines=None):
    """Parse BED lines into a set of range(start, end) objects.

    The chromosome column is ignored; only the start/end coordinates of
    each line are kept.

    :param lines: iterable of whitespace-separated BED lines
    :returns: set of range objects, one per line
    """
    return {
        range(int(fields[1]), int(fields[2]))
        for fields in (line.split() for line in lines)
    }
def get_file_bed_dict(lines=None):
    """Group BED lines by chromosome.

    :param lines: iterable of whitespace-separated BED lines
        ("chrom start end ...")
    :returns: {chromosome: [(start, end), ...]} with ints, in input order
    """
    bed = {}
    for line in lines:
        fields = line.split()
        chrom = fields[0]
        interval = (int(fields[1]), int(fields[2]))
        bed.setdefault(chrom, []).append(interval)
    return bed
def intersect_bed(bed_dict1, bed_dict2, overlap_min, gap_min):
    '''
    Intersect two BED dictionaries produced by get_file_bed_dict.

    Input Arguments:
        bed_dict1: {chromosome: [(start, end), ...]}
        bed_dict2: {chromosome: [(start, end), ...]}
        overlap_min: intersections are recorded only when this is >= 1;
            intersections shorter than it get length 0 (len_intersect_limit)
        gap_min: offset added to BOTH ends of every fragment before
            intersecting. NOTE(review): this shifts the intervals rather
            than widening them — confirm intended semantics.
    Output Arguments:
        bed_dict:
            {'chromosome:'
                {'Unique ID for intersections':
                    list(
                        {'grand_parent': (start, end)}
                        {'parent': (start, end)}
                        {'intersect': (start, end)}
                        {'length': value}
                    )
                }
            }
    '''
    bed_dict = {}
    # Unique ID shared across all recorded intersections.
    intersect_loop = 0
    for chrom1, frag_range1 in bed_dict1.items():
        # Bug fix: the original tested `chrom1 in bed_dict1`, which is always
        # true, and then raised KeyError for chromosomes missing from
        # bed_dict2. Skip chromosomes the second file does not contain.
        if chrom1 not in bed_dict2:
            continue
        frag_range2 = bed_dict2[chrom1]
        # For each single interval fragment range in each file
        for frag1 in frag_range1:
            for frag2 in frag_range2:
                range_tuple_list = []
                min_frag1 = frag1[0] + gap_min
                min_frag2 = frag2[0] + gap_min
                max_frag1 = frag1[1] + gap_min + 1  # +1 makes the end inclusive
                max_frag2 = frag2[1] + gap_min + 1  # +1 makes the end inclusive
                # Positions common to both fragments.
                intersect = np.intersect1d(range(min_frag1, max_frag1), range(min_frag2, max_frag2))
                # Bug fix: `intersect.any()` is False for the array [0], so an
                # overlap consisting solely of coordinate 0 was dropped; test
                # for a non-empty intersection instead.
                if intersect.size > 0 and overlap_min >= 1:
                    intersect_loop = intersect_loop + 1
                    # Intersection interval and its (thresholded) length.
                    range_tuple = (min(intersect), max(intersect))
                    len_intersect = len_intersect_limit(range_tuple, overlap_min)
                    gp_dict = {'grand_parent': (frag2[0], frag2[1] + 1)}
                    parent_dict = {'parent': (frag1[0], frag1[1] + 1)}
                    intersect_dict = {'intersect': range_tuple}
                    length_dict = {'length': len_intersect}
                    range_tuple_list.append(gp_dict)
                    range_tuple_list.append(parent_dict)
                    range_tuple_list.append(intersect_dict)
                    range_tuple_list.append(length_dict)
                    if chrom1 in bed_dict:
                        bed_dict[chrom1].update({intersect_loop: range_tuple_list})
                    else:
                        bed_dict.update({chrom1: {intersect_loop: range_tuple_list}})
    return bed_dict
def len_intersect_limit(frag_intersect, overlap_min=100):
    '''
    Return the length of an intersection interval, or 0 when it is
    shorter than the required minimum overlap.

    Input Arguments:
        frag_intersect: (start, end) intersection interval
        overlap_min: minimum length for the interval to count
    Output Arguments:
        the interval length when >= overlap_min, otherwise 0
    '''
    span = np.abs(np.subtract(frag_intersect[0], frag_intersect[1]))
    return span if span >= overlap_min else 0
if __name__ == "__main__":
    # Resolve the example BED files relative to the current working directory.
    parent_dir = os.getcwd()
    BED_file_directory = os.path.join(parent_dir)
    Ex1 = os.path.join(BED_file_directory, 'Example1.bed');
    Ex2 = os.path.join(BED_file_directory, 'Example2.bed');
    Ex3 = os.path.join(BED_file_directory, 'Example3.bed');
    REP1 = os.path.join(BED_file_directory, 'iCellNeuron_HTTLOC_CAPCxHTT_REP1.bed')
    REP2 = os.path.join(BED_file_directory, 'iCellNeuron_HTTLOC_CAPCxHTT_REP2.bed')
    # Example input list of files: intersect Example1 with itself, requiring
    # at least a 50 bp overlap and no gap offset.
    file_list = [];
    file_list.append(Ex1);
    file_list.append(Ex1);
    #file_list.append(Ex3);
    sect_bed = run_intersect(file_list, 50, 0, multi_bed=2, multi_frag=3)
    print(sect_bed)
| StarcoderdataPython |
1997394 | <reponame>wutangclancee/quantulum3
"""quantulum3 init."""

VERSION = (0, 7, 1)
# Dotted version string derived from the VERSION tuple.
__version__ = '.'.join(str(i) for i in VERSION)
__author__ = '<NAME>, nielstron, sohrabtowfighi, grhawk and Rodrigo Castro'
__author_email__ = '<EMAIL>'
# Bug fix: the second line of the notice was a standalone string-expression
# statement (implicit concatenation does not span statements), so
# __copyright__ silently lost its second half; parentheses join the parts.
# Also fixed the garbled "nielstron,t" in the literal.
__copyright__ = ('Copyright (C) 2016 <NAME>, nielstron, sohrabtowfighi, '
                 'grhawk and Rodrigo Castro')
__license__ = "MIT"
__url__ = "https://github.com/nielstron/quantulum3"
| StarcoderdataPython |
1925474 | <reponame>stanford-oval/decaNLP
#
# Copyright (c) 2020 The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import logging
import os
from collections import defaultdict
from typing import List, Optional
import torch
import ujson
from dateparser.languages import default_loader
from dialogues import Bitod
from transformers import AutoConfig, MarianTokenizer, PreTrainedModel
from ..data_utils.example import NumericalizedExamples, SequentialField
from ..data_utils.numericalizer import TransformerNumericalizer
from ..data_utils.progbar import progress_bar
from ..util import adjust_language_code, merge_translated_sentences, replace_capturing_group
logger = logging.getLogger(__name__)
class GenieModel(PreTrainedModel):
    """Base class for Genie models: a HuggingFace PreTrainedModel paired
    with a Genie TransformerNumericalizer."""

    # Numericalizer attached by subclasses; converts text <-> token ids.
    numericalizer: TransformerNumericalizer

    @classmethod
    def load(cls, save_directory: str, *model_args, **kwargs):
        """
        Loads a GenieModel (in Genie format, not HuggingFace's transformers) and its
        accompanying Numericalizer (not HuggingFace's tokenizers) from `save_directory`, which is a path

        Returns a (model, best_decascore) tuple; best_decascore is None when
        the checkpoint does not record one.
        """
        # TODO remove kwargs and take individual inputs instead
        model_checkpoint_file = kwargs.pop("model_checkpoint_file", None)
        args = kwargs.pop("args", None)
        device = kwargs.pop("device", None)
        tasks = kwargs.pop("tasks", None)
        vocab_sets = kwargs.pop("vocab_sets", None)
        full_checkpoint_path = os.path.join(save_directory, model_checkpoint_file)
        logger.info(f'Loading the model from {full_checkpoint_path}')
        model = cls(args=args, tasks=tasks, vocab_sets=vocab_sets, save_directory=save_directory, *model_args, **kwargs)
        save_dict = torch.load(full_checkpoint_path, map_location=device)
        # HACK
        # `transformers` version 4.1 changed the name of language modeling head of BartForConditionalGeneration
        # (and therefore its subclass MBartForConditionalGeneration) to lm_head to make it similar to other models
        # like T5. The following will make this change so that genienlp models trained with `transformers`==4.0 can be properly loaded
        if (
            'model.lm_head.weight' not in save_dict['model_state_dict']
            and 'model.model.shared.weight' in save_dict['model_state_dict']
        ):
            save_dict['model_state_dict']['model.lm_head.weight'] = save_dict['model_state_dict']['model.model.shared.weight']
        model.load_state_dict(save_dict['model_state_dict'], strict=True)
        return model, save_dict.get('best_decascore')

    def add_new_vocab_from_data(self, tasks, resize_decoder=False):
        """Grow the numericalizer's vocabulary with task-specific tokens,
        logging when the vocabulary actually expanded."""
        old_num_tokens = self.numericalizer.num_tokens
        self.numericalizer.grow_vocab(tasks)
        if self.numericalizer.num_tokens > old_num_tokens:
            logger.info(f'Vocabulary has expanded to {self.numericalizer.num_tokens} tokens')

    def update_language_dependent_configs(self, tgt_lang):
        # we override this method for TransformerSeq2Seq models; otherwise it's a no-op
        pass

    def set_generation_output_options(self, tasks):
        """Configure which extra outputs generation should return: attention
        scores only when some task declares it needs them."""
        self._output_attentions = any(getattr(task, 'need_attention_scores', False) for task in tasks)
        self._output_scores = False
        self._output_hidden_states = False
class ValidationOutput(object):
    """
    Container for everything a model's validate() method may output.
    Fields that a particular validation run does not produce stay None.
    """

    def __init__(
        self,
        loss: Optional[float] = None,
        example_ids: Optional[List] = None,
        predictions: Optional[List] = None,
        raw_predictions: Optional[List] = None,
        answers: Optional[List] = None,
        contexts: Optional[List] = None,
        confidence_features: Optional[List] = None,
        confidence_scores: Optional[List] = None,
    ):
        # Assign every field uniformly; keeps the attribute list in one place.
        for field, value in (
            ('loss', loss),
            ('example_ids', example_ids),
            ('predictions', predictions),
            ('raw_predictions', raw_predictions),
            ('answers', answers),
            ('contexts', contexts),
            ('confidence_features', confidence_features),
            ('confidence_scores', confidence_scores),
        ):
            setattr(self, field, value)
# TransformerSeq2Seq and TransformerLSTM will inherit from this model
class GenieModelForGeneration(GenieModel):
    """Shared validation/inference loops for sequence-generation models.

    ``validate`` dispatches either to the generic batched decoder
    (``validate_batch``) or, when ``args.e2e_dialogue_evaluation`` is set, to
    the turn-by-turn dialogue simulator (``validate_e2e_dialogues``).
    Subclasses are expected to provide ``generate()`` and ``forward()``.
    """

    def validate(
        self,
        data_iterator,
        task,
        eval_dir=None,
        output_predictions_only=False,
        output_confidence_features=False,
        original_order=None,
        confidence_estimators=None,
        disable_progbar=True,
        **kwargs,
    ):
        """Dispatch to the dialogue-level or batch-level validation loop."""
        if self.args.e2e_dialogue_evaluation:
            return self.validate_e2e_dialogues(
                data_iterator, task, eval_dir, output_predictions_only, original_order, disable_progbar
            )
        else:
            return self.validate_batch(
                data_iterator,
                task,
                output_predictions_only,
                output_confidence_features,
                original_order,
                confidence_estimators,
                disable_progbar,
            )

    def validate_batch(
        self,
        data_iterator,
        task,
        output_predictions_only=False,
        output_confidence_features=False,
        original_order=None,
        confidence_estimators=None,
        disable_progbar=True,
    ):
        """
        Inputs:
            original_order: List of indices. If provided, we will sort the results according to this order
            confidence_estimator: if provided, will use it to calculate and output confidence scores
        Outputs: predictions if `output_predictions_only` == True, (loss, predictions, answers, contexts) otherwise
            loss
            predictions: a List of Lists of strings
            answers
            contexts
        """
        # Loss is only accumulated when the task's metrics ask for it.
        total_loss = 0.0 if 'loss' in task.metrics else None
        output_confidence_scores = confidence_estimators is not None
        predictions = []
        raw_predictions = []
        confidence_features = []
        example_ids = []
        answers = []
        contexts = []

        # Prefer the tokenizer's language codes; fall back to the ones the
        # model was originally configured with.
        if self.numericalizer._tokenizer.tgt_lang:
            tgt_lang = self.numericalizer._tokenizer.tgt_lang
        else:
            tgt_lang = self.orig_tgt_lang

        if self.numericalizer._tokenizer.src_lang:
            src_lang = self.numericalizer._tokenizer.src_lang
        else:
            src_lang = self.orig_src_lang

        # Locale-aware date parser for the source language (first two letters
        # of the language code).
        date_parser = default_loader.get_locale(src_lang[:2])

        translate_return_raw_outputs = getattr(self.args, 'translate_return_raw_outputs', False)

        for batch in progress_bar(data_iterator, desc='Generating', disable=disable_progbar):
            batch_size = len(batch.example_id)
            # One (possibly multi-output) prediction list per example.
            batch_prediction = [[] for _ in range(batch_size)]
            batch_raw_prediction = [[] for _ in range(batch_size)]
            batch_confidence_features = [[] for _ in range(batch_size)]
            batch_example_ids = batch.example_id

            example_ids += batch_example_ids
            if not output_predictions_only:
                batch_answer = self.numericalizer.reverse(batch.answer.value.data, 'answer')
                batch_answer = [
                    task.postprocess_prediction(batch_example_ids[i], batch_answer[i]) for i in range(len(batch_answer))
                ]
                answers += batch_answer
                batch_context = self.numericalizer.reverse(batch.context.value.data, 'context')
                contexts += batch_context
            elif output_confidence_features:
                # need gold answer for confidence estimation
                batch_answer = self.numericalizer.reverse(batch.answer.value.data, 'answer')
                answers += batch_answer

            if total_loss is not None:
                loss = self.forward(batch, train=True).loss.item()
                total_loss += loss

            # Generate once per decoding-hyperparameter configuration.
            for hyperparameter_idx in range(len(self.args.temperature)):
                generated = self.generate(
                    batch,
                    max_output_length=self.args.max_output_length,
                    min_output_length=self.args.min_output_length,
                    num_outputs=self.args.num_outputs[hyperparameter_idx],
                    temperature=self.args.temperature[hyperparameter_idx]
                    if self.args.temperature[hyperparameter_idx] > 0
                    else 1.0,
                    repetition_penalty=self.args.repetition_penalty[hyperparameter_idx],
                    top_k=self.args.top_k[hyperparameter_idx],
                    top_p=self.args.top_p[hyperparameter_idx],
                    num_beams=self.args.num_beams[hyperparameter_idx],
                    num_beam_groups=self.args.num_beam_groups[hyperparameter_idx],
                    diversity_penalty=self.args.diversity_penalty[hyperparameter_idx],
                    no_repeat_ngram_size=self.args.no_repeat_ngram_size[hyperparameter_idx],
                    do_sample=self.args.temperature[hyperparameter_idx] != 0,  # if temperature==0, we do not sample
                )
                partial_batch_prediction_ids = generated.sequences
                partial_batch_words = None

                if getattr(task, 'need_attention_scores', False):
                    cross_attentions = generated.cross_attentions

                    # stack tensors to shape (max_output_length, num_layers, batch_size, num_heads, 1, max_input_length)
                    cross_attentions = torch.stack(([torch.stack(tuple) for tuple in cross_attentions])).cpu()

                    # reshape to (num_layers, batch_size, num_heads, max_output_length, max_input_length)
                    cross_attentions = cross_attentions.squeeze(4)
                    cross_attentions = cross_attentions.permute(1, 2, 3, 0, 4).contiguous()

                    # choose only last layer attentions
                    # cross_attentions = torch.mean(cross_attentions[-3:, ...], dim=0)
                    cross_attentions = cross_attentions[-1, ...]

                    # postprocess prediction ids
                    kwargs = {
                        'numericalizer': self.numericalizer,
                        'cross_attentions': cross_attentions,
                        'tgt_lang': tgt_lang,
                        'date_parser': date_parser,
                    }

                    if translate_return_raw_outputs:
                        partial_batch_raw_prediction_ids = partial_batch_prediction_ids

                    partial_batch_prediction_ids, partial_batch_words = task.batch_postprocess_prediction_ids(
                        batch_example_ids, batch.context.value.data, partial_batch_prediction_ids, **kwargs
                    )

                # MarianTokenizer uses two different spm models for encoding source and target languages.
                # in almond_translate we postprocess text with alignment which produces code-switched sentences.
                # encoding a code-switched sentence with either spm will omit tokens from the other language
                # so we have to return both the processed and encoded text.
                # we need to return encoded text too since confidence_features requires ids
                if isinstance(self.numericalizer._tokenizer, MarianTokenizer) and partial_batch_words:
                    partial_batch_prediction = partial_batch_words
                else:
                    if output_confidence_features or output_confidence_scores:
                        partial_batch_confidence_features = self.confidence_features(
                            batch=batch, predictions=partial_batch_prediction_ids, mc_dropout_num=self.args.mc_dropout_num
                        )
                    partial_batch_prediction = self.numericalizer.reverse(partial_batch_prediction_ids, 'answer')

                def get_example_index(i):
                    # Generated sequences are laid out as
                    # num_outputs consecutive rows per example; map a row
                    # index back to its example within the batch.
                    return (i // self.args.num_outputs[hyperparameter_idx]) % batch_size

                if translate_return_raw_outputs:
                    partial_batch_raw_prediction = self.numericalizer.reverse(partial_batch_raw_prediction_ids, 'answer')
                    for i in range(len(partial_batch_prediction)):
                        partial_batch_raw_prediction[i] = task.postprocess_prediction(
                            batch_example_ids[get_example_index(i)], partial_batch_raw_prediction[i]
                        )
                    for i in range(len(partial_batch_prediction)):
                        batch_raw_prediction[get_example_index(i)].append(partial_batch_raw_prediction[i])

                # post-process predictions
                for i in range(len(partial_batch_prediction)):
                    partial_batch_prediction[i] = task.postprocess_prediction(
                        batch_example_ids[get_example_index(i)], partial_batch_prediction[i]
                    )

                # put them into the right array
                for i in range(len(partial_batch_prediction)):
                    batch_prediction[get_example_index(i)].append(partial_batch_prediction[i])
                    if output_confidence_features or output_confidence_scores:
                        batch_confidence_features[get_example_index(i)].append(partial_batch_confidence_features[i])

            predictions += batch_prediction
            confidence_features += batch_confidence_features
            raw_predictions += batch_raw_prediction

        if total_loss is not None:
            # Average the accumulated loss over all examples seen.
            total_loss /= len(example_ids)

        if original_order is not None:
            # sort back to the original order
            original_order, example_ids, predictions, raw_predictions, answers, contexts, confidence_features = [
                list(a)
                for a in tuple(
                    zip(
                        *sorted(
                            list(
                                zip(
                                    original_order,
                                    example_ids,
                                    predictions,
                                    raw_predictions,
                                    answers,
                                    contexts,
                                    confidence_features,
                                )
                            )
                        )
                    )
                )
            ]

        if getattr(self.args, 'translate_example_split', False):
            # stitch sentences back together
            example_ids, predictions, raw_predictions, answers, contexts, confidence_features = merge_translated_sentences(
                example_ids,
                predictions,
                raw_predictions,
                answers,
                contexts,
                confidence_features,
                self.numericalizer._tokenizer.src_lang,
                self.numericalizer._tokenizer.tgt_lang,
            )

        if getattr(self.args, 'translate_only_entities', False):
            # stitch entities back together
            example_ids, predictions, raw_predictions, answers, contexts, confidence_features = merge_translated_sentences(
                example_ids,
                predictions,
                raw_predictions,
                answers,
                contexts,
                confidence_features,
                self.numericalizer._tokenizer.src_lang,
                self.numericalizer._tokenizer.tgt_lang,
                is_entities=True,
            )

        output = ValidationOutput(loss=total_loss)

        if output_predictions_only:
            output.predictions = predictions
        else:
            output.example_ids, output.predictions, output.answers, output.contexts = (
                example_ids,
                predictions,
                answers,
                contexts,
            )
        if output_confidence_features:
            output.confidence_features = confidence_features
            if self.args.override_confidence_labels:
                # Relabel confidence examples: label is True iff the gold
                # answer equals the override value.
                for i, example in enumerate(confidence_features):
                    for confidence in example:
                        confidence.label = answers[i] == self.args.override_confidence_labels
        if output_confidence_scores:
            output.confidence_scores = []
            for estimator in confidence_estimators:
                confidence_scores = estimator.estimate(confidence_features)
                output.confidence_scores.append(confidence_scores)
        if translate_return_raw_outputs:
            output.raw_predictions = raw_predictions

        return output

    def validate_e2e_dialogues(
        self, data_iterator, task, eval_dir=None, output_predictions_only=False, original_order=None, disable_progbar=True
    ):
        """
        Inputs:
            original_order: List of indices. If provided, we will sort the results according to this order
            confidence_estimator: if provided, will use it to calculate and output confidence scores
        Outputs: predictions if `output_predictions_only` == True, (loss, predictions, answers, contexts) otherwise
            loss
            predictions: a List of Lists of strings
            answers
            contexts
        """
        # BiTOD dataset helper provides state/knowledge/action span handling.
        dataset = Bitod()
        e2e_dialogue_preds = dict()

        predictions = []
        example_ids = []
        answers = []
        contexts = []

        # TODO: handle multiple responses
        hyperparameter_idx = 0

        cur_dial_id = ''
        knowledge = None

        device = self.device
        args = self.args

        if self.numericalizer._tokenizer.src_lang:
            src_lang = self.numericalizer._tokenizer.src_lang
        else:
            src_lang = self.orig_src_lang

        special_tokens = self.numericalizer._tokenizer.all_special_tokens

        # Each iteration handles exactly one dialogue turn (batch_size == 1).
        for k, turn in enumerate(progress_bar(data_iterator, desc='Generating', disable=disable_progbar)):
            batch_size = len(turn.example_id)
            assert batch_size == 1
            batch_prediction = []
            batch_example_ids = turn.example_id

            example_ids += batch_example_ids

            # Example ids encode "<task>/<dialogue id>/<turn id>/<subtask>".
            task_name, dial_id, turn_id, train_target = example_ids[-1].split('/')
            turn_id = int(turn_id)

            if cur_dial_id != dial_id:
                # new dialogue: reset the tracked state/knowledge/actions
                cur_dial_id = dial_id
                dialogue_state = {}
                # new_state_text = 'null'
                knowledge = defaultdict(dict)
                new_knowledge_text = 'null'
                new_actions_text = 'null'
                active_api = None
                e2e_dialogue_preds[dial_id] = {"turns": defaultdict(dict), "API": defaultdict(dict)}

            batch_context = []
            batch_tokens = self.numericalizer.convert_ids_to_tokens(turn.context.value.data, skip_special_tokens=False)

            # remove only beginning and trailing special tokens
            # otherwise the sep_token added between context and question will be lost
            for text in batch_tokens:
                i = 0
                while text[i] in special_tokens:
                    i += 1
                j = len(text) - 1
                while text[j] in special_tokens:
                    j -= 1
                text = text[i : j + 1]

                batch_context.append(self.numericalizer._tokenizer.convert_tokens_to_string(text))

            contexts += batch_context

            if not output_predictions_only:
                batch_answer = self.numericalizer.reverse(turn.answer.value.data, 'answer')
                batch_answer = [
                    task.postprocess_prediction(batch_example_ids[i], batch_answer[i]) for i in range(len(batch_answer))
                ]
                answers += batch_answer

            # Serialize the tracked belief state and splice the latest
            # state/knowledge/actions into the gold context for this subtask.
            new_state_text = dataset.state2span(dialogue_state)

            if train_target == 'dst':
                input_text = replace_capturing_group(contexts[-1], dataset.state_re, new_state_text)

                ## we always use gold history following common practice
                ## if you want to use predicted response instead of gold uncomment the following
                # last_sys_pred = predictions[-1][0].strip()
                # input_text = replace_match(input_text, last_system_re, last_sys_pred)

            elif train_target == 'api':
                # replace state
                input_text = replace_capturing_group(contexts[-1], dataset.state_re, new_state_text)

            elif train_target == 'da':
                # replace state
                input_text = replace_capturing_group(contexts[-1], dataset.state_re, new_state_text)

                # replace knowledge
                input_text = replace_capturing_group(input_text, dataset.knowledge_re, new_knowledge_text)

            elif train_target == 'rg':
                # replace actions
                input_text = replace_capturing_group(contexts[-1], dataset.actions_re, new_actions_text)
            else:
                raise ValueError(f'Invalid train_target: {train_target}')

            # replace old context with updated
            contexts[-1] = input_text

            # Re-tokenize the updated context and build a single-example batch.
            tokenized_contexts = self.numericalizer.encode_batch([input_text], field_name='context', features=None)[0]

            numericalized_turn = NumericalizedExamples(
                example_id=[turn.example_id[0]],
                context=SequentialField(
                    value=torch.tensor([tokenized_contexts.value], device=device),
                    length=torch.tensor([tokenized_contexts.length], device=device),
                    limited=torch.tensor([tokenized_contexts.limited], device=device),
                    feature=None,
                ),
                answer=SequentialField(value=None, length=None, limited=None, feature=None),
            )

            generated = self.generate(
                numericalized_turn,
                max_output_length=args.max_output_length,
                min_output_length=args.min_output_length,
                num_outputs=args.num_outputs[hyperparameter_idx],
                temperature=args.temperature[hyperparameter_idx] if args.temperature[hyperparameter_idx] > 0 else 1.0,
                repetition_penalty=args.repetition_penalty[hyperparameter_idx],
                top_k=args.top_k[hyperparameter_idx],
                top_p=args.top_p[hyperparameter_idx],
                num_beams=args.num_beams[hyperparameter_idx],
                num_beam_groups=args.num_beam_groups[hyperparameter_idx],
                diversity_penalty=args.diversity_penalty[hyperparameter_idx],
                no_repeat_ngram_size=args.no_repeat_ngram_size[hyperparameter_idx],
                do_sample=args.temperature[hyperparameter_idx] != 0,
            )

            partial_batch_prediction_ids = generated.sequences

            partial_batch_prediction = self.numericalizer.reverse(partial_batch_prediction_ids, 'answer')[0]

            if train_target == 'da':
                partial_batch_prediction = dataset.postprocess_prediction(
                    partial_batch_prediction, knowledge, lang=src_lang[:2]
                )

            partial_batch_prediction = task.postprocess_prediction(batch_example_ids[0], partial_batch_prediction)

            # put them into the right array
            batch_prediction.append([partial_batch_prediction])

            predictions += batch_prediction

            # Feed this turn's prediction back into the tracked dialogue state
            # so later turns condition on it.
            if train_target == 'dst':
                # update dialogue_state
                lev = predictions[-1][0].strip()
                state_update = dataset.span2state(lev)
                if state_update:
                    active_api = list(state_update.keys())[-1]
                dataset.update_state(state_update, dialogue_state)

                #### save latest state
                state_to_record = copy.deepcopy(dialogue_state)
                state_to_record = {dataset.domain2api_name(k): v for k, v in state_to_record.items()}
                e2e_dialogue_preds[dial_id]["turns"][str(turn_id)]["state"] = state_to_record
                ####

            elif train_target == 'api':
                do_api_call = predictions[-1][0].strip()

                if do_api_call == 'yes':
                    # make api call
                    api_name = active_api
                    if api_name in dialogue_state:
                        constraints, new_knowledge_text = dataset.make_api_call(
                            dialogue_state, knowledge, api_name, self.numericalizer._tokenizer.src_lang, dial_id, turn_id
                        )
                        #### save latest api constraints
                        e2e_dialogue_preds[dial_id]["API"][dataset.domain2api_name(api_name)] = copy.deepcopy(constraints)
                        ####
                elif do_api_call == 'no':
                    # do nothing
                    pass
                else:
                    logger.error(
                        f'API call should be either yes or no but got {do_api_call}. Seems model is not trained for enough steps. For now we assume it\'s a no'
                    )

                #### save latest api results
                e2e_dialogue_preds[dial_id]["turns"][str(turn_id)]["api"] = new_knowledge_text
                ####

            elif train_target == 'da':
                new_actions_text = predictions[-1][0]
                #### save latest actions
                e2e_dialogue_preds[dial_id]["turns"][str(turn_id)]["actions"] = predictions[-1][0]
                ####

            elif train_target == 'rg':
                #### save latest response
                e2e_dialogue_preds[dial_id]["turns"][str(turn_id)]["response"] = predictions[-1]
                ####

        if eval_dir:
            # Persist the full simulated dialogues for offline inspection.
            with open(os.path.join(eval_dir, 'e2e_dialogue_preds.json'), 'w') as fout:
                ujson.dump(e2e_dialogue_preds, fout, indent=2, ensure_ascii=False)

        if original_order is not None:
            # sort back to the original order
            original_order, example_ids, predictions, answers, contexts = [
                list(a) for a in tuple(zip(*sorted(list(zip(original_order, example_ids, predictions, answers, contexts)))))
            ]

        # TODO calculate and return loss
        loss = None
        output = ValidationOutput(loss=loss)

        if output_predictions_only:
            output.predictions = predictions
        else:
            output.example_ids, output.predictions, output.answers, output.contexts = (
                example_ids,
                predictions,
                answers,
                contexts,
            )
        return output
# TransformerForSequenceClassification and TransformerForTokenClassification will inherit from this model
class GenieModelForClassification(GenieModel):
    """Shared setup and validation loop for classification models
    (sequence-level and token-level)."""

    def _init_common(self, args, tasks, **kwargs):
        """Build the HuggingFace config and shared attributes.

        ``num_labels`` comes from ``args.num_labels`` when given; otherwise it
        is the maximum ``num_labels`` declared by any task.
        """
        self.args = args
        num_labels = 0
        if args.num_labels is not None:
            num_labels = args.num_labels
        else:
            for task in tasks:
                # if having multiple tasks choose max num_labels
                if hasattr(task, 'num_labels'):
                    num_labels = max(num_labels, task.num_labels)

        config = AutoConfig.from_pretrained(
            args.pretrained_model, cache_dir=args.embeddings, num_labels=num_labels, finetuning_task='ned'
        )
        super().__init__(config)

        # Expose the model width under a single name regardless of the
        # architecture's config field.
        if hasattr(config, 'd_model'):
            args.dimension = config.d_model
        else:
            args.dimension = config.hidden_size

        self.src_lang, self.tgt_lang = adjust_language_code(
            config, args.pretrained_model, kwargs.get('src_lang', 'en'), kwargs.get('tgt_lang', 'en')
        )

    def add_new_vocab_from_data(self, tasks, resize_decoder=False):
        """Grow the vocabulary and resize the embedding matrix to match."""
        super().add_new_vocab_from_data(tasks, resize_decoder)
        self.model.resize_token_embeddings(self.numericalizer.num_tokens)

    def forward(self, *input, **kwargs):
        """Training mode expects a batch as the first positional argument;
        evaluation mode forwards keyword arguments to the wrapped model."""
        if self.training:
            batch = input[0]
            outputs = self.model(
                batch.context.value,
                labels=batch.answer.value,
                attention_mask=(batch.context.value != self.numericalizer.pad_id),
            )
            return outputs
        else:
            return self.model(**kwargs)

    def validate(self, data_iterator, task, original_order=None, disable_progbar=True, **kwargs):
        """Run inference over *data_iterator* and collect loss, label strings
        and predictions (restored to *original_order* when provided)."""
        total_loss = 0.0
        all_example_ids = []
        all_answers = []
        all_contexts = []
        all_predictions = []

        for batch in progress_bar(data_iterator, desc='Generating', disable=disable_progbar):
            batch_example_ids = batch.example_id

            batch_context = self.numericalizer.reverse(batch.context.value.data, 'context')

            all_example_ids += batch_example_ids

            # pass labels to get loss
            output = self.forward(
                input_ids=batch.context.value,
                attention_mask=(batch.context.value != self.numericalizer.pad_id),
                labels=batch.answer.value,
            )

            labels = batch.answer.value.tolist()

            logits = output.logits
            predictions = torch.argmax(logits, dim=-1).tolist()

            # logits for sequence classification is 2 dimensional
            if logits.dim() == 2:
                predictions = [[p] for p in predictions]

            # Remove ignored index (special tokens)
            processed_preds = []
            processed_labels = []
            for pred, label in zip(predictions, labels):
                preds_list = []
                labels_list = []
                for p_, l_ in zip(pred, label):
                    # answer_pad_id marks positions to ignore (special tokens)
                    if l_ == self.numericalizer.answer_pad_id:
                        continue
                    preds_list.append(task.id2label[p_])
                    labels_list.append(task.id2label[l_])

                processed_preds.append([" ".join(preds_list)])
                processed_labels.append(" ".join(labels_list))

            all_contexts += batch_context
            all_answers += processed_labels
            all_predictions += processed_preds

            total_loss += output.loss

        # Average loss over all examples seen.
        total_loss /= len(all_example_ids)

        if original_order is not None:
            # sort back to the original order
            original_order, all_example_ids, all_predictions, all_answers, all_contexts = [
                list(a)
                for a in tuple(
                    zip(*sorted(list(zip(original_order, all_example_ids, all_predictions, all_answers, all_contexts))))
                )
            ]

        output = ValidationOutput(
            loss=total_loss,
            example_ids=all_example_ids,
            contexts=all_contexts,
            answers=all_answers,
            predictions=all_predictions,
        )

        return output
| StarcoderdataPython |
204781 | from abc import ABC, abstractmethod
from telebot.types import InlineKeyboardButton
class BaseController(ABC):
    """Abstract interface for Telegram bot controllers."""

    @abstractmethod
    def callback_name(self) -> str:
        """Return the callback identifier this controller handles."""

    def get_menu_btn(self) -> InlineKeyboardButton:
        """Return the inline keyboard button for the menu.

        The base implementation returns ``None``; subclasses may override.
        """
| StarcoderdataPython |
11238742 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-08 13:05
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django (see the "Generated by Django 1.11.4" header).
    # Do not hand-edit the field definitions: they must match the model state
    # produced by makemigrations at this point in the migration history.

    dependencies = [
        ('home', '0018_contact_contact_form'),
    ]

    operations = [
        # Change the admin-facing label of the DataletsSettings model.
        migrations.AlterModelOptions(
            name='dataletssettings',
            options={'verbose_name': 'Get support'},
        ),
        # Add an image gallery StreamField (image + optional caption) to ArticlePage.
        migrations.AddField(
            model_name='articlepage',
            name='gallery',
            field=wagtail.core.fields.StreamField((('image', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock()), ('caption', wagtail.core.blocks.TextBlock(required=False)))), icon='image')),), blank=True),
        ),
        # body_de / body_fr gain a 'placer' choice block referencing the gallery.
        migrations.AlterField(
            model_name='articlepage',
            name='body_de',
            field=wagtail.core.fields.StreamField((('paragraph', wagtail.core.blocks.RichTextBlock()), ('section', wagtail.core.blocks.CharBlock(classname='full title')), ('info', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=True)), ('summary', wagtail.core.blocks.RichTextBlock(required=True)), ('action', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False))))), ('placer', wagtail.core.blocks.ChoiceBlock(choices=[('gallery', 'Image gallery')], icon='user'))), blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='articlepage',
            name='body_fr',
            field=wagtail.core.fields.StreamField((('paragraph', wagtail.core.blocks.RichTextBlock()), ('section', wagtail.core.blocks.CharBlock(classname='full title')), ('info', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=True)), ('summary', wagtail.core.blocks.RichTextBlock(required=True)), ('action', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False))))), ('placer', wagtail.core.blocks.ChoiceBlock(choices=[('gallery', 'Image gallery')], icon='user'))), blank=True, null=True),
        ),
        # Widen the contact-form field-type choices.
        migrations.AlterField(
            model_name='contactformfield',
            name='field_type',
            field=models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type'),
        ),
    ]
| StarcoderdataPython |
326003 | # keep credit if u gonna edit or kang it
# without creadit copy paster mc
# creadits to sawan(@veryhelpful) learned from kraken
import asyncio
from uniborg.util import lightning_cmd
# Userbot command handlers. Each one edits the triggering message in place,
# optionally showing an intermediate text before the final box-drawing art.
# The guard `not event.text[0].isalpha() and event.text[0] not in (...)`
# fires only when the message starts with a symbol that is not one of the
# common command prefixes — NOTE(review): this depends on how lightning_cmd
# builds its pattern; confirm against uniborg.util.
@borg.on(lightning_cmd(pattern="mst ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("mst hu bbro ")
        await asyncio.sleep(1)
        await event.edit("╔═╦═╗╔══╗╔══╗\n║║║║║║══╣╚╗╔╝\n║║║║║╠══║─║║─\n╚╩═╩╝╚══╝─╚╝─")


@borg.on(lightning_cmd(pattern="gmg ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("good morning ")
        await asyncio.sleep(1)
        await event.edit("╔══╗╔═╦═╗\n║╔═╣║║║║║\n║╚╗║║║║║║\n╚══╝╚╩═╩╝")


@borg.on(lightning_cmd(pattern="good ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit(
            "╔══╗╔═╗╔═╗╔══╗\n║╔═╣║║║║║║╚╗╗║\n║╚╗║║║║║║║╔╩╝║\n╚══╝╚═╝╚═╝╚══╝"
        )


@borg.on(lightning_cmd(pattern="hhlo ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("hello,how r u")
        await asyncio.sleep(1)
        await event.edit("╔╗╔╗╔╗─╔═╗\n║╚╝║║║─║║║\n║╔╗║║╚╗║║║\n╚╝╚╝╚═╝╚═╝")


@borg.on(lightning_cmd(pattern="sry ?(.*)"))
async def _(event):
    # Two intermediate apology messages before the final art.
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("i'm sorry")
        await asyncio.sleep(1)
        await event.edit("last time forgive me")
        await asyncio.sleep(1)
        await event.edit(
            "╔══╗╔═╗╔═╗╔═╗╔═╦╗\n║══╣║║║║╬║║╬║╚╗║║\n╠══║║║║║╗╣║╗╣╔╩╗║\n╚══╝╚═╝╚╩╝╚╩╝╚══╝"
        )


@borg.on(lightning_cmd(pattern="thnq ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("thanks for your help")
        await asyncio.sleep(1)
        await event.edit(
            "╔══╗╔╗╔╗╔══╗╔═╦╗╔╦╗╔══╗\n╚╗╔╝║╚╝║║╔╗║║║║║║╔╝║══╣\n─║║─║╔╗║║╠╣║║║║║║╚╗╠══║\n─╚╝─╚╝╚╝╚╝╚╝╚╩═╝╚╩╝╚══╝"
        )


@borg.on(lightning_cmd(pattern="ok ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("▒▐█▀▀█▌▒▐█▒▐▀\n▒▐█▄▒█▌▒▐██▌░\n▒▐██▄█▌▒▐█▒▐▄")


@borg.on(lightning_cmd(pattern="smile ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("how sad ")
        await asyncio.sleep(1)
        await event.edit(
            "╔══╗╔═╦═╗╔══╗╔╗─╔═╗\n║══╣║║║║║╚║║╝║║─║╦╝\n╠══║║║║║║╔║║╗║╚╗║╩╗\n╚══╝╚╩═╩╝╚══╝╚═╝╚═╝"
        )


@borg.on(lightning_cmd(pattern="lal ?(.*)"))
async def _(event):
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        await event.edit("╔╗─╔═╗╔╗─\n║╚╗║╬║║╚╗\n╚═╝╚═╝╚═╝")
| StarcoderdataPython |
9732734 | from pyglet.window import key
import numpy as np
from onpolicy.envs.agar.Agar_Env import AgarEnv
import time
render = True  # when True, the main loop opens a pyglet window via env.render(0)
num_agents = 2  # number of controlled agents; also sizes the action array below
class Args():
    """Minimal stand-in for the training CLI arguments expected by AgarEnv."""

    def __init__(self):
        self.eval = True
        self.gamma = 0.99
        self.total_step = 1e8
        self.action_repeat = 1
        self.num_processes = 64
        self.num_controlled_agent = num_agents
# Build and seed the environment, then prepare the shared interaction state:
# `action` holds (dx, dy, split/eject flag) per agent and is mutated by the
# mouse/keyboard callbacks below.
env = AgarEnv(Args())
env.seed(0)
step = 1
window = None  # filled in lazily from env.viewer once rendering starts
action = np.zeros((num_agents, 3))
def on_mouse_motion(x, y, dx, dy):
    """Map window pixel coordinates to the first agent's direction in [-1, 1].

    Mutates the module-level ``action`` array; assumes a 1920x1080 window.
    """
    action[0][0] = 2 * (x / 1920 - 0.5)
    action[0][1] = 2 * (y / 1080 - 0.5)
def on_key_press(k, modifiers):
    """Set the first agent's third action component: 1 while SPACE is
    pressed, 0 for any other key. Mutates the module-level ``action``."""
    action[0][2] = 1 if k == key.SPACE else 0
start = time.time()
ca = 100  # run at most 100 environment steps
for episode in range(1):
    observation = env.reset()
    while ca:
        ca -= 1
        time.sleep(0.05)  # ~20 steps/second pacing
        if step % 40 == 0:
            print('step', step)
            print(step / (time.time() - start))  # average steps per second
        if render:
            env.render(0)
            # Hook the input callbacks once the viewer window exists.
            if not window:
                window = env.viewer.window
                window.on_key_press = on_key_press
                window.on_mouse_motion = on_mouse_motion
        a = action.reshape(-1)
        observations, rewards, done, info = env.step(a)
        # print(step, rewards)
        action[0][2] = 0  # split/eject is a one-shot: clear after each step
        step += 1
env.close()
| StarcoderdataPython |
5062154 | <filename>examples/test_hydraulics.py
import serial # Electro-hydraulic controller
import time
SERIAL_DEVICE = '/dev/ttyACM0'  # Arduino serial port (Linux ACM device)
SERIAL_BAUD = 9600
INTERVAL = 1  # NOTE(review): unused in this script
PWM_MIN = 2  # NOTE(review): unused; presumably the valid PWM range -- confirm
PWM_MAX = 255  # NOTE(review): unused; presumably the valid PWM range -- confirm

# Open the serial link. On failure only an error is printed; `arduino` then
# stays unbound and the write below will raise an uncaught NameError.
try:
    arduino = serial.Serial(SERIAL_DEVICE, SERIAL_BAUD)
except Exception as error:
    print('ERROR: %s' % str(error))

# Python 2 script: raw_input() does not exist under Python 3.
pwm = raw_input('Enter PWM: ')
while True:
    try:
        # Continuously (re)send the current PWM value, newline-terminated.
        arduino.write(pwm + '\n')
    except KeyboardInterrupt:
        # First Ctrl-C pauses sending and prompts for a new value; a second
        # Ctrl-C at the prompt exits. NameError (raw_input missing on
        # Python 3) also exits the loop.
        try:
            pwm = raw_input('Enter PWM: ')
            print('Writing %s to Arduino...' % pwm)
        except KeyboardInterrupt:
            break
        except NameError:
            break
| StarcoderdataPython |
5126469 | <reponame>MacHu-GWU/learn_flask-project
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
- How to: http://flask.pocoo.org/docs/0.12/quickstart/#file-uploads
- Patterns: http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
"""
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
# Single Flask application instance; the routes below attach to it.
app = Flask(__name__)
@app.route("/")
def index():
    """Render and return the ``index.html`` template for the landing page."""
    page = render_template("index.html")
    return page
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """Handle file uploads.

    POST: save the submitted ``csv_data`` file under ``uploads/`` when its
    name is already "secure" (unchanged by ``secure_filename``); otherwise
    report the invalid name. GET: show the upload form.

    Fixes over the original: a GET request no longer returns ``None`` (which
    made Flask raise), a missing/empty file field no longer crashes with
    ``KeyError``, and the ``uploads/`` directory is created if absent.
    """
    if request.method == 'POST':
        f = request.files.get('csv_data')
        if f is None or f.filename == '':
            # No file part or empty selection: report instead of raising KeyError.
            return "No file was uploaded!"
        new_filename = secure_filename(f.filename)
        if new_filename == f.filename:
            import os
            os.makedirs('uploads', exist_ok=True)  # ensure target directory exists
            dst = 'uploads/%s' % new_filename
            f.save(dst)
            return "Success!"
        else:
            return "Your file name <strong>%s</strong> is invalid!" % f.filename
    # GET: show the same page that contains the upload form.
    return render_template("index.html")
if __name__ == "__main__":
    # Start Flask's built-in development server with the debugger enabled.
    app.run(debug=True)
| StarcoderdataPython |
8071961 | <reponame>HomeWeave/HomePiServer
import random
import socket
from copy import deepcopy
from threading import Thread, Event, Semaphore
import pytest
from weavelib.exceptions import ObjectNotFound, ObjectAlreadyExists
from weavelib.exceptions import SchemaValidationFailed, ProtocolError
from weavelib.exceptions import BadOperation, InternalError, ObjectClosed
from weavelib.exceptions import AuthenticationFailed
from weavelib.messaging import Sender, Receiver, read_message
from weavelib.messaging import ensure_ok_message, WeaveConnection
from messaging.server import MessageServer
from messaging.application_registry import ApplicationRegistry, Plugin
from messaging.queue_manager import ChannelRegistry
from messaging.synonyms import SynonymRegistry
import logging
# Attach a default stderr handler so log output from the server thread is
# visible while the tests run.
logging.basicConfig()
def send_raw(msg):
    """Send a raw protocol string to the local server and validate the reply.

    Opens a fresh TCP connection, writes *msg* verbatim, half-closes the
    write side (signals end-of-request to the server), reads one response
    message and raises via ensure_ok_message() if it is not an OK reply.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", WeaveConnection.PORT))
    wfile = sock.makefile('wb', WeaveConnection.WRITE_BUF_SIZE)
    rfile = sock.makefile('rb', WeaveConnection.READ_BUF_SIZE)

    wfile.write(msg.encode())
    wfile.flush()
    # Half-close: tells the server no more request bytes are coming.
    sock.shutdown(socket.SHUT_WR)

    msg = read_message(rfile)
    sock.close()
    ensure_ok_message(msg)
def make_receiver(count, obj, sem, r):
    """Build an on_message callback for receiver *r*.

    Each delivery is appended to *obj* and announced on semaphore *sem*;
    after *count* deliveries the receiver is stopped.
    """
    remaining = count

    def on_message(msg, headers):
        nonlocal remaining
        obj.append(msg)
        sem.release()
        remaining -= 1
        if not remaining:
            r.stop()

    return on_message
class TestMessageServer(object):
@classmethod
def setup_class(cls):
    """Start a real MessageServer with a fixed set of queues and connect a
    shared client connection used by all tests in this class."""
    event = Event()  # set by the server once it is ready to accept clients
    schema = {
        "type": "object",
        "properties": {
            "foo": {
                "type": "string"
            }
        },
        "required": ["foo"]
    }
    test_app = Plugin("test", "test", "test-token")
    apps = ApplicationRegistry()
    registry = ChannelRegistry(apps)
    # Queues exercised by the tests: fifo, sessionized and multicast kinds.
    registry.create_queue("/a.b.c", test_app, schema, {}, 'fifo')
    registry.create_queue("/test.sessionized", test_app, {"type": "string"},
                          {}, 'sessionized')
    registry.create_queue("/test.sessionized2", test_app,
                          {"type": "string"}, {}, 'sessionized')
    registry.create_queue("/test.sessionized/several", test_app,
                          {"type": "string"}, {}, 'sessionized')
    registry.create_queue("/test.fifo/simple", test_app, {"type": "string"},
                          {}, 'fifo')
    registry.create_queue("/test.fifo/test-disconnect", test_app,
                          {"type": "string"}, {}, 'fifo')
    registry.create_queue("/test.sessionized/test-disconnect", test_app,
                          {"type": "string"}, {}, 'sessionized')
    registry.create_queue('/multicast/1', test_app, {"type": "string"}, {},
                          'multicast')
    registry.create_queue('/multicast/2', test_app, {"type": "string"}, {},
                          'multicast')
    synonym_registry = SynonymRegistry()
    # "/multi" becomes an alias for the second multicast queue.
    synonym_registry.register("/multi", "/multicast/2")
    cls.server = MessageServer(11023, apps, registry, synonym_registry,
                               event.set)
    cls.server_thread = Thread(target=cls.server.run)
    cls.server_thread.start()
    event.wait()  # block until the server signals readiness
    cls.conn = WeaveConnection.local()
    cls.conn.connect()
@classmethod
def teardown_class(cls):
    """Tear down in reverse order of setup: client first, then server,
    then join the server thread so no background work outlives the tests."""
    cls.conn.close()
    cls.server.shutdown()
    cls.server_thread.join()
def test_connect_disconnect(self):
    # An empty payload: the server drops the connection, surfacing as IOError.
    with pytest.raises(IOError):
        send_raw('')

def test_bad_structure(self):
    # Garbage that does not match the header/message line format.
    with pytest.raises(ProtocolError):
        send_raw("sdkhsds\n-ss!3l")

def test_required_fields_missing(self):
    # NOTE(review): presumably the OP header is the missing field -- confirm.
    with pytest.raises(ProtocolError):
        send_raw("HDR push\nMSG blah\n\n")

def test_bad_operation(self):
    # A well-formed message whose OP value is not a known operation.
    with pytest.raises(BadOperation):
        send_raw('MSG {"a": "b"}\nSESS 1\nOP bad-operation\nC /a.b.c\n\n')

def test_bad_json(self):
    # MSG payload is not valid JSON.
    with pytest.raises(ProtocolError):
        send_raw('MSG {a": "b"}\nOP push\nQ a.b.c\n\n')

def test_push_without_queue_header(self):
    # Push with no queue/channel header at all.
    with pytest.raises(ProtocolError):
        send_raw('MSG {"a": "b"}\nOP push\n\n')

def test_push_without_task(self):
    # Sending a None payload through the client API is a protocol violation.
    s = Sender(self.conn, "/a.b.c")
    s.start()
    with pytest.raises(ProtocolError):
        s.send(None)

def test_push_to_unknown_queue(self):
    s = Sender(self.conn, "unknown.queue")
    s.start()
    with pytest.raises(ObjectNotFound):
        s.send({"a": "b"})

def test_pop_from_unknown_queue(self):
    r = Receiver(self.conn, "unknown.queue")
    r.start()
    with pytest.raises(ObjectNotFound):
        r.receive()

def test_pop_without_required_header(self):
    with pytest.raises(ProtocolError):
        send_raw('OP pop\n\n')

def test_push_with_bad_schema(self):
    # /a.b.c requires "foo" to be a string (see setup_class schema).
    s = Sender(self.conn, "/a.b.c")
    s.start()
    with pytest.raises(SchemaValidationFailed):
        s.send({"foo": [1, 2]})
    def test_simple_push_pop(self):
        """A single pushed message is delivered to a single receiver."""
        msgs = []
        s = Sender(self.conn, "/a.b.c")
        r = Receiver(self.conn, "/a.b.c")
        s.start()
        r.start()
        # Stop the receiver loop right after the first message arrives.
        r.on_message = lambda msg, hdrs: msgs.append(msg) or r.stop()
        s.send({"foo": "bar"})
        thread = Thread(target=r.run)
        thread.start()
        thread.join()
        assert msgs == [{"foo": "bar"}]

    def test_multiple_push_pop(self):
        """Ten pushed copies of the same message are all delivered."""
        obj = {"foo": "bar"}
        s = Sender(self.conn, "/a.b.c")
        r = Receiver(self.conn, "/a.b.c")
        s.start()
        for _ in range(10):
            s.send(obj)
        expected_message_count = 10

        def on_message(msg, headers):
            # Every delivery must match; the loop stops on the last one.
            assert msg == obj
            nonlocal expected_message_count
            if expected_message_count == 1:
                r.stop()
            expected_message_count -= 1

        r.start()
        r.on_message = on_message
        thread = Thread(target=r.run)
        thread.start()
        thread.join()
    def test_push_sessionized_without_key(self):
        """Sessionized queues require a COOKIE header on both ends."""
        sender = Sender(self.conn, "/test.sessionized")
        sender.start()
        with pytest.raises(ProtocolError):
            sender.send("test")
        receiver = Receiver(self.conn, "/test.sessionized")
        receiver.start()
        with pytest.raises(ProtocolError):
            receiver.receive()

    def test_simple_sessionized_push_pop(self):
        """Messages are routed only to the receiver with a matching cookie."""
        sender1 = Sender(self.conn, "/test.sessionized2")
        sender1.start()
        sender1.send("test", headers={"COOKIE": "xyz"})
        sender2 = Sender(self.conn, "/test.sessionized2")
        sender2.start()
        sender2.send("diff", headers={"COOKIE": "diff"})
        msgs1 = []
        sem1 = Semaphore(0)
        receiver1 = Receiver(self.conn, "/test.sessionized2", cookie="xyz")
        receiver1.on_message = make_receiver(2, msgs1, sem1, receiver1)
        receiver1.start()
        Thread(target=receiver1.run).start()
        msgs2 = []
        sem2 = Semaphore(0)
        receiver2 = Receiver(self.conn, "/test.sessionized2", cookie="diff")
        receiver2.on_message = make_receiver(2, msgs2, sem2, receiver2)
        receiver2.start()
        Thread(target=receiver2.run).start()
        assert sem1.acquire(timeout=10)
        assert sem2.acquire(timeout=10)
        assert msgs1[0] == "test"
        assert msgs2[0] == "diff"
        # Test retrieving items for the second time.
        sender1.send("test2", headers={"COOKIE": "xyz"})
        assert sem1.acquire(timeout=10)
        assert msgs1[1] == "test2"
        # The other session must not see this message.
        assert not sem2.acquire(timeout=5)
    def test_several_sessionized_queues(self):
        """Ten concurrent sessions each receive exactly their own message."""
        senders = []
        receivers = []
        cookies = []
        texts = []
        for i in range(10):
            cookie = "c-" + str(i)
            cookies.append(cookie)
            sender = Sender(self.conn, "/test.sessionized/several")
            sender.start()
            senders.append(sender)
            receiver = Receiver(self.conn, "/test.sessionized/several",
                                cookie=cookie)
            receiver.start()
            receivers.append(receiver)
            text = "text" + str(i)
            texts.append(text)
        # NOTE(review): the [::-1] reversal is redundant — the list is
        # shuffled immediately afterwards.
        arr = list(range(10))[::-1]
        random.shuffle(arr)
        # Send requests in random order
        for pos in arr:
            senders[pos].send(texts[pos], headers={"COOKIE": cookies[pos]})
        for i in range(10):
            assert texts[i] == receivers[i].receive().task
    def test_fifo_push_pop(self):
        """A FIFO queue alternates deliveries between competing receivers."""
        msgs1 = []
        sem1 = Semaphore(0)
        receiver1 = Receiver(self.conn, "/test.fifo/simple")
        receiver1.on_message = make_receiver(2, msgs1, sem1, receiver1)
        receiver1.start()
        Thread(target=receiver1.run).start()
        msgs2 = []
        sem2 = Semaphore(0)
        receiver2 = Receiver(self.conn, "/test.fifo/simple")
        receiver2.on_message = make_receiver(2, msgs2, sem2, receiver2)
        receiver2.start()
        Thread(target=receiver2.run).start()
        sender1 = Sender(self.conn, "/test.fifo/simple")
        sender1.start()
        # Each send must reach exactly one receiver, round-robin style.
        sender1.send("test")
        assert sem1.acquire(timeout=10)
        assert msgs1[-1] == "test"
        assert not sem2.acquire(timeout=2)
        sender1.send("test2")
        assert sem2.acquire(timeout=10)
        assert msgs2[-1] == "test2"
        assert not sem1.acquire(timeout=2)
        sender1.send("test3")
        assert sem1.acquire(timeout=10)
        assert msgs1[-1] == "test3"
        assert not sem2.acquire(timeout=2)
        sender1.send("test4")
        assert sem2.acquire(timeout=10)
        assert msgs2[-1] == "test4"
        assert not sem1.acquire(timeout=2)
    def test_multicast(self):
        """Every subscriber of a multicast queue gets each message once."""
        msgs = [[] for _ in range(5)]
        sems = [Semaphore(0) for _ in range(5)]
        receivers = [Receiver(self.conn, '/multicast/1') for _ in range(5)]
        for receiver, sem, msg in zip(receivers, sems, msgs):
            receiver.on_message = make_receiver(1, msg, sem, receiver)
            receiver.start()
            Thread(target=receiver.run).start()
        sender = Sender(self.conn, '/multicast/1')
        sender.start()
        sender.send("test")
        for msg, sem in zip(msgs, sems):
            assert sem.acquire(timeout=10)
            assert msg[-1] == "test"
            # Exactly one delivery per subscriber.
            assert not sem.acquire(timeout=2)
        sender.close()
        for receiver in receivers:
            receiver.stop()

    def test_multicast_with_synonym(self):
        """A queue registered under a synonym behaves like the original."""
        msgs = []
        sem = Semaphore(0)
        receiver = Receiver(self.conn, "/synonyms/multi")
        receiver.on_message = make_receiver(1, msgs, sem, receiver)
        receiver.start()
        Thread(target=receiver.run).start()
        sender = Sender(self.conn, "/synonyms/multi")
        sender.start()
        sender.send("test")
        assert sem.acquire(timeout=10)
        assert msgs[-1] == "test"
        assert not sem.acquire(timeout=2)
    @pytest.mark.parametrize("queue_name",
                             ["/test.fifo/test-disconnect",
                              "/test.sessionized/test-disconnect"])
    def test_queue_waits_removed_after_client_disconnects(self, queue_name):
        """After a waiting client disconnects, deliveries still reach the
        remaining receiver instead of the dead one."""
        conn1 = WeaveConnection.local()
        conn2 = WeaveConnection.local()
        conn3 = WeaveConnection.local()
        conn1.connect()
        conn2.connect()
        conn3.connect()
        msgs1 = []
        sem1 = Semaphore(0)
        receiver1 = Receiver(conn1,queue_name, cookie="a")
        receiver1.on_message = make_receiver(1, msgs1, sem1, receiver1)
        receiver1.start()
        Thread(target=receiver1.run).start()
        msgs2 = []
        sem2 = Semaphore(0)
        receiver2 = Receiver(conn2, queue_name, cookie="b")
        receiver2.on_message = make_receiver(1, msgs2, sem2, receiver2)
        receiver2.start()
        Thread(target=receiver2.run).start()
        conn1.close()
        # NOTE(review): fixed sleep gives the server time to notice the
        # disconnect; a poll/condition would be less flaky.
        import time; time.sleep(1)
        sender1 = Sender(conn3, queue_name)
        sender1.start()
        sender1.send("test", headers={"COOKIE": "b"})
        assert sem2.acquire(timeout=5)
        assert msgs2[-1] == "test"
        conn2.close()
        conn3.close()
class TestMessageServerClosure(object):
    """Verify a server shutdown unblocks receivers waiting on a queue."""

    @pytest.mark.parametrize("queue_type,cookie",
                             [("fifo", (x for x in ("a", "b"))),
                              ("sessionized", (x for x in ("a", "b")))])
    def test_queue_closure(self, queue_type, cookie):
        """Receivers blocked in receive() must not hang once the server
        shuts down."""
        event = Event()
        test_app = Plugin("test", "test", "test-token")
        apps = ApplicationRegistry()
        registry = ChannelRegistry(apps)
        registry.create_queue("/fifo-closure", test_app, {"type": "string"}, {},
                              queue_type)
        server = MessageServer(11023, ApplicationRegistry(), registry,
                               SynonymRegistry(), event.set)
        thread = Thread(target=server.run)
        thread.start()
        event.wait()
        conn = WeaveConnection()
        conn.connect()

        def patch_receive(receiver, event):
            # Signal the moment receive() is entered, so the test knows the
            # receiver is actually blocked before shutting the server down.
            original = receiver.receive

            def receive():
                event.set()
                original()
            receiver.receive = receive

        def wrap_run(receiver):
            # The run loop is expected to die with an exception on closure;
            # swallow it so the thread exits cleanly.
            def run():
                try:
                    receiver.run()
                except:
                    pass
            return run

        e1 = Event()
        r1 = Receiver(conn, "/fifo-closure", cookie=next(cookie))
        r1.start()
        patch_receive(r1, e1)
        t1 = Thread(target=wrap_run(r1))
        t1.start()
        e2 = Event()
        r2 = Receiver(conn, "/fifo-closure", cookie=next(cookie))
        r2.start()
        patch_receive(r2, e2)
        t2 = Thread(target=wrap_run(r2))
        t2.start()
        e1.wait()
        e2.wait()
        server.shutdown()
        thread.join()
        t1.join()
        t2.join()
| StarcoderdataPython |
5094558 | from mechmat.core.chainable import Chainable, Guarded
from mechmat import ureg
from mechmat.principal import geometry
class Vector(Chainable):
    """A chainable quantity whose coordinate is guarded to meters."""

    def __init__(self, **kwargs):
        super(Vector, self).__init__(**kwargs)
        # Guard enforces that any assigned coordinate carries units of meters.
        self.set_guard('coordinate', ureg.m)

    # Coordinate of the vector, validated by the guard above.
    coordinate = Guarded()
class Segment(Chainable):
    """A line segment between two points, with a linked distance attribute."""

    def __init__(self, **kwargs):
        super(Segment, self).__init__(**kwargs)
        # All three attributes are guarded to carry units of meters.
        self.set_guard('point_1', ureg.m)
        self.set_guard('point_2', ureg.m)
        self.set_guard('distance', ureg.m)
        # distance is derived from the endpoints via geometry.distance.
        self.link_attr('distance', geometry.distance, point_1='point_1', point_2='point_2')

    point_1 = Guarded()
    point_2 = Guarded()
    distance = Guarded()
| StarcoderdataPython |
1684864 | # Converts the repl into a web server
# Which allows the bot to stay alive
# CREDITS TO BEAU FROM FREECODECAMP FOR THIS ONE
# https://www.youtube.com/watch?v=SPTfmiYiuok
from flask import Flask
from threading import Thread
app = Flask("")


@app.route("/")
def home():
    """Health-check page proving the bot's web server is up."""
    return "<h1>Cosette Bot is alive</h1>"
def run():
    """Serve the Flask app on all interfaces, port 8080."""
    app.run(host='0.0.0.0', port=8080)
def keep_alive():
    """Start the web server in a background thread so the main program
    (e.g. a repl-hosted bot) stays alive.

    Note: the original last line was fused with dataset residue
    ("| StarcoderdataPython |"), a syntax error; repaired here.
    """
    t = Thread(target=run)
    t.start()
9719727 | <reponame>king/s3vdc<gh_stars>1-10
"""
Copyright (C) king.com Ltd 2019
https://github.com/king/s3vdc
License: MIT, https://raw.github.com/king/s3vdc/LICENSE.md
"""
import tensorflow as tf
from lib.resolve_learning_rate import resolve_lr
from typing import Union, Tuple
# Maps the optimizer name accepted in hyper-parameters to its TF class.
OPTIMIZER_NAMES = {
    "Adagrad": tf.train.AdagradOptimizer,
    "Adam": tf.train.AdamOptimizer,
    "RMSProp": tf.train.RMSPropOptimizer,
    "SGD": tf.train.GradientDescentOptimizer,
}
def resolve_optimizer(
    hparams: Union[tf.contrib.training.HParams, dict],
    global_step: Union[tf.Tensor, int] = None,
) -> Tuple[
    Union[
        tf.train.AdagradOptimizer,
        tf.train.AdamOptimizer,
        tf.train.RMSPropOptimizer,
        tf.train.GradientDescentOptimizer,
    ],
    Union[tf.Tensor, float],
]:
    """Obtain the specified optimizer.

    Apply learning rate scheduling when it is specified.

    Arguments:
        hparams {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters.

    Keyword Arguments:
        global_step {Union[tf.Tensor, int]} -- Global step to use for the decay computation. Must not be negative. (default: {None})

    Returns:
        Tuple[Union[tf.train.AdagradOptimizer,tf.train.AdamOptimizer,tf.train.RMSPropOptimizer,tf.train.GradientDescentOptimizer,],Union[tf.Tensor, float],] -- The optimizer
    """
    if not hasattr(hparams, "optimizer"):
        raise ValueError("hparams instance did not have optimizer information")
    if isinstance(hparams.optimizer, tf.train.Optimizer):
        # NOTE(review): this branch returns the optimizer alone, not the
        # (optimizer, learning_rate) tuple the annotation promises —
        # confirm against call sites before relying on tuple unpacking.
        return hparams.optimizer
    elif isinstance(hparams.optimizer, str):
        if hparams.optimizer not in OPTIMIZER_NAMES:
            raise ValueError("Unknown optimizer")
        optimizer_cls = OPTIMIZER_NAMES[hparams.optimizer]
        lr = resolve_lr(hparams, global_step)
        if lr is not None:
            tf.summary.scalar("learning_rate", lr)
        return optimizer_cls(lr), lr
    else:
        # Should never reach here
        raise RuntimeError("got None as learning rate value at runtime")
| StarcoderdataPython |
3220470 | <reponame>suiluj/pi-adhoc-mqtt-cluster
class Graphinfo:
    """Holds a measurement name plus an optional time-range filter clause.

    Note: the original final line was fused with dataset residue
    ("| StarcoderdataPython |"), a syntax error; repaired here.
    """

    def __init__(self, measurement, where_time_range=None):
        self.measurement = measurement
        self.where_time_range = where_time_range

    def setmeasurement(self, measurement):
        """Replace the stored measurement name."""
        self.measurement = measurement

    def getmeasurement(self):
        """Return the stored measurement name."""
        return self.measurement
4929082 | <gh_stars>1-10
import time
from operator import methodcaller
from types import FunctionType
import grpc
from concurrent import futures
from py_micro.py_consul import ConsulMicroServer
class MicroService(object):
    """gRPC micro-service helper: registers servicers with Consul and
    exposes decorators for providing and consuming services."""

    _ONE_DAY_IN_SECONDS = 60 * 60 * 24

    # Configuration defaults (class-level; shared by all users of the class).
    """配置"""
    START_SERVER = True
    MAX_WORKERS = 4
    HOST = '127.0.0.1'
    PORT = 8111
    APP_NAME = ''
    REG_HOST = '127.0.0.1'
    REG_PORT = 8500
    _INSECURE_CHANNEL_LIST = []  # grpc connection pool
    _SERVICE_LIST = []
    _GRPC_SERVER = grpc.server(futures.ThreadPoolExecutor(MAX_WORKERS))
    _CONSUL_SERVER = ConsulMicroServer(REG_HOST, REG_PORT)

    # Register a service — decorator factory.
    """注册服务->装饰器方法"""
    @classmethod
    def register(cls, servicer_func: FunctionType, service_name: str = None):
        def my_decorator(func):
            # If serving is enabled, register the servicer automatically.
            if cls.START_SERVER:
                if not isinstance(servicer_func, FunctionType):
                    raise Exception("微服务注册,必须是方法!")
                ob = func()
                # Attach the servicer instance to the gRPC server.
                servicer_func(ob, cls._GRPC_SERVER)
                # Register all public callable method names as Consul tags.
                tags = list(
                    filter(lambda m: not m.startswith("__") and not m.endswith("__") and callable(getattr(ob, m)),
                           dir(ob)))
                cls._CONSUL_SERVER.reg_service(service_name, cls.HOST, cls.PORT, tags)

            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)

            return wrapper

        return my_decorator

    # Service-consumption decorator factory.
    """服务消费逻辑"""
    @classmethod
    def consume(cls, service_id, stub, server_name: str = None):
        def my_decorator(func):
            def wrapper(*args, **kwargs):
                # Resolve the target service address (host, port, tags)
                # from Consul at call time.
                service_addr = cls._CONSUL_SERVER.get_service(service_id)
                host = service_addr[0]
                port = service_addr[1]
                tags = service_addr[2]
                method = server_name or func.__name__
                if method not in tags:
                    raise Exception('服务方法不存在')
                conn = grpc.insecure_channel("{0}:{1}".format(host, port))  # todo: maintain a connection pool here
                client = stub(channel=conn)
                # The wrapped function's first positional or keyword
                # argument becomes the RPC request parameter.
                if args:
                    param = args[0]
                elif kwargs:
                    param = list(kwargs.values())[0]
                else:
                    raise Exception('参数不存在')
                return methodcaller(method, param)(client)  # invoke the named method on the stub

            return wrapper

        return my_decorator

    # Start the micro-service and block until interrupted.
    """启动微服务"""
    @classmethod
    def start(cls):
        cls._GRPC_SERVER.add_insecure_port("{0}:{1}".format(cls.HOST, cls.PORT))
        cls._GRPC_SERVER.start()
        try:
            while True:
                time.sleep(cls._ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            cls._GRPC_SERVER.stop(0)
# class GrpcConnPool(object):
# _POOL: dict = []
#
# @classmethod
# def pop(cls, host, port):
# addr = "{0}:{1}".format(host, port)
# if addr in cls._POOL:
#
# conn = grpc.insecure_channel("{0}:{1}".format(host, port)) # todo:这个可以维护一个连接池
# pass
#
# @classmethod
# def push(cls, host, port):
| StarcoderdataPython |
12845752 | <filename>Utils/Submission/Submission.py
import RootPath
def create_submission_file(tweets, users, predictions, output_file):
    """
    Write one "tweet,user,score" line per prediction to output_file,
    resolved relative to the project root. Scores are rounded to 4
    decimal places.

    Raises AssertionError when the input sequences differ in length.
    """
    # Preliminary checks. The tweets-vs-predictions check of the original
    # was redundant (implied by the two below) and has been dropped.
    assert len(tweets) == len(users), f"length are different tweets -> {len(tweets)}, and users -> {len(users)} "
    assert len(users) == len(predictions), f"length are different users -> {len(users)}, and predictions -> {len(predictions)} "
    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on exceptions and its last line was
    # fused with dataset residue).
    with open(RootPath.get_root().joinpath(output_file), "w") as file:
        for i in range(len(tweets)):
            file.write(f"{tweets[i]},{users[i]},{round(predictions[i], 4)}\n")
3263842 | """
It contains all the projects that can be ignored in graph.
"""
# Projects that can be ignored when building the graph; populated elsewhere.
FLAGS = {}
| StarcoderdataPython |
9707183 | """
HTTP utilities for MOVE Airflow jobs.
"""
from urllib3.util import Retry
import requests
from requests.adapters import HTTPAdapter
def requests_session(
        retries=3,
        backoff_factor=0.3,
        status_forcelist=(500, 502, 504),
        session=None):
    """
    Create a new `requests` session that automatically retries requests the
    specified number of times.
    """
    session = session or requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    # Install the retrying adapter for both plain and TLS schemes.
    for scheme in ('http://', 'https://'):
        session.mount(scheme, adapter)
    return session
| StarcoderdataPython |
9737909 | <gh_stars>1-10
#!/usr/bin/python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statistics
"""
Last edited by : Shawn
Last edited time : 25/11/2021
Version Status: dev
TO DO: Verify correctness
"""
class new_Node:
    """A node of a left-child/right-sibling tree built from one AST entry."""

    def __init__(self, json_object):
        self.id = json_object['id']
        self.type = json_object['type']
        self.child = None   # first child (LCRS "left child")
        self.next = None    # next sibling (LCRS "right sibling")
        self.parent = None
        # Leaf tokens carry a concrete value; internal nodes get None.
        self.value = json_object.get('value')
def addSibling(parent_node, node, child_node):
    """Append child_node at the tail of node's sibling chain.

    The new sibling's parent is set to parent_node; returns the attached
    node, or None when node is None.
    """
    if node is None:
        return None
    # Walk to the end of the sibling list.
    tail = node
    while tail.next is not None:
        tail = tail.next
    tail.next = child_node
    child_node.parent = parent_node
    return child_node
def addChild(node, child):
    """Attach child under node: as first child, or as a sibling of it.

    Returns the attached node, or None when node is None.
    """
    if node is None:
        return None
    if node.child is None:
        # No children yet: link directly as the first child.
        node.child = child
        child.parent = node
        return child
    # Otherwise append to the existing children's sibling chain.
    return addSibling(node, node.child, child)
def find_node(candidateNode, targetNode):
    """Depth-first search for the node whose id equals targetNode.

    Children are searched before the sibling chain; returns None when no
    node matches.
    """
    if candidateNode is None:
        return None
    if candidateNode.id == targetNode:
        return candidateNode
    found = find_node(candidateNode.child, targetNode)
    if found is not None:
        return found
    return find_node(candidateNode.next, targetNode)
def ast2lcrs(ast):
    """
    Convert an AST (list of node dicts whose 'children' hold child indices)
    into a left-child/right-sibling tree.

    @ input : AST tree
    @ output: LCRS tree, lookup_table
    """
    root = None
    lookup_table = {}  # key: ast_id , value : corresponding Node_object
    for token_id in range(len(ast)):  # loop through a single AST
        if root is None:
            # First entry becomes the root; attach its children directly.
            root = new_Node(ast[token_id])
            lookup_table[token_id] = root
            for child in ast[token_id]['children']:
                lookup_table[child] = addChild(root, new_Node(ast[child]))
        else:
            # find out the location of node
            starting_node = find_node(root,
                                      token_id)  # after we find it, we append the children under this node
            if starting_node is not None:
                try:
                    for child in ast[token_id]['children']:
                        lookup_table[child] = addChild(starting_node, new_Node(ast[child]))
                except KeyError as err:
                    # Leaf nodes have no 'children' key; nothing to attach.
                    pass
    return root, lookup_table
def sequenceSplit(in_order_list, lookup_table):
    """
    Split an in-order node traversal into training sub-sequences; a
    sub-sequence ends whenever the current node has a right sibling, at
    which point the node's ancestors (up to, but excluding, the root) are
    appended from the tail of in_order_list.

    @input : an entire ast list
    @output: sub_sequence table -> Key: training_id, Value: sub_sequence
    """
    subSequence_list = {}
    training_dataset_index = 0
    list = []  # NOTE(review): shadows the builtin `list`; rename if edited
    for index in in_order_list:
        # Leaves contribute their value, internal nodes their type.
        if lookup_table[index].child is None:
            list.append(lookup_table[index].value)
        else:
            list.append(lookup_table[index].type)
        if lookup_table[index].next is not None:
            # we need to get the parent nodes starting from the end of "in_order_list"
            number_of_parent = 1
            seeking_node = lookup_table[index]  # we use this seeking node to find out the number of parents we have.
            while seeking_node.parent.id != 0:
                number_of_parent += 1
                seeking_node = seeking_node.parent
            # now we extract the parents nodes
            while -number_of_parent < 0:
                list.append(lookup_table[in_order_list[-number_of_parent]].type)
                number_of_parent -= 1
            subSequence_list[training_dataset_index] = list  # terminate the list
            training_dataset_index += 1  # add up the index number
            list = []  # erase the whole list
    return subSequence_list
def get_key(val, lookup_table):
    """Return the first key in lookup_table mapped to val (None if absent)."""
    return next((key for key, value in lookup_table.items() if value == val),
                None)
def tokenization(sub_sequence_dict):
    """
    @input : dict containing the ID and AST branch
    @output: tokenized_dict --> input dict just replace words with int representation
             and lookup_table_dict --> {1, [R = 'Program, F = 12],...}

    NOTE(review): the variable named `frequency` is incremented once per
    *new* token, so lookup_table_dict stores a first-seen ordinal rather
    than an occurrence count — confirm the intended semantics. Also, as
    written, duplicate tokens are skipped when building temp_list, so the
    tokenized output drops repeated tokens — verify against callers.
    """
    lookup_table_dict = {}
    lookup_table_a = {}
    lookup_table_b = {}
    tokenized_subsequence = []
    token_index = 1
    frequency = 1
    # Creating the lookup table for the provided sub_sequence_dict
    for sequence in sub_sequence_dict:
        temp_list = []
        for val in sub_sequence_dict[sequence]:
            # Check if the value is already within the dictionary
            if val not in lookup_table_a.values():
                lookup_table_a[token_index] = val
                token_index += 1
                new_val = get_key(val, lookup_table_a)
                temp_list.append(new_val)
                lookup_table_b[val] = frequency
                frequency += 1
        tokenized_subsequence.append(temp_list)
    # Now I need to make my final dict which contains the mapping of {value: [word, frequency]}
    for index in lookup_table_a:
        x = lookup_table_a[index]  # the token text for this id
        y = lookup_table_b[x]  # its first-seen ordinal (see NOTE above)
        lookup_table_dict[index] = [x, y]
    return tokenized_subsequence, lookup_table_dict
def csv_to_df(file):
    """
    @input : csv file
    @output: data frame containing the csv file
    """
    return pd.read_csv(file)
def standardize_subsequence(tokenized_subsequence):
    """
    Normalize every tokenized sequence to a fixed length.

    Each sequence is truncated to its first `cutoff` tokens, those tokens
    are reversed, and the result is left-padded with zeros up to `cutoff`,
    so all output sequences are uniform in length. The cutoff of 16 was
    determined from analysis of the sequence-length distribution of the
    tokenized data.

    @input : tokenized sequence
    @output: standardized tokenized sequence: sequences uniform in length
    """
    cutoff = 16  # determined empirically from the length histogram
    standardized_subsequence = []
    for sequence in tokenized_subsequence:
        # Keep at most `cutoff` leading tokens, then reverse them.
        # (Replaces the original three near-duplicate branches, which also
        # hard-coded 16 instead of using `cutoff`.)
        reversed_tail = sequence[:cutoff][::-1]
        # Left-pad with zeros so every row has exactly `cutoff` entries.
        padding = [0] * (cutoff - len(reversed_tail))
        standardized_subsequence.append(padding + reversed_tail)
    return standardized_subsequence
| StarcoderdataPython |
12834116 | <reponame>AvantikaNaik/fractals
import turtle
import time
def drawKochSide(length, level):
    """Draw one Koch-curve side of the given length with `level` levels of
    recursion; level 1 draws a straight segment."""
    if level == 1:
        turtle.forward(length)
        return
    segment = length / 3
    # The classic _/\_ bump: turn left 60, then right 120 (left -120),
    # then left 60 between the four sub-segments.
    for turn in (60, -120, 60):
        drawKochSide(segment, level - 1)
        turtle.left(turn)
    drawKochSide(segment, level - 1)
def drawKochSnowflake(length, level):
    """Draw a closed Koch snowflake: three Koch sides joined by 120° turns."""
    for _ in range(3):
        drawKochSide(length, level)
        turtle.right(120)
def draw():
    """Draw one black Koch curve plus a blue and a red Koch snowflake."""
    turtle.delay(1)
    turtle.speed(0)
    turtle.penup()
    turtle.goto(-300,100)
    turtle.pendown()
    #time.sleep(5)
    turtle.pencolor('black')
    drawKochSide(300, 4)
    turtle.pencolor('blue')
    drawKochSnowflake(300, 4)
    turtle.penup()
    turtle.goto(-250,50)
    turtle.pendown()
    turtle.pencolor('red')
    drawKochSnowflake(200, 5)
    turtle.done()
draw() | StarcoderdataPython |
5158415 | <filename>Python/2021/Class 2/Student Code/Harshit Sanghi/Assignment 1.py
# Read an integer and report whether it is even or odd.
num = int(input("Enter your number"))
if (num % 2) == 0:
    print("the number", num, "is even")
else:
    # Fixed: the original line ended with a curly quote (”) instead of a
    # straight quote, leaving the string unterminated (a syntax error).
    print("the number", num, "is odd")
| StarcoderdataPython |
11254713 |
# Demonstrates that bare class-level annotations (no assignment) live only
# in __annotations__ and do not create class attributes.


class A:
    a : str
    b : int


print(A.__dict__)
print(A().__dict__)
A.a = "a"
print(A.__dict__)
print(A().__dict__)
print(A.a)
A.a = 1
print(A.__dict__)
print(A().__dict__)
print(A.a)
print(A().a)
# NOTE: A.b was annotated but never assigned, so this raises AttributeError.
print(A.b)
| StarcoderdataPython |
6618637 | <reponame>godweiyang/ParaGen
import torch.nn as nn
class AbstractEncoderLayer(nn.Module):
    """
    Base class for encoder layers that keep an internal state cache and a
    running mode ('train', 'valid' or 'infer').
    """

    def __init__(self):
        super().__init__()
        self._cache = {}
        self._mode = 'train'

    def reset(self, mode):
        """
        Switch the running mode and drop any cached state.

        Args:
            mode: running mode in [train, valid, infer]
        """
        self._mode = mode
        # clear() mutates the dict in place, so anything that shares the
        # cache (via set_cache/get_cache) observes the reset too.
        self._cache.clear()

    def _update_cache(self, *args, **kwargs):
        """
        Hook for subclasses to merge outside state into the cache.
        """
        pass

    def get_cache(self):
        """
        Retrieve the inner cache.

        Returns:
            - cached states as a Dict
        """
        return self._cache

    def set_cache(self, cache):
        """
        Replace the inner cache with a dict supplied from outside.

        Args:
            cache: cache dict from outside
        """
        self._cache = cache
| StarcoderdataPython |
4873815 | <filename>manage.py
import os

from app import create_app, db
from app.models import Category, Product, Store
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand

# The config name comes from the environment, falling back to 'default'.
app = create_app(os.getenv('CAMPAIGN_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)


def make_shell_context():
    """Objects pre-imported into the `manage.py shell` session."""
    return dict(app=app, db=db, Category=Category, Product=Product, Store=Store)


manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
| StarcoderdataPython |
9672884 | <reponame>Gustavo-daCosta/Projetos
from tkinter import *
import time
from os import system
from random import randint

# Main window for the typing-test challenge.
app = Tk()
app.title("Fast Typing Test")
app.geometry("600x550")


def semComando():
    """Placeholder callback for menu entries not implemented yet."""
    print("Sem comando")


# Menu bar with Help and About cascades (mostly placeholder commands).
barra_menu = Menu(app)
menuHelp = Menu(barra_menu, tearoff=0)
menuHelp.add_command(label="How the app works?", command=semComando)
menuHelp.add_command(label="Pangram's list")
barra_menu.add_cascade(label="Help", menu=menuHelp)
menuAbout = Menu(barra_menu, tearoff=0)
menuAbout.add_command(label="Source Code", command=semComando)
menuAbout.add_command(label="Credits", command=semComando)
menuAbout.add_command(label="About", command=semComando)
app.config(menu=barra_menu)

titulo = Label(app, text="Fast Typing Test Challenge", font=("Helvetica", 20))
titulo.pack()

textoMenu = '''Level [1] - Type the Alphabet
Level [2] - Type the following quote: "The quick brown fox jumps over the lazy dog"
Level [3] - Type a random pangram of the list
— “The quick brown fox jumps over the lazy dog”
— “The five boxing wizards jump quickly”
— “Pack my box with five dozen liquor jugs”
— "The jay, pig, fox, zebra and my wolves quack!"
— "By Jove, my quick study of lexicography won a prize!"'''
# NOTE(review): this rebinds tkinter's Menu class name; rename the label
# variable if more menus ever need to be created below this point.
Menu = Label(app, text=textoMenu, font=("Helvetica", 12))
Menu.place(x=10, y=40)
app.mainloop() | StarcoderdataPython |
12808001 | <filename>app/site_image.py
from flask import Flask
from PIL import Image, ImageOps, ImageFilter
import base64
import io
import binascii
app = Flask(__name__)

# Public API of this module.
__all__ = ['normalize_image', 'get_data_uri']
def normalize_image(image_data=None):
    """Convert base64-encoded PNG drawing data into a 28x28 MNIST-style
    greyscale PIL image (light digit on a dark background).

    Raises TypeError when image_data is None.
    """
    if image_data is None:
        raise TypeError('Image data must not be None')
    # The received image data in an PNG file in RGB format, encoded in base 64.
    image_from_data = Image.open(io.BytesIO(binascii.a2b_base64(image_data)))
    # Create a new, all-white image.
    normalized_image = Image.new("RGB", image_from_data.size, "white")
    # Paste the submitted drawing on to it.
    normalized_image.paste(image_from_data, (0, 0), image_from_data)
    # Convert to greyscale
    normalized_image = normalized_image.convert('L')
    # Soften the edges a bit (blur radius scales with the image width)
    normalized_image = normalized_image.filter(ImageFilter.GaussianBlur(radius=image_from_data.size[0]/100))
    # Resize to MNIST image size.
    normalized_image = normalized_image.resize((28, 28))
    # Invert to match MNIST: white digits on a black background.
    normalized_image = ImageOps.invert(normalized_image)
    return normalized_image
def get_data_uri(image=None):
    """Return the given PIL image, inverted, as a base64-encoded PNG string.

    Note: despite the name, the returned string is bare base64 without a
    'data:image/png;base64,' prefix (the original assigned such a prefix
    to an unused local); callers are expected to prepend it themselves.

    Raises TypeError when image is None.
    """
    if image is None:  # identity check — idiomatic None test, matching normalize_image
        raise TypeError('Image must not be None')
    buffer = io.BytesIO()
    ImageOps.invert(image).save(buffer, format='PNG')
    encoded_image = base64.b64encode(buffer.getvalue())
    return encoded_image.decode('ascii')
| StarcoderdataPython |
12803188 | import py
from os import system, chdir
from urllib import urlopen

# Python 2 script: downloads the #pypy IRC log archive plus recent daily
# logs, normalises log filenames to YYYYMMDD, and prints per-day stats.

log_URL = 'http://tismerysoft.de/pypy/irc-logs/'
archive_FILENAME = 'pypy.tar.gz'

tempdir = py.test.ensuretemp("irc-log")

# get compressed archive
chdir( str(tempdir))
system('wget -q %s%s' % (log_URL, archive_FILENAME))
system('tar xzf %s' % archive_FILENAME)
chdir('pypy')

# get more recent daily logs
pypydir = tempdir.join('pypy')
for line in urlopen(log_URL + 'pypy/').readlines():
    i = line.find('%23pypy.log.')
    if i == -1:
        continue
    filename = line[i:].split('"')[0]
    system('wget -q %spypy/%s' % (log_URL, filename))

# rename to YYYYMMDD
for log_filename in pypydir.listdir('#pypy.log.*'):
    rename_to = None
    b = log_filename.basename
    if '-' in b:
        # Already date-stamped with dashes: just strip them.
        rename_to = log_filename.basename.replace('-', '')
    elif len(b) == 19:
        # Fixed-width "#pypy.log.DDMonYYYY" form: parse day/month/year.
        months= 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
        day = b[10:12]
        month = months.index(b[12:15]) + 1
        year = b[15:20]
        rename_to = '#pypy.log.%04s%02d%02s' % (year, month, day)
    if rename_to:
        log_filename.rename(rename_to)
        #print 'RENAMED', log_filename, 'TO', rename_to

# print sorted list of filenames of daily logs
print 'irc://irc.freenode.org/pypy'
print 'date, messages, visitors'
for log_filename in pypydir.listdir('#pypy.log.*'):
    n_messages, visitors = 0, {}
    f = str(log_filename)
    for s in file(f):
        # Lines with <nick> are chat messages; join lines count visitors.
        if '<' in s and '>' in s:
            n_messages += 1
        elif ' joined #pypy' in s:
            v = s.split()[1]
            visitors[v] = True
    print '%04s-%02s-%02s, %d, %d' % (f[-8:-4], f[-4:-2], f[-2:], n_messages, len(visitors.keys()))
| StarcoderdataPython |
11283478 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-11 07:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: enforce unique seat (no, show, seat_type) triples."""

    dependencies = [
        ('booking', '0005_auto_20170510_1941'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='seat',
            unique_together=set([('no', 'show', 'seat_type')]),
        ),
    ]
| StarcoderdataPython |
12847511 | <gh_stars>0
from nltk.corpus import reuters
import sys
import numpy as np
from scipy import optimize
# Loading data here
# Reuters corpus split into (raw text, categories) pairs by fileid prefix.
train_documents, train_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('training/')])
test_documents, test_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('test/')])
def col2norm(X):
    """Squared Euclidean norm of each column of X."""
    return (np.abs(X) ** 2).sum(axis=0)
def xray(X, r):
    """
    Greedy X-RAY column selection for near-separable NMF.

    At each step, picks the column of X most aligned with the current
    residual R, refits nonnegative weights on the chosen columns, and
    updates the residual.

    Removed dead code from the original: an unused `i = argmax(col2norm(X))`
    per outer iteration and an unused random draw `p` per inner iteration
    (note: dropping the draw changes the global np.random stream).

    @input : X data matrix, r number of columns to select
    @output: list of selected column indices
    """
    cols = []
    R = np.copy(X)
    while len(cols) < r:
        # Loop until we choose a column that has not been selected.
        while True:
            scores = col2norm(np.dot(R.T, X)) / col2norm(X)
            scores[cols] = -1  # IMPORTANT: never re-pick a chosen column
            best_col = np.argmax(scores)
            if best_col in cols:
                # Re-try
                continue
            cols.append(best_col)
            # Refit H on the chosen columns and recompute the residual.
            H, rel_res = NNLSFrob(X, cols)
            R = X - np.dot(X[:, cols], H)
            break
    return cols
def GP_cols(data, r):
    """For every row, vote for its argmin and argmax columns; return the r
    most-voted column indices (ties broken by first appearance)."""
    votes = {}
    for row in data:
        for ind in (np.argmin(row), np.argmax(row)):
            votes[ind] = votes.get(ind, 0) + 1
    ranked = sorted(votes.items(), key=lambda item: item[1], reverse=True)
    return [ind for ind, _ in ranked][0:r]
def NNLSFrob(X, cols):
    """
    Solve min ||X - X[:, cols] @ H||_F with H >= 0, one column at a time.

    @input : X data matrix, cols list of selected column indices
    @output: (H, rel_res) — nonnegative coefficient matrix and the relative
             Frobenius-norm residual.
    """
    ncols = X.shape[1]
    H = np.zeros((len(cols), ncols))
    # range() instead of Python-2-only xrange(), matching the py3-style
    # print calls used elsewhere in this file.
    for i in range(ncols):
        sol, res = optimize.nnls(X[:, cols], X[:, i])
        H[:, i] = sol
    rel_res = np.linalg.norm(X - np.dot(X[:, cols], H), 'fro')
    rel_res /= np.linalg.norm(X, 'fro')
    return H, rel_res
def ComputeNMF(data, colnorms, r):
    """
    Select r columns via X-RAY and fit nonnegative weights.

    Dead computations removed from the original: pinv(diag(colnorms)) and
    an SVD-based projection were computed but never used. `colnorms` is
    kept in the signature for interface compatibility.

    @input : data matrix, colnorms column norms (currently unused), r rank
    @output: (cols, H, rel_res)
    """
    data = np.copy(data)
    cols = xray(data, r)
    H, rel_res = NNLSFrob(data, cols)
    return cols, H, rel_res
def ParseMatrix(matpath):
    """Read a whitespace-separated matrix file into an ndarray, dropping
    the first token (the row label) of every line."""
    with open(matpath, 'r') as handle:
        rows = [[float(tok) for tok in line.split()[1:]] for line in handle]
    return np.array(rows)
def ParseColnorms(colpath):
    """Read one column norm per line: the last whitespace-separated token."""
    with open(colpath, 'r') as handle:
        return [float(line.split()[-1]) for line in handle]
# NOTE(review): ParseMatrix/ParseColnorms expect file *paths*, but
# train_documents/train_categories are tuples of raw text built above —
# this driver looks broken as written; confirm the intended inputs.
data = ParseMatrix(train_documents)
colnorms = ParseColnorms(train_categories)
r = 4
cols, H, rel_res = ComputeNMF(data, colnorms, r)
cols.sort()
print("Final result: ", rel_res)
| StarcoderdataPython |
1706651 | from public import getDepth, getTradeHistory
from trade import TradeAPI
from scraping import scrapeMainPage
from keyhandler import KeyHandler
import mexbtcapi.api.btce
from mexbtcapi.api.btce.high_level import BTCeMarket, BTCeParticipant, BTCeSecretFileContainer

import logging
# NOTE(review): the logger returned here is discarded; assign it if needed.
logging.getLogger(__name__)

# Re-export the BTC-e bindings under generic names used by callers.
name = BTCeMarket.MARKET_NAME
market = BTCeMarket
participant = BTCeParticipant
secret_container = BTCeSecretFileContainer
| StarcoderdataPython |
325247 | <reponame>matt-ullmer/Diamond
# coding=utf-8
"""
Uses /proc/vmstat to collect data on virtual memory manager
#### Dependencies
* /proc/vmstat
"""
import diamond.collector
import os
import re
class VMStatCollector(diamond.collector.Collector):
    """Collects page-in/out and swap-in/out counters from /proc/vmstat and
    publishes their per-interval derivatives."""

    PROC = '/proc/vmstat'

    # Wrap-around ceilings for the derivative() calls below.
    MAX_VALUES = {
        'pgpgin': diamond.collector.MAX_COUNTER,
        'pgpgout': diamond.collector.MAX_COUNTER,
        'pswpin': diamond.collector.MAX_COUNTER,
        'pswpout': diamond.collector.MAX_COUNTER,
    }

    def get_default_config_help(self):
        """Return help text for this collector's config options."""
        config_help = super(VMStatCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(VMStatCollector, self).get_default_config()
        config.update({
            'path': 'vmstat'
        })
        return config

    def collect(self):
        """Parse /proc/vmstat and publish rates for the tracked counters."""
        if not os.access(self.PROC, os.R_OK):
            return None

        # Raw string fixes the invalid \s / \d escape sequences of the
        # original non-raw pattern.
        reg = re.compile(r'^(pgpgin|pgpgout|pswpin|pswpout)\s(\d+)')

        results = {}
        # 'with' closes the file even if parsing raises (the original
        # leaked the handle on exceptions and shadowed the builtin `file`).
        with open(self.PROC) as proc_file:
            for line in proc_file:
                match = reg.match(line)
                if match:
                    name = match.group(1)
                    value = match.group(2)
                    results[name] = self.derivative(name,
                                                    int(value),
                                                    self.MAX_VALUES[name])

        for key, value in results.items():
            self.publish(key, value, 2)
| StarcoderdataPython |
3225024 | import aceutils
# Prepare the output directory and change into it before running Doxygen.
aceutils.cdToScript()
aceutils.mkdir('../Doxygen')
aceutils.cd(r'../Doxygen/')
aceutils.call(r'doxygen ../Script/Doxyfile_cpp_XML') | StarcoderdataPython |
4807123 | <reponame>kevinjalbert/h<filename>tests/h/h_api/bulk_api/id_references_test.py
import pytest
from h.h_api.bulk_api.id_references import IdReferences
from h.h_api.bulk_api.model.data_body import CreateGroupMembership
from h.h_api.enums import DataType
from h.h_api.exceptions import UnpopulatedReferenceError
class TestIdReferences:
    """Tests for IdReferences: mapping reference ids to concrete ids."""

    def test_we_can_add_a_concrete_id(self):
        """A (data_type, ref) pair stores the given concrete id."""
        id_refs = IdReferences()

        id_refs.add_concrete_id(DataType.GROUP.value, "my_ref", "real_id")

        assert id_refs._ref_to_concrete[DataType.GROUP]["my_ref"] == "real_id"

    def test_we_can_fill_out_a_reference(self, group_membership_body):
        """fill_out() replaces both group and member refs with concrete ids."""
        id_refs = IdReferences()
        id_refs.add_concrete_id(DataType.GROUP, "group_ref", "real_group_id")
        id_refs.add_concrete_id(DataType.USER, "user_ref", "real_user_id")

        id_refs.fill_out(group_membership_body)

        group_id = group_membership_body["data"]["relationships"]["group"]["data"]["id"]
        member_id = group_membership_body["data"]["relationships"]["member"]["data"][
            "id"
        ]
        assert group_id == "real_group_id"
        assert member_id == "real_user_id"

    def test_with_missing_references_we_raise_UnpopulatedReferenceError(
        self, group_membership_body
    ):
        """fill_out() with no registered concrete ids must raise."""
        id_refs = IdReferences()

        with pytest.raises(UnpopulatedReferenceError):
            id_refs.fill_out(group_membership_body)

    @pytest.fixture
    def group_membership_body(self):
        """Raw JSON-API body for a group-membership create request."""
        group_membership = CreateGroupMembership.create("user_ref", "group_ref")
        return group_membership.raw
| StarcoderdataPython |
8102571 | #!/usr/bin/env python3
#
# Copyright 2019, 2020, 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Check ClickHouse zone tables to see if we have information on the
# zone serial passed as argument. If not, and the serial is later than
# the last one seen, obtain query all Visualizer nodes via their service
# address to get the latency, and do an XFR on the zone at a configured
# server to find its size.
#
import datetime
import logging
import random
import selectors
import socket
import sys
import textwrap
import dns.message
import dns.rdatatype
import psycopg2
import clickhouse_driver
import clickhouse_driver.errors
import dsv.common.NodeFlag as dnf
description = 'update Visualizer zone RSSAC data.'
class NotifyError(Exception):
pass
def cmp_serial(a, b):
    # pylint: disable=invalid-name
    """Compare serial numbers. -1 if a is before b, 0 if a=b, else 1.

    Uses RFC 1982 serial number arithmetic with SERIAL_BITS=32: a precedes
    b when the forward distance from a to b is less than 2**31.
    """
    if a == b:
        return 0
    # Fix: the original compared against 2**31 - 1, mis-ordering pairs whose
    # distance is exactly 2**31 - 1 (RFC 1982 uses the 2**(SERIAL_BITS-1)
    # half-space boundary).
    half = 2 ** 31
    if (a < b and (b - a) < half) or (a > b and (a - b) > half):
        return -1
    return 1
def find_latest_serial(serials):
    """Find latest zone serial in a list of serials (RFC 1982 ordering).

    Returns None when *serials* is empty.
    """
    res = None
    for s in serials:
        # Fix: the original used "if not res", which treated a legitimate
        # serial value of 0 as "no result yet" and unconditionally replaced
        # it with the next candidate, losing the RFC 1982 comparison.
        if res is None or cmp_serial(res, s) < 0:
            res = s
    return res
def latest_serial(ch_client, zone, server):
    """Find the latest zone serial for the server.
    Look back through the last month and select the latest serial
    seen for the named server.

    ch_client: a clickhouse_driver client; zone/server: name strings.
    Returns None when no rows match.
    """
    # NOTE(review): zone/server are interpolated with str.format rather than
    # bound as query parameters -- safe only if they come from trusted
    # configuration; verify callers.
    recs = ch_client.execute(textwrap.dedent("""\
        SELECT
            Serial
        FROM dsv.ZoneLatency
        WHERE Date >= subtractMonths(today(), 1) AND Date <= today()
            AND Zone='{zone}'
            AND NodeID IN
            (
                SELECT toUInt16(node_id)
                FROM dsv.node_text
                WHERE server_name='{server}' )""".format(zone=zone, server=server)))
    # Each row is a 1-tuple (Serial,); reduce with RFC 1982 ordering.
    return find_latest_serial([r[0] for r in recs])
def get6addr(name):
    """Get IPv6 address for name. If necessary, an IPv4 over IPv6 address.

    Returns a sockaddr tuple suitable for an AF_INET6 socket, port 'domain'
    (53). Raises socket.gaierror if neither lookup resolves.
    """
    try:
        addr = socket.getaddrinfo(name, 'domain', family=socket.AF_INET6)
    except socket.gaierror:
        # No AAAA record: fall back to the IPv4 address wrapped as an
        # IPv4-mapped IPv6 address (::ffff:a.b.c.d).
        addr = socket.getaddrinfo(name, 'domain')
        addr6 = '::ffff:' + addr[0][4][0]
        addr = socket.getaddrinfo(addr6, 'domain', family=socket.AF_INET6)
    return addr[0][4]
def make_query_list(nodelist, zone):
    """Make a list of queries for each node with no current latency value.

    Query IDs are consecutive starting from a random first_id, so a
    response ID maps back to its node via (reply.id - first_id). Returns
    (first_id, node_id_list, query_list) with parallel ordering.
    """
    first_id = random.randint(1, 32000 - len(nodelist))
    nids = []
    querylist = []
    # Only nodes that have not yet reported a latency get queried.
    pending = [(nid, n) for nid, n in nodelist.items() if 'latency' not in n]
    for offset, (nid, n) in enumerate(pending):
        # pylint: disable=no-member
        query = dns.message.make_query(zone, dns.rdatatype.SOA)
        query.id = first_id + offset
        nids.append(nid)
        querylist.append((query, n['addr']))
    return (first_id, nids, querylist)
def send_and_receive(queries, nodelist, serial, sock, start_time, total_nodes):
    """Send queries and receive responses.
    When we get a response with a serial matching the expected,
    store the latency into the node information.

    queries: (first_id, nids, querylist) from make_query_list.
    Returns the number of nodes still awaiting a matching response.
    Raises NotifyError if a node reports a serial newer than *serial*.
    """
    (first_id, nids, querylist) = queries
    to_receive = len(querylist)
    # Start by listening for read and write ready. Once we've finished
    # sending queries, switch to listening for read only.
    sel = selectors.DefaultSelector()
    sel.register(sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
    while querylist or to_receive > 0:
        events = sel.select(1)
        # No action for a second? Give up on this pass.
        if not events:
            break
        if events[0][1] & selectors.EVENT_WRITE:
            if not querylist:
                sel.unregister(sock)
                sel.register(sock, selectors.EVENT_READ)
                continue
            q = querylist.pop()
            sock.sendto(q[0].to_wire(), q[1])
        if events[0][1] & selectors.EVENT_READ:
            (wire, from_addr) = sock.recvfrom(65535)
            try:
                reply = dns.message.from_wire(wire)
                # Map the DNS message ID back onto our node index.
                i = reply.id - first_id
                if i < 0 or i >= len(nids):
                    logging.warning('DNS response from {addr}: ID out of range'.format(
                        addr=from_addr))
                    continue
                if not reply.answer:
                    logging.warning('DNS response from {addr}: No Answer RR'.format(
                        addr=from_addr))
                    continue
                rec_serial = reply.answer[0].to_rdataset()[0].serial
                serial_match = cmp_serial(serial, rec_serial)
                if serial_match == 0:
                    # Node has the expected serial: record latency and the
                    # percentage of nodes updated so far.
                    to_receive -= 1
                    now = datetime.datetime.now()
                    nodelist[nids[i]]['latency'] = int((now - start_time).total_seconds())
                    nodelist[nids[i]]['time'] = now
                    nodelist[nids[i]]['percent_done'] = \
                        int(((total_nodes - to_receive) * 100)/total_nodes)
                elif serial_match < 0:
                    # The zone has moved past the serial we were asked to
                    # track: abandon this run.
                    sel.unregister(sock)
                    raise NotifyError('Serial {serial} obsolete, '
                                      'zone has {rec_serial}'.format(
                                          serial=serial,
                                          rec_serial=rec_serial))
            except dns.exception.FormError:
                logging.warning('Response from {addr}: Not a DNS packet'.format(
                    addr=from_addr))
    sel.unregister(sock)
    return to_receive
def update_latency_table(ch_client, zone, serial, nodelist, dryrun):
    """Write per-node latency rows for (zone, serial) into dsv.ZoneLatency.

    In dry-run mode rows are printed instead of inserted. Nodes without a
    recorded latency are logged as non-responders and skipped.
    """
    values = []
    for nid, n in nodelist.items():
        if 'latency' in n:
            if dryrun:
                print('Node ID {nid} zone {zone} serial {serial}: latency '
                      '{latency} percent nodes updated {percent} at {date}'.format(
                          nid=nid, zone=zone, serial=serial,
                          latency=n['latency'], percent=n['percent_done'],
                          date=n['time']))
            else:
                logging.debug('Node ID {nid} zone {zone} serial {serial}: '
                              'latency {latency} percent {percent} at {date}'.format(
                                  nid=nid, zone=zone,
                                  serial=serial, latency=n['latency'],
                                  percent=n['percent_done'], date=n['time']))
                # Row layout mirrors the INSERT column list below.
                values.append({'Date': n['time'].date(),
                               'DateTime': n['time'],
                               'NodeID': nid,
                               'Zone': zone,
                               'Serial': serial,
                               'Latency': n['latency'],
                               'PercentNodesUpdated': n['percent_done']})
        else:
            logging.warning('No response from node {name}'.format(name=n['name']))
    if values:
        ch_client.execute(
            'INSERT INTO dsv.ZoneLatency(Date, DateTime, NodeID, Zone, '
            'Serial, Latency, PercentNodesUpdated) VALUES',
            values)
def getnodelist(pgcur, server):
    """Get the list of servers to be examined.
    We do not examine servers with no service address or servers
    flagged as either inactive or not for RSSAC.
    Return a dictionary with service address, node_id and node names.
    We read the data from Postgres so we can avoid exposing the
    service addresses to ClickHouse.

    Returns: {node_id: {'addr': sockaddr, 'name': node_name}, ...}
    """
    res = {}
    flagmask = dnf.NodeFlag.NOT_RSSAC.value
    # Parameterised query: flag filter plus server-name lookup.
    pgcur.execute('SELECT service_addr, id, name FROM node '
                  'WHERE length(service_addr) > 0 AND '
                  '(flags & %s)=0 AND '
                  'server_id=(SELECT id FROM node_server WHERE name=%s)',
                  (flagmask, server,))
    for rec in pgcur:
        try:
            addr = get6addr(rec[0])
            res[rec[1]] = {'addr': addr, 'name': rec[2]}
        except OSError as ose:
            # Unresolvable service address: log and keep going with the rest.
            logging.warning('Bad service address "{addr}" for node {node}: {err}'.format(
                addr=rec[0], node=rec[2], err=ose))
    return res
def latencies(ch_client, nodelist, zone, serial, timeout, dryrun, percent_required):
    """Obtain latencies for server nodes.
    Send SOA queries and collect responses to all nodes in the
    list (actually dictionary) until we're all done or we time out.

    Records results via update_latency_table and returns False when fewer
    than *percent_required* percent of nodes answered, True otherwise.
    """
    with socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) as inet6_sock:
        inet6_sock.setblocking(False)
        start_time = datetime.datetime.now()
        end_time = start_time + datetime.timedelta(seconds=timeout)
        total_nodes = len(nodelist)
        outstanding = total_nodes
        # Re-query only the still-silent nodes each pass until all answer
        # or the deadline passes.
        while datetime.datetime.now() < end_time:
            queries = make_query_list(nodelist, zone)
            outstanding = send_and_receive(queries, nodelist, serial,
                                           inet6_sock, start_time, total_nodes)
            if outstanding == 0:
                break
        update_latency_table(ch_client, zone, serial, nodelist, dryrun)
        percent_updated = ((total_nodes - outstanding) * 100) // total_nodes
        if percent_updated < percent_required:
            logging.error('{updated}% nodes updated, under required threshold '
                          '{required}%'.format(updated=percent_updated, required=percent_required))
            return False
        return True
def add_args(parser):
    """Register this tool's command-line options on *parser*."""
    add = parser.add_argument
    # Optional switches.
    add('-n', '--dry-run',
        dest='dryrun', action='store_true', default=False,
        help='perform a trial run')
    add('-t', '--timeout', type=int,
        dest='timeout', action='store', default=800,
        help='timeout finding latencies (default %(default)s)',
        metavar='SECONDS')
    # Required positional argument.
    add('serial', type=int,
        help='zone serial number',
        metavar='SERIAL')
def main(args, cfg):
    """Entry point: decide whether *args.serial* is new and, if so, measure
    per-node zone propagation latency and record it.

    cfg supplies 'clickhouse', 'postgres' and 'rssac' sections.
    Returns a process exit status (0 success/ignored, 1 failure).
    """
    logging.debug('dsv-rssac-notify called, serial {}'.format(args.serial))
    clickhouse = cfg['clickhouse']
    # Pick one ClickHouse server at random from the configured pool.
    ch_server = random.choice(clickhouse['servers'].split(',')).strip()
    ch_user = clickhouse['user']
    ch_pass = clickhouse['password']
    logging.debug('Using ClickHouse server {}'.format(ch_server))
    pgcfg = cfg['postgres']
    rssaccfg = cfg['rssac']
    server = rssaccfg['server']
    zone = rssaccfg['zone']
    percent = int(rssaccfg['load-time-threshold'])
    try:
        ch_client = clickhouse_driver.Client(host=ch_server, user=ch_user, password=ch_pass)
        pgconn = psycopg2.connect(host=pgcfg['host'],
                                  dbname=pgcfg['database'],
                                  user=pgcfg['user'],
                                  password=pgcfg['password'])
        with pgconn.cursor() as pgcur:
            nodelist = getnodelist(pgcur, server)
        pgconn.close()
        latest = latest_serial(ch_client, zone, server)
        # NOTE(review): "if latest" treats a last-known serial of 0 as
        # "nothing known"; presumably acceptable for root-zone serials --
        # verify if serial 0 can legitimately occur.
        if latest and cmp_serial(latest, args.serial) >= 0:
            logging.debug('Serial {serial} is not newer than the latest known ({latest}).'.format(
                serial=args.serial, latest=latest))
            return 0
        ok = latencies(ch_client, nodelist, zone, args.serial, args.timeout, args.dryrun, percent)
        return 0 if ok else 1
    except NotifyError as err:
        # Zone already moved on: not an error for this run.
        logging.warning(err)
        return 0
    except (OSError, clickhouse_driver.errors.Error) as err:
        logging.error(err)
        print(err, file=sys.stderr)
        return 1
| StarcoderdataPython |
3277676 | <gh_stars>0
from scrapy import spider,Request
from wikiPageRank.items import WikipagerankItem
class WikiSpider(spider.Spider):
    """Crawls Vietnamese Wikipedia: for each page, emits one item holding
    the set of article links found in its paragraphs, then follows them."""

    name = "wiki"
    allowed_domains = ["vi.wikipedia.org"]
    start_urls = [
        'https://vi.wikipedia.org/wiki/Chi%E1%BA%BFn_tranh_bi%C3%AAn_gi%E1%BB%9Bi_Vi%E1%BB%87t%E2%80%93Trung_1979'
    ]

    def parse(self, response):
        """Extract outgoing article links, yield a src/dst item, then
        schedule each destination page for crawling."""
        excluded_prefix = "https://vi.wikipedia.org/w/"
        destinations = []
        hrefs = response.xpath('//div[@id="mw-content-text"]/div[@class="mw-parser-output"]/p/a/@href').extract()
        for href in hrefs:
            # Skip in-page anchors and service (non-article) URLs.
            if "#" in href:
                continue
            absolute = response.urljoin(href)
            if excluded_prefix in absolute:
                continue
            destinations.append(absolute)
        item = WikipagerankItem()
        item["src"] = response.url
        item["dst"] = set(destinations)
        yield item
        for link in destinations:
            yield Request(link, callback=self.parse)
| StarcoderdataPython |
1990900 | """
Provides jupyter server proxy endpoints for launching Jaeger.
"""
__version__ = "1.0.3"
import pathlib
# https://jupyter-server-proxy.readthedocs.io/en/latest/server-process.html
def setup_jaeger_proxy():
    """Server-process spec for the jaeger-browser helper (hidden launcher)."""
    spec = {}
    spec["command"] = ["jaeger-browser"]
    spec["environment"] = {"PORT": "{port}"}
    spec["launcher_entry"] = {"enabled": False}
    spec["timeout"] = 60
    return spec
def setup_jaeger_all():
    """Server-process spec for jaeger-all-in-one, proxied under {base_url}jaeger."""
    icon = pathlib.Path(__file__).parent / "jaeger.svg"
    command = [
        "jaeger-all-in-one",
        "--query.port",
        "{port}",
        "--query.base-path",
        "{base_url}jaeger",
    ]
    return {
        "command": command,
        "absolute_url": True,
        "launcher_entry": {
            "enabled": True,
            "icon_path": str(icon),
            "title": "Jaeger",
        },
    }
| StarcoderdataPython |
3497810 | from .decorators import (timeout_decorator, timeout, retry)
| StarcoderdataPython |
5181707 | #!/usr/bin/env python3
#
# A PyMol extension script to compare Elfin solution against specification.
#
# *Needs to be re-implemented to deal with new spec and solution format.
#
from pymol import cmd
import numpy as np
import elfinpy
def compare_solutions(spec_file=None, sol_csv_file=None):
    """
    Compares solution center-of-mass points against the specification.
    Args:
    - spec_file - a csv or json file string path
    - sol_csv_file - a csv file string path
    """
    if spec_file is None or sol_csv_file is None:
        print(compare_solutions.__doc__)
        return
    # Load specification points from CSV or JSON.
    if spec_file.rfind('.csv') != -1:
        spec_pts = elfinpy.read_csv_points(spec_file)
    elif spec_file.rfind('.json') != -1:
        import json  # local import: json is not imported at module level
        with open(spec_file, 'r') as file:
            spec_pts = np.asarray(json.load(file)['coms'])
    else:
        # Fix: the original used a Python 2 print statement here (a
        # SyntaxError on Python 3) and then fell through, crashing later
        # on an undefined spec_pts. Report the error and stop instead.
        print('Unknown spec file format')
        return
    sol_pts = elfinpy.read_csv_points(sol_csv_file)
    # Centre both point clouds on their centroids.
    centred_spec = spec_pts - np.mean(spec_pts, axis=0)
    centred_sol = sol_pts - np.mean(sol_pts, axis=0)
    # Draw specification
    draw_pts(centred_spec, color=[0.7, 0, 0])
    # Equalise sample points
    specUpPts = elfinpy.upsample(centred_spec, centred_sol)
    draw_pts(specUpPts, color=[0.5, 0.5, 0])
    # Find Kabsch rotation for solution -> spec
    # NOTE(review): 'kabsch' and 'draw_pts' are not imported in this file;
    # presumably injected by the PyMOL/elfin environment -- verify.
    R = kabsch.run_kabsch(centred_spec, specUpPts)
    centredSpecR = np.dot(centred_spec, R)
    draw_pts(centredSpecR, color=[0, 0.5, 0.7])
# Reset the PyMOL view, disable depth cueing and register the command.
cmd.reset()
cmd.set("depth_cue", 0)
cmd.extend("compare_solutions", compare_solutions)
# Fix: corrected typo in the load banner ("Solutios" -> "Solutions").
print('Compare Solutions Loaded')
def main():
    """Guard entry point: this file is a PyMOL extension, not a script."""
    message = 'This module should not be executed as a script'
    raise RuntimeError(message)
if __name__ =='__main__':
main() | StarcoderdataPython |
11298497 | from autonmt.toolkits.autonmt import AutonmtTranslator
from autonmt.toolkits.fairseq import FairseqTranslator
| StarcoderdataPython |
6585257 | <reponame>DVSR1966/par4all<gh_stars>10-100
from __future__ import with_statement
from pyps import workspace,module
#Deleting workspace (remove any stale workspace with the same name)
workspace.delete("hyantes")
#Creating workspace from the two hyantes sources, auto-deleted on close
w = workspace("hyantes/hyantes.c","hyantes/options.c",cppflags='-Ihyantes',name="hyantes",deleteOnClose=True)
#Add some default property: stop on user errors, use region chains analysis
w.props.abort_on_user_error=True
w.activate(module.region_chains)
w.props.constant_path_effects=False
# Privatize the scalar variables of the selected function.
w["hyantes!do_run_AMORTIZED_DISK"].privatize_module()
#Closing workspace
w.close()
| StarcoderdataPython |
8083640 | import unittest
from Tags import *
from functools import cmp_to_key
"""
Author: <NAME>
Problem Description/link: https://leetcode.com/problems/largest-number/
"""
class Solution(object):
    """LeetCode 179 'Largest Number': arrange integers to form the largest
    possible number, returned as a string."""

    def getTags(self):
        """Problem metadata tags."""
        return [Difficulty.Medium, Topic.Math]

    def _cmpNum(self, x, y):
        """Comparator: x before y when 'xy' concatenation beats 'yx'."""
        left, right = str(x), str(y)
        return 1 if left + right > right + left else -1

    def largestNumber(self, nums):
        """Sort by the concatenation comparator (in place), join, and strip
        redundant leading zeros (keeping a single '0' for all-zero input)."""
        nums.sort(key=cmp_to_key(self._cmpNum), reverse=True)
        digits = "".join(str(value) for value in nums)
        while len(digits) > 1 and digits[0] == "0":
            digits = digits[1:]
        return digits
class test_largestNumber(unittest.TestCase):
    """Unit tests for Solution.largestNumber."""
    def test_1(self):
        # Covers shared-prefix ordering, all-zero input (must collapse to
        # "0"), and simple two-element orderings.
        self.assertEqual("12341123412341", Solution().largestNumber([12341,123411234]))
        self.assertEqual("43243432", Solution().largestNumber([432,43243]))
        self.assertEqual("0", Solution().largestNumber([0,0]))
        self.assertEqual("9534330", Solution().largestNumber([3,30,34,5,9]))
        self.assertEqual("210", Solution().largestNumber([10, 2]))
        self.assertEqual("210", Solution().largestNumber([2, 10]))
11219392 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from pandas_ml.confusion_matrix.cm import LabeledConfusionMatrix, ConfusionMatrix # noqa
from pandas_ml.confusion_matrix.bcm import BinaryConfusionMatrix # noqa
| StarcoderdataPython |
8182970 | <reponame>ABDUL174/fekyou
# coding=utf-8
import os
import subprocess
from core import fekyou
from core import fekyousCollection
from tools.others.android_attack import AndroidAttackTools
from tools.others.email_verifier import EmailVerifyTools
from tools.others.hash_crack import HashCrackingTools
from tools.others.homograph_attacks import IDNHomographAttackTools
from tools.others.mix_tools import MixTools
from tools.others.payload_injection import PayloadInjectorTools
from tools.others.socialmedia import SocialMediaBruteforceTools
from tools.others.socialmedia_finder import SocialMediaFinderTools
from tools.others.web_crawling import WebCrawlingTools
from tools.others.wifi_jamming import WifiJammingTools
class HatCloud(fekyou):
    """Tool wrapper that runs the HatCloud Ruby script to look up the
    origin IP behind a CloudFlare-fronted site."""
    TITLE = "HatCloud(Bypass CloudFlare for IP)"
    DESCRIPTION = "HatCloud build in Ruby. It makes bypass in CloudFlare for " \
                  "discover real IP."
    # Shell commands run once to install the tool.
    INSTALL_COMMANDS = ["git clone https://github.com/HatBashBR/HatCloud.git"]
    PROJECT_URL = "https://github.com/HatBashBR/HatCloud"
    def run(self):
        """Prompt for a target site and invoke hatcloud.rb against it.

        Side effect: changes the process working directory to 'HatCloud'.
        """
        site = input("Enter Site >> ")
        os.chdir("HatCloud")
        subprocess.run(["sudo", "ruby", "hatcloud.rb", "-b", site])
class OtherTools(fekyousCollection):
    """Menu collection grouping the miscellaneous attack/utility tools."""
    TITLE = "Other tools"
    # Tool instances listed under this menu entry.
    TOOLS = [
        SocialMediaBruteforceTools(),
        AndroidAttackTools(),
        HatCloud(),
        IDNHomographAttackTools(),
        EmailVerifyTools(),
        HashCrackingTools(),
        WifiJammingTools(),
        SocialMediaFinderTools(),
        PayloadInjectorTools(),
        WebCrawlingTools(),
        MixTools()
    ]
| StarcoderdataPython |
1948522 | from sqlalchemy import create_engine
from sqlalchemy.orm.session import sessionmaker
def get_session(Base):
    """Create an in-memory SQLite engine, create all of *Base*'s tables on
    it, and return a fresh Session bound to that engine."""
    engine = create_engine("sqlite:///")
    Base.metadata.create_all(engine)
    session_factory = sessionmaker(engine)
    return session_factory()
| StarcoderdataPython |
4899577 | <reponame>Ewpratten/Graphite
# Forward lookup table: character -> numeric code (space=0, a..z=1..26,
# colon=27). Generated instead of hand-written; insertion order matches
# the original literal (space, a..z, colon).
letters_numbers = {
    " ": 0,
    **{chr(ord("a") + i): i + 1 for i in range(26)},
    ":": 27,
}
# Reverse lookup table: numeric code -> character (0=space, 1..26=a..z,
# 27=colon), mirroring letters_numbers.
# Fix: the original keyed 's' under 29 instead of 19, leaving code 19
# (the value the forward table assigns to 's') with no reverse entry.
numbers_letters = {
    0: " ",
    **{i + 1: chr(ord("a") + i) for i in range(26)},
    27: ":",
}
11375362 | <reponame>ybkuroki/selenium-e2e-sample
#!/usr/local/bin/python3
from selenium.webdriver.common.keys import Keys
from pages import PageObject
from time import sleep
# Login page (page object for the login screen)
class LoginPage(PageObject):
    """Page object wrapping the login form."""

    # XPath locators for the login-form controls.
    _USERNAME_INPUT = "//div[contains(@class, 'field')]/input[@type='text']"
    _PASSWORD_INPUT = "//div[contains(@class, 'field')]/input[@type='password']"
    _LOGIN_BUTTON = "//button"

    def set_user_name(self, username):
        """Type *username* into the user-name field."""
        field = self.find_element_by_xpath(self._USERNAME_INPUT)
        field.send_keys(username)

    def set_password(self, password):
        """Type *password* into the password field."""
        field = self.find_element_by_xpath(self._PASSWORD_INPUT)
        field.send_keys(password)

    def click_login_button(self):
        """Press the login button, then pause briefly for the page change."""
        button = self.find_element_by_xpath(self._LOGIN_BUTTON)
        button.click()
        sleep(1)
| StarcoderdataPython |
6484940 | <filename>refresher/index.py
#!/usr/bin/python
import sys
def utf8_print(string=''):
    """Write *string*, UTF-8 encoded and newline-terminated, to the raw
    stdout byte buffer (bypassing text-mode encoding)."""
    payload = string.encode('utf8') + b'\n'
    sys.stdout.buffer.write(payload)
# Turn on debug mode (renders tracebacks as HTML in the browser).
import cgitb
cgitb.enable()
# Print necessary headers. The blank utf8_print() emits the empty line
# that separates CGI headers from the body.
utf8_print("Content-Type: text/html")
utf8_print()
# Emit the page body from the static template file.
with open('index.template.html', 'r', encoding='utf-8') as template_file:
    template = template_file.read()
    template_file.close()  # NOTE(review): redundant -- the with-block already closes it
utf8_print(template)
| StarcoderdataPython |
5103330 | <filename>deepspeed/runtime/eigenvalue.py
import torch
from deepspeed.utils import log_dist
import numpy as np
import logging
class Eigenvalue(object):
    """Estimates per-block dominant Hessian eigenvalues of a model via
    power iteration over Hessian-vector products (torch.autograd.grad on
    existing gradients), then normalizes them to (0, 1.0]."""
    def __init__(self,
                 verbose=False,
                 max_iter=100,
                 tol=1e-2,
                 stability=0,
                 gas_boundary_resolution=1,
                 layer_name='',
                 layer_num=0):
        """Configure the estimator.

        max_iter/tol: power-iteration limit and relative convergence
        tolerance. stability: epsilon added to norms to avoid div-by-zero.
        layer_name: dotted attribute path to the layer container inside the
        module; layer_num: number of blocks in that container. Both must
        be set (asserted below).
        """
        super().__init__()
        self.verbose = verbose
        self.max_iter = max_iter
        self.tol = tol
        self.stability = stability
        self.gas_boundary_resolution = gas_boundary_resolution
        self.layer_name = layer_name
        self.layer_num = layer_num
        assert len(self.layer_name) > 0 and layer_num > 0
        log_dist(
            f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, layer_name={layer_name}, layer_num={layer_num}',
            ranks=[0])
    # Replace all nan/pos-inf/neg-inf to zero
    # TODO: Pytorch new version may add this function, replace this one by then.
    def nan_to_num(self, x):
        """Zero out NaN/+inf/-inf values, round-tripping via NumPy on CPU
        and restoring the tensor to its original device."""
        device = x.device
        x = x.cpu().numpy()
        x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
        return torch.from_numpy(x).to(device)
    def normalize(self, v):
        """Scale the list-of-tensors vector *v* to unit norm (plus the
        stability epsilon) and sanitize non-finite entries."""
        norm_squared = self.inner_product(v, v)
        norm = norm_squared**0.5 + self.stability
        normalized_vectors = [vector / norm for vector in v]
        normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors]
        return normalized_vectors
    def inner_product(self, xs, ys):
        """Dot product of two list-of-tensors vectors (scalar tensor)."""
        return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])
    def get_layers(self, module):
        """Resolve self.layer_name as a dotted attribute path on *module*
        and return the addressed (indexable) layer container."""
        scope_names = self.layer_name.split('.')
        assert len(scope_names) > 0
        m = module
        for name in scope_names:
            assert hasattr(m, name), "layer_name configuration is invalid."
            m = getattr(m, name)
        return m
    def compute_eigenvalue(self, module, device=None, scale=1.0):
        """Run power iteration per block and return {param_id: (eigenvalue,
        block_index)} for every parameter of every block, or [] when the
        model does not support second-order gradients.

        Must be called after backward(): it consumes p.grad with a live
        grad_fn. *scale* rescales v each iteration (and the final value),
        presumably for fp16 range control -- verify against callers.
        """
        block_eigenvalue = []
        param_keys = []
        layers = self.get_layers(module)
        for block in range(self.layer_num):
            model_block = layers[block]
            # We found this randn() has obvious accuracy impact in some cases, save/recover random state here.
            rng_state = torch.random.get_rng_state()
            if device is None:
                v = [
                    torch.randn(p.size()) for p in model_block.parameters()
                    if p.grad is not None and p.grad.grad_fn is not None
                ]
            else:
                v = [
                    torch.randn(p.size(),
                                device=device) for p in model_block.parameters()
                    if p.grad is not None and p.grad.grad_fn is not None
                ]
            torch.random.set_rng_state(rng_state)
            grads = [
                param.grad for param in model_block.parameters()
                if param.grad is not None and param.grad.grad_fn is not None
            ]
            params = [
                param for param in model_block.parameters()
                if param.grad is not None and param.grad.grad_fn is not None
            ]
            # Note: keys cover ALL block parameters, including those
            # filtered out of the grads/params lists above.
            layer_keys = [id(p) for p in model_block.parameters()]
            param_keys.append(layer_keys)
            v = self.normalize(v)
            # Disable eigenvalue if the model doesn't support second order gradients computation,
            # e.g. when enabling DS transformer kernel.
            if len(grads) == 0 or len(params) == 0:
                log_dist(f'The model does NOT support eigenvalue computation.',
                         ranks=[0],
                         level=logging.WARNING)
                return []
            i = 0
            eigenvalue_current, eigenvalue_previous = 1., 0.
            # Power iteration: repeat Hv products until the relative change
            # in the Rayleigh quotient falls below tol (or max_iter hits).
            while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs(
                (eigenvalue_current - eigenvalue_previous) /
                eigenvalue_current) >= self.tol):  # test convergence criteria
                eigenvalue_previous = eigenvalue_current
                Hv = torch.autograd.grad(grads,
                                         params,
                                         grad_outputs=v,
                                         only_inputs=True,
                                         retain_graph=True)
                #Hv = [hv.float() for hv in Hv]
                Hv = [self.nan_to_num(hv).float() for hv in Hv]
                eigenvalue_current = self.inner_product(Hv, v).item()
                v = self.normalize(Hv)
                v = [x / scale for x in v]
                i += 1
            eigenvalue_current *= scale
            block_eigenvalue.append(eigenvalue_current)
            if self.verbose:
                log_dist(
                    f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}',
                    ranks=[0])
        block_eigenvalue = self.post_process(block_eigenvalue)
        if self.verbose:
            log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0])
        # {param_id: (eigenvalue, layer_id)}
        ev_dict = {}
        for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)):
            ev_dict.update(dict.fromkeys(layer_keys, (value, i)))
        return ev_dict
    # 1. Map all eigenvalues to [0, 1.0].
    # 2. Some layers can't generate valid eigenvalues on fp16 precision, use 1.0 instead.
    def post_process(self, value_list):
        """Normalize |eigenvalues| by the largest magnitude; zeros (failed
        estimates) become 1.0."""
        max_value = abs(max(value_list, key=abs))
        return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list]
| StarcoderdataPython |
9679163 | # -*- coding: utf-8 -*-
import os
import libvirt
import sys
from xml.dom import minidom
def install_env():
    """Install the QEMU/KVM + libvirt host tool stack via apt-get.

    Returns the os.system() exit status (0 on success). Requires sudo on
    a Debian/Ubuntu host. NOTE: this file is Python 2 (print statements,
    raw_input elsewhere).
    """
    return os.system("sudo apt-get install qemu-kvm qemu virt-manager \
virt-viewer libvirt-bin bridge-utils")
def set_bridge():
    """Ensure /etc/network/interfaces defines a br0 bridge; create it if absent.

    Returns True when a bridge already exists or was just configured, and
    False when the network configuration file cannot be read. Prompts the
    user (in Chinese) for the NIC to enslave. Python 2 code.
    """
    file = '/etc/network/interfaces' # network configuration file
    try:
        with open(file) as f:
            strs = f.read()
            f.close()  # NOTE(review): redundant inside the with-block
    except:
        # Config file missing or unreadable.
        return False
    if 'bridge_ports' in strs:
        print "bridge has exist!"
        return True
    else:
        # Ask which NIC to bridge (prompt text: "enter the NIC name to bridge").
        nic = raw_input("请输入作为桥接的网卡名:")
        os.system("sudo /etc/init.d/networking stop")
        f = open(file,'w+')
        # Append a DHCP br0 stanza enslaving the chosen NIC, STP off.
        add_strs = "auto "+nic+"\niface "+nic+" inet manual\nauto br0\n\
iface br0 inet dhcp\nbridge_ports "+nic+"\nbridge_stp off\nbridge_fd 0"
        f.write(strs+add_strs)
        f.close()
        os.system("sudo /etc/init.d/networking start")
        print "bridge set successful!"
        return True
def define_xml():
    """Define a libvirt domain for every disk image found under the CWD.

    For each file whose extension names a known disk-image type, rewrite
    the sibling .xml definition's <source file=...> attribute to the
    image's path and register the domain with qemu:///system. Returns
    False when an expected xml file is unreadable, True otherwise.
    """
    conn = libvirt.open('qemu:///system')
    path = os.getcwd()
    types = ['qcow2','raw','vdi','vmdk']
    for rt,dirs,files in os.walk(os.getcwd()):
        for file in files:
            if file.split('.')[1] in types:
                # The domain definition is expected next to the image,
                # same base name with an .xml extension.
                xml = os.path.join(rt,file.split('.')[0])+'.xml'
                filepath = os.path.join(rt,file)
                try:
                    with open(xml) as f:
                        strs = f.read()
                except:
                    return False
                # Point the domain's disk <source file=...> at this image
                # and write the patched XML back.
                xmlDom = minidom.parseString(strs)
                domainNode = xmlDom.getElementsByTagName("devices")[0]
                domainNode.getElementsByTagName("source")[0].attributes['file'].value = filepath
                newStrs = xmlDom.toxml()
                f = open(xml, "w")
                f.write(newStrs)
                f.close()
                myDom = conn.defineXML(newStrs)
    return True
if __name__=='__main__':
    # Configure the bridge first, then define the domains. The error
    # messages below are Chinese: "network config path is wrong, please fix
    # the path" and "xml file does not exist".
    if not set_bridge():
        print "网络配置文件路径有误,请修改路径"
        exit()
    if not define_xml():
        print "xml文件不存在"
        exit()
| StarcoderdataPython |
3560654 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 <NAME>
# See LICENSE.md for details.
"""
PWM flashing LED.
-------------
Every second change duty cycle.
"""
import sys, time
from argparse import *
from cffi import FFI
from libpwmio import libpwmio
class ledflash:
    """Drives an LED on a sysfs PWM pin via libpwmio, ramping brightness
    up and down to produce a breathing effect."""
    def __init__(self):
        """Create library and ffi interfaces.
        """
        self.pwm = libpwmio.libpwmio()
        self.lib = self.pwm.lib
        self.ffi = self.pwm.ffi
    def changeBrightness(self, device, pwm, period, startDc, dcInc, count, sleepTime):
        """Increase/decrease LED brightness.

        Steps the duty cycle from startDc by dcInc, count times, sleeping
        sleepTime seconds between steps; a negative dcInc dims the LED.
        """
        self.lib.pwm_set_period(device, pwm, period)
        dutyCycle = startDc
        i = 0;
        while i < count:
            self.lib.pwm_set_duty_cycle(device, pwm, dutyCycle)
            time.sleep(sleepTime)
            dutyCycle += dcInc
            i += 1
    def main(self, device, pwm):
        """Gradually increase intensity of flashing LED.

        Runs ten brighten/dim cycles, then always parks the pin in a safe
        state (duty 0, period 0, disabled) and closes the device.
        """
        try:
            self.pwm.open(device, pwm)
            self.lib.pwm_enable(device, pwm)
            i = 0;
            # Make LED gradually brighter and dimmer
            while i < 10:
                self.changeBrightness(device, pwm, 1000, 0, 10, 100, .005)
                self.changeBrightness(device, pwm, 1000, 1000, -10, 100, .005)
                i += 1
        finally:
            # Cleanup runs even on interrupt/exception.
            self.lib.pwm_set_duty_cycle(device, pwm, 0)
            self.lib.pwm_set_period(device, pwm, 0)
            self.lib.pwm_disable(device, pwm)
            self.pwm.close(device)
if __name__ == "__main__":
    # CLI entry point: select the PWM chip and channel, then run the demo.
    parser = ArgumentParser()
    parser.add_argument("--device", help="PWM device number (default 0 = sys/class/pwm/pwmchip0)", type=int, default=0)
    parser.add_argument("--pwm", help="PWM pin (default 0 = /sys/class/pwm/pwmchip0/pwm0)", type=int, default=0)
    args = parser.parse_args()
    obj = ledflash()
    obj.main(args.device, args.pwm)
| StarcoderdataPython |
9685238 | <reponame>nahuelmol/patitas
from rest_framework import serializers
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework import status
from users.utils import generate_access_token
class RegisterSerializer(serializers.ModelSerializer):
    """Sign-up serializer: accepts username/email/password and creates a
    Django User with a properly hashed password."""

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
        # Consistency/security fix: mark password write-only, matching
        # UserCreateSerializer, so the hash is never serialized back out.
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """Create and persist a new user with a hashed password.

        Fix: removed the debug print(validated_data) left in the original,
        which leaked the raw (pre-hash) password to stdout/logs.
        """
        user = User(
            email=validated_data['email'],
            username=validated_data['username']
        )
        user.set_password(validated_data['password'])
        user.save()
        return user
class UserCreateSerializer(serializers.ModelSerializer):
    """Serializer that registers a user and provisions an auth Token."""
    #postes = serializers.PrimaryKeyRelatedField(many=True, read_only=True, )

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
        extra_kwargs = {'password': {'write_only': True, 'required': True}}

    def create(self, validated_data):
        """Persist a new user with a hashed password and create its token."""
        new_user = User(
            email=validated_data['email'],
            username=validated_data['username']
        )
        new_user.set_password(validated_data['password'])
        new_user.save()
        Token.objects.create(user=new_user)
        return new_user
class UserListSerializer(serializers.ModelSerializer):
    """Read-only listing serializer exposing just id and username."""
    class Meta:
        model = User
        fields = ('id', 'username')
11214652 | """This script is an example on how to perform NER inference on plain texts.
Input file must be either a JSON file (that can have multiple documents) or a
txt file with a single document.
"""
import json
import logging
import os
import tempfile
from argparse import ArgumentParser, Namespace
from typing import List, Tuple
import torch
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from .dataset import get_dataset
from .eval_tools import (SequenceMetrics, write_conll_prediction_file,
write_outputs_to_json)
from .postprocessing import OutputComposer
from .preprocessing import (Example, InputSpan, get_features_from_examples,
read_examples)
from .tag_encoder import NERTagsEncoder
from .trainer import evaluate
from .utils import load_model
logger = logging.getLogger(__name__)
def convert_txt_to_tmp_json_file(txt_inference: str) -> str:
    """Converts a txt with inference content to a JSON file with schema
    expected by read_examples. Returns a filename to the temp JSON file.

    The file is created with delete=False, so the caller owns its cleanup.
    """
    payload = [{"doc_id": 0, "doc_text": txt_inference}]
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as handle:
        json.dump(payload, handle)
    return handle.name
def load_and_cache_examples(
    input_file: str,
    args: Namespace,
    tokenizer: BertTokenizer,
    tag_encoder: NERTagsEncoder,
    mode: str,
) -> Tuple[Dataset, List[Example], List[InputSpan]]:
    """Preprocesses an input JSON file to generate inference examples and
    convert to BERT format according to the provided args (tokenizer,
    tag_encoder/scheme, max sequence length, doc stride, etc).

    Returns (dataset, examples, features): the torch Dataset plus the
    original examples and their tokenized spans for post-processing.
    """
    # Read raw documents; is_training=False, so no gold labels are required.
    examples = read_examples(
        input_file=input_file,
        is_training=False,
        classes=tag_encoder.classes,
        scheme=args.scheme)
    # Tokenize and split documents into overlapping spans per args.
    features = get_features_from_examples(
        examples,
        tag_encoder,
        tokenizer,
        args,
        mode=mode,
        unique_id_start=0,
        verbose=args.verbose_logging)
    dataset = get_dataset(features)
    return dataset, examples, features
def run_inference(args, input_text):
    """Run NER prediction over *input_text* (or args.input_file when no
    text is given) and write results in args.output_format to
    args.output_file. Returns the JSON output text when the json writer
    produces one, else None.
    """
    logging.basicConfig()
    # Fix: the original tested the *function object* torch.cuda.is_available
    # (always truthy) instead of calling it, so CPU-only hosts attempted to
    # use CUDA unless --no_cuda was passed explicitly.
    if torch.cuda.is_available() and not args.no_cuda:
        args.device = torch.device("cuda")
        args.n_gpu = 1
    else:
        args.device = torch.device("cpu")
        args.n_gpu = 0
    tokenizer_path = args.tokenizer_model or args.bert_model
    tokenizer = BertTokenizer.from_pretrained(
        tokenizer_path, do_lower_case=args.do_lower_case)
    # Instantiate NER Tag encoder
    tag_encoder = NERTagsEncoder.from_labels_file(
        args.labels_file, scheme=args.scheme.upper())
    args.num_labels = tag_encoder.num_labels
    args.override_cache = True
    # Load a pretrained model
    model = load_model(args, args.bert_model, training=False)
    model.to(args.device)
    # Plain text input gets wrapped into a temporary one-document JSON file.
    if input_text:
        args.inference_file = convert_txt_to_tmp_json_file(input_text)
    else:
        args.inference_file = args.input_file
    args.override_cache = True
    dataset, examples, features = load_and_cache_examples(
        args.inference_file,
        args=args,
        tokenizer=tokenizer,
        tag_encoder=tag_encoder,
        mode='inference',
    )
    # Reassembles span-level predictions back into per-document tag lists.
    output_composer = OutputComposer(
        examples,
        features,
        output_transform_fn=tag_encoder.convert_ids_to_tags)
    logger.info("***** Running predictions *****")
    logger.info(" Num orig examples = %d", len(examples))
    logger.info(" Num split examples = %d", len(features))
    logger.info(" Batch size = %d", args.batch_size)
    # Run prediction for full data
    dataloader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            num_workers=os.cpu_count())
    model.frozen_bert = False
    metrics = evaluate(
        args,
        model,
        tqdm(dataloader, desc="Prediction"),
        output_composer=output_composer,
        sequence_metrics=SequenceMetrics([]), # Empty metrics
        reset=True,
    )
    # Get predictions for all examples
    all_y_pred_raw = output_composer.get_outputs()
    # Filter invalid predictions
    all_y_pred = [tag_encoder.decode_valid(y_pred)
                  for y_pred in all_y_pred_raw]
    # Write predictions to output file
    if args.output_format == 'conll':
        write_conll_prediction_file(args.output_file, examples, all_y_pred)
    elif args.output_format == 'json':
        output_txt = write_outputs_to_json(args.output_file, examples, all_y_pred)
        if output_txt:
            return output_txt
#if __name__ == "__main__":
#
# parser = ArgumentParser("NER inference CLI")
#
# # Model and hyperparameters
# parser.add_argument("--output_file",
# default='-',
# help="File to save prediction results. Defaults to "
# "stdout.")
# parser.add_argument("--output_format",
# choices=("json", "conll"),
# default="json",
# help="Format to save the predictions (json or conll). "
# "Defaults to json.")
#
# parser.add_argument("--bert_model", default=None, type=str, required=True,
# help="Bert pre-trained model name or path to a "
# "checkpoint directory.")
# parser.add_argument("--tokenizer_model", default=None, type=str,
# required=False,
# help="Path to tokenizer files. If empty, defaults to "
# "--bert_model.")
# parser.add_argument("--do_lower_case",
# action='store_true',
# help="Whether to lower case the input text. True for "
# "uncased models, False for cased models.")
# parser.add_argument("--max_seq_length", default=512, type=int,
# help="The maximum total input sequence length after "
# "WordPiece tokenization. Sequences longer than this "
# "will be split into multiple spans, and sequences "
# "shorter than this will be padded.")
# parser.add_argument("--doc_stride", default=128, type=int,
# help="When splitting up a long document into chunks, "
# "how much stride to take between chunks.")
# parser.add_argument('--labels_file',
# required=True,
# help="File with all NER classes to be considered, one "
# "per line.")
# parser.add_argument('--scheme',
# default='bio', help='NER tagging scheme (BIO|BILUO).')
# parser.add_argument('--no_crf',
# action='store_true',
# help='Remove the CRF layer (use plain BERT or '
# 'BERT-LSTM).')
# parser.add_argument('--pooler',
# default='last',
# help='Pooling strategy for extracting BERT encoded '
# 'features from last BERT layers. '
# 'One of "last", "sum" or "concat".')
# parser.add_argument('--freeze_bert',
# action='store_true',
# help="Freeze BERT layers' parameters. If True, uses "
# "either a BERT-LSTM or BERT-LSTM-CRF model.")
# parser.add_argument('--lstm_hidden_size',
# type=int,
# default=100,
# help=('Hidden dimension of the LSTM (only used when '
# 'the BERT model is frozen.'))
# parser.add_argument('--lstm_layers',
# type=int,
# default=1,
# help=('Number of LSTM layers (only used when the BERT '
# 'model is frozen.'))
# parser.add_argument('--no_cuda', action='store_true',
# help='Disables CUDA devices for inference.')
# parser.add_argument('--batch_size', type=int,
# default=1, help='Batch size.')
# parser.add_argument('--verbose_logging', action='store_true')
#
# args = parser.parse_args()
# args.local_rank = -1
#
# input_text = ''
# if args.input_file and args.input_file.endswith('.txt'):
# with open(args.input_file, 'r') as f:
# input_text = f.read()
# run_inference(args, input_text)
| StarcoderdataPython |
8041672 | <filename>src/Authenticate.py
"""Framework for allowing flexible access authorization.
To start, this is used by the HTTP api to perform basic access authorization.
"""
from Tkinter import *
import tktools
import string
import urlparse
import base64
import re
class AuthenticationManager:
    """Handles HTTP access authorization.

    Keeps track of (hostname, realmname) = username:password and hands
    back a set of HTTP headers that should be included in the next
    connection.

    This handles only basic access authorization at the moment. However,
    the only routine called from outside the module is
    request_credentials, and it is passed a dictionary of http headers
    from the 401 response. This should be flexible enough to
    accommodate other authorization mechanisms.
    """
    def __init__(self, app):
        self.app = app
        # initialize 'basic' storage: maps (netloc, realm) -> "user:password"
        self.basic_realms = {}
    def request_credentials(self, headers):
        """Return HTTP headers ({} when no credentials are available).

        ``headers`` is the header dictionary from a 401 response; it must
        also carry 'request-uri' for the credential lookup key.
        """
        # first guess the scheme
        if headers.has_key('www-authenticate'):
            # assume it's basic
            headers['realm'] = \
                self.basic_get_realm(headers['www-authenticate'])
            response = self.basic_credentials(headers)
        else:
            # don't know the scheme
            response = {}
        return response
    def invalidate_credentials(self, headers, credentials):
        """Drop cached credentials that the server has rejected."""
        if headers.has_key('www-authenticate'):
            # assume it's basic
            headers['realm'] = \
                self.basic_get_realm(headers['www-authenticate'])
            self.basic_invalidate_credentials(headers, credentials)
        else:
            # don't know about anything other than basic
            pass
    basic_realm = re.compile('realm="(.*)"')
    def basic_get_realm(self, challenge):
        """Extract the realm name from a WWW-Authenticate challenge.

        Returns None when the challenge has no realm="..." component.
        """
        # The actual specification allows for multiple name=value
        # entries separated by commas, but for basic they don't
        # have any defined value, so don't bother with them.
        # BUG FIX: the original read ``search(challenge) < 0`` (a leftover
        # from the string.find idiom), which turned ``m`` into a boolean
        # and made this method always return None.
        m = self.basic_realm.search(challenge)
        if not m:
            return
        realm = m.group(1)
        return realm
    def basic_credentials(self, data):
        """Build the Authorization header, prompting the user if needed."""
        response = {}
        if data.has_key('realm') and data.has_key('request-uri'):
            scheme, netloc, path, nil, nil, nil = \
                    urlparse.urlparse(data['request-uri'])
            # Credentials are cached per protection space (host + realm).
            key = (netloc, data['realm'])
            if self.basic_realms.has_key(key):
                cookie = self.basic_cookie(self.basic_realms[key])
            else:
                passwd = self.basic_user_dialog(data)
                if passwd:
                    self.basic_realms[key] = passwd
                    cookie = self.basic_cookie(passwd)
                else:
                    # user cancelled the dialog
                    return {}
            response['Authorization'] = cookie
        return response
    def basic_invalidate_credentials(self, headers, credentials):
        """Forget cached credentials when they match the rejected ones."""
        if headers.has_key('realm') and headers.has_key('request-uri'):
            scheme, netloc, path, nil, nil, nil = \
                    urlparse.urlparse(headers['request-uri'])
            key = (netloc, headers['realm'])
            if self.basic_realms.has_key(key):
                test = self.basic_cookie(self.basic_realms[key])
                if test == credentials:
                    del self.basic_realms[key]
    def basic_snoop(self, headers):
        # could watch other requests go by and learn about protection spaces
        pass
    def basic_cookie(self, str):
        """Encode "user:password" as a Basic Authorization header value."""
        return "Basic " + string.strip(base64.encodestring(str))
    def basic_user_dialog(self, data):
        """Pop up a login dialog; returns "user:password" or None."""
        scheme, netloc, path, \
                nil, nil, nil = urlparse.urlparse(data['request-uri'])
        login = LoginDialog(self.app.root, netloc,
                            data['realm'])
        return login.go()
    def more_complete_challenge_parse(self):
        # this is Guido's old code from Reader.handle_auth_error
        # it's worth hanging on to in case a future authentication
        # scheme uses more than one field in the challenge
        return
        challenge = headers['www-authenticate']
        # <authscheme> realm="<value>" [, <param>="<value>"] ...
        parts = string.splitfields(challenge, ',')
        p = parts[0]
        i = string.find(p, '=')
        if i < 0: return
        key, value = p[:i], p[i+1:]
        keyparts = string.split(string.lower(key))
        if not(len(keyparts) == 2 and keyparts[1] == 'realm'): return
        authscheme = keyparts[0]
        value = string.strip(value)
        if len(value) >= 2 and value[0] == value[-1] and value[0] in '\'"':
            value = value[1:-1]
class LoginDialog:
    """Modal username/password prompt for HTTP Basic authentication.

    ``go()`` runs the dialog and returns "user:password" on OK, or None
    when the user cancels.
    """
    def __init__(self, master, netloc, realmvalue):
        self.root = tktools.make_toplevel(master,
                                          title="Authentication Dialog")
        self.prompt = Label(self.root,
                            text="Enter user authentication\nfor %s on %s" %
                            (realmvalue, netloc))
        self.prompt.pack(side=TOP)
        self.user_entry, dummy = tktools.make_form_entry(self.root, "User:")
        self.user_entry.focus_set()
        # <Return> in the user field advances to the password field.
        self.user_entry.bind('<Return>', self.user_return_event)
        self.passwd_entry, dummy = \
                          tktools.make_form_entry(self.root, "Password:")
        # mask the typed password
        self.passwd_entry.config(show="*")
        self.passwd_entry.bind('<Return>', self.ok_command)
        self.ok_button = Button(self.root, text="OK", command=self.ok_command)
        self.ok_button.pack(side=LEFT)
        self.cancel_button = Button(self.root, text="Cancel",
                                    command=self.cancel_command)
        self.cancel_button.pack(side=RIGHT)
        self.user_passwd = None
        tktools.set_transient(self.root, master)
        self.root.grab_set()
    def go(self):
        """Run the dialog; returns the credentials or None."""
        try:
            self.root.mainloop()
        except SystemExit:
            return self.user_passwd
    def user_return_event(self, event):
        self.passwd_entry.focus_set()
    def ok_command(self, event=None):
        user = string.strip(self.user_entry.get())
        passwd = string.strip(self.passwd_entry.get())
        if not user:
            # a user name is required; beep and keep the dialog open
            self.root.bell()
            return
        # BUG FIX: restore the credential join that had been replaced by
        # an anonymization placeholder (invalid syntax).
        self.user_passwd = user + ':' + passwd
        self.goaway()
    def cancel_command(self):
        # no credentials on cancel
        self.user_passwd = None
        self.goaway()
    def goaway(self):
        self.root.destroy()
        raise SystemExit
| StarcoderdataPython |
9693106 | <filename>Python/Topics/Accessing data in a DataFrame/Clever index reseting/main.py<gh_stars>1-10
# your code here, the dataset is already loaded. The variable name is df_rock.
# Reset to a default RangeIndex: drop=True discards the old index instead
# of inserting it as a column, and inplace=True mutates df_rock directly.
df_rock.reset_index(drop=True, inplace=True)
print(df_rock.index)
| StarcoderdataPython |
6509712 | from verbs.baseforms import forms
class ApplyForm(forms.VerbForm):
    # Form for the "Apply" verb.  NOTE(review): field declaration order
    # presumably determines rendering order (Django-forms style) -- do not
    # reorder without confirming against VerbForm.
    # Display name and URL-safe identifier of the verb.
    name = "Apply"
    slug = "apply"
    edit_what_remark = forms.CharField()
    describe_where = forms.CharField()
    # assumed to be a duration in minutes -- TODO confirm units
    duration_min_time = forms.IntegerField()
    edit_remarks = forms.CharField()
    specify_tool = forms.CharField()
    comment_why = forms.CharField()
# Read an integer N and print "ABC" when it is at most 999, "ABD" otherwise.
N = int(input())
if N <= 999:
    print("ABC")
else:
    print("ABD")
| StarcoderdataPython |
1956811 | <filename>Exam preparation/Python Advanced Exam - 14 February 2021/01_fireworks.py
# Firework-show simulation (exam task).  Reads two comma-separated integer
# lists: firework effects and explosive powers.
firework_effect = [int(el) for el in input().split(", ")]
explosive_power = [int(el) for el in input().split(", ")]
# NOTE(review): the copies are only used to size the outer loop below; the
# original lists are mutated during the simulation.
firework_effect_copy = firework_effect.copy()
explosive_power_copy = explosive_power.copy()
# Counters for each firework type produced.
palm_firework = 0
willow_firework = 0
crossette_firework = 0
# Walk the explosive powers from the last index to the first.
for power in range(len(explosive_power_copy)-1, -1, -1):
    if len(firework_effect) == 0 or len(explosive_power) == 0:
        break
    while not len(firework_effect) == 0 or not len(explosive_power) == 0:
        # NOTE(review): ``firework`` is always 0, so each power is only ever
        # paired with the first effect in the list -- confirm this matches
        # the task statement.
        firework = 0
        current_firework = firework_effect[firework]
        current_power = explosive_power[power]
        if current_firework <= 0:
            # Spent effect: discard it (remove() drops the first equal
            # value, which is the inspected element here).
            firework_effect.remove(current_firework)
            if len(firework_effect) == 0:
                break
            continue
        if current_power <= 0:
            # Spent power: discard it and advance the outer loop.
            explosive_power.remove(current_power)
            break
        # Sum divisible by both 3 and 5 -> crossette firework.
        if (current_firework + current_power) % 3 == 0 and (current_firework + current_power) % 5 == 0:
            crossette_firework += 1
            firework_effect.remove(current_firework)
            explosive_power.pop(power)
            break
        # Divisible by 3 only -> palm firework.
        elif (current_firework + current_power) % 3 == 0 and not (current_firework + current_power) % 5 == 0:
            palm_firework += 1
            firework_effect.remove(current_firework)
            explosive_power.pop(power)
            break
        # Divisible by 5 only -> willow firework.
        elif (current_firework + current_power) % 5 == 0 and not (current_firework + current_power) % 3 == 0:
            willow_firework += 1
            firework_effect.remove(current_firework)
            explosive_power.pop(power)
            break
        else:
            # No firework: decrement the effect and move it to the end of
            # the list.
            firework_effect.append(current_firework -1)
            firework_effect.remove(current_firework)
# A perfect show requires at least three of each firework type.
if palm_firework >= 3 and willow_firework >= 3 and crossette_firework >= 3:
    print('Congrats! You made the perfect firework show!')
    if firework_effect:
        print(f"Firework Effects left: {', '.join([str(el) for el in firework_effect])}")
    if explosive_power:
        print(f"Explosive Power left: {', '.join([str(el) for el in explosive_power])}")
    print(f'Palm Fireworks: {palm_firework}')
    print(f'Willow Fireworks: {willow_firework}')
    print(f'Crossette Fireworks: {crossette_firework}')
else:
    print("Sorry. You can't make the perfect firework show.")
    if firework_effect:
        print(f"Firework Effects left: {', '.join([str(el) for el in firework_effect])}")
    if explosive_power:
        print(f"Explosive Power left: {', '.join([str(el) for el in explosive_power])}")
    print(f'Palm Fireworks: {palm_firework}')
    print(f'Willow Fireworks: {willow_firework}')
    print(f'Crossette Fireworks: {crossette_firework}')
8004740 | <filename>server/utils.py
import logging
import time
import math
from datetime import datetime,timedelta,tzinfo
# import cProfile, pstats, cStringIO
import functools
import inspect
import os,sys
try:
from server.config import conf
except:
from config import conf
def advance_logger(loglevel):
    """Return a print-based logging decorator for the given level.

    ``loglevel == 'debug'`` selects a decorator that also reports the
    wall-clock duration and the caller's line number; any other value
    selects the plain info-style decorator.  Both print the wrapped
    function's name, arguments and return value.

    Bug fixes vs. the original:
      * the wrappers now propagate the wrapped function's return value
        (previously it was silently dropped);
      * the level check uses ``==`` instead of ``is`` (identity of string
        literals is an implementation detail);
      * arguments are forwarded to ``_basic_log`` unpacked, so the log no
        longer shows them wrapped in an extra tuple.
    """
    def get_line_number():
        # Two frames up: get_line_number -> wrapper -> caller.
        return inspect.currentframe().f_back.f_back.f_lineno

    def _basic_log(func, result, *args, **kwargs):
        print('function   = %s' % func.__name__)
        print('    arguments = {0} {1}'.format(args, kwargs))
        print('    return    = {0}'.format(result))

    def info_log_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            _basic_log(func, result, *args, **kwargs)
            return result
        return wrapper

    def debug_log_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            time_start = time.time()
            result = func(*args, **kwargs)
            time_end = time.time()
            _basic_log(func, result, *args, **kwargs)
            print('    time      = %.6f sec' % (time_end - time_start))
            print('    called_from_line : %s' % str(get_line_number()))
            return result
        return wrapper

    if loglevel == 'debug':
        return debug_log_decorator
    else:
        return info_log_decorator
def mylogger(log_name):
    """Build and return a logger configured from the global ``conf``.

    Applies ``conf.log_level`` (a standard logging level name such as
    "DEBUG" or "INFO") when set; always attaches a stdout stream handler;
    and, when ``conf.log_filename`` is set, additionally attaches a
    rotating file handler writing to ./log/<log_filename>.log.
    """
    # ``logging.handlers`` is a submodule: a bare ``import logging`` does
    # not guarantee it is reachable as an attribute.  The original relied
    # on it implicitly and could raise AttributeError.
    import logging.handlers

    gLogger = logging.getLogger(log_name)
    if conf.log_level:
        # Resolve the configured name to a numeric level.  Unknown names
        # are ignored (the original left ``level`` unbound for unknown
        # names and then crashed with NameError on setLevel).
        level = getattr(logging, conf.log_level, None)
        if isinstance(level, int):
            gLogger.setLevel(level)
    # hostname = get_host_info()
    hostname = 'local'
    logging_format = '[' + hostname + ']' + '[%(levelname)s] %(message)s [%(filename)s]' + \
        '[line:%(lineno)d] %(asctime)s '
    formatter = logging.Formatter(logging_format)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    gLogger.addHandler(handler)
    if conf.log_filename:
        logdir = "log/"
        logfile = conf.log_filename + ".log"
        # Create the directory portably instead of shelling out to mkdir.
        os.makedirs(logdir, exist_ok=True)
        log_file = "./%s/%s" % (logdir, logfile)
        handler = logging.handlers.RotatingFileHandler(log_file)
        handler.setFormatter(formatter)
        gLogger.addHandler(handler)
    return gLogger
# TODO: utilize different return values to raise different exceptions, based on the exceptions to determine if need retry
# Retry decorator with exponential backoff
def retry(tries=3, delay=1, backoff=2):
    '''Retries a function or method until it returns a truthy value.

    ``delay`` sets the initial delay in seconds, and ``backoff`` sets the
    factor by which the delay should lengthen after each failure.
    ``backoff`` must be greater than 1, or else it isn't really a backoff.
    ``tries`` is the number of *re*-tries after the first attempt and must
    be at least 0; ``delay`` must be greater than 0.

    The wrapper returns True on the first truthy result and False once
    every attempt has failed.  Passing ``retry=False`` in the keyword
    arguments of the decorated call disables retrying (one attempt only).

    Bug fix vs. the original: the result of the final attempt is now
    checked (previously the wrapper slept, made a last call, and then
    returned False without ever looking at its result).

    Source: https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
    '''
    if backoff <= 1:
        raise ValueError("backoff must be greater than 1")
    tries = math.floor(tries)
    if tries < 0:
        raise ValueError("tries must be 0 or greater")
    if delay <= 0:
        raise ValueError("delay must be greater than 0")

    def deco_retry(f):
        @functools.wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay  # make mutable
            attempt = 1
            while True:
                rv = f(*args, **kwargs)
                logger.debug(
                    'retry func:{} return:{}, No.{} time, retry:{} ...'.format(
                        f.__name__, rv, attempt, kwargs.get('retry', True)))
                if rv:  # Done on success
                    return True
                if not kwargs.get('retry', True):
                    return False
                if mtries <= 0:
                    return False  # Ran out of tries :-(
                mtries -= 1          # consume an attempt
                attempt += 1
                time.sleep(mdelay)   # wait...
                mdelay *= backoff    # make future wait longer
        return f_retry  # true decorator -> decorated function
    return deco_retry  # @retry(arg[, ...]) -> true decorator
# Module-level logger shared by the helpers above (notably ``retry``).
logger = mylogger(__name__)
if __name__ == '__main__':
    # Ad-hoc manual smoke test; this module is normally imported, not run.
    # print(get_host_info())
    # print(queue_name('rpc', 'create'))
    print(time.localtime())
    print(time.timezone)
    print(time.time())
    """ ------------------------------ """
    class GMT8(tzinfo):
        # Fixed-offset tzinfo for UTC+8.
        # NOTE(review): dst() returning the full offset is unusual --
        # normally dst() is timedelta(0) for a fixed-offset zone; confirm.
        delta=timedelta(hours=8)
        def utcoffset(self,dt):
            return self.delta
        def tzname(self,dt):
            return "GMT+8"
        def dst(self,dt):
            return self.delta
    class GMT(tzinfo):
        # Fixed-offset tzinfo for UTC+0 (here delta is zero, so dst() is
        # effectively timedelta(0)).
        delta=timedelta(0)
        def utcoffset(self,dt):
            return self.delta
        def tzname(self,dt):
            return "GMT+0"
        def dst(self,dt):
            return self.delta
    # from_tzinfo=GMT()# +0 timezone
    # local_tzinfo=GMT8()# +8 timezone
    # gmt_time = datetime.strptime('2011-08-15 21:17:14', '%Y-%m-%d %H:%M:%S')
    # gmt_time = gmt_time.replace(tzinfo=from_tzinfo)
    # local_time = gmt_time.astimezone(local_tzinfo)
11250929 | <reponame>kusanagi/katana-sdk-python3<gh_stars>1-10
"""
Python 3 SDK for the KATANA(tm) Framework (http://katana.kusanagi.io)
Copyright (c) 2016-2018 KUSANAGI S.L. All rights reserved.
Distributed under the MIT license.
For the full copyright and license information, please view the LICENSE
file that was distributed with this source code.
"""
import re
from functools import cmp_to_key
from itertools import zip_longest
from .errors import KatanaError
__license__ = "MIT"
__copyright__ = "Copyright (c) 2016-2018 KUSANAGI S.L. (http://kusanagi.io)"
# Regexp to check version pattern for invalid chars
INVALID_PATTERN = re.compile(r'[^a-zA-Z0-9*.,_-]')
# Regexp to remove duplicated '*' from version
WILDCARDS = re.compile(r'\*+')
# Regexp to match version dot separators
VERSION_DOTS = re.compile(r'([^*])\.')
# Regexp to match all wildcards except the last one
VERSION_WILDCARDS = re.compile(r'\*+([^$])')
class InvalidVersionPattern(KatanaError):
    """Raised when a version pattern contains characters that are not allowed."""

    message = 'Invalid version pattern: "{}"'

    def __init__(self, pattern):
        # Keep the offending pattern available to callers.
        self.pattern = pattern
        super().__init__(message=self.message.format(pattern))
class VersionNotFound(KatanaError):
    """Raised when no registered service version matches a pattern."""

    message = 'Service version not found for pattern: "{}"'

    def __init__(self, pattern):
        # Keep the unmatched pattern available to callers.
        self.pattern = pattern
        super().__init__(message=self.message.format(pattern))
class VersionString(object):
    """A service version that may contain ``*`` wildcards.

    Wildcard versions are compiled to a regular expression so that
    concrete versions can be matched and the highest match resolved.
    """
    def __init__(self, version):
        """Validate ``version`` and precompile its wildcard pattern, if any.

        Raises InvalidVersionPattern when ``version`` contains characters
        outside [a-zA-Z0-9*.,_-].
        """
        # Validate pattern characters
        if INVALID_PATTERN.search(version):
            raise InvalidVersionPattern(version)
        # Remove duplicated wildcards from version
        self.__version = WILDCARDS.sub('*', version)
        if '*' in self.__version:
            # Create an expression for version pattern comparisons
            expr = VERSION_WILDCARDS.sub(r'[^*.]+\1', self.version)
            # Escape dots to work with the regular expression
            expr = VERSION_DOTS.sub(r'\1\.', expr)
            # If there is a final wildcard left replace it with an
            # expression to match any characters after the last dot.
            if expr[-1] == '*':
                expr = expr[:-1] + '.*'
            # Create a pattern to be used for comparison
            self.__pattern = re.compile(expr)
        else:
            # Exact version: no regex needed.
            self.__pattern = None
    @property
    def version(self):
        # Normalized version string (duplicate '*' collapsed to one).
        return self.__version
    @property
    def pattern(self):
        # Compiled regex for wildcard versions; None for exact versions.
        return self.__pattern
    @staticmethod
    def compare_none(part1, part2):
        """Compare two dot-parts where at least one may be None (absent)."""
        if part1 == part2:
            return 0
        elif part2 is None:
            # The one that DO NOT have more parts is greater
            return 1
        else:
            return -1
    @staticmethod
    def compare_sub_parts(sub1, sub2):
        """Compare two dash-separated sub parts.

        Integer sub parts rank higher than non-integer ones.  Note the
        inverted sign convention (1 when sub1 < sub2): sorting with this
        comparator orders versions from highest to lowest, which is what
        ``resolve`` relies on.
        """
        # Sub parts are equal
        if sub1 == sub2:
            return 0
        # Check if any sub part is an integer
        is_integer = [False, False]
        for idx, value in enumerate((sub1, sub2)):
            try:
                int(value)
            except ValueError:
                is_integer[idx] = False
            else:
                is_integer[idx] = True
        # Compare both sub parts according to their type
        if is_integer[0] != is_integer[1]:
            # One is an integer. The integer is higher than the non integer.
            # Check if the first sub part is an integer, and if so it means
            # sub2 is lower than sub1.
            return -1 if is_integer[0] else 1
        # Both sub parts are of the same type
        return 1 if sub1 < sub2 else -1
    @classmethod
    def compare(cls, ver1, ver2):
        """Comparator over dotted version strings (descending order).

        Splits on '.' then on '-', comparing part by part via
        ``compare_none`` / ``compare_sub_parts``.
        """
        # Versions are equal
        if ver1 == ver2:
            return 0
        for part1, part2 in zip_longest(ver1.split('.'), ver2.split('.')):
            # One of the parts is None
            if part1 is None or part2 is None:
                return cls.compare_none(part1, part2)
            for sub1, sub2 in zip_longest(part1.split('-'), part2.split('-')):
                # One of the sub parts is None
                if sub1 is None or sub2 is None:
                    # Sub parts are different, because one have a
                    # value and the other not.
                    return cls.compare_none(sub1, sub2)
                # Both sub parts have a value
                result = cls.compare_sub_parts(sub1, sub2)
                if result:
                    # Sub parts are not equal
                    return result
    def match(self, version):
        """Return True when ``version`` satisfies this version/pattern."""
        if not self.pattern:
            # Exact version: plain string equality.
            return self.version == version
        else:
            return self.pattern.fullmatch(version) is not None
    def resolve(self, versions):
        """Return the highest version in ``versions`` that matches.

        Raises VersionNotFound when nothing matches.
        """
        valid_versions = [ver for ver in versions if self.match(ver)]
        if not valid_versions:
            # NOTE(review): passes the compiled pattern (or None for exact
            # versions) rather than the version string, so the error
            # message may read oddly -- confirm intent.
            raise VersionNotFound(self.pattern)
        # ``compare`` sorts from highest to lowest, so the first element
        # is the greatest matching version.
        valid_versions.sort(key=cmp_to_key(self.compare))
        return valid_versions[0]
| StarcoderdataPython |
11328284 | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading and writing calibration FITS files."""
import warnings
import numpy as np
from astropy.io import fits
from .uvcal import UVCal
from .. import utils as uvutils
__all__ = ["CALFITS"]
class CALFITS(UVCal):
"""
Defines a calfits-specific class for reading and writing calfits files.
This class should not be interacted with directly, instead use the read_calfits
and write_calfits methods on the UVCal class.
"""
def write_calfits(
self,
filename,
run_check=True,
check_extra=True,
run_check_acceptability=True,
clobber=False,
):
"""
Write the data to a calfits file.
Parameters
----------
filename : str
The calfits file to write to.
run_check : bool
Option to check for the existence and proper shapes of
parameters before writing the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of
parameters before writing the file.
clobber : bool
Option to overwrite the filename if the file already exists.
"""
if run_check:
self.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if self.Nfreqs > 1:
freq_spacing = self.freq_array[0, 1:] - self.freq_array[0, :-1]
if not np.isclose(
np.min(freq_spacing),
np.max(freq_spacing),
rtol=self._freq_array.tols[0],
atol=self._freq_array.tols[1],
):
raise ValueError(
"The frequencies are not evenly spaced (probably "
"because of a select operation). The calfits format "
"does not support unevenly spaced frequencies."
)
if np.isclose(freq_spacing[0], self.channel_width):
freq_spacing = self.channel_width
else:
rounded_spacing = np.around(
freq_spacing, int(np.ceil(np.log10(self._freq_array.tols[1]) * -1))
)
freq_spacing = rounded_spacing[0]
else:
freq_spacing = self.channel_width
if self.Ntimes > 1:
time_spacing = np.diff(self.time_array)
if not np.isclose(
np.min(time_spacing),
np.max(time_spacing),
rtol=self._time_array.tols[0],
atol=self._time_array.tols[1],
):
raise ValueError(
"The times are not evenly spaced (probably "
"because of a select operation). The calfits format "
"does not support unevenly spaced times."
)
if np.isclose(time_spacing[0], self.integration_time / (24.0 * 60.0 ** 2)):
time_spacing = self.integration_time / (24.0 * 60.0 ** 2)
else:
rounded_spacing = np.around(
time_spacing,
int(
np.ceil(np.log10(self._time_array.tols[1] / self.Ntimes) * -1)
+ 1
),
)
time_spacing = rounded_spacing[0]
else:
time_spacing = self.integration_time / (24.0 * 60.0 ** 2)
if self.Njones > 1:
jones_spacing = np.diff(self.jones_array)
if np.min(jones_spacing) < np.max(jones_spacing):
raise ValueError(
"The jones values are not evenly spaced."
"The calibration fits file format does not"
" support unevenly spaced polarizations."
)
jones_spacing = jones_spacing[0]
else:
jones_spacing = -1
prihdr = fits.Header()
if self.total_quality_array is not None:
totqualhdr = fits.Header()
totqualhdr["EXTNAME"] = "TOTQLTY"
if self.cal_type != "gain":
sechdr = fits.Header()
sechdr["EXTNAME"] = "FLAGS"
# Conforming to fits format
prihdr["SIMPLE"] = True
prihdr["TELESCOP"] = self.telescope_name
prihdr["GNCONVEN"] = self.gain_convention
prihdr["CALTYPE"] = self.cal_type
prihdr["CALSTYLE"] = self.cal_style
if self.sky_field is not None:
prihdr["FIELD"] = self.sky_field
if self.sky_catalog is not None:
prihdr["CATALOG"] = self.sky_catalog
if self.ref_antenna_name is not None:
prihdr["REFANT"] = self.ref_antenna_name
if self.Nsources is not None:
prihdr["NSOURCES"] = self.Nsources
if self.baseline_range is not None:
prihdr["BL_RANGE"] = (
"[" + ", ".join([str(b) for b in self.baseline_range]) + "]"
)
if self.diffuse_model is not None:
prihdr["DIFFUSE"] = self.diffuse_model
if self.gain_scale is not None:
prihdr["GNSCALE"] = self.gain_scale
prihdr["INTTIME"] = self.integration_time
prihdr["CHWIDTH"] = self.channel_width
prihdr["XORIENT"] = self.x_orientation
if self.cal_type == "delay":
prihdr["FRQRANGE"] = ",".join(map(str, self.freq_range))
elif self.freq_range is not None:
prihdr["FRQRANGE"] = ",".join(map(str, self.freq_range))
prihdr["TMERANGE"] = ",".join(map(str, self.time_range))
if self.observer:
prihdr["OBSERVER"] = self.observer
if self.git_origin_cal:
prihdr["ORIGCAL"] = self.git_origin_cal
if self.git_hash_cal:
prihdr["HASHCAL"] = self.git_hash_cal
if self.cal_type == "unknown":
raise ValueError(
"unknown calibration type. Do not know how to " "store parameters"
)
# Define primary header values
# Arrays have (column-major) dimensions of
# [Nimages, Njones, Ntimes, Nfreqs, Nspw, Nantennas]
# For a "delay"-type calibration, Nfreqs is a shallow axis
# set the axis for number of arrays
prihdr["CTYPE1"] = ("Narrays", "Number of image arrays.")
prihdr["CUNIT1"] = "Integer"
prihdr["CDELT1"] = 1
prihdr["CRPIX1"] = 1
prihdr["CRVAL1"] = 1
# Jones axis
prihdr["CTYPE2"] = ("JONES", "Jones matrix array")
prihdr["CUNIT2"] = ("Integer", "representative integer for polarization.")
prihdr["CRPIX2"] = 1
prihdr["CRVAL2"] = self.jones_array[0] # always start with first jones.
prihdr["CDELT2"] = jones_spacing
# time axis
prihdr["CTYPE3"] = ("TIME", "Time axis.")
prihdr["CUNIT3"] = ("JD", "Time in julian date format")
prihdr["CRPIX3"] = 1
prihdr["CRVAL3"] = self.time_array[0]
prihdr["CDELT3"] = time_spacing
# freq axis
prihdr["CTYPE4"] = ("FREQS", "Frequency.")
prihdr["CUNIT4"] = "Hz"
prihdr["CRPIX4"] = 1
prihdr["CRVAL4"] = self.freq_array[0][0]
prihdr["CDELT4"] = freq_spacing
# spw axis: number of spectral windows
prihdr["CTYPE5"] = ("IF", "Spectral window number.")
prihdr["CUNIT5"] = "Integer"
prihdr["CRPIX5"] = 1
prihdr["CRVAL5"] = 1
prihdr["CDELT5"] = 1
# antenna axis
prihdr["CTYPE6"] = ("ANTAXIS", "See ANTARR in ANTENNA extension for values.")
prihdr["CUNIT6"] = "Integer"
prihdr["CRPIX6"] = 1
prihdr["CRVAL6"] = 1
prihdr["CDELT6"] = -1
# end standard keywords; begin user-defined keywords
for key, value in self.extra_keywords.items():
# header keywords have to be 8 characters or less
if len(str(key)) > 8:
warnings.warn(
"key {key} in extra_keywords is longer than 8 "
"characters. It will be truncated to 8 as required "
"by the calfits file format.".format(key=key)
)
keyword = key[:8].upper()
if isinstance(value, (dict, list, np.ndarray)):
raise TypeError(
"Extra keyword {keyword} is of {keytype}. "
"Only strings and numbers are "
"supported in calfits.".format(keyword=key, keytype=type(value))
)
if keyword == "COMMENT":
for line in value.splitlines():
prihdr.add_comment(line)
else:
prihdr[keyword] = value
for line in self.history.splitlines():
prihdr.add_history(line)
# define data section based on calibration type
if self.cal_type == "gain":
if self.input_flag_array is not None:
pridata = np.concatenate(
[
self.gain_array.real[:, :, :, :, :, np.newaxis],
self.gain_array.imag[:, :, :, :, :, np.newaxis],
self.flag_array[:, :, :, :, :, np.newaxis],
self.input_flag_array[:, :, :, :, :, np.newaxis],
self.quality_array[:, :, :, :, :, np.newaxis],
],
axis=-1,
)
else:
pridata = np.concatenate(
[
self.gain_array.real[:, :, :, :, :, np.newaxis],
self.gain_array.imag[:, :, :, :, :, np.newaxis],
self.flag_array[:, :, :, :, :, np.newaxis],
self.quality_array[:, :, :, :, :, np.newaxis],
],
axis=-1,
)
elif self.cal_type == "delay":
pridata = np.concatenate(
[
self.delay_array[:, :, :, :, :, np.newaxis],
self.quality_array[:, :, :, :, :, np.newaxis],
],
axis=-1,
)
# Set headers for the second hdu containing the flags. Only in
# cal_type=delay
# Can't put in primary header because frequency axis is shallow there,
# but not here
# Header values are the same as the primary header
sechdr["CTYPE1"] = ("Narrays", "Number of image arrays.")
sechdr["CUNIT1"] = "Integer"
sechdr["CRPIX1"] = 1
sechdr["CRVAL1"] = 1
sechdr["CDELT1"] = 1
sechdr["CTYPE2"] = ("JONES", "Jones matrix array")
sechdr["CUNIT2"] = ("Integer", "representative integer for polarization.")
sechdr["CRPIX2"] = 1
sechdr["CRVAL2"] = self.jones_array[0] # always start with first jones.
sechdr["CDELT2"] = jones_spacing
sechdr["CTYPE3"] = ("TIME", "Time axis.")
sechdr["CUNIT3"] = ("JD", "Time in julian date format")
sechdr["CRPIX3"] = 1
sechdr["CRVAL3"] = self.time_array[0]
sechdr["CDELT3"] = time_spacing
sechdr["CTYPE4"] = ("FREQS", "Valid frequencies to apply delay.")
sechdr["CUNIT4"] = "Hz"
sechdr["CRPIX4"] = 1
sechdr["CRVAL4"] = self.freq_array[0][0]
sechdr["CDELT4"] = freq_spacing
sechdr["CTYPE5"] = ("IF", "Spectral window number.")
sechdr["CUNIT5"] = "Integer"
sechdr["CRPIX5"] = 1
sechdr["CRVAL5"] = 1
sechdr["CDELT5"] = 1
sechdr["CTYPE6"] = (
"ANTAXIS",
"See ANTARR in ANTENNA extension for values.",
)
# convert from bool to int64; undone on read
if self.input_flag_array is not None:
secdata = np.concatenate(
[
self.flag_array.astype(np.int64)[:, :, :, :, :, np.newaxis],
self.input_flag_array.astype(np.int64)[
:, :, :, :, :, np.newaxis
],
],
axis=-1,
)
else:
secdata = self.flag_array.astype(np.int64)[:, :, :, :, :, np.newaxis]
if self.total_quality_array is not None:
# Set headers for the hdu containing the total_quality_array
# No antenna axis, so we have [Njones, Ntime, Nfreq, Nspws]
totqualhdr["CTYPE1"] = ("JONES", "Jones matrix array")
totqualhdr["CUNIT1"] = (
"Integer",
"representative integer for polarization.",
)
totqualhdr["CRPIX1"] = 1
totqualhdr["CRVAL1"] = self.jones_array[0] # always start with first jones.
totqualhdr["CDELT1"] = jones_spacing
totqualhdr["CTYPE2"] = ("TIME", "Time axis.")
totqualhdr["CUNIT2"] = ("JD", "Time in julian date format")
totqualhdr["CRPIX2"] = 1
totqualhdr["CRVAL2"] = self.time_array[0]
totqualhdr["CDELT2"] = time_spacing
totqualhdr["CTYPE3"] = ("FREQS", "Valid frequencies to apply delay.")
totqualhdr["CUNIT3"] = "Hz"
totqualhdr["CRPIX3"] = 1
totqualhdr["CRVAL3"] = self.freq_array[0][0]
totqualhdr["CDELT3"] = freq_spacing
# spws axis: number of spectral windows
totqualhdr["CTYPE4"] = ("IF", "Spectral window number.")
totqualhdr["CUNIT4"] = "Integer"
totqualhdr["CRPIX4"] = 1
totqualhdr["CRVAL4"] = 1
totqualhdr["CDELT4"] = 1
totqualdata = self.total_quality_array
# make HDUs
prihdu = fits.PrimaryHDU(data=pridata, header=prihdr)
# ant HDU
col1 = fits.Column(name="ANTNAME", format="8A", array=self.antenna_names)
col2 = fits.Column(name="ANTINDEX", format="D", array=self.antenna_numbers)
if self.Nants_data == self.Nants_telescope:
col3 = fits.Column(name="ANTARR", format="D", array=self.ant_array)
else:
# ant_array is shorter than the other columns.
# Pad the extra rows with -1s. Need to undo on read.
nants_add = self.Nants_telescope - self.Nants_data
ant_array_use = np.append(
self.ant_array, np.zeros(nants_add, dtype=np.int) - 1
)
col3 = fits.Column(name="ANTARR", format="D", array=ant_array_use)
cols = fits.ColDefs([col1, col2, col3])
ant_hdu = fits.BinTableHDU.from_columns(cols)
ant_hdu.header["EXTNAME"] = "ANTENNAS"
hdulist = fits.HDUList([prihdu, ant_hdu])
if self.cal_type != "gain":
sechdu = fits.ImageHDU(data=secdata, header=sechdr)
hdulist.append(sechdu)
if self.total_quality_array is not None:
totqualhdu = fits.ImageHDU(data=totqualdata, header=totqualhdr)
hdulist.append(totqualhdu)
hdulist.writeto(filename, overwrite=clobber)
hdulist.close()
def read_calfits(
    self, filename, run_check=True, check_extra=True, run_check_acceptability=True
):
    """
    Read data from a calfits file.

    Populates this object's attributes from the primary HDU, the ANTENNAS
    binary table, and (for delay-type cals) the FLAGS image HDU, plus the
    optional TOTQLTY HDU. Axis arrays found in secondary HDUs are
    cross-checked against the primary HDU and a ValueError is raised on
    any mismatch.

    Parameters
    ----------
    filename : str
        The calfits file to read from.
    run_check : bool
        Option to check for the existence and proper shapes of
        parameters after reading in the file.
    check_extra : bool
        Option to check optional parameters as well as required ones.
    run_check_acceptability : bool
        Option to check acceptable range of the values of
        parameters after reading in the file.
    """
    with fits.open(filename) as fname:
        data = fname[0].data
        # Copy the header so popped keys don't mutate the open HDU.
        hdr = fname[0].header.copy()
        hdunames = uvutils._fits_indexhdus(fname)

        # Antenna metadata comes from the ANTENNAS binary table HDU.
        anthdu = fname[hdunames["ANTENNAS"]]
        self.Nants_telescope = anthdu.header["NAXIS2"]
        antdata = anthdu.data
        self.antenna_names = np.array(list(map(str, antdata["ANTNAME"])))
        self.antenna_numbers = np.array(list(map(int, antdata["ANTINDEX"])))
        self.ant_array = np.array(list(map(int, antdata["ANTARR"])))
        if np.min(self.ant_array) < 0:
            # ant_array was shorter than the other columns, so it was
            # padded with -1s.
            # Remove the padded entries.
            self.ant_array = self.ant_array[np.where(self.ant_array >= 0)[0]]

        self.channel_width = hdr.pop("CHWIDTH")
        self.integration_time = hdr.pop("INTTIME")
        self.telescope_name = hdr.pop("TELESCOP")
        self.history = str(hdr.get("HISTORY", ""))

        # Append this package's version string to history if absent.
        if not uvutils._check_history_version(
            self.history, self.pyuvdata_version_str
        ):
            if not self.history.endswith("\n"):
                self.history += "\n"
            self.history += self.pyuvdata_version_str

        self.time_range = list(map(float, hdr.pop("TMERANGE").split(",")))
        self.gain_convention = hdr.pop("GNCONVEN")
        self.gain_scale = hdr.pop("GNSCALE", None)
        self.x_orientation = hdr.pop("XORIENT")
        self.cal_type = hdr.pop("CALTYPE")
        # FRQRANGE is mandatory for delay-type cals, optional otherwise.
        if self.cal_type == "delay":
            self.freq_range = list(map(float, hdr.pop("FRQRANGE").split(",")))
        else:
            if "FRQRANGE" in hdr:
                self.freq_range = list(map(float, hdr.pop("FRQRANGE").split(",")))

        self.cal_style = hdr.pop("CALSTYLE")
        self.sky_field = hdr.pop("FIELD", None)
        self.sky_catalog = hdr.pop("CATALOG", None)
        self.ref_antenna_name = hdr.pop("REFANT", None)
        self.Nsources = hdr.pop("NSOURCES", None)
        bl_range_string = hdr.pop("BL_RANGE", None)
        if bl_range_string is not None:
            # Stored as a stringified list, e.g. "[1.0, 100.0]".
            self.baseline_range = [
                float(b) for b in bl_range_string.strip("[").strip("]").split(",")
            ]
        self.diffuse_model = hdr.pop("DIFFUSE", None)
        self.observer = hdr.pop("OBSERVER", None)
        self.git_origin_cal = hdr.pop("ORIGCAL", None)
        self.git_hash_cal = hdr.pop("HASHCAL", None)

        # generate polarization and time array for either cal_type.
        self.Njones = hdr.pop("NAXIS2")
        self.jones_array = uvutils._fits_gethduaxis(fname[0], 2)
        self.Ntimes = hdr.pop("NAXIS3")
        self.time_array = uvutils._fits_gethduaxis(fname[0], 3)
        self.Nspws = hdr.pop("NAXIS5")
        # subtract 1 to be zero-indexed
        self.spw_array = uvutils._fits_gethduaxis(fname[0], 5) - 1

        # get data.
        if self.cal_type == "gain":
            self._set_gain()
            # Planes 0/1 of the last axis are real/imag of the gains.
            self.gain_array = data[:, :, :, :, :, 0] + 1j * data[:, :, :, :, :, 1]
            self.flag_array = data[:, :, :, :, :, 2].astype("bool")
            # NAXIS1 == 5 means an input_flag_array plane was also written.
            if hdr.pop("NAXIS1") == 5:
                self.input_flag_array = data[:, :, :, :, :, 3].astype("bool")
                self.quality_array = data[:, :, :, :, :, 4]
            else:
                self.quality_array = data[:, :, :, :, :, 3]
            self.Nants_data = hdr.pop("NAXIS6")
            # generate frequency array from primary data unit.
            self.Nfreqs = hdr.pop("NAXIS4")
            self.freq_array = uvutils._fits_gethduaxis(fname[0], 4)
            self.freq_array.shape = (self.Nspws,) + self.freq_array.shape
        if self.cal_type == "delay":
            self._set_delay()
            self.Nants_data = hdr.pop("NAXIS6")
            self.delay_array = data[:, :, :, :, :, 0]
            self.quality_array = data[:, :, :, :, :, 1]

            # Delay cals keep their flags (and frequency axis) in a
            # separate FLAGS image HDU.
            sechdu = fname[hdunames["FLAGS"]]
            flag_data = sechdu.data
            if sechdu.header["NAXIS1"] == 2:
                self.flag_array = flag_data[:, :, :, :, :, 0].astype("bool")
                self.input_flag_array = flag_data[:, :, :, :, :, 1].astype("bool")
            else:
                self.flag_array = flag_data[:, :, :, :, :, 0].astype("bool")

            # generate frequency array from flag data unit
            # (no freq axis in primary).
            self.Nfreqs = sechdu.header["NAXIS4"]
            self.freq_array = uvutils._fits_gethduaxis(sechdu, 4)
            self.freq_array.shape = (self.Nspws,) + self.freq_array.shape

            # Consistency checks: FLAGS HDU axes must match the primary HDU.
            spw_array = uvutils._fits_gethduaxis(sechdu, 5) - 1
            if not np.allclose(spw_array, self.spw_array):
                raise ValueError(
                    "Spectral window values are different in FLAGS HDU than"
                    " in primary HDU"
                )

            time_array = uvutils._fits_gethduaxis(sechdu, 3)
            if not np.allclose(
                time_array,
                self.time_array,
                rtol=self._time_array.tols[0],
                atol=self._time_array.tols[0],
            ):
                raise ValueError(
                    "Time values are different in FLAGS HDU than in primary HDU"
                )

            jones_array = uvutils._fits_gethduaxis(sechdu, 2)
            if not np.allclose(
                jones_array,
                self.jones_array,
                rtol=self._jones_array.tols[0],
                atol=self._jones_array.tols[0],
            ):
                raise ValueError(
                    "Jones values are different in FLAGS HDU than in primary HDU"
                )

        self.extra_keywords = uvutils._get_fits_extra_keywords(hdr)

        # get total quality array if present
        if "TOTQLTY" in hdunames:
            totqualhdu = fname[hdunames["TOTQLTY"]]
            self.total_quality_array = totqualhdu.data
            # Axis numbering differs from the primary HDU (no antenna axis).
            spw_array = uvutils._fits_gethduaxis(totqualhdu, 4) - 1
            if not np.allclose(spw_array, self.spw_array):
                raise ValueError(
                    "Spectral window values are different in "
                    "TOTQLTY HDU than in primary HDU. primary HDU "
                    "has {pspw}, TOTQLTY has {tspw}".format(
                        pspw=self.spw_array, tspw=spw_array
                    )
                )

            if self.cal_type != "delay":
                # delay-type files won't have a freq_array
                freq_array = uvutils._fits_gethduaxis(totqualhdu, 3)
                freq_array.shape = (self.Nspws,) + freq_array.shape
                if not np.allclose(
                    freq_array,
                    self.freq_array,
                    rtol=self._freq_array.tols[0],
                    atol=self._freq_array.tols[0],
                ):
                    raise ValueError(
                        "Frequency values are different in TOTQLTY HDU than"
                        " in primary HDU"
                    )

            time_array = uvutils._fits_gethduaxis(totqualhdu, 2)
            if not np.allclose(
                time_array,
                self.time_array,
                rtol=self._time_array.tols[0],
                atol=self._time_array.tols[0],
            ):
                raise ValueError(
                    "Time values are different in TOTQLTY HDU than in primary HDU"
                )

            jones_array = uvutils._fits_gethduaxis(totqualhdu, 1)
            if not np.allclose(
                jones_array,
                self.jones_array,
                rtol=self._jones_array.tols[0],
                atol=self._jones_array.tols[0],
            ):
                raise ValueError(
                    "Jones values are different in TOTQLTY HDU than in primary HDU"
                )
        else:
            self.total_quality_array = None

    # NOTE(review): indentation was lost in this copy; the check is placed
    # after the `with` block (file fully read) — confirm against upstream.
    if run_check:
        self.check(
            check_extra=check_extra, run_check_acceptability=run_check_acceptability
        )
| StarcoderdataPython |
4880893 | <reponame>mindspore-ai/contrib<gh_stars>1-10
"""[summary]
Returns:
[type]: [description]
"""
import os
import json
import numpy as np
from sklearn import model_selection, preprocessing
from src.global_variables import FED_NUM, file_path
def load_only(src_path):
    """Load whitespace-separated ``[label, id, features...]`` rows from a file.

    Each line looks like ``<label> <id> <feat>... # <comment>``. Lines whose
    trailing comment starts with "non" or "rel" are collected as *real*
    samples; all other lines are collected as *extended* samples.

    Args:
        src_path: Path of the text file to read.

    Returns:
        Tuple of two numpy arrays:
        (real_label_id_features, ext_label_id_features).
    """
    real_label_id_features = []
    ext_label_id_features = []
    # Context manager closes the handle even if parsing raises
    # (the original open()/readline()/close() pattern leaked it on error).
    with open(src_path) as f:
        for line in f:
            pure_line, comm = line.split('#')  # 去除行尾注释 (strip trailing comment)
            pure_line = pure_line.strip()
            comm = comm.strip()[0:3]
            # Each whitespace-separated token is a JSON scalar.
            values = [json.loads(tok) for tok in pure_line.split(' ')]
            if comm in ("non", "rel"):
                real_label_id_features.append(values)
            else:
                ext_label_id_features.append(values)
    return np.array(real_label_id_features), np.array(ext_label_id_features)
def norm(x):
    """L2-normalize each sample (row) of ``x``.

    Args:
        x: 2-D array-like of samples.

    Returns:
        Array of the same shape whose rows have unit L2 norm.
    """
    normalized = preprocessing.normalize(x, norm='l2')
    return normalized
def discrete(y):
    """Discretize continuous values into three classes.

    Mapping: (0.5, 10.5] -> 2, (10.5, 40] -> 1, everything else -> 0.

    Args:
        y: Sequence of numeric values.

    Returns:
        numpy int array of class labels, same length as ``y``.
    """
    new_y = []
    # Iterate values directly instead of indexing via range(len(y)).
    for value in y:
        if 0.5 < value <= 10.5:
            new_y.append(2)
        elif 0.5 < value <= 40:
            new_y.append(1)
        else:
            new_y.append(0)
    return np.array(new_y)
def statistics(x):
    """Print the class distribution of labeled rows.

    Rows with first element < 0.5 count as class 0, < 1.5 as class 1,
    and everything else as class 2.

    Args:
        x: Iterable of rows whose first element is the (float) label.
    """
    counts = [0, 0, 0]
    for row in x:
        label = row[0]
        if label < 0.5:
            counts[0] += 1
        elif label < 1.5:
            counts[1] += 1
        else:
            counts[2] += 1
    print(len(x), "0:", counts[0], "1:", counts[1], "2:", counts[2])
def split_test_train(label_id_features, test_size=0.3, random_state=0):
    """Split ``[label, id, features...]`` rows into train and test subsets.

    Args:
        label_id_features: 2-D array whose first column is the label and
            whose remaining columns are the id/features.
        test_size: Fraction of samples placed in the test split.
        random_state: Seed forwarded to sklearn's splitter.

    Returns:
        Tuple of (id_features_train, id_features_test,
        labels_train, labels_test).
    """
    features = label_id_features[:, 1:]
    targets = label_id_features[:, 0]
    return model_selection.train_test_split(
        features, targets, test_size=test_size, random_state=random_state)
def main():
    """Convert each federation's "fed_std<i>.txt" into .npy feature arrays."""
    for fed_id in range(FED_NUM):
        real_label_id_features, ext_label_id_features = load_only(
            file_path + "fed_std%d.txt" % fed_id)
        np.save(
            os.path.join(file_path, "real_label_id_features%d.npy" % fed_id),
            real_label_id_features)
        # Bug fix: the "ext" file was previously written with the *real*
        # array (the ext array was discarded); save the ext features instead.
        np.save(
            os.path.join(file_path, "ext_label_id_features%d.npy" % fed_id),
            ext_label_id_features)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1851643 | <reponame>domlysi/djangocms_plus
import logging
from collections import OrderedDict
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from cmsplus.app_settings import cmsplus_settings
from cmsplus.fields import (PageSearchField, PlusFilerFileSearchField, PlusFilerImageSearchField, KeyValueField)
from cmsplus.models import PlusPlugin
logger = logging.getLogger(__name__)
class PlusPluginFormBase(forms.ModelForm):
    """
    BaseForm for PluginForms.

    This ModelForm references to a PlusPlugin Model in order to write and read
    from the glossary (JSONField) attribute.
    """

    class Meta:
        model = PlusPlugin
        exclude = ["_json"]  # Do not show json Field in Edit Form

    def __init__(self, *args, **kwargs):
        # When editing an existing plugin, prefill the form from the
        # instance's glossary (the JSON field), not from model columns.
        if kwargs.get('instance'):
            # set form initial values as our instance model attributes are in
            # glossary not in the instance itself
            initial = kwargs.get('initial', {})
            for field_name, field in self.declared_fields.items():
                initial[field_name] = kwargs.get('instance').glossary.get(field_name)
            kwargs['initial'] = initial
        super(PlusPluginFormBase, self).__init__(*args, **kwargs)

    def save(self, commit=True):
        """
        Put serialized data to glossary (_json) field, then save.
        """
        self.instance.data = self.serialize_data()
        return super(PlusPluginFormBase, self).save(commit)

    def serialize_data(self):
        """
        Takes form field values and calls "serialize_field" method for each field,
        if it is declared in the field class

        :return: Serialized data
        :rtype: dict
        """
        parsed_data = OrderedDict()
        for key in self.fields.keys():
            value = self.cleaned_data.get(key)
            # Fields whose name starts with '_' are internal; skip them.
            if key.startswith('_'):
                continue
            field = self.fields.get(key)
            if hasattr(field, "serialize_field") and callable(field.serialize_field):
                parsed_data[key] = field.serialize_field(value)
            else:
                parsed_data[key] = value
        return parsed_data

    def deserialize(self):
        """
        Deserialize data from Json field into dict. Opposite of serialize function (see above)

        :return: Data
        :rtype: dict:
        """
        parsed_dict = OrderedDict()
        for field_name in self.declared_fields:
            value = self.data.get(field_name, None)
            field = self.declared_fields.get(field_name)
            if hasattr(field, "deserialize_field"):
                deserialize_field = getattr(field, "deserialize_field")
                if callable(deserialize_field):
                    try:
                        parsed_dict[field_name] = deserialize_field(value)
                    except ValidationError as e:
                        # Surface field-level deserialization problems as
                        # regular form errors.
                        self._update_errors(e)
            else:
                parsed_dict[field_name] = value
        return parsed_dict
# StylePluginMixin form fields
# ----------------------------
#
def get_style_form_fields(style_config_key=None, style_multiple=False):
    """
    Use together with StylePluginMixin, e.g.

    class MyCustomForm(PlusPluginFormBase):
        ... # form defs
        STYLE_CHOICES = 'MY_CUSTOM_STYLES'
        extra_style, extra_classes, label, extra_css = get_style_form_fields(STYLE_CHOICES)

    class MyCustomPlugin(StylePluginMixin, PlusPluginBase):
        name = _('My Custom')
        form = MyCustomForm
        render_template = 'custom/snippet.html'

    style_config_key should be a cmsplus_settings - key which holds the
    style choices, e.g.: ('c-text-white', 'Text White'), ...

    Returns a 4-tuple of form fields: (style, extra classes, label, extra css).
    """
    style_choices = (
        ('', 'None'),
    )
    # Resolve choices from cmsplus settings; fall back to just "None".
    if style_config_key:
        sc = getattr(cmsplus_settings, style_config_key, style_choices)
    else:
        sc = style_choices

    # Allow selecting several styles at once when requested.
    if style_multiple:
        style_field = forms.MultipleChoiceField
    else:
        style_field = forms.ChoiceField

    return (
        style_field(
            label=_('Style'), required=False, choices=sc,
            initial=sc[0][0], help_text=_('Extra CSS predefined style class for plugin.')),
        forms.CharField(
            label=_('Extra Classes'), required=False, initial='',
            widget=forms.widgets.TextInput(attrs={'style': 'width: 70vw'}),
            help_text=_('Extra CSS Classes (space separated) for plugin.')),
        forms.CharField(
            label=_('Label'), required=False, initial='',
            help_text=_('Label to identify this plugin in page-structure.')),
        KeyValueField(
            label=_('Extra CSS'), required=False, initial='',
            help_text=_('Add extra (device specific) css key, values, e.g: margin or margin:md or transform:xl')),
    )
# LinkFormBase
# ------------
#
class LinkFormBase(PlusPluginFormBase):
    """
    provides fields and methods which are needed to handle different link types.
    """

    LINK_TYPE_CHOICES = [
        ('cmspage', _("CMS Page")),
        ('download', _("Download File")),
        ('exturl', _("External URL")),
        ('email', _("Mail To")),
    ]

    # Which of the type-specific fields below is required is decided
    # per-type in clean().
    link_type = forms.ChoiceField(
        label=_("Link"),
        help_text=_("Type of link"),
    )

    cms_page = PageSearchField(
        required=False,
        label=_('Internal Page'),
        help_text=_("An internal link onto any CMS page."),
    )

    section = forms.CharField(
        required=False,
        label=_('Anchor'),
        help_text=_("An anchor or bookmark on the internal linked page."),
    )

    ext_url = forms.CharField(
        required=False,
        label=_("URL"),
        help_text=_("Link onto external page"),
    )

    download_file = PlusFilerFileSearchField(
        label=_('Download file'),
        required=False,
        help_text=_("An internal link onto a file from filer"),
    )

    file_as_page = forms.BooleanField(
        initial=False,
        label=_('Serve file as webpage'),
        required=False,
        help_text=_('e.g. PDF will be visible in browser instead of a download')
    )

    mail_to = forms.EmailField(
        required=False,
        label=_("Email"),
        help_text=_("Open Email program with this address"),
    )

    link_target = forms.ChoiceField(
        choices=[
            ('', _("Same Window")),
            ('_blank', _("New Window")),
            ('_parent', _("Parent Window")),
            ('_top', _("Topmost Frame")),
        ],
        label=_("Link Target"),
        required=False,
        help_text=_("Open Link in other target."),
    )

    link_title = forms.CharField(
        label=_("Link title"),
        required=False,
        help_text=_("Link's title"),
    )

    def __init__(self, *args, **kwargs):
        # Subclasses may set `require_link = False` to allow a "No Link"
        # choice; in that case link_type becomes optional.
        link_type_choices = []
        if not getattr(self, 'require_link', True):
            link_type_choices.append(('', _("No Link")))
            self.declared_fields['link_type'].required = False
        link_type_choices.extend(self.LINK_TYPE_CHOICES)
        self.declared_fields['link_type'].choices = link_type_choices
        self.declared_fields['link_type'].initial = link_type_choices[0][0]
        super(LinkFormBase, self).__init__(*args, **kwargs)

    def clean(self):
        # Enforce that the field matching the selected link_type is filled,
        # without double-reporting errors already raised by the field itself.
        cleaned_data = super(LinkFormBase, self).clean()
        link_type = cleaned_data.get('link_type')
        if link_type == 'cmspage' and not self.has_error('cms_page'):
            if not cleaned_data.get('cms_page'):
                error = ValidationError(_("CMS page to link to is missing."))
                self.add_error('cms_page', error)
        elif link_type == 'download' and not self.has_error('download_file'):
            if not cleaned_data.get('download_file'):
                error = ValidationError(_("File for download is missing."))
                self.add_error('download_file', error)
        elif link_type == 'exturl' and not self.has_error('ext_url'):
            if not cleaned_data.get('ext_url'):
                error = ValidationError(_("External link is missing."))
                self.add_error('ext_url', error)
        elif link_type == 'email' and not self.has_error('mail_to'):
            if not cleaned_data.get('mail_to'):
                error = ValidationError(_("Mailto link is missing."))
                self.add_error('mail_to', error)
        if self.errors:
            return None
        return cleaned_data
# image form fields
# -----------------
#
def get_image_form_fields(required=False, help_text=''):
    """
    Can be used to insert image form fields into the custom plugin form
    definition. call e.g.:

    image_file, image_title, image_alt = get_image_form_fields(required=True)

    Returns a 3-tuple of form fields: (image file, title, alt text).
    """
    return (
        PlusFilerImageSearchField(
            label=_('Image File'),
            required=required,
            help_text=help_text
        ),
        forms.CharField(
            label=_('Image Title'),
            required=False,
            help_text=_(
                'Caption text added to the "title" attribute of the ' '<img> element.'),
        ),
        forms.CharField(
            label=_('Alternative Description'),
            required=False,
            help_text=_(
                'Textual description of the image added to the "alt" ' 'tag of the <img> element.'),
        )
    )
| StarcoderdataPython |
143592 | from flask import Flask, render_template, jsonify, request
from json import dump
from colony import Colony
# diaspora.py is a web service that serves space colonies.
# (built with Flask, served with gunicorn, deployed with heroku)
app = Flask(__name__, static_url_path='/static')
# Preserve insertion order of keys in jsonify() responses.
app.config["JSON_SORT_KEYS"] = False
@app.route("/")
def colony():
colony = Colony()
colony.describe()
return render_template('index.html',
title = 'diaspora',
colony = colony.description,
attributes = colony.attributes)
# An API for requesting colonies (response type: JSON)
@app.route('/api/v1.0/', methods = ['GET'])
def api():
if request.method == 'GET':
if not request.query_string:
howMany = 0
else:
howMany = int(request.query_string)
if howMany <= 500:
colonies = []
for i in range(0, howMany):
colony = Colony()
colony.describe()
colonies.append(colony.serialize())
to_dump = {'colonies' : colonies}
return jsonify(to_dump)
else:
return "Too many colonies requested (500 max)."
if __name__ == '__main__':
app.run()
| StarcoderdataPython |
218267 | <reponame>fredatgithub/github-stats
# coding: utf-8
from datetime import datetime
import logging
from flask.ext import restful
from werkzeug import exceptions
import flask
import util
class Api(restful.Api):
    """flask-restful Api subclass with custom auth and error behavior."""

    def unauthorized(self, response):
        # Short-circuit with a plain 401 instead of the default challenge.
        flask.abort(401)

    def handle_error(self, e):
        # Delegate to the module-level JSON error handler below.
        return handle_error(e)
def handle_error(e):
    """Log an exception and render it as a JSON(P) error response.

    Werkzeug HTTP exceptions already carry code/name/description; any other
    exception is normalized to a 500 Internal Server Error.
    """
    logging.exception(e)
    try:
        e.code
    except AttributeError:
        # Non-HTTP exception: attach the attributes the payload needs.
        e.code = 500
        e.name = e.description = 'Internal Server Error'
    return util.jsonpify({
        'status': 'error',
        'error_code': e.code,
        'error_name': util.slugify(e.name),
        'error_message': e.name,
        'error_class': e.__class__.__name__,
        'description': e.description,
    }), e.code
def make_response(data, marshal_table, cursors=None):
    """Marshal one model or a list of models into a JSON(P) success payload.

    Args:
        data: A single object or an iterable of objects to serialize.
        marshal_table: flask-restful field mapping used for serialization.
        cursors: Optional pagination cursor(s); either a single "next" cursor
            string or a dict with 'next'/'prev' keys.
    """
    if util.is_iterable(data):
        response = {
            'status': 'success',
            'count': len(data),
            'now': datetime.utcnow().isoformat(),
            'result': map(lambda l: restful.marshal(l, marshal_table), data),
        }
        if cursors:
            if isinstance(cursors, dict):
                if cursors.get('next'):
                    response['next_cursor'] = cursors['next']
                    response['next_url'] = util.generate_next_url(cursors['next'])
                if cursors.get('prev'):
                    response['prev_cursor'] = cursors['prev']
                    response['prev_url'] = util.generate_next_url(cursors['prev'])
            else:
                # A bare cursor value is treated as the "next" cursor.
                response['next_cursor'] = cursors
                response['next_url'] = util.generate_next_url(cursors)
        return util.jsonpify(response)
    # Single-object payload: no count and no cursors.
    return util.jsonpify({
        'status': 'success',
        'now': datetime.utcnow().isoformat(),
        'result': restful.marshal(data, marshal_table),
    })
def make_not_found_exception(description):
    """Raise a werkzeug NotFound (404) carrying a custom description."""
    exception = exceptions.NotFound()
    exception.description = description
    raise exception
| StarcoderdataPython |
4852998 | <reponame>knot126/Stratigise
{
    # Opcode table: maps bytecode value -> [mnemonic, operand types...].
    # NOTE: While the opcodes are fairly well known, the arguments are not very
    # well known and are still in research.
    'InstructionSize': 1,

    0x00: ['CommandError'], # This should never normally appear.
    0x01: ['LoadObject', 'string'],
    0x02: ['LoadSprite'],
    0x03: ['LoadAnim'],
    0x04: ['LoadSample'],
    0x05: ['LoadAnimFlag'],
    # NOTE(review): key 0x06 is assigned twice below; in a dict literal the
    # second entry ('TurnTowardY') silently overwrites the first
    # ('TurnTowardX'). One of the two values is presumably wrong — confirm
    # the intended opcode numbers against the target VM.
    0x06: ['TurnTowardX', 'int16', 'eval'],
    0x06: ['TurnTowardY', 'int16', 'eval'],
    0x07: ['TurnTowardWaypointX', 'eval'],
    0x08: ['PlaySound', 'int8', 'eval'], # for arg[0]==2
    0x09: ['StopSound', 'eval'],
    0x0C: ['PlayAnim', 'eval'],
    0x0D: ['StopAnim', 'string', 'eval'],
    0x0E: ['WaitAnimend'],
    0x0F: ['Print'],
    0x10: ['SpecialFXOn'],
    0x11: ['Wait'],
    0x12: ['Repeat'],
    0x13: ['Until'],
    0x14: ['While'],
    0x15: ['EndWhile'],
    0x16: ['If'],
    0x17: ['Else'],
    0x18: ['IfAnimend'],
    0x19: ['For'],
    0x1A: ['Next'],
    0x1B: ['Switch'],
    0x1C: ['EndCase'],
    0x1D: ['ProcCall'],
    0x1E: ['ResetPosition'],
    0x1F: ['ScaleX'],
    0x20: ['ScaleY'],
    0x21: ['ScaleZ'],
    0x22: ['Jump'],
    0x23: ['Fall'],
    0x24: ['MoveBackward'],
    0x25: ['MoveForward'],
    0x26: ['MoveRight'],
    0x27: ['MoveDown'],
    0x28: ['MoveLeft'],
    0x29: ['MoveUp'],
    0x2A: ['TurnRight'],
    0x2B: ['TurnLeft'],
    0x2C: ['TiltBackward'],
    0x2D: ['TiltForward'],
    0x2E: ['TiltRight'],
    0x2F: ['TiltLeft'],
    0x30: ['Spawn'],
    0x31: ['CreateTrigger'],
    0x32: ['KillTrigger'],
    0x33: ['EndTrigger'],
    0x34: ['Remove'],
    0x35: ['LetGVar'],
    0x36: ['LetPGVar'],
    0x37: ['LetAVar'],
    0x38: ['EndProc'],
    0x39: ['SetModel'],
    0x3A: ['FileEnd'],
    0x3B: ['Blink'],
    0x3C: ['HoldTrigger'],
    0x3D: ['ReleaseTrigger'],
    0x3E: ['SetAnim'],
    0x3F: ['TurnTowardXY'],
    0x40: ['CommandError'],
    0x41: ['__Operation_41'],
    # NOTE(review): 'Relase' below looks like a typo for 'Release', but the
    # string may be load-bearing elsewhere so it is left unchanged.
    0x42: ['Relase'],
    0x43: ['Inc'],
    0x44: ['__Operation_44'],
    0x45: ['__Operation_45'],
    0x46: ['CamWobble'],
    0x47: ['LookAtMe'],
    0x48: ['ShadowSize'],
    0x49: ['__Operation_49'],
    0x4A: ['__Operation_4a'],
    0x4B: ['__Operation_4b'],
    0x4C: ['SetPlayerPosRel'],
    0x4D: ['__Operation_4d'],
    0x4E: ['__Operation_4e'],
    0x4F: ['CommandError'],
    0x50: ['CommandError'],
    0x51: ['CollisionOn'],
    0x52: ['CollisionOff'],
}
| StarcoderdataPython |
3540412 | <reponame>ChrisCummins/format<gh_stars>0
# Copyright 2014-2019 <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utility functions for parallelism in python.
The goal of the module is to provide easy to use implementations of typical
parallel workloads, such as data parallel map operations.
"""
import multiprocessing.pool
import queue
import subprocess
import threading
import typing
from labm8.py import app
from labm8.py import bazelutil
from labm8.py import pbutil
from labm8.py import sqlutil
FLAGS = app.FLAGS
class MapWorkerError(EnvironmentError):
    """Error raised when a _MapWorker's subprocess exits with a failure."""

    def __init__(self, returncode: int):
        """Instantiate the error.

        Args:
            returncode: The exit code of the failed process.
        """
        self._returncode = returncode

    @property
    def returncode(self) -> int:
        """The exit code of the failed process."""
        return self._returncode

    def __repr__(self) -> str:
        return "Command exited with code {}".format(self.returncode)
class _MapWorker(object):
    """A work unit for a data parallel workload.

    A _MapWorker executes a command as a subprocess, passes it a protocol buffer,
    and decodes a protocol buffer output.

    This is a helper class created by MapNativeProtoProcessingBinary() and
    returned to the user. It is not to be used by user code.

    Lifecycle: __init__ -> Run() (in a pool process) -> SetProtos() (back in
    the parent) -> input()/output()/ok()/error().
    """

    def __init__(
        self, id: int, cmd: typing.List[str], input_proto: pbutil.ProtocolBuffer,
    ):
        """Create a map worker.

        Args:
            id: The numeric ID of the map worker.
            cmd: The command to execute, as a list of arguments to subprocess.Popen().
            input_proto: The protocol buffer to pass to the command.
        """
        self._id = id
        self._cmd = cmd
        # We store the input proto in wire format (as a serialized string) rather
        # than as a class object as pickle can get confused by the types.
        # See: https://stackoverflow.com/a/1413299
        self._input_proto: typing.Optional[pbutil.ProtocolBuffer] = None
        self._input_proto_string = input_proto.SerializeToString()
        self._output_proto_string: typing.Optional[str] = None
        self._output_proto: typing.Optional[pbutil.ProtocolBuffer] = None
        self._output_proto_decoded = False
        self._returncode: typing.Optional[int] = None
        self._done = False

    def Run(self) -> None:
        """Execute the process and store the output.

        If the process fails, no exception is raised. The error can be accessed
        using the error() method. After calling this method, SetProtos() *must* be
        called.
        """
        assert not self._done
        # Run the C++ worker process, capturing it's output.
        process = subprocess.Popen(
            self._cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        )
        # Send the input proto to the C++ worker process.
        # TODO: Add timeout.
        stdout, _ = process.communicate(self._input_proto_string)
        self._returncode = process.returncode
        # The serialized input is no longer needed; drop it before this
        # object is pickled back to the parent process.
        del self._input_proto_string
        if not process.returncode:
            # Store the C++ binary output in wire format.
            self._output_proto_string = stdout

    def SetProtos(
        self, input_proto: pbutil.ProtocolBuffer, output_proto_class: typing.Type,
    ) -> None:
        """Set the input protocol buffer, and decode the output protocol buffer.

        This is performed by the SetProtos() method (rather than during Run()) so
        that when pickled, this class contains only basic types, not protocol buffer
        instances.

        Args:
            input_proto: The input protocol buffer message.
            output_proto_class: The protocol buffer class of the output message.
        """
        assert not self._done
        self._done = True
        self._input_proto = input_proto
        # Only parse the output if the worker completed successfully.
        if not self._returncode:
            # Be careful that errors during protocol buffer decoding (e.g.
            # unrecognized fields, conflicting field type/tags) are silently ignored
            # here.
            self._output_proto = output_proto_class.FromString(
                self._output_proto_string,
            )
            # No need to hand onto the string message any more.
            del self._output_proto_string

    @property
    def id(self):
        """Return the numeric ID of the map worker."""
        return self._id

    def input(self) -> pbutil.ProtocolBuffer:
        """Get the input protocol buffer."""
        assert self._done
        return self._input_proto

    def output(self) -> typing.Optional[pbutil.ProtocolBuffer]:
        """Get the protocol buffer decoded from stdout of the executed binary.

        If the process failed (e.g. not _MapWorker.ok()), None is returned.
        """
        assert self._done
        return self._output_proto

    def error(self) -> typing.Optional[MapWorkerError]:
        """Get the error generated by a failed binary execution.

        If the process succeeded (e.g. _MapWorker.ok()), None is returned.
        """
        if self._returncode:
            return MapWorkerError(self._returncode)

    def ok(self) -> bool:
        """Return whether binary execution succeeded."""
        return not self._returncode
def _RunNativeProtoProcessingWorker(map_worker: _MapWorker) -> _MapWorker:
    """Private helper message to execute Run() method of _MapWorker.

    This is passed to Pool.imap_unordered() as the function to execute for every
    work unit. This is needed because only module-level functions can be pickled.

    Returns:
        The same _MapWorker, after its Run() method completed.
    """
    map_worker.Run()
    return map_worker
def MapNativeProtoProcessingBinary(
    binary_data_path: str,
    input_protos: typing.List[pbutil.ProtocolBuffer],
    output_proto_class: typing.Type,
    binary_args: typing.Optional[typing.List[str]] = None,
    pool: typing.Optional[multiprocessing.Pool] = None,
    num_processes: typing.Optional[int] = None,
) -> typing.Iterator[_MapWorker]:
    """Run a protocol buffer processing binary over a set of inputs.

    Args:
        binary_data_path: The path of the binary to execute, as provided to
            bazelutil.DataPath().
        input_protos: An iterable list of input protos.
        output_proto_class: The proto class of the output.
        binary_args: An optional list of additional arguments to pass to binaries.
        pool: The multiprocessing pool to use.
        num_processes: The number of processes for the multiprocessing pool.

    Returns:
        A generator of _MapWorker instances. The order is random.
    """
    binary_path = bazelutil.DataPath(binary_data_path)
    binary_args = binary_args or []
    cmd = [str(binary_path)] + binary_args

    # Read all inputs to a list. We need the inputs in a list so that we can
    # map an inputs position in the list to a _MapWorker.id.
    input_protos = list(input_protos)

    # Create the multiprocessing pool to use, if not provided.
    pool = pool or multiprocessing.Pool(processes=num_processes)

    map_worker_iterator = (
        _MapWorker(i, cmd, input_proto)
        for i, input_proto in enumerate(input_protos)
    )

    for map_worker in pool.imap_unordered(
        _RunNativeProtoProcessingWorker, map_worker_iterator,
    ):
        # Re-attach the decoded protos now that the pickled worker is back
        # in the parent process.
        map_worker.SetProtos(input_protos[map_worker.id], output_proto_class)
        yield map_worker
def MapNativeProcessingBinaries(
    binaries: typing.List[str],
    input_protos: typing.List[pbutil.ProtocolBuffer],
    output_proto_classes: typing.List[typing.Type],
    pool: typing.Optional[multiprocessing.Pool] = None,
    num_processes: typing.Optional[int] = None,
) -> typing.Iterator[_MapWorker]:
    """Run per-input protocol buffer processing binaries over a set of inputs.

    Unlike MapNativeProtoProcessingBinary(), each input proto has its own
    binary and its own output proto class.

    Args:
        binaries: A list of binary data paths, one per input proto, as
            provided to bazelutil.DataPath().
        input_protos: An iterable list of input protos.
        output_proto_classes: The proto class of each output, one per input.
        pool: The multiprocessing pool to use.
        num_processes: The number of processes for the multiprocessing pool.

    Returns:
        A generator of _MapWorker instances. The order is random.

    Raises:
        ValueError: If the number of binaries does not match input_protos.
    """
    if not len(binaries) == len(input_protos):
        raise ValueError("Number of binaries does not equal protos")

    # Convert resolved paths to str for subprocess, consistent with
    # MapNativeProtoProcessingBinary() above (which does str(binary_path)).
    cmds = [[str(bazelutil.DataPath(b))] for b in binaries]

    # Read all inputs to a list. We need the inputs in a list so that we can
    # map an inputs position in the list to a _MapWorker.id.
    input_protos = list(input_protos)
    output_proto_classes = list(output_proto_classes)

    # Create the multiprocessing pool to use, if not provided.
    pool = pool or multiprocessing.Pool(processes=num_processes)

    map_worker_iterator = (
        _MapWorker(id, cmd, input_proto)
        for id, (cmd, input_proto) in enumerate(zip(cmds, input_protos))
    )

    for map_worker in pool.imap_unordered(
        _RunNativeProtoProcessingWorker, map_worker_iterator,
    ):
        # Each worker gets its own output proto class, indexed by worker id.
        map_worker.SetProtos(
            input_protos[map_worker.id], output_proto_classes[map_worker.id],
        )
        yield map_worker
# Type annotations for MapDatabaseRowBatchProcessor().
WorkUnitType = typing.Callable[[typing.List[typing.Any]], typing.Any]  # processes a batch of rows
WorkUnitArgGenerator = typing.Callable[[typing.Any], typing.Any]  # rows -> work unit args
ResultCallback = typing.Callable[[typing.Any], None]  # consumes one work unit result
BatchCallback = typing.Callable[[int], None]  # receives the running row offset
def MapDatabaseRowBatchProcessor(
    work_unit: WorkUnitType,
    query: sqlutil.Query,
    generate_work_unit_args: WorkUnitArgGenerator = lambda rows: rows,
    work_unit_result_callback: ResultCallback = lambda result: None,
    start_of_batch_callback: BatchCallback = lambda i: None,
    end_of_batch_callback: BatchCallback = lambda i: None,
    batch_size: int = 256,
    rows_per_work_unit: int = 5,
    start_at: int = 0,
    pool: typing.Optional[multiprocessing.Pool] = None,
) -> None:
    """Execute a database row-processing function in parallel.

    Use this function to orchestrate the parallel execution of a function that
    takes batches of rows from the result set of a query.

    This is equivalent to a serial implementation:

    for row in query:
        work_unit_result_callback(work_unit(generate_work_unit_args(row)))
    end_of_batch_callback()

    Args:
        work_unit: A function which takes as input the value returned by the
            generate_work_unit_args callback and produces a result.
        query: The query which produces inputs to the work units.
        generate_work_unit_args: A callback which transforms a chunk of query
            rows into the argument for a work unit.
        work_unit_result_callback: Called with the result of each work unit.
        start_of_batch_callback: Called with the running row offset before each
            batch is processed.
        end_of_batch_callback: Called with the running row offset after each
            batch is processed.
        batch_size: Number of rows fetched from the database per batch.
        rows_per_work_unit: Number of rows handed to each work unit call.
        start_at: Row offset at which to begin counting.
        pool: The multiprocessing pool to use; a new Pool is created if None.

    Returns:
        None.
    """
    pool = pool or multiprocessing.Pool()
    i = start_at
    row_batches = sqlutil.OffsetLimitBatchedQuery(query, batch_size=batch_size)

    for batch in row_batches:
        rows_batch = batch.rows
        start_of_batch_callback(i)

        # Chunk this batch into rows_per_work_unit-sized work unit inputs.
        # (The comprehension's `i` is scoped to the comprehension in Python 3
        # and does not clobber the running offset above.)
        work_unit_args = [
            generate_work_unit_args(rows_batch[i : i + rows_per_work_unit])
            for i in range(0, len(rows_batch), rows_per_work_unit)
        ]

        for result in pool.starmap(work_unit, work_unit_args):
            work_unit_result_callback(result)

        i += len(rows_batch)
        end_of_batch_callback(i)
class _ForcedNonDaemonProcess(multiprocessing.Process):
    """A multiprocessing.Process whose daemon flag always reads as False."""

    @property
    def daemon(self):
        # Report non-daemonic regardless of what was assigned.
        return False

    @daemon.setter
    def daemon(self, value):
        # Silently ignore attempts to mark the process daemonic.
        pass
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class UnsafeNonDaemonPool(multiprocessing.pool.Pool):
  """A multiprocessing.Pool where the processes are not daemons.

  Python's multiprocessing Pool creates daemonic processes. Deamonic processes
  are killed automatically when the parent process terminates. This is nice
  behaviour as it prevents orphaned processes lying around. However, a downside
  of daemonic processes is that they cannot create child processes. E.g. a
  worker in an parallel map cannot create child processes. This is occasionally
  desirable. For cases where you need non-daemonic processes, use this class.

  Disclaimer: USING THIS CLASS CAN RESULT IN ORPHAN PROCESSES IF YOU DO NOT
  EXPLICITLY CLOSE THE POOL!

  Example usage:

    pool = ppar.UnsafeNonDaemonPool(5)
    try:
      # go nuts ...
    finally:
      pool.close()
      pool.join()
  """

  # NOTE(review): overriding `Process` as a class attribute works on older
  # CPython; newer versions resolve Process through a context object, so
  # confirm this against the targeted Python version.
  Process = _ForcedNonDaemonProcess
class ThreadedIterator:
  """Wrap an iterable and compute its elements in a background thread.

  Elements are buffered in a bounded queue so they are ready to be consumed.
  Exceptions raised while producing are re-raised in the consuming thread.
  """

  def __init__(
    self,
    iterator: typing.Iterable[typing.Any],
    max_queue_size: int = 2,
    start: bool = True,
  ):
    self._queue = queue.Queue(maxsize=max_queue_size)
    self._thread = threading.Thread(target=lambda: self.worker(iterator))
    if start:
      self.Start()

  def Start(self):
    """Begin producing elements in the background thread."""
    self._thread.start()

  def worker(self, iterator):
    """Producer loop: feed the queue, then post a tombstone marker."""
    try:
      for element in iterator:
        self._queue.put(self._ValueOrError(value=element), block=True)
    except Exception as e:
      # Capture the error so the consumer can re-raise it.
      self._queue.put(self._ValueOrError(error=e))
    # Signal end-of-stream whether or not an error occurred.
    self._queue.put(self._EndOfIterator(), block=True)

  def __iter__(self):
    while True:
      item = self._queue.get(block=True)
      if isinstance(item, self._EndOfIterator):
        break
      # Re-raises the producer's exception, if any.
      yield item.GetOrRaise()
    self._thread.join()

  class _EndOfIterator(object):
    """Tombstone marker object for iterators."""

    pass

  class _ValueOrError(typing.NamedTuple):
    """A tuple which represents the union of either a value or an error."""

    value: typing.Any = None
    error: Exception = None

    def GetOrRaise(self) -> typing.Any:
      """Return the value or raise the exception."""
      if self.error is not None:
        raise self.error
      return self.value
| StarcoderdataPython |
335375 | <reponame>BenMussay/Data-Independent-Neural-Pruning-via-Coresets<filename>coreset.py
from typing import Callable, Tuple, Union
import sys
import torch
import numpy as np
class Coreset:
    """Sensitivity-based sampler for neuron coresets.

    Rows of *points* are sampled with probability proportional to their
    sensitivity, yielding a smaller, re-weighted set of rows. ``upper_bound``
    (beta) scales the row norms fed to the activation function.
    """

    def __init__(self, points, weights, activation_function: Callable, upper_bound: int = 1):
        assert points.shape[0] == weights.shape[0]
        # Work on CPU copies; the sampling below goes through numpy.
        self.__points = points.cpu()
        self.__weights = weights.cpu()
        self.__activation = activation_function
        self.__beta = upper_bound
        # Lazily-computed sampling distribution (see the property below).
        self.__sensitivity = None
        # Row indices chosen by the most recent compute_coreset() call.
        self.indices = None

    @property
    def sensitivity(self):
        """Normalized per-row sampling probabilities (computed once, cached)."""
        if self.__sensitivity is None:
            points_norm = self.__points.norm(dim=1)
            assert points_norm.shape[0] == self.__points.shape[0]
            weights = torch.abs(self.__weights).max(dim=1)[0]  # max returns (values, indices)
            assert weights.shape[0] == self.__points.shape[0]
            self.__sensitivity = weights * torch.abs(self.__activation(self.__beta * points_norm))
            # Normalize so the sensitivities form a probability distribution.
            self.__sensitivity /= self.__sensitivity.sum()
        return self.__sensitivity

    def compute_coreset(self, coreset_size):
        """Sample ``coreset_size`` distinct rows and return (rows, new weights)."""
        assert coreset_size <= self.__points.shape[0]
        prob = self.sensitivity.cpu().numpy()
        indices = set()
        idxs = []
        cnt = 0
        # Draw one index at a time (with replacement) until coreset_size
        # distinct rows have been seen; cnt counts the total number of draws.
        while len(indices) < coreset_size:
            i = np.random.choice(a=self.__points.shape[0], size=1, p=prob).tolist()[0]
            idxs.append(i)
            indices.add(i)
            cnt += 1
        # Multiplicity of each drawn index across all draws.
        hist = np.histogram(idxs, bins=range(self.__points.shape[0] + 1))[0].flatten()
        idxs = np.nonzero(hist)[0]
        self.indices = idxs
        coreset = self.__points[idxs, :]
        # Importance-sampling re-weighting: weight * multiplicity / (prob * draws).
        weights = (self.__weights[idxs].t() * torch.tensor(hist[idxs]).float()).t()
        weights = (weights.t() / (torch.tensor(prob[idxs]) * cnt)).t()
        return coreset, weights
def compress_fc_layer(layer1: Tuple[torch.Tensor, torch.Tensor],
layer2: Tuple[torch.Tensor, torch.Tensor],
compressed_size,
activation: Callable,
upper_bound,
device,
compression_type):
num_neurons = layer1[1].shape[0]
if compression_type == "Coreset":
points = np.concatenate(
(layer1[0].cpu().numpy(), layer1[1].view(num_neurons, 1).cpu().numpy()),
axis=1)
points = torch.tensor(points)
weights = layer2[0].t()
coreset = Coreset(points=points, weights=weights, activation_function=activation, upper_bound=upper_bound)
points, weights = coreset.compute_coreset(compressed_size)
indices = coreset.indices
layer1 = (points[:, :-1].to(device), points[:, 1].to(device))
weights = weights.t()
layer2 = (weights.to(device), layer2[1].to(device))
elif compression_type == "Uniform":
indices = np.random.choice(num_neurons, size=compressed_size, replace=False)
layer1 = (layer1[0][indices, :, :, :], layer1[1][indices])
layer2 = (layer2[0][:, indices, :, :], layer2[1])
elif compression_type == "Top-K":
indices = torch.topk(torch.norm(layer1[0], dim=1), k=compressed_size)[1]
layer1 = (layer1[0][indices, :], layer1[1][indices])
layer2 = (layer2[0][:, indices], layer2[1])
else:
sys.exit("There is not a compression type: {}".format(compression_type))
return layer1, layer2, indices
| StarcoderdataPython |
6652642 | # Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import logging
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
from servicex.did_finder.rucio_adapter import RucioAdapter
from servicex_did_finder_lib import add_did_finder_cnd_arguments, start_did_finder
from servicex.did_finder.lookup_request import LookupRequest
def run_rucio_finder():
    """Configure and launch the rucio DID finder service."""
    logger = logging.getLogger()

    # Command-line setup: one local flag plus the shared DID-finder options.
    parser = argparse.ArgumentParser()
    parser.add_argument("--report-logical-files", action="store_true")
    add_did_finder_cnd_arguments(parser)
    args = parser.parse_args()

    prefix = args.prefix
    logger.info(f"ServiceX DID Finder starting up. Prefix: {prefix}")
    if args.report_logical_files:
        logger.info("---- DID Finder Only Returning Logical Names, not replicas -----")

    # Rucio clients are shared by every lookup request.
    rucio_adapter = RucioAdapter(DIDClient(), ReplicaClient(), args.report_logical_files)

    try:
        logger.info('Starting rucio DID finder')

        async def callback(did_name, info):
            # One LookupRequest per DID; stream its files back to ServiceX.
            request = LookupRequest(
                did=did_name,
                rucio_adapter=rucio_adapter,
                prefix=prefix,
                request_id=info['request-id']
            )
            for file in request.lookup_files():
                yield file

        start_did_finder('rucio', callback, parsed_args=args)
    finally:
        logger.info('Done running rucio DID finder')
# Script entry point.
if __name__ == "__main__":
    run_rucio_finder()
| StarcoderdataPython |
148916 | <gh_stars>0
"""
Experiment for XGBoost + CF
Aim: To find the best tc(max_depth), mb(min_child_weight), mf(colsample_bytree * 93), ntree
tc: [13, 15, 17]
mb: [5, 7, 9]
mf: [40, 45, 50, 55, 60]
ntree: [160, 180, 200, 220, 240, 260, 280, 300, 320, 340, 360]
Averaging 20 models
Summary
Best
loss ntree
mf 40 45 50 55 60 40 45 50 55 60
tc mb
13 5 0.4471 0.4471 0.4473 0.4471 0.4476 300 300 280 280 260
7 0.4477 0.4475 0.4469 0.4472 0.4481 340 320 300 300 300
9 0.4485 0.4484 0.4487 0.4488 0.4487 360 360 340 340 340
15 5 0.4471 *0.4465* 0.4471 0.4476 0.4478 260 *260* 240 240 240
7 0.4473 0.4468 0.4473 0.4474 0.4478 300 280 260 260 260
9 0.4483 0.4480 0.4483 0.4484 0.4492 340 320 300 300 280
17 5 0.4471 0.4472 0.4474 0.4476 0.4478 240 240 220 220 200
7 0.4474 0.4470 0.4468 0.4475 0.4473 280 260 260 240 240
9 0.4481 0.4480 0.4476 0.4480 0.4486 320 300 280 260 260
Time: 1 day, 7:37:21 on i7-4790k 32G MEM GTX660
"""
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
from datetime import datetime
import os
from sklearn.grid_search import ParameterGrid
import xgboost as xgb
from utility import *
# --- Data loading: Otto-style training set with 93 features + 'target'. ---
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'

training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values
X = training.iloc[:,:93].values

# Use only the first fold of a 5-fold stratified split as the validation set.
kf = StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
for train_idx, valid_idx in kf:
    break
y_train_1 = yMat[train_idx].argmax(1)
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]

# Append count features (CF) before building the DMatrix inputs.
X2, ignore = count_feature(X)
dtrain , dvalid= xgb.DMatrix(X2[train_idx], label = y_train_1), xgb.DMatrix(X2[valid_idx])
#
# --- Grid-search settings (see module docstring for the summary). ---
nIter = 20
nt = 360
nt_lst = list(range(160, 370, 20))
nt_len = len(nt_lst)
bf = .8 # subsample
sh = .1 # eta
# tc:max_depth, mb:min_child_weight, mf(max features):colsample_bytree * 93
param_grid = {'tc':[13, 15, 17], 'mb':[5, 7, 9], 'mf':[40, 45, 50, 55, 60]}

scores = []
t0 = datetime.now()
for params in ParameterGrid(param_grid):
    tc = params['tc']
    mb = params['mb']
    mf = params['mf']
    cs = float(mf) / X.shape[1]
    print(tc, mb, mf)
    # Accumulated predictions per ntree setting, for model averaging.
    predAll = [np.zeros(y_valid.shape) for k in range(nt_len)]
    for i in range(nIter):
        # Vary the seed per model so the averaged ensemble is diverse.
        seed = 112233 + i
        param = {'bst:max_depth':tc, 'bst:eta':sh,'objective':'multi:softprob','num_class':9,
                 'min_child_weight':mb, 'subsample':bf, 'colsample_bytree':cs,
                 'silent':1, 'nthread':8, 'seed':seed}
        plst = list(param.items())
        bst = xgb.train(plst, dtrain, nt)
        # Evaluate the same boosted model at several tree counts.
        for s in range(nt_len):
            ntree = nt_lst[s]
            pred = bst.predict(dvalid, ntree_limit = ntree).reshape(y_valid.shape)
            predAll[s] += pred
            scores.append({'tc':tc, 'mb':mb, 'mf':mf, 'ntree':ntree, 'nModels':i+1, 'seed':seed,
                           'valid':log_loss(y_valid, pred),
                           'valid_avg':log_loss(y_valid, predAll[s] / (i+1))})
        print(scores[-4], datetime.now() - t0)

# Persist the raw grid-search log.
df = pd.DataFrame(scores)
if os.path.exists(path_log) is False:
    print('mkdir', path_log)
    os.mkdir(path_log)

df.to_csv(path_log + 'exp_XGB_CF_tc_mb_mf_ntree.csv')

# --- Summaries: best averaged loss (and best ntree) per parameter triple. ---
keys = ['tc', 'mb', 'mf', 'ntree']
grouped = df.groupby(keys)
pd.set_option('display.precision', 5)
print(pd.DataFrame({'loss':grouped['valid_avg'].last().unstack().min(1),
                    'ntree':grouped['valid_avg'].last().unstack().idxmin(1)}).unstack())
#             loss                                  ntree
# mf            40      45      50      55      60     40   45   50   55   60
# tc mb
# 13 5      0.4471  0.4471  0.4473  0.4471  0.4476    300  300  280  280  260
#    7      0.4477  0.4475  0.4469  0.4472  0.4481    340  320  300  300  300
#    9      0.4485  0.4484  0.4487  0.4488  0.4487    360  360  340  340  340
# 15 5      0.4471  0.4465  0.4471  0.4476  0.4478    260  260  240  240  240
#    7      0.4473  0.4468  0.4473  0.4474  0.4478    300  280  260  260  260
#    9      0.4483  0.4480  0.4483  0.4484  0.4492    340  320  300  300  280
# 17 5      0.4471  0.4472  0.4474  0.4476  0.4478    240  240  220  220  200
#    7      0.4474  0.4470  0.4468  0.4475  0.4473    280  260  260  240  240
#    9      0.4481  0.4480  0.4476  0.4480  0.4486    320  300  280  260  260

print(pd.DataFrame({'loss':grouped['valid'].mean().unstack().min(1),
                    'ntree':grouped['valid'].mean().unstack().idxmin(1)}).unstack())
#             loss                                  ntree
# mf            40      45      50      55      60     40   45   50   55   60
# tc mb
# 13 5      0.4563  0.4564  0.4564  0.4561  0.4566    280  260  260  260  240
#    7      0.4565  0.4563  0.4557  0.4561  0.4569    320  300  300  300  280
#    9      0.4571  0.4569  0.4571  0.4573  0.4570    340  340  320  300  300
# 15 5      0.4567  0.4559  0.4565  0.4571  0.4571    260  240  240  220  220
#    7      0.4565  0.4558  0.4562  0.4564  0.4568    280  260  260  260  240
#    9      0.4570  0.4567  0.4570  0.4570  0.4577    300  300  280  280  260
# 17 5      0.4568  0.4569  0.4570  0.4572  0.4574    220  220  200  200  200
#    7      0.4567  0.4563  0.4559  0.4567  0.4564    260  240  240  220  220
#    9      0.4571  0.4569  0.4565  0.4567  0.4573    280  280  260  260  240
#
# --- Learning curve for the best setting (tc=15, mb=5, mf=45). ---
criterion = df.apply(lambda x: x['tc']==15 and x['mb']==5 and x['mf']==45, axis = 1)
grouped = df[criterion].groupby('ntree')
g = grouped[['valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print(g)
#           valid  valid_avg
# ntree
# 160    0.461023   0.452912
# 180    0.458513   0.450111
# 200    0.456939   0.448232
# 220    0.456147   0.447141
# 240    0.455870   0.446598
# 260    0.456097   0.446525
# 280    0.456657   0.446827
# 300    0.457434   0.447327
# 320    0.458462   0.448101
# 340    0.459635   0.449036
# 360    0.460977   0.450160

ax = g.plot()
ax.set_title('XGB+CF max_depth=15\n min_child_weight=5, colsample_bytree=45/93.')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_XGB_CF_tc_mb_mf_ntree.png')
11228011 | <reponame>Nutzer001/greenaddress
from zipfile import ZipFile
from io import BytesIO
import base64
from pycoin.encoding import to_bytes_32
from pycoin.tx.Tx import Tx, SIGHASH_ALL
from gaservices.utils.btc_ import tx_segwit_hash
from gaservices.utils import inscript
from wallycore import *
def _fernet_decrypt(key, data):
    """Authenticate and decrypt a Fernet-style token with the 32-byte *key*.

    The first 16 key bytes are the HMAC key; the last 16 are the AES-CBC key.
    Token layout (assumed from the offsets below): bytes 9-24 are presumably
    the IV, 25..-32 the ciphertext, and the last 32 bytes the HMAC tag --
    TODO confirm against the GreenAddress attachment format.
    """
    # Verify the HMAC tag over everything except the trailing 32 bytes.
    assert hmac_sha256(key[:16], data[:-32]) == data[-32:]
    res = bytearray(len(data[25:-32]))
    written = aes_cbc(key[16:], data[9:25], data[25:-32], AES_FLAG_DECRYPT, res)
    # Decryption strips padding, so at most one AES block fewer bytes is written.
    assert written <= len(res) and len(res) - written <= AES_BLOCK_LEN
    return res[:written]
def _unzip(data, key):
"""Unzip a GreenAddress nlocktimes.zip attachment file.
The file format is double zip encoded with the user's chaincode
"""
all_data = []
if not data.startswith(b'PK'):
all_data.append(data)
else:
# Compressed zip file: unzip it
zf = ZipFile(BytesIO(data))
for f in zf.namelist():
data = b''.join(zf.open(f).readlines())
prefix = b'GAencrypted'
if data.startswith(prefix):
# Encrypted inner zip file: Strip prefix, decrypt and unzip again
encrypted = data[len(prefix):]
all_data.extend(_unzip(_fernet_decrypt(key, encrypted), key))
else:
all_data.append(data)
return all_data
def private_key_to_wif(key, testnet):
    """Serialize the private key of a BIP32 node in WIF.

    The trailing 0x01 marks the key as corresponding to a compressed public
    key; the version byte selects testnet (0xEF) or mainnet (0x80).
    """
    version = b'\xef' if testnet else b'\x80'
    payload = version + bip32_key_get_priv_key(key) + b'\x01'
    return base58check_from_bytes(payload)
P2SH_P2WSH_FORTIFIED_OUT = SEGWIT = 14
class PassiveSignatory:
    """Represent a signatory for which the keys are not known, only the signature

    For use where a transaction has been partially signed. Instances of this class represent the
    known signatures
    """

    def __init__(self, signature):
        # Pre-computed signature bytes, produced elsewhere.
        self.signature = signature

    def get_signature(self, sighash):
        # The signature already exists, so the sighash is ignored.
        return self.signature
class ActiveSignatory:
    """Active signatory for which the keys are known, capable of signing arbitrary data."""

    def __init__(self, key):
        self.key = key

    def get_signature(self, sighash):
        # Sign the digest, DER-encode, and append the SIGHASH_ALL byte.
        raw_sig = ec_sig_from_bytes(self.key, sighash, EC_FLAG_ECDSA)
        return ec_sig_to_der(raw_sig) + bytearray([SIGHASH_ALL, ])
def sign(txdata, signatories):
    """Sign every input of the transaction in *txdata* with all *signatories*.

    txdata: dict with 'tx' (hex transaction) and per-prevout lists of scripts,
            script types and (for segwit) values.
    signatories: ordered list of objects exposing get_signature(sighash).
    Returns the signed pycoin Tx.
    """
    tx = Tx.from_hex(txdata['tx'])
    for prevout_index, txin in enumerate(tx.txs_in):
        script = hex_to_bytes(txdata['prevout_scripts'][prevout_index])
        script_type = txdata['prevout_script_types'][prevout_index]
        if script_type == SEGWIT:
            # Segwit digests also commit to the value of the spent output.
            value = int(txdata['prevout_values'][prevout_index])
            sighash = tx_segwit_hash(tx, prevout_index, script, value)
        else:
            sighash = to_bytes_32(tx.signature_hash(script, prevout_index, SIGHASH_ALL))
        # Signature order must match the order of pubkeys in the script.
        signatures = [signatory.get_signature(sighash) for signatory in signatories]
        if script_type == SEGWIT:
            # Leading b'' is the dummy element consumed by CHECKMULTISIG.
            tx.set_witness(prevout_index, [b'', ] + signatures + [script, ])
            txin.script = inscript.witness(script)
        else:
            txin.script = inscript.multisig(script, signatures)
    return tx
def countersign(txdata, private_key):
    """Add the user's signature to a transaction GreenAddress already signed."""
    # GreenAddress signed first; its signature is embedded in txdata.
    ga_signatory = PassiveSignatory(hex_to_bytes(txdata['prevout_signatures'][0]))
    # The user signs actively with the derived private key.
    user_signatory = ActiveSignatory(bip32_key_get_priv_key(private_key))
    return sign(txdata, [ga_signatory, user_signatory])
def derive_hd_key(root, path, flags=0):
    """Derive the BIP32 child of *root* along *path*.

    BIP32_FLAG_SKIP_HASH skips the pubkey-hash computation (presumably for
    speed; callers here only need the key material).
    """
    return bip32_key_from_parent_path(root, path, flags | BIP32_FLAG_SKIP_HASH)
def get_subaccount_path(subaccount):
    """Return the BIP32 path components for a GreenAddress subaccount.

    The main account (0) lives at the wallet root, so its path is empty;
    every other subaccount lives under 3'/subaccount' (both hardened).
    """
    if subaccount == 0:
        return []
    HARDENED = 0x80000000
    return [HARDENED | 3, HARDENED | subaccount]
def derive_user_private_key(txdata, wallet, branch):
    """Derive the user's HD key for the first prevout of *txdata*."""
    # Missing subaccount/pointer values default to 0 (the main account/root).
    subaccount = txdata['prevout_subaccounts'][0] or 0
    pointer = txdata['prevout_pointers'][0] or 0
    full_path = get_subaccount_path(subaccount) + [branch, pointer]
    return derive_hd_key(wallet, full_path)
| StarcoderdataPython |
'''
Santa's workshop problem (translated from the Portuguese statement):

Each hired elf works H hours per day (1 <= H <= 24) in exactly one of four
work groups, and each group needs a fixed number of pooled hours to finish
one present of its type:
  * bonecos (dolls):         8 hours
  * arquitetos (architects): 4 hours
  * musicos (musicians):     6 hours
  * desenhistas (artists):  12 hours

Input: an integer N (1 <= N <= 1000), then N lines "name group hours".
Output: the total number of complete presents produced per day.
'''
# Hours required per present for each work group.
HOURS_PER_PRESENT = {'bonecos': 8, 'arquitetos': 4, 'musicos': 6, 'desenhistas': 12}

# Pool the daily hours contributed to each group.
group_hours = {group: 0 for group in HOURS_PER_PRESENT}
n = int(input())
for _ in range(n):
    tokens = input().split()  # name, group, hours
    group = tokens[1]
    if group in group_hours:
        group_hours[group] += int(tokens[2])

# Each group finishes floor(total hours / hours per present) presents.
total_presents = sum(hours // HOURS_PER_PRESENT[group]
                     for group, hours in group_hours.items())
print(total_presents)
6502463 | #!/usr/bin/env python2.7
"""
UNC Best Practice RNA-Seq Pipeline
Author: <NAME>
Affiliation: UC Santa Cruz Genomics Institute
Please see the README.md in the same directory
Tree Structure of RNA-Seq Pipeline (per sample)
0---> 2
| |
1 3 - - - - -> Consolidate Output -> Upload_to_S3
/ \
*4 5
|
6
/ \
*7 8
/ \
*9 10
|
11
|
12
|
*13
0 = Start Node
1 = Download Sample
2 = Unzip
3 = Mapsplice
4 = Mapping Stats (not currently included)
5 = Add Read Groups
6 = Bamsort and Index
7 = Rseq-QC
8 = Sort Bam by Reference
9 = Exon Quantification
10 = Transcriptome
11 = Filter
12 = RSEM
13 = RSEM Post-Process
7,9,13 contribute to producing the final output
Dependencies
Curl: apt-get install curl
Docker: apt-get install docker.io # docker.io if using linux, o.w. just docker
Samtools: apt-get install samtools
Unzip: apt-get install unzip
Toil: pip install git+https://github.com/BD2KGenomics/toil.git
Optional
Boto: pip install boto
"""
import argparse
import base64
import errno
import glob
import hashlib
import multiprocessing
import os
import shutil
import subprocess
import tarfile
from collections import OrderedDict
from contextlib import closing
from urlparse import urlparse
from toil.job import Job
def build_parser():
    """Build the command-line parser for the RNA-Seq pipeline.

    Exactly one input source must be given: --config (uuid,url CSV),
    --input (a local uuid.tar) or --config_fastq (uuid,R1,R2 CSV).
    The remaining options point at reference inputs and output/upload targets.
    """
    parser = argparse.ArgumentParser(description=main.__doc__, add_help=True)
    # Mutually exclusive input sources; exactly one is required.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--config', default=None, help='Path to config. One sample per line, with the format: '
                                                      'uuid,url_to_sample.tar')
    group.add_argument('--input', default=None, help='Accepts a local sample: /path/to/sample.tar. Take note! The'
                                                     'UUID for this sample is derived from the name. So samples'
                                                     'should be in the form of uuid.tar.')
    group.add_argument('-f', '--config_fastq', default=None,
                       help='Path to CSV. One sample per line with the format: '
                            'uuid,file:///path/to/R_1.fastq,file:///path/to/R_2.fastq')
    parser.add_argument('--single_end_reads', default=False, action='store_true',
                        help='Set this flag if input data is non-paired (single end reads).')
    # Reference inputs (default to the CGL public S3 bucket).
    parser.add_argument('--unc', help='URL to unc_hg19.bed',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/unc_hg19.bed')
    parser.add_argument('--fasta', help='URL to hg19_M_rCRS_ref.transcripts.fa',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/hg19_M_rCRS_ref.transcripts.fa')
    parser.add_argument('--composite_exons', help='URL to composite_exons.bed',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/composite_exons.bed')
    parser.add_argument('--normalize', help='URL to normalizeBedToolsExonQuant.pl',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/normalizeBedToolsExonQuant.pl')
    parser.add_argument('--rsem_ref', help='RSEM_REF URL',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/rsem_ref.zip')
    parser.add_argument('--chromosomes', help='Chromosomes Directory',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/chromosomes.zip')
    parser.add_argument('--ebwt', help='EBWT Directory',
                        default='https://s3-us-west-2.amazonaws.com/cgl-pipeline-inputs/rna-seq/ebwt.zip')
    # Encryption / output / upload options.
    parser.add_argument('--ssec', help='Path to Key File for SSE-C Encryption')
    parser.add_argument('--output_dir', default=None, help='full path where final results will be output')
    parser.add_argument('--upload_bam_to_s3', default=False, action='store_true',
                        help='uploads alignment bam to S3 directory specified.')
    parser.add_argument('--s3_dir', default=None, help='S3 Directory, starting with bucket name. e.g.: '
                                                       'cgl-driver-projects/ckcc/rna-seq-samples/')
    parser.add_argument('--sudo', dest='sudo', action='store_true', default=False,
                        help='Docker usually needs sudo to execute locally, but not when running Mesos or when '
                             'the user is a member of a Docker group.')
    return parser
# Convenience functions used in the pipeline
def mkdir_p(path):
    """Create *path* and any missing parents; succeed if it already exists.

    Equivalent to ``mkdir -p``: try the creation first (EAFP) and swallow
    only the "already exists as a directory" failure.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything other than "already exists and is a directory".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def flatten(x):
    """
    Flattens a nested list/tuple into a single flat list.

    Strings are treated as atoms, not iterated character by character.

    x: list/tuple The nested list/tuple to be flattened.
    """
    # BUG FIX: `basestring` only exists on Python 2; fall back to `str` so the
    # function does not raise NameError (and split strings) on Python 3.
    try:
        string_types = basestring  # noqa: F821 -- Python 2 only
    except NameError:
        string_types = str
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, string_types):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
def which(program):
    """Return the full path of *program* if it is executable, else None.

    Mirrors the shell `which`: an argument containing a directory component
    is checked directly; otherwise every entry of $PATH is searched in order.
    """
    # (Removed a redundant `import os` that shadowed the module-level import.)
    def is_exe(f):
        return os.path.isfile(f) and os.access(f, os.X_OK)

    fpath, _ = os.path.split(program)
    if fpath:
        # Explicit path given: check it directly.
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
def generate_unique_key(master_key_path, url):
    """
    master_key_path: str Path to the BD2K Master Key (for S3 Encryption)
    url: str S3 URL (e.g. https://s3-us-west-2.amazonaws.com/bucket/file.txt)

    Returns: str 32-byte unique key generated for that URL
    """
    # NOTE(review): this module targets Python 2.7 (see shebang); on Python 3
    # f.read() returns str and hashlib requires bytes, so this would need
    # explicit encoding -- confirm the runtime before porting.
    with open(master_key_path, 'r') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
                                  'Key: {}, Length: {}'.format(master_key, len(master_key))
    # Derive a per-URL key so each object is encrypted with a distinct key.
    new_key = hashlib.sha256(master_key + url).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key
def download_encrypted_file(job, input_args, name):
    """
    Downloads encrypted files from S3 via header injection

    input_args: dict Input dictionary defined in main()
    name: str Symbolic name associated with file

    Returns the FileStore ID of the downloaded file.
    """
    work_dir = job.fileStore.getLocalTempDir()
    key_path = input_args['ssec']
    file_path = os.path.join(work_dir, name)
    url = input_args[name]
    # Validate the master key, then derive the per-URL key (SSE-C).
    with open(key_path, 'r') as f:
        key = f.read()
    if len(key) != 32:
        raise RuntimeError('Invalid Key! Must be 32 bytes: {}'.format(key))
    key = generate_unique_key(key_path, url)
    # SSE-C requires the key and its MD5, both base64-encoded, in the headers.
    encoded_key = base64.b64encode(key)
    encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest())
    h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
    h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
    h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
    try:
        subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path])
    except OSError:
        raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
    assert os.path.exists(file_path)
    return job.fileStore.writeGlobalFile(file_path)
def download_from_url(job, url):
    """
    Simple curl request made for a given url

    url: str URL to download (s3:// URLs go through boto instead of curl)

    Returns the FileStore ID of the downloaded file.
    """
    work_dir = job.fileStore.getLocalTempDir()
    file_path = os.path.join(work_dir, os.path.basename(url))
    if not os.path.exists(file_path):
        if url.startswith('s3:'):
            download_from_s3_url(file_path, url)
        else:
            try:
                subprocess.check_call(['curl', '-fs', '--retry', '5', '--create-dir', url, '-o', file_path])
            except OSError:
                raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
    assert os.path.exists(file_path)
    return job.fileStore.writeGlobalFile(file_path)
def return_input_paths(job, work_dir, ids, *args):
    """Materialize each named file into work_dir and return its path(s).

    Files already present in work_dir are not fetched again; missing ones are
    read out of the FileStore by their stored ID. With a single name the bare
    path is returned, otherwise the paths in argument order.

    work_dir: str Current working directory
    ids: dict Dictionary of fileStore IDs
    *args: str(s) File names to place in work_dir
    """
    paths = OrderedDict()
    for name in args:
        file_path = os.path.join(work_dir, name)
        if not os.path.exists(file_path):
            file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
        paths[name] = file_path
    # Historical convenience: unwrap when a single file was requested.
    return file_path if len(args) == 1 else paths.values()
def docker_path(filepath):
    """
    Given a path, returns that files path inside the docker mount directory (/data).
    """
    # Only the basename matters: the work dir is bind-mounted at /data.
    return os.path.join('/data', os.path.basename(filepath))
def docker_call(work_dir, tool_parameters, tool, java_opts=None, outfile=None, sudo=False):
    """
    Makes subprocess call of a command to a docker container.

    work_dir: str Directory bind-mounted into the container at /data
    tool_parameters: list An array of the parameters to be passed to the tool
    tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)
    java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')
    outfile: file Filehandle that stdout will be redirected to
    sudo: bool If the user wants the docker command executed as sudo

    Raises RuntimeError if docker is missing or the command exits non-zero.
    """
    # --rm removes the container on exit; work_dir is visible as /data inside.
    base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if sudo:
        base_docker_call = ['sudo'] + base_docker_call
    if java_opts:
        base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]
    try:
        if outfile:
            subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)
        else:
            subprocess.check_call(base_docker_call + [tool] + tool_parameters)
    except subprocess.CalledProcessError:
        raise RuntimeError('docker command returned a non-zero exit status. Check error logs.')
    except OSError:
        raise RuntimeError('docker not found on system. Install on all nodes.')
def copy_to_output_dir(work_dir, output_dir, uuid=None, files=None):
    """
    Copies a list of files from work_dir to output_dir.

    work_dir: str Current working directory
    output_dir: str Output directory for files to go
    uuid: str UUID to "stamp" onto output files (None keeps original names)
    files: list List of file names (relative to work_dir) to copy
    """
    # BUG FIX: the default used to be the mutable `files=list()`; use None and
    # substitute a fresh empty list to avoid the shared-default pitfall.
    for fname in files or []:
        src = os.path.join(work_dir, fname)
        if uuid is None:
            shutil.copy(src, os.path.join(output_dir, fname))
        else:
            # Prefix the sample UUID so outputs from different samples differ.
            shutil.copy(src, os.path.join(output_dir, '{}.{}'.format(uuid, fname)))
def tarball_files(work_dir, tar_name, uuid=None, files=None):
    """Bundle the named files from work_dir into a gzipped tarball.

    work_dir: str Current Working Directory
    tar_name: str Name of the tarball created inside work_dir
    uuid: str Optional UUID prepended to each archive member name
    files: str(s) File names (relative to work_dir) to include
    """
    tar_path = os.path.join(work_dir, tar_name)
    with tarfile.open(tar_path, 'w:gz') as archive:
        for fname in files:
            # Stamp the member name with the UUID when one is provided.
            member_name = fname if not uuid else uuid + '.' + fname
            archive.add(os.path.join(work_dir, fname), arcname=member_name)
def download_from_s3_url(file_path, url):
    """Download an s3:// URL to *file_path* using boto.

    Raises RuntimeError for URLs that are not of the form s3://BUCKET/KEY.
    """
    # NOTE(review): this local import shadows the module-level urlparse import.
    from urlparse import urlparse
    from boto.s3.connection import S3Connection
    s3 = S3Connection()
    try:
        parsed_url = urlparse(url)
        if not parsed_url.netloc or not parsed_url.path.startswith('/'):
            raise RuntimeError("An S3 URL must be of the form s3:/BUCKET/ or "
                               "s3://BUCKET/KEY. '%s' is not." % url)
        bucket = s3.get_bucket(parsed_url.netloc)
        key = bucket.get_key(parsed_url.path[1:])
        key.get_contents_to_filename(file_path)
    finally:
        # Always release the connection, even on parse/download failure.
        s3.close()
# Job Functions
def program_checks(job, input_args):
    """Verify required command-line tools exist, then download shared inputs.

    input_args: dict Dictionary of input arguments (from main())
    """
    required_tools = ('curl', 'docker', 'unzip', 'samtools')
    for tool in required_tools:
        assert which(tool), 'Program "{}" must be installed on every node.'.format(tool)
    job.addChildJobFn(download_shared_files, input_args)
def download_shared_files(job, input_args):
    """
    Downloads and stores shared inputs files in the FileStore

    input_args: dict Dictionary of input arguments (from main())
    """
    # Reference files shared by every sample in the run.
    shared_files = ['unc.bed', 'hg19.transcripts.fa', 'composite_exons.bed', 'normalize.pl', 'rsem_ref.zip',
                    'ebwt.zip', 'chromosomes.zip']
    shared_ids = {}
    for f in shared_files:
        shared_ids[f] = job.addChildJobFn(download_from_url, input_args[f]).rv()
    # Fan out: a config file lists many samples, otherwise run the single
    # local sample whose UUID is derived from its file name.
    if input_args['config'] or input_args['config_fastq']:
        job.addFollowOnJobFn(parse_config_file, shared_ids, input_args)
    else:
        sample_path = input_args['input']
        uuid = os.path.splitext(os.path.basename(sample_path))[0]
        sample = (uuid, sample_path)
        job.addFollowOnJobFn(download_sample, shared_ids, input_args, sample)
def parse_config_file(job, ids, input_args):
    """Read the sample config CSV and launch the pipeline for each sample.

    ids: dict Dictionary of fileStore IDs
    input_args: dict Dictionary of input arguments
    """
    config = input_args['config']
    with open(config, 'r') as f:
        # One comma-separated sample per non-blank line.
        samples = [line.strip().split(',') for line in f.readlines() if not line.isspace()]
    for sample in samples:
        job.addChildJobFn(download_sample, ids, input_args, sample)
def download_sample(job, ids, input_args, sample):
    """
    Defines variables unique to a sample that are used in the rest of the pipelines
    ids: dict           Dictionary of fileStore IDS
    input_args: dict    Dictionary of input arguments
    sample: tuple       Contains uuid and sample_url
    """
    # A 2-tuple is (uuid, tarball location); a 3-tuple is (uuid, R1_url, R2_url).
    if len(sample) == 2:
        uuid, sample_location = sample
        url1, url2 = None, None
    else:
        uuid, url1, url2 = sample
        sample_location = None
    # Update values unique to sample
    sample_input = dict(input_args)
    sample_input['uuid'] = uuid
    sample_input['sample.tar'] = sample_location
    if sample_input['output_dir']:
        sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid)
    sample_input['cpu_count'] = multiprocessing.cpu_count()
    job_vars = (sample_input, ids)
    # Download or locate local file and place in the jobStore
    if sample_input['input']:
        # Local tarball supplied directly on the command line.
        ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location))
    elif sample_input['config_fastq']:
        # NOTE(review): urlparse().path only strips the scheme, so these
        # urls are presumably file:// paths to local fastqs — confirm.
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)
        ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)
    else:
        if sample_input['ssec']:
            # Server-side-encrypted download path.
            ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv()
        else:
            ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv()
    job.addFollowOnJobFn(static_dag_launchpoint, job_vars)
def static_dag_launchpoint(job, job_vars):
    """
    Statically define jobs in the pipeline
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    if input_args['config_fastq']:
        # FASTQ inputs skip the tar-merging step and go straight to alignment.
        cores = input_args['cpu_count']
        a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate()
    else:
        a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate()
    # consolidate_output receives the (nested) return value of the whole
    # encapsulated subtree rooted at `a`.
    b = job.wrapJobFn(consolidate_output, job_vars, a.rv())
    # Take advantage of "encapsulate" to simplify pipeline wiring
    job.addChild(a)
    a.addChild(b)
def merge_fastqs(job, job_vars):
    """
    Unzips input sample and concats the Read1 and Read2 groups together.
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cores = input_args['cpu_count']
    single_end_reads = input_args['single_end_reads']
    # I/O
    sample = return_input_paths(job, work_dir, ids, 'sample.tar')
    # Untar File
    # subprocess.check_call(['unzip', sample, '-d', work_dir])
    subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir])
    # Remove large files before creating concat versions.
    os.remove(os.path.join(work_dir, 'sample.tar'))
    # Zcat files in parallel
    if single_end_reads:
        files = sorted(glob.glob(os.path.join(work_dir, '*')))
        with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
            subprocess.check_call(['zcat'] + files, stdout=f1)
        # FileStore
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
    else:
        # Paired-end: decompress R1 and R2 concurrently, then wait on both.
        r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*')))
        r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*')))
        with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:
            p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1)
        with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:
            p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2)
        p1.wait()
        p2.wait()
        # FileStore
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))
        ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))
    # The raw tarball is no longer needed downstream; free jobStore space.
    job.fileStore.deleteGlobalFile(ids['sample.tar'])
    # Spawn child job
    return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv()
def mapsplice(job, job_vars):
    """
    Maps RNA-Seq reads to a reference genome.
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    # Unpack variables
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cores = input_args['cpu_count']
    sudo = input_args['sudo']
    single_end_reads = input_args['single_end_reads']
    # Inputs that can be deleted from the jobStore once alignment succeeds.
    files_to_delete = ['R1.fastq']
    # I/O
    return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip')
    if single_end_reads:
        return_input_paths(job, work_dir, ids, 'R1.fastq')
    else:
        return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq')
        files_to_delete.extend(['R2.fastq'])
    # Unpack the bowtie index and per-chromosome reference sequences.
    for fname in ['chromosomes.zip', 'ebwt.zip']:
        subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir])
    # Command and call -- paths below are container paths (/data == work_dir).
    parameters = ['-p', str(cores),
                  '-s', '25',
                  '--bam',
                  '--min-map-len', '50',
                  '-x', '/data/ebwt',
                  '-c', '/data/chromosomes',
                  '-1', '/data/R1.fastq',
                  '-o', '/data']
    if not single_end_reads:
        parameters.extend(['-2', '/data/R2.fastq'])
    docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e',
                tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
    # Write to FileStore
    for fname in ['alignments.bam', 'stats.txt']:
        ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname))
    for fname in files_to_delete:
        job.fileStore.deleteGlobalFile(ids[fname])
    # Run child job
    # map_id = job.addChildJobFn(mapping_stats, job_vars).rv()
    if input_args['upload_bam_to_s3'] and input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, job_vars)
    output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv()
    return output_ids
def mapping_stats(job, job_vars):
    """
    This function is not currently in use.
    Produces mapping statistics from MapSplice's stats.txt.
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    # Unpack variables
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    sudo = input_args['sudo']
    # I/O
    return_input_paths(job, work_dir, ids, 'stats.txt')
    uuid = input_args['uuid']
    # Command
    docker_call(tool='jvivian/mapping_stats', tool_parameters=[uuid], work_dir=work_dir, sudo=sudo)
    # Zip output files and store
    output_files = ['{}_stats2.txt'.format(uuid), '{}_stats_all.txt'.format(uuid), '{}_mapping.tab'.format(uuid)]
    tarball_files(work_dir, tar_name='map.tar.gz', files=output_files)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'map.tar.gz'))
def add_read_groups(job, job_vars):
    """
    This function adds read groups to the headers
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    sudo = input_args['sudo']
    # I/O
    alignments = return_input_paths(job, work_dir, ids, 'alignments.bam')
    output = os.path.join(work_dir, 'rg_alignments.bam')
    # Command and call (Picard AddOrReplaceReadGroups); sample and id are
    # both set to the sample UUID.
    parameter = ['AddOrReplaceReadGroups',
                 'INPUT={}'.format(docker_path(alignments)),
                 'OUTPUT={}'.format(docker_path(output)),
                 'RGSM={}'.format(input_args['uuid']),
                 'RGID={}'.format(input_args['uuid']),
                 'RGLB=TruSeq',
                 'RGPL=illumina',
                 'RGPU=barcode',
                 'VALIDATION_STRINGENCY=SILENT']
    docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e',
                tool_parameters=parameter, work_dir=work_dir, sudo=sudo)
    # Write to FileStore
    ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output)
    # Run child job
    return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv()
def bamsort_and_index(job, job_vars):
    """
    Sorts bam file and produces index file
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    # Unpack variables
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    sudo = input_args['sudo']
    # I/O
    rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam')
    output = os.path.join(work_dir, 'sorted.bam')
    # Command -- second argument is "Output Prefix" (samtools 0.1.19 syntax,
    # which appends '.bam' itself).
    cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')]
    cmd2 = ['index', docker_path(output)]
    docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
                tool_parameters=cmd1, work_dir=work_dir, sudo=sudo)
    docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e',
                tool_parameters=cmd2, work_dir=work_dir, sudo=sudo)
    # Write to FileStore
    ids['sorted.bam'] = job.fileStore.writeGlobalFile(output)
    ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai'))
    # Run child jobs: reference-sort feeds quantification; QC runs in parallel.
    output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv()
    rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv()
    return rseq_id, output_ids
def rseq_qc(job, job_vars):
    """
    QC module: contains QC metrics and information about the BAM post alignment
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    sudo = input_args['sudo']
    # I/O
    return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
    # Command
    docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid],
                work_dir=work_dir, sudo=sudo)
    # Write to FileStore -- tar everything the QC run produced except the
    # input bam/bai themselves.
    output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f]
    tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz'))
def sort_bam_by_reference(job, job_vars):
    """
    Sorts the bam by reference
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    # Unpack variables
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    # I/O
    sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
    output = os.path.join(work_dir, 'sort_by_ref.bam')
    # Call: Samtools -- read reference sequence names from the @SQ header lines.
    ref_seqs = []
    handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout
    for line in handle:
        if line.startswith("@SQ"):
            tmp = line.split("\t")
            chrom = tmp[1].split(":")[1]  # "SN:<chrom>" -> "<chrom>"
            ref_seqs.append(chrom)
    handle.close()
    # Iterate through chromosomes to create mini-bams, each name-sorted,
    # then concatenate them in reference order.
    for chrom in ref_seqs:
        # job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam)
        cmd_view = ["samtools", "view", "-b", sorted_bam, chrom]
        cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)]
        p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE)
        subprocess.check_call(cmd_sort, stdin=p1.stdout)
    sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs]
    cmd = ["samtools", "cat", "-o", output] + sorted_files
    subprocess.check_call(cmd)
    # Write to FileStore
    ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output)
    # Quantification fans out: RSEM (transcript-level) and exon counts.
    rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv()
    exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv()
    return exon_id, rsem_id
def exon_count(job, job_vars):
    """
    Produces exon counts
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    sudo = input_args['sudo']
    # I/O
    sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
                                                                  'normalize.pl', 'composite_exons.bed')
    # Command: bedtools coverage (in docker) piped into the normalize script.
    tool = 'jvivian/bedtools'
    cmd_1 = ['coverage',
             '-split',
             '-abam', docker_path(sort_by_ref),
             '-b', docker_path(composite_bed)]
    cmd_2 = ['perl',
             os.path.join(work_dir, 'normalize.pl'),
             sort_by_ref,
             composite_bed]
    # Docker is invoked manually (not via docker_call) so its stdout can be
    # piped directly into the perl process.
    popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool]
    if sudo:
        popen_docker = ['sudo'] + popen_docker
    p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE)
    with open(os.path.join(work_dir, 'exon_quant'), 'w') as f:
        subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f)
    # Convert "chrom:start-end" identifiers into tab-separated BED columns.
    p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE)
    p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE)
    with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f:
        subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f)
    # Create zip, upload to fileStore, and move to output_dir as a backup
    output_files = ['exon_quant.bed', 'exon_quant']
    tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz'))
def transcriptome(job, job_vars):
    """
    Creates a bam of just the transcriptome
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    sudo = input_args['sudo']
    # I/O
    sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam',
                                                   'unc.bed', 'hg19.transcripts.fa')
    output = os.path.join(work_dir, 'transcriptome.bam')
    # Command: UBU sam-xlate translates genomic coordinates to transcriptome
    # coordinates using the UNC bed annotation.
    parameters = ['sam-xlate',
                  '--bed', docker_path(bed),
                  '--in', docker_path(sort_by_ref),
                  '--order', docker_path(hg19_fa),
                  '--out', docker_path(output),
                  '--xgtag',
                  '--reverse']
    docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
                tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo)
    # Write to FileStore
    ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output)
    # Run child job
    return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv()
def filter_bam(job, job_vars):
    """
    Performs filtering on the transcriptome bam
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cores = input_args['cpu_count']
    sudo = input_args['sudo']
    # I/O
    transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam')
    output = os.path.join(work_dir, 'filtered.bam')
    # Command: UBU sam-filter drops indels, long inserts and low-mapq reads
    # so RSEM receives a clean transcriptome alignment.
    parameters = ['sam-filter',
                  '--strip-indels',
                  '--max-insert', '1000',
                  '--mapq', '1',
                  '--in', docker_path(transcriptome_bam),
                  '--out', docker_path(output)]
    # os.path.dirname(output) == work_dir here.
    docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843',
                tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo)
    # Write to FileStore
    ids['filtered.bam'] = job.fileStore.writeGlobalFile(output)
    # Run child job
    return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv()
def rsem(job, job_vars):
    """
    Runs RSEM to produce counts
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    cpus = input_args['cpu_count']
    sudo = input_args['sudo']
    single_end_reads = input_args['single_end_reads']
    # I/O
    filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip')
    subprocess.check_call(['unzip', '-o', os.path.join(work_dir, 'rsem_ref.zip'), '-d', work_dir])
    output_prefix = 'rsem'
    # Make tool call to Docker
    parameters = ['--quiet',
                  '--no-qualities',
                  '-p', str(cpus),
                  '--forward-prob', '0.5',
                  '--seed-length', '25',
                  '--fragment-length-mean', '-1.0',
                  '--bam', docker_path(filtered_bam)]
    if not single_end_reads:
        parameters.extend(['--paired-end'])
    # Positional args: reference prefix (inside the unzipped ref dir) and
    # the output prefix.
    parameters.extend(['/data/rsem_ref/hg19_M_rCRS_ref', output_prefix])
    docker_call(tool='quay.io/ucsc_cgl/rsem:1.2.25--4e8d1b31d4028f464b3409c6558fb9dfcad73f88',
                tool_parameters=parameters, work_dir=work_dir, sudo=sudo)
    # Rename to the fixed names the postprocess step expects.
    os.rename(os.path.join(work_dir, output_prefix + '.genes.results'), os.path.join(work_dir, 'rsem_gene.tab'))
    os.rename(os.path.join(work_dir, output_prefix + '.isoforms.results'), os.path.join(work_dir, 'rsem_isoform.tab'))
    # Write to FileStore
    ids['rsem_gene.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_gene.tab'))
    ids['rsem_isoform.tab'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem_isoform.tab'))
    # Run child jobs
    return job.addChildJobFn(rsem_postprocess, job_vars).rv()
def rsem_postprocess(job, job_vars):
    """
    Parses RSEMs output to produce the separate .tab files (TPM, FPKM, counts) for both gene and isoform
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    sudo = input_args['sudo']
    # I/O
    return_input_paths(job, work_dir, ids, 'rsem_gene.tab', 'rsem_isoform.tab')
    # Command
    sample = input_args['uuid']
    docker_call(tool='jvivian/rsem_postprocess', tool_parameters=[sample], work_dir=work_dir, sudo=sudo)
    # Tar output files together and store in fileStore
    output_files = ['rsem.genes.norm_counts.tab', 'rsem.genes.raw_counts.tab', 'rsem.genes.norm_fpkm.tab',
                    'rsem.genes.norm_tpm.tab', 'rsem.isoform.norm_counts.tab', 'rsem.isoform.raw_counts.tab',
                    'rsem.isoform.norm_fpkm.tab', 'rsem.isoform.norm_tpm.tab']
    tarball_files(work_dir, tar_name='rsem.tar.gz', uuid=uuid, files=output_files)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'rsem.tar.gz'))
def consolidate_output(job, job_vars, output_ids):
    """
    Combine the contents of separate zipped outputs into one via streaming
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    output_ids: tuple   Nested tuple of all the output fileStore IDs
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # Retrieve IDs -- flatten() unwinds the nested promise structure built
    # by bamsort_and_index / sort_bam_by_reference.
    rseq_id, exon_id, rsem_id = flatten(output_ids)
    # Retrieve output file paths to consolidate
    # map_tar = job.fileStore.readGlobalFile(map_id, os.path.join(work_dir, 'map.tar.gz'))
    qc_tar = job.fileStore.readGlobalFile(rseq_id, os.path.join(work_dir, 'qc.tar.gz'))
    exon_tar = job.fileStore.readGlobalFile(exon_id, os.path.join(work_dir, 'exon.tar.gz'))
    rsem_tar = job.fileStore.readGlobalFile(rsem_id, os.path.join(work_dir, 'rsem.tar.gz'))
    # I/O
    out_tar = os.path.join(work_dir, uuid + '.tar.gz')
    # Consolidate separate tarballs.  NOTE(review): out_tar is already
    # absolute, so the extra os.path.join is a harmless no-op.
    with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
        for tar in [rsem_tar, exon_tar, qc_tar]:
            with tarfile.open(tar, 'r') as f_in:
                for tarinfo in f_in:
                    with closing(f_in.extractfile(tarinfo)) as f_in_file:
                        # Re-home every member under <uuid>/ (QC files get
                        # their own rseq_qc/ subdirectory).
                        if tar == qc_tar:
                            tarinfo.name = os.path.join(uuid, 'rseq_qc', os.path.basename(tarinfo.name))
                        else:
                            tarinfo.name = os.path.join(uuid, os.path.basename(tarinfo.name))
                        f_out.addfile(tarinfo, fileobj=f_in_file)
    # Move to output directory of selected
    if input_args['output_dir']:
        output_dir = input_args['output_dir']
        mkdir_p(output_dir)
        copy_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.tar.gz'])
    # Write output file to fileStore
    ids['uuid.tar.gz'] = job.fileStore.writeGlobalFile(out_tar)
    # If S3 bucket argument specified, upload to S3
    if input_args['s3_dir']:
        job.addChildJobFn(upload_output_to_s3, job_vars)
def upload_output_to_s3(job, job_vars):
    """
    If s3_dir is specified in arguments, file will be uploaded to S3 using boto.
    WARNING: ~/.boto credentials are necessary for this to succeed!
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    import boto
    from boto.s3.key import Key
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # Parse s3_dir: first path component is the bucket, the rest is the prefix.
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.split('/')[0]
    bucket_dir = '/'.join(s3_dir.split('/')[1:])
    # I/O
    uuid_tar = return_input_paths(job, work_dir, ids, 'uuid.tar.gz')
    # Upload to S3 via boto
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    k.key = os.path.join(bucket_dir, uuid + '.tar.gz')
    k.set_contents_from_filename(uuid_tar)
def upload_bam_to_s3(job, job_vars):
    """
    Upload bam to S3. Requires S3AM and a ~/.boto config file.
    job_vars: tuple     Tuple of dictionaries: input_args and ids
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # I/O
    job.fileStore.readGlobalFile(ids['alignments.bam'], os.path.join(work_dir, 'alignments.bam'))
    bam_path = os.path.join(work_dir, 'alignments.bam')
    sample_name = uuid + '.bam'
    # Parse s3_dir to get bucket and s3 path; bams live under bam_files/.
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.split('/')[0]
    bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files')
    # Upload to S3 via S3AM (multipart-capable CLI uploader).
    s3am_command = ['s3am',
                    'upload',
                    'file://{}'.format(bam_path),
                    os.path.join('s3://', bucket_name, bucket_dir, sample_name)]
    subprocess.check_call(s3am_command)
def main():
    """
    This is a Toil pipeline for the UNC best practice RNA-Seq analysis.
    RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified.
    Please read the README.md located in the same directory.
    """
    # Define Parser object and add to toil
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Store inputs from argparse.  Keys double as shared-file names used by
    # download_shared_files / return_input_paths; uuid, sample.tar and
    # cpu_count are filled in per-sample later.
    inputs = {'config': args.config,
              'config_fastq': args.config_fastq,
              'input': args.input,
              'unc.bed': args.unc,
              'hg19.transcripts.fa': args.fasta,
              'composite_exons.bed': args.composite_exons,
              'normalize.pl': args.normalize,
              'output_dir': args.output_dir,
              'rsem_ref.zip': args.rsem_ref,
              'chromosomes.zip': args.chromosomes,
              'ebwt.zip': args.ebwt,
              'ssec': args.ssec,
              's3_dir': args.s3_dir,
              'sudo': args.sudo,
              'single_end_reads': args.single_end_reads,
              'upload_bam_to_s3': args.upload_bam_to_s3,
              'uuid': None,
              'sample.tar': None,
              'cpu_count': None}
    # Launch jobs.  NOTE(review): the root is download_shared_files, so the
    # program_checks job defined above is never wired in -- confirm intent.
    Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)
if __name__ == "__main__":
main()
| StarcoderdataPython |
4865127 | import threading
from time import sleep
from requests import get, exceptions
from re import sub
from re import compile as Compile
from json import loads
from os import mkdir, path
from bs4 import BeautifulSoup as bs
from hashlib import md5
# HTTP header templates for the different bilibili endpoints.
# default_headers: generic requests; api_headers / page_headers pin the Host
# for the API and the www play pages; old_url_headers targets the legacy
# interface.bilibili.com playurl endpoint.
default_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
}
api_headers = {
    'Host': 'api.bilibili.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
}
page_headers = {
    'Host': 'www.bilibili.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
}
old_url_headers={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
    'Host':'interface.bilibili.com',
    'Connection':'keep-alive',
}
def old_url(CID,QUALITY=64):
    """Fetch flv download URL(s) for a video via the legacy playurl API.

    QUALITY=64 corresponds to 720p.  Returns a list of segment URLs, or
    implicitly None when the API call fails (the AttributeError path).
    """
    # The appkey:secret pair is stored reversed and character-shifted by -2;
    # undo both transforms to recover "appkey:secret".
    # NOTE(review): '<KEY>' looks like a redacted placeholder, so the
    # computed sign cannot be valid as committed -- confirm.
    keystr='<KEY>'[::-1]
    tempstr=''
    for letter in keystr:
        addstr=chr(ord(letter)+2)
        tempstr+=addstr
    keystr1,keystr2=tempstr.split(":")
    # Build the query string and md5-sign it (query + secret) per API spec.
    keystr3=f"appkey={keystr1}&cid={CID}&otype=json&qn={QUALITY}&quality={QUALITY}&type="
    keystr4=md5(bytes(keystr3+keystr2,'utf-8')).hexdigest()
    geturl=f"https://interface.bilibili.com/v2/playurl?{keystr3}&sign={keystr4}"
    try:
        htmljson=Go(geturl,old_url_headers).json()
        # print(htmljson)
    except AttributeError:
        # Go() exhausted its retries; print a red error message.
        print(f'\x1b[4;31m访问旧视频地址{CID}失败\x1b[0m')
        # error_container_list.append(f'访问旧视频地址失败:{CID}')
    else:
        # 'durl' holds one entry per flv segment; collect their URLs in order.
        videourl=htmljson['durl']
        ddd=[]
        for dict1 in videourl:
            ddd.append(dict1['url'])
        return ddd
class MyException(Exception):
    """ Base class for this module's custom errors. """
    pass
class ConnectError(MyException):
    """ Raised when the target server cannot be reached after retries. """
    def __init__(self):
        self.args = ('连接不上目标服务器',)
class PattenError(MyException):
    """ Raised when a regex pattern fails to match the page source.
    (The original docstring was a copy-paste of ConnectError's.) """
    def __init__(self):
        self.args = ('网页源码匹配错误',)
# Per-request timeout ceiling (seconds); used as the default in Go().
response_time_max = 60
def MakeDir(PATH:"文件或目录")->int:
    """ Create directory PATH if missing.

    Returns 1 when the path already exists, otherwise creates it and
    returns 0.  Uses EAFP (try/except FileExistsError) instead of the
    original exists-then-mkdir check, which was racy (TOCTOU): another
    process could create the path between the check and the mkdir.
    """
    try:
        mkdir(PATH)
    except FileExistsError:
        return 1
    return 0
def RemoveInvalidChr(STRING):
    """Strip every character that is not ASCII-alphanumeric or CJK and cap
    the result at 60 characters (for use as a safe file name).

    The original also stripped backslashes and Windows-forbidden
    characters afterwards, but the first substitution had already removed
    every non-alphanumeric/non-CJK character, so those steps were dead
    code; likewise ``q[:60] if len(q) > 60 else q`` is just ``q[:60]``.
    """
    cleaned = sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', "", f'{STRING}')
    return cleaned[:60]
def ValueCopy(SEQUENCE)->list:  # copy.deepcopy would also do this
    """Recursively copy lists and dicts by value, so the result is not
    affected by later mutation of the original.  Anything that is not
    exactly a list or a dict (tuples included) is returned as-is."""
    if type(SEQUENCE) is list:
        return [ValueCopy(item) for item in SEQUENCE]
    if type(SEQUENCE) is dict:
        return {ValueCopy(key): ValueCopy(value)
                for key, value in SEQUENCE.items()}
    return SEQUENCE
def Go(url, headers, *, timeout=response_time_max, **kwargs)->"<response>":
    """GET *url*, retrying on any requests exception; give up after six
    attempts and raise ConnectError."""
    for _attempt in range(6):
        try:
            response = get(url, headers=headers, timeout=timeout, **kwargs)
        except exceptions.RequestException:
            continue
        return response
    raise ConnectError()
def playget(URL,HEADERS=page_headers)->"(video_url,audio_url)/False":
    """Scrape a bilibili play page; return (video_url, audio_url) for DASH
    streams, or False when the page carries no 'dash' entry (old flv-only
    videos, which must go through old_url() instead)."""
    html_text=Go(URL,HEADERS).text
    html_lxml=bs(html_text, 'lxml')
    # The third <script> in <head> carries the playinfo JSON.
    data=html_lxml.head.find_all('script')[2]
    data=data.string
    # Skips a fixed 20-char prefix before the JSON payload -- presumably
    # "window.__playinfo__=" -- TODO confirm it never changes length.
    data_play=loads(data[20:])['data']
    if 'dash' in data_play:
        video_new_url=data_play['dash']['video'][0]['baseUrl']
        audio_new_url=data_play['dash']['audio'][0]['baseUrl']
        return video_new_url, audio_new_url
    else:
        return False
def PattenCut(patten, string, start=0, end=None)->list:
    """ Find every match of *patten* in *string* and return the matched
    substrings sliced as match[start:end].

    Raises PattenError when nothing matches.  BUGFIX: the original tested
    ``if coincide:`` on the finditer() iterator, which is always truthy,
    so the PattenError branch was unreachable and an empty list leaked out
    on no-match -- crashing callers such as file_part() with an opaque
    IndexError instead. """
    pieces = [match.group()[start:end]
              for match in Compile(patten).finditer(string)]
    if not pieces:
        raise PattenError()
    return pieces
def file_part(url, head,threadnum)->tuple:
    """Probe the remote file size via a ranged GET and split it into
    *threadnum* byte ranges plus the matching file-write offsets.

    Returns (range_list, write_list):
      range_list[i] -- [first_byte, last_byte] for worker i (inclusive)
      write_list[i] -- seek offset where worker i writes its chunk
    """
    data= Go(url, head)
    # Content-Range looks like "bytes 0-2028/TOTAL"; extract TOTAL after '/'.
    content = data.headers["Content-Range"]
    content = int(PattenCut("/[0-9]+", content, start=1)[0])
    block = content//threadnum
    range_list = []
    write_list = []
    auxiliary_list = []
    for i in range(threadnum):
        auxiliary_list.append(i*block - 1)
        write_list.append(i*block)
    for i in auxiliary_list:
        range_list.append([i + 1, i + block])
    # Integer division above drops the remainder; stretch the final range
    # so it ends exactly at the last byte.
    range_list[-1][-1] = content - 1
    return range_list, write_list
# Bilibili web API endpoint (query video metadata by BV id) and the
# prefix used to build a video's play-page URL.
API_URL="https://api.bilibili.com/x/web-interface/view?bvid="
PLAY_URL='https://www.bilibili.com/video/BV'
def apiget(URL, HEADERS=api_headers)->list:
    """Query the bilibili view API and unpack the fields the downloader needs.

    Returns (cid, part, aid, name, videos, view, danmaku, desc, dimension,
    picurl); cid/part/dimension are per-page lists.  A single-page video
    reuses the (sanitized) video title as its only part name.
    """
    data=Go(URL, HEADERS).json()['data']
    aid = data["aid"]
    videos = data["videos"]
    desc = data["desc"]
    picurl = data["pic"]
    name = data["owner"]["name"]
    view = data["stat"]["view"]
    danmaku = data["stat"]["danmaku"]
    part=[]
    cid = []
    dimension = []
    # Titles/part names are sanitized so they can be used as file names.
    title = RemoveInvalidChr(data['title'])
    for item in data['pages']:
        cid.append(item["cid"])
        part.append(RemoveInvalidChr(item['part']))
        dimension.append([item["dimension"]["width"], item["dimension"]["height"]])
    if len(cid) == 1:
        part = [title]
    return cid, part, aid, name, videos, view, danmaku, desc, dimension, picurl
# Shared progress bookkeeping across download threads:
# container[i] collects one entry per finished byte-range of task i;
# odc maps a task index to its number of flv segments (old-style videos).
container = []
odc = {}
class download:
    """Multi-threaded downloader for one bilibili video page.

    Splits each media URL into byte ranges via file_part() and spawns
    *threadnum* downloadcore threads per file.  Progress is tracked through
    the module-level ``container`` list; ``odc`` records how many flv
    segments an old-style video has.  NOTE(review): threads are started but
    never joined here -- presumably the caller polls container -- confirm.
    """
    def __init__(self, page_url, title, threadnum, save_dict, rec_dict, index, cid):
        self.page_url = page_url
        self.title = title
        # NOTE(review): '/' is replaced with TWO literal backslashes; looks
        # like a Windows path normalization but yields '\\' pairs -- verify
        # against how save_dict/rec_dict are passed in.
        self.save_dict = save_dict.replace('/', '\\\\')
        self.threadnum = threadnum
        # Range header probes partial-content support; Referer is required
        # by bilibili's CDN or the request is rejected.
        self.head = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
            'Origin': 'https://www.bilibili.com',
            'Connection': 'keep-alive',
            'Range': 'bytes=0-2028',
            'Referer': f'{self.page_url}'
        }
        self.cid = cid
        self.index = index
        self.rec_dict = rec_dict.replace('/', '\\\\')
        MakeDir(self.rec_dict)
        MakeDir(self.save_dict)
    def newORold(self):
        # Decide between the DASH path (playget) and the legacy flv path
        # (old_url).  NOTE(review): any failure is re-raised as PattenError,
        # masking the real exception type.
        try:
            self.page_video_url = playget(self.page_url)
            if not self.page_video_url:
                self.page_video_url = old_url(self.cid)
                self.oldbegin()
            else:
                self.begin()
        except Exception:
            raise PattenError()
    def oldbegin(self):
        # Legacy videos come as several flv segments; record the count so
        # the caller knows when all segments are complete.
        odc[int(self.index)] = len(self.page_video_url)
        j = 1
        for urlx in self.page_video_url:
            filename = self.rec_dict+'\\'+self.title+f'_{j}.flv'
            # Pre-create an empty file so each thread can open it 'rb+'.
            with open(filename,"wb") as f:
                pass
            f.close()
            lock = threading.Lock()
            range_list, write_list = file_part(urlx, self.head, self.threadnum)
            j +=1
            threads = []
            for i in range(len(write_list)):
                threads.append(threading.Thread(target=downloadcore,
                    args=(urlx, filename, lock, self.head, range_list[i], write_list[i], self.index)))
            for i in threads:
                i.start()
    def begin(self):
        # DASH videos: one video stream and one audio stream, downloaded
        # with the same ranged-thread scheme.
        self.filebox=(
            self.rec_dict + "\\" + self.title + "_video.flv",
            self.rec_dict + "\\" + self.title + "_audio.flv",
            )
        j = 0
        for url in self.page_video_url:
            filename = self.filebox[j]
            with open(filename,"wb") as f:
                pass
            f.close()
            lock = threading.Lock()
            range_list, write_list = file_part(url, self.head, self.threadnum)
            j += 1
            threads = []
            for i in range(len(write_list)):
                threads.append(threading.Thread(target=downloadcore,
                    args=(url, filename, lock, self.head, range_list[i], write_list[i], self.index)))
            for i in threads:
                i.start()
def downloadcore(url, filename, lock, headers, rangex, writex, index):
    """Download one byte range of *url* and write it into *filename* at
    offset *writex*; record completion in container[index].

    rangex is the inclusive [first_byte, last_byte] pair from file_part().
    BUGFIX: the original called lock.acquire()/lock.release() around the
    file write with no try/finally, so an exception while writing left the
    lock held forever and deadlocked every sibling thread; ``with lock``
    guarantees release.
    """
    head = ValueCopy(headers)  # per-thread copy; never mutate the shared dict
    head.update({'Range': f"bytes={rangex[0]}-{rangex[1]}"})
    data = Go(url, head).content
    with lock:
        with open(filename, 'rb+') as file:
            file.seek(writex)
            file.write(data)
            container[index].append(1)
| StarcoderdataPython |
12831074 | <reponame>EDF-Lab/EDF
"""
.. module:: threading
:platform: Unix
:synopsis: A module that implements threading tools to use a GUI.
.. Copyright 2022 EDF
.. moduleauthor:: <NAME>, <NAME>, <NAME>, <NAME>
.. License:: This source code is licensed under the MIT License.
"""
from PyQt5.QtCore import QObject, pyqtSignal
from shared.controller import ControllerInterface
class Worker(QObject):
    """This is a class that will handle a specific task in a thread.

    Intended for the Qt moveToThread pattern: connect a QThread's
    ``started`` signal to :meth:`run`; ``finished`` is emitted when the
    callback returns so the thread can be torn down.
    """
    # Emitted once the callback has completed.
    finished = pyqtSignal()

    def __init__(self, controller: ControllerInterface, callback):
        """Basic constructor.
        :param controller: The controller that will handle the data and the view.
        :param callback: The function that will be called by the the thread.
        """
        # FIX: initialize the QObject base *before* touching the instance.
        # The original assigned attributes first and called
        # super().__init__() last; PyQt objects should not be used before
        # the C++ side is constructed.
        super().__init__()
        self.controller = controller
        self.callback = callback

    def run(self):
        """Invoke the callback with the controller, then signal completion."""
        self.callback(self.controller)
        self.finished.emit()
| StarcoderdataPython |
1764444 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 22:17:05 2018
@author: yoelr
"""
from ._splitter import Splitter
from .decorators import cost
__all__ = ('MolecularSieve',)
# @cost('Flow rate', 'Pressure filter drying (2)',
#       cost=405000, CE=521.9, S=22687, n=0.6, kW=1044)
# @cost('Flow rate', 'Pressure filter pressing',
#       cost=75200, CE=521.9, S=22687, n=0.6, kW=112)
@cost('Flow rate', 'Column', kW=151, BM=1.8,
      cost=2601000, CE=521.9, S=22687, n=0.6)
class MolecularSieve(Splitter):
    """Create an ethanol/water molecular sieve for bioethanol plants.
    The molecular sieve is modeled as a component wise separator. Costing
    is based on scaling by the 6/10ths rule from an NREL TEA report [1].
    
    **Parameters**
    
        **split:** [array_like] Componentwise split to the 0th output stream
    
    **ins**
    
        [0] Feed (gas)
    
    **outs**
    
        [0] Split stream (gas)
        
        [1] Remainder stream (gas)
    
    **References**
    
        [1] Process Design and Economics for Biochemical Conversion of
        Lignocellulosic Biomass to Ethanol Dilute-Acid Pretreatment and
        Enzymatic Hydrolysis of Corn Stover. <NAME>, <NAME>, L.
        Tao, <NAME>, <NAME>, and <NAME> (National Renewable Energy
        Laboratory Golden, Colorado). <NAME>, <NAME>, <NAME>,
        <NAME>, <NAME>, and <NAME> (Harris Group Inc. Seattle,
        Washington and Atlanta, Georgia)
    
    **Examples**
    
        :doc:`MolecularSieve Example`
    
    """
    # One heating + one cooling utility (see _design).
    _N_heat_utilities = 2
    _units = {'Flow rate': 'kg/hr'}
    def __init__(self, ID='', ins=None, outs=(), *, order=None, split):
        Splitter.__init__(self, ID, ins, outs, order=order, split=split)
        # Both product streams leave the sieve as vapor.
        self._outs[0]._phase = self._outs[1]._phase = 'g'
    def _design(self):
        # Equipment scales on the mass flow of the remainder (1st) stream.
        self._Design['Flow rate'] = flow = self._outs[1].massnet
        T = self.ins[0].T
        # Flow-proportional heating/cooling duties at the feed temperature.
        # NOTE(review): the 1429.65 / -55.51 coefficients are presumably
        # kJ per kg of flow from the NREL report -- confirm units.
        self._heat_utilities[0](1429.65*flow, T)
        self._heat_utilities[1](-55.51*flow, T)
| StarcoderdataPython |
100646 | <reponame>Imperat/SSU-Information-Security-Course
import pda_exceptions as e
class PDA(object):
    """A finite-state machine driven by a nested transition table.

    ``rules`` maps state -> {symbol -> next_state}.  Once any input fails,
    the machine latches into a crashed state and refuses further input.
    (Despite the name, no stack is kept, so this behaves as a DFA.)
    """

    def __init__(self, rules, input_alphabet, states,
                 initial_state, terminate_states):
        self.rules = rules
        self.input_alphabet = input_alphabet
        self.states = states
        self.state = initial_state
        self.terminate_states = terminate_states
        self.crash = False

    def _crash(self):
        # Latch the crash flag; every later input() raises immediately.
        self.crash = True

    def input(self, symbol):
        """Consume one symbol, advancing the state per the transition rules."""
        try:
            if self.crash:
                raise e.PDACrashException(
                    "Error by input. PDA is crashed!")
            self.state = self.rules[self.state][symbol]
        except KeyError:
            # Missing table entry: either the symbol is foreign to the
            # alphabet, or there is simply no transition from this state.
            self._crash()
            if symbol not in self.input_alphabet:
                raise e.UnknownSymbolException(
                    "Symbol isn't in input alphabet")
            raise e.PDACrashException(
                "PDA is crashed")

    def in_terminate_state(self):
        """Return True when the current state is an accepting state."""
        return self.state in self.terminate_states
| StarcoderdataPython |
# Tells Django which AppConfig class to use for this app by default.
default_app_config = "canteenREST.apps.CanteenrestConfig"
| StarcoderdataPython |
9702033 |
from ..templatetags.analysis_tags import analyses_results_urls_list_str
from topobank.analysis.models import Analysis
def test_analyses_results_urls_list_str(mocker):
    """The template tag renders a JSON-style list of per-analysis retrieve URLs."""
    # Patch the model so instances can be built without a database.
    mocker.patch('topobank.analysis.models.Analysis', autospec=True)
    analyses = [Analysis(id=i) for i in range(3)]  # we just need any id
    reverse = mocker.patch('topobank.analysis.templatetags.analysis_tags.reverse', autospec=True)
    reverse.side_effect = ["/analysis/retrieve/0","/analysis/retrieve/1","/analysis/retrieve/2"]
    # we fake here the outcome of the reverse function, we only want to
    # test the template tag
    s = analyses_results_urls_list_str(analyses)
    assert s=='["/analysis/retrieve/0","/analysis/retrieve/1","/analysis/retrieve/2"]'
| StarcoderdataPython |
5046120 | <filename>demo_seq2seq_nmt.py
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 16 20:31:32 2019
@author: ruan
"""
import codecs
import collections
from operator import itemgetter
import numpy as np
from pandas import DataFrame as dataframe
from pandas import Series as series
import os
import time
import tensorflow as tf
import logging
from sklearn.model_selection import train_test_split
import copy
import pickle
from imp import reload
logging.basicConfig(level=logging.WARNING, format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S",)
# Change the current working directory (machine-specific path).
# todo change dir
os.chdir(u'E:\\MachineLearning\\nlp')
# os.chdir(u'D:\\nlp')
path_data = u'..\\data\\'
print(os.getcwd())
from neural_network import NeuralNetwork
import nn_lib
# Feature switches: enable exactly the pipeline stages to run.
flag_test = False
flag_build_vocab = False
flag_process_data = False
flag_pretrain_w2v = False
flag_train = False
flag_infer = False
flag_transfer_learning = False
flag_params_output = False
flag_export_model = True
# Hyperparameters
word_embd_dim = 100
dim_rnn = word_embd_dim
learning_rate = 1e-3
batch_size = 16*1
keep_prob = 0.95
encoder_word_embd_pretrain = None
decoder_word_embd_pretrain = None
path_seq2seq = path_data+u'seq2seq_nmt\\'
path_corpus_processed = path_seq2seq + u'corpus_processed\\'
corpus_names = ['x', 'x_extended', 'y', 'y_extended', 'vocab_extend']
# processed_corpus_names = ['x_train', 'x_test', 'x_vali', 'x_extended_train', 'x_extended_test', 'x_extended_vali',
#                           'y_train', 'y_test', 'y_vali', 'y_extended_train', 'y_extended_test', 'y_extended_vali',
#                           'vocab_extend_train', 'vocab_extend_test', 'vocab_extend_vali']
# Source-sequence (English) vocabulary parameters.
vocab_size_src = 8000
path_corpus_src = path_seq2seq+'train.txt.en'
path_vocab_src = path_seq2seq+'vocab_en.pkl'
path_corpus_processed_src = path_seq2seq+'corpus_processed_en'
src_seq_len_max = 199
# Target-sequence (Chinese) vocabulary parameters.
vocab_size_tgt = 4000
path_corpus_tgt = path_seq2seq+'train.txt.zh'
path_vocab_tgt = path_seq2seq+'vocab_zh.pkl'
path_corpus_processed_tgt = path_seq2seq+'corpus_processed_zh'
tgt_seq_len_max = 99
# Read the sample data
def read_file(file_path):
    """Read a whitespace-tokenized UTF-8 corpus file.

    Parameters:
        file_path: path of a text file with one sentence per line.

    Returns:
        list[list[str]]: per-line tokens, split on whitespace.
    """
    data = []
    # Stream line by line instead of readlines(): same result without
    # holding the raw text in memory.  (Also removes the unused `count`
    # variable from the original implementation.)
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            data.append(line.strip().split())
    return data
# Write the id-mapped sample data
def write_file(file_path, data):
    """Write the pre-rendered sentence strings in *data* to *file_path* (UTF-8)."""
    with open(file_path, 'w', encoding='utf-8') as out:
        # writelines performs exactly the per-sentence write loop in C.
        out.writelines(data)
    return
# Build the vocabularies
def build_vocab():
    """Build and persist word->id vocabularies for the source and target corpora.

    Side effects: rewrites the module-level ``vocab_size_src``/``vocab_size_tgt``
    with the actual sizes returned by nn_lib and writes both vocab pickles.

    Returns:
        (dict, dict): the English (source) and Chinese (target) word->id maps.
    """
    global path_corpus_src, path_vocab_src, vocab_size_src
    global path_corpus_tgt, path_vocab_tgt, vocab_size_tgt
    data_en = read_file(path_corpus_src)
    word2id_vocab_en, vocab_size_src = nn_lib.build_word2id_vocab(data_en, path_vocab_src, vocab_size=vocab_size_src)
    data_zh = read_file(path_corpus_tgt)
    word2id_vocab_zh, vocab_size_tgt = nn_lib.build_word2id_vocab(data_zh, path_vocab_tgt, vocab_size=vocab_size_tgt)
    return word2id_vocab_en, word2id_vocab_zh
# Data preprocessing
def preprocess_data():
    """Convert both corpora to (extended-)id sequences, pad them, and persist
    train/test/validation splits on disk.

    Uses the module-level paths/vocab sizes; relies on build_vocab() having
    written both vocab pickles.  The "extended" ids implement a
    pointer-network-style copy vocabulary: per-sentence OOV words get ids
    past the fixed vocabulary.  Returns nothing.
    """
    # Load the fixed source/target vocabularies.
    word2id_vocab_src, vocab_size_src = nn_lib.read_word2id_dict(path_vocab_src)
    word2id_vocab_tgt, vocab_size_tgt = nn_lib.read_word2id_dict(path_vocab_tgt)
    # Read the tokenized corpora.
    corpus_src = read_file(path_corpus_src)
    corpus_tgt = read_file(path_corpus_tgt)
    # Convert the corpus text to id sequences.
    src_ids = []
    src_id_extendeds = []
    word2id_vocab_extends = []
    tgt_ids = []
    tgt_id_extendeds = []
    for src, tgt in zip(corpus_src, corpus_tgt):
        # Convert the source sentence to ids, collecting per-sentence OOVs.
        src_id, src_id_extended, vocab_extend_raw = nn_lib.sentence2id(sent=src,
                                                                       word2id_vocab=word2id_vocab_src,
                                                                       build_extend_vocab=True)
        src_ids.append(src_id)
        src_id_extendeds.append(src_id_extended)
        # Re-base extended ids from past-the-source-vocab to past-the-target-vocab.
        vocab_extend = {key: value-len(word2id_vocab_src)+len(word2id_vocab_tgt) for key, value in vocab_extend_raw.items()}
        word2id_vocab_extends.append(copy.copy(vocab_extend))
        # Convert the target sentence to ids, once against the fixed vocab and
        # once against vocab_extend updated with the fixed target vocab.
        tgt_id = nn_lib.sentence2id(sent=tgt, word2id_vocab=word2id_vocab_tgt, build_extend_vocab=False)
        vocab_extend.update(word2id_vocab_tgt)
        tgt_id_extended = nn_lib.sentence2id(sent=tgt, word2id_vocab=vocab_extend, build_extend_vocab=False)
        tgt_ids.append(tgt_id)
        tgt_id_extendeds.append(tgt_id_extended)
        del src, src_id, src_id_extended, tgt, tgt_id, tgt_id_extended
    # Zero-pad the sequences and record the largest extended vocabulary.
    src_ids = nn_lib.pad_sequences(sequences=src_ids, max_seq_len=src_seq_len_max)
    src_id_extendeds = nn_lib.pad_sequences(sequences=src_id_extendeds, max_seq_len=src_seq_len_max)
    tgt_ids = nn_lib.pad_sequences(sequences=tgt_ids, max_seq_len=tgt_seq_len_max, add_sos=True)
    tgt_id_extendeds = nn_lib.pad_sequences(sequences=tgt_id_extendeds, max_seq_len=tgt_seq_len_max, add_sos=True)
    vocab_size_extened = max([len(i) for i in word2id_vocab_extends])
    # Regularize everything into numpy arrays.
    src_ids = np.array(src_ids)
    src_id_extendeds = np.array(src_id_extendeds)
    tgt_ids = np.array(tgt_ids)
    tgt_id_extendeds = np.array(tgt_id_extendeds)
    word2id_vocab_extends = np.array(word2id_vocab_extends).reshape([-1, 1])
    # Build and persist the train/test/validation splits.
    nn_lib.generate_train_test_vali(src_ids, src_id_extendeds, tgt_ids, tgt_id_extendeds, word2id_vocab_extends,
                                    file_path=path_corpus_processed,
                                    corpus_names=corpus_names,
                                    data_test_size=128*2,
                                    data_vali_size=128*3)
    # x, x_vali, x_extended, x_extended_vali, y, y_vali, y_extended, y_extended_vali, vocab_extend, vocab_extend_vali \
    #     = train_test_split(src_ids, src_id_extendeds, tgt_ids, tgt_id_extendeds, word2id_vocab_extends, test_size=128*3)
    # x_train, x_test, x_extended_train, x_extended_test, y_train, y_test, y_extended_train, y_extended_test, vocab_extend_train, vocab_extend_test \
    #     = train_test_split(x, x_extended, y, y_extended, vocab_extend, test_size=128*2)
    # del x, x_extended, y, y_extended, vocab_extend
    # # 存储训练集、测试集、验证集
    # for name in processed_corpus_names:
    #     with open(path_corpus_processed + name, 'wb') as file:
    #         pickle.dump(eval(name), file)
    return
# Load the corpus
def load_processed_corpus():
    """Unpickle every train/test/validation corpus array into one dict.

    Keys are ``'<name>_<split>'`` for each name in the module-level
    ``corpus_names`` and split in train/test/vali; values are whatever
    ``preprocess_data`` pickled under ``path_corpus_processed``.
    """
    corpus = {}
    for split in ('_train', '_test', '_vali'):
        for base in corpus_names:
            key = base + split
            with open(path_corpus_processed + key, 'rb') as fh:
                corpus[key] = pickle.load(fh, encoding='utf-8')
    return corpus
# Machine-translation helper (test/demo entry point)
def nmt(model, corpus_src,
        path_vocab_src=path_vocab_src, path_vocab_tgt=path_vocab_tgt,
        src_seq_len_max=src_seq_len_max):
    """Translate raw source sentences with a trained seq2seq model.

    Parameters:
        model: trained NeuralNetwork exposing ``infer(data)`` that returns a
            per-timestep probability distribution for each sentence.
        corpus_src: iterable of untokenized source-language sentences.
        path_vocab_src/path_vocab_tgt: vocab pickle paths (default module paths).
        src_seq_len_max: padding length for source sequences.

    Returns:
        list[str]: one decoded target sentence per input, '<PAD>' removed.
    """
    # Load the vocabularies (plus the inverse target map for decoding).
    word2id_vocab_src, vocab_size_src = nn_lib.read_word2id_dict(path_vocab_src)
    word2id_vocab_tgt, vocab_size_tgt = nn_lib.read_word2id_dict(path_vocab_tgt)
    id2word_vocab_tgt = {value: key for key, value in word2id_vocab_tgt.items()}
    ids = []
    id_extendeds = []
    vocab_extends = []
    # Preprocess the input corpus (mirror of preprocess_data's source side).
    for sentence in corpus_src:
        sent = sentence.strip().split()
        id, id_extended, vocab_extend_raw = nn_lib.sentence2id(sent=sent, word2id_vocab=word2id_vocab_src, build_extend_vocab=True)
        ids.append(id)
        id_extendeds.append(id_extended)
        vocab_extend = {key: value-len(word2id_vocab_src)+len(word2id_vocab_tgt) for key, value in vocab_extend_raw.items()}
        vocab_extends.append(copy.copy(vocab_extend))
    # Zero-pad the sequences and record the largest extended vocabulary.
    ids = nn_lib.pad_sequences(sequences=ids, max_seq_len=src_seq_len_max)
    id_extendeds = nn_lib.pad_sequences(sequences=id_extendeds, max_seq_len=src_seq_len_max)
    vocab_size_extened = max([len(i) for i in vocab_extends])
    # Regularize into numpy arrays.
    ids = np.array(ids)
    id_extendeds = np.array(id_extendeds)
    vocab_extends = np.array(vocab_extends).reshape([-1, 1])
    data = [ids, id_extendeds, vocab_extends]
    # Run inference; the model emits per-timestep probability distributions.
    tgt_prob_seqs = model.infer(data=data)
    # Convert the predictions back into natural-language sentences.
    tgt_seqs = []
    for seq in tgt_prob_seqs:
        seq = np.argmax(seq, axis=1)
        seq = [id2word_vocab_tgt[id] for id in seq]
        seq = np.array(seq).reshape([-1, 1])
        tgt_seqs.append(seq)
    corpus_tgt = np.concatenate(tgt_seqs, axis=1)
    corpus_tgt = [''.join([tmp for tmp in corpus_tgt[i, :] if tmp != '<PAD>']) for i in range(corpus_tgt.shape[0])]
    return corpus_tgt
def demo():
    """Scratch helper: load both vocabularies and build their id->word inverses.

    The commented lines show how to decode a test sample manually; as written
    the function has no observable effect and returns None.
    """
    word2id_vocab_src, vocab_size_src = nn_lib.read_word2id_dict(path_vocab_src)
    word2id_vocab_tgt, vocab_size_tgt = nn_lib.read_word2id_dict(path_vocab_tgt)
    id2word_vocab_src = {value: key for key, value in word2id_vocab_src.items()}
    id2word_vocab_tgt = {value: key for key, value in word2id_vocab_tgt.items()}
    # ' '.join([id2word_vocab_src[i] for i in corpus['x_test'][100]])
    # ''.join([id2word_vocab_tgt[i] for i in corpus['y_test'][100]])
    return
if __name__ == "__main__":
# 创建字典
if flag_build_vocab is True:
word2id_vocab_en, word2id_vocab_zh = build_vocab()
# 预处理数据
if flag_process_data is True:
preprocess_data()
# 预训练词向量
if flag_pretrain_w2v is True:
nn_lib.train_word2vec(path_corpus=path_corpus_src, word2vec_dim=word_embd_dim,
path_w2v_model=path_seq2seq+'en_w2v_model', path_w2v_vector=path_seq2seq+'en_w2v_vector')
w2v_vector = nn_lib.load_w2v_vector(path_seq2seq+'en_w2v_vector')
word2id_vocab_src, vocab_size_src = nn_lib.read_word2id_dict(path_vocab_src)
encoder_word_embd_pretrain = nn_lib.rebuild_w2v_matrix(word2id_vocab_src, w2v_vector)
with open(path_seq2seq + 'encoder_word_embd_pretrain', 'wb') as file:
pickle.dump(encoder_word_embd_pretrain, file)
# word2id_vocab_tgt, vocab_size_tgt = nn_lib.read_word2id_dict(path_vocab_tgt)
corpus = load_processed_corpus()
data = [corpus['x_train'], corpus['x_extended_train'], corpus['vocab_extend_train'], corpus['y_train'], corpus['y_extended_train']]
data_test = [corpus['x_test'], corpus['x_extended_test'], corpus['vocab_extend_test'], corpus['y_test'], corpus['y_extended_test']]
model = NeuralNetwork(data=data,
model_type='seq2seq', loss_fun_type='cross_entropy_seq2seq',
model_parameter={'keep_prob': keep_prob,
'word_embd_dim': word_embd_dim,
'dim_rnn': dim_rnn,
'use_same_word_embd': False,
# 'encoder_word_embd_pretrain': encoder_word_embd_pretrain,
'encoder_vocab_size': vocab_size_src,
# 'decoder_word_embd_pretrain': decoder_word_embd_pretrain,
'decoder_vocab_size': vocab_size_tgt,
'target_seq_len_max': tgt_seq_len_max,
'batch_size': batch_size},
hyper_parameter={'optimizer_type': 'Adam',
'learning_rate': learning_rate,
'eval_score_type': 'cross_entropy_seq2seq',
'early_stop_rounds_train': 100,
'built_in_test_interval': 1,
'early_stop_rounds_test': 10},
other_parameter={'model_save_rounds': 1,
'path_data': path_seq2seq}
)
# 训练
if flag_train:
model.train(transfer_learning=flag_transfer_learning, built_in_test=True, data_test=data_test)
# 预测
if flag_infer:
if False:
word2id_vocab_src, vocab_size_src = nn_lib.read_word2id_dict(path_vocab_src)
id2word_vocab_src = {id: word for word,id in word2id_vocab_src.items()}
en = []
for sent in data[0]:
sent_tmp = [id2word_vocab_src[id] for id in sent if (id != 0 and id != 2)]
sent_tmp = ' '.join(sent_tmp)
en.append(sent_tmp)
zh = nmt(model=model, corpus_src=en)
nmt_result = []
for i in range(len(en)):
nmt_result.append((en[i], zh[i]))
if True:
src = ['win',
'go']
src = [sent.lower() for sent in src]
tgt = nmt(model=model, corpus_src=src)
for sent in tgt:
print(sent)
# 输出模型参数
if flag_params_output:
model.params_output()
# 输出pb模型
if flag_export_model:
model.export_model()
print('Task End.')
| StarcoderdataPython |
1745686 | {
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_ogr_htf_frmt",
"type": "static_library",
"sources": [
"../gdal/ogr/ogrsf_frmts/htf/ogrhtfdatasource.cpp",
"../gdal/ogr/ogrsf_frmts/htf/ogrhtfdriver.cpp",
"../gdal/ogr/ogrsf_frmts/htf/ogrhtflayer.cpp"
],
"include_dirs": [
"../gdal/ogr/ogrsf_frmts/htf"
]
}
]
}
| StarcoderdataPython |
6468683 | from torch.utils.data import Dataset, DataLoader
import cv2
import torchvision.transforms.functional as TF
import numpy as np
import torch
class CharacteristicsDataset(Dataset):
    """Image dataset pairing file paths with per-image target variables.

    Each item is ``(image_tensor, target_row, variable_names)`` where the
    image is loaded from disk, optionally resized and augmented, and
    converted to a CHW float tensor.
    """

    def __init__(self, path, target, size=None, transform=None):
        self.path = path
        # Target rows come in as a DataFrame; keep them as one tensor and
        # remember the column names for downstream reporting.
        self.target = torch.from_numpy(target.values)
        self.variables = target.columns.tolist()
        self.transform = transform
        self.size = size

    def __len__(self):
        return len(self.path)

    def __getitem__(self, idx):
        # Load from disk and convert cv2's BGR layout to RGB.
        image = cv2.cvtColor(cv2.imread(self.path[idx]), cv2.COLOR_BGR2RGB)
        target = self.target[idx, :]
        # Optional fixed-size resize before augmentation.
        if self.size is not None:
            image = cv2.resize(image, dsize=self.size, interpolation=cv2.INTER_CUBIC)
        # Optional albumentations-style transform (dict in, dict out).
        if self.transform is not None:
            image = self.transform(image=image)['image']
        return TF.to_tensor(image), target, self.variables
| StarcoderdataPython |
1793108 | <gh_stars>10-100
import tkinter as tk
class Bottom(tk.Frame):
    """Dark fixed-size frame hosting the application's bottom page."""

    def __init__(self, master, *args, **kwargs):
        tk.Frame.__init__(self, master, *args, **kwargs)
        self['bg'] = '#2c2c2c'
        # Let the single grid cell stretch with the frame, but keep the
        # frame's own size fixed (grid_propagate(False)).
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.grid_propagate(False)

    def show_frame(self, title):
        """Place and raise the bottom page frame.

        NOTE(review): *title* is accepted but never used -- the first entry
        of ``bottomPage`` is always shown; confirm whether a lookup by title
        was intended.
        """
        from Base.listOfPage import bottomPage
        frame = bottomPage[0]
        frame.grid(row=0, column=0, sticky='nsew')
        frame.tkraise()
| StarcoderdataPython |
3432543 | <gh_stars>10-100
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import AgglomerativeClustering
# Fix the RNG seed so make_blobs and clustering runs are reproducible.
np.random.seed(1000)
nb_samples = 3000
def plot_clustered_dataset(X, Y):
    """Scatter-plot a 2-D dataset with one marker/color per cluster label.

    Parameters:
        X: array of shape (n_samples, 2) with point coordinates.
        Y: array of shape (n_samples,) with integer cluster labels in 0-7.
    """
    fig, ax = plt.subplots(1, 1, figsize=(10, 8))
    ax.grid()
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    markers = ['o', 'd', '^', 'x', '1', '2', '3', 's']
    colors = ['r', 'b', 'g', 'c', 'm', 'k', 'y', '#cccfff']
    # Fix: iterate over the actual number of points instead of the
    # module-level nb_samples constant, so the helper works for datasets
    # of any size (the original crashed or truncated otherwise).
    for i in range(len(X)):
        ax.scatter(X[i, 0], X[i, 1], marker=markers[Y[i]], color=colors[Y[i]])
    plt.show()
if __name__ == '__main__':
    # Create the dataset: 8 gaussian blobs in the plane.
    X, _ = make_blobs(n_samples=nb_samples, n_features=2, centers=8, cluster_std=2.0)

    # Show the raw (unclustered) dataset.
    fig, ax = plt.subplots(1, 1, figsize=(10, 8))
    ax.grid()
    ax.set_xlabel('X')
    ax.set_ylabel('Y')

    ax.scatter(X[:, 0], X[:, 1], marker='o', color='b')
    plt.show()

    # Complete linkage: merges minimize the maximum pairwise distance.
    print('Complete linkage')
    ac = AgglomerativeClustering(n_clusters=8, linkage='complete')
    Y = ac.fit_predict(X)

    # Show the clustered dataset
    plot_clustered_dataset(X, Y)

    # Average linkage: merges minimize the mean pairwise distance.
    print('Average linkage')
    ac = AgglomerativeClustering(n_clusters=8, linkage='average')
    Y = ac.fit_predict(X)

    # Show the clustered dataset
    plot_clustered_dataset(X, Y)

    # Ward linkage (sklearn default): merges minimize within-cluster variance.
    print('Ward linkage')
    ac = AgglomerativeClustering(n_clusters=8)
    Y = ac.fit_predict(X)

    # Show the clustered dataset
    plot_clustered_dataset(X, Y)
| StarcoderdataPython |
221765 | """Copyright 2012 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = '<NAME>'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetLibrary import PhidgetLibrary
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
import sys
class LEDVoltage:
    """This is an enumeration of available voltage output level values.

    This has been added to allow the user to select the preset voltage output level values available in the hardware.
    """
    VOLTAGE_1_7V=1    # 1.7 V output
    VOLTAGE_2_75V=2   # 2.75 V output (hardware default, per setVoltage docs)
    VOLTAGE_3_9V=3    # 3.9 V output
    VOLTAGE_5_0V=4    # 5.0 V output
    INVALID=0         # unknown / not yet set
class LEDCurrentLimit:
    """This is an enumeration of available current limit values.

    This has been added to allow the user to select the preset current limit values available in the hardware.
    """
    CURRENT_LIMIT_20mA=1   # 20 mA limit (hardware default, per setCurrentLimit docs)
    CURRENT_LIMIT_40mA=2   # 40 mA limit
    CURRENT_LIMIT_60mA=3   # 60 mA limit
    CURRENT_LIMIT_80mA=4   # 80 mA limit
    INVALID=0              # unknown / not yet set
class LED(Phidget):
    """This class represents a Phidget LED. All methods to control a Phidget LED are implemented in this class.

    The Phidget LED is a board that is meant for driving LEDs. Currently, the only available version drives 64
    LEDs, but other versions may become available so this number is not absolute. LEDs can be controlled
    individually, at brightness levels from 0-100.

    See your device's User Guide for more specific API details, technical information, and revision details.
    The User Guide, along with other resources, can be found on the product page for your device.

    Extends:
        Phidget
    """
    def __init__(self):
        """The Constructor Method for the LED Class

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
        """
        Phidget.__init__(self)
        # The original wrapped this in `try/except RuntimeError: raise`,
        # which is a no-op and has been removed throughout this class.
        PhidgetLibrary.getDll().CPhidgetLED_create(byref(self.handle))

    def __del__(self):
        """The Destructor Method for the LED Class"""
        Phidget.dispose(self)

    @staticmethod
    def _checkResult(result):
        """Translate a non-zero return code from the C library into a PhidgetException.

        All C API calls in this class funnel through this helper so the
        repeated call/check/raise pattern is written exactly once.
        """
        if result > 0:
            raise PhidgetException(result)

    def getDiscreteLED(self, index):
        """Deprecated: use getBrightness"""
        ledVal = c_int()
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_getDiscreteLED(self.handle, c_int(index), byref(ledVal)))
        return ledVal.value

    def setDiscreteLED(self, index, value):
        """Deprecated: use setBrightness"""
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_setDiscreteLED(self.handle, c_int(index), c_int(value)))

    def getBrightness(self, index):
        """Returns the brightness value of an LED.  This value ranges from 0-100.

        Parameters:
            index<int>: index of the Discrete LED.

        Returns:
            Brightness of the LED <double>.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        ledVal = c_double()
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_getBrightness(self.handle, c_int(index), byref(ledVal)))
        return ledVal.value

    def setBrightness(self, index, value):
        """Sets the brightness of an LED.  Valid values are 0-100 (0 = off, 100 = brightest).

        Parameters:
            index<int>: index of the Discrete LED.
            value<double>: brightness value of the Discrete LED.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index or brightness value are out of range.
        """
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_setBrightness(self.handle, c_int(index), c_double(value)))

    def getCurrentLimitIndexed(self, index):
        """Returns the current limit of an LED.  This value ranges from 0-80 mA.

        Parameters:
            index<int>: index of the LED.

        Returns:
            Current limit of the LED <double>.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        ledVal = c_double()
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_getCurrentLimitIndexed(self.handle, c_int(index), byref(ledVal)))
        return ledVal.value

    def setCurrentLimitIndexed(self, index, value):
        """Sets the current limit of an LED.  Valid values are 0-80 mA.

        Parameters:
            index<int>: index of the Discrete LED.
            value<double>: current limit of the Discrete LED.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index or current limit value are out of range.
        """
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_setCurrentLimitIndexed(self.handle, c_int(index), c_double(value)))

    def getLEDCount(self):
        """Returns the number of LEDs that this board can drive.

        This may not correspond to the actual number of LEDs attached.

        Returns:
            The number of available LEDs <int>.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached.
        """
        LEDCount = c_int()
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_getLEDCount(self.handle, byref(LEDCount)))
        return LEDCount.value

    def getCurrentLimit(self):
        """Returns the current limit for all outputs (a LEDCurrentLimit constant).

        This is only supported by some PhidgetLEDs - see the product manual.

        Returns:
            The current limit for all the outputs <int>.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if unsupported by this board.
        """
        currentLimit = c_int()
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_getCurrentLimit(self.handle, byref(currentLimit)))
        return currentLimit.value

    def setCurrentLimit(self, currentLimit):
        """Sets the current limit for all outputs (a LEDCurrentLimit constant).

        This is only supported by some PhidgetLEDs - see the product manual.
        By default, currentLimit is set to LEDCurrentLimit.CURRENT_LIMIT_20mA.

        Parameters:
            currentLimit<int>: desired current limit to set for all the outputs.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if unsupported by this board.
        """
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_setCurrentLimit(self.handle, c_int(currentLimit)))

    def getVoltage(self):
        """Returns the voltage output for all outputs (a LEDVoltage constant).

        This is only supported by some PhidgetLEDs - see the product manual.

        Returns:
            The voltage level set for all the outputs <int>.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if unsupported by this board.
        """
        voltage = c_int()
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_getVoltage(self.handle, byref(voltage)))
        return voltage.value

    def setVoltage(self, voltage):
        """Sets the voltage output for all outputs (a LEDVoltage constant).

        This is only supported by some PhidgetLEDs - see the product manual.
        By default, voltage is set to LEDVoltage.VOLTAGE_2_75V.

        Parameters:
            voltage<int>: desired voltage level to set for all the outputs.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if unsupported by this board.
        """
        self._checkResult(PhidgetLibrary.getDll().CPhidgetLED_setVoltage(self.handle, c_int(voltage)))
| StarcoderdataPython |
9714473 | from regression_tests import *
class Test(Test):
    """Runs fileinfo with JSON output and checks MoleBox packer detection."""
    settings = TestSettings(
        tool='fileinfo',
        input=[
            'sample_20.ex',
            'sample_236.ex',
            'sample_23x.ex',
            'sample_42321.ex',
            'sample_uv_02.ex',
        ],
        args='--json'
    )

    def test_correctly_analyzes_input_file(self):
        """Every sample must be analyzed successfully and report MoleBox among detected tools."""
        molebox_found = False
        self.assertTrue(self.fileinfo.succeeded)
        # Scan the detected-tools list for the MoleBox packer entry.
        for tool in self.fileinfo.output['tools']:
            if tool['name'] == 'MoleBox':
                molebox_found = True
        self.assertTrue(molebox_found)
| StarcoderdataPython |
def create_description():
    """Placeholder: only prints a confirmation message (no description is created yet)."""
    print('ok')
class mission:
    """A mission record with class-wide running counters.

    The class attributes ``id`` and ``count`` start at -1 and are bumped
    once per instantiation, so the first instance gets id/count 0.  The
    instance's ``id`` is a snapshot that never changes afterwards.
    """
    id = -1
    delete_count = -1  # reserved counter; never updated in this class
    count = -1

    def __init__(self, name, date_add, date_start, date_end, description):
        # Bump the shared counters, then snapshot them on the instance.
        mission.id += 1
        mission.count += 1
        self.id = mission.id
        self.count = mission.count
        self.name = name
        self.description = description
        self.date_add = date_add
        self.date_start = date_start
        self.date_end = date_end
| StarcoderdataPython |
9615915 | import pytest
try:
import ray
from ray.rllib.agents.a3c import A2CTrainer, A3CTrainer
from ray.rllib.agents.es import ESTrainer
from ray.rllib.agents.ddpg import TD3Trainer, ApexDDPGTrainer
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer
from ray.rllib.agents.impala import ImpalaTrainer
from ray.rllib.agents.marwil import MARWILTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.ppo import PPOTrainer, APPOTrainer
except ImportError:
pytest.skip("skipping rllib integration tests because package is not installed",
allow_module_level=True)
# supported RL algorithms by environment (every betting env supports the
# same set of RLlib trainers; tests below parametrize over these lists)
ENV_DICT = {'FootballDataDailyEnv': [A2CTrainer, A3CTrainer, ESTrainer, TD3Trainer, ApexDDPGTrainer,
                                     DDPGTrainer, ImpalaTrainer, MARWILTrainer, PGTrainer, PPOTrainer, APPOTrainer],
            'FootballDataDailyPercentageEnv': [A2CTrainer, A3CTrainer, ESTrainer, TD3Trainer, ApexDDPGTrainer,
                                               DDPGTrainer, ImpalaTrainer, MARWILTrainer, PGTrainer, PPOTrainer,
                                               APPOTrainer],
            'TennisDataDailyEnv': [A2CTrainer, A3CTrainer, ESTrainer, TD3Trainer, ApexDDPGTrainer,
                                   DDPGTrainer, ImpalaTrainer, MARWILTrainer, PGTrainer, PPOTrainer, APPOTrainer],
            'TennisDataDailyPercentageEnv': [A2CTrainer, A3CTrainer, ESTrainer, TD3Trainer, ApexDDPGTrainer,
                                             DDPGTrainer, ImpalaTrainer, MARWILTrainer, PGTrainer, PPOTrainer,
                                             APPOTrainer]
            }

# Start Ray once for the whole test module.
ray.init(logging_level='ERROR')
def _get_model_config(alg, framework):
    """Build a minimal, fast trainer config for integration-smoke tests.

    Starts from the trainer's default config and shrinks batch sizes,
    network width and iteration times so one train() step finishes quickly.
    *framework* is 'tf' or 'torch'; the config key differs by Ray version.
    """
    model_config = alg._default_config.copy()
    model_config.update(num_gpus=0, num_workers=1, log_level='ERROR', train_batch_size=10)
    model_config['model'].update(fcnet_hiddens=[3])
    # Ray <= 0.8.5 used `use_pytorch` instead of the `framework` key
    # (and Apex-DDPG/IMPALA did not accept it at all).
    if ray.__version__ <= '0.8.5':
        if alg not in (ApexDDPGTrainer, ImpalaTrainer):
            model_config.update(use_pytorch=framework == 'torch')
    else:
        model_config.update(framework=framework)
    # Per-algorithm tweaks to keep a single iteration tiny.
    if alg in [DDPGTrainer, TD3Trainer, ApexDDPGTrainer]:
        model_config.update(learning_starts=0, timesteps_per_iteration=10)
    if alg == ESTrainer:
        model_config.update(episodes_per_batch=1, train_batch_size=10, noise_size=250000)
    if alg == ApexDDPGTrainer:
        model_config.update(num_workers=2, prioritized_replay=True, min_iter_time_s=1)
    if alg in [PPOTrainer, APPOTrainer]:
        model_config.update(num_sgd_iter=10)
    if alg == PPOTrainer:
        model_config.update(sgd_minibatch_size=10)
    return model_config
@pytest.mark.parametrize("alg", ENV_DICT['FootballDataDailyEnv'])
@pytest.mark.parametrize("framework", ['tf', 'torch'])
def test_football_co_uk_daily_env_with_(alg, framework):
    """One training iteration on the football daily env must not raise."""
    model = alg(env="FootballDataDaily-ray-v0", config=_get_model_config(alg, framework))
    model.train()
@pytest.mark.parametrize("alg", ENV_DICT['FootballDataDailyPercentageEnv'])
@pytest.mark.parametrize("framework", ['tf', 'torch'])
def test_football_co_uk_daily_percentage_env_with_(alg, framework):
    """One training iteration on the football percentage env must not raise."""
    model = alg(env="FootballDataDailyPercent-ray-v0", config=_get_model_config(alg, framework))
    model.train()
@pytest.mark.parametrize("alg", ENV_DICT['TennisDataDailyEnv'])
@pytest.mark.parametrize("framework", ['tf', 'torch'])
def test_tennis_co_uk_daily_env_with_(alg, framework):
    """One training iteration on the tennis daily env must not raise."""
    model = alg(env="TennisDataDaily-ray-v0", config=_get_model_config(alg, framework))
    model.train()
@pytest.mark.parametrize("alg", ENV_DICT['TennisDataDailyPercentageEnv'])
@pytest.mark.parametrize("framework", ['tf', 'torch'])
def test_tennis_co_uk_daily_percentage_env_with_(alg, framework):
    """One training iteration on the tennis percentage env must not raise."""
    model = alg(env="TennisDataDailyPercent-ray-v0", config=_get_model_config(alg, framework))
    model.train()
| StarcoderdataPython |
9712145 | <filename>_Model Testing/match_model_test.py
from math import *
import time
import csv
# Gas compositions in mol% (this file is Python 2 -- see iteritems below).
# Target: composition measured at the surface.
surface_gas_molpercent = {"H2O": 93.32,
                          "CO2": 3.33,
                          "SO2": 3.34}

# Candidate subsurface end-members: equilibrium fluids and degassing fluids
# at three depths.
shallow_eq = {"H2O": 71.41,
              "CO2": 19.93,
              "SO2": 8.67}

int_eq = {"H2O": 24.43,
          "CO2": 3.83,
          "SO2": 71.74}

deep_eq = {"H2O": 35.03,
           "CO2": 4.68,
           "SO2": 60.29}

shallow_degassing = {"H2O": 98.36,
                     "CO2": 0.04,
                     "SO2": 1.60}

int_degassing = {"H2O": 27.43,
                 "CO2": 0.06,
                 "SO2": 72.51}

deep_degassing = {"H2O": 90.97,
                  "CO2": 0.19,
                  "SO2": 8.84}

sub_gases = {"Shallow_EQ_Fluid": shallow_eq,
             "Int_EQ_Fluid": int_eq,
             "Deep_EQ_Fluid": deep_eq,
             "Shallow_Degassing": shallow_degassing,
             "Int_Degassing": int_degassing,
             "Deep_Degassing": deep_degassing}

# Allowed absolute deviation per species when matching the surface gas.
threshold = 1.0 #in absolute mol% value

# Per-species views of the end-member compositions.
sub_CO2 = {gas_name: gas_dict["CO2"] for gas_name, gas_dict in sub_gases.iteritems()}
sub_H2O = {gas_name: gas_dict["H2O"] for gas_name, gas_dict in sub_gases.iteritems()}
sub_SO2 = {gas_name: gas_dict["SO2"] for gas_name, gas_dict in sub_gases.iteritems()}

start_time = time.time()
def sums(length, total_sum):
    """Yield every tuple of *length* non-negative ints summing to *total_sum*."""
    if length == 1:
        # Base case: a single slot must take the whole remainder.
        yield (total_sum,)
        return
    for head in range(total_sum + 1):
        for tail in sums(length - 1, total_sum - head):
            yield (head,) + tail
final_list = []
for l in list(sums(6,100)):
sum_CO2 = ( l[0]/100. * sub_CO2["Shallow_EQ_Fluid"] +
l[1]/100. * sub_CO2["Int_EQ_Fluid"] +
l[2]/100. * sub_CO2["Deep_EQ_Fluid"] +
l[3]/100. * sub_CO2["Shallow_Degassing"] +
l[4]/100. * sub_CO2["Int_Degassing"] +
l[5]/100. * sub_CO2["Deep_Degassing"] )
if sum_CO2 < surface_gas_molpercent["CO2"] + threshold and sum_CO2 > surface_gas_molpercent["CO2"] - threshold:
sum_H2O = ( l[0]/100. * sub_H2O["Shallow_EQ_Fluid"] +
l[1]/100. * sub_H2O["Int_EQ_Fluid"] +
l[2]/100. * sub_CO2["Deep_EQ_Fluid"] +
l[3]/100. * sub_H2O["Shallow_Degassing"] +
l[4]/100. * sub_H2O["Int_Degassing"] +
l[5]/100. * sub_H2O["Deep_Degassing"] )
if sum_H2O < surface_gas_molpercent["H2O"] + threshold and sum_H2O > surface_gas_molpercent["H2O"] - threshold:
sum_SO2 = ( l[0]/100. * sub_SO2["Shallow_EQ_Fluid"] +
l[1]/100. * sub_SO2["Int_EQ_Fluid"] +
l[2]/100. * sub_CO2["Deep_EQ_Fluid"] +
l[3]/100. * sub_SO2["Shallow_Degassing"] +
l[4]/100. * sub_SO2["Int_Degassing"] +
l[5]/100. * sub_SO2["Deep_Degassing"] )
if sum_SO2 < surface_gas_molpercent["SO2"] + threshold and sum_SO2 > surface_gas_molpercent["SO2"] - threshold:
final_list.append(l)
end_time = time.time()
print ("--- %s seconds ---" % (end_time - start_time))
print final_list
with open("match_model_test_output.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(final_list)
#L = list(sums(6,100))
# print L[-100:]
# start_time = time.time()
# possible_gas_mixture = []
# for a in range(101):
# for b in range(101):
# if a+b > 100:
# break
# for c in range(101):
# if a+b+c > 100:
# break
# for d in range(101):
# if a+b+c+d > 100:
# break
# for e in range(101):
# if a+b+c+d+e > 100:
# break
# for f in range(101):
# if a+b+c+d+e+f == 100:
# possible_gas_mixture.append((a, b, c, d, e, f))
# print possible_gas_mixture
# print ("--- %s seconds ---" % (time.time() - start_time))
# start_time = time.time()
# import itertools
# numbers = [1, 2, 3, 7, 7, 9, 10]
# result = [seq for i in range(len(numbers), 0, -1) for seq in itertools.combinations(numbers, i) if sum(seq) == 10]
# print result
# print ("--- %s seconds ---" % (time.time() - start_time))
# result = itertools.takewhile(a+b+c+d <= 100, [a, ])
# print result
| StarcoderdataPython |
3444405 | from tkinter import *
import time
import serial
SIZE = 18           # sensor grid is SIZE x SIZE pixels
UPDATE_RATE = 100   # GUI refresh interval in milliseconds
pixels = [0]*(SIZE**2)  # placeholders; replaced by tkinter Buttons in create_grid
BAUD_RATE = 115200
time.sleep(1)  # presumably lets the serial subsystem settle before scanning -- TODO confirm
def get_port_name():
    """Scan candidate serial ports for one that is sending data.

    Tries /dev/ttyUSB*, /dev/ttyACM* and COM* for indices 0-255 (GNU/Linux
    and Windows); reads up to 100 lines from each port that opens and
    returns the first port name that produced any data.

    Returns:
        str: the port name, 'STOP' on Ctrl-C, or '' when nothing is found.
    """
    for i in range(0, 256):  # Select right port (GNU/Linux and Windows)
        for serial_name in ['/dev/ttyUSB', '/dev/ttyACM', 'COM']:
            port_name = serial_name + str(i)
            try:
                candidate = serial.Serial(port_name, BAUD_RATE, timeout=.1)
            except KeyboardInterrupt:
                return 'STOP'
            except Exception:
                continue  # port absent or busy -- keep scanning
            try:
                for times in range(0, 100):
                    if candidate.readline()[:-2]:
                        return port_name
            except KeyboardInterrupt:
                return 'STOP'
            except Exception:
                pass  # read failure: treat the port as silent
            finally:
                # Fix: the original never closed the handles it opened, leaking
                # every probed port and leaving the found port locked when the
                # caller re-opens it.
                candidate.close()
    return ''
# Keep scanning until a live port (or 'STOP' from Ctrl-C) is found.
port = ''
while port == '':
    port = get_port_name()
print('Connecting to port', port)
if port == 'STOP':
    exit()
arduino = serial.Serial(port, BAUD_RATE, timeout=.5)
time.sleep(1)
# Handshake: tell the device we are ready, drain any stale bytes,
# then request the first frame with 'R'.
arduino.write('OK'.encode())
time.sleep(1)
while arduino.read():
    pass
arduino.write('R'.encode())
time.sleep(2)
def create_grid(width, height, pixels_grid):
    """Populate the window with a width x height grid of disabled Button
    widgets (one per sensor pixel), make the grid stretch with the window,
    and kick off the periodic updater() polling loop.
    """
    initial_color = '#cccccc'
    for row in range(height):
        for col in range(width):
            cell = Button(pixels_grid, background=initial_color, state='disabled')
            # Map screen position to the flat pixel index read by updater():
            # column-major, with the vertical axis flipped.
            pixels[SIZE - 1 - row + col * SIZE] = cell
            cell.grid(column=col, row=row + 1, sticky=(N + S + E + W))
    # Give every column and row equal weight so cells share resize space.
    for col in range(width):
        Grid.columnconfigure(pixels_grid, col, weight=1)
    for row in range(height):
        Grid.rowconfigure(pixels_grid, row + 1, weight=1)
    updater()
def updater():
    # Poll one frame from the board and repaint the whole grid, then
    # re-schedule itself via Tk's after() timer.
    sensor_pixels = arduino.readline()[:-2] # Remove \r\n
    # print(sensor_pixels)
    for i in range(SIZE**2):
        # The low 6 bits of each received byte hold the pixel intensity;
        # rescale 0..63 to 0..255 and repeat the hex pair three times to
        # form a grayscale '#rrggbb' colour.
        pixels[i]['background']='#'+('%02x'%(round((sensor_pixels[i] & 0b00111111)/(2**6-1)*(2**8-1))))*3
    # Request the next frame from the board before going back to sleep.
    arduino.write('R'.encode())
    pixels_grid.after(UPDATE_RATE, updater)
# Build the main window and enter the Tk event loop; updater() keeps
# re-scheduling itself via after(), so the display refreshes until the
# window is closed.
pixels_grid = Tk()
pixels_grid.geometry("650x650")
create_grid(SIZE, SIZE, pixels_grid)
pixels_grid.mainloop()
| StarcoderdataPython |
6641756 | import numpy as np
def lyapunov(N, phi, QQ):
    """Solve the discrete Lyapunov equation  P = phi @ P @ phi.T + QQ.

    Uses the vectorized form  vec(P) = (I - phi (x) phi)^{-1} vec(QQ),
    where (x) is the Kronecker product.

    Parameters
    ----------
    N : int
        State dimension; ``phi`` and ``QQ`` are (N, N).
    phi : (N, N) array_like
        Transition matrix (needs spectral radius < 1 for a unique solution).
    QQ : (N, N) numpy.ndarray
        Right-hand-side (e.g. innovation covariance) matrix.

    Returns
    -------
    (N, N) numpy.ndarray
        The solution P.
    """
    # Solve the linear system directly instead of forming the explicit
    # inverse: np.linalg.solve is cheaper and numerically more accurate
    # than inv() followed by a matmul.
    A = np.identity(N**2) - np.kron(phi, phi)
    return np.linalg.solve(A, QQ.reshape(N**2, 1)).reshape((N, N))
| StarcoderdataPython |
354729 | <reponame>GuiBarreto/Segundo-Mundo-Python-Curso-em-Video<gh_stars>0
# Read two numbers and report which one is larger (or that they are equal).
num_a = float(input('Primeiro número: '))
num_b = float(input('Segundo número: '))
if num_a == num_b:
    print('Os dois números são iguais!')
elif num_a > num_b:
    print('O primeiro número é maior!')
else:
    print('O segundo valor é maior!')
| StarcoderdataPython |
4853145 | from ..pkg_info import cmp_pkg_version
import unittest
from unittest import mock
import pytest
# Each schedule pairs a release version with the modules / objects /
# attributes that must be deleted from the code base before that version
# ships.  The tests below fail once the installed package version reaches
# an entry's version while the named thing still exists.
MODULE_SCHEDULE = [
    ("5.0.0", ["nibabel.keywordonly"]),
    ("4.0.0", ["nibabel.trackvis"]),
    ("3.0.0", ["nibabel.minc", "nibabel.checkwarns"]),
    # Verify that the test will be quiet if the schedule outlives the modules
    ("1.0.0", ["nibabel.nosuchmod"]),
]
# (module, object) pairs scheduled for removal.
OBJECT_SCHEDULE = [
    ("5.0.0", [("nibabel.pydicom_compat", "dicom_test"),
               ("nibabel.onetime", "setattr_on_read")]),
    ("3.0.0", [("nibabel.testing", "catch_warn_reset")]),
    # Verify that the test will be quiet if the schedule outlives the modules
    ("1.0.0", [("nibabel.nosuchmod", "anyobj"), ("nibabel.nifti1", "nosuchobj")]),
]
# (module, class, attribute) triples scheduled for removal.
ATTRIBUTE_SCHEDULE = [
    ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data")]),
    # Verify that the test will be quiet if the schedule outlives the modules
    ("1.0.0", [("nibabel.nosuchmod", "anyobj", "anyattr"),
               ("nibabel.nifti1", "nosuchobj", "anyattr"),
               ("nibabel.nifti1", "Nifti1Image", "nosuchattr")]),
]
def _filter(schedule):
    """Flatten *schedule* to the entries whose scheduled version is no
    longer ahead of the installed package version
    (``cmp_pkg_version(ver) < 1``), preserving schedule order.
    """
    due = []
    for version, entries in schedule:
        if cmp_pkg_version(version) < 1:
            due.extend(entries)
    return due
def test_module_removal():
    """Fail if any module past its removal deadline can still be imported."""
    for module in _filter(MODULE_SCHEDULE):
        try:
            __import__(module)
        except ImportError:
            # Already removed -- exactly what we want.
            continue
        assert False, f"Time to remove {module}"
def test_object_removal():
    """Fail if any object past its removal deadline is still defined."""
    for mod_name, obj_name in _filter(OBJECT_SCHEDULE):
        try:
            mod = __import__(mod_name)
        except ImportError:
            # The whole module is gone; nothing left to check.
            continue
        assert not hasattr(mod, obj_name), f"Time to remove {mod_name}.{obj_name}"
def test_attribute_removal():
    """Fail if any class attribute past its removal deadline still exists."""
    for mod_name, cls_name, attr_name in _filter(ATTRIBUTE_SCHEDULE):
        try:
            mod = __import__(mod_name)
        except ImportError:
            # The whole module is gone; nothing left to check.
            continue
        try:
            klass = getattr(mod, cls_name)
        except AttributeError:
            # The class itself is gone; nothing left to check.
            continue
        assert not hasattr(klass, attr_name), (
            f"Time to remove {mod_name}.{cls_name}.{attr_name}")
#
# Test the tests, making sure that we will get errors when the time comes
#
# Builds the fully qualified name of one of this module's schedules, for
# use as a mock.patch target below (e.g. _sched("MODULE")).
_sched = "nibabel.tests.test_removalschedule.{}_SCHEDULE".format
@mock.patch(_sched("MODULE"), [("3.0.0", ["nibabel.nifti1"])])
def test_unremoved_module():
    # Patch the schedule with a module that still exists; the removal
    # check must then raise AssertionError, proving it can actually trip.
    with pytest.raises(AssertionError):
        test_module_removal()
@mock.patch(_sched("OBJECT"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image")])])
def test_unremoved_object():
    # Patch the schedule with an object that still exists; the removal
    # check must then raise AssertionError, proving it can actually trip.
    with pytest.raises(AssertionError):
        test_object_removal()
@mock.patch(_sched("ATTRIBUTE"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image", "affine")])])
def test_unremoved_attr():
    # Patch the schedule with an attribute that still exists; the removal
    # check must then raise AssertionError, proving it can actually trip.
    with pytest.raises(AssertionError):
        test_attribute_removal()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.