id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1740396 | <filename>cogs/premium.py
import discord
from discord.ext import commands
# Cog definition
class premium(commands.Cog):
    """Cog that exposes the `premium` command showing the donation embed."""

    def __init__(self, client):
        self.client = client

    # webhook event
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.guild_only()
    @commands.command()
    async def premium(self, ctx):
        """Send an embed with the donation link, thumbnail and banner."""
        embed = discord.Embed(
            title="Link para donates",
            url="https://picpay.me/luiz.miguel.sr",
            description="",
            color=0xfdf7f7,
        )
        embed.set_thumbnail(url="https://i.imgur.com/l7gm6w1.png")
        embed.set_image(url="https://i.imgur.com/XIkYkHu.png")
        embed.set_author(name="ChootyBot - AJUDA ", url="",
                         icon_url="https://i.imgur.com/lI01A4T.jpg")
        await ctx.send(embed=embed)
def setup(client):
    """Entry point used by discord.py to register this cog on the bot."""
    client.add_cog(premium(client))
| StarcoderdataPython |
1739890 | from tkinter import messagebox
import tradlib
import shutil
import sys
import os
active_language = "english" # default on start
def get_resources_path(relative_path):
    """Resolve *relative_path* against the PyInstaller bundle or source dir.

    Inside a PyInstaller one-file bundle ``sys._MEIPASS`` points at the
    unpack directory and only the file-name component of *relative_path*
    is used; otherwise the path is resolved relative to this module.
    ``os.path.join`` is used with separate components (the original passed
    a single pre-concatenated string, which defeated the purpose of join
    and hard-coded Windows separators).
    """
    try:
        file_name = relative_path.split("\\")[-1]
        base_path = sys._MEIPASS  # raises AttributeError outside a bundle
        return os.path.join(base_path, file_name)
    except Exception:
        base_path = os.path.realpath(__file__)
        # NOTE(review): this strip only works when the module is literally
        # named Utils.py on Windows — confirm on other platforms.
        base_path = base_path.replace("\\Utils.py", "")
        return os.path.join(base_path, relative_path)
def get_translations(translation_group, translation_key):
    """Look up a translated string for the active language.

    Returns the literal string "Error" when the key is unknown.
    """
    lookup = [translation_group, 0, translation_key]
    try:
        return tradlib.get_translation(active_language, lookup)
    except KeyError:
        return "Error"
def set_active_language(selected_language):
    """Switch the module-wide UI language used by get_translations()."""
    global active_language
    active_language = selected_language
def check_empty_entry(entries_to_check):
    """Validate the GUI entry values before generating any output.

    *entries_to_check* maps widget names to their current values. On the
    first problem an error dialog (translated) is shown and False is
    returned; True is returned only once a non-empty, existing output
    path has been validated.
    """
    for key, value in entries_to_check.items():
        # Required text fields and the JSON listbox must be non-empty.
        if (key == "entry_modid" and value == "") or (key == "entry_material_name" and value == "") or \
                (key == "listbox_json_list" and not value):
            messagebox.showerror(get_translations("other", "error_GUI_title"),
                                 get_translations("labels", "label_blank_error_messagebox"))
            break
        elif key == "entry_output_path":
            if value == "":
                messagebox.showerror(get_translations("other", "error_GUI_title"),
                                     get_translations("labels", "label_blank_error_messagebox"))
                break
            else:
                # The path is validated by attempting to chdir into it
                # (side effect: the process cwd changes on success).
                try:
                    os.chdir(value)
                    # NOTE(review): returns True as soon as the output path
                    # validates — entries after it in dict order are never
                    # checked. Confirm callers always place it last.
                    return True
                except FileNotFoundError:
                    messagebox.showerror(get_translations("other", "error_GUI_title"),
                                         get_translations("labels", "label_path_error_messagebox"))
                    break
    return False
def make_output_dir(output_folder_path):
    """Create the 'Json maker' output tree under *output_folder_path*.

    Layout created (existing directories are reused)::

        Json maker/
            blockstates/
            models/
                block/
                item/

    Leaves the process cwd at *output_folder_path*, matching the original
    implementation's side effect. The repeated try/mkdir/FileExistsError
    chains are replaced with ``os.makedirs(..., exist_ok=True)``, which is
    equivalent but race-free and far shorter.
    """
    root = os.path.join(output_folder_path, "Json maker")
    os.makedirs(os.path.join(root, "blockstates"), exist_ok=True)
    os.makedirs(os.path.join(root, "models", "block"), exist_ok=True)
    os.makedirs(os.path.join(root, "models", "item"), exist_ok=True)
    # Callers rely on the cwd ending up at the output folder.
    os.chdir(output_folder_path)
def zip_output_dir(output_folder_path):
    """Archive the 'Json maker' tree to 'Json maker.zip' and delete the tree.

    ``shutil.make_archive`` silently overwrites an existing archive, so the
    original ``except FileExistsError`` branch was dead code — and its
    ``shutil.rmtree("Json maker.zip")`` would have raised on a file anyway.
    A stale archive is now removed explicitly before re-creating it.
    """
    os.chdir(output_folder_path)
    archive = "Json maker.zip"
    if os.path.exists(archive):
        os.remove(archive)  # replace any stale archive from a previous run
    shutil.make_archive("Json maker", "zip", "Json maker")
    shutil.rmtree("Json maker")
| StarcoderdataPython |
1642564 | <reponame>nuaa-QK/1_NAS
import os, sys, queue, time, random, re, json
import datetime, traceback, pickle
import multiprocessing, copy
from base import Network, NetworkItem, Cell
from info_str import NAS_CONFIG, MF_TEMP
def _dump_stage(stage_info):
    """Serialize *stage_info* to memory/stage_info.pickle under the cwd.

    NOTE(review): despite the .pickle file name, the data is written as
    JSON text — confirm that readers of this file expect JSON.
    """
    _cur_dir = os.getcwd()
    stage_path = os.path.join(_cur_dir, "memory", "stage_info.pickle")
    with open(stage_path, "w") as f:
        json.dump(stage_info, f, indent=2)
class TimeCnt:
    """Simple wall-clock stopwatch: call start(), later stop() for elapsed."""

    def __init__(self):
        # Timestamp recorded by the most recent start(); None until used.
        self.time_stamp = None

    def start(self):
        """Record the current time and return it formatted as 'dd HH:MM:SS'."""
        self.time_stamp = datetime.datetime.now()
        return self.time_stamp.strftime('%d %H:%M:%S')

    def stop(self):
        """Return the time elapsed since start(), formatted as 'H:M:S'."""
        elapsed = datetime.datetime.now() - self.time_stamp
        total = int(elapsed.total_seconds())
        hours, remainder = divmod(total, 3600)
        minutes, seconds = divmod(remainder, 60)
        return '{}:{}:{}'.format(hours, minutes, seconds)
class DataSize:
    """Controls how much training data the evaluator sees at each stage."""

    def __init__(self, eva):
        self.eva = eva
        self.round_count = 0  # number of game rounds already counted
        main_cfg = NAS_CONFIG['nas_main']
        self.mode = main_cfg['add_data_mode']
        # data size control for the game stage
        self.add_data_per_rd = main_cfg['add_data_per_round']
        self.init_lr = main_cfg['init_data_size']
        self.scale = main_cfg['data_increase_scale']
        # data size control for confirm-train
        self.data_for_confirm_train = main_cfg['add_data_for_confirm_train']

    def _cnt_game_data(self):
        """Return the data size for the next game round per the growth mode."""
        if self.mode == "linear":
            self.round_count += 1
            return self.round_count * self.add_data_per_rd
        if self.mode == "scale":
            size = int(self.init_lr * (self.scale ** self.round_count))
            self.round_count += 1
            return size
        raise ValueError("signal error: mode, it must be one of linear, scale")

    def control(self, stage="game"):
        """Increase the dataset's size in different way
        :param stage: must be one of "game", "confirm"
        :return:
        """
        if stage == "game":
            cur_data_size = self._cnt_game_data()
        elif stage == "confirm":
            cur_data_size = self.data_for_confirm_train
        elif stage == "retrain":
            cur_data_size = -1  # -1 means "use the full dataset"
        else:
            raise ValueError("signal error: stage, it must be one of game, confirm")
        if self.eva:
            cur_data_size = self.eva._set_data_size(cur_data_size)
        return cur_data_size
def _epoch_ctrl(eva=None, stage="game"):
    """Return (and optionally apply to *eva*) the epoch count for a stage.

    :param eva: evaluator to configure, or None (for eva_mask)
    :param stage: must be one of "game", "confirm", "retrain"
    :return: the epoch count configured for that stage
    """
    stage_to_key = {
        "game": "search_epoch",
        "confirm": "confirm_epoch",
        "retrain": "retrain_epoch",
    }
    if stage not in stage_to_key:
        raise ValueError("signal error: stage, it must be one of game, confirm, retrain")
    cur_epoch = NAS_CONFIG['eva'][stage_to_key[stage]]
    if eva:  # for eva_mask
        eva._set_epoch(cur_epoch)
    return cur_epoch
class EvaScheduleItem:
    """One evaluation task handed to TaskScheduler, plus its bookkeeping."""

    def __init__(self, nn_id, alig_id, graph_template, item, pre_blk,
                 ft_sign, bestNN, rd, nn_left, spl_batch_num, epoch, data_size):
        # --- task content (assigned at construction) ---
        self.nn_id = nn_id
        self.alig_id = alig_id
        self.graph_template = graph_template
        self.network_item = item  # None when retraining
        self.pre_block = pre_blk
        self.ft_sign = ft_sign
        self.is_bestNN = bestNN
        self.round = rd
        self.nn_left = nn_left
        self.spl_batch_num = spl_batch_num
        self.epoch = epoch
        self.data_size = data_size
        # --- task bookkeeping (filled in later by the scheduler/worker) ---
        self.task_id = -1      # set in TaskScheduler().exec_task_async
        self.pid = -1          # set in task_func
        self.start_time = None # set in task_func
        self.cost_time = 0     # set in task_func
        self.gpu_info = -1     # GPU id handed out by the scheduler
        # --- result ---
        self.score = 0         # set in task_func
class PredScheduleItem:
    """A prediction task over a pool of networks, plus its bookkeeping."""

    def __init__(self, net_pool):
        self.net_pool = net_pool
        # Task bookkeeping, filled in by the scheduler/worker later.
        self.task_id = -1
        self.start_time = None
        self.cost_time = 0
        self.gpu_info = -1
class TaskScheduler:
    # Mainly for coordinating GPU resources
    def __init__(self):
        self.task_list = []    # tasks waiting for a free GPU
        self.result_list = []  # completed task items collected so far
        # for multiprocessing communication
        self.result_buffer = multiprocessing.Queue()  # workers put finished items here
        self.signal = multiprocessing.Event()         # set by a worker on completion
        # resource
        self.gpu_num = NAS_CONFIG['nas_main']['num_gpu']
        self.gpu_list = queue.Queue()  # pool of free GPU ids
        for gpu in range(self.gpu_num):
            self.gpu_list.put(gpu)
        # for counting task(every task has a unique task_id)
        self.task_id = 0

    def load_tasks(self, tasks):
        """Append *tasks* to the pending queue."""
        self.task_list.extend(tasks)

    def get_task_id(self):
        """Return the next unique, monotonically increasing task id."""
        tmp_id = self.task_id
        self.task_id += 1
        return tmp_id

    def exec_task_async(self, task_func, *args, **kwargs):
        """Async: directly return whetherever the tasks is completed
        """
        # Launch workers while both a pending task and a free GPU exist.
        while self.task_list and not self.gpu_list.empty():
            gpu = self.gpu_list.get()  # get gpu
            task_item = self.task_list.pop(0)  # get task
            # config task
            task_item.gpu_info = gpu
            task_item.task_id = self.get_task_id()
            # exec task
            multiprocessing.Process(target=task_func, args=[task_item, self.result_buffer, self.signal, *args]).start()
        # NOTE(review): clearing after spawning can race with a very fast
        # worker that sets the signal before this line runs — confirm that
        # result collection tolerates this.
        self.signal.clear()

    def load_part_result(self):
        """load one or more results if there are tasks completed
        """
        self.signal.wait()  # block until at least one worker reports done
        while not self.result_buffer.empty():
            task_item = self.result_buffer.get()
            self.result_list.append(task_item)
            self.gpu_list.put(task_item.gpu_info)  # return the GPU to the pool

    def exec_task(self, task_func, *args, **kwargs):
        """Sync: waiting for all the tasks completed before return
        """
        # Keep launching/collecting until no task remains and all GPUs are back.
        while self.task_list or self.gpu_list.qsize() < self.gpu_num:
            self.exec_task_async(task_func, *args, **kwargs)
            self.load_part_result()

    def get_result(self):
        """Return the accumulated results and clear the internal list."""
        result = self.result_list
        self.result_list = []
        return result
# for test...
def task_fun(task_item, result_buffer, signal, *args, **kwargs):
    """Dummy worker used to exercise TaskScheduler manually.

    Simulates 2-20 seconds of work, then reports the item back on
    *result_buffer* and sets the completion *signal*. The original body
    imported tensorflow without ever using it; the dead import is removed.
    """
    print("computing gpu {} task {}".format(task_item.gpu_info, task_item.alig_id))
    time.sleep(random.randint(2, 20))
    result_buffer.put(task_item)
    signal.set()
class Logger(object):
    """Routes NAS log messages to files under ./memory via the << operator."""

    def __init__(self):
        _cur_ver_dir = os.getcwd()
        log_dir = os.path.join(_cur_ver_dir, 'memory')
        naslog_path = os.path.join(log_dir, 'nas_log.txt')
        network_info_path = os.path.join(log_dir, 'network_info.txt')
        evalog_path = os.path.join(log_dir, 'evaluator_log.txt')
        errlog_path = os.path.join(log_dir, 'error_log.txt')
        self.base_data_dir = os.path.join(log_dir, 'base_data_serialize')
        # Log files are opened once and held for the object's lifetime.
        self._nas_log = open(naslog_path, 'a')
        self._network_log = open(network_info_path, 'a')
        self._eva_log = open(evalog_path, 'a')
        self._error_log = open(errlog_path, 'a')
        self._log_match = {  # match -> log
            'basedata': self.base_data_dir,
            'nas': self._nas_log,
            'net': self._network_log,
            'eva': self._eva_log,
            'err': self._error_log,
            'utils': sys.stdout  # for test
        }

    def __del__(self):
        # Best-effort close of the held log files at teardown.
        self._nas_log.close()
        self._network_log.close()
        self._eva_log.close()
        self._error_log.close()

    @staticmethod
    def _get_action(args):
        # Accepts either a bare action string or a tuple whose first
        # element is the action; returns (action, extra_args).
        if isinstance(args, str) and len(args):
            return args, ()
        elif isinstance(args, tuple) and len(args):
            return args[0], args[1:]
        else:
            raise Exception("empty or wrong log args")
        return  # unreachable (kept from original)

    def _log_output(self, match, output, temp, others):
        # `temp` is None only for 'basedata': the single network object in
        # `others` is pickled instead of formatting a text line.
        if not temp:
            assert len(others) == 1, "you must send net to log one by one"
            content = others[0]
            dump_path = os.path.join(self.base_data_dir, "blk_{}_nn_{}.pickle"
                                     .format(len(content.pre_block), content.id))
            with open(dump_path, "wb") as f_dump:
                pickle.dump(content, f_dump)
            return
        content = temp.format(others)
        output.write(content)
        output.write('\n')
        if match == "nas":
            print(content)  # mirror the main NAS log to stdout
        if match == "err":
            # Errors additionally carry the current traceback, both in the
            # log file and on stdout.
            traceback.print_exc(file=output)
            traceback.print_exc(file=sys.stdout)
        output.flush()
        return

    def __lshift__(self, args):
        """
        Write log or print system information.
        The specified log template is defined in info_str.py
        Args:
            args (string or tuple, non-empty)
                When it's tuple, its value is string.
                The first value must be action.
        Return:
            None
        Example:
            NAS_LOG = Logger() # 'Nas.run' func in nas.py
            NAS_LOG << 'enuming'
        """
        act, others = Logger._get_action(args)
        # The action prefix ("nas_...", "err_...", ...) selects the sink.
        match = act.split("_")[0]
        output = self._log_match[match]
        temp = MF_TEMP[act] if match != "basedata" else None
        self._log_output(match, output, temp, others)
# Module-level singleton used throughout the project via `NAS_LOG << ...`.
NAS_LOG = Logger()
def _check_log():
    """Ensure ./memory and ./memory/base_data_serialize exist and are clean.

    When leftover log content from a previous run is found, _ask_user is
    invoked to decide whether it may be cleared. Also creates the ./model
    directory when missing.
    """
    _cur_ver_dir = os.getcwd()
    log_dir = os.path.join(_cur_ver_dir, 'memory')
    base_data_dir = os.path.join(log_dir, 'base_data_serialize')
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
        os.mkdir(base_data_dir)
    else:
        if not os.path.exists(base_data_dir):
            os.mkdir(base_data_dir)
        log_dir_sub = os.listdir(log_dir)
        log_files = [os.path.join(log_dir, item) for item in log_dir_sub]
        log_files = [item for item in log_files if os.path.isfile(item)]
        # Leftovers: any non-empty log file or any serialized base data.
        have_content = False
        for file in log_files:
            if os.path.getsize(file) or os.listdir(base_data_dir):
                have_content = True
        if have_content:
            _ask_user(log_files, base_data_dir)
    if not os.path.exists("model"):
        os.mkdir("model")
def _ask_user(log_files, base_data_dir):
    """Ask whether existing logs may be erased; raise if the user refuses.

    NOTE(review): the interactive prompt is commented out and the answer is
    hard-coded to "y", so logs are always cleared without asking — confirm
    this is intentional (e.g. for automated runs).
    """
    print(MF_TEMP['nas_log_hint'])
    while True:
        # answer = input()
        answer = "y"
        if answer == "n":
            raise Exception(MF_TEMP['nas_existed_log'])
        elif answer == "y":
            log_files = _clear_log(log_files, base_data_dir)
            break
        else:
            print(MF_TEMP['nas_invalid_str'])
def _clear_log(files, base_data_dir):
for file in files:
with open(file, "w") as f:
f.truncate()
for item in os.listdir(base_data_dir):
os.remove(os.path.join(base_data_dir, item))
return files
if __name__ == '__main__':
    # Manual smoke test for TaskScheduler: schedule 15 dummy items on the
    # configured GPUs and wait for all of them to finish.
    # NAS_LOG << ('hello', 'I am bread', 'hello world!')
    # NAS_LOG << 'enuming'
    item = NetworkItem(0, [[1,2,3],[4,5,6],[7,8,9]], Cell('conv', 48, 7, 'relu'), [1,0,2,0,1,0])
    tasks = []
    for i in range(15):
        # NOTE(review): EvaScheduleItem.__init__ takes 12 arguments but only
        # 10 are passed here — this call raises TypeError; confirm which two
        # (epoch, data_size?) were meant to be appended.
        tasks.append(EvaScheduleItem(0, i, [], item, [], False, False, -1, 1, 1))
    TSche = TaskScheduler()
    TSche.load_tasks(tasks)
    TSche.exec_task(task_fun)
    result = TSche.get_result()
| StarcoderdataPython |
3295290 | import time
from .settings import *
# Dispatch to the algorithm selected in the UI
def draw_lines(grid, algorithm, posX1, posY1, posX2, posY2, color, rows, pixel_size, line):
    """Run the drawing/clipping algorithm named by *algorithm* and return
    the (possibly re-created) grid."""
    # Positions always arrive as floats, so truncate them to ints.
    posX1, posX2, posY1, posY2 = int(posX1), int(posX2), int(posY1), int(posY2)
    # Do nothing on the very first iteration (coordinates still unset/zero).
    if posX1 > 0 and posX2 > 0 and posY1 > 0 and posY2 > 0:
        grid = init_grid()  # start from a blank grid on every redraw
        if algorithm == "DDA":
            grid = DDA(posX1, posY1, posX2, posY2, grid, color, rows, pixel_size)
        elif algorithm == "Bresenham":
            grid = bresenham(posX1, posY1, posX2, posY2, grid, color, rows, pixel_size)
        elif algorithm == "Círculo":
            # Radius is the horizontal distance between the two clicks.
            grid = draw_circle_bresenham(posX1, posY2, abs(posX2 - posX1), grid, color, rows, pixel_size)
        elif algorithm == "Cohen Sutherland":
            clip = Clipping()
            grid = clip.cohenSutherland(posX1, posY1, BLUE, rows, pixel_size, line, grid)
        elif algorithm == "Liang Barsky":
            clip = Clipping()
            grid = clip.liangBarsky(posX1, posY1, RED, rows, pixel_size, line, grid)
    # Some algorithms return None on failure; fall back to a blank grid.
    if not grid:
        grid = init_grid()
    return grid
# Map a pygame pixel position to a grid cell
def get_row_col_from_pos(pos, rows, pixel_size):
    """Return (col, row) of the grid cell containing pixel position *pos*.

    Raises IndexError when the x-derived row falls outside the drawable
    area (only the row bound is checked, matching the caller's contract).
    """
    x, y = pos
    row, col = x // pixel_size, y // pixel_size
    if row >= rows:
        # Position is not inside the drawable area.
        raise IndexError
    return col, row
# Build the grid that will be drawn
def init_grid():
    """Return a fresh ROWS x COLS grid filled with the background color."""
    return [[BG_COLOR for _ in range(COLS)] for _ in range(ROWS)]
# DDA line-drawing algorithm
def DDA(posX1, posY1, posX2, posY2, grid, color, rows, pixel_size):
    """Rasterize a line from (posX1, posY1) to (posX2, posY2) with DDA."""
    dx = posX2 - posX1
    dy = posY2 - posY1
    # Step count is the larger axis delta, so one pixel per step.
    steps = max(abs(dx), abs(dy))
    if steps == 0:
        steps = 1  # both endpoints coincide: still draw that pixel
    x_incr = dx / steps
    y_incr = dy / steps
    x, y = posX1, posY1
    grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
    for _ in range(steps):
        x += x_incr
        y += y_incr
        grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
    return grid
# Bresenham line-drawing algorithm
def bresenham(x1, y1, x2, y2, grid, color, rows, pixel_size):
    """Rasterize a line from (x1, y1) to (x2, y2) with Bresenham's algorithm.

    Returns the updated grid, or None when the line cannot be drawn
    (vertical line, or a falsy grid) — draw_lines() treats a None return
    as "reset to blank grid".
    """
    # Normalize endpoint order before drawing.
    # NOTE(review): `y2 > x1` compares a y against an x — possibly meant
    # `y2 > y1`; confirm against the intended octant handling.
    if x1 < x2 and y2 > x1:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
    elif x1 < x2 and y1 > y2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
    # Deltas along each axis.
    dx = abs(x2 - x1)
    dy = abs(y2 - y1)
    p = 2*dy - dx  # initial decision parameter
    # Avoid a division by zero (vertical lines are not drawn here).
    if dx == 0:
        return
    # Line steepness. NOTE(review): integer division makes slope 0 for all
    # lines flatter than 45 degrees and >=1 otherwise — confirm intended.
    slope = dy // dx
    if slope >= 1:
        const1 = 2 * dx
        const2 = 2 * dx - 2 * dy
    else:
        const1 = 2 * dy
        const2 = 2 * dy - 2 * dx
    x = x1
    y = y1
    # Direction of the y step.
    if y2 > y1:
        passo_y = 1
    else:
        passo_y = -1
    grid = draw_in_grid(x, y, rows, pixel_size, grid, color)  # first pixel
    # Bail out if drawing is not possible (falsy grid).
    if not grid:
        return
    if x2 > x1:
        # NOTE(review): `passo_x = x` sets the x step to the current x
        # coordinate instead of 1 — looks like a bug; confirm.
        passo_x = x
        while x <= x2:
            grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
            # Bail out if drawing is not possible.
            if not grid:
                return
            if slope >= 1:
                y = y + passo_y
            else:
                x = x + passo_x
            if p < 0:
                p = p + const1
            else:
                p = p + const2
                if slope >= 1:
                    x = x + passo_x
                else:
                    y = y + passo_y
    else:
        passo_x = -1
        # Draw the line right-to-left.
        while x >= x2:
            grid = draw_in_grid(x, y, rows, pixel_size, grid, color)
            # Bail out if drawing is not possible.
            if not grid:
                return
            if slope >= 1:
                y = y + passo_y
            else:
                x = x + passo_x
            if p < 0:
                p = p + const1
            else:
                p = p + const2
                if slope >= 1:
                    x = x + passo_x
                else:
                    y = y + passo_y
    return grid
# Draw a circle with Bresenham's algorithm
def draw_circle_bresenham(x, y, raio, grid, color, rows, pixel_size):
    """Draw a circle centered at (x, y) with radius *raio* (Bresenham)."""

    def _plot_octants(xc, yc, px, py, grid):
        # Mirror each computed point into all eight octants, in the same
        # order as the original per-point plot calls.
        for ox, oy in ((px, py), (-px, py), (px, -py), (-px, -py),
                       (py, px), (-py, px), (py, -px), (-py, -px)):
            grid = draw_in_grid(xc + ox, yc + oy, rows, pixel_size, grid, color)
        return grid

    px, py = 0, raio
    d = 3 - 2 * raio  # Bresenham decision parameter
    grid = _plot_octants(x, y, px, py, grid)
    # Advance eight symmetric pixels at a time until the octant is done.
    while py >= px:
        px += 1
        if d > 0:
            py -= 1
            d += 4 * (px - py) + 10
        else:
            d += 4 * px + 6
        grid = _plot_octants(x, y, px, py, grid)
    return grid
class Clipping:
    """Line clipping (Cohen-Sutherland and Liang-Barsky) against a fixed
    150x100 rectangle whose top-left corner is given by the user's click."""
    # Region out-codes used in the bitwise tests
    DENTRO = 0b0       # inside
    ESQUERDA = 0b1     # left of the window
    DIREITA = 0b10     # right of the window
    ABAIXO = 0b100     # below the window
    TOPO = 0b1000      # above the window
    # Clipping-window bounds; -1 means "no rectangle defined yet"
    x_max = -1
    y_max = -1
    x_min = -1
    y_min = -1
    # Liang-Barsky line parameters kept as attributes (no pointers in Python)
    t1 = t2 = 0

    # Whether a clipping rectangle has been defined
    def existsRectangle(self) -> bool:
        return self.x_max != -1 and self.y_max != -1 and self.x_min != -1 and self.y_min != -1

    # Compute the Cohen-Sutherland out-code for a point
    def qualLado(self, x, y):
        code = self.DENTRO
        # Point is to the left of the window
        if x < self.x_min:
            code |= self.ESQUERDA
        # Point is to the right of the window
        elif x > self.x_max:
            # Plain '=' rather than '|='; harmless here since code is still 0.
            code = self.DIREITA
        # Point is below the window
        if y < self.y_min:
            code |= self.ABAIXO
        # Point is above the window
        elif y > self.y_max:
            code |= self.TOPO
        return code

    def desenharRetangulo(self, x, y, color, rows, grid, pixel_size, line):
        # Draw the fixed-size clipping rectangle (150x100 pixels) with
        # (x, y) as its top-left corner.
        deslocamento_x = 150
        deslocamento_y = 100
        for i in range(deslocamento_x):
            grid = draw_in_grid(x + i, y, rows, pixel_size, grid, color)  # top edge
        for i in range(deslocamento_y):
            grid = draw_in_grid(x + deslocamento_x, y + i, rows, pixel_size, grid,
                                color)  # right edge
        for i in range(deslocamento_y):
            grid = draw_in_grid(x, y + i, rows, pixel_size, grid, color)  # left edge
        for i in range(deslocamento_x):
            grid = draw_in_grid(x + i, y + deslocamento_y, rows, pixel_size, grid,
                                color)  # bottom edge
        return grid

    # Cohen-Sutherland clipping
    def cohenSutherland(self, retanguloX, retanguloY, color, rows, pixel_size, line, grid):
        self.x_min = retanguloX
        self.x_max = retanguloX + 150
        self.y_min = retanguloY
        self.y_max = retanguloY + 100
        pontoX1 = line.pontoX1
        pontoY1 = line.pontoY1
        pontoX2 = line.pontoX2
        pontoY2 = line.pontoY2
        lado1 = self.qualLado(pontoX1, pontoY1)
        lado2 = self.qualLado(pontoX2, pontoY2)
        desenhar = False
        run = True
        while run:
            if lado1 == 0 and lado2 == 0:
                # Both endpoints inside: accept the segment.
                desenhar = True
                run = False
            elif lado1 & lado2 != 0:
                # Both endpoints share an outside region: trivially reject.
                run = False
            else:
                # Some part of the segment lies inside the window.
                lado_fora = 0
                x = y = 0
                if lado1 != 0:
                    lado_fora = lado1
                else:
                    lado_fora = lado2
                # Intersection with the crossed window edge
                if (lado_fora & self.TOPO) != 0:
                    x = pontoX1 + (pontoX2 - pontoX1) * (self.y_max - pontoY1) / (pontoY2 - pontoY1)
                    y = self.y_max
                elif (lado_fora & self.ABAIXO) != 0:
                    x = pontoX1 + (pontoX2 - pontoX1) * (self.y_min - pontoY1) / (pontoY2 - pontoY1)
                    y = self.y_min
                elif (lado_fora & self.DIREITA) != 0:
                    y = pontoY1 + (pontoY2 - pontoY1) * (self.x_max - pontoX1) / (pontoX2 - pontoX1)
                    x = self.x_max
                elif (lado_fora & self.ESQUERDA) != 0:
                    y = pontoY1 + (pontoY2 - pontoY1) * (self.x_min - pontoX1) / (pontoX2 - pontoX1)
                    x = self.x_min
                # Move the outside endpoint onto the intersection, re-test.
                if lado_fora == lado1:
                    pontoX1 = x
                    pontoY1 = y
                    lado1 = self.qualLado(pontoX1, pontoY1)
                else:
                    pontoX2 = x
                    pontoY2 = y
                    lado2 = self.qualLado(pontoX2, pontoY2)
        if desenhar:
            grid = draw_lines(grid, line.algoritmo, pontoX1, pontoY1, pontoX2, pontoY2, RED, rows, pixel_size, line)
        else:
            grid = init_grid()
        # NOTE(review): the `color` parameter is unused; RED/BLUE are hard-coded.
        return self.desenharRetangulo(retanguloX, retanguloY, BLUE, rows, grid, pixel_size, line)

    # Liang-Barsky edge test: updates t1/t2; False when outside for this edge
    def testandoClipping(self, ponto1, ponto2) -> bool:
        isClipping = True
        r = 0
        if ponto1 < 0:
            r = ponto2 / ponto1
            if r > self.t2:
                isClipping = False
            elif r > self.t1:
                self.t1 = r
        elif ponto1 > 0:
            r = ponto2 / ponto1
            if r < self.t1:
                isClipping = False
            elif r < self.t2:
                self.t2 = r
        else:
            # Segment parallel to this edge: outside when ponto2 < 0.
            if ponto2 < 0:
                isClipping = False
        return isClipping

    # Liang-Barsky clipping
    def liangBarsky(self, retanguloX, retanguloY, color, rows, pixel_size, line, grid):
        dx = line.pontoX2 - line.pontoX1
        self.t1 = 0
        self.t2 = 1
        pontoX1 = line.pontoX1
        pontoY1 = line.pontoY1
        pontoX2 = line.pontoX2
        pontoY2 = line.pontoY2
        # Test both x edges, then both y edges; draw the clipped segment
        # only when every edge test accepts.
        if self.testandoClipping(-dx, pontoX1 - retanguloX) and self.testandoClipping(dx, retanguloX + 150 - pontoX1):
            dy = pontoY2 - pontoY1
            if self.testandoClipping(-dy, pontoY1 - retanguloY) and self.testandoClipping(dy, retanguloY + 100 - pontoY1):
                if self.t2 < 1.0:
                    pontoX2 = int(pontoX1 + self.t2*dx)
                    pontoY2 = int(pontoY1 + self.t2*dy)
                if self.t1 > 0.0:
                    pontoX1 += int(self.t1 * dx)
                    pontoY1 += int(self.t1 * dy)
                if line.algoritmo == "Círculo":
                    # To be implemented
                    pass
                else:
                    grid = draw_lines(grid, line.algoritmo, pontoX1, pontoY1, pontoX2, pontoY2, color, rows, pixel_size, line)
        return self.desenharRetangulo(retanguloX, retanguloY, BLUE, rows, grid, pixel_size, line)
# Geometric transformation algorithms
class Transformation:
    # Translation: shift both endpoints by (x, y) and redraw the line
    def traslacao(self, x, y, line, rows, pixel_size, grid, color):
        line.pontoX1 += x
        line.pontoY1 += y
        line.pontoX2 += x
        line.pontoY2 += y
        draw_lines(grid, line.algoritmo, line.pontoX1, line.pontoY1, line.pontoX2, line.pontoY2, color, rows, pixel_size, line)

    # Change the line's scale (not implemented yet)
    def escala(self, line, rows, pixel_size, grid, color):
        pass

    # Rotate the line (not implemented yet)
    def rotacao(self, line, rows, pixel_size, grid, color):
        pass

    # Reflect the line (not implemented yet)
    def reflexao(self, line, rows, pixel_size, grid, color):
        pass
# Paint one cell of the grid
def draw_in_grid(x, y, rows, pixel_size, grid, color):
    """Set the grid cell containing pixel (x, y) to *color*.

    Positions outside the drawable area are silently ignored; the grid is
    always returned so callers can chain assignments.
    """
    try:
        col, row = get_row_col_from_pos((x, y), rows, pixel_size)
    except IndexError:
        # Pixel falls outside the screen — nothing to paint.
        pass
    else:
        grid[int(col)][int(row)] = color
    return grid
80882 | import usb.core
# Enumerate all attached USB devices and print basic info about each one.
devices = usb.core.find(find_all=True)
# NOTE(review): with find_all=True, pyusb returns an (possibly empty)
# iterator, not None — this guard likely never fires; confirm.
if devices is None:
    raise ValueError('Danger zone?')
for device in devices:
    print('============')
    config = device.get_active_configuration()
    deviceIndex = config.index
    product = device.product
    portNum = device.port_number
    print('index:' + str(deviceIndex))
    # str() guards against a None product (reading the string descriptor can
    # fail without permissions); also fixes the missing ':' in the label.
    print('product:' + str(product))
    print('port number:' + str(portNum))
| StarcoderdataPython |
136769 | #!/usr/bin/env python
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import re
import subprocess
import sys
import click
from babel.dates import format_date
from packaging.version import Version
def fail(message, *args, **kwargs):
    """Print a red error message (plus optional details) and exit(1)."""
    text = 'Error: ' + message.format(*args)
    click.echo(click.style(text, fg='red', bold=True), err=True)
    if 'verbose_msg' in kwargs:
        click.echo(kwargs['verbose_msg'], err=True)
    sys.exit(1)
def warn(message, *args):
    """Print a yellow warning to stderr."""
    text = message.format(*args)
    click.echo(click.style(text, fg='yellow', bold=True), err=True)


def info(message, *args):
    """Print a green informational message to stderr."""
    text = message.format(*args)
    click.echo(click.style(text, fg='green', bold=True), err=True)


def step(message, *args, **kwargs):
    """Print a bold step description; notes dry-run mode when set."""
    suffix = ''
    if kwargs.get('dry_run'):
        suffix = click.style(' (not really due to dry-run)', fg='yellow', bold=False)
    click.echo(click.style(message.format(*args) + suffix, fg='white', bold=True), err=True)
def run(cmd, title, shell=False):
    """Run *cmd*, aborting the release with its output on a non-zero exit."""
    command = ' '.join(cmd) if shell else cmd
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
    except subprocess.CalledProcessError as err:
        fail(f'{title} failed', verbose_msg=err.output)
def _bump_version(version):
try:
parts = [int(v) for v in version.split('.')]
except ValueError:
fail('cannot bump version with non-numeric parts')
if len(parts) == 2:
parts.append(0)
parts[-1] += 1
return '.'.join(map(str, parts))
def _get_current_version():
    """Read __version__ out of indico/__init__.py."""
    with open('indico/__init__.py') as f:
        match = re.search(r"^__version__ = '([^']+)'$", f.read(), re.MULTILINE)
    return match.group(1)
def _set_version(version, dry_run=False):
    """Rewrite __version__ in indico/__init__.py (no write when dry_run)."""
    step('Setting version to {}', version, dry_run=dry_run)
    path = 'indico/__init__.py'
    with open(path) as f:
        original = f.read()
    updated = re.sub(r"^__version__ = '([^']+)'$", f"__version__ = '{version}'",
                     original, flags=re.MULTILINE)
    assert updated != original  # the version line must have been found
    if dry_run:
        return
    with open(path, 'w') as f:
        f.write(updated)
def _set_changelog_date(new_version, dry_run=False):
    """Replace the '*Unreleased*' marker under *new_version* in CHANGES.rst
    with today's date; aborts when the changelog entry is missing."""
    with open('CHANGES.rst') as f:
        orig = content = f.read()
    version_line = f'Version {new_version}'
    underline = '-' * len(version_line)  # the RST underline below the heading
    unreleased = re.escape('Unreleased')
    release_date = format_date(format='MMMM dd, YYYY', locale='en')
    # The lookbehind/lookahead anchor on the exact heading + underline so
    # only the '*Unreleased*' belonging to this version is replaced.
    content = re.sub(r'(?<={}\n{}\n\n\*){}(?=\*\n)'.format(re.escape(version_line), underline, unreleased),
                     f'Released on {release_date}',
                     content,
                     flags=re.DOTALL)
    step('Setting release date to {}', release_date, dry_run=dry_run)
    if content == orig:
        # No substitution happened -> the entry for this version is absent.
        fail('Could not update changelog - is there an entry for {}?', new_version)
    if not dry_run:
        with open('CHANGES.rst', 'w') as f:
            f.write(content)
def _canonicalize_version(new_version):
    """Strip a redundant trailing '.0' (e.g. '2.3.0' -> '2.3')."""
    # NOTE: relies on packaging's private `_version` tuple.
    release = Version(new_version)._version.release
    if len(release) == 3 and release[-1] == 0:
        warn('Removing trailing `.0` from {}', new_version)
        new_version = '.'.join(map(str, release[:-1]))
    return new_version
def _get_versions(version):
    """Return (current, new, next-dev) version strings for the release.

    *version* may be None, in which case the current '-dev' version is
    released as-is. Pre-releases (any non-numeric component such as a1/rc1)
    get no auto-bumped next version, since it cannot be inferred.
    """
    cur_version = _get_current_version()
    new_version = _canonicalize_version(version or cur_version.replace('-dev', ''))
    # Any non-numeric dotted component marks a pre-release.
    pre = not all(x.isdigit() for x in new_version.split('.'))
    if cur_version == new_version:
        fail('Version number did not change',
             verbose_msg=('During alpha/beta/rc you need to specify the new version manually' if pre else None))
    next_version = (_bump_version(new_version) + '-dev') if not pre else None
    return cur_version, new_version, next_version
def _tag_name(version):
return 'v' + version
def _check_tag(version):
    """Abort the release if the Git tag for *version* already exists.

    ``subprocess.check_output`` returns bytes, so the original compared a
    str tag name against a list of bytes lines — which could never match,
    silently disabling the duplicate-tag check. Decoding via ``text=True``
    fixes the comparison.
    """
    tag_name = _tag_name(version)
    existing = subprocess.check_output(['git', 'tag'], text=True).splitlines()
    if tag_name in existing:
        fail('Git tag already exists: {}', tag_name)
def _check_git_clean():
    """Abort if there are unstaged or staged changes in the working tree."""
    for extra in ((), ('--staged',)):
        cmd = ['git', 'diff', '--stat', '--color=always', *extra]
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if out:
            fail('Git working tree is not clean', verbose_msg=out)
def _git_commit(message, files, dry_run=False):
    """Stage *files* and commit them with *message* (skipped on dry-run)."""
    step("Committing '{}'", message, dry_run=dry_run)
    if dry_run:
        return
    subprocess.check_call(['git', 'add'] + files)
    subprocess.check_call(['git', 'commit', '--no-verify', '--message', message])


def _git_tag(version, message, sign, dry_run):
    """Create the release tag, optionally GPG-signed (skipped on dry-run)."""
    tag_name = _tag_name(version)
    step('Tagging {}', tag_name, dry_run=dry_run)
    if dry_run:
        return
    extra = ['--sign'] if sign else []
    subprocess.check_call(['git', 'tag', '--message', message, tag_name] + extra)


def _build_wheel(no_assets, dry_run):
    """Invoke the wheel build script (skipped on dry-run)."""
    step("Building wheel", dry_run=dry_run)
    if dry_run:
        return
    extra = ['--no-assets'] if no_assets else []
    subprocess.check_call(['./bin/maintenance/build-wheel.py', 'indico'] + extra)
# Release driver: bump the version, update the changelog, commit, tag and
# (optionally) build the wheel. A docstring is intentionally NOT added here
# because click would surface it as CLI help text and change the interface.
@click.command()
@click.argument('version', required=False)
@click.option('--dry-run', '-n', is_flag=True, help='Do not modify any files or run commands')
@click.option('--sign', '-s', is_flag=True, help='Sign the Git commit/tag with GPG')
@click.option('--no-assets', '-D', is_flag=True, help='Skip building assets when building the wheel')
@click.option('--no-changelog', '-C', is_flag=True, help='Do not update the date in the changelog')
def cli(version, dry_run, sign, no_assets, no_changelog):
    # Work from the repository root (two levels above this script).
    os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
    cur_version, new_version, next_version = _get_versions(version)
    _check_tag(new_version)
    if next_version:
        _check_tag(next_version)
    _check_git_clean()
    info('Current version is {}', cur_version)
    info('Going to release {}', new_version)
    if next_version:
        info('Next version will be {}', next_version)
    if not no_changelog:
        _set_changelog_date(new_version, dry_run=dry_run)
    _set_version(new_version, dry_run=dry_run)
    release_msg = f'Release {new_version}'
    _git_commit(release_msg, ['CHANGES.rst', 'indico/__init__.py'], dry_run=dry_run)
    _git_tag(new_version, release_msg, sign=sign, dry_run=dry_run)
    # Build before bumping to the -dev version so the wheel carries the
    # release version.
    prompt = 'Build release wheel before bumping version?' if next_version else 'Build release wheel now?'
    if click.confirm(click.style(prompt, fg='blue', bold=True), default=True):
        _build_wheel(no_assets, dry_run=dry_run)
    if next_version:
        next_message = f'Bump version to {next_version}'
        _set_version(next_version, dry_run=dry_run)
        _git_commit(next_message, ['indico/__init__.py'], dry_run=dry_run)


if __name__ == '__main__':
    cli()
| StarcoderdataPython |
1782311 | <reponame>ospiper/Sandbox-Runner
class JudgerException(Exception):
    """Base exception for judger errors; carries a human-readable message."""

    def __init__(self, message):
        # Pass the message to Exception so str(exc) and tracebacks show it
        # (the original called super().__init__() with no arguments, which
        # left str(exc) empty).
        super().__init__(message)
        self.message = message


class JudgerError(JudgerException):
    """Generic judger error, kept distinct so callers can catch it alone."""
    pass
| StarcoderdataPython |
3917 | <gh_stars>1-10
# GridGain Community Edition Licensing
# Copyright 2019 GridGain Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License") modified with Commons Clause
# Restriction; you may not use this file except in compliance with the License. You may obtain a
# copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Commons Clause Restriction
#
# The Software is provided to you by the Licensor under the License, as defined below, subject to
# the following condition.
#
# Without limiting other conditions in the License, the grant of rights under the License will not
# include, and the License does not grant to you, the right to Sell the Software.
# For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you
# under the License to provide to third parties, for a fee or other consideration (including without
# limitation fees for hosting or consulting/ support services related to the Software), a product or
# service whose value derives, entirely or substantially, from the functionality of the Software.
# Any license notice or attribution required by the License must also include this Commons Clause
# License Condition notice.
#
# For purposes of the clause above, the “Licensor” is Copyright 2019 GridGain Systems, Inc.,
# the “License” is the Apache License, Version 2.0, and the Software is the GridGain Community
# Edition software provided with this notice.
from typing import Iterable, Union
from pyignite.queries.op_codes import *
from pyignite.datatypes import (
Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject,
)
from pyignite.datatypes.key_value import PeekModes
from pyignite.queries import Query, Response
from pyignite.utils import cache_id
def cache_put(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Write a key-value pair into the cache, overwriting any existing value.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param value: value to store under the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status when the value was
     written, otherwise a non-zero status and an error description.
    """
    # Wire flag: 1 = keep binary form, 0 = deserialize.
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_PUT,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    return request.perform(connection, {
        'hash_code': cache_id(cache),
        'flag': flag,
        'key': key,
        'value': value,
    })
def cache_get(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Fetch the value stored in the cache under the given key.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and the retrieved value
     on success, non-zero status and an error description on failure.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
        },
        response_config=[('value', AnyDataObject)],
    )
    # Unwrap the single-field response payload on success.
    if result.status == 0:
        result.value = result.value['value']
    return result
def cache_get_all(
        connection: 'Connection', cache: Union[str, int], keys: Iterable,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Fetch multiple key-value pairs from the cache in one round trip.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: list of keys or tuples of (key, key_hint),
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a dict of the
     retrieved key-value pairs on success, non-zero status and an error
     description on failure.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET_ALL,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'keys': keys,
        },
        response_config=[('data', Map)],
    )
    if result.status != 0:
        return result
    # Coerce the response into a plain dict before extracting the payload.
    result.value = dict(result.value)['data']
    return result
def cache_put_all(
        connection: 'Connection', cache: Union[str, int], pairs: dict,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Store multiple key-value pairs in the cache, overwriting existing
    associations if any.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param pairs: dict of key-value pairs to save; each key or value may
     be a plain Python object or a tuple of (item, hint),
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status when all pairs were
     written, otherwise a non-zero status and an error description.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_PUT_ALL,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('data', Map),
        ],
        query_id=query_id,
    )
    return request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'data': pairs,
        },
    )
def cache_contains_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Tell whether the given key is present in the cache.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool (`True` when
     the key is present), or non-zero status and an error description.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_CONTAINS_KEY,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
        },
        response_config=[('value', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result
def cache_contains_keys(
        connection: 'Connection', cache: Union[str, int], keys: Iterable,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Tell whether every one of the given keys is present in the cache.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: a list of keys or (key, type hint) tuples,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool (`True` when
     all keys are present), or non-zero status and an error description.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_CONTAINS_KEYS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'keys': keys,
        },
        response_config=[('value', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result
def cache_get_and_put(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Store a key-value pair and return the value previously associated
    with the key (None when there was no such key).

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param value: value to store under the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and the old value (or
     None) when written, non-zero status and an error description on
     failure.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET_AND_PUT,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'value': value,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result
def cache_get_and_replace(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Replace the value mapped to the key — only if a mapping already
    exists — and return the previous value.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param value: new value for the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and the old value (or
     None) on success, non-zero status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET_AND_REPLACE, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'value': value,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result
def cache_get_and_remove(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Remove the cache entry with the given key and return its value.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and the removed value
     (or None), non-zero status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET_AND_REMOVE, [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result
def cache_put_if_absent(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Store a key-value pair only when the key does not exist yet.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param value: value to store under the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool success flag
     on success, non-zero status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_PUT_IF_ABSENT,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'value': value,
        },
        response_config=[('success', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['success']
    return result
def cache_get_and_put_if_absent(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Store a key-value pair only when the key does not exist yet, and
    return the value previously associated with the key, if any.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param value: value to store under the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and the old value (or
     None) on success, non-zero status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET_AND_PUT_IF_ABSENT,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'value': value,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status != 0:
        return result
    result.value = result.value['value']
    return result
def cache_replace(
        connection: 'Connection', cache: Union[str, int], key, value,
        key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Store a key-value pair only when the key already exists.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry (any supported type),
    :param value: new value for the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool success flag,
     or non-zero status and an error description when something has gone
     wrong.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_REPLACE,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'value': value,
        },
        response_config=[('success', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['success']
    return result
def cache_replace_if_equals(
        connection: 'Connection', cache: Union[str, int], key, sample, value,
        key_hint=None, sample_hint=None, value_hint=None,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Store a key-value pair only when the key already exists and the
    stored value equals the given sample.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry,
    :param sample: sample to compare the stored value against,
    :param value: new value for the key,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param sample_hint: (optional) Ignite type the sample should be
     converted to,
    :param value_hint: (optional) Ignite type the value should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool success flag,
     or non-zero status and an error description when something has gone
     wrong.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_REPLACE_IF_EQUALS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('sample', sample_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'sample': sample,
            'value': value,
        },
        response_config=[('success', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['success']
    return result
def cache_clear(
        connection: 'Connection', cache: Union[str, int], binary=False,
        query_id=None,
) -> 'APIResult':
    """
    Clear the whole cache without notifying listeners or cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status on success, non-zero
     status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_CLEAR,
        [
            ('hash_code', Int),
            ('flag', Byte),
        ],
        query_id=query_id,
    )
    return request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
        },
    )
def cache_clear_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Clear a single cache key without notifying listeners or cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status on success, non-zero
     status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_CLEAR_KEY,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    return request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
        },
    )
def cache_clear_keys(
        connection: 'Connection', cache: Union[str, int], keys: list,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Clear multiple cache keys without notifying listeners or cache
    writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: list of keys or tuples of (key, key_hint),
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status on success, non-zero
     status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_CLEAR_KEYS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ],
        query_id=query_id,
    )
    return request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'keys': keys,
        },
    )
def cache_remove_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Remove the cache entry with the given key.

    NOTE(review): the original docstring was copied from cache_clear_key;
    this operation sends OP_CACHE_REMOVE_KEY, i.e. a removal, and yields
    a bool success flag.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool success flag,
     or non-zero status and an error description when something has gone
     wrong.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_REMOVE_KEY,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
        },
        response_config=[('success', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['success']
    return result
def cache_remove_if_equals(
        connection: 'Connection', cache: Union[str, int], key, sample,
        key_hint=None, sample_hint=None,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Remove an entry by key only when the stored value equals the given
    sample, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key of the entry,
    :param sample: sample to compare the stored value against,
    :param key_hint: (optional) Ignite type the key should be converted to,
    :param sample_hint: (optional) Ignite type the sample should be
     converted to,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and a bool success flag,
     or non-zero status and an error description when something has gone
     wrong.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_REMOVE_IF_EQUALS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('sample', sample_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'key': key,
            'sample': sample,
        },
        response_config=[('success', Bool)],
    )
    if result.status != 0:
        return result
    result.value = result.value['success']
    return result
def cache_remove_keys(
        connection: 'Connection', cache: Union[str, int], keys: Iterable,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Remove the entries with the given keys, notifying listeners and
    cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: list of keys or tuples of (key, key_hint),
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status on success, non-zero
     status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_REMOVE_KEYS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ],
        query_id=query_id,
    )
    return request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'keys': keys,
        },
    )
def cache_remove_all(
        connection: 'Connection', cache: Union[str, int], binary=False,
        query_id=None,
) -> 'APIResult':
    """
    Remove every entry from the cache, notifying listeners and cache
    writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status on success, non-zero
     status and an error description otherwise.
    """
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_REMOVE_ALL,
        [
            ('hash_code', Int),
            ('flag', Byte),
        ],
        query_id=query_id,
    )
    return request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
        },
    )
def cache_get_size(
        connection: 'Connection', cache: Union[str, int], peek_modes=0,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Count the entries in the cache.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param peek_modes: (optional) restrict the count to the near cache
     partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or
     backup cache (PeekModes.BACKUP); defaults to all partitions
     (PeekModes.ALL),
    :param binary: (optional) pass True to keep the value in binary form
     (False by default),
    :param query_id: (optional) client-chosen value echoed back in
     response.query_id; a random one is generated when omitted,
    :return: API result data object: zero status and the entry count on
     success, non-zero status and an error description otherwise.
    """
    # The protocol expects a sequence of peek modes; normalise scalars,
    # where 0 (the "all partitions" default) maps to an empty sequence.
    if not isinstance(peek_modes, (list, tuple)):
        peek_modes = [] if peek_modes == 0 else [peek_modes]
    flag = 1 if binary else 0
    request = Query(
        OP_CACHE_GET_SIZE,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('peek_modes', PeekModes),
        ],
        query_id=query_id,
    )
    result = request.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': flag,
            'peek_modes': peek_modes,
        },
        response_config=[('count', Long)],
    )
    if result.status != 0:
        return result
    result.value = result.value['count']
    return result
# | StarcoderdataPython |
# 3333582 | <gh_stars>0
#extract to .py file later
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import itertools
import scipy.special
from decimal import *
import unittest as ut
from sympy.solvers import solve
from sympy import Symbol
from scipy.constants import golden as phi
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FuncFormatter
import sys
sys.version
#extract to .py file later
def payoff_first_n_steps(p=0.1,delta=0.9999,n=1):
    """Total discounted payoff over the first n steps/rounds.

    Closed form of the geometric series
    sum_{i=0}^{n-1} p * delta**i = p * (1 - delta**n) / (1 - delta).
    """
    geometric_sum = (1 - delta ** n) / (1 - delta)
    return p * geometric_sum
#extract to .py file later
def plot_bitcoin_delta_approx(deltas=(0.9999,0.99999,0.999999),
                              power=0.1,
                              step=2016,
                              steps=2*13,
                              ticks=[ 1,8,2*6,2*9,2*12 ],
                              title_string="",
                              save_path=None,
                              ylim=None,
                              xlim=None):
    """
    Plot cumulative discounted mining rewards against block height for
    several discount factors, with reference lines at 1/2 difficulty
    periods and 6/12/24/36 "months" (2016-block units).

    :param deltas: discount factors to plot, one curve each,
    :param power: reward (hash-rate share) earned per block,
    :param step: x-axis step in blocks (2016 = one difficulty period),
    :param steps: number of steps to plot,
    :param ticks: indices of the x tick labels left visible
     (NOTE(review): mutable default list — only read here, so harmless),
    :param title_string: unused — presumably a leftover parameter,
    :param save_path: when given, the figure is written there at 200 dpi,
    :param ylim: (optional) upper y-axis limit,
    :param xlim: (optional) upper x-axis limit,
    :return: None; shows (and optionally saves) the figure.
    """
    fig, ax = plt.subplots(figsize=(16, 9))
    # Cycle through distinct markers, one per delta curve.
    # https://matplotlib.org/api/markers_api.html
    marker = itertools.cycle((',','v', 'o', '+','.', 's', '*','1','D','x','^'))
    for d in deltas:
        # Discounted cumulative reward after every `step` blocks.
        X = [ n for n in np.arange(0,step*steps,step) ]
        reward = [ payoff_first_n_steps(p=power,delta=d,n=x) for x in X ]
        plt.plot(X,
                 reward,
                 marker=next(marker),
                 linewidth=3,
                 label="$\delta$ = " + str(d))
    # Undiscounted reference crosshairs: after one and two difficulty
    # periods (2016 and 2*2016 blocks) the average reward is height*power.
    plt.vlines(2016,ymin=0,ymax=2016*power,color="black",linestyle="dashed")
    plt.hlines(2016*power,xmin=0,xmax=2016,color="black",linestyle="dashed")
    plt.vlines(2016*2,ymin=0,ymax=2016*2*power,color="black",linestyle="dashed")
    plt.hlines(2016*2*power,xmin=0,xmax=2016*2,color="black",linestyle="dashed")
    # Six months = 6 * 2 difficulty periods.
    plt.vlines(2016*2*6,ymin=0,ymax=2016*2*6*power,color="black",linestyle="dashed")
    plt.hlines(2016*2*6*power,xmin=0,xmax=2016*2*6,color="black",linestyle="dashed")
    ax.annotate('average rewards after 6 months',
                xy=(2016*2*6, 2016*2*6*power),
                xytext=(2016*2, 2900),
                arrowprops=dict(facecolor='black', shrink=0.05))
    # One year.
    plt.vlines(2016*2*12,ymin=0,ymax=2016*2*12*power,color="black",linestyle="dashed")
    plt.hlines(2016*2*12*power,xmin=0,xmax=2016*2*12,color="black",linestyle="dashed")
    ax.annotate('average rewards after one year',
                xy=(2016*2*12, 2016*2*12*power),
                xytext=(2016*2*3, 5500),
                arrowprops=dict(facecolor='black', shrink=0.05))
    # Two years.
    plt.vlines(2016*2*24,ymin=0,ymax=2016*2*24*power,color="black",linestyle="dashed")
    plt.hlines(2016*2*24*power,xmin=0,xmax=2016*2*24,color="black",linestyle="dashed")
    ax.annotate('average rewards after two years',
                xy=(2016*2*24, 2016*2*24*power),
                xytext=(2016*2*9, 10100),
                arrowprops=dict(facecolor='black', shrink=0.05))
    # Three years.
    plt.vlines(2016*2*36,ymin=0,ymax=2016*2*36*power,color="black",linestyle="dashed")
    plt.hlines(2016*2*36*power,xmin=0,xmax=2016*2*36,color="black",linestyle="dashed")
    ax.annotate('average rewards after three years',
                xy=(2016*2*36, 2016*2*36*power),
                xytext=(2016*2*20, 15000),
                arrowprops=dict(facecolor='black', shrink=0.05))
    # tidy up the figure
    ax.grid(True)
    #ax.legend(loc='center right', bbox_to_anchor=(0.8, 0.57))
    #ax.legend(loc='center right',
    ax.legend(loc='upper left',
              bbox_to_anchor=(0.71, .55),  # location of the legend
              framealpha=1.0)  # turn off transparency of legend
    #ax.set_title("")
    ax.set_xlabel("relative block height (in steps of 2016 blocks)")
    ax.set_ylabel("normalized block rewards for mined blocks")
    if ylim is not None:
        ax.set_ylim([0,ylim])
    if xlim is not None:
        ax.set_xlim([0,xlim])
    #plt.yticks(np.arange(0.0, 1.5, step=0.1))
    plt.xticks(np.arange(0, step*steps, step=step))
    # Hide every x tick label except those whose index is in `ticks`.
    #for label in ax.xaxis.get_ticklabels()[::2]:
    for tick,label in enumerate(ax.xaxis.get_ticklabels()[::]):
        if tick in ticks:
            label.set_visible(True)
        else:
            label.set_visible(False)
    #plt.yscale('log')
    # Reset rcParams, then enlarge the fonts for presentation-sized output.
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    plt.rcParams.update({'font.size': 20})
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
    if save_path is not None:
        plt.savefig(save_path, dpi=200)
    plt.show()
#extract to .py file later
def bribe_solve(p,p_new,delta,e=None):
    """Break-even bribe: the amount that makes the total payoff of the
    honest strategy equal to the total payoff with the defect/attack
    action.

    When the multiplier *e* is given, the post-attack power is taken as
    p * e and *p_new* is ignored.
    """
    if e is not None:
        p_new = p * e
    return (delta * p_new - p) / (delta - 1)
#extract to .py file later
def bribe_solve_r(p,p_new,delta,e=None,r=0):
    """Break-even bribe, accounting for funds the attacker already holds.

    Same as bribe_solve, minus the appreciation r*(e-1) of the
    attacker's pre-existing funds *r* in the respective resource.

    :param p: current per-step payoff,
    :param p_new: per-step payoff after the attack (ignored when *e* given),
    :param delta: discount factor (0 < delta < 1),
    :param e: (optional) exchange-rate multiplier; when given, p_new = p*e,
    :param r: (optional) attacker's pre-existing funds in the resource.
    :return: the break-even bribe value.
    """
    if e is not None:
        p_new = p * e
    else:
        # Bug fix: the r*(e-1) term was evaluated with e=None, so every
        # call without an explicit multiplier raised TypeError. Derive
        # the implied multiplier from p_new instead.
        e = p_new / p
    return (delta * p_new - p) / (delta - 1) - r * (e - 1)
#extract to .py file later
def bribedrop_solve_r(p,p_new,delta,e=None,r=0):
    """Break-even bribe with pre-existing funds, where the bribe itself
    is also subject to the exchange-rate change (hence divided by e).

    :param p: current per-step payoff,
    :param p_new: per-step payoff after the attack (ignored when *e* given),
    :param delta: discount factor (0 < delta < 1),
    :param e: (optional) exchange-rate multiplier; when given, p_new = p*e,
    :param r: (optional) attacker's pre-existing funds in the resource.
    :return: the break-even bribe value, denominated post-drop.
    """
    if e is not None:
        p_new = p * e
    else:
        # Bug fix: e was used unconditionally (r*(e-1) and the final
        # division), so calls without an explicit multiplier raised
        # TypeError. Derive the implied multiplier from p_new.
        e = p_new / p
    return ((delta * p_new - p) / (delta - 1) - r * (e - 1)) / e
#extract to .py file later
def EEV_honest(p0=0.1,p1=0.1,r0=0,r1=0,d0=0.9999,d1=0.9999):
    """Expected exchange-rate-weighted value of playing honest forever
    in two resources: each discounted reward stream p/(1-d) plus the
    funds r already held in that resource."""
    stream0 = p0 / (1 - d0)
    stream1 = p1 / (1 - d1)
    return stream0 + r0 + stream1 + r1
#extract to .py file later
def EEV_attack(p0=0.1,p1=0,r0=0,r1=0,d0=0.9999,d1=0,e0=0,e1=0,E0=0,E1=0):
    """Expected value of defecting once (collecting bribes E0/E1) and
    then playing honest forever, over two resources whose exchange
    rates are scaled by e0/e1."""
    gains0 = d0 * p0 * e0 / (1 - d0)   # resource 0 stream, one step delayed
    funds0 = (E0 + r0) * e0            # bribe plus held funds, revalued
    gains1 = p1 * e1 / (1 - d1)        # resource 1 stream, undelayed
    funds1 = (E1 + r1) * e1
    return gains0 + funds0 + gains1 + funds1
#extract to .py file later
def bribe_solve_two_sympy(p0=0.1, p1=0.1,
                          d0=0.9999, d1=0.9999,
                          r0=0, r1=0,
                          e0=1, e1=0,
                          E0=0, E1=0,
                          bribeIn="R0"):
    """Symbolically solve for the bribe (paid in R0 or R1) that makes the
    attack EEV equal to the honest EEV, using sympy.

    :param bribeIn: ``"R0"`` to solve for the R0 bribe (``E1`` held fixed),
        ``"R1"`` to solve for the R1 bribe (``E0`` held fixed).
    :raises ValueError: for any other ``bribeIn`` value.
    """
    _E0 = Symbol('E0')  # varepsilon/bribe in R_0
    _E1 = Symbol('E1')  # varepsilon/bribe in R_1
    _d0, _d1 = Symbol('d0'), Symbol('d1')
    _p0, _p1 = Symbol('p0'), Symbol('p1')
    _r0, _r1 = Symbol('r0'), Symbol('r1')
    _e0, _e1 = Symbol('e0'), Symbol('e1')
    # EEV(attack) - EEV(honest) == 0
    expr_zero = (
        (_d0 * (_p0 * _e0)) / (1 - _d0) +
        (_E0 + _r0) * _e0 +
        (_p1 * _e1) / (1 - _d1) +
        (_E1 + _r1) * _e1 -
        (_p0 / (1 - _d0)) -
        _r0 -
        (_p1 / (1 - _d1)) -
        _r1)
    # Substitute all known quantities in one pass.
    known = {_p0: p0, _p1: p1, _d0: d0, _d1: d1,
             _r0: r0, _r1: r1, _e0: e0, _e1: e1}
    expr_repl = expr_zero.subs(known)
    if bribeIn == "R0":
        rslt = float(solve(expr_repl.subs(_E1, E1), _E0)[0])
    elif bribeIn == "R1":
        rslt = float(solve(expr_repl.subs(_E0, E0), _E1)[0])
    else:
        # A bad argument is caller error, not an internal invariant:
        # raise instead of assert (asserts are stripped under `python -O`).
        raise ValueError('Solve for bribeIn="R0" or "R1"')
    return rslt
#extract to .py file later
def EEV_gains(p=0.1, e=1, d=0.9999):
    """Discounted expected gains stream: power ``p`` at exchange rate ``e``
    under discount factor ``d``."""
    return (p * e) / (1 - d)
#extract to .py file later
def EEV_funds(r=0, e=1):
    """Value of currently held funds ``r`` at exchange rate ``e``."""
    return e * r
#extract to .py file later
def d_solve(p=0.1, R=1):
    """Given a target honest-EEV ``R`` and per-round power ``p``, return the
    discount factor ``delta`` satisfying p / (1 - delta) == R."""
    return (R - p) / R
#extract to .py file later
def EEV_gains_after(p=0.1, e=1, d=0.9999):
    """Discounted gains stream starting one round later (extra factor ``d``
    compared to :func:`EEV_gains`)."""
    return (d * (p * e)) / (1 - d)
#extract to .py file later
def EEV_funds_after(r=0, e=1, E=0):
    """Funds after the attack: held funds plus bribe, at exchange rate ``e``."""
    return (E + r) * e
#extract to .py file later
def d_solve_after(p=0.1, R=1):
    """Given a target EEV ``R`` (one round delayed) and power ``p``, return
    the discount factor ``delta`` satisfying d * p / (1 - d) == R."""
    return R / (p + R)
#extract source to .py later
def plot_bar_payoff_after(b_p0=0, b_p1=0,
                          a_p0=0, a_p1=0,
                          b_d0=0, b_d1=0,
                          a_d0=0, a_d1=0,
                          b_r0=0, b_r1=0,
                          a_r0=0, a_r1=0,
                          b_e0=1, b_e1=1,
                          a_e0=1, a_e1=1,
                          E0=0, E1=0,
                          ymax_ax1=None,
                          ymax_ax2=None,
                          yticklist_ax1=None,
                          yticklist_ax2=None,
                          save_path=None,
                          skip_round=False,
                          ylabel='normalized block rewards',
                          xticklabels=["before", "after"],
                          show_diff=True,
                          double_spend=0):
    """Stacked-bar comparison of total expected payoff before vs. after an
    attack, for two resources C_0 and C_1.

    Parameter prefixes: ``b_`` = before the attack, ``a_`` = after.  Per
    resource i: ``p`` per-round payoff, ``d`` discount factor, ``r`` held
    funds, ``e`` exchange rate; ``E0``/``E1`` are bribes paid in the
    respective resource.

    :param skip_round: if True, the attacker forfeits one payoff round after
        the attack (extra discount factor via ``EEV_gains_after``).
    :param show_diff: overlay the net gain/loss bar.
    :param double_spend: extra one-off revenue added to the "after" total.
    :param ymax_ax2,yticklist_ax2: kept for backward compatibility only; the
        second axis is disabled, so these are ignored.  (The original raised
        a TypeError when ``ymax_ax1`` was set and ``ymax_ax2`` was None.)

    Side effects: prints a summary, shows the figure, optionally saves it.
    """
    # ---- totals before the attack: discounted payoff streams plus funds
    b_payoff0 = EEV_gains(p=b_p0, e=b_e0, d=b_d0)
    b_payoff1 = EEV_gains(p=b_p1, e=b_e1, d=b_d1)
    b_funds0 = EEV_funds(r=b_r0, e=b_e0)
    b_funds1 = EEV_funds(r=b_r1, e=b_e1)
    b_sum = b_payoff0 + b_payoff1 + b_funds0 + b_funds1
    # ---- totals after the attack
    if skip_round:
        a_payoff0 = EEV_gains_after(p=a_p0, e=a_e0, d=a_d0)
        a_payoff1 = EEV_gains_after(p=a_p1, e=a_e1, d=a_d1)
    else:
        a_payoff0 = EEV_gains(p=a_p0, e=a_e0, d=a_d0)
        a_payoff1 = EEV_gains(p=a_p1, e=a_e1, d=a_d1)
    a_funds0 = EEV_funds(r=a_r0, e=a_e0)
    a_funds1 = EEV_funds(r=a_r1, e=a_e1)
    a_sum = a_payoff0 + a_payoff1 + a_funds0 + a_funds1 + E0*a_e0 + E1*a_e1
    gain = (a_sum + double_spend) - b_sum
    print("sum (before) = ", b_sum)
    print("sum (after) = ", a_sum)
    print("double spend = ", double_spend)
    print("total after = ", a_sum + double_spend)
    print("gain = ", gain)
    # ---- figure setup
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    s = 0.35
    fig, ax1 = plt.subplots(figsize=(16*s, 9*s))
    width = 0.5
    # ---- "before" bar (x = 0): currency 0, funds then expected payoffs
    ax1.bar(0 - width/2,
            b_funds0,
            width=width,
            label='Funds $ r_0 $',
            color='lightgray',
            align='edge',
            hatch='oo',
            edgecolor='black')
    ax1.bar(0 - width/2,
            b_payoff0,
            width=width,
            bottom=b_funds0,
            label='Expected Funds $ p_0 $',
            color='lightgray',
            align='edge',
            hatch='\\\\\\',
            edgecolor='black')
    # currency 1 stacked on top
    ax1.bar(0 - width/2,
            b_funds1,
            width=width,
            bottom=b_funds0 + b_payoff0,
            label='Funds $ r_1 $',
            color='lightblue',
            align='edge',
            hatch='oo',
            edgecolor='black')
    ax1.bar(0 - width/2,
            b_payoff1,
            width=width,
            bottom=b_funds0 + b_payoff0 + b_funds1,
            label='Expected Funds $ p_1 $',
            color='lightblue',
            align='edge',
            hatch='\\\\\\',
            edgecolor='black')
    # ---- "after" bar (x = 1): currency 0, bribe at the bottom if any
    if E0 != 0:
        ax1.bar(1 - width/2,
                E0*a_e0,
                width=width,
                label='Bribe in $ r_0 $',
                color='gold',
                align='edge',
                hatch='oo',
                edgecolor='black')
    ax1.bar(1 - width/2,
            a_funds0,
            width=width,
            bottom=E0*a_e0,
            color='lightgray',
            align='edge',
            hatch='oo',
            edgecolor='black')
    ax1.bar(1 - width/2,
            a_payoff0,
            width=width,
            bottom=a_funds0 + E0*a_e0,
            color='lightgray',
            align='edge',
            hatch='\\\\\\',
            edgecolor='black')
    # currency 1 stacked on top, bribe first if any
    if E1 != 0:
        ax1.bar(1 - width/2,
                E1*a_e1,
                width=width,
                bottom=a_funds0 + E0*a_e0 + a_payoff0,
                label='Bribe in $ r_1 $',
                color='gold',
                align='edge',
                hatch='oo',
                edgecolor='black')
    ax1.bar(1 - width/2,
            a_funds1,
            bottom=a_funds0 + E0*a_e0 + a_payoff0 + E1*a_e1,
            width=width,
            color='lightblue',
            align='edge',
            hatch='oo',
            edgecolor='black')
    ax1.bar(1 - width/2,
            a_payoff1,
            bottom=a_funds0 + E0*a_e0 + a_payoff0 + a_funds1 + E1*a_e1,
            width=width,
            color='lightblue',
            align='edge',
            hatch='\\\\\\',
            edgecolor='black')
    # ---- net difference / double spend overlays
    if show_diff and gain > 0:
        ax1.bar(1 - width/2,
                gain,
                width=width,
                bottom=b_funds0 + b_payoff0 + b_funds1 + b_payoff1,
                label='Gain',
                color='mediumseagreen',
                align='edge',
                hatch='++',
                edgecolor='black')
    if show_diff and gain < 0:
        ax1.bar(1 - width/2,
                b_sum - a_sum,
                width=width,
                bottom=a_funds0 + E0*a_e0 + a_payoff0 + a_funds1 + E1*a_e1 + a_payoff1,
                label='Loss',
                color='lightcoral',
                align='edge',
                hatch='XXX',
                edgecolor='black')
    if double_spend > 0:
        ax1.bar(1 - width/2,
                double_spend,
                bottom=a_funds0 + E0*a_e0 + a_payoff0 + a_funds1 + E1*a_e1 + a_payoff1,
                width=width,
                label='Double-spend $r_3$',
                color='mediumseagreen',
                align='edge',
                hatch='++',
                edgecolor='black')
    # ---- axes limits, ticks, labels
    if ymax_ax1 is not None:
        ax1.set_ylim(0, ymax_ax1)
    if ymax_ax1 is not None and yticklist_ax1 is None:
        yticks = [0, ymax_ax1//5, (ymax_ax1//5)*2, (ymax_ax1//5)*3,
                  (ymax_ax1//5)*4, (ymax_ax1//5)*5]
        ax1.yaxis.set_ticks(yticks)
        # The ax2 tick computation that lived here was dead code (its
        # consumer was commented out) and raised a TypeError whenever
        # ymax_ax2 was None; it has been removed.
    if ymax_ax1 is not None and yticklist_ax1 is not None:
        ax1.yaxis.set_ticks(yticklist_ax1)
    ax1.set_xlim(0 - 0.5, 3)
    ax1.xaxis.set_ticks([0, 1])
    ax1.set_xticklabels(xticklabels)
    ax1.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.7)
    ax1.set_ylabel(ylabel)
    plt.figlegend(loc='upper right',
                  bbox_to_anchor=(0.9, 0.89),
                  framealpha=1,
                  ncol=1)
    if save_path is not None:
        plt.savefig(save_path, dpi=200, bbox_inches='tight')
    plt.show()
| StarcoderdataPython |
153404 | <filename>project/database_controller/user_projects.py
import json
from project.database_controller.app import db
from project.models import Project
def get_project(project_id: int) -> str:
    """Return a JSON string with the name and description of the project
    identified by ``project_id``.

    (The original annotated the return as ``json`` — a module, not a type;
    it is a JSON-encoded ``str``.)

    :param project_id: primary key of the project to look up
    :return: ``{"name": ..., "description": ...}`` as JSON, or the literal
        string ``"foo"`` when exactly one matching row is not found
        (placeholder behaviour preserved — TODO: replace with a proper
        not-found response).
    """
    matches = db.session.query(Project).filter(Project.id == project_id).all()
    if len(matches) != 1:
        return "foo"
    project = matches[0]
    return json.dumps({
        "name": project.name,
        "description": project.description,
    })
4802216 | <filename>polarion/email_report.py
#!/usr/bin/env python
"""
Module to generate email report post extracting automation status from polarion.
"""
import os
import smtplib
import sys
import time
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import pytz
from jinja2 import Environment, FileSystemLoader, select_autoescape
from jinja_markdown import MarkdownExtension
import config as cf
from main import main
# Load sender/recipient addresses from the project-level configuration.
config = cf.Config()
config.load()
gmail_user = config.sender_user
mail_to = config.recipient_user
# Product name used in the email subject line.
PRODUCT = "Red Hat Ceph Storage"
# Extract the automation status from polarion; note this runs at import time.
results = main()
def send_email(gmail_user, recipients, subject, body):
    """Render the HTML report templates and email the result.

    :param gmail_user: sender address (used as both From header and
        envelope sender)
    :param recipients: list of recipient addresses
    :param subject: subject line
    :param body: report data; ``body[0]`` feeds the automation-status table
        and the whole of ``body`` feeds the component-wise table.
        NOTE(review): the original's commented-out line suggests ``body[1]``
        may have been intended for the second table — confirm against the
        shape returned by ``main()``.
    """
    sent_from = gmail_user
    msg = MIMEMultipart("mixed")
    msg["Subject"] = subject
    msg["From"] = gmail_user
    msg["To"] = ", ".join(recipients)
    # Locate the Jinja templates relative to this file.
    project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    print(project_dir)
    template_dir = os.path.join(project_dir, "polarion/html_template")
    jinja_env = Environment(
        extensions=[MarkdownExtension],
        loader=FileSystemLoader(template_dir),
        autoescape=select_autoescape(["html", "xml"]),
    )
    template = jinja_env.get_template("automation_status.html")
    automation_status = template.render(items=body[0])
    template = jinja_env.get_template("component_wise_data.html")
    component_data = template.render(content=body)
    # Attach the rendered tables; per RFC 2046 the last part of a multipart
    # message is the preferred one for capable clients.
    msg.attach(MIMEText(automation_status, "html"))
    msg.attach(MIMEText(component_data, "html"))
    try:
        # Context manager guarantees QUIT/close even if sendmail raises
        # (the original leaked the connection on failure).
        with smtplib.SMTP("localhost") as smtp:
            smtp.sendmail(sent_from, recipients, msg.as_string())
        print("Email sent!")
    except (smtplib.SMTPException, OSError) as err:
        # Narrowed from a bare `except:`; the original also never formatted
        # its "{}" placeholder, printing the tuple instead of the error.
        print(f"Something went wrong... {err}")
# Timestamp the report in Indian Standard Time.
UTC = pytz.utc  # NOTE(review): unused — candidate for removal
IST = pytz.timezone("Asia/Kolkata")
datetime_ist = datetime.now(IST)
start_time = datetime_ist.strftime("%d %b %Y %H:%M")
# Send the rendered report to the configured recipient (runs at import time).
send_email(
    gmail_user,
    [mail_to],
    f"{PRODUCT} Automation Status as on {start_time} [IST]",
    results,
)
| StarcoderdataPython |
3308110 | # Author: <NAME> <<EMAIL>>
from pycocotools.coco import COCO
from pycocotools import mask
from tensorpack.utils.segmentation.segmentation import visualize_label
import numpy as np
# Locations of the COCO 2014 dataset on the local data volume: image
# directories plus the detection (instances) and caption annotation JSONs.
coco_dataset = "/data2/dataset/coco"
detection_json_train = "/data2/dataset/annotations/instances_train2014.json"
detection_json_val = "/data2/dataset/annotations/instances_val2014.json"
caption_json_train = "/data2/dataset/annotations/captions_train2014.json"
caption_json_val = "/data2/dataset/annotations/captions_val2014.json"
train_dir = "/data2/dataset/coco/train2014"
val_dir = "/data2/dataset/coco/val2014"
def draw_gt(_coco, img_id):
    """Rasterize all annotations of image ``img_id`` into one binary
    ground-truth mask of shape (height, width, 1), dtype uint8.

    Fixes over the original draft:
      * every polygon part is decoded (the original only kept the RLE of the
        last part of multi-part polygons),
      * foreground pixels are set to 1 instead of the ``0000#TODO``
        placeholder (which made the function a no-op),
      * the accumulated mask is returned instead of being discarded.
    """
    img = _coco.loadImgs(img_id)[0]
    annIds = _coco.getAnnIds(imgIds=img_id)
    img_mask = np.zeros((img['height'], img['width'], 1), dtype=np.uint8)
    for annId in annIds:
        ann = _coco.loadAnns(annId)[0]
        segm = ann['segmentation']
        if isinstance(segm, list):
            # Polygon annotation: one RLE per polygon part.
            rles = [mask.frPyObjects([part], img['height'], img['width'])
                    for part in segm]
        elif isinstance(segm['counts'], list):
            # Uncompressed RLE (mostly crowd regions, e.g. aeroplane).
            rles = [mask.frPyObjects([segm], img['height'], img['width'])]
        else:
            # Already-compressed RLE.
            rles = [[segm]]
        for rle in rles:
            m = mask.decode(rle)
            img_mask[np.where(m == 1)] = 1  # binary foreground label
    return img_mask
import skimage.io as io
import matplotlib.pyplot as plt
#%matplotlib inline
# Load instance and caption annotations, then visit every image id that is
# present in both annotation sets.
coco_instance = COCO(detection_json_val)
coco_caps = COCO(caption_json_val)
instance_set = set(coco_instance.imgs.keys())
caption_set = set(coco_caps.imgs.keys())
common_set = instance_set & caption_set
for check_img_id in list(common_set):
    print("*" * 40)
    imgIds = coco_instance.getImgIds(imgIds=[check_img_id])
    img = coco_instance.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
    # Zero-padded 6-digit image id per the val2014 file-naming scheme
    # (literal deduplicated — the original repeated it for imread and print).
    img_path = "/data2/dataset/coco/val2014/COCO_val2014_000000{}.jpg".format("%06d" % check_img_id)
    I = io.imread(img_path)
    # print() call instead of the Python-2 print statement, so the script is
    # valid under Python 3 as well.
    print(img_path)
    plt.imshow(I); plt.axis('off')
    # Overlay the instance segmentations on the image.
    annIds = coco_instance.getAnnIds(imgIds=img['id'], iscrowd=None)
    anns = coco_instance.loadAnns(annIds)
    coco_instance.showAnns(anns)
    plt.show()
    # Show the image's captions.
    annIds = coco_caps.getAnnIds(imgIds=img['id'])
    anns = coco_caps.loadAnns(annIds)
    print(coco_caps.showAnns(anns))
| StarcoderdataPython |
60730 | <filename>loopy/schedule/tools.py
__copyright__ = "Copyright (C) 2016 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from loopy.kernel.data import AddressSpace
from pytools import memoize_method
import islpy as isl
import enum
# {{{ block boundary finder
def get_block_boundaries(schedule):
    """
    Return a dictionary mapping indices of
    :class:`loopy.schedule.BlockBeginItem`s to
    :class:`loopy.schedule.BlockEndItem`s and vice versa.
    """
    from loopy.schedule import (BeginBlockItem, EndBlockItem)
    bounds = {}
    open_begins = []  # stack of indices of not-yet-closed begin items
    for pos, item in enumerate(schedule):
        if isinstance(item, BeginBlockItem):
            open_begins.append(pos)
        elif isinstance(item, EndBlockItem):
            begin_pos = open_begins.pop()
            bounds[begin_pos] = pos
            bounds[pos] = begin_pos
    return bounds
# }}}
# {{{ subkernel tools
def temporaries_read_in_subkernel(kernel, subkernel):
    """Return a frozenset of names of temporaries read by instructions in
    *subkernel*."""
    from loopy.kernel.tools import get_subkernel_to_insn_id_map
    read_temps = set()
    for insn_id in get_subkernel_to_insn_id_map(kernel)[subkernel]:
        for name in kernel.id_to_insn[insn_id].read_dependency_names():
            if name in kernel.temporary_variables:
                read_temps.add(name)
    return frozenset(read_temps)
def temporaries_written_in_subkernel(kernel, subkernel):
    """Return a frozenset of names of temporaries written by instructions in
    *subkernel*."""
    from loopy.kernel.tools import get_subkernel_to_insn_id_map
    written_temps = set()
    for insn_id in get_subkernel_to_insn_id_map(kernel)[subkernel]:
        for name in kernel.id_to_insn[insn_id].write_dependency_names():
            if name in kernel.temporary_variables:
                written_temps.add(name)
    return frozenset(written_temps)
# }}}
# {{{ add extra args to schedule
def add_extra_args_to_schedule(kernel):
    """
    Fill the `extra_args` fields in all the :class:`loopy.schedule.CallKernel`
    instructions in the schedule with global temporaries.
    """
    from loopy.schedule import CallKernel

    def _missing_global_temporaries(sched_item):
        # Global, uninitialized temporaries used by the subkernel that are
        # not yet listed in this CallKernel's extra_args.
        subkernel = sched_item.kernel_name
        used = (temporaries_read_in_subkernel(kernel, subkernel)
                | temporaries_written_in_subkernel(kernel, subkernel))
        return {name for name in used
                if kernel.temporary_variables[name].address_space
                    == AddressSpace.GLOBAL
                and kernel.temporary_variables[name].initializer is None
                and name not in sched_item.extra_args}

    new_linearization = []
    for sched_item in kernel.linearization:
        if isinstance(sched_item, CallKernel):
            extra = _missing_global_temporaries(sched_item)
            sched_item = sched_item.copy(
                extra_args=sched_item.extra_args + sorted(extra))
        new_linearization.append(sched_item)
    return kernel.copy(linearization=new_linearization)
# }}}
# {{{ get_return_from_kernel_mapping
def get_return_from_kernel_mapping(kernel):
    """
    Returns a mapping from schedule index of every schedule item (S) in
    *kernel* to the schedule index of :class:`loopy.schedule.ReturnFromKernel`
    of the active sub-kernel at 'S'.
    """
    from loopy.kernel import LoopKernel
    from loopy.schedule import (RunInstruction, EnterLoop, LeaveLoop,
                                CallKernel, ReturnFromKernel, Barrier)
    assert isinstance(kernel, LoopKernel)
    assert isinstance(kernel.linearization, list)
    result = {}
    pending_return_idx = None
    # Walk the schedule backwards so that, at any item, the most recently
    # seen ReturnFromKernel is the one that ends the sub-kernel active there.
    for idx in range(len(kernel.linearization) - 1, -1, -1):
        sched_item = kernel.linearization[idx]
        if isinstance(sched_item, CallKernel):
            result[idx] = pending_return_idx
            pending_return_idx = None
        elif isinstance(sched_item, ReturnFromKernel):
            assert pending_return_idx is None
            pending_return_idx = idx
            result[idx] = pending_return_idx
        elif isinstance(sched_item, (RunInstruction, EnterLoop, LeaveLoop,
                                     Barrier)):
            result[idx] = pending_return_idx
        else:
            raise NotImplementedError(type(sched_item))
    return result
# }}}
# {{{ check for write races in accesses
def _check_for_access_races(map_a, insn_a, map_b, insn_b, knl, callables_table):
    """
    Returns *True* if the execution instances of *insn_a* and *insn_b*, accessing
    the same variable via access maps *map_a* and *map_b*, result in an access race.

    :arg map_a: an ISL map from execution instances of *insn_a* to the array
        indices it accesses.
    :arg map_b: same as *map_a*, for *insn_b*.
    :arg knl: the kernel both instructions belong to.

    .. note::

        The accesses ``map_a``, ``map_b`` lead to write races iff there exists 2
        *unequal* global ids that access the same address.
    """
    import pymbolic.primitives as p
    from loopy.symbolic import isl_set_from_expr
    from loopy.kernel.data import (filter_iname_tags_by_type,
                                   HardwareConcurrentTag)
    gsize, lsize = knl.get_grid_size_upper_bounds(callables_table,
                                                  return_dict=True)
    # {{{ Step 1: Preprocess the maps
    # Step 1.1: Project out inames which are also map's dims, but does not form the
    # insn's within_inames
    # Step 1.2: Project out sequential inames in the access maps
    # Step 1.3: Rename the dims with their iname tags i.e. (g.i or l.i)
    # Step 1.4: Name the ith output dims as _lp_dim{i}
    updated_maps = []
    for (map_, insn) in [
            (map_a, insn_a),
            (map_b, insn_b)]:
        # Keep only hardware-concurrent inames (and params); everything else
        # is projected out of the map.
        dims_not_to_project_out = ({iname
                                    for iname in insn.within_inames
                                    if knl.iname_tags_of_type(
                                        iname, HardwareConcurrentTag)}
                                   | knl.all_params())
        map_ = map_.project_out_except(sorted(dims_not_to_project_out),
                                       [isl.dim_type.in_,
                                        isl.dim_type.param,
                                        isl.dim_type.div,
                                        isl.dim_type.cst])
        # Rename each remaining input dim to its hardware-axis tag ("g.i"/"l.i").
        for name, (dt, pos) in map_.get_var_dict().items():
            if dt == isl.dim_type.in_:
                tag, = filter_iname_tags_by_type(knl.inames[name].tags,
                                                 HardwareConcurrentTag)
                map_ = map_.set_dim_name(dt, pos, str(tag))
        # Add any hardware axes the instruction does not use, so that both
        # maps end up over the same set of axes.
        for i_l in lsize:
            if f"l.{i_l}" not in map_.get_var_dict():
                ndim = map_.dim(isl.dim_type.in_)
                map_ = map_.add_dims(isl.dim_type.in_, 1)
                map_ = map_.set_dim_name(isl.dim_type.in_, ndim, f"l.{i_l}")
        for i_g in gsize:
            if f"g.{i_g}" not in map_.get_var_dict():
                ndim = map_.dim(isl.dim_type.in_)
                map_ = map_.add_dims(isl.dim_type.in_, 1)
                map_ = map_.set_dim_name(isl.dim_type.in_, ndim, f"g.{i_g}")
        for pos in range(map_.dim(isl.dim_type.out)):
            map_ = map_.set_dim_name(isl.dim_type.out, pos, f"_lp_dim{pos}")
        updated_maps.append(map_)
    map_a, map_b = updated_maps
    # }}}
    # {{{ Step 2: rename all lid's, gid's in map_a to lid.A, gid.A
    for name, (dt, pos) in map_a.get_var_dict().items():
        if dt == isl.dim_type.in_:
            map_a = map_a.set_dim_name(dt, pos, name+".A")
    # }}}
    # {{{ Step 3: rename all lid's, gid's in map_b to lid.B, gid.B
    for name, (dt, pos) in map_b.get_var_dict().items():
        if dt == isl.dim_type.in_:
            map_b = map_b.set_dim_name(dt, pos, name+".B")
    # }}}
    # {{{ Step 4: make map_a, map_b ISL sets
    # Fold the output (array-index) dims into the input dims so each map
    # becomes a set over (hardware ids, array indices).
    map_a, map_b = isl.align_two(map_a, map_b)
    map_a = map_a.move_dims(isl.dim_type.in_, map_a.dim(isl.dim_type.in_),
                            isl.dim_type.out, 0, map_a.dim(isl.dim_type.out))
    map_b = map_b.move_dims(isl.dim_type.in_, map_b.dim(isl.dim_type.in_),
                            isl.dim_type.out, 0, map_b.dim(isl.dim_type.out))
    set_a = map_a.domain()
    set_b = map_b.domain()
    # }}}
    assert set_a.get_space() == set_b.get_space()
    # {{{ Step 5: create the set any(l.i.A != l.i.B) OR any(g.i.A != g.i.B)
    space = set_a.space
    unequal_global_id_set = isl.Set.empty(set_a.get_space())
    for i_l in lsize:
        lid_a = p.Variable(f"l.{i_l}.A")
        lid_b = p.Variable(f"l.{i_l}.B")
        unequal_global_id_set |= (isl_set_from_expr(space,
                                                    p.Comparison(lid_a, "!=", lid_b))
                                  )
    for i_g in gsize:
        gid_a = p.Variable(f"g.{i_g}.A")
        gid_b = p.Variable(f"g.{i_g}.B")
        unequal_global_id_set |= (isl_set_from_expr(space,
                                                    p.Comparison(gid_a, "!=", gid_b))
                                  )
    # }}}
    # Race iff two distinct global ids can touch a common address.
    return not (set_a & set_b & unequal_global_id_set).is_empty()
class AccessMapDescriptor(enum.Enum):
    """
    Special access map values.

    .. attribute:: DOES_NOT_ACCESS

        Describes an unaccessed variable.

    .. attribute:: NON_AFFINE_ACCESS

        Describes a non-quasi-affine access into an array.
    """
    DOES_NOT_ACCESS = enum.auto()
    NON_AFFINE_ACCESS = enum.auto()
class WriteRaceChecker:
    """Used for checking for overlap between access ranges of instructions."""

    def __init__(self, kernel, callables_table):
        self.kernel = kernel
        self.callables_table = callables_table

    @property
    @memoize_method
    def vars(self):
        # All variable names the kernel ever reads or writes (memoized).
        return (self.kernel.get_written_variables()
                | self.kernel.get_read_variables())

    @memoize_method
    def _get_access_maps(self, insn_id, access_dir):
        # Map each accessed variable name to its access map for *insn_id*,
        # or to an AccessMapDescriptor sentinel when no precise map exists.
        from loopy.symbolic import BatchedAccessMapMapper
        from collections import defaultdict
        insn = self.kernel.id_to_insn[insn_id]
        # For "w", only the assignees count; for "any", also the RHS
        # expression and the predicates.
        exprs = list(insn.assignees)
        if access_dir == "any":
            exprs.append(insn.expression)
            exprs.extend(insn.predicates)
        access_maps = defaultdict(lambda: AccessMapDescriptor.DOES_NOT_ACCESS)
        arm = BatchedAccessMapMapper(self.kernel, self.vars, overestimate=True)
        for expr in exprs:
            arm(expr, insn.within_inames)
        for name in arm.access_maps:
            if arm.bad_subscripts[name]:
                # Non-quasi-affine subscripts: no precise map available.
                access_maps[name] = AccessMapDescriptor.NON_AFFINE_ACCESS
                continue
            access_maps[name] = arm.access_maps[name][insn.within_inames]
        return access_maps

    def _get_access_map_for_var(self, insn_id, access_dir, var_name):
        assert access_dir in ["w", "any"]
        insn = self.kernel.id_to_insn[insn_id]
        # Access range checks only apply to assignment-style instructions. For
        # non-assignments, we rely on read/write dependency information.
        # NOTE(review): in that case this returns a bool, which the caller's
        # descriptor checks do not distinguish from a map — confirm intended.
        from loopy.kernel.instruction import MultiAssignmentBase
        if not isinstance(insn, MultiAssignmentBase):
            if access_dir == "any":
                return var_name in insn.dependency_names()
            else:
                return var_name in insn.write_dependency_names()
        return self._get_access_maps(insn_id, access_dir)[var_name]

    def do_accesses_result_in_races(self, insn1, insn1_dir, insn2, insn2_dir,
                                    var_name):
        """Determine whether the access maps to *var_name* in the two given
        instructions result in write races owing to concurrent iname tags. This
        determination is made 'conservatively', i.e. if precise information is
        unavailable (for ex. if one of the instructions accesses *var_name* via
        indirection), it is concluded that the ranges overlap.

        :arg insn1_dir: either ``"w"`` or ``"any"``, to indicate which
            type of access is desired--writing or any
        :arg insn2_dir: either ``"w"`` or ``"any"``
        :returns: a :class:`bool`
        """
        insn1_amap = self._get_access_map_for_var(insn1, insn1_dir, var_name)
        insn2_amap = self._get_access_map_for_var(insn2, insn2_dir, var_name)
        # If either instruction does not touch the variable there is no race.
        if (insn1_amap is AccessMapDescriptor.DOES_NOT_ACCESS
                or insn2_amap is AccessMapDescriptor.DOES_NOT_ACCESS):
            return False
        # Conservative: non-affine access means we must assume a race.
        if (insn1_amap is AccessMapDescriptor.NON_AFFINE_ACCESS
                or insn2_amap is AccessMapDescriptor.NON_AFFINE_ACCESS):
            return True
        return _check_for_access_races(insn1_amap, self.kernel.id_to_insn[insn1],
                                       insn2_amap, self.kernel.id_to_insn[insn2],
                                       self.kernel, self.callables_table)
# }}}
| StarcoderdataPython |
1656071 | <reponame>Dan-Freda/python-challenge<filename>PyBank/main.py
# Py Me Up, Charlie (PyBank)
# Import Modules/Dependencies
import os
import csv

# Paths: input budget data and the text-file analysis output.
budgetdata_csv = os.path.join('Resources', 'budget_data.csv')
output_file = os.path.join('Analysis', 'output.txt')

total_months = 0
net_total_amount = 0
monthly_change = []   # month-over-month change in Profit/Losses
change_months = []    # month label corresponding to each entry above

# Open and read the CSV file.
with open(budgetdata_csv, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    # Seed with the first data row (it has no previous month to diff against).
    row = next(csvreader)
    previous_amount = int(row[1])
    total_months = 1
    net_total_amount = previous_amount
    for row in csvreader:
        total_months += 1
        amount = int(row[1])
        net_total_amount += amount
        # Change in Profit/Losses on a month-to-month basis.
        monthly_change.append(amount - previous_amount)
        change_months.append(row[0])
        previous_amount = amount

average_change = sum(monthly_change) / len(monthly_change)
highest = max(monthly_change)
lowest = min(monthly_change)
# FIX: the reported month must correspond to the greatest *change* (which is
# what is printed as the amount). The original tracked the month with the
# greatest raw Profit/Losses value instead, so month and amount disagreed;
# it also built `month_count` for exactly this purpose but never used it.
greatest_increase_month = change_months[monthly_change.index(highest)]
greatest_decrease_month = change_months[monthly_change.index(lowest)]

# Assemble the report once; print it and write it to the output file.
report_lines = [
    "Financial Analysis",
    "-----------------------------",
    f"Total Months: {total_months}",
    f"Total: ${net_total_amount}",
    f"Average Change: ${average_change:.2f}",
    f"Greatest Increase in Profits:, {greatest_increase_month}, (${highest})",
    f"Greatest Decrease in Profits:, {greatest_decrease_month}, (${lowest})",
]
for line in report_lines:
    print(line)
with open(output_file, 'w') as txtfile:
    txtfile.write("\n".join(report_lines) + "\n")
| StarcoderdataPython |
class QueryResult:
    """Simple container for one result of a query.

    All constructor arguments are stored verbatim as attributes of the same
    name; no validation or conversion is performed.
    """

    def __init__(self, icon='', name='', description='', clipboard=None,
                 value=None, error=None, order=0):
        self.icon = icon
        self.name = name
        self.description = description
        self.clipboard = clipboard
        self.value = value
        self.error = error
        self.order = order

    def __repr__(self):
        # For debugging/logging only; callers should not parse this.
        return (f"{type(self).__name__}(icon={self.icon!r}, "
                f"name={self.name!r}, description={self.description!r}, "
                f"clipboard={self.clipboard!r}, value={self.value!r}, "
                f"error={self.error!r}, order={self.order!r})")
1763383 | <gh_stars>0
def converte_dias(total_dias):
    """Break a number of days into (years, months, days) using 365-day years
    and 30-day months.

    Equivalent to the original branching logic (verified on the boundary
    cases 359, 360, 364, 365): months in excess of 11 only occur for totals
    in [360, 364], where no full 365-day year has elapsed yet.
    """
    anos = total_dias // 365
    resto = total_dias - anos * 365
    meses = resto // 30
    dias = resto - meses * 30
    return anos, meses, dias


if __name__ == "__main__":
    # entrada: total de dias lido da entrada padrao
    Edays = int(input())
    year, month, days = converte_dias(Edays)
    print('{} ano(s)\n{} mes(es)\n{} dia(s)'.format(year, month, days))
| StarcoderdataPython |
100764 | <filename>scripts/clues_plot_trajectory.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Customised version of https://github.com/35ajstern/clues/blob/master/plot_traj.py

Plots the CLUES posterior over the derived-allele-frequency trajectory as a
heatmap over time, annotated with selection statistics and GWAS labels.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import argparse
import json
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats.distributions import chi2
# Command line: CLUES output prefix, figure prefix, and plot metadata.
parser = argparse.ArgumentParser()
parser.add_argument("inputPrefix", type=str)
parser.add_argument("figurePrefix", type=str)
parser.add_argument("--ext", type=str, default="pdf")
# argparse applies `type` to string defaults, so "28" becomes the int 28.
parser.add_argument("--gen-time", type=int, default="28")
parser.add_argument("--params", type=str)
parser.add_argument("--label", type=str)
parser.add_argument("--ancestry", type=str)
parser.add_argument("--sex", type=str)
args = parser.parse_args()
# Load the CLUES outputs: epoch boundaries, frequency bins, log-posterior.
epochs = np.load("{}.epochs.npy".format(args.inputPrefix))
freqs = np.load("{}.freqs.npy".format(args.inputPrefix))
logpost = np.load("{}.post.npy".format(args.inputPrefix))
with open(args.label) as fin:
    label = json.load(fin)
with open(args.params) as fin:
    params = json.load(fin)
f, ax = plt.subplots(1, 1)
f.set_size_inches(20, 10)
# x-axis range in generations; cap at 13665 years converted to generations.
xmin = int(min(epochs))
xmax = min(int(max(epochs)), round(13665 / args.gen_time))  # TODO parameterize this
xticks = range(xmin, xmax + 1, round(1000 / args.gen_time))
# flip the x-axis
epochs = epochs * -1
xticks = [tick * -1 for tick in xticks]
xlabels = [-int(tick * args.gen_time / 1000) for tick in xticks]
desc = {
    "ancient": "Ancient samples only",
    "modern": "Modern 1000G data only",
    "both": "Ancient samples plus modern 1000G data",
}
subtitle = desc[params["mode"]]
ancestries = {
    "ALL": "All ancestries",
    "ANA": "Anatolian Farmers",
    "CHG": "Caucasus Hunter-gatherers",
    "WHG": "Western Hunter-gatherers",
    "EHG": "Eastern Hunter-gatherers",
}
subtitle += " | " + ancestries[args.ancestry]
sexes = {
    "XX": "XX karyotypes",
    "XY": "XY karyotypes",
    "any": "All karyotypes",
}
subtitle += " | " + sexes[args.sex]
# One subtitle line per epoch with its selection coefficient and test stats.
data = []
for epoch, s in params["epochs"].items():
    # convert the log-likelihood ratio into a p-value
    params["p.value"] = chi2.sf(params["logLR"], 1)
    data.append(
        "logLR = {:.2f} | p = {:.2e} | epoch = {} | s = {:.5f}".format(params["logLR"], params["p.value"], epoch, s)
    )
subtitle += "\n" + "\n".join(data)
# Heatmap of the posterior probability over (time, frequency).
plt.pcolormesh(epochs[:-1], freqs, np.exp(logpost)[:, :])
plt.suptitle(label["title"], x=0.6, fontsize=18)
plt.title(subtitle, fontsize=16, pad=10)
plt.axis((-xmax, -xmin, 0, 1.0))
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
plt.ylabel("Derived Allele Frequency", fontsize=16, labelpad=20)
plt.xlabel("kyr BP", fontsize=16)
plt.xticks(ticks=xticks, labels=xlabels, fontsize=16)
plt.yticks(fontsize=18)
cbar = plt.colorbar(ax=[ax], location="left")
plt.clim(0, 0.5)
cbar.ax.set_ylabel("Posterior prob.\n\n", rotation=270, fontsize=16, labelpad=-40)
cbar.ax.set_yticklabels([0.0, 0.1, 0.2, 0.3, 0.4, "≥0.5"])
cbar.ax.tick_params(labelsize=18)
# print in two columns
per_column = ceil(len(label["gwascat"]) / 2)
# add GWAS associations to bottom
# NOTE(review): the loop variable deliberately shadows the `label` dict; this
# works only because label["gwascat"] is evaluated once by enumerate().
for i, label in enumerate(label["gwascat"]):
    if i < per_column:
        x, y = 0.15, i * -0.03
        align = "left"
    else:
        x, y = 0.95, (i - per_column) * -0.03
        align = "right"
    plt.figtext(x, y, label, horizontalalignment=align, fontsize=16)
plt.savefig("%s.%s" % (args.figurePrefix, args.ext), format=args.ext, bbox_inches="tight")
| StarcoderdataPython |
13035 | import pandas as pd
# Global variable to set the base path to our dataset folder
base_url = '../dataset/'
def update_mailing_list_pandas(filename):
    """Load *filename* from the dataset folder and count active subscribers.

    The original was an unfinished exercise template whose ``df = #`` line
    was a SyntaxError; this completes it per the instructions in its own
    comments.

    :param filename: CSV file name, resolved relative to ``base_url``
    :return: number of rows whose ``active`` flag is truthy
        NOTE(review): the column name ``active`` and its 0/1-or-bool encoding
        are assumed from the exercise description — confirm against the
        actual CSV schema.
    """
    df = pd.read_csv(base_url + filename)
    # Keep only rows flagged as active, then count them.
    return int(df['active'].astype(bool).sum())
# Calling the function to test your code
# (runs at import time and expects ../dataset/mailing_list.csv to exist)
print(update_mailing_list_pandas('mailing_list.csv'))
| StarcoderdataPython |
119871 | '''
Author : <NAME>
API Project for Olympics Database
Takes requests using the flask app route in browser and returns list of dictionaries containing results.
'''
import sys
import argparse
import flask
import json
import psycopg2
from config import user
from config import password
from config import database
# When True, route handlers echo each fetched row to stdout for debugging.
debug = False
app = flask.Flask(__name__)
# Open a single module-level DB connection shared by all request handlers;
# the process exits immediately if the database is unreachable.
# NOTE(review): confirm the deployment's concurrency model is compatible
# with sharing one psycopg2 connection across requests.
try:
    connection = psycopg2.connect(database=database, user=user, password=password)
except Exception as e:
    print(e)
    exit()
@app.route('/games')
def get_games():
    '''
    Uses the route /games to return a JSON list of all unique Olympic games,
    sorted by year.  Each game is encoded (shape preserved from the original
    API) as a list of single-key dicts:
    [{"id": ...}, {"year": ...}, {"season": ...}, {"city": ...}]
    '''
    games_list = []
    try:
        cursor = connection.cursor()
    except Exception as e:
        print(e)
        exit()
    # The original WHERE clause compared games.game_id with itself — a
    # tautology for a non-null column — so it has been dropped.
    query = '''SELECT DISTINCT game_id, year, season, city
               FROM games
               ORDER BY year;
            '''
    try:
        cursor.execute(query)
    except Exception as e:
        print(e)
        exit()
    for item in cursor:
        if debug:
            print(item)
        game = [{"id": item[0]}, {"year": item[1]},
                {"season": item[2]}, {"city": item[3]}]
        games_list.append(game)
    return json.dumps(games_list)
@app.route('/nocs')
def get_nocs():
    '''
    Route /nocs: return a JSON list of NOCs, each entry being
    [{"id": ...}, {"region": ...}], ordered by NOC id.
    '''
    noc_list = []
    try:
        cursor = connection.cursor()
    except Exception as e:
        print(e)
        # NOTE(review): exit() kills the server on a DB error; kept for
        # consistency with the other handlers.
        exit()
    # The original query carried a tautological "WHERE noc.noc_id = noc_id"
    # clause (always true); it has been dropped without changing the result.
    query = '''SELECT noc_id, region
            FROM noc
            ORDER BY noc_id;
            '''
    try:
        cursor.execute(query)
    except Exception as e:
        print(e)
        exit()
    for item in cursor:
        if debug:
            print(item)
        noc = [{"id": item[0]}, {"region": item[1]}]
        noc_list.append(noc)
    cursor.close()
    return json.dumps(noc_list)
@app.route('/medalists/games/<games_id>')
def get_athlete_medal_in_games(games_id):
    '''
    Route /medalists/games/<games_id>: return all medalists of one Olympic
    games as JSON. When the optional ?noc=<id> query parameter is supplied,
    only medalists of that NOC are returned.
    '''
    results_list = []
    # Optional NOC filter from the query string (None when absent).
    noc = flask.request.args.get('noc')
    try:
        cursor = connection.cursor()
    except Exception as e:
        print(e)
        exit()
    if noc is None:
        # No NOC filter: join athletes/events/games and keep medal rows only.
        query = '''SELECT athletes.id, athletes.name, athletes.sex,sports.sport_name, event.event_name, athlete_medal.medal
                FROM athletes, athlete_medal, event,games, sports
                WHERE athletes.id = athlete_medal.athlete_id
                AND event.event_id = athlete_medal.event_id
                AND event.sport_id = sports.sport_id
                AND games.game_id = athlete_medal.game_id
                AND games.game_id = %s
                AND (athlete_medal.medal = 'Gold' OR athlete_medal.medal = 'Silver' OR athlete_medal.medal = 'Bronze');
                '''
        try:
            cursor.execute(query, (games_id,))
        except Exception as e:
            print(e)
            exit()
    else:
        # Same query additionally restricted to the requested NOC.
        query = '''SELECT athletes.id, athletes.name, athletes.sex,sports.sport_name, event.event_name, athlete_medal.medal
                FROM athletes, athlete_medal, event,games, sports, noc
                WHERE athletes.id = athlete_medal.athlete_id
                AND athlete_medal.game_id = %s
                AND athlete_medal.noc_id = %s
                AND event.event_id = athlete_medal.event_id
                AND event.sport_id = sports.sport_id
                AND games.game_id = athlete_medal.game_id
                AND (athlete_medal.medal = 'Gold' OR athlete_medal.medal = 'Silver' OR athlete_medal.medal = 'Bronze')
                AND athlete_medal.noc_id = noc.noc_id;
                '''
        try:
            cursor.execute(query, (games_id,noc))
        except Exception as e:
            print(e)
            exit()
    for item in cursor:
        if debug:
            print(item)
        # One result entry per medal, as a list of single-key dicts.
        result_dict = [{"athlete_id": item[0]}, {"athlete_name": item[1]}, {"athlete_sex": item[2]}, {"sport": item[3]},
                       {"event": item[4]}, {"medal": item[5]}]
        results_list.append(result_dict)
    return json.dumps(results_list)
@app.route('/help')
def get_help():
    '''Route /help: return a plain-text usage summary for the API.'''
    # BUG FIX: the original ended the string-concatenation with a stray line
    # continuation backslash directly before `return`, which is a SyntaxError.
    # Also corrected "/noc" -> "/nocs" (the actual route) and a typo.
    help_statement = " This is an api to search up olympic data. \n Use /games to get a list of all games held. " \
                     "\n Use /nocs to get a list of all nocs in the olympics. \n " \
                     "Use /medalists/games/<game_id>?[noc=noc_abbreviation] for searching up all athletes" \
                     " who won medals in a specific games or further specify those from a particular noc"
    return help_statement
if __name__ == '__main__':
    # Host and port are mandatory positional CLI arguments, e.g.
    #   python api.py localhost 5000
    parser = argparse.ArgumentParser('A sample Flask application/API')
    parser.add_argument('host', help='the host on which this application is running')
    parser.add_argument('port', type=int, help='the port on which this application is listening')
    arguments = parser.parse_args()
    app.run(host=arguments.host, port=arguments.port, debug=True)
    # app.run() blocks until the server stops, so the shared connection is
    # only closed on a normal shutdown.
    connection.close()
| StarcoderdataPython |
3300126 | <reponame>LaudateCorpus1/oci-python-sdk<filename>src/oci/object_storage/models/object_lifecycle_rule.py<gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ObjectLifecycleRule(object):
    """
    To use any of the API operations, you must be authorized in an IAM policy. If you are not authorized,
    talk to an administrator. If you are an administrator who needs to write policies to give users access, see
    `Getting Started with Policies`__.
    __ https://docs.cloud.oracle.com/Content/Identity/Concepts/policygetstarted.htm
    """
    #: A constant which can be used with the time_unit property of a ObjectLifecycleRule.
    #: This constant has a value of "DAYS"
    TIME_UNIT_DAYS = "DAYS"
    #: A constant which can be used with the time_unit property of a ObjectLifecycleRule.
    #: This constant has a value of "YEARS"
    TIME_UNIT_YEARS = "YEARS"
    def __init__(self, **kwargs):
        """
        Initializes a new ObjectLifecycleRule object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param name:
            The value to assign to the name property of this ObjectLifecycleRule.
        :type name: str
        :param target:
            The value to assign to the target property of this ObjectLifecycleRule.
        :type target: str
        :param action:
            The value to assign to the action property of this ObjectLifecycleRule.
        :type action: str
        :param time_amount:
            The value to assign to the time_amount property of this ObjectLifecycleRule.
        :type time_amount: int
        :param time_unit:
            The value to assign to the time_unit property of this ObjectLifecycleRule.
            Allowed values for this property are: "DAYS", "YEARS", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type time_unit: str
        :param is_enabled:
            The value to assign to the is_enabled property of this ObjectLifecycleRule.
        :type is_enabled: bool
        :param object_name_filter:
            The value to assign to the object_name_filter property of this ObjectLifecycleRule.
        :type object_name_filter: oci.object_storage.models.ObjectNameFilter
        """
        # Maps attribute names to their Swagger-declared types; consumed by the
        # SDK's generic (de)serialization machinery.
        self.swagger_types = {
            'name': 'str',
            'target': 'str',
            'action': 'str',
            'time_amount': 'int',
            'time_unit': 'str',
            'is_enabled': 'bool',
            'object_name_filter': 'ObjectNameFilter'
        }
        # Maps python attribute names to the JSON field names used on the wire.
        self.attribute_map = {
            'name': 'name',
            'target': 'target',
            'action': 'action',
            'time_amount': 'timeAmount',
            'time_unit': 'timeUnit',
            'is_enabled': 'isEnabled',
            'object_name_filter': 'objectNameFilter'
        }
        self._name = None
        self._target = None
        self._action = None
        self._time_amount = None
        self._time_unit = None
        self._is_enabled = None
        self._object_name_filter = None
    @property
    def name(self):
        """
        **[Required]** Gets the name of this ObjectLifecycleRule.
        The name of the lifecycle rule to be applied.
        :return: The name of this ObjectLifecycleRule.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this ObjectLifecycleRule.
        The name of the lifecycle rule to be applied.
        :param name: The name of this ObjectLifecycleRule.
        :type: str
        """
        self._name = name
    @property
    def target(self):
        """
        Gets the target of this ObjectLifecycleRule.
        The target of the object lifecycle policy rule. The values of target can be either \"objects\",
        \"multipart-uploads\" or \"previous-object-versions\".
        This field when declared as \"objects\" is used to specify ARCHIVE, INFREQUENT_ACCESS
        or DELETE rule for objects.
        This field when declared as \"previous-object-versions\" is used to specify ARCHIVE,
        INFREQUENT_ACCESS or DELETE rule for previous versions of existing objects.
        This field when declared as \"multipart-uploads\" is used to specify the ABORT (only) rule for
        uncommitted multipart-uploads.
        :return: The target of this ObjectLifecycleRule.
        :rtype: str
        """
        return self._target
    @target.setter
    def target(self, target):
        """
        Sets the target of this ObjectLifecycleRule.
        The target of the object lifecycle policy rule. The values of target can be either \"objects\",
        \"multipart-uploads\" or \"previous-object-versions\".
        This field when declared as \"objects\" is used to specify ARCHIVE, INFREQUENT_ACCESS
        or DELETE rule for objects.
        This field when declared as \"previous-object-versions\" is used to specify ARCHIVE,
        INFREQUENT_ACCESS or DELETE rule for previous versions of existing objects.
        This field when declared as \"multipart-uploads\" is used to specify the ABORT (only) rule for
        uncommitted multipart-uploads.
        :param target: The target of this ObjectLifecycleRule.
        :type: str
        """
        self._target = target
    @property
    def action(self):
        """
        **[Required]** Gets the action of this ObjectLifecycleRule.
        The action of the object lifecycle policy rule.
        Rules using the action 'ARCHIVE' move objects from Standard and InfrequentAccess storage tiers
        into the `Archive storage tier`__.
        Rules using the action 'INFREQUENT_ACCESS' move objects from Standard storage tier into the
        Infrequent Access Storage tier. Objects that are already in InfrequentAccess tier or in Archive
        tier are left untouched.
        Rules using the action 'DELETE' permanently delete objects from buckets.
        Rules using 'ABORT' abort the uncommitted multipart-uploads and permanently delete their parts from buckets.
        __ https://docs.cloud.oracle.com/Content/Archive/Concepts/archivestorageoverview.htm
        :return: The action of this ObjectLifecycleRule.
        :rtype: str
        """
        return self._action
    @action.setter
    def action(self, action):
        """
        Sets the action of this ObjectLifecycleRule.
        The action of the object lifecycle policy rule.
        Rules using the action 'ARCHIVE' move objects from Standard and InfrequentAccess storage tiers
        into the `Archive storage tier`__.
        Rules using the action 'INFREQUENT_ACCESS' move objects from Standard storage tier into the
        Infrequent Access Storage tier. Objects that are already in InfrequentAccess tier or in Archive
        tier are left untouched.
        Rules using the action 'DELETE' permanently delete objects from buckets.
        Rules using 'ABORT' abort the uncommitted multipart-uploads and permanently delete their parts from buckets.
        __ https://docs.cloud.oracle.com/Content/Archive/Concepts/archivestorageoverview.htm
        :param action: The action of this ObjectLifecycleRule.
        :type: str
        """
        self._action = action
    @property
    def time_amount(self):
        """
        **[Required]** Gets the time_amount of this ObjectLifecycleRule.
        Specifies the age of objects to apply the rule to. The timeAmount is interpreted in units defined by the
        timeUnit parameter, and is calculated in relation to each object's Last-Modified time.
        :return: The time_amount of this ObjectLifecycleRule.
        :rtype: int
        """
        return self._time_amount
    @time_amount.setter
    def time_amount(self, time_amount):
        """
        Sets the time_amount of this ObjectLifecycleRule.
        Specifies the age of objects to apply the rule to. The timeAmount is interpreted in units defined by the
        timeUnit parameter, and is calculated in relation to each object's Last-Modified time.
        :param time_amount: The time_amount of this ObjectLifecycleRule.
        :type: int
        """
        self._time_amount = time_amount
    @property
    def time_unit(self):
        """
        **[Required]** Gets the time_unit of this ObjectLifecycleRule.
        The unit that should be used to interpret timeAmount. Days are defined as starting and ending at midnight UTC.
        Years are defined as 365.2425 days long and likewise round up to the next midnight UTC.
        Allowed values for this property are: "DAYS", "YEARS", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :return: The time_unit of this ObjectLifecycleRule.
        :rtype: str
        """
        return self._time_unit
    @time_unit.setter
    def time_unit(self, time_unit):
        """
        Sets the time_unit of this ObjectLifecycleRule.
        The unit that should be used to interpret timeAmount. Days are defined as starting and ending at midnight UTC.
        Years are defined as 365.2425 days long and likewise round up to the next midnight UTC.
        :param time_unit: The time_unit of this ObjectLifecycleRule.
        :type: str
        """
        # Unrecognized service values are normalized instead of raising.
        allowed_values = ["DAYS", "YEARS"]
        if not value_allowed_none_or_none_sentinel(time_unit, allowed_values):
            time_unit = 'UNKNOWN_ENUM_VALUE'
        self._time_unit = time_unit
    @property
    def is_enabled(self):
        """
        **[Required]** Gets the is_enabled of this ObjectLifecycleRule.
        A Boolean that determines whether this rule is currently enabled.
        :return: The is_enabled of this ObjectLifecycleRule.
        :rtype: bool
        """
        return self._is_enabled
    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """
        Sets the is_enabled of this ObjectLifecycleRule.
        A Boolean that determines whether this rule is currently enabled.
        :param is_enabled: The is_enabled of this ObjectLifecycleRule.
        :type: bool
        """
        self._is_enabled = is_enabled
    @property
    def object_name_filter(self):
        """
        Gets the object_name_filter of this ObjectLifecycleRule.
        :return: The object_name_filter of this ObjectLifecycleRule.
        :rtype: oci.object_storage.models.ObjectNameFilter
        """
        return self._object_name_filter
    @object_name_filter.setter
    def object_name_filter(self, object_name_filter):
        """
        Sets the object_name_filter of this ObjectLifecycleRule.
        :param object_name_filter: The object_name_filter of this ObjectLifecycleRule.
        :type: oci.object_storage.models.ObjectNameFilter
        """
        self._object_name_filter = object_name_filter
    def __repr__(self):
        """Return a formatted dump of all model attributes."""
        return formatted_flat_dict(self)
    def __eq__(self, other):
        """Two rules are equal when every attribute matches."""
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| StarcoderdataPython |
155201 | <reponame>nabetama-training/CompetitionProgrammingPractice
def resolve():
    """Read an n×m matrix A and an m×l matrix B from stdin, print C = A·B.

    Input format: first line "n m l", then the n rows of A and the m rows
    of B, all whitespace-separated integers. Each row of the product is
    printed space-separated on its own line.
    """
    n, m, l = map(int, input().split())
    a = [list(map(int, input().split())) for _ in range(n)]
    b = [list(map(int, input().split())) for _ in range(m)]
    for row in a:
        # Dot product of this row of A with every column of B.
        products = [sum(row[k] * b[k][j] for k in range(m)) for j in range(l)]
        print(*products)
| StarcoderdataPython |
1612791 | <gh_stars>1-10
"""
Helper functions for the commands.
"""
import os
import logging
from typing import Tuple, Union

from django.conf import settings as sett
logger = logging.getLogger(__name__)
def get_p_run_name(name: str, return_folder: bool = False) -> Union[str, Tuple[str, str]]:
    """
    Determine the name of the pipeline run. Can also return the output folder
    if selected.

    If *name* contains a path separator it is treated as a path to the run
    directory; otherwise the run lives under ``sett.PIPELINE_WORKING_DIR``.

    Args:
        name: The user entered name (or path) of the pipeline run.
        return_folder: When `True` the pipeline directory is also returned.

    Returns:
        The name of the pipeline run, or a ``(name, folder)`` tuple when
        *return_folder* is `True`.  (BUG FIX: the return annotation previously
        claimed ``Tuple[str, str]`` even for the plain-string case.)
    """
    if '/' in name:
        # A path was supplied: canonicalize it and use the leaf as the name.
        folder = os.path.realpath(name)
        run_name = os.path.basename(folder)
        return (run_name, folder) if return_folder else run_name
    folder = os.path.join(os.path.realpath(sett.PIPELINE_WORKING_DIR), name)
    return (name, folder) if return_folder else name
| StarcoderdataPython |
3386563 | <gh_stars>1-10
import unittest
from stareg.penalty_matrix import PenaltyMatrix
from stareg.bspline import Bspline
import numpy as np
from scipy.signal import find_peaks
class TestPenaltyMatrix(unittest.TestCase):
    """Unit tests for stareg.penalty_matrix.PenaltyMatrix constructors.

    BUG FIX: the original used ``assertTrue(a, b)`` for value checks; the
    second positional argument of assertTrue is the failure *message*, so no
    comparison ever happened. All such calls are now ``assertEqual(a, b)``.
    """
    def setUp(self):
        self.n_param = 25
        self.PM = PenaltyMatrix()
    def tearDown(self):
        del self.PM
    def test_d1_difference_matrix(self):
        d1 = self.PM.d1_difference_matrix(n_param=self.n_param)
        self.assertEqual(d1.shape, (self.n_param-1, self.n_param))
        # Every row must contain exactly one -1 and one +1.
        self.assertTrue((((d1 == -1).sum(axis=1) + (d1 == 1).sum(axis=1)) == 2).all())
        self.assertEqual(d1[0, 0], -1)
        self.assertEqual(d1[0, 1], 1)
    def test_d2_difference_matrix(self):
        d2 = self.PM.d2_difference_matrix(n_param=self.n_param)
        self.assertEqual(d2.shape, (self.n_param-2, self.n_param))
        # Every row must contain one -2 and two +1 entries.
        self.assertTrue((((d2 == -2).sum(axis=1) + (d2 == 1).sum(axis=1)) == 3).all())
        self.assertEqual(d2[0, 0], 1)
        self.assertEqual(d2[0, 1], -2)
        self.assertEqual(d2[0, 2], 1)
    def test_smoothness_matrix(self):
        sm = self.PM.smoothness_matrix(n_param=self.n_param)
        self.assertEqual(sm.shape, (self.n_param-2, self.n_param))
        self.assertTrue((((sm == -2).sum(axis=1) + (sm == 1).sum(axis=1)) == 3).all())
        self.assertEqual(sm[0, 0], 1)
        self.assertEqual(sm[0, 1], -2)
        self.assertEqual(sm[0, 2], 1)
    def test_peak(self):
        x = np.linspace(0, 1, 100)
        y = 0.5*np.exp(-(x - 0.4)**2 / 0.01)
        bs = Bspline()
        bs.bspline_basis(x_data=x, k=self.n_param, m=2, type_="equidistant")
        peak = self.PM.peak_matrix(n_param=self.n_param, y_data=y, basis=bs.basis)
        self.assertEqual(peak.shape, (self.n_param-1, self.n_param))
        # Exactly one all-zero row marks the peak location.
        self.assertEqual(np.count_nonzero(np.count_nonzero(peak, axis=1)==0), 1)
        self.assertEqual(peak[0, 0], -1)
        self.assertEqual(peak[0, 1], 1)
        self.assertEqual(peak[-1, -1], 1)
        self.assertEqual(peak[-1, -2], 1)
    def test_valley(self):
        x = np.linspace(0, 1, 100)
        y = -1*0.5*np.exp(-(x - 0.4)**2 / 0.01)
        bs = Bspline()
        bs.bspline_basis(x_data=x, k=self.n_param, m=2, type_="equidistant")
        valley = self.PM.valley_matrix(n_param=self.n_param, y_data=y, basis=bs.basis)
        self.assertEqual(valley.shape, (self.n_param-1, self.n_param))
        self.assertEqual(np.count_nonzero(np.count_nonzero(valley, axis=1)==0), 1)
        self.assertEqual(valley[0, 0], 1)
        self.assertEqual(valley[0, 1], -1)
        self.assertEqual(valley[-1, -1], -1)
        self.assertEqual(valley[-1, -2], 1)
    def test_multi_peak(self):
        x = np.linspace(0, 1, 100)
        y = np.exp(-(x - 0.4)**2 / 0.01) + np.exp(-(x-0.8)**2 / 0.01)
        bs = Bspline()
        bs.bspline_basis(x_data=x, k=self.n_param, m=2, type_="equidistant")
        peaks = self.PM.multi_peak_matrix(n_param=self.n_param, y_data=y, basis=bs.basis)
        self.assertEqual(peaks.shape, (self.n_param-1, self.n_param))
        # Two peaks -> three all-zero rows (one per extremum boundary region).
        self.assertEqual(np.count_nonzero(np.count_nonzero(peaks, axis=1)==0), 3)
    def test_multi_valley(self):
        x = np.linspace(0, 1, 100)
        y = np.exp(-(x - 0.4)**2 / 0.01) + np.exp(-(x-0.8)**2 / 0.01)
        y = -1*y
        bs = Bspline()
        bs.bspline_basis(x_data=x, k=self.n_param, m=2, type_="equidistant")
        valley = self.PM.multi_valley_matrix(n_param=self.n_param, y_data=y, basis=bs.basis)
        self.assertEqual(valley.shape, (self.n_param-1, self.n_param))
        self.assertEqual(np.count_nonzero(np.count_nonzero(valley, axis=1)==0), 3)
    def test_multi_extremum_peak_then_valley(self):
        x = np.linspace(0, 1, 100)
        y = np.exp(-(x - 0.4)**2 / 0.01) + -1*np.exp(-(x-0.8)**2 / 0.01)
        bs = Bspline()
        bs.bspline_basis(x_data=x, k=self.n_param, m=2, type_="equidistant")
        valley = self.PM.multi_extremum_matrix(n_param=self.n_param, y_data=y, basis=bs.basis)
        self.assertEqual(valley.shape, (self.n_param-1, self.n_param))
        self.assertEqual(np.count_nonzero(np.count_nonzero(valley, axis=1)==0), 3)
    def test_multi_extremum_valley_then_peak(self):
        x = np.linspace(0, 1, 100)
        y = -1*np.exp(-(x - 0.4)**2 / 0.01) + np.exp(-(x-0.8)**2 / 0.01)
        bs = Bspline()
        bs.bspline_basis(x_data=x, k=self.n_param, m=2, type_="equidistant")
        valley = self.PM.multi_extremum_matrix(n_param=self.n_param, y_data=y, basis=bs.basis)
        self.assertEqual(valley.shape, (self.n_param-1, self.n_param))
        self.assertEqual(np.count_nonzero(np.count_nonzero(valley, axis=1)==0), 3)
if __name__ == "__main__":
    unittest.main()
1654636 | from pathlib import Path
import pytest
from configuror.exceptions import DecodeError
def test_method_returns_false_when_file_is_unknown_and_ignore_flag_is_true(config):
    # With ignore_file_absence=True a missing file is tolerated: the loader
    # signals failure via its falsy return value instead of raising.
    assert not config.load_from_python_file('foo.txt', ignore_file_absence=True)
def test_method_raises_error_when_file_is_unknown_and_ignore_flag_is_false(config):
    # Without the ignore flag, a missing file must raise FileNotFoundError
    # with a message naming the offending path.
    with pytest.raises(FileNotFoundError) as exc_info:
        config.load_from_python_file('foo.txt')
    assert 'file foo.txt not found on the filesystem' == str(exc_info.value)
@pytest.mark.parametrize('filename', [2, 2.5, ['a', 'b']])
def test_method_raises_error_when_filename_is_not_a_string(config, filename):
    # Non-string filenames are rejected with a TypeError naming the argument
    # as "(unknown)".
    with pytest.raises(TypeError) as exc_info:
        config.load_from_python_file(filename)
    # Lint fix (F541): the expected message contains no placeholders, so the
    # f-string prefix was meaningless and has been dropped.
    assert '(unknown) is not a string representing a path' == str(exc_info.value)
def test_method_updates_config_when_passing_valid_python_file(config):
    # Only UPPERCASE names from the python file end up in the config; the
    # lowercase 'c' must be ignored.
    content_lines = ['A = "foo"', 'B = "bar"', 'c = "char"']
    path = Path(tempdir) / 'test.py'
    path.write_text('\n'.join(content_lines))
    return_value = config.load_from_python_file(f'{path}')
    assert return_value is True
    assert 'foo' == config['A']
    assert 'bar' == config['B']
    assert 'c' not in config
def test_method_raises_error_when_filename_is_not_a_valid_python_file(config, tempdir):
    # A file whose content is not valid python must raise DecodeError with a
    # message naming the file.
    path = Path(tempdir) / 'foo.txt'
    path.write_text('hello world!')
    with pytest.raises(DecodeError) as exc_info:
        config.load_from_python_file(f'{path}')
    assert f'{path} is not well python formatted' == str(exc_info.value)
| StarcoderdataPython |
4811466 | <gh_stars>1-10
from typing import Any
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils.translation import gettext_lazy as _
class TimestampedModel(models.Model):
    """
    An abstract class to be extended to add timestamp to models
    """
    # auto_now_add will set the timezone.now() only when the instance is created
    created_at = models.DateTimeField(
        _("datetime when model is created"), auto_now_add=True
    )
    # auto_now will update the field everytime the save method is called.
    # NOTE(review): the field is named "update_at" (sic); renaming it to
    # "updated_at" would require a database migration, so it is kept as-is.
    update_at = models.DateTimeField(
        _("datetime when model is updated last time"), auto_now=True
    )
    class Meta:
        abstract = True
class ModelWithMetadata(models.Model):
    """
    An abstract base model adding a free-form JSON ``metadata`` field plus
    helpers to read, write, append and delete values in it.
    """
    metadata = JSONField(
        _("used to store metadata"), blank=True, null=True, default=dict
    )
    class Meta:
        abstract = True
    def get_value_from_metadata(self, key, default: Any = None) -> Any:
        """Return ``metadata[key]`` or *default* when the key is absent."""
        return self.metadata.get(key, default)
    def store_value_in_metadata(self, items: dict):
        """Merge *items* into metadata, initialising it first when empty/None."""
        if not self.metadata:
            self.metadata = {}
        self.metadata.update(items)
    def append_value_in_metadata(self, key: str, value):
        """Append *value* to the list stored under *key*.

        A missing key creates a one-element list; an existing non-list value
        is promoted to a list holding the old value and the new one.
        """
        # BUG FIX: guard against metadata being None before the membership
        # test (the original comment claimed this case was handled, but
        # ``key not in None`` raises TypeError).
        if not self.metadata or key not in self.metadata:
            self.store_value_in_metadata({key: [value]})
        elif not isinstance(self.get_value_from_metadata(key), list):
            # BUG FIX: the original called ``list(old, value)`` — list() takes
            # a single iterable, so this always raised TypeError.
            value_list = [self.get_value_from_metadata(key), value]
            self.store_value_in_metadata({key: value_list})
        else:
            # In-place append; the list object already lives inside metadata.
            self.get_value_from_metadata(key).append(value)
    def clear_metadata(self):
        """Reset metadata to an empty dict."""
        self.metadata = {}
    def delete_value_from_metadata(self, key: str):
        """Remove *key* from metadata if present (no-op otherwise)."""
        if key in self.metadata:
            del self.metadata[key]
| StarcoderdataPython |
181954 | import unittest
from attacking_queens.board import ChessBoard, WhiteQueen
from attacking_queens.board import BlackQueen
from attacking_queens.exceptions import BadQueenPlacementException
class PlacingQueensTests(unittest.TestCase):
    """Queens may only be placed on squares of their own colour, and never
    stacked on an occupied square; violations raise BadQueenPlacementException."""
    def setUp(self):
        # A fresh 5x5 board for every test.
        self.board = ChessBoard(size=5)
    def test_place_black_queen_on_black_place(self):
        black_places = self.board.black_places[0]
        self.board.place_black_queen(black_places)
        self.assertEqual(self.board.black_queens(), [BlackQueen(row=0, column=0)])
    def test_place_black_queen_on_white_place_should_raise_exception(self):
        white_place = self.board.white_places[0]
        with self.assertRaises(BadQueenPlacementException):
            self.board.place_black_queen(white_place)
    def test_place_white_queen_on_white_place(self):
        white_places = self.board.white_places[0]
        self.board.place_white_queen(white_places)
        self.assertEqual(self.board.white_queens(), [WhiteQueen(row=0, column=1)])
    def test_place_white_queen_on_black_place_should_raise_exception(self):
        black_place = self.board.black_places[0]
        with self.assertRaises(BadQueenPlacementException):
            self.board.place_white_queen(black_place)
    def test_place_two_black_queens_on_same_place_should_raise_exception(self):
        black_queen = BlackQueen(row=0, column=0)
        self.board.place_black_queen(black_queen)
        with self.assertRaises(BadQueenPlacementException):
            self.board.place_black_queen(black_queen)
    def test_place_two_white_queens_on_same_place_should_raise_exception(self):
        white_queen = WhiteQueen(row=0, column=1)
        self.board.place_white_queen(white_queen)
        with self.assertRaises(BadQueenPlacementException):
            self.board.place_white_queen(white_queen)
    def test_place_two_attacking_queens_opposite_color(self):
        # NOTE(review): deliberately unfinished placeholder test — it always
        # fails until the attacking-queens behaviour is implemented.
        self.fail("Work for tomorrow")
| StarcoderdataPython |
185539 | from xgboost_ray.tests.utils import create_parquet
def main():
    # Generate a synthetic 1M-row, 100-partition parquet dataset with 8
    # features and 2 classes, as used by the xgboost_ray examples.
    create_parquet(
        "example.parquet",
        num_rows=1_000_000,
        num_partitions=100,
        num_features=8,
        num_classes=2)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1725684 | temp = float(input('Digite a temperatura '))
k = 273.15 + temp
f = (temp * 9/5) + 32
# BUG FIX: the Fahrenheit and Kelvin values were swapped in the output — the
# format args were (temp, k, f), printing Kelvin under the Fahrenheit label
# and vice versa.
print('A temperatura digitada é de {}ºC \n Fahrenheit {}ºF \n Kelvin {}K'.format(temp, f, k))
29823 | <reponame>dwpaley/cctbx_project
from __future__ import absolute_import, division, print_function
from libtbx import test_utils
import libtbx.load_env
#tst_list = [
# "$D/regression/tst_py_from_html.py"
# ]
# Regression scripts executed by the cctbx test runner ($D expands to the
# cctbx_website dist directory).
tst_list = [
  "$D/regression/tst_1_template.py",
  "$D/regression/tst_2_doc_high_level_objects.py",
  "$D/regression/tst_3_doc_model_manager.py",
  "$D/regression/tst_4_doc_data_manager.py",
  "$D/regression/tst_5_doc_map_manager.py",
  "$D/regression/tst_6_doc_model_map_manager.py",
  ]
def run():
  """Run every script in tst_list against the built cctbx_website module."""
  build_dir = libtbx.env.under_build("cctbx_website")
  dist_dir = libtbx.env.dist_path("cctbx_website")
  test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
  run()
| StarcoderdataPython |
3371505 | import os
import pickle
import numpy as np
from matplotlib import pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
def dim_reduc_protocol(pickle_filepath, plot_file_name):
    """
    Run the feature-selection dimensionality-reduction protocol on a pickled
    dataset: rank features with an ExtraTrees classifier, keep the better
    half, save a bar plot of their importances and pickle the reduced dataset.

    :param str pickle_filepath: Path of the input pickle holding (x, y)
    :param str plot_file_name: Base name (no extension) for plot/pickle outputs
    """
    ####################################################################################################################
    #                                               LOAD THE DATASET                                                   #
    ####################################################################################################################
    # x is a pandas DataFrame of features, y the list of labels.
    with open(pickle_filepath, 'rb') as file:
        x, y = pickle.load(file)
    # Keep the best half of the attributes.
    attribute_number_to_select = int(len(x.columns) / 2)
    ####################################################################################################################
    #                                 REDUCE THE DIMENSIONALITY BY SELECTING FEATURES                                  #
    ####################################################################################################################
    # Fit an ExtraTrees classifier purely to obtain per-feature importances.
    classifier_model = ExtraTreesClassifier(n_estimators=50)
    classifier_model = classifier_model.fit(x, y)
    importance_scores = classifier_model.feature_importances_
    # Sort features by decreasing importance and select the top half.
    indices = np.argsort(importance_scores)[::-1]
    columns_to_select = x.columns[indices[0:attribute_number_to_select]]
    new_dataset = x[columns_to_select]
    # Plot the selected features and their importance scores.
    plt.bar(x=np.arange(attribute_number_to_select),
            height=importance_scores[indices[0:attribute_number_to_select]],
            tick_label=columns_to_select)
    plt.title(f"Feature Importances - Sum = {str(np.sum(importance_scores[indices[0:attribute_number_to_select]]))}")
    plt.xlabel("Selected features")
    plt.xticks(rotation=90)
    plt.ylabel("Importance score")
    plt.tight_layout()
    # Save the plot.
    plot_path = "../Files/Out/Plots/"
    # BUG FIX: the existence check previously omitted the ".png" suffix, so
    # the stale plot file was never actually removed before saving.
    if os.path.isfile(plot_path + plot_file_name + ".png"):
        os.remove(plot_path + plot_file_name + ".png")
    plt.savefig(plot_path + plot_file_name + ".png")
    # Show the results.
    plt.show()
    plt.close()
    # Save the reduced dataset (local renamed so it no longer shadows the
    # pickle_filepath input parameter).
    pickle_dir = "../Files/Out/Pickles/"
    with open(os.path.join(pickle_dir, plot_file_name + ".pickle"), "wb") as f:
        pickle.dump([new_dataset, y], f)
if __name__ == '__main__':
    # Run the protocol on the two Delhi climate datasets with different
    # output names.
    dim_reduc_protocol("../Files/Out/Pickles/DailyDelhiClimate1.pkl", "DimReduction30-7")
    dim_reduc_protocol("../Files/Out/Pickles/DailyDelhiClimate2.pkl", "DimReduction20-5")
| StarcoderdataPython |
195890 | <reponame>fding/pyedifice
import logging
# Support for colored logging: https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# Foreground colors are 30 + color index, backgrounds 40 + color index.
# These are the escape sequences needed to produce colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
    """Expand the $RESET/$BOLD placeholders in *message*.

    When *use_color* is true the placeholders become ANSI escape sequences;
    otherwise they are stripped out entirely.
    """
    reset, bold = (RESET_SEQ, BOLD_SEQ) if use_color else ("", "")
    return message.replace("$RESET", reset).replace("$BOLD", bold)
# Per-level foreground colors (indexes into the ANSI palette defined above).
# NOTE(review): WARNING maps to RED and CRITICAL to YELLOW — the reverse of
# the usual convention; confirm this is intentional.
COLORS = {
    'WARNING': RED,
    'INFO': GREEN,
    'DEBUG': BLUE,
    'CRITICAL': YELLOW,
    'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
    """logging.Formatter that wraps level names (and ERROR messages) in ANSI colors."""
    def __init__(self, msg, date_msg, use_color = True):
        logging.Formatter.__init__(self, msg, date_msg)
        self.use_color = use_color
    def format(self, record):
        # NOTE(review): record.levelname / record.msg are mutated in place, so
        # any other handler formatting the same record afterwards sees the
        # colorized text.
        levelname = record.levelname
        if self.use_color and levelname in COLORS:
            levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
            record.levelname = levelname_color
            if levelname == "ERROR":
                # Error messages are colored in full, not just their level name.
                record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + record.msg + RESET_SEQ
        return logging.Formatter.format(self, record)
# NOTE: 30 + BLACK renders as dark gray on most terminals.
GRAY_SEQ = COLOR_SEQ % (30 + BLACK)
FORMAT = formatter_message(f"[$BOLD%(name)s$RESET|%(levelname)s] {GRAY_SEQ}%(asctime)s.%(msecs)03d{RESET_SEQ}: %(message)s")
# Wire a stream handler with the colored formatter onto the package logger.
handler = logging.StreamHandler()
handler.setFormatter(ColoredFormatter(FORMAT, "%Y-%m-%d %H:%M:%S"))
logger = logging.getLogger("Edifice")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
| StarcoderdataPython |
94878 | import config
import random
import datetime, time
import math
def execute(parser, bot, user, args):
    """Reply to *user* with the current slots jackpot read from the slotspool table."""
    slotsPool = bot.execQuerySelectOne("SELECT * FROM slotspool")
    bot.addressUser(user, "The current slots jackpot is %d %s." % (slotsPool["slotspool"], config.currencyPlural))
def requiredPerm():
    """Permission level needed to run this command: anyone may use it."""
    return "anyone"
def canUseByWhisper():
    """This command may be invoked from a whisper."""
    return True
3317115 | from .nhl_api import nhl
| StarcoderdataPython |
135577 | <reponame>estradjm/Class_Work
#!/bin/python
'''
treeds.py for tree data structure, taken from StackOverflow at https://stackoverflow.com/questions/2358045/how-can-i-implement-a-tree-in-python-are-there-any-built-in-data-structures-in
'''
class Node:
    """A single binary-tree node holding a value and two child links."""
    def __init__(self, value):
        """Create a leaf node storing *value* with no children."""
        self.left = None
        self.data = value
        self.right = None
class Tree:
    """A binary search tree built from Node objects, with utility helpers."""
    def createNode(self, data):
        """Return a fresh Node wrapping *data*."""
        return Node(data)
    def insert(self, node, data):
        """Insert *data* under subtree *node*; return the subtree's root.

        Duplicate keys are not allowed: inserting an existing value leaves
        the tree unchanged.
        """
        # An empty subtree simply becomes a single new node.
        if node is None:
            return self.createNode(data)
        if data != node.data:
            # Descend left for smaller keys, right for larger ones.
            side = 'left' if data < node.data else 'right'
            setattr(node, side, self.insert(getattr(node, side), data))
        return node
| StarcoderdataPython |
65502 | <reponame>DevikalyanDas/Multiclass-Segmentation
import torch
def _threshold(x, threshold=None):
if threshold is not None:
return (x > threshold).type(x.dtype)
else:
return x
def make_one_hot(labels, classes):
    """Convert (N, 1, H, W) integer labels into a (N, classes, H, W) one-hot float tensor."""
    batch, _, height, width = labels.size()
    canvas = torch.zeros(batch, classes, height, width,
                         dtype=torch.float, device=labels.device)
    # Write a 1 at each pixel's class index along the channel dimension.
    return canvas.scatter_(1, labels.data, 1)
class IoU(object):
    """Intersection-over-Union (Jaccard) score for multi-class segmentation.

    ``__call__`` takes raw logits ``y_pr`` and integer labels ``y_gt`` —
    presumably shaped (N, C, H, W) and (N, H, W) given the dim-1 softmax and
    the unsqueeze/one-hot below; confirm against the calling code.
    """
    def __init__(self, eps=1e-7, threshold=0.5, activation=None,classes=19):
        # NOTE(review): the ``activation`` argument is accepted but ignored —
        # a Softmax over the class dimension is always applied.
        self.eps = eps
        self.threshold = threshold
        self.activation = torch.nn.Softmax(dim=1)
        self.classes = classes
    def __call__(self, y_pr, y_gt):
        y_pr = self.activation(y_pr)
        # Binarize the class probabilities at the threshold.
        y_pr = _threshold(y_pr, threshold=self.threshold)
        y_gt = make_one_hot(y_gt.long().unsqueeze(dim=1),self.classes)
        intersection = torch.sum(y_gt * y_pr)
        # eps keeps the ratio defined for empty masks.
        union = torch.sum(y_gt) + torch.sum(y_pr) - intersection + self.eps
        score = (intersection + self.eps) / union
        return score.item()
class Fscore(object):
    """F-beta score over thresholded softmax predictions vs one-hot labels.

    beta=1 gives the classic F1 (Dice) score.
    """
    def __init__(self, beta=1, eps=1e-7, threshold=0.5, activation=None,classes=19):
        # NOTE(review): ``activation`` is accepted but ignored — Softmax over
        # dim 1 is always applied.
        self.eps = eps
        self.beta = beta
        self.threshold = threshold
        self.activation = torch.nn.Softmax(dim=1)
        self.classes = classes
    def __call__(self, y_pr, y_gt):
        y_pr = self.activation(y_pr)
        y_pr = _threshold(y_pr, threshold=self.threshold)
        y_gt = make_one_hot(y_gt.long().unsqueeze(dim=1),self.classes)
        # Global (micro-averaged) counts over all classes and pixels.
        tp = torch.sum(y_gt * y_pr)
        fp = torch.sum(y_pr) - tp
        fn = torch.sum(y_gt) - tp
        score = ((1 +self.beta ** 2) * tp + self.eps) \
            / ((1 + self.beta ** 2) * tp + self.beta ** 2 * fn + fp + self.eps)
        return score.item()
class Accuracy(object):
    """Fraction of matching entries between the one-hot target and the
    thresholded prediction grid."""

    def __init__(self, threshold=0.5, activation=None, classes=19):
        # `activation` is accepted for signature compatibility, but a
        # softmax over the class dimension is always used.
        self.threshold = threshold
        self.activation = torch.nn.Softmax(dim=1)
        self.classes = classes

    def __call__(self, y_pr, y_gt):
        probs = _threshold(self.activation(y_pr), threshold=self.threshold)
        target = make_one_hot(y_gt.long().unsqueeze(dim=1), self.classes)
        # Matches are counted over the full one-hot grid, so per-class true
        # negatives contribute as well.
        matches = torch.sum(target == probs, dtype=probs.dtype)
        return (matches / target.view(-1).shape[0]).item()
class Sensitivity(object):
    """Sensitivity / recall: TP / (TP + FN) over the one-hot grid."""

    def __init__(self, eps=1e-7, activation=None, threshold=0.5, classes=19):
        # `activation` is accepted for signature compatibility, but a
        # softmax over the class dimension is always used.
        self.eps = eps
        self.threshold = threshold
        self.activation = torch.nn.Softmax(dim=1)
        self.classes = classes

    def __call__(self, y_pr, y_gt):
        probs = _threshold(self.activation(y_pr), threshold=self.threshold)
        target = make_one_hot(y_gt.long().unsqueeze(dim=1), self.classes)
        tp = torch.sum(target * probs)
        fn = torch.sum(target) - tp
        return ((tp + self.eps) / (tp + fn + self.eps)).item()
class Specificity(object):
    """Specificity: TN / (TN + FP) over the one-hot grid."""

    def __init__(self, eps=1e-7, activation=None, threshold=0.5, classes=19):
        # `activation` is accepted for signature compatibility, but a
        # softmax over the class dimension is always used.
        self.eps = eps
        self.threshold = threshold
        self.activation = torch.nn.Softmax(dim=1)
        self.classes = classes

    def __call__(self, y_pr, y_gt):
        probs = _threshold(self.activation(y_pr), threshold=self.threshold)
        target = make_one_hot(y_gt.long().unsqueeze(dim=1), self.classes)
        tp = torch.sum(target * probs)
        # Equal entries include the true positives; subtract them to keep
        # only the true negatives.
        tn = torch.sum(target == probs, dtype=probs.dtype) - tp
        fp = torch.sum(probs) - tp
        return ((tn + self.eps) / (tn + fp + self.eps)).item()
3273120 | <reponame>barnjamin/py-algorand-sdk
from algosdk.abi.uint_type import UintType
from algosdk.abi.ufixed_type import UfixedType
from algosdk.abi.base_type import ABIType
from algosdk.abi.bool_type import BoolType
from algosdk.abi.byte_type import ByteType
from algosdk.abi.address_type import AddressType
from algosdk.abi.string_type import StringType
from algosdk.abi.array_dynamic_type import ArrayDynamicType
from algosdk.abi.array_static_type import ArrayStaticType
from algosdk.abi.tuple_type import TupleType
from .method import Method, Argument, Returns
from .interface import Interface
from .contract import Contract
# Sub-package identifier; presumably used by the SDK's registration
# machinery elsewhere -- confirm before relying on it.
name = "abi"
| StarcoderdataPython |
55970 | <filename>fcn.py
import numpy as np
import random
import pandas as pd
def remove_outlier(feature, name, data):
    """Drop rows of *data* whose column *name* falls outside the 1.5*IQR
    fence computed from *feature*.

    Returns a new DataFrame; *data* itself is not modified.
    """
    q1 = np.percentile(feature, 25)
    q3 = np.percentile(feature, 75)
    fence = 1.5 * (q3 - q1)
    lower_limit = q1 - fence
    upper_limit = q3 + fence
    outliers = (data[name] > upper_limit) | (data[name] < lower_limit)
    return data.drop(data[outliers].index)
def test_train_split(data: pd.DataFrame, test_ratio):
if test_ratio > 1 or test_ratio < 0:
return
N = data.shape[0]
test_amount = int(test_ratio*N)
test_indices = random.sample(range(N), test_amount)
test_data = data.iloc[test_indices].reset_index(drop=True)
train_data = data.drop(test_indices).reset_index(drop=True)
return train_data, test_data
def confusion_matrix(real, pred, show=True, ret=True):
    """Build the 2x2 binary confusion matrix [[TP, FP], [FN, TN]].

    Optionally prints it (*show*) and/or returns it (*ret*).
    """
    actual_pos = real == 1
    actual_neg = real == 0
    predicted_pos = pred == 1
    predicted_neg = pred == 0
    TP = np.sum(np.logical_and(actual_pos, predicted_pos))
    TN = np.sum(np.logical_and(actual_neg, predicted_neg))
    FN = np.sum(np.logical_and(actual_pos, predicted_neg))
    FP = np.sum(np.logical_and(actual_neg, predicted_pos))
    matrix = np.array([[TP, FP], [FN, TN]])
    if show:
        print(" 1\t0 (actual)")
        print("1\t", matrix[0, 0], "\t", matrix[0, 1], sep="")
        print("0\t", matrix[1, 0], "\t", matrix[1, 1], sep="")
    if ret:
        return matrix
    return
| StarcoderdataPython |
1702135 | <filename>Chapter 02/Chap02_Example2.59.py<gh_stars>0
# Demonstrate str.upper() on a variety of inputs; the printed output matches
# the original UO1..UO5 examples in the same order.
samples = [
    "welcome <NAME>",             # -- UO1
    "PYTHON",                     # -- UO2
    "be@UTIfull",                 # -- UO3
    "I love python language!:)",  # -- UO4
    'th3ree',                     # -- UO5
]
for text in samples:
    print(text.upper())
| StarcoderdataPython |
101686 | <filename>examples/hello.py
import ppytty

# Minimal example: a single label widget showing a greeting.  The
# `ppytty_task` name is presumably what the ppytty runner looks up when
# executing this file -- confirm against the ppytty docs.
ppytty_task = ppytty.Label('Hello world!')
| StarcoderdataPython |
1600442 | <gh_stars>0
# Read unique integers from the user until they answer 'n', then print the
# sorted list.
valores = []
while True:
    v = int(input('digite os valores: '))
    # Bug fix: record the value *before* asking whether to continue.  The
    # original asked first and broke out of the loop immediately on 'n',
    # which silently discarded the last value the user had just typed.
    if v not in valores:
        valores.append(v)
        print('numero adicionado com sucesso...')
    else:
        print('numero duplicado, não irei adicionar...')
    c = input('quer continuar? [s/n]: ')
    if c == 'n':
        break
valores.sort()
print('—' * 40)
print(f'A lista em ordem crescente é {valores}')
161880 | from django.urls import path
from . import views
from qa.views import UserAnswerList, UserQuestionList
# Namespace used when reversing these routes, e.g. "user_profile:profile".
app_name = "user_profile"

urlpatterns = [
    # Email-verification link; uidb64 and token come from the activation mail.
    path("activate/<uidb64>/<token>/", views.EmailVerify.as_view(), name="activate"),
    # Public profile page.
    path("<int:id>/<str:username>/", views.profile, name="profile"),
    # Profile editing form.
    path(
        "<int:user_id>/<str:user_name>/edit", views.ProfileEdit.as_view(), name="edit"
    ),
    # Avatar image upload endpoint.  NOTE(review): the view name
    # "ProfileImageUplade" looks like a typo carried over from the views
    # module; renaming must happen there first.
    path("avatar/upload/", views.ProfileImageUplade.as_view(), name="avatar_upload"),
    # Per-user question/answer listings are served by the qa app's views.
    path(
        "<int:user_id>/<str:user_name>/questions",
        UserQuestionList.as_view(),
        name="user_questions_list",
    ),
    path(
        "<int:user_id>/<str:user_name>/answers",
        UserAnswerList.as_view(),
        name="user_answers_list",
    ),
    # Directory of all users.
    path("list/", views.UsersList.as_view(), name="user_list"),
]
| StarcoderdataPython |
3292224 | <gh_stars>0
# Module metadata.
__title__ = 'DPT detail extractor'
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__date__ = '2018-07-30'
__version__ = 1.0
#%% Load Packages
import numpy as np
from SignificantFeatures import SignificantFeatures
from TextureExtraction import Textures
from RoadmakersPavage import RP_DPT
def Extracter(Image, Neigh, alpha, beta):
    """Run the DPT detail-extraction pipeline.

    Builds the Roadmakers Pavage transform of *Image* with neighbourhood
    *Neigh*, then derives significant key points (controlled by *alpha*)
    and a texture pulse map (controlled by *beta*).

    Returns:
        tuple: (PulseMap, KeyPoints).
    """
    transform = RP_DPT(Image, Neigh)
    key_points = SignificantFeatures(transform).SigFeats(alpha)
    pulse_map = Textures(transform).DetectDetails(beta)
    return (pulse_map, key_points)
463 | <gh_stars>0
#encoding=utf-8
import qlib
import pandas as pd
import pickle
import xgboost as xgb
import numpy as np
import re
from qlib.constant import REG_US
from qlib.utils import exists_qlib_data, init_instance_by_config
from qlib.workflow import R
from qlib.workflow.record_temp import SignalRecord, PortAnaRecord
from qlib.utils import flatten_dict
from qlib.data import LocalExpressionProvider
from qlib.data.ops import Operators, OpsList
from qlib.data.base import Feature
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Grid
from my_data_handler import MyAlphaHandler
# Previous model artifact, kept for reference:
# model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model'
model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model'
# Load the model pickled by the qlib workflow run above.  NOTE(review):
# pickle.load is only acceptable because this artifact is produced by our
# own local training run -- never point this at untrusted files.
with open(model_file, 'rb') as fi:
    model = pickle.load(fi)
# Alpha-factor expressions and the column names they map to.
exprs, columns = MyAlphaHandler.get_custom_config()
# Minute-bar TSLA data; 'time' becomes the sorted DatetimeIndex, and a
# pre-formatted string copy is kept in 'data_time' for chart labels.
raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time'])
raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00")
raw_data.set_index('time', inplace=True)
# 'vwap' is absent from the CSV; a NaN column is added so lookups succeed
# (presumably some expressions reference $vwap -- confirm).
raw_data["vwap"] = np.nan
raw_data.sort_index(inplace=True)
# print(raw_data)
class MyFeature(Feature):
    """Feature provider that serves columns from the in-memory `raw_data`
    frame instead of a qlib data store."""

    def _load_internal(self, instrument, start_index, end_index, freq):
        # Trace each load request, then return the requested time window of
        # the column named like this feature.
        print("load", self._name, instrument, start_index, end_index, freq)
        return raw_data.loc[start_index:end_index][self._name]

# Make the custom feature available to the expression engine alongside the
# standard operators.
Operators.register(OpsList + [MyFeature])
def my_parse_field(field):
    """Rewrite a qlib expression string into evaluable Python.

    Two passes: ``$name`` becomes ``MyFeature("name")``, then every call
    ``op(`` is qualified as ``Operators.op(`` (which also qualifies the
    MyFeature calls produced by the first pass).
    """
    text = field if isinstance(field, str) else str(field)
    substitutions = [
        (r"\$(\w+)", r'MyFeature("\1")'),       # Features
        (r"(\w+\s*)\(", r"Operators.\1("),      # Operators
    ]
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
# Evaluate every alpha expression over the study window and assemble the
# feature matrix (one column per expression).
obj = dict()
for field in exprs:
    expression = eval(my_parse_field(field))
    series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min")
    series = series.astype(np.float32)
    obj[field] = series
data = pd.DataFrame(obj)
data.columns = columns
# Single day being replayed and visualised below.
view_time_start = '2022-02-11'
view_time_end = '2022-02-12'
pre_data = raw_data.loc[view_time_start:view_time_end].copy()
pred=model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end]))
pre_data['pred_score'] = pred
records = pre_data.to_dict("records")
# --- naive long-only replay of the model's signal ---
cash = 50000
position = {}
hold_thresh = 5        # maximum number of bars to hold a position
score_thresh = 0.001   # minimum |pred_score| required to act
x_axises, y_axises, mark_points, money = [], [], [], []
for record in records:
    x_axises.append(record['data_time'])
    # Candle tuple order expected by pyecharts: open, close, low, high.
    y_axises.append([
        record['open'], record['close'], record['low'], record['high']
    ])
    if 'hold_cnt' in position:
        position['hold_cnt'] += 1
    # Exit on ~+1% take-profit, ~-0.5% stop, a negative signal, or the
    # maximum holding time; sells are marked with a green triangle.
    if position and (record['open'] >= position['close'] * 1.01 or record['open'] < position['close'] * 0.995 or record['pred_score'] < -score_thresh or position['hold_cnt'] >= hold_thresh):
        cash += position['amount'] * record['open']
        position = {}
        #print("sell")
        mark_points.append(opts.MarkPointItem(
            coord=[record['data_time'], record['high']],
            symbol='triangle', symbol_size=7,
            itemstyle_opts=opts.ItemStyleOpts(color="green")
        ))
    # Enter with all available cash when flat and the signal is positive;
    # buys are marked with a yellow arrow.
    elif record['pred_score'] > score_thresh and not position:
        position = dict(record)
        position['amount'] = int(cash / position['open'])
        cash -= position['amount'] * position['open']
        # buy
        #print("buy")
        position['hold_cnt'] = 0
        mark_points.append(opts.MarkPointItem(
            coord=[record['data_time'], record['high']],
            symbol='arrow', symbol_size=7,
            itemstyle_opts=opts.ItemStyleOpts(color="yellow")
        ))
    # Mark-to-market equity after this bar (cash + open position value).
    cur_money = cash
    if position:
        cur_money += position['amount'] * record['close']
    money.append(cur_money)
# Liquidate any open position at the final close and report final cash.
if position:
    cash += position['amount'] * records[-1]['close']
print("cash:", cash)
# Candlestick chart of the replayed day, annotated with the buy/sell marks
# collected during the replay loop.
kline_graph = (
    Kline()
    .add_xaxis(x_axises)
    .add_yaxis(
        "kline",
        y_axises,
        markpoint_opts=opts.MarkPointOpts(
            data=mark_points
        ),
    )
    .set_global_opts(
        xaxis_opts=opts.AxisOpts(is_scale=True),
        yaxis_opts=opts.AxisOpts(
            is_scale=True,
            splitarea_opts=opts.SplitAreaOpts(
                is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
            ),
        ),
        title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)),
        datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)],
    )
)
# Equity curve beneath the candles; the horizontal mark line at 50000 is
# the starting cash.
kline_line = (
    Line()
    .add_xaxis(xaxis_data=x_axises)
    .add_yaxis(
        series_name="cur_money",
        y_axis=money,
        is_smooth=True,
        linestyle_opts=opts.LineStyleOpts(opacity=0.5),
        label_opts=opts.LabelOpts(is_show=False),
        markline_opts=opts.MarkLineOpts(
            data=[opts.MarkLineItem(y=50000)]
        ),
    )
    .set_global_opts(
        xaxis_opts=opts.AxisOpts(
            type_="category",
            grid_index=2,
            axislabel_opts=opts.LabelOpts(is_show=False),
        ),
        yaxis_opts=opts.AxisOpts(
            min_='dataMin'
        )
    )
)
# Stack both charts on one 2000x900 canvas and render the HTML report.
grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px'))
grid_chart.add(
    kline_graph,
    grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"),
)
grid_chart.add(
    kline_line,
    grid_opts=opts.GridOpts(
        pos_left="3%", pos_right="10%", pos_top="60%", height="30%"
    ),
)
grid_chart.render("kline_markline.html")
191754 | #!/usr/bin/env python
# coding: utf-8
from baseframe import Plugin
class MyPlugin(Plugin):
    """Scanner plugin declaring regex rules for SQL-injection-related
    filter functions (presumably applied by the baseframe scanner to
    source files -- confirm in baseframe.Plugin)."""

    # Plugin metadata consumed by the framework.
    info = {
        'name': '<NAME>',
        'tag': 'sqli'
    }
    # Each rule pairs a description with a case-insensitive regex.
    rules = [
        {
            'desc': 'PHP常见SQLi过滤函数',
            'rule': (
                r'(?i)get_magic_quotes_gpc\(|intval\(|addslashes\(|strip_tags\(|'
                r'str_replace\(|mysql_real_escape_string\(|stripslashes\('
            )
        },
        {
            'desc': 'ASP常见XSS过滤函数',
            # Matches CheckSql( calls, or Replace('x', '\x'...) style
            # escaping via backreferences to the quote and char groups.
            'rule': (
                r'(?i)CheckSql\(|'
                r'Replace\((?P<quote>[\'"])(?P<char>.)(?P=quote),[ ]*(?P=quote)\\(?P=char)*(?P=quote)'
            )
        },
    ]
| StarcoderdataPython |
118703 | import logging
from rest_framework import serializers, exceptions
logger = logging.getLogger(__name__)
class ResourceTypeSerializer(serializers.Serializer):
    '''
    Serializer for describing the types of available Resources
    that users may choose.
    '''
    # Identifier key for the resource type (max 50 chars).
    resource_type_key = serializers.CharField(max_length=50)
    # Human-readable title and longer description.
    resource_type_title = serializers.CharField(max_length=250)
    resource_type_description = serializers.CharField(max_length=2000)
    # Arbitrary JSON example payload for this resource type.
    example = serializers.JSONField()
1791719 | <filename>server/commandcentre.py
class CommandCentre:
    """Tracks order progress and the state of the (single) delivery drone."""

    def __init__(self):
        # order_id -> human-readable status message
        self.order_status = {}
        # drone_id -> state string; drone 1 starts out idle
        self.drone_status = {1: "Idle"}

    def add_order(self, order_id):
        """Record a freshly placed order."""
        self.order_status[order_id] = "Order is placed and we are working on it."

    def completed_order(self, order_id):
        """Mark an order as finished."""
        self.order_status[order_id] = "Completed"

    def register_drone_trip_completed(self):
        """Return drone 1 to the idle state after a delivery trip."""
        self.drone_status[1] = "Idle"
1786537 | <reponame>yugangw-msft/AutoRest<filename>src/generator/AutoRest.Python.Tests/AcceptanceTests/dictionary_tests.py
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import os
from datetime import date, datetime, timedelta
from os.path import dirname, pardir, join, realpath
# Locate the generated "Expected/AcceptanceTests" output relative to this
# test file and make the BodyDictionary client importable.
cwd = dirname(realpath(__file__))
log_level = int(os.environ.get('PythonLogLevel', 30))  # 30 == logging.WARNING
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyDictionary"))
from msrest.exceptions import DeserializationError
from autorestswaggerbatdictionaryservice import AutoRestSwaggerBATdictionaryService
from autorestswaggerbatdictionaryservice.models import Widget, ErrorException
class DictionaryTests(unittest.TestCase):
    """Acceptance tests for the generated BodyDictionary swagger client,
    round-tripping dictionaries of primitive, composed and nested types
    against the local AutoRest test server."""

    @classmethod
    def setUpClass(cls):
        # One shared client for the whole class; the test server must
        # already be listening on localhost:3000.
        cls.client = AutoRestSwaggerBATdictionaryService(base_url="http://localhost:3000")
        return super(DictionaryTests, cls).setUpClass()

    def test_dictionary_primitive_types(self):
        """Round-trip dicts of bools, ints, longs, floats, doubles,
        strings, dates, datetimes, durations and byte arrays, including
        null-valued and malformed payloads."""
        # -- booleans --
        tfft = {"0":True, "1":False, "2":False, "3":True}
        self.assertEqual(tfft, self.client.dictionary.get_boolean_tfft())
        self.client.dictionary.put_boolean_tfft(tfft)
        invalid_null_dict = {"0":True, "1":None, "2":False}
        self.assertEqual(invalid_null_dict, self.client.dictionary.get_boolean_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_boolean_invalid_string()
        # -- integers --
        int_valid = {"0":1, "1":-1, "2":3, "3":300}
        self.assertEqual(int_valid, self.client.dictionary.get_integer_valid())
        self.client.dictionary.put_integer_valid(int_valid)
        int_null_dict = {"0":1, "1":None, "2":0}
        self.assertEqual(int_null_dict, self.client.dictionary.get_int_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_int_invalid_string()
        # -- longs --
        long_valid = {"0":1, "1":-1, "2":3, "3":300}
        self.assertEqual(long_valid, self.client.dictionary.get_long_valid())
        self.client.dictionary.put_long_valid(long_valid)
        long_null_dict = {"0":1, "1":None, "2":0}
        self.assertEqual(long_null_dict, self.client.dictionary.get_long_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_long_invalid_string()
        # -- floats --
        float_valid = {"0":0, "1":-0.01, "2":-1.2e20}
        self.assertEqual(float_valid, self.client.dictionary.get_float_valid())
        self.client.dictionary.put_float_valid(float_valid)
        float_null_dict = {"0":0.0, "1":None, "2":-1.2e20}
        self.assertEqual(float_null_dict, self.client.dictionary.get_float_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_float_invalid_string()
        # -- doubles --
        double_valid = {"0":0, "1":-0.01, "2":-1.2e20}
        self.assertEqual(double_valid, self.client.dictionary.get_double_valid())
        self.client.dictionary.put_double_valid(double_valid)
        double_null_dict = {"0":0.0, "1":None, "2":-1.2e20}
        self.assertEqual(double_null_dict, self.client.dictionary.get_double_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_double_invalid_string()
        # -- strings --
        string_valid = {"0":"foo1", "1":"foo2", "2":"foo3"}
        self.assertEqual(string_valid, self.client.dictionary.get_string_valid())
        self.client.dictionary.put_string_valid(string_valid)
        string_null_dict = {"0":"foo", "1":None, "2":"foo2"}
        string_invalid_dict = {"0":"foo", "1":"123", "2":"foo2"}
        self.assertEqual(string_null_dict, self.client.dictionary.get_string_with_null())
        self.assertEqual(string_invalid_dict, self.client.dictionary.get_string_with_invalid())
        # -- dates, datetimes (ISO and RFC1123) and durations --
        date1 = isodate.parse_date("2000-12-01T00:00:00Z")
        date2 = isodate.parse_date("1980-01-02T00:00:00Z")
        date3 = isodate.parse_date("1492-10-12T00:00:00Z")
        datetime1 = isodate.parse_datetime("2000-12-01T00:00:01Z")
        datetime2 = isodate.parse_datetime("1980-01-02T00:11:35+01:00")
        datetime3 = isodate.parse_datetime("1492-10-12T10:15:01-08:00")
        rfc_datetime1 = isodate.parse_datetime("2000-12-01T00:00:01Z")
        rfc_datetime2 = isodate.parse_datetime("1980-01-02T00:11:35Z")
        rfc_datetime3 = isodate.parse_datetime("1492-10-12T10:15:01Z")
        duration1 = timedelta(days=123, hours=22, minutes=14, seconds=12, milliseconds=11)
        duration2 = timedelta(days=5, hours=1)
        valid_date_dict = {"0":date1, "1":date2, "2":date3}
        date_dictionary = self.client.dictionary.get_date_valid()
        self.assertEqual(date_dictionary, valid_date_dict)
        self.client.dictionary.put_date_valid(valid_date_dict)
        date_null_dict = {"0":isodate.parse_date("2012-01-01"),
                          "1":None,
                          "2":isodate.parse_date("1776-07-04")}
        self.assertEqual(date_null_dict, self.client.dictionary.get_date_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_date_invalid_chars()
        valid_datetime_dict = {"0":datetime1, "1":datetime2, "2":datetime3}
        self.assertEqual(valid_datetime_dict, self.client.dictionary.get_date_time_valid())
        self.client.dictionary.put_date_time_valid(valid_datetime_dict)
        datetime_null_dict = {"0":isodate.parse_datetime("2000-12-01T00:00:01Z"), "1":None}
        self.assertEqual(datetime_null_dict, self.client.dictionary.get_date_time_invalid_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_date_time_invalid_chars()
        valid_rfc_dict = {"0":rfc_datetime1, "1":rfc_datetime2, "2":rfc_datetime3}
        self.assertEqual(valid_rfc_dict, self.client.dictionary.get_date_time_rfc1123_valid())
        self.client.dictionary.put_date_time_rfc1123_valid(valid_rfc_dict)
        valid_duration_dict = {"0":duration1, "1":duration2}
        self.assertEqual(valid_duration_dict, self.client.dictionary.get_duration_valid())
        self.client.dictionary.put_duration_valid(valid_duration_dict)
        # -- byte arrays and base64url-encoded bytes --
        bytes1 = bytearray([0x0FF, 0x0FF, 0x0FF, 0x0FA])
        bytes2 = bytearray([0x01, 0x02, 0x03])
        bytes3 = bytearray([0x025, 0x029, 0x043])
        bytes4 = bytearray([0x0AB, 0x0AC, 0x0AD])
        bytes_valid = {"0":bytes1, "1":bytes2, "2":bytes3}
        self.client.dictionary.put_byte_valid(bytes_valid)
        bytes_result = self.client.dictionary.get_byte_valid()
        self.assertEqual(bytes_valid, bytes_result)
        bytes_null = {"0":bytes4, "1":None}
        bytes_result = self.client.dictionary.get_byte_invalid_null()
        self.assertEqual(bytes_null, bytes_result)
        test_dict = {'0': 'a string that gets encoded with base64url'.encode(),
                     '1': 'test string'.encode(),
                     '2': 'Lorem ipsum'.encode()}
        self.assertEqual(self.client.dictionary.get_base64_url(), test_dict)

    def test_basic_dictionary_parsing(self):
        """Empty, null, invalid and oddly-keyed dictionary payloads."""
        self.assertEqual({}, self.client.dictionary.get_empty())
        self.client.dictionary.put_empty({})
        self.assertIsNone(self.client.dictionary.get_null())
        with self.assertRaises(DeserializationError):
            self.client.dictionary.get_invalid()
        # {null:"val1"} is not standard JSON format (JSON require key as string. Should we skip this case
        #self.assertEqual({"None":"val1"}, self.client.dictionary.get_null_key())
        self.assertEqual({"key1":None}, self.client.dictionary.get_null_value())
        self.assertEqual({"":"val1"}, self.client.dictionary.get_empty_string_key())

    def test_dictionary_composed_types(self):
        """Dicts of Widget models, of lists and of nested dicts, including
        null and empty items."""
        test_product1 = Widget(integer=1, string="2")
        test_product2 = Widget(integer=3, string="4")
        test_product3 = Widget(integer=5, string="6")
        test_dict = {"0":test_product1, "1":test_product2, "2":test_product3}
        self.assertIsNone(self.client.dictionary.get_complex_null())
        self.assertEqual({}, self.client.dictionary.get_complex_empty())
        self.client.dictionary.put_complex_valid(test_dict)
        complex_result = self.client.dictionary.get_complex_valid()
        self.assertEqual(test_dict, complex_result)
        list_dict = {"0":["1","2","3"], "1":["4","5","6"], "2":["7","8","9"]}
        self.client.dictionary.put_array_valid(list_dict)
        array_result = self.client.dictionary.get_array_valid()
        self.assertEqual(list_dict, array_result)
        dict_dict = {"0":{"1":"one","2":"two","3":"three"},
                     "1":{"4":"four","5":"five","6":"six"},
                     "2":{"7":"seven","8":"eight","9":"nine"}}
        self.client.dictionary.put_dictionary_valid(dict_dict)
        dict_result = self.client.dictionary.get_dictionary_valid()
        self.assertEqual(dict_dict, dict_result)
        self.assertIsNone(self.client.dictionary.get_complex_null())
        self.assertEqual({}, self.client.dictionary.get_complex_empty())
        test_dict2 = {"0":test_product1, "1":None, "2":test_product3}
        complex_result = self.client.dictionary.get_complex_item_null()
        self.assertEqual(complex_result, test_dict2)
        test_dict3 = {"0":test_product1, "1":Widget(), "2":test_product3}
        complex_result = self.client.dictionary.get_complex_item_empty()
        self.assertEqual(complex_result, test_dict3)
        self.assertIsNone(self.client.dictionary.get_array_null())
        self.assertEqual({}, self.client.dictionary.get_array_empty())
        list_dict = {"0":["1","2","3"], "1":None, "2":["7","8","9"]}
        array_result = self.client.dictionary.get_array_item_null()
        self.assertEqual(list_dict, array_result)
        list_dict = {"0":["1","2","3"], "1":[], "2":["7","8","9"]}
        array_result = self.client.dictionary.get_array_item_empty()
        self.assertEqual(list_dict, array_result)
        self.assertIsNone(self.client.dictionary.get_dictionary_null())
        self.assertEqual({}, self.client.dictionary.get_dictionary_empty())
        dict_dict = {"0":{"1":"one","2":"two","3":"three"},
                     "1":None,
                     "2":{"7":"seven","8":"eight","9":"nine"}}
        dict_result = self.client.dictionary.get_dictionary_item_null()
        self.assertEqual(dict_dict, dict_result)
        dict_dict = {"0":{"1":"one","2":"two","3":"three"},
                     "1":{},
                     "2":{"7":"seven","8":"eight","9":"nine"}}
        dict_result = self.client.dictionary.get_dictionary_item_empty()
        self.assertEqual(dict_dict, dict_result)
# Allow running this module directly with the default unittest runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
72111 | <gh_stars>1-10
"""
Created by auto_sdk on 2019.05.06
"""
from aliexpress.api.base import RestApi
class AliexpressSolutionProductSchemaGetRequest(RestApi):
    """Request wrapper for the aliexpress.solution.product.schema.get TOP
    API (auto-generated SDK style)."""

    def __init__(self, domain="gw.api.taobao.com", port=80):
        RestApi.__init__(self, domain, port)
        # Category whose product schema is requested; set by the caller
        # before sending the request.
        self.aliexpress_category_id = None

    def getapiname(self):
        """Return the gateway method name for this request."""
        return "aliexpress.solution.product.schema.get"
| StarcoderdataPython |
1709398 | <filename>src/mastermind/game/board.py<gh_stars>0
import random
class Board:
    """Gameboard for a Mastermind game: creates the secret four-digit code,
    validates guesses, builds hint strings, and tracks each player's
    latest guess and hint.

    Stereotype:
        Service Provider, Interfacer

    Attributes:
        _items (dict): player name -> [code, last_guess, last_hint].
            NOTE(review): nothing in this class populates _items; a setup
            step elsewhere must add entries before update_board /
            info_to_display are called -- confirm with the Director.
    """

    def __init__(self):
        """The class constructor.

        Args:
            self (Board): an instance of Board.
        """
        self._items = {}

    def generate_code(self):
        """Return a random four-digit code as a string.

        Bug fix: random.randint is inclusive on BOTH ends, so the upper
        bound must be 9999 -- the previous bound of 10000 could produce
        the five-digit code "10000", which no valid four-digit guess
        could ever match.

        Returns:
            string: a code in the range "1000".."9999".
        """
        return str(random.randint(1000, 9999))

    def validate_guess(self, guess):
        """Verify that *guess* is a four-digit integer string.

        Args:
            self (Board): an instance of Board.
            guess (string): the guess that was made (may be None).

        Returns:
            bool: True when the guess is exactly four digits.
        """
        if guess is None:
            return False
        return guess.isdigit() and len(guess) == 4

    def create_hint(self, code, guess):
        """Generate a hint comparing *guess* against *code*.

        Per position: 'x' = correct digit in the correct place, 'o' =
        digit appears elsewhere in the code, '*' = digit not in the code.

        Args:
            self (Board): an instance of Board.
            code (string): the code to compare with.
            guess (string): the guess that was made.

        Returns:
            string: a hint in the form [xxxx].
        """
        hint = ""
        for index, letter in enumerate(guess):
            if code[index] == letter:
                hint += "x"
            elif letter in code:
                hint += "o"
            else:
                hint += "*"
        return hint

    def update_board(self, player, guess):
        """Store *guess* and its freshly computed hint for *player*.

        Args:
            self (Board): an instance of Board.
            player (Player): an instance of Player.
            guess (string): the guess that was made.

        Returns:
            None.
        """
        name = player.get_name()
        code = self._items[name][0]
        self._items[name][1] = guess
        self._items[name][2] = self.create_hint(code, guess)

    def info_to_display(self, player):
        """Return the player's latest guess and hint for display.

        Args:
            self (Board): an instance of Board.
            player (Player): an instance of Player.

        Returns:
            tuple(string, string): (last guess, last hint).
        """
        name = player.get_name()
        # Returns guess and hint.
        return self._items[name][1], self._items[name][2]
| StarcoderdataPython |
3384749 | <reponame>monasca/dbuild
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from dbuild.docker_utils import (ARG_VARIANT, ARG_APPEND, ARG_TAG,
load_config, resolve_variants)
from dbuild.verb import verb
logger = logging.getLogger(__name__)
ARG_TYPES = [ARG_VARIANT, ARG_APPEND, ARG_TAG]
@verb('resolve', args=ARG_TYPES,
      description='tests variant resolver against args')
def resolve(global_args, verb_args, module, intents):
    """Resolve the variant set for *module* from its base config and print
    each variant tag with its full tags (Python 2 print syntax)."""
    base_config = load_config(global_args.base_path, module)
    variants = resolve_variants(verb_args, base_config)
    print 'resolved tags:', module
    for variant in variants:
        print ' %s' % variant['variant_tag']
        for tag in variant['tags']:
            print ' %s' % tag.full
        # Blank line between variants.
        print ''
| StarcoderdataPython |
52744 | <filename>0306_more_guest.py<gh_stars>0
#!/usr/bin/python
import sys
def main():
    """Print a house-warming invitation for every guest.

    Builds the final guest list exactly as before (three originals, two
    inserts, one append) and then greets each guest with a single loop
    instead of six copy-pasted, hard-coded indexed print calls -- the
    greetings stay correct even if the list changes size.
    """
    guest_lists = ['senthil', 'raj', 'ameen']
    print("Hi Everyone! I found a bigger dinner table. I would like to invite more people for Dinner.")
    guest_lists.insert(0, 'naveen')
    guest_lists.insert(2, 'prabhu')
    guest_lists.append('ragu')
    print("")
    # One greeting per guest, in list order.
    for guest in guest_lists:
        print("Hi " + guest.title() + ", We coridally invite you to my House Warming Party.")

if __name__ == '__main__':
    main()
    sys.exit(0)
| StarcoderdataPython |
4840218 | # Copyright (c) OpenMMLab. All rights reserved.
from mmocr.models.builder import RECOGNIZERS
from .encode_decode_recognizer import EncodeDecodeRecognizer
# Registers the SAR architecture with the mmocr recognizer registry; all
# behavior comes from the encode-decode base class (the subclass body is
# empty).
@RECOGNIZERS.register_module()
class SARNet(EncodeDecodeRecognizer):
    """Implementation of `SAR <https://arxiv.org/abs/1811.00751>`_"""
| StarcoderdataPython |
1745558 | <filename>src/third_party/wiredtiger/dist/style.py
#!/usr/bin/env python
# Check the style of WiredTiger C code.
from dist import source_files
import re, sys
# Complain if a function comment is missing.
def missing_comment():
    """Scan every source file for C function definitions and report those
    whose preceding block comment is absent or does not start with the
    conventional '/* <newline> * name -- <newline>' header
    (Python 2 print syntax)."""
    for f in source_files():
        # Generated files are exempt from the check.
        skip_re = re.compile(r'DO NOT EDIT: automatically built')
        # Group 1: the optional block comment; group 2: the function name
        # beginning the following definition line.
        func_re = re.compile(
            r'(/\*(?:[^\*]|\*[^/])*\*/)?\n\w[\w \*]+\n(\w+)', re.DOTALL)
        s = open(f, 'r').read()
        if skip_re.search(s):
            continue
        for m in func_re.finditer(s):
            if not m.group(1) or \
               not m.group(1).startswith('/*\n * %s --\n' % m.group(2)):
                print "%s:%d: missing comment for %s" % \
                    (f, s[:m.start(2)].count('\n'), m.group(2))
# Display lines that could be joined.
def lines_could_join():
    """Report call sites split across two lines that would fit within 80
    columns if joined (Python 2 print syntax).  Files containing inline
    assembly are skipped."""
    skip_re = re.compile(r'__asm__')
    # Group 1: an indented line ending in '('; group 2: the continuation.
    match_re = re.compile('(^[ \t].*\()\n^[ \t]*([^\n]*)', re.MULTILINE)
    for f in source_files():
        s = open(f, 'r').read()
        if skip_re.search(s):
            continue
        for m in match_re.finditer(s):
            # expandtabs so tab-indented code is measured in columns.
            if len(m.group(1).expandtabs()) + \
               len(m.group(2).expandtabs()) < 80:
                print f + ': lines may be combined: '
                print '\t' + m.group(1).lstrip() + m.group(2)
                print
# The comment check always runs when this script is invoked.
missing_comment()
# Don't display lines that could be joined by default; in some cases, the code
# isn't maintained by WiredTiger, or the line splitting enhances readability.
if len(sys.argv) > 1:
    lines_could_join()
| StarcoderdataPython |
3271733 | <filename>Sorting/inversions.py
def Count(array, n):
    """Count the inversions in array[:n] (pairs i < j with
    array[i] > array[j]) by divide and conquer.

    Total = inversions within the left half + inversions within the right
    half + "split" inversions crossing the midpoint.

    Fixes over the original:
      * ``n // 2`` keeps the slice index an int on Python 3 (``n / 2`` is
        a float there and breaks slicing),
      * ``n <= 1`` also covers the empty input, for which the original
        recursed forever.
    """
    if n <= 1:
        return 0
    mid = n // 2
    left = array[:mid]
    right = array[mid:n]
    # Recurse into each half, then add the cross-half contribution.
    return (Count(left, len(left))
            + Count(right, len(right))
            + CountSplitInversions(array, n))
def CountSplitInversions(array, n):
    """Count the "split" inversions of array[:n]: pairs (i, j) with i in
    the left half, j in the right half, and array[i] > array[j].

    Both halves are sorted locally (which does not change the number of
    split inversions) and merged; whenever a right element precedes a
    left one, it forms an inversion with every remaining left element.

    Fixes over the original:
      * equal elements no longer stall the merge -- the old code advanced
        neither pointer when left[i] == right[j] and therefore
        undercounted (e.g. [2, 1, 1, 3] returned 0 instead of 1),
      * the merge visits every element instead of only n - 1 of them,
      * ``n // 2`` keeps the slice index an int on Python 3,
      * the unused merged-output buffer ``d`` is gone -- only the count
        matters.
    """
    mid = n // 2
    left = sorted(array[:mid])
    right = sorted(array[mid:n])
    i = j = count = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            # Left element is in order: contributes no inversion.
            i += 1
        else:
            # right[j] jumps over every remaining left element.
            count += len(left) - i
            j += 1
    return count
| StarcoderdataPython |
162999 | <reponame>LinusU/t1-runtime<gh_stars>0
{
"includes": [
"common.gypi",
],
"targets": [
{
"target_name": "colony-lua",
"product_name": "colony-lua",
"type": "static_library",
"defines": [
'LUA_USELONGLONG',
],
"sources": [
'<(colony_lua_path)/src/lapi.c',
'<(colony_lua_path)/src/lauxlib.c',
'<(colony_lua_path)/src/lbaselib.c',
'<(colony_lua_path)/src/lcode.c',
'<(colony_lua_path)/src/ldblib.c',
'<(colony_lua_path)/src/ldebug.c',
'<(colony_lua_path)/src/ldo.c',
'<(colony_lua_path)/src/ldump.c',
'<(colony_lua_path)/src/lfunc.c',
'<(colony_lua_path)/src/lgc.c',
'<(colony_lua_path)/src/linit.c',
'<(colony_lua_path)/src/liolib.c',
'<(colony_lua_path)/src/llex.c',
'<(colony_lua_path)/src/lmathlib.c',
'<(colony_lua_path)/src/lmem.c',
'<(colony_lua_path)/src/loadlib.c',
'<(colony_lua_path)/src/lobject.c',
'<(colony_lua_path)/src/lopcodes.c',
'<(colony_lua_path)/src/loslib.c',
'<(colony_lua_path)/src/lparser.c',
'<(colony_lua_path)/src/lstate.c',
'<(colony_lua_path)/src/lstring.c',
'<(colony_lua_path)/src/lstrlib.c',
'<(colony_lua_path)/src/ltable.c',
'<(colony_lua_path)/src/ltablib.c',
'<(colony_lua_path)/src/ltm.c',
'<(colony_lua_path)/src/lundump.c',
'<(colony_lua_path)/src/lvm.c',
'<(colony_lua_path)/src/lzio.c',
'<(colony_lua_path)/src/print.c',
'<(lua_bitop_path)/bit.c'
],
# Lua uses tmpname and has empty bodies and doesn't use some vars
'cflags': [
'-Wno-deprecated-declarations',
'-Wno-empty-body',
'-Wno-unused-but-set-variable',
'-Wno-unused-value',
'-Wno-unused-variable',
'-Wno-unknown-warning-option',
],
'xcode_settings': {
'OTHER_CFLAGS': [
'-Wno-deprecated-declarations',
'-Wno-empty-body',
'-Wno-unused-but-set-variable',
'-Wno-unused-value',
'-Wno-unknown-warning-option',
],
},
"include_dirs": [
"<(colony_lua_path)/src",
"<(lua_bitop_path)/",
],
'direct_dependent_settings': {
'defines': [
'COLONY_LUA',
'LUA_USELONGLONG',
],
'include_dirs': [
"<(colony_lua_path)/src",
],
'link_settings': {
'libraries': [
'-lm'
]
}
}
},
{
"target_name": "colony-luajit",
"product_name": "colony-luajit",
"type": "static_library",
'sources': [
# generated by the action below
'<(INTERMEDIATE_DIR)/libluajit.o',
],
'actions': [
{
'action_name': 'luajit-build',
'inputs': [
'<(colony_luajit_path)/Makefile',
],
'outputs': [
'<(INTERMEDIATE_DIR)/libluajit.o',
],
'action': ['<(tools_path)/luajit-build.sh', '<(OS)', '<@(_outputs)'],
},
],
'direct_dependent_settings': {
'defines': [
'COLONY_LUA',
],
'include_dirs': [
"<(colony_luajit_path)/src",
],
'link_settings': {
'libraries': [
'-lm'
]
}
}
},
{
'target_name': 'dir_builtin',
'type': 'none',
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/<(_target_name).c'
],
'actions': [
{
'action_name': '<(_target_name)_compile',
'inputs': [
'<(node_libs_path)/_stream_duplex.js',
'<(node_libs_path)/_stream_passthrough.js',
'<(node_libs_path)/_stream_readable.js',
'<(node_libs_path)/_stream_transform.js',
'<(node_libs_path)/_stream_writable.js',
'<(runtime_path)/colony/modules/_structured_clone.js',
'<(node_libs_path)/assert.js',
'<(runtime_path)/colony/modules/buffer.js',
'<(runtime_path)/colony/modules/child_process.js',
'<(runtime_path)/colony/modules/console.js',
'<(runtime_path)/colony/modules/crypto.js',
'<(runtime_path)/colony/modules/dgram.js',
'<(runtime_path)/colony/modules/domain.js',
'<(runtime_path)/colony/modules/dns.js',
'<(node_libs_path)/events.js',
'<(runtime_path)/colony/modules/fs.js',
'<(runtime_path)/colony/modules/http.js',
'<(runtime_path)/colony/modules/https.js',
'<(runtime_path)/colony/modules/net.js',
'<(runtime_path)/colony/modules/os.js',
'<(node_libs_path)/path.js',
'<(node_libs_path)/punycode.js',
'<(node_libs_path)/querystring.js',
'<(runtime_path)/colony/modules/repl.js',
'<(node_libs_path)/stream.js',
'<(node_libs_path)/string_decoder.js',
'<(runtime_path)/colony/modules/tls.js',
'<(runtime_path)/colony/modules/tty.js',
'<(node_libs_path)/url.js',
'<(runtime_path)/colony/modules/util.js',
'<(runtime_path)/colony/modules/zlib.js',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/<(_target_name).c',
],
'action': [ '<(tools_path)/compile_folder.sh', '<(SHARED_INTERMEDIATE_DIR)/<(_target_name).c', '<(_target_name)', '<(enable_luajit)', '<@(_inputs)' ],
},
]
},
{
'target_name': 'dir_runtime_lib',
'type': 'none',
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/<(_target_name).c'
],
'actions': [
{
'action_name': '<(_target_name)_compile',
'inputs': [
'<(runtime_path)/colony/lua/cli.lua',
'<(runtime_path)/colony/lua/colony-init.lua',
'<(runtime_path)/colony/lua/colony-js.lua',
'<(runtime_path)/colony/lua/colony-node.lua',
'<(runtime_path)/colony/lua/colony.lua',
'<(runtime_path)/colony/lua/preload.lua',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/<(_target_name).c',
],
'action': [ '<(tools_path)/compile_folder.sh', '<(SHARED_INTERMEDIATE_DIR)/<(_target_name).c', '<(_target_name)', '<(enable_luajit)', '<@(_inputs)' ],
},
]
},
{
"target_name": "libcolony",
"product_name": "libcolony",
"type": "static_library",
'cflags': [ '-Wall', '-Wextra', '-Werror' ],
'defines': [
'COLONY_COMPILER_PATH=<(compiler_path)',
'COLONY_NODE_VERSION=<(node_version)',
'__TESSEL_RUNTIME_SEMVER__=<!(node -p \"require(\\\"../package.json\\\").version")',
],
"sources": [
'<(runtime_path)/tm_event.c',
'<(runtime_path)/tm_timer.c',
'<(runtime_path)/colony/lua_hsregex.c',
'<(runtime_path)/colony/lua_tm.c',
'<(runtime_path)/colony/lua_rapidjson.c',
'<(runtime_path)/colony/colony.c',
'<(runtime_path)/colony/colony_init.c',
'<(runtime_path)/colony/colony_runtime.c',
'<(runtime_path)/colony/lua_http_parser.c',
'<(SHARED_INTERMEDIATE_DIR)/dir_builtin.c',
'<(SHARED_INTERMEDIATE_DIR)/dir_runtime_lib.c',
],
"include_dirs": [
'<(runtime_path)/',
'<(runtime_path)/colony/',
"<(colony_lua_path)/src",
],
"dependencies": [
'dir_builtin',
'dir_runtime_lib',
'libtm.gyp:hsregex',
'libtm.gyp:fortuna',
'libtm.gyp:dlmalloc',
'libtm.gyp:libtm',
'libtm.gyp:approxidate',
'libtm.gyp:http_parser',
],
"direct_dependent_settings": {
"include_dirs": [
'<(runtime_path)/colony/'
]
},
'conditions': [
['enable_luajit!=1', {
"dependencies": [
'colony-lua',
]
}],
['enable_luajit==1', {
"dependencies": [
'colony-luajit',
]
}],
['OS=="linux"', {
"link_settings": {
"libraries": [ "-ldl" ],
},
}],
['OS!="arm"', {
"sources": [
'<(runtime_path)/posix/tm_uptime.c',
'<(runtime_path)/posix/tm_timestamp.c',
],
}],
['enable_ssl==1', {
'dependencies': [
"libtm.gyp:axtls",
"libtm.gyp:tm-ssl",
],
}],
['enable_net==1', {
'sources': [
'<(runtime_path)/colony/lua_cares.c',
],
'dependencies': [
'libtm.gyp:c-ares',
],
}],
],
}
]
}
| StarcoderdataPython |
6078 | import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
    """Spectral embedding from the normalized graph Laplacian.

    Computes the ``1 + dim`` smallest-magnitude eigenpairs of
    ``L = I - D^{-1/2} A D^{-1/2}`` and returns the eigenvectors beyond the
    trivial first one, optionally rescaled by ``D^{-1/2}`` so that they
    solve the generalized eigenproblem of the normalized Laplacian.

    Parameters
    ----------
    g : graph object exposing ``adjacency_matrix`` (sparse) and
        ``dn_sqrt`` (per-node 1/sqrt(degree) vector).
    tol_eigs : positive float, eigensolver tolerance.
    normalize : rescale eigenvectors for the generalized problem
        (keep on unless you know what you are doing).
    dim : number of embedding dimensions to return.

    Returns
    -------
    (F, eigenvalues) : the (n, dim) embedding and all computed eigenvalues.
    """
    num_nodes = g.adjacency_matrix.shape[0]
    inv_sqrt_deg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, num_nodes, num_nodes)
    lap = sp.sparse.identity(num_nodes) - inv_sqrt_deg.dot(g.adjacency_matrix.dot(inv_sqrt_deg))
    vals, vecs = splinalg.eigsh(lap, which='SM', k=1 + dim, tol=tol_eigs)
    # Drop the trivial first eigenvector; keep only the real part.
    embedding = np.real(vecs[:, 1:])
    if normalize:
        embedding = embedding * g.dn_sqrt[:, np.newaxis]
    return embedding, vals
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
| StarcoderdataPython |
1761093 | import logging
import requests
import numpy as np
FIND_PLACE = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json?"
PLACE_DETAILS = "https://maps.googleapis.com/maps/api/place/details/json?"
def place_by_name(place, key, FIND_PLACE=FIND_PLACE):
    """Finds a Google Place ID by searching with its name.

    Args:
        place (str): Name of the place (restaurant, bar, monument -- anything
            you would normally search in Google Maps).
        key (str): Key for the Google API.
        FIND_PLACE (str): Endpoint for the Google Places API (the service
            must be enabled in order to use it).

    Returns:
        str | None: the Place ID, or None when the search has no candidates.
    """
    query = {
        "input": place,
        "fields": "place_id",
        "inputtype": "textquery",
        "key": key,
    }
    response = requests.get(FIND_PLACE, params=query)
    response.raise_for_status()
    candidates = response.json()["candidates"]
    if not candidates:
        logging.info(f"Failed to find a match for {place}")
        return None
    return candidates[0]["place_id"]
def place_by_id(id, key, PLACE_DETAILS=PLACE_DETAILS):
    """Finds details about a place given its Google Place ID.

    Args:
        id (str): Place ID.
        key (str): Key for the Google API.
        PLACE_DETAILS (str): Endpoint for the Google Places API (the service
            must be enabled in order to use it).

    Returns:
        dict: the raw Place Details response; see ``fields`` below for what
        the response contains.
    """
    fields = "address_components,formatted_address,geometry,name,place_id,type,website"
    response = requests.get(
        PLACE_DETAILS,
        params={"place_id": id, "key": key, "fields": fields},
    )
    response.raise_for_status()
    return response.json()
def parse_response(response):
    """Flatten a Google Place Details response into a single dict.

    Args:
        response (dict): raw response from the Place Details endpoint.

    Returns:
        dict: lat/lng, address, name, id, types, website (NaN when the
        place has none) and whichever administrative address components
        are present.
    """
    result = response["result"]
    location = result["geometry"]["location"]
    d = {
        "lat": location["lat"],
        "lng": location["lng"],
        "address": result["formatted_address"],
        "name": result["name"],
        "id": result["place_id"],
        "types": result["types"],
    }
    # Not every place has a website; fall back to NaN so downstream
    # dataframe code can treat it as missing.
    try:
        d["website"] = result["website"]
    except KeyError as e:
        logging.info(f"{d['name']}: {e}")
        d["website"] = np.nan
    wanted = ("postal_town", "administrative_area_level_2",
              "administrative_area_level_1", "country")
    for component in result["address_components"]:
        # First matching level wins for a given component, like the
        # original if/elif chain.
        for level in wanted:
            if level in component["types"]:
                d[level] = component["long_name"]
                break
    return d
if __name__ == "__main__":
import os
from dotenv import load_dotenv, find_dotenv
logging.basicConfig(level=logging.INFO)
load_dotenv(find_dotenv())
key = os.getenv("google_key")
name = "<NAME>"
try:
r = place_by_name(name, key)
except IndexError as e:
logging.info(e)
response = place_by_id(r, key)
logging.info(parse_response(response))
| StarcoderdataPython |
92442 | <reponame>chenke91/ihaveablog
from random import randint
from flask import render_template, request, current_app, jsonify
from app.models import Blog, User, Reply
from .forms import ReplyForm
from . import main
@main.route('/')
def index():
    """Render the paginated blog list for the home page.

    The page number comes from the ``page`` query parameter (default 1).
    """
    page = request.args.get('page', 1, type=int)
    blogs = Blog.get_blogs(page)
    # The previous version also called User.get_admin() but never used the
    # result (the template only receives `blogs`); the dead query was removed.
    return render_template('index.html', blogs=blogs)
@main.route('/blogs/<int:id>/', methods=['GET', 'POST'])
def get_blog(id):
    """Show a single blog post and handle comment (reply) submission.

    404s when the blog id does not exist; every render counts as one read.
    """
    blog = Blog.query.filter_by(id=id).first_or_404()
    blog.read_count += 1
    blog.save()
    form = ReplyForm()
    if form.validate_on_submit():
        Reply(
            body=form.body.data,
            username=form.username.data,
            email=form.email.data,
            # Pick one of the 5 stock avatars at random.
            avatar=randint(1, 5),
            blog=blog,
        ).save()
    return render_template('blog.html', blog=blog, form=form)
@main.route('/blogs/category/<int:id>/')
def category(id):
    """Render the blog list filtered to one category, paginated via ?page=."""
    page = request.args.get('page', 1, type=int)
    blogs = Blog.get_blogs(page, category_id=id)
    return render_template('index.html', blogs=blogs)
| StarcoderdataPython |
3324284 | from concurrent.futures import ThreadPoolExecutor
def exam(s, show_all, is_exam=1):
    """Fetch the content of every (or every still-open) exam for a student.

    Parameters
    ----------
    s : session object whose ``data`` attribute exposes the remote API.
    show_all : when falsy, only assignments that are still open
        (empty ``sTimeFlag`` and ``iFinished`` == 0) are fetched.
    is_exam : forwarded to the listing API (1 = exams). Kept as an int --
        the previous ``: bool`` annotation contradicted the ``1`` default.

    Returns
    -------
    list of exam-content payloads, in listing order.
    """
    # The API needs the total count to return a full page, so the listing
    # endpoint is called twice: once for iCount, once for the actual list.
    listing = s.data.homework_GetHomeworkListByStudent(
        iIsExam=is_exam,
        iPageCount=s.data.homework_GetHomeworkListByStudent(iIsExam=is_exam)['iCount'],
    )
    wanted = [
        hw for hw in listing['aHomework']
        if show_all or (not hw['sTimeFlag'] and not int(hw['iFinished']))
    ]
    # Fetch the selected exams' contents concurrently, preserving order.
    with ThreadPoolExecutor() as pool:
        futures = [
            pool.submit(s.data.exam_GetExamContent, iExamId=hw['sQuestionIds'])
            for hw in wanted
        ]
        return [future.result() for future in futures]
def word_train(s, unit):
    """Begin a batch word-training session for ``unit`` and immediately
    submit a perfect-score (100) result for it."""
    session = s.data.word_BeginWordTrain(
        sSubmodule='train',
        sTrainMode='batch',
        iTrainType='2',
        iUnit=unit,
        sSummary='%00',
    )
    return s.data.word_AddWordTrainDetails(
        iUnit=unit,
        iTrainId=session['iTrainId'],
        sSerialIds='0',
        sAnswers='%00',
        sSolutions='%00',
        sIsPasses='1',
        sScores='100',
    )
| StarcoderdataPython |
21098 | <gh_stars>0
# Generated by Django 3.2 on 2021-05-05 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax four ``Image`` fields so they may be
    left blank, each with an empty-value default. Only field options change;
    no data migration is involved."""

    dependencies = [
        ('image_repo', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image',
            name='colors',
            field=models.CharField(blank=True, default='', max_length=50),
        ),
        migrations.AlterField(
            model_name='image',
            name='description',
            field=models.CharField(blank=True, default='', max_length=200),
        ),
        migrations.AlterField(
            model_name='image',
            name='result',
            field=models.JSONField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='image',
            name='tags',
            field=models.CharField(blank=True, default='', max_length=250),
        ),
    ]
| StarcoderdataPython |
3269532 | from gtfparse import read_gtf
from .constants import FEATURES, NON_CODING_BIOTYPES
import logging
import click
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
def write_bed(bed_df, output_file_name, uncompressed):
    """Write a BED dataframe to ``<output_file_name>.bed`` (or ``.bed.gz``).

    The two former branches differed only in suffix and compression, so
    they are collapsed into a single ``to_csv`` call. BED output carries
    no header, drops the index, and is written tab-separated in chunks.
    """
    suffix = ".bed" if uncompressed else ".bed.gz"
    bed_df.to_csv(
        f"{output_file_name}{suffix}",
        sep="\t",
        compression=None if uncompressed else "gzip",
        header=False,
        index=False,
        chunksize=1024,
    )
@click.command()
@click.option(
    "--gtf",
    required=True,
    help="The input GTF file",
    type=click.Path(readable=True, exists=True),
)
@click.option(
    "--output",
    required=True,
    help="The name of the output BED file.",
    type=click.Path(writable=True),
)
@click.option(
    "--feature",
    required=True,
    help="A comma-separated list of feature names to extract.",
    type=click.STRING,
)
@click.option("--uncompressed", help="Don't compress output file", is_flag=True)
def gtf2bed(gtf, output, feature, uncompressed):
    """Simple command to convert GTF features to a BED file.

    Filters the GTF to the requested feature types (the special name
    ``ncRNA`` selects non-coding transcript biotypes) and writes one BED
    file per feature type via :func:`write_bed`.
    """
    # TODO: Support the following features:
    ## On feature column (Done):
    # 5' UTR, 3' UTR, CDS, Exons, Introns, Start codons, Stop codons
    ## Computed:
    # Non-coding RNA (Done), First exons, Last exons
    feature_list = set(feature.split(","))
    logging.info(f"Reading {gtf}...")
    current_gtf = read_gtf(gtf, chunksize=1024)
    logging.info(f"Filtering {gtf} for {'; '.join(feature_list)}...")
    feature_gtf = current_gtf[current_gtf["feature"].isin(feature_list)]
    if "ncRNA" in feature_list:
        feature_gtf = feature_gtf[
            feature_gtf["transcript_biotype"].isin(NON_CODING_BIOTYPES)
        ]
    if feature_gtf.empty:
        supported = "\n".join(FEATURES)
        message = f"File is empty, check that the feature provided is contained below:\n{supported}"
        logging.error(message)
        # Previously a bare ``raise Exception`` dropped the message, so the
        # traceback gave no hint of the cause; attach it to the exception.
        raise Exception(message)
    for current_feat, group in feature_gtf.groupby("feature"):
        bed_data = group[
            ["seqname", "start", "end", "gene_id", "score", "strand"]
        ].fillna(".")
        current_filename = f"{output}_{current_feat}"
        logging.info(f"Writing {current_filename}...")
        write_bed(bed_data, current_filename, uncompressed)
1696421 | <gh_stars>0
# -*- coding: utf-8 -*-
from .action import *
from .jwt import * | StarcoderdataPython |
42609 | <reponame>NNemec/meson<filename>test cases/common/95 dep fallback/gensrc.py
#!/usr/bin/env python
import sys
import shutil
# Build-time source generator: copy the file at argv[1] to argv[2] verbatim.
shutil.copyfile(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
3386849 | from rest_framework import serializers
from main.models import Curriculum, TreatmentPlan, PatientProfile, AssistantProfile
class CurriculumSerializer(serializers.ModelSerializer):
    """REST serializer for Curriculum records."""

    class Meta:
        model = Curriculum
        # Hidden from API clients: `uploaded_at` and `patient` are not part
        # of the payload (presumably set server-side -- confirm in the views).
        exclude = ['uploaded_at', 'patient']
class TreatmentPlanSerializer(serializers.ModelSerializer):
    """REST serializer for TreatmentPlan records."""

    class Meta:
        model = TreatmentPlan
        # Same exclusions as CurriculumSerializer: not client-settable fields.
        exclude = ['uploaded_at', 'patient']
class PatientProfileSerializer(serializers.ModelSerializer):
    """REST serializer for PatientProfile; the linked auth user is hidden."""

    class Meta:
        model = PatientProfile
        exclude = ['user']
class AssistantProfileSerializer(serializers.ModelSerializer):
    """REST serializer for AssistantProfile; the linked auth user is hidden."""

    class Meta:
        model = AssistantProfile
        exclude = ['user']
| StarcoderdataPython |
166476 | from openvino.inference_engine import IECore, IENetwork
class Network:
    """Thin wrapper around the OpenVINO Inference Engine: loads an IR model
    and exposes synchronous/asynchronous inference plus result extraction."""
    # All handles start as None; any attribute still None after load_model()
    # indicates initialization did not complete.
    def __init__(self):
        self.plugin = None          # IECore instance (set by load_model)
        self.network = None         # parsed IR network
        self.input_blob = None      # name of the first input layer
        self.output_blob = None     # name of the first output layer
        self.exec_network = None    # network loaded onto the target device
        self.infer_request = None   # never used in this class; kept for API symmetry
    def load_model(self, model, bin, device = "CPU"):
        """Load an IR model (.xml graph + .bin weights) onto `device`.

        NOTE(review): parameter `bin` shadows the builtin; renaming it would
        break keyword callers, so it is left as-is.
        """
        model_xml = model
        model_bin = bin
        self.plugin = IECore()
        self.network = IENetwork(model_xml, weights = model_bin)
        self.exec_network = self.plugin.load_network(self.network, device)
        # Use the first declared input/output layer as the defaults.
        self.input_blob = next(iter(self.network.inputs))
        self.output_blob = next(iter(self.network.outputs))
        return
    def get_input_shape(self):
        """Return the shape expected by the network's input layer."""
        return self.network.inputs[self.input_blob].shape
    def async_inference(self, image):
        """Start a non-blocking inference on request slot 0."""
        self.exec_network.start_async(request_id = 0, inputs = {self.input_blob: image})
        return
    def synchronous_inference(self,image):
        """Run a blocking inference on `image`."""
        self.exec_network.infer({self.input_blob: image})
        return
    def wait(self):
        """Block until request 0 completes; return its status code."""
        status = self.exec_network.requests[0].wait(-1)
        return status
    def extract_output(self):
        """Return the output blob produced by request 0."""
        return self.exec_network.requests[0].outputs[self.output_blob]
| StarcoderdataPython |
76870 | <reponame>temelkirci/Motion_Editor
#!C:\Users\DOF\Desktop\DOF_Motion_Editor\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'astropy==3.0.5','console_scripts','wcslint'
__requires__ = 'astropy==3.0.5'
# Setuptools-generated console-script shim for astropy's `wcslint` command;
# regenerated on install -- do not edit by hand.
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any "-script.py(w)" / ".exe" suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('astropy==3.0.5', 'console_scripts', 'wcslint')()
    )
| StarcoderdataPython |
3295831 | <reponame>winclap/aldjemy<gh_stars>0
import django
class LogsRouter(object):
    """Django database router sending models tagged with ``_DATABASE = 'logs'``
    to the 'logs' database alias for reads, writes and migrations."""
    ALIAS = 'logs'
    def use_logs(self, model):
        """True when the model opts into the logs database via ``_DATABASE``."""
        return hasattr(model, '_DATABASE') and model._DATABASE == self.ALIAS
    def db_for_read(self, model, **hints):
        # Returning None lets Django fall through to the default database.
        if self.use_logs(model):
            return self.ALIAS
    def db_for_write(self, model, **hints):
        # Writes follow the same routing as reads.
        return self.db_for_read(model, **hints)
    # Django changed the allow_migrate signature in 1.8 (app_label/model_name
    # replaced the model argument), so the right variant is picked at class
    # definition time.
    if django.VERSION < (1, 8):
        def allow_migrate(self, db, model):
            if db == 'logs':
                return self.use_logs(model)
            elif self.use_logs(model):
                return False
            return None
    else:
        def allow_migrate(self, db, app_label, model_name=None, **hints):
            model = hints.get('model', None)
            if model is None:
                return None
            if db == 'logs':
                return self.use_logs(model)
            elif self.use_logs(model):
                return False
            return None
| StarcoderdataPython |
3327721 | <gh_stars>10-100
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class ISCSIDataIn(Base):
    """Auto-generated SDM wrapper for the iSCSI Data-In PDU header template.

    Each property returns a Multivalue bound to one header field; the default
    values below mirror the protocol template. NOTE(review): this class
    follows the ixnetwork_restpy generated-code pattern -- prefer regenerating
    over hand-editing individual field entries.
    """
    __slots__ = ()
    _SDM_NAME = 'iSCSI_Data_In'
    # Maps python-friendly attribute names to SDM field paths.
    _SDM_ATT_MAP = {
        'HeaderOpcode': 'iSCSI_Data_In.header.Opcode-1',
        'HeaderFlags': 'iSCSI_Data_In.header.Flags-2',
        'HeaderTotalAHSLength': 'iSCSI_Data_In.header.TotalAHSLength-3',
        'HeaderUnknown': 'iSCSI_Data_In.header.Unknown-4',
        'HeaderDataSegmentLength': 'iSCSI_Data_In.header.DataSegmentLength-5',
        'HeaderLUN': 'iSCSI_Data_In.header.LUN-6',
        'HeaderInitiatorTaskTag': 'iSCSI_Data_In.header.InitiatorTaskTag-7',
        'HeaderTargetTransferTag': 'iSCSI_Data_In.header.TargetTransferTag-8',
        'HeaderStatSN': 'iSCSI_Data_In.header.StatSN-9',
        'HeaderExpCmdSN': 'iSCSI_Data_In.header.ExpCmdSN-10',
        'HeaderMaxCmdSN': 'iSCSI_Data_In.header.MaxCmdSN-11',
        'HeaderDataSN': 'iSCSI_Data_In.header.DataSN-12',
        'HeaderBufferoffset': 'iSCSI_Data_In.header.Bufferoffset-13',
        'HeaderResidualCount': 'iSCSI_Data_In.header.ResidualCount-14',
        'HeaderHeaderDigest': 'iSCSI_Data_In.header.HeaderDigest-15',
    }
    def __init__(self, parent, list_op=False):
        """parent: containing SDM object; list_op: True when this instance is
        part of a list operation."""
        super(ISCSIDataIn, self).__init__(parent, list_op)
    @property
    def HeaderOpcode(self):
        """
        Display Name: Opcode
        Default Value: 0x25
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderOpcode']))
    @property
    def HeaderFlags(self):
        """
        Display Name: Flags
        Default Value: 0x80
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFlags']))
    @property
    def HeaderTotalAHSLength(self):
        """
        Display Name: TotalAHSLength
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderTotalAHSLength']))
    @property
    def HeaderUnknown(self):
        """
        Display Name: Unknown
        Default Value: 0x0000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderUnknown']))
    @property
    def HeaderDataSegmentLength(self):
        """
        Display Name: DataSegmentLength
        Default Value: 0x000016
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderDataSegmentLength']))
    @property
    def HeaderLUN(self):
        """
        Display Name: LUN
        Default Value: 0x0000000000000000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLUN']))
    @property
    def HeaderInitiatorTaskTag(self):
        """
        Display Name: InitiatorTaskTag
        Default Value: 0x00000010
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderInitiatorTaskTag']))
    @property
    def HeaderTargetTransferTag(self):
        """
        Display Name: TargetTransferTag
        Default Value: 0xFFFFFFFF
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderTargetTransferTag']))
    @property
    def HeaderStatSN(self):
        """
        Display Name: StatSN
        Default Value: 0x00000000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderStatSN']))
    @property
    def HeaderExpCmdSN(self):
        """
        Display Name: ExpCmdSN
        Default Value: 0x00000010
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderExpCmdSN']))
    @property
    def HeaderMaxCmdSN(self):
        """
        Display Name: MaxCmdSN
        Default Value: 0x00000051
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMaxCmdSN']))
    @property
    def HeaderDataSN(self):
        """
        Display Name: DataSN
        Default Value: 0x00000001
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderDataSN']))
    @property
    def HeaderBufferoffset(self):
        """
        Display Name: Bufferoffset
        Default Value: 0x00000000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBufferoffset']))
    @property
    def HeaderResidualCount(self):
        """
        Display Name: ResidualCount
        Default Value: 0x00000000
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderResidualCount']))
    @property
    def HeaderHeaderDigest(self):
        """
        Display Name: HeaderDigest
        Default Value: 0x5F7F4CAE
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderHeaderDigest']))
    def add(self):
        """Create this template on the server from the current local values."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| StarcoderdataPython |
125017 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SPAM Rados Module
"""
import spam.ansirunner
import re
class Rados(object):
"""
SPAM Rados class
"""
def __init__(self):
"""
Initialize Rados class.
"""
self.runner = spam.ansirunner.AnsibleRunner()
def rados_df(self,
host_list=None,
remote_user=None,
remote_pass=None):
'''
Invoked the rados df command and return output to user
'''
result, failed_hosts = self.runner.ansible_perform_operation(
host_list=host_list,
remote_user=remote_user,
remote_pass=remote_pass,
module="command",
module_args="rados df")
parsed_result = self.rados_parse_df(result)
return parsed_result
def rados_parse_df(self,
result):
'''
Parse the result from ansirunner module and save it as a json
object
'''
parsed_results = []
HEADING = r".*(pool name) *(category) *(KB) *(objects) *(clones)" + \
" *(degraded) *(unfound) *(rd) *(rd KB) *(wr) *(wr KB)"
HEADING_RE = re.compile(HEADING,
re.IGNORECASE)
dict_keys = ["pool_name", "category", "size_kb", "objects",
"clones", "degraded", "unfound", "rd", "rd_kb",
"wr", "wr_kb"]
if result['contacted'].keys():
for node in result['contacted'].keys():
df_result = {}
nodeobj = result['contacted'][node]
df_output = nodeobj['stdout']
for line in df_output.splitlines():
print "Line: ", line
# Skip the heading line.
reobj = HEADING_RE.match(line)
if not reobj:
row = line.split()
if len(row) != len(dict_keys):
print "line not match: ", line
continue
key_count = 0
for column in row:
df_result[dict_keys[key_count]] = column
key_count += 1
print "df_result: ", df_result
parsed_results.append(df_result)
nodeobj['parsed_results'] = parsed_results
return result
| StarcoderdataPython |
3345859 | <gh_stars>0
import os
import json
# =========== DO NOT USE ANYMORE ===================
# This File was only used, to assign the retrieved values
# to it's respective video. The result can be found in
# metadata_ai.json
# ==================================================
#List chosen videos
videos = os.listdir(
'F:\\H-BRS\\Vorlesungen, Skripts, Notizen, Übungen\\Visual Computing\\Project Deepfake\\Videos_Selected')
videos.remove("metadata.json")
#Values passed by the AI
checked_videos = ["REAL", "FAKE", "REAL", "FAKE", "FAKE", "FAKE", "FAKE",
"FAKE", "FAKE", "FAKE", "FAKE", "FAKE", "REAL", "FAKE",
"REAL", "FAKE", "REAL", "REAL", "REAL", "REAL", "REAL",
"FAKE", "FAKE", "REAL", "FAKE", "FAKE", "FAKE", "FAKE",
"REAL", "FAKE", "FAKE", "REAL", "REAL", "REAL", "REAL",
"FAKE", "FAKE", "REAL", "REAL", "FAKE", "REAL", "REAL",
"REAL", "FAKE", "REAL", "FAKE", "REAL", "REAL", "REAL",
"REAL"]
checked_values = [3, 97, 0, 99, 99, 97, 99,
97, 99, 100, 99, 100, 1, 84,
47, 98, 1, 0, 3, 0, 22,
97, 99, 5, 99, 99, 99, 55,
9, 98, 99, 4, 0, 21, 24,
100, 100, 0, 36, 99, 0, 0,
4, 93, 0, 99, 5, 2, 2,
2]
new_dict = {}
counter = 0
#Generate new metadata for A.I Evaluation
for x in videos:
new_dict.update({x: {"label_ai": checked_videos[counter],
"value": checked_values[counter]}})
counter += 1
#Safe dictionary
with open(
'F:\\H-BRS\\Vorlesungen, Skripts, Notizen, Übungen\\Visual Computing\\Project Deepfake\\Website\\Initialization\\metadata_ai.json',
'w') as fp:
json.dump(new_dict, fp)
| StarcoderdataPython |
1638404 | <filename>Calc_dobro_triplo_raiz.py
# Read an integer (prompt is in Portuguese: "type a number") and report
# its double, triple and square root with 3 decimal places.
n = int(input('digite um número'))
d = n * 2
t = n * 3
r = n ** (1/2)
print(" analisando o valor {}, o dobro vale {}, o triplo vale {} , a raiz quadrada desse número é {:.3f}".format(n, d, t, r))
| StarcoderdataPython |
3232508 | <filename>clees_misc.py
# ----------------------------------
# CLEES Misc
# Author : Tompa
# ----------------------------------
# ------------------ General libs --------------
import time
# ------------------ Private Libs --------------
import clees_settings
import clees_io
import clees_objects
# --- date/time -----------------------------------------------
def formatdatetime(tstruct):
    """Format a time.struct_time as ``YYYYMMDD-HH:MM:SS``.

    Replaces ~25 lines of manual zero-padding with a single strftime call;
    the output is identical for 4-digit years (the old code did not pad
    the year, strftime's %Y does).
    """
    return time.strftime("%Y%m%d-%H:%M:%S", tstruct)
def getgmtdatetimestr():
    """Return the current UTC time formatted by formatdatetime()."""
    now_utc = time.gmtime(time.time())
    return formatdatetime(now_utc)
# -------------------------------------------------------------
# Module-level status-LED state:
#   statusledstate -- last driven state (0 = off, 1 = on)
#   sysledgpio     -- GPIO pin number of the system LED (0 = no GPIO LED)
#   sysledobj      -- id of an output object mirroring the LED ("" = none)
statusledstate = 0
sysledgpio = 0
sysledobj = ""
# --- init
def init():
    """Load the status-LED configuration from settings and force the LED off.

    GPIO numbers above 40 are treated as invalid and disabled (set to 0).
    """
    global statusledstate
    global sysledgpio
    global sysledobj
    sysledgpio = clees_settings.get('systemledgpio')
    if sysledgpio > 40:
        # BUG FIX: this previously read ``sysledgpio == 0`` -- a no-op
        # comparison -- so out-of-range pins were never actually disabled.
        sysledgpio = 0
    sysledobj = clees_settings.get('systemledoutputobjectid')
    # Pretend the LED is on so clr_statusled() performs a real clear.
    statusledstate = 1
    clr_statusled()
    return
def set_statusled():
    """Turn the status LED on (idempotent: no-op when already on). Returns 0."""
    global statusledstate
    global sysledgpio
    global sysledobj
    if statusledstate != 0:
        return(0)
    if sysledgpio > 0:
        clees_io.Set_GPIO(sysledgpio,1)
    if sysledobj != "":
        clees_objects.set_outputobject(sysledobj,"activate")
    statusledstate = 1
    return(0)
def clr_statusled():
    """Turn the status LED off (idempotent: no-op when already off). Returns 0."""
    global statusledstate
    global sysledgpio
    global sysledobj
    if statusledstate != 1:
        return(0)
    if sysledgpio > 0:
        clees_io.Set_GPIO(sysledgpio,0)
    if sysledobj != "":
        clees_objects.set_outputobject(sysledobj,"deactivate")
    statusledstate = 0
    return(0)
| StarcoderdataPython |
16954 | # Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from lcm.ns.biz.ns_create import CreateNSService
from lcm.ns.biz.ns_get import GetNSInfoService
from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsReqSerializer
from lcm.ns.serializers.deprecated.ns_serializers import _CreateNsRespSerializer
from lcm.ns.serializers.deprecated.ns_serializers import _QueryNsRespSerializer
from lcm.pub.exceptions import NSLCMException
from lcm.pub.exceptions import BadRequestException
from lcm.pub.utils.values import ignore_case_get
from .common import view_safe_call_with_log
logger = logging.getLogger(__name__)
class CreateNSView(APIView):
    """NS instance collection endpoint: GET lists all NS instances,
    POST creates a new one from a CSAR id plus name/description."""
    @swagger_auto_schema(
        request_body=None,
        responses={
            status.HTTP_200_OK: _QueryNsRespSerializer(help_text="NS instances", many=True),
            status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
        }
    )
    @view_safe_call_with_log(logger=logger)
    def get(self, request):
        """Return every NS instance record known to the LCM."""
        logger.debug("CreateNSView::get")
        ret = GetNSInfoService().get_ns_info()
        logger.debug("CreateNSView::get::ret=%s", ret)
        resp_serializer = _QueryNsRespSerializer(data=ret, many=True)
        if not resp_serializer.is_valid():
            raise NSLCMException(resp_serializer.errors)
        return Response(data=resp_serializer.data, status=status.HTTP_200_OK)
    @swagger_auto_schema(
        request_body=_CreateNsReqSerializer(),
        responses={
            status.HTTP_201_CREATED: _CreateNsRespSerializer(),
            status.HTTP_400_BAD_REQUEST: "Bad Request",
            status.HTTP_500_INTERNAL_SERVER_ERROR: "Inner error"
        }
    )
    @view_safe_call_with_log(logger=logger)
    def post(self, request):
        """Create an NS instance; returns 201 with the new instance id."""
        logger.debug("Enter CreateNS: %s", request.data)
        req_serializer = _CreateNsReqSerializer(data=request.data)
        if not req_serializer.is_valid():
            raise BadRequestException(req_serializer.errors)
        # Test hook: a request with test == "test" short-circuits without
        # touching the database.
        if ignore_case_get(request.data, 'test') == "test":
            return Response(
                data={'nsInstanceId': "test"},
                status=status.HTTP_201_CREATED
            )
        csar_id = ignore_case_get(request.data, 'csarId')
        ns_name = ignore_case_get(request.data, 'nsName')
        description = ignore_case_get(request.data, 'description')
        context = ignore_case_get(request.data, 'context')
        ns_inst_id = CreateNSService(
            csar_id,
            ns_name,
            description,
            context
        ).do_biz()
        logger.debug("CreateNSView::post::ret={'nsInstanceId':%s}", ns_inst_id)
        # NOTE(review): every field below except nsInstanceId is a hard-coded
        # placeholder ('nsInstanceName', 123, 456, ...). This is a deprecated
        # API ("deprecated" serializers module), but confirm before relying
        # on any response field other than nsInstanceId.
        resp_serializer = _CreateNsRespSerializer(
            data={'nsInstanceId': ns_inst_id,
                  'nsInstanceName': 'nsInstanceName',
                  'nsInstanceDescription': 'nsInstanceDescription',
                  'nsdId': 123,
                  'nsdInfoId': 456,
                  'nsState': 'NOT_INSTANTIATED',
                  '_links': {'self': {'href': 'href'}}})
        if not resp_serializer.is_valid():
            raise NSLCMException(resp_serializer.errors)
        return Response(data=resp_serializer.data, status=status.HTTP_201_CREATED)
| StarcoderdataPython |
4819595 | # -*- coding: utf-8; -*-
#
# @file template_parser.py
# @brief Base class for packager templage node.
# @author <NAME> (INRA UMR1095)
# @date 2014-06-03
# @copyright Copyright (c) 2014 INRA
# @license MIT (see LICENSE file)
# @details Allow to parse Django templates and to find what are the used custom tag and theirs values.
# This is especially used to detect for the packager in way to auto initialize the list of installed packages and which
# version even at css templates level.
import os
import re
from importlib import import_module
from django.conf import settings
from django.template.base import Template
import igdectk.packager.template
from .exception import UnsupportedPackagerConfiguration
# Parses a packager tag argument: lib[.sublib]_module[|theme][#filename]
# groups: (1) library, (2) .sub-library, (3) module, (4) |theme, (5) #filename
VALIDATOR = re.compile(r'^([a-zA-Z0-9-]+)(\.[a-zA-Z0-9-]+){0,1}_([a-zA-Z0-9-]+)(\|[a-zA-Z0-9\-]+){0,1}(#[a-zA-Z0-9-_\.]+){0,1}$')
def get_apps_list():
    """Return the tuple of installed Django applications (empty if unset)."""
    installed = getattr(settings, 'INSTALLED_APPS', ())
    return installed
def get_templates_list(application):
    """Return the absolute paths of the ``*.html`` templates of *application*.

    Templates are looked up in ``<app>/templates/<app_label>/``; an empty
    list is returned when that directory does not exist.
    """
    app_module = import_module(application)
    app_abspath = app_module.__path__[0]
    app_label = application.split('.')[-1]
    tpl_dir = os.path.join(app_abspath, 'templates', app_label)
    if not os.path.isdir(tpl_dir):
        return []
    return [os.path.join(tpl_dir, name)
            for name in os.listdir(tpl_dir)
            if name.endswith('.html')]
def introspect_node(node, results):
    """Record the library/version/theme referenced by a packager template node.

    ``results`` maps a templatetag module name to a list of
    ``[fq_library_name, [versions], [themes]]`` entries and is mutated
    in place.  Raises UnsupportedPackagerConfiguration when the node asks
    for a version or theme the library does not provide.
    """
    # Find the sub-module name that follows 'templatetags' in the node's
    # module path, e.g. 'pkg.templatetags.bootstrap' -> 'bootstrap'.
    module = node.__module__.split('.')
    module_name = None
    is_next = False
    for m in module:
        if is_next:
            module_name = m
            break
        if m == 'templatetags':
            is_next = True
    if module_name:
        if module_name not in results:
            results[module_name] = []
        # NOTE(review): a var that does not match VALIDATOR makes `matches`
        # None and the next line raise AttributeError -- presumably all tag
        # arguments are pre-validated; confirm.
        matches = VALIDATOR.match(node.arg.var)
        libname = matches.group(1)
        sublibname = matches.group(2)
        module = matches.group(3)
        theme = matches.group(4)
        # filename = matches.group(5)
        lib = None
        # Fully-qualified name keeps parent and sub-library entries distinct.
        fq_libname = libname if not sublibname else libname + sublibname
        for library in results[module_name]:
            if library[0] == fq_libname:
                lib = library
                break
        # specific version in param
        if node.param:
            version_v = node.param
            if not node.has_version(libname, sublibname, version_v):
                raise UnsupportedPackagerConfiguration("Unsupported specific version %s for %s" % (version_v, fq_libname))
        else:
            version_v = node.get_default_version(libname, sublibname)
        # specific theme
        if theme:
            theme_v = theme.lstrip('|')
            if not node.has_theme(libname, sublibname, theme_v):
                raise UnsupportedPackagerConfiguration("Unsupported specific theme %s for %s" % (theme_v, fq_libname))
        else:
            theme_v = node.get_default_theme(libname, sublibname)
        if not lib:
            lib = [fq_libname, [], []]
            results[module_name].append(lib)
        # was a workaround in way to add the parent library
        # when only sublib are imported. but this is more easily
        # done in finders.py#l75 using a split on the library name
        # if sublibname:
        #     # can need parent lib for sublib and parent not specifically imported
        #     has_parent_lib = False
        #
        #     for library in results[module_name]:
        #         # when sublib and if parent lib is not imported in template
        #         # we have to add load it into the list of results
        #         if sublibname and library[0] == libname:
        #             print(module_name, libname, library)
        #             has_parent_lib = True
        #             break
        #
        #     if not has_parent_lib:
        #         parent_lib = [libname, [version_v], [theme_v]]
        #         results[module_name].append(parent_lib)
        if lib:
            # Register each (version, theme) at most once per library.
            if version_v not in lib[1]:
                lib[1].append(version_v,)
            if theme_v and theme_v not in lib[2]:
                lib[2].append(theme_v,)
def get_installed_packages():
    """
    Automatically create the list of installed packages, library and sub-library used
    for default or specific versions and themes.

    Returns a dict mapping each templatetag module name to a list of
    ``[library_name, [versions], [themes]]`` entries collected from every
    template of every installed application.
    """
    apps = get_apps_list()
    results = {}
    for app in apps:
        for tpl_name in get_templates_list(app):
            # 'rU' (universal newlines) mode was removed in Python 3.11;
            # plain text mode behaves identically.  The context manager
            # also fixes the file-handle leak of the previous version,
            # which never closed the template files it opened.
            with open(tpl_name, 'r') as f:
                tpl = Template(f.read())
            root = tpl.compile_nodelist()
            for node in root.get_nodes_by_type(igdectk.packager.template.Node):
                introspect_node(node, results)
    return results
| StarcoderdataPython |
154906 | # Generated by Django 3.2.8 on 2021-11-17 13:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: make `user.cart_list` a nullable/blank FK
    # to `Ruby.product` with cascading delete.

    dependencies = [
        ('Ruby', '0003_auto_20211117_1334'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='cart_list',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to='Ruby.product'),
        ),
    ]
| StarcoderdataPython |
1602776 | <reponame>simoore/cantilever-designer
import numpy as np
import microfem
class FrequencyProblem(object):
    """Optimization problem: place a cantilever's first modal frequency at f0.

    The fitness of a candidate topology is the absolute distance between
    its first modal frequency (Hz) and the target ``f0``; disconnected
    topologies receive the factory's connectivity penalty instead of a
    FEM simulation.
    """

    def __init__(self, params, topology_factory):
        self.f0 = params['f0']                    # target frequency in Hz
        self.topology_factory = topology_factory  # decodes genomes into cantilevers
        self.material = microfem.SoiMumpsMaterial()

    @property
    def ind_size(self):
        """Genome length expected by the topology factory."""
        return (self.topology_factory.ind_size)

    @property
    def name(self):
        return '--- Frequency Placement Optimization ---'

    def objective_function(self, xs):
        """Return a 1-tuple fitness: |f - f0|, or the connectivity penalty."""
        self.topology_factory.update_topology(xs)

        if self.topology_factory.is_connected is True:
            params = self.topology_factory.get_params()
            cantilever = microfem.Cantilever(*params)
            fem = microfem.PlateFEM(self.material, cantilever)
            w, _, _ = fem.modal_analysis(1)
            # np.asscalar() was removed in NumPy 1.23; ndarray.item() is
            # the supported replacement and behaves identically here.
            f = (np.sqrt(w) / (2 * np.pi)).item()
            cost = abs(f - self.f0)
            return (cost,)

        return (self.topology_factory.connectivity_penalty,)

    def console_output(self, xopt, image_file):
        """Plot the optimized topology and report its first modal frequency."""
        self.topology_factory.update_topology(xopt)
        params = self.topology_factory.get_params()
        cantilever = microfem.Cantilever(*params)
        microfem.plot_topology(cantilever, image_file)

        if self.topology_factory.is_connected is True:
            fem = microfem.PlateFEM(self.material, cantilever)
            w, _, vall = fem.modal_analysis(1)
            f = (np.sqrt(w) / (2 * np.pi)).item()
            print('The first modal frequency is (Hz): %g' % f)
            microfem.plot_mode(fem, vall[:, 0])
| StarcoderdataPython |
def minFallingPath(arr):
    """Return the minimum falling-path sum of matrix *arr*.

    A falling path starts in any cell of the first row and, at each step,
    moves to the next row in the same column or an adjacent column.

    Fixes of the original rolling-array version:
    * ``dp[0] = dp[1]`` aliased both rows, so from the third row onward the
      minimum was taken over values already overwritten in the current row;
    * a single-column matrix raised IndexError and a single-row matrix
      returned 0 instead of ``min(arr[0])``.
    """
    m = len(arr)
    n = len(arr[0])
    prev = list(arr[0])  # best path sums ending in the previous row
    for i in range(1, m):
        cur = [0] * n  # fresh row each iteration -- no aliasing
        for j in range(n):
            best = prev[j]
            if j > 0:
                best = min(best, prev[j - 1])
            if j < n - 1:
                best = min(best, prev[j + 1])
            cur[j] = best + arr[i][j]
        prev = cur
    return min(prev)
# Demo inputs: a 3x3 grid and a 2x3 grid with negative weights.
arr = [[1, 2, 3],
       [4, 5, 6],
       [7, 8, 9]]
arr2 = [[-10, 5, 2],
        [-1, -2, 3]]
# Expected minimum falling-path sum for `arr` is 12 (path 1 -> 4 -> 7).
print(minFallingPath(arr))
| StarcoderdataPython |
61771 | <reponame>tdriggs/TetrisAI
import json
import os
import subprocess
from pathlib import Path, WindowsPath
def get_data_filenames(num_outputs):
    """Map a network's output-layer size to its training/test CSV paths.

    Parameters
    ----------
    num_outputs : int
        Output-layer size; must be one of 41, 81, 121, 161, 201, 241
        (one extra depth level per additional 40 outputs).

    Returns
    -------
    tuple[Path, Path]
        ``(training_file, test_file)``.

    Raises
    ------
    KeyError
        If *num_outputs* is not one of the known sizes.
    """
    output_depth = {
        41: 1,
        81: 2,
        121: 3,
        161: 4,
        201: 5,
        241: 6,
    }[num_outputs]
    # Path instead of WindowsPath: WindowsPath cannot be instantiated on
    # POSIX systems (raises NotImplementedError), while on Windows
    # Path *is* WindowsPath, so behaviour there is unchanged.
    return (Path("../training_data/uber_x_1__%s__%d.csv" % ("training", output_depth)),
            Path("../training_data/uber_x_1__%s__%d.csv" % ("test", output_depth)))
def main():
    """Re-validate every saved uber network against its matching datasets.

    Walks ``../networks/`` for ``uber*.nn`` files, writes a temporary
    trainer configuration with zero training iterations, and invokes the
    external trainer binary on it.
    """
    for root, dirs, files in os.walk("../networks/"):
        # Only the top-level networks directory is processed.
        if (root != "../networks/"):
            continue
        for file in files:
            if file.startswith("uber") and file.endswith(".nn"):
                # Filename format: ...__<num_outputs>__<suffix>.nn
                format_data = file.split("__")
                num_outputs = int(format_data[-2])
                training_filename, test_filename = get_data_filenames(num_outputs)
                # NOTE(review): output_size is hard-coded to 201 while
                # num_outputs is parsed per network -- confirm this is
                # intentional.
                json_data = {
                    "learning_rate": 0.01,
                    "input_size": 221,
                    "output_size": 201,
                    "hidden_sizes": [1],
                    "training_data": str(training_filename),
                    "test_data": str(test_filename),
                    "training_iterations": 0,
                    "input_network": str(WindowsPath(root, file)),
                    "output_network": str(WindowsPath(root, "validated", file))[:-3]
                }
                with open("./temp_configuration.json", "w") as fp:
                    json.dump(json_data, fp)
                subprocess.run(['..\\bin\\Train\\aiTrainer.exe', '.\\temp_configuration.json'])

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3206024 |
def test_player():
    """Smoke test: a FilersPlayer can be constructed and torn down cleanly."""
    from filers2.recording import FilersPlayer
    FilersPlayer().clean_up()
| StarcoderdataPython |
3256772 | <filename>VB_Classes/hit_miss.py
import cv2 as cv
import numpy as np
titleWindow = 'Hit_miss.py'
# 8x8 binary test image (255 = foreground, 0 = background).
input_image = np.array((
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 255, 255, 255, 0, 0, 0, 255],
    [0, 255, 255, 255, 0, 0, 0, 0],
    [0, 255, 255, 255, 0, 255, 0, 0],
    [0, 0, 255, 0, 0, 0, 0, 0],
    [0, 0, 255, 0, 0, 255, 255, 0],
    [0,255, 0, 255, 0, 0, 255, 0],
    [0, 255, 255, 255, 0, 0, 0, 0]), dtype="uint8")
# Hit-or-miss kernel: 1 = must be foreground, -1 = must be background,
# 0 = don't care.  Matches background pixels whose 4-neighbours are set.
kernel = np.array((
    [0, 1, 0],
    [1, -1, 1],
    [0, 1, 0]), dtype="int")
output_image = cv.morphologyEx(input_image, cv.MORPH_HITMISS, kernel)
rate = 50  # upscale factor so the tiny 8x8 images are visible on screen
# Shift kernel values {-1, 0, 1} to {0, 127, 254} so it can be displayed.
kernel = (kernel + 1) * 127
kernel = np.uint8(kernel)
kernel = cv.resize(kernel, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow(titleWindow + " - kernel", kernel)
cv.moveWindow(titleWindow + " - kernel", 0, 0)
input_image = cv.resize(input_image, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow(titleWindow + " - Original", input_image)
cv.moveWindow(titleWindow + " - Original", 0, 200)
output_image = cv.resize(output_image, None , fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow(titleWindow + " - Hit or Miss", output_image)
cv.moveWindow(titleWindow + " - Hit or Miss", 500, 200)
# Block until a key is pressed, then close all display windows.
cv.waitKey(0)
cv.destroyAllWindows()
| StarcoderdataPython |
3247837 | from datetime import datetime
from onegov.core.orm.mixins import content_property, meta_property
from onegov.form import Form
from onegov.org import _
from onegov.org.forms import LinkForm, PageForm
from onegov.org.models.atoz import AtoZ
from onegov.org.models.extensions import (
ContactExtension, NewsletterExtension, PublicationExtension
)
from onegov.org.models.extensions import CoordinatesExtension
from onegov.org.models.extensions import AccessExtension
from onegov.org.models.extensions import PersonLinkExtension
from onegov.org.models.extensions import VisibleOnHomepageExtension
from onegov.org.models.traitinfo import TraitInfo
from onegov.page import Page
from onegov.search import SearchableContent
from sedate import replace_timezone
from sqlalchemy import desc, func, or_, and_
from sqlalchemy.dialects.postgresql import array, JSON
from sqlalchemy.orm import undefer, object_session
from sqlalchemy_utils import observes
class Topic(Page, TraitInfo, SearchableContent, AccessExtension,
            PublicationExtension, VisibleOnHomepageExtension,
            ContactExtension, PersonLinkExtension, CoordinatesExtension):
    """A page in the topic hierarchy; its trait is either 'page' or 'link'."""

    __mapper_args__ = {'polymorphic_identity': 'topic'}

    es_type_name = 'topics'

    lead = content_property()
    text = content_property()
    url = content_property()

    # Show the lead on topics page
    lead_when_child = content_property(default=True)

    @property
    def es_skip(self):
        # Links carry no searchable content of their own.
        return self.meta.get('trait') == 'link'  # do not index links

    @property
    def deletable(self):
        """ Returns true if this page may be deleted. """
        return self.parent is not None

    @property
    def editable(self):
        return True

    @property
    def url_changeable(self):
        """Open for all topics, even root ones."""
        return True

    @property
    def paste_target(self):
        """Page into which copied content is pasted (links paste beside)."""
        if self.trait == 'link':
            return self.parent or self

        if self.trait == 'page':
            return self

        raise NotImplementedError

    @property
    def allowed_subtraits(self):
        """Traits a child of this page may have; links cannot have children."""
        if self.trait == 'link':
            return tuple()

        if self.trait == 'page':
            return ('page', 'link')

        raise NotImplementedError

    def is_supported_trait(self, trait):
        return trait in {'link', 'page'}

    def get_form_class(self, trait, action, request):
        """Return the edit/new form class appropriate for the given trait."""
        if trait == 'link':
            return self.with_content_extensions(LinkForm, request, extensions=[
                AccessExtension,
                VisibleOnHomepageExtension
            ])

        if trait == 'page':
            return self.with_content_extensions(PageForm, request)

        raise NotImplementedError
class News(Page, TraitInfo, SearchableContent, NewsletterExtension,
           AccessExtension, PublicationExtension, VisibleOnHomepageExtension,
           ContactExtension, PersonLinkExtension, CoordinatesExtension):
    """A news item (or the root news page) with year and hashtag filtering."""

    __mapper_args__ = {'polymorphic_identity': 'news'}

    es_type_name = 'news'

    lead = content_property()
    text = content_property()
    url = content_property()

    # Currently active filters; set on transient copies by for_year/for_tag.
    # NOTE(review): these are mutable class attributes -- presumably never
    # mutated in place, only replaced per instance; confirm.
    filter_years = []
    filter_tags = []

    hashtags = meta_property(default=list)

    @observes('content')
    def content_observer(self, files):
        # Keep the persisted hashtags in sync whenever the content changes.
        self.hashtags = self.es_tags or []

    @property
    def absorb(self):
        # Path below the root news page (everything after the first '/').
        return ''.join(self.path.split('/', 1)[1:])

    @property
    def deletable(self):
        # The root news page cannot be deleted.
        return self.parent_id is not None

    @property
    def editable(self):
        return True

    @property
    def url_changeable(self):
        """Open for all topics, even root ones."""
        return self.parent_id is not None

    @property
    def paste_target(self):
        if self.parent:
            return self.parent
        else:
            return self

    @property
    def allowed_subtraits(self):
        # only allow one level of news
        if self.parent is None:
            return ('news', )
        else:
            return tuple()

    def is_supported_trait(self, trait):
        return trait in {'news'}

    def get_root_page_form_class(self, request):
        """Form for editing the root news page (no page content fields)."""
        return self.with_content_extensions(
            Form, request, extensions=(
                ContactExtension, PersonLinkExtension, AccessExtension)
        )

    def get_form_class(self, trait, action, request):
        """Return the edit/new form class for a news item."""
        if trait == 'news':
            if not self.parent and action == 'edit':
                return self.get_root_page_form_class(request)
            form_class = self.with_content_extensions(PageForm, request)
            if hasattr(form_class, 'is_visible_on_homepage'):
                # clarify the intent of this particular flag on the news, as
                # the effect is not entirely the same (news may be shown on the
                # homepage anyway)
                form_class.is_visible_on_homepage.kwargs['label'] = _(
                    "Always visible on homepage")
            return form_class
        raise NotImplementedError

    def for_year(self, year):
        """Return a transient copy with *year* toggled in the year filter."""
        years = set(self.filter_years)
        years = list(years - {year} if year in years else years | {year})
        return News(
            id=self.id,
            title=self.title,
            name=self.name,
            filter_years=sorted(years),
            filter_tags=sorted(self.filter_tags)
        )

    def for_tag(self, tag):
        """Return a transient copy with *tag* toggled in the tag filter."""
        tags = set(self.filter_tags)
        tags = list(tags - {tag} if tag in tags else tags | {tag})
        return News(
            id=self.id,
            title=self.title,
            name=self.name,
            filter_years=sorted(self.filter_years),
            filter_tags=sorted(tags)
        )

    def news_query(self, limit=2, published_only=True):
        """Query child news matching the active filters.

        Returns at most *limit* items, unioned with all "sticky" items
        (is_visible_on_homepage), newest first.
        """
        news = object_session(self).query(News)
        news = news.filter(Page.parent == self)
        if published_only:
            news = news.filter(
                News.publication_started == True,
                News.publication_ended == False
            )
        filter = []
        for year in self.filter_years:
            # Each selected year becomes a [Jan 1, next Jan 1) UTC range.
            start = replace_timezone(datetime(year, 1, 1), 'UTC')
            filter.append(
                and_(
                    News.created >= start,
                    News.created < start.replace(year=year + 1)
                )
            )
        if filter:
            news = news.filter(or_(*filter))
        if self.filter_tags:
            news = news.filter(
                News.meta['hashtags'].has_any(array(self.filter_tags))
            )
        news = news.order_by(desc(News.published_or_created))
        news = news.options(undefer('created'))
        news = news.options(undefer('content'))
        news = news.limit(limit)

        # Sticky items bypass the limit: re-run the same query unlimited,
        # restricted to is_visible_on_homepage == 'true'.
        sticky = func.json_extract_path_text(
            func.cast(News.meta, JSON), 'is_visible_on_homepage') == 'true'

        sticky_news = news.limit(None)
        sticky_news = sticky_news.filter(sticky)

        return news.union(sticky_news).order_by(
            desc(News.published_or_created))

    @property
    def all_years(self):
        """All distinct publication years of child news, newest first."""
        query = object_session(self).query(News)
        query = query.with_entities(
            func.date_part('year', Page.published_or_created))
        query = query.group_by(
            func.date_part('year', Page.published_or_created))
        query = query.filter(Page.parent == self)
        return sorted([int(r[0]) for r in query.all()], reverse=True)

    @property
    def all_tags(self):
        """Sorted union of the hashtags of all child news items."""
        query = object_session(self).query(News.meta['hashtags'])
        query = query.filter(Page.parent == self)
        hashtags = set()
        for result in query.all():
            hashtags.update(set(result[0]))
        return sorted(hashtags)
class AtoZPages(AtoZ):
    """A-to-Z directory of topic pages, filtered by the viewer's role."""

    def get_title(self, item):
        return item.title

    def get_items(self):
        # XXX implement correct collation support on the database level
        topics = sorted(
            self.request.session.query(Topic).all(), key=self.sortkey)
        pages = [topic for topic in topics if topic.trait == 'page']
        if self.request.is_manager:
            return pages
        # Anonymous/member visitors only see public pages.
        return [page for page in pages if page.access == 'public']
| StarcoderdataPython |
3321809 | <filename>PaddleNLP/Research/ACL2019-KTNET/retrieve_concepts/ner_tagging_squad/tagging.py
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright 2019 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This script perform NER tagging for raw SQuAD datasets
# All the named entites found in question and context are recorded with their offsets in the output file
# CoreNLP is used for NER tagging
import os
import json
import argparse
import logging
import urllib
import sys
from tqdm import tqdm
from pycorenlp import StanfordCoreNLP
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def parse_args():
    """Build and parse the command-line arguments of the tagging script."""
    parser = argparse.ArgumentParser()
    options = (
        ("--output_dir", 'output',
         "The output directory to store tagging results."),
        ("--train_file", '../../data/SQuAD/train-v1.1.json',
         "SQuAD json for training. E.g., train-v1.1.json"),
        ("--predict_file", '../../data/SQuAD/dev-v1.1.json',
         "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json"),
    )
    for flag, default, help_text in options:
        parser.add_argument(flag, default=default, type=str, help=help_text)
    return parser.parse_args()
# transform corenlp tagging output into entity list
# some questions begins with whitespaces and they are striped by corenlp, thus begin offset should be added.
def parse_output(text, tagging_output, begin_offset=0):
    """Convert CoreNLP NER output into a list of entity dicts.

    Runs of adjacent tokens sharing the same NER tag are merged into one
    entity; only ORGANIZATION / PERSON / MISC / LOCATION entities are kept.

    Each returned dict has keys ``text``, ``start`` and ``end``; both
    offsets are inclusive and relative to the original, unstripped text.
    ``begin_offset`` compensates for leading whitespace that CoreNLP
    strips before tokenizing.

    The entity-append logic, previously triplicated inline, is factored
    into one helper; entities are also flushed at the end of *every*
    sentence so an entity ending a non-final sentence is not dropped.
    """
    select_states = ['ORGANIZATION', 'PERSON', 'MISC', 'LOCATION']
    entities = []

    def flush(state, start_pos, end_pos):
        # Emit the entity accumulated over [start_pos, end_pos) when its
        # tag is one of the selected types.
        if state in select_states:
            entities.append({
                'text': text[begin_offset + start_pos: begin_offset + end_pos],
                'start': begin_offset + start_pos,
                'end': begin_offset + end_pos - 1,
            })

    for sent in tagging_output['sentences']:
        state = 'O'
        start_pos, end_pos = -1, -1
        for token in sent['tokens']:
            tag = token['ner']
            if tag == 'O' and state != 'O':
                # Current entity run ended on a non-entity token.
                flush(state, start_pos, end_pos)
                state = 'O'
            elif tag != 'O':
                if state == tag:
                    # Same entity continues: extend its span.
                    end_pos = token['characterOffsetEnd']
                else:
                    # A different entity starts: emit the previous one.
                    flush(state, start_pos, end_pos)
                    state = tag
                    start_pos = token['characterOffsetBegin']
                    end_pos = token['characterOffsetEnd']
        # Emit an entity that runs to the end of the sentence.
        flush(state, start_pos, end_pos)
    return entities
def tagging(dataset, nlp):
    """NER-tag every context and question of a SQuAD-style dataset in place.

    Adds a ``context_entities`` list to each paragraph and a
    ``question_entities`` list to each question.  Items whose CoreNLP
    tokenization end offset disagrees with the stripped text length are
    skipped (they get an empty entity list) and counted.
    """
    skip_context_cnt, skip_question_cnt = 0, 0
    for article in tqdm(dataset['data']):
        for paragraph in tqdm(article['paragraphs']):
            context = paragraph['context']
            context_tagging_output = nlp.annotate(urllib.parse.quote(context), properties={'annotators': 'ner', 'outputFormat': 'json'})
            # assert the context length is not changed
            if len(context.strip()) == context_tagging_output['sentences'][-1]['tokens'][-1]['characterOffsetEnd']:
                context_entities = parse_output(context, context_tagging_output, len(context) - len(context.lstrip()))
            else:
                context_entities = []
                skip_context_cnt += 1
                logger.info('Skipped context due to offset mismatch:')
                logger.info(context)
            paragraph['context_entities'] = context_entities
            for qa in tqdm(paragraph['qas']):
                question = qa['question']
                question_tagging_output = nlp.annotate(urllib.parse.quote(question), properties={'annotators': 'ner', 'outputFormat': 'json'})
                if len(question.strip()) == question_tagging_output['sentences'][-1]['tokens'][-1]['characterOffsetEnd']:
                    # BUG FIX: the offset must compensate for the *question*'s
                    # leading whitespace, not the context's -- the original
                    # passed len(context) - len(context.lstrip()) here, which
                    # shifted every question entity by the wrong amount.
                    question_entities = parse_output(question, question_tagging_output, len(question) - len(question.lstrip()))
                else:
                    question_entities = []
                    skip_question_cnt += 1
                    logger.info('Skipped question due to offset mismatch:')
                    logger.info(question)
                qa['question_entities'] = question_entities
    logger.info('In total, {} contexts and {} questions are skipped...'.format(skip_context_cnt, skip_question_cnt))
if __name__ == '__main__':
    args = parse_args()

    # make output directory if not exist
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    # register corenlp server
    # NOTE(review): assumes a CoreNLP server is already listening on
    # localhost:9753 -- verify before running.
    nlp = StanfordCoreNLP('http://localhost:9753')

    # load train and dev datasets
    ftrain = open(args.train_file, 'r', encoding='utf-8')
    trainset = json.load(ftrain)
    fdev = open(args.predict_file, 'r', encoding='utf-8')
    devset = json.load(fdev)

    # Tag both splits and write `<basename>.tagged.json` into output_dir.
    for dataset, path, name in zip((trainset, devset), (args.train_file, args.predict_file), ('train', 'dev')):
        tagging(dataset, nlp)
        output_path = os.path.join(args.output_dir, "{}.tagged.json".format(os.path.basename(path)[:-5]))
        json.dump(dataset, open(output_path, 'w', encoding='utf-8'))
        logger.info('Finished tagging {} set'.format(name))
1732368 | from base64 import b64encode
from tornado_http_auth import DigestAuthMixin, BasicAuthMixin, auth_required
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
# Username -> password map shared by both auth handlers
# (passwords anonymized in this dataset copy).
credentials = {
    'user1': '<PASSWORD>',
    'user2': '<PASSWORD>',
}
class BasicAuthHandler(BasicAuthMixin, RequestHandler):
    """Endpoint protected by HTTP Basic auth; greets the authenticated user."""

    @auth_required(realm='Protected', auth_func=credentials.get)
    def get(self):
        self.write('Hello %s' % self._current_user)
class DigestAuthHandler(DigestAuthMixin, RequestHandler):
    """Endpoint protected by HTTP Digest auth; greets the authenticated user."""

    @auth_required(realm='Protected', auth_func=credentials.get)
    def get(self):
        self.write('Hello %s' % self._current_user)
class AuthTest(AsyncHTTPTestCase):
    """Integration tests for the Basic and Digest auth endpoints."""

    def get_app(self):
        urls = [
            ('/digest', DigestAuthHandler),
            ('/basic', BasicAuthHandler),
        ]
        return Application(urls, http_client=self.http_client)

    def test_digest_auth(self):
        # Unauthenticated requests must be challenged with 401.
        res = self.fetch('/digest')
        self.assertEqual(res.code, 401)
        # TODO: Add digest authentication to HTTPClient in order to test this.

    def test_basic_auth(self):
        # Without credentials: challenged with 401.
        res = self.fetch('/basic')
        self.assertEqual(res.code, 401)
        # With a valid Authorization header: 200 OK.
        auth = '%s:%s' % ('user1', '<PASSWORD>')
        auth = b64encode(auth.encode('ascii'))
        hdr = {'Authorization': 'Basic %s' % auth.decode('utf8')}
        res = self.fetch('/basic', headers=hdr)
        self.assertEqual(res.code, 200)
| StarcoderdataPython |
3314727 | <filename>leaf_focus/ocr/prepare/operation.py<gh_stars>0
from logging import Logger
from pathlib import Path
from leaf_focus.ocr.prepare.component import Component
from leaf_focus.support.location import Location
class Operation:
    """Prepares (thresholds) one PDF page image for OCR."""

    def __init__(self, logger: Logger, base_path: Path):
        self._logger = logger
        self._base_path = base_path
        self._location = Location(logger)
        self._component = Component(logger)

    def run(self, file_hash: str, page: int, threshold: int):
        """Threshold the page image and return the prepared file's path."""
        base = self._base_path
        source = self._location.pdf_page_image_file(base, file_hash, page)
        target = self._location.pdf_page_prepared_file(
            base, file_hash, page, threshold)
        # Make sure the output directory exists before writing the image.
        self._location.create_directory(target.parent)
        # create the image file
        self._component.threshold(source, target, threshold)
        return target
| StarcoderdataPython |
1671328 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, stat
from stat import S_IRWXU, S_IRWXG, S_IROTH, S_IXOTH
from glob import glob
from distutils.core import setup
from distutils.command.build import build as DistutilsBuild
class ExtendedBuild(DistutilsBuild):
    """Build step that first runs `setup.py configure -b`, then marks the
    installed scripts executable before the regular distutils build."""

    def run(self):
        # NOTE(review): the return code of os.system is ignored -- a failed
        # configure step would go unnoticed; confirm this is acceptable.
        os.system("python setup.py configure -b")
        for f in EXECUTABLES:
            # rwx for user and group, r-x for others
            os.chmod(f, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)
        DistutilsBuild.run(self)
# Console scripts installed into <prefix>/bin; ExtendedBuild chmods them.
EXECUTABLES = [
    'database/pokerdatabaseupgrade',
    'pokernetwork/pokerserver',
    'pokernetwork/pokerbot'
]

setup(
    name='poker-network',
    version='2.3.0',
    packages=[
        'pokernetwork',
        'pokernetwork.util',
        'pokernetwork.protocol',
    ],
    package_data={'pokernetwork': ['../twisted/plugins/*.py']},
    data_files=[
        ('bin', EXECUTABLES),
        ('share/poker-network/database', glob('database/*.sql')),
        # NOTE(review): 'conf/poker.pem' is listed twice in this entry.
        ('share/poker-network/conf', ['conf/poker.server.xml', 'conf/poker.bot.xml', 'conf/poker.pem', 'conf/badwords.txt', 'conf/poker.pem']),
        ('share/man/man8', [
            'pokernetwork/pokerserver.8',
            'pokernetwork/pokerbot.8',
            'database/pokerdatabaseupgrade.8'
        ]),
        ('share/man/man5', ['database/pokerdatabase.5'])
    ],
    cmdclass={'build': ExtendedBuild}
)
| StarcoderdataPython |
3277635 | <gh_stars>1-10
# flake8: noqa
# Opentrons Python Protocol API level this protocol targets.
metadata = {"apiLevel": "2.7"}
def run(ctx):
    """Hardware smoke-test protocol: loads several modules and both
    pipettes, then exercises the temperature and magnetic modules."""
    ctx.home()
    magdeck = ctx.load_module("magneticModuleV2", 1)
    magdeck_plate = magdeck.load_labware("nest_96_wellplate_2ml_deep")
    trough = ctx.load_labware("usascientific_12_reservoir_22ml", 2)
    # Name of H/S might be changed
    heater_shaker_1 = ctx.load_module("Heater Shaker Module", 3)
    tempdeck = ctx.load_module("Temperature Module", 4)
    temp_plate = tempdeck.load_labware("opentrons_24_aluminumblock_nest_2ml_snapcap")
    tiprack_1 = ctx.load_labware_by_name("opentrons_96_tiprack_20ul", 5)
    # Name of H/S might be changed
    heater_shaker_2 = ctx.load_module("Heater Shaker Module", 6)
    thermocycler = ctx.load_module("thermocycler")
    reaction_plate = thermocycler.load_labware("nest_96_wellplate_100ul_pcr_full_skirt")
    tiprack_2 = ctx.load_labware_by_name("opentrons_96_tiprack_1000ul", 9)
    # pipettes
    pip1 = ctx.load_instrument("p1000_single_gen2", "left", tip_racks=[tiprack_2])
    pip2 = ctx.load_instrument("p20_multi_gen2", "right", tip_racks=[tiprack_1])
    # Temperature Module Testing: set two targets and report state.
    tempdeck.set_temperature(20)
    ctx.comment(f"temp target {tempdeck.target}")
    ctx.comment(f"temp current {tempdeck.temperature}")
    # ctx.comment(f"temp status {tempdeck.status}")
    tempdeck.set_temperature(30)
    ctx.comment(f"temp target {tempdeck.target}")
    ctx.comment(f"temp current {tempdeck.temperature}")
    pip2.transfer(10, trough.wells("A1"), temp_plate.wells("A1"))
    # Magnetic Module Testing: engage/disengage and transfer while engaged.
    magdeck.engage(height=10)
    ctx.delay(seconds=5)
    magdeck.disengage()
    ctx.comment(f"mag status {magdeck.status}")
    magdeck.engage()
    pip1.transfer(10, trough.wells("A1"), magdeck_plate.wells("A1"))
    pip2.transfer(10, trough.wells("A1"), magdeck_plate.wells("A1"))
    magdeck.disengage()
    # Some thermocycler stuff
    # Some stuff on H/S 1
    # Some stuff at the same time on H/S 2
    # Make sure all H/S commands are run
    # Hope a breaker doesn't blow in my house from running so much at the same time
| StarcoderdataPython |
3269255 | import sys
t = int(sys.stdin.readline().rstrip("\n"))
for _ in range(t):
n = int(sys.stdin.readline().rstrip("\n"))
if n%3==2 or n%9 ==0:
print("TAK")
else:
print("NIE") | StarcoderdataPython |
1742708 | # -*- coding: utf-8 -*-
#
# Copyright 2012 <NAME> (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
Bulbs supports pluggable clients. This is the Rexster client.
"""
from bulbs.config import Config, DEBUG
from bulbs.registry import Registry
from bulbs.utils import get_logger
# specific to this client
from bulbs.json import JSONTypeSystem
from bulbs.base import Client, Response, Result
from bulbs.rest import Request, RESPONSE_HANDLERS
from bulbs.groovy import GroovyScripts
from bulbs.utils import json, build_path, get_file_path, urlsplit, coerce_id
##### Titan
from bulbs.rexster.client import RexsterClient, \
RexsterResponse, RexsterResult
# The default URIs
TITAN_URI = "http://localhost:8182/graphs/graph"
# The logger defined in Config
log = get_logger(__name__)
# Rexster resource paths
# TODO: local path vars would be faster
vertex_path = "vertices"
edge_path = "edges"
index_path = "indices"
gremlin_path = "tp/gremlin"
transaction_path = "tp/batch/tx"
multi_get_path = "tp/batch"
key_index_path = "keyindices"
class TitanResult(RexsterResult):
    """
    Container class for a single result, not a list of results.

    :param result: The raw result.
    :type result: dict

    :param config: The client Config object.
    :type config: Config

    :ivar raw: The raw result.
    :ivar data: The data in the result.
    """
    # Behaviour is inherited unchanged from RexsterResult; the subclass
    # exists so Titan-specific handling can be added without touching
    # the Rexster client.
    pass
class TitanResponse(RexsterResponse):
    """
    Container class for the server response.

    :param response: httplib2 response: (headers, content).
    :type response: tuple

    :param config: Config object.
    :type config: bulbs.config.Config

    :ivar config: Config object.
    :ivar headers: httplib2 response headers, see:
        http://httplib2.googlecode.com/hg/doc/html/libhttplib2.html
    :ivar content: A dict containing the HTTP response content.
    :ivar results: A generator of RexsterResult objects, a single RexsterResult object,
        or None, depending on the number of results returned.
    :ivar total_size: The number of results returned.
    :ivar raw: Raw HTTP response. Only set when log_level is DEBUG.
    """
    # Individual results are wrapped in the Titan-specific result type.
    result_class = TitanResult
class TitanRequest(Request):
    """Makes HTTP requests to Rexster and returns a RexsterResponse."""

    # Responses are wrapped in the Titan-specific response type.
    response_class = TitanResponse
# Mapping from Bulbs property-type names to Titan/Rexster data-type names.
data_type = dict(string="String",
                 integer="Integer",
                 geoshape="Geoshape",)
class TitanClient(RexsterClient):
    """
    Low-level client that sends a request to Titan and returns a response.

    :param config: Optional Config object. Defaults to default Config.
    :type config: bulbs.config.Config

    :cvar default_uri: Default URI for the database.
    :cvar request_class: Request class for the Client.

    :ivar config: Config object.
    :ivar registry: Registry object.
    :ivar scripts: GroovyScripts object.
    :ivar type_system: JSONTypeSystem object.
    :ivar request: TitanRequest object.

    Example:

    >>> from bulbs.titan import TitanClient
    >>> client = TitanClient()
    >>> script = client.scripts.get("get_vertices")
    >>> response = client.gremlin(script, params=None)
    >>> result = response.results.next()

    """
    #: Default URI for the database.
    default_uri = TITAN_URI

    #: Request class used for all HTTP calls.
    request_class = TitanRequest

    def __init__(self, config=None, db_name=None):
        super(TitanClient, self).__init__(config, db_name)
        # Override so the inherited Rexster create_vertex() method doesn't
        # try to index manually; Titan only supports automatic key indices.
        self.config.autoindex = False

    # GET

    # These could replace the Rexster Gremlin version of these methods.

    def _adjacency(self, _id, segment, label=None, limit=None, properties=None):
        """Shared GET helper for all vertex-adjacency endpoints.

        :param _id: Vertex ID.
        :param segment: REST path segment selecting the traversal, e.g.
            ``"out"``, ``"inCount"``, ``"bothIds"``, ``"outE"``.
        :param label: Optional edge-label filter.
        :param limit: Optional maximum number of results.
        :param properties: Optional property filter.
        :rtype: TitanResponse
        """
        path = build_path(vertex_path, _id, segment)
        params = build_params(_label=label, _limit=limit, _properties=properties)
        return self.request.get(path, params)

    # NOTE: the ``start`` parameter on the methods below is accepted for
    # interface compatibility but is not forwarded to the server (this
    # matches the original behavior, where it was silently ignored).

    def outV(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the out-adjacent vertices of vertex ``_id``."""
        return self._adjacency(_id, "out", label, limit, properties)

    def inV(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the in-adjacent vertices of vertex ``_id``."""
        return self._adjacency(_id, "in", label, limit, properties)

    def bothV(self, _id, label=None, start=None, limit=None, properties=None):
        """Return vertices adjacent to vertex ``_id`` in either direction."""
        return self._adjacency(_id, "both", label, limit, properties)

    def outV_count(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the count of out-adjacent vertices of vertex ``_id``."""
        return self._adjacency(_id, "outCount", label, limit, properties)

    def inV_count(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the count of in-adjacent vertices of vertex ``_id``."""
        return self._adjacency(_id, "inCount", label, limit, properties)

    def bothV_count(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the count of vertices adjacent to vertex ``_id``."""
        return self._adjacency(_id, "bothCount", label, limit, properties)

    def outV_ids(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the IDs of out-adjacent vertices of vertex ``_id``."""
        return self._adjacency(_id, "outIds", label, limit, properties)

    def inV_ids(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the IDs of in-adjacent vertices of vertex ``_id``."""
        return self._adjacency(_id, "inIds", label, limit, properties)

    def bothV_ids(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the IDs of vertices adjacent to vertex ``_id``."""
        return self._adjacency(_id, "bothIds", label, limit, properties)

    def outE(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the outgoing edges of vertex ``_id``."""
        return self._adjacency(_id, "outE", label, limit, properties)

    def inE(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the incoming edges of vertex ``_id``."""
        return self._adjacency(_id, "inE", label, limit, properties)

    def bothE(self, _id, label=None, start=None, limit=None, properties=None):
        """Return the edges incident to vertex ``_id`` in either direction."""
        return self._adjacency(_id, "bothE", label, limit, properties)

    # Key Indices

    # Titan-Specific Index Methods
    # https://github.com/thinkaurelius/titan/wiki/Indexing-Backend-Overview
    # https://github.com/thinkaurelius/titan/wiki/Type-Definition-Overview

    def create_edge_label(self, label):
        # TODO: custom gremlin method
        pass

    def create_vertex_property_key(self):
        # TODO: custom gremlin method
        # BUG FIX: ``self`` was missing from the original signature, so any
        # call on an instance raised TypeError.
        pass

    def create_edge_property_key(self):
        # TODO: custom gremlin method
        # BUG FIX: ``self`` was missing from the original signature, so any
        # call on an instance raised TypeError.
        pass

    def create_vertex_key_index(self, key):
        """Create an automatic key index for vertex property ``key``."""
        path = build_path(key_index_path, "vertex", key)
        params = None
        return self.request.post(path, params)

    def create_edge_key_index(self, key):
        """Create an automatic key index for edge property ``key``."""
        path = build_path(key_index_path, "edge", key)
        params = None
        return self.request.post(path, params)

    def get_vertex_keys(self):
        """Return all indexed vertex keys."""
        path = build_path(key_index_path, "vertex")
        params = None
        return self.request.get(path, params)

    def get_edge_keys(self):
        """Return all indexed edge keys."""
        path = build_path(key_index_path, "edge")
        params = None
        return self.request.get(path, params)

    def get_all_keys(self):
        """Return all indexed keys, both vertex and edge."""
        path = key_index_path
        params = None
        return self.request.get(path, params)

    # Index Proxy - General
    # Titan does not expose Rexster-style manual indices, so the generic
    # index-management API is intentionally unimplemented.

    def get_all_indices(self):
        """Returns a list of all the element indices."""
        raise NotImplementedError

    def get_index(self, name):
        raise NotImplementedError

    def delete_index(self, name):
        raise NotImplementedError

    # Index Proxy - Vertex

    def create_vertex_index(self, index_name, *args, **kwds):
        """
        Creates a vertex index with the specified params.

        :param index_name: Name of the index to create.
        :type index_name: str

        :rtype: TitanResponse
        """
        raise NotImplementedError

    def get_vertex_index(self, index_name):
        """
        Returns the vertex index with the index_name.

        :param index_name: Name of the index.
        :type index_name: str

        :rtype: TitanResponse
        """
        raise NotImplementedError

    def get_or_create_vertex_index(self, index_name, index_params=None):
        raise NotImplementedError

    def delete_vertex_index(self, name):
        """
        Deletes the vertex index with the index_name.

        :param name: Name of the index.
        :type name: str

        :rtype: TitanResponse
        """
        raise NotImplementedError

    # Index Proxy - Edge
    # Titan does NOT support edge indices

    def create_edge_index(self, name, *args, **kwds):
        raise NotImplementedError

    def get_edge_index(self, name):
        """
        Returns the edge index with the index_name.

        :param name: Name of the index.
        :type name: str

        :rtype: TitanResponse
        """
        raise NotImplementedError

    def get_or_create_edge_index(self, index_name, index_params=None):
        raise NotImplementedError

    def delete_edge_index(self, name):
        raise NotImplementedError

    # Index Container - Vertex

    def put_vertex(self, index_name, key, value, _id):
        # Titan only supports automatic indices
        raise NotImplementedError

    def lookup_vertex(self, index_name, key, value):
        """
        Returns the vertices indexed with the key and value.

        :param index_name: Name of the index (ignored, see note below).
        :type index_name: str

        :param key: Name of the key.
        :type key: str

        :param value: Value of the key.
        :type value: str

        :rtype: TitanResponse
        """
        # NOTE: this is different than Rexster's version:
        # it uses vertex_path instead of index_path, and index_name is N/A.
        # Keeping the method interface the same for practical reasons, so
        # index_name will be ignored; any value will work.
        path = build_path(vertex_path)
        params = dict(key=key, value=value)
        return self.request.get(path, params)

    def query_vertex(self, index_name, params):
        """Queries for an vertex in the index and returns the Response."""
        path = build_path(index_path, index_name)
        return self.request.get(path, params)

    def remove_vertex(self, index_name, _id, key=None, value=None):
        # Titan only supports automatic indices
        raise NotImplementedError

    # Index Container - Edge
    # Titan does NOT support edge indices

    def put_edge(self, index_name, key, value, _id):
        raise NotImplementedError

    def lookup_edge(self, index_name, key, value):
        """
        Looks up an edge in the index and returns the Response.
        """
        # NOTE: this is different than Rexster's version:
        # it uses edge_path instead of index_path, and index_name is N/A.
        # Keeping the method interface the same for practical reasons, so
        # index_name will be ignored; any value will work.
        #path = build_path(edge_path)
        #params = dict(key=key,value=value)
        #return self.request.get(path,params)
        raise NotImplementedError

    def query_edge(self, index_name, params):
        """Queries for an edge in the index and returns the Response."""
        raise NotImplementedError

    def remove_edge(self, index_name, _id, key=None, value=None):
        raise NotImplementedError

    # Model Proxy - Vertex
    # Titan indexes automatically, so these delegate to the plain
    # create/update methods; index_name and keys are accepted but unused.

    def create_indexed_vertex(self, data, index_name, keys=None):
        """
        Creates a vertex, indexes it, and returns the Response.

        :param data: Property data.
        :type data: dict

        :param index_name: Name of the index.
        :type index_name: str

        :param keys: Property keys to index.
        :type keys: list

        :rtype: TitanResponse
        """
        return self.create_vertex(data)

    def update_indexed_vertex(self, _id, data, index_name, keys=None):
        """
        Updates an indexed vertex and returns the Response.

        :param _id: Vertex ID.
        :type _id: int

        :param data: Property data.
        :type data: dict

        :param index_name: Name of the index.
        :type index_name: str

        :param keys: Property keys to index.
        :type keys: list

        :rtype: TitanResponse
        """
        return self.update_vertex(_id, data)

    # Model Proxy - Edge

    def create_indexed_edge(self, outV, label, inV, data, index_name, keys=None):
        """
        Creates a edge, indexes it, and returns the Response.

        :param outV: Outgoing vertex ID.
        :type outV: int

        :param label: Edge label.
        :type label: str

        :param inV: Incoming vertex ID.
        :type inV: int

        :param data: Property data.
        :type data: dict

        :param index_name: Name of the index.
        :type index_name: str

        :param keys: Property keys to index. Defaults to None (indexes all properties).
        :type keys: list

        :rtype: TitanResponse
        """
        return self.create_edge(outV, label, inV, data)

    def update_indexed_edge(self, _id, data, index_name, keys=None):
        """
        Updates an indexed edge and returns the Response.

        :param _id: Edge ID.
        :type _id: int

        :param data: Property data.
        :type data: dict

        :param index_name: Name of the index.
        :type index_name: str

        :param keys: Property keys to index. Defaults to None (indexes all properties).
        :type keys: list

        :rtype: TitanResponse
        """
        return self.update_edge(_id, data)
# Utils
def build_params(**kwds):
    """Return *kwds* as a dict with all ``None``-valued entries removed.

    Rexster rejects request params whose value is None, so they are
    filtered out before the request is made. Falsy-but-not-None values
    (``0``, ``""``) are kept.

    :rtype: dict
    """
    return {key: value for key, value in kwds.items() if value is not None}
| StarcoderdataPython |
196461 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:gis]
# language: python
# name: conda-env-gis-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import cartopy.crs as ccrs
# from cartopy.io import shapereader
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm, PowerNorm
from matplotlib.collections import PatchCollection
import geopandas
from matplotlib.patches import Polygon
import shapely
import matplotlib as mpl
import pyproj as prj
from osgeo import ogr
# import xarray
# import netCDF4 as cf
# import pandas as pd
# from datetime import datetime
# from calendar import monthrange
# from collections import namedtuple
import numpy as np
# import os
from pyPRMS.ParamDb import ParamDb
# %%
# Location of the NHM parameter database and the national-HRU shapefile
# (Lambert Conformal Conic projection). Paths are machine-specific.
work_dir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/bandit/nhmparamdb_CONUS'
shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_lcc/nhruNationalIdentifier.shp'
# Shapefile attribute holding the national HRU id, used to join parameters.
shape_key='hru_id_nat'
# %%
# Load the parameter database (verify=True checks parameter integrity).
pdb = ParamDb(paramdb_dir=work_dir, verbose=True, verify=True)
# %%
def plot_polygon_collection(ax, geoms, values=None, colormap='Set1', facecolor=None, edgecolor=None,
                            alpha=0.5, linewidth=1.0, **kwargs):
    """Plot a collection of Polygon geometries on *ax* as one PatchCollection.

    Much faster than GeoDataFrame.plot() for large geometry sets; adapted
    from https://stackoverflow.com/questions/33714050/geopandas-plotting-any-way-to-speed-things-up

    :param ax: matplotlib Axes to draw on.
    :param geoms: iterable of shapely Polygon geometries (2-D or 3-D).
    :param values: optional per-polygon values used to color the patches.
    :param colormap: colormap (name or object) applied to *values*.
    :returns: the PatchCollection that was added to *ax*.
    """
    patches = []
    for poly in geoms:
        if poly.has_z:
            # BUG FIX: drop the z coordinate *before* extracting the
            # coordinate array. The original computed the array first, so
            # the rebuilt 2-D polygon was never used and 3-D coordinates
            # leaked into the matplotlib Polygon patch.
            poly = shapely.geometry.Polygon(zip(*poly.exterior.xy))
        a = np.asarray(poly.exterior)
        patches.append(Polygon(a))
    patches = PatchCollection(patches, facecolor=facecolor, linewidth=linewidth, edgecolor=edgecolor, alpha=alpha, **kwargs)
    if values is not None:
        patches.set_array(values)
        patches.set_cmap(colormap)
    ax.add_collection(patches, autolim=True)
    ax.autoscale_view()
    return patches
# %%
# ### Get extent information from the national HRUs shapefile
# Need two shapefiles 1) in projected coordinates, 2) in geographic coordinates
# If gdal is installed can create geographic coordinates from projected with:
# ogr2ogr -t_srs epsg:4326 output_wgs84.shp input.shp
# shpfile = '/Users/pnorton/Projects/National_Hydrology_Model/extraction_requests/20180307_red_river/GIS/HRU_subset_nad83.shp'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/extraction_requests/20180307_red_river/GIS/HRU_subset_usaea.shp'
shpfile = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple/nhruNationalIdentifier.shp'
shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_lcc/nhruNationalIdentifier.shp'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/GIS/NHM_GF_reduced.gdb'
# shpfile_extent = '/Users/pnorton/Projects/National_Hydrology_Model/notebooks/GIS/all_nhru_simple_usaea/nhruNationalIdentifier.shp'
# Name of attribute to use. Change to match the name of the HRU id attribute in the shapefile
shape_key='hru_id_nat'
# Use gdal/ogr to get the extent information
# Shapefile can be in projected coordinates
# Driver can be: OpenFileGDB or ESRI Shapefile
inDriver = ogr.GetDriverByName("ESRI Shapefile")
inDataSource = inDriver.Open(shpfile_extent, 0)  # 0 = read-only
inLayer = inDataSource.GetLayer()
# inLayer = inDataSource.GetLayerByName('nhruNationalIdentifier')
extent = inLayer.GetExtent()  # (minX, maxX, minY, maxY) in projected units
# Get the spatial reference information from the shapefile
spatial_ref = inLayer.GetSpatialRef()
# Create transformation object using projection information from the shapefile
xform = prj.Proj(spatial_ref.ExportToProj4())
west, east, south, north = extent
pad = 100000. # amount to pad the extent values with (in meters)
#east += pad
#west -= pad
#south -= pad
#north += pad
# Project corner coordinates back to lon/lat (inverse transform).
LL_lon, LL_lat = xform(west, south, inverse=True)
UR_lon, UR_lat = xform(east, north, inverse=True)
print('\tExtent: ({0:f}, {1:f}, {2:f}, {3:f})'.format(west, east, south, north))
print('\tExtent: (LL: [{}, {}], UR: [{}, {}])'.format(LL_lon, LL_lat, UR_lon, UR_lat))
extent_dms = [LL_lon, UR_lon, LL_lat, UR_lat]
# Matplotlib basemap requires the map center (lon_0, lat_0) be in decimal degrees
# and yet the corners of the extent can be in projected coordinates
cen_lon, cen_lat = xform((east+west)/2, (south+north)/2, inverse=True)
print('cen_lon: {}'.format(cen_lon))
print('cen_lat: {}'.format(cen_lat))
# %%
print(spatial_ref)
# %%
# Read the shapefile
hru_df = geopandas.read_file(shpfile_extent)
# %%
hru_df.crs.coordinate_operation.method_code
# %%
hru_df.crs.coordinate_operation.params
# %%
# Collect the projection parameters by name; reused later when building
# the cartopy CRS.
aa = {}
for yy in hru_df.crs.coordinate_operation.params:
    aa[yy.name] = yy.value
    print(yy.name, yy.value)
# %%
# Bounding box of all geometries in projected coordinates.
minx, miny, maxx, maxy = hru_df.geometry.total_bounds
# %%
# Parameter to visualize; time_index selects the month for nhru x nmonths
# parameters.
the_var = 'tstorm_mo'
time_index = 0
# param_var = pdb.parameters.get_dataframe(the_var).iloc[:]
# Use the following for nhru x nmonths parameters
param_var = pdb.parameters.get_dataframe(the_var).iloc[:,time_index].to_frame(name=the_var)
param_var.head()
# param_var = pdb.parameters.get_dataframe(the_var)
# param_var.head()
# %%
# Create the colormap
# cmap = 'BrBG' #'GnBu_r' # for snow
# cmap = 'GnBu_r'
# cmap = 'jet'
# Pick a colormap appropriate to the variable family (diverging for
# temperature-like variables, sequential for precipitation-like ones).
if the_var in ['tmax_allsnow', 'tmax_allrain_offset']:
    cmap = 'RdBu_r'
elif the_var in ['net_ppt', 'net_rain', 'net_snow']:
    cmap = 'YlGnBu'
elif the_var in ['tmax_cbh_adj', 'tmin_cbh_adj']:
    cmap = 'coolwarm'
else:
    cmap = 'jet'
# create the colormap if a list of names is given, otherwise
# use the given colormap
lscm = mpl.colors.LinearSegmentedColormap
if isinstance(cmap,(list,tuple)):
    cmap = lscm.from_list('mycm', cmap)
else:
    cmap = plt.get_cmap(cmap)
missing_color = '#ff00cb' # pink/magenta
# Get the min and max values for the variable
max_val = param_var.max().max()
min_val = param_var.min().min()
# Override for tmax_allsnow
# max_val = 35.8
# min_val = 28.2
# norm = PowerNorm(gamma=0.05)
# norm = LogNorm(vmin=min_val, vmax=max_val)
# Choose a normalization: log scale for precipitation variables whose
# minimum is zero, symmetric range for temperature adjustments.
if min_val == 0.:
    if the_var in ['net_ppt', 'net_rain', 'net_snow']:
        cmap.set_under(color='None')
        norm = LogNorm(vmin=0.000001, vmax=max_val)
    else:
        norm = Normalize(vmin=0.000001, vmax=max_val)
else:
    if the_var in ['tmax_allsnow', 'tmax_allrain_offset']:
        norm = Normalize(vmin=min_val, vmax=max_val)
    elif the_var in ['tmax_cbh_adj', 'tmin_cbh_adj']:
        norm = Normalize(vmin=-max_val, vmax=max_val)
    else:
        norm = Normalize(vmin=min_val, vmax=max_val)
# %%
print(max_val)
print(min_val)
# %%
# This takes care of multipolygons that are in the NHM geodatabase/shapefile
geoms_exploded = hru_df.explode().reset_index(level=1, drop=True)
# xdf_df = xdf[the_var][2].to_dataframe()
df_mrg = geoms_exploded.merge(param_var, left_on=shape_key, right_index=True, how='left')
# Build the cartopy CRS matching the shapefile's projection (EPSG method
# code 9822 = Albers Equal Area, 9802 = Lambert Conformal Conic).
if '9822' in hru_df.crs.coordinate_operation.method_code:
    # Albers Equal Area
    aa = {}
    for yy in hru_df.crs.coordinate_operation.params:
        aa[yy.name] = yy.value
    crs_proj = ccrs.AlbersEqualArea(central_longitude=aa['Longitude of false origin'],
                                    central_latitude=aa['Latitude of false origin'],
                                    standard_parallels=(aa['Latitude of 1st standard parallel'],
                                                        aa['Latitude of 2nd standard parallel']),
                                    false_easting=aa['Easting at false origin'],
                                    false_northing=aa['Northing at false origin'])
elif '9802' in hru_df.crs.coordinate_operation.method_code:
    # Lambert Conformal Conic
    # NOTE(review): this branch reuses the ``aa`` dict built in an earlier
    # cell rather than rebuilding it here like the 9822 branch does —
    # works when the cells run in order, but is fragile; verify intent.
    crs_proj = ccrs.LambertConformal(central_latitude=aa['Latitude of false origin'],
                                     central_longitude=aa['Longitude of false origin'],
                                     standard_parallels=(aa['Latitude of 1st standard parallel'],
                                                         aa['Latitude of 2nd standard parallel']),
                                     false_easting=aa['Easting at false origin'],
                                     false_northing=aa['Northing at false origin'])
else:
    # We're gonna crash
    pass
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(30,20))
ax = plt.axes(projection=crs_proj)
ax.coastlines()
ax.gridlines()
# ax.set_extent(extent_dms)
ax.set_extent([minx, maxx, miny, maxy], crs=crs_proj)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(df_mrg[the_var])
plt.colorbar(mapper, shrink=0.6)
# plt.title('Variable: {}, Date: {}'.format(the_var, xdf_df['time'].iloc[0].isoformat()))
plt.title(f'Variable: {the_var}, Month: {time_index+1}')
# plt.title('Variable: {}'.format(the_var))
col = plot_polygon_collection(ax, df_mrg.geometry, values=df_mrg[the_var], colormap=cmap, norm=norm, linewidth=0.0)
# plt.savefig(f'/Users/pnorton/tmp/v1_figs/{the_var}_v11_{time_index+1:02}.png', dpi=300, bbox_inches='tight')
# %%
# Re-plot a different month by updating the existing collection in place.
# xdf_df = xdf[the_var][4].to_dataframe()
time_index = 5
param_var = pdb.parameters.get_dataframe(the_var).iloc[:,time_index].to_frame(name=the_var)
df_mrg = geoms_exploded.merge(param_var, left_on='hru_id_nat', right_index=True, how='left')
# ax.set_title('Variable: {}, Date: {}'.format(the_var, xdf_df['time'].iloc[0].isoformat()))
ax.set_title(f'Variable: {the_var}, Month: {time_index+1}')
col.set_array(df_mrg[the_var])
fig
# plt.savefig(f'/Users/pnorton/tmp/v1_figs/{the_var}_v11_{time_index+1:02}.png', dpi=300, bbox_inches='tight')
# %%
# %%
# %%
| StarcoderdataPython |
69253 | import numpy as np
import pytest
from nlp_profiler.granular_features.numbers import \
NaN, gather_whole_numbers, count_whole_numbers # noqa
# Fixture: a sentence containing exactly one whole number.
text_with_a_number = '2833047 people live in this area'

# Invalid (non-string) inputs that the parser should turn into an
# empty list of numbers.
text_to_return_value_mapping = [
    (np.nan, []),
    (float('nan'), []),
    (None, []),
]
@pytest.mark.parametrize("text,expected_result",
                         text_to_return_value_mapping)
def test_given_invalid_text_when_parsed_then_return_empty_list(
        text: str, expected_result: str
):
    """NaN/None inputs must parse to an empty list, never raise."""
    # given, when
    actual_result = gather_whole_numbers(text)

    # then
    assert expected_result == actual_result, \
        f"Expected: {expected_result}, Actual: {actual_result}"
# Invalid (non-string) inputs for which the counter should return the
# library's NaN sentinel rather than a count.
text_to_return_count_mapping = [
    (np.nan, NaN),
    (float('nan'), NaN),
    (None, NaN),
]
@pytest.mark.parametrize("text,expected_result",
                         text_to_return_count_mapping)
def test_given_invalid_text_when_counted_then_return_NaN(
        text: str, expected_result: float
):
    """Counting numbers in NaN/None input must yield the NaN sentinel.

    Identity (``is``) is used deliberately: the function is expected to
    return the very NaN object exported by nlp_profiler.
    """
    # given, when
    actual_result = count_whole_numbers(text)

    # then
    assert expected_result is actual_result, \
        f"Expected: {expected_result}, Actual: {actual_result}"
def test_given_a_text_with_numbers_when_parsed_then_return_only_the_numbers():
    """A text containing one whole number parses to exactly that number."""
    actual_results = gather_whole_numbers(text_with_a_number)
    expected_results = ['2833047']
    assert expected_results == actual_results, (
        "Didn't find the number '2833047' in the text"
    )
def test_given_a_text_with_a_number_when_counted_then_return_count_of_numbers_found():
    """Counting numbers in a text with exactly one number yields 1."""
    number_count = count_whole_numbers(text_with_a_number)
    assert number_count == 1, (
        "Didn't find the expected single number in the text"
    )
| StarcoderdataPython |
115921 | import functools
class Parity:
    """Bitwise parity utilities.

    Find the parity of an integer's binary representation (EPI Ch. 1):
    parity is 1 if the number of set bits is odd, 0 otherwise.
    """

    def __init__(self, x):
        # The integer whose bits are analyzed.
        self._x = x

    def set_x(self, x):
        """Replace the stored integer."""
        self._x = x

    def print_result(fun):
        """Class-body decorator: print x and the result in decimal and binary.

        BUG FIX: the wrapper now returns the wrapped function's result;
        previously it was discarded, so decorated methods returned None.
        """
        @functools.wraps(fun)
        def wrap(self):
            result = fun(self)
            print(f"{'x':<8}:{self._x :>6}")
            print(f"{'result':<8}:{result :>5}")
            print(f"{'x':<8}:{bin(self._x) :>5}")
            print(f"{'result':<8}:{bin(result) :>5}")
            return result
        return wrap

    @print_result
    def parity_bruteforce(self):
        """O(n) parity: XOR every bit into the accumulator."""
        result = 0
        var = self._x
        while var:
            result ^= var & 1
            var >>= 1
        return result

    def parity_dropbits(self):
        """O(k) parity, k = number of set bits: drop the lowest set bit
        each iteration and toggle the accumulator.

        BUG FIX: the original toggled with ``result ^= 0`` (a no-op), so
        it always returned 0.
        """
        result = 0
        var = self._x
        while var:
            result ^= 1
            var &= var - 1
        return result

    def drop_lowest(self):
        """Demonstrate x & (x-1): repeatedly drop the lowest set bit."""
        count = 0
        var = self._x
        while var:
            print(f"{'x' : <5} : {bin(var)}")
            print(f"{'x-1' :<5}: {bin(var - 1)}")
            var = var & (var - 1)
            count += 1
        print(f"drop lowest 1s for {count} times")

    def drop_lowest2(self):
        """Observe how x & ~(x-1) isolates the lowest set bit (5 rounds)."""
        count = 0
        var = self._x
        while count < 5:
            print(bin(var))
            print(bin(~(var - 1)))
            var = var & ~(var - 1)
            print(f"result {bin(var)}")
            count += 1
        print(f"count: {count}")

    def variant1(self):
        """Fill all bits below the lowest set bit with 1.

        ex. 101000 -> 101111
        """
        var = self._x
        var |= var - 1
        return var

    def variant2(self):
        """x modulo the largest power of 2 not exceeding x.

        ex. input: 77, output: 13

        BUG FIX: ``pre_val`` is now initialized, so x == 0 returns 0
        instead of raising NameError.
        """
        val = self._x
        pre_val = 0
        while val:
            pre_val = val
            val = val & (val - 1)
        return pre_val ^ self._x

    def variant3(self):
        """Return True if x is a power of 2 (note: also True for x == 0).

        BUG FIX: the original compared with ``is 0`` — an identity check
        on an int, which is a SyntaxWarning and implementation-dependent.
        """
        return self._x & (self._x - 1) == 0
def main():
    """Demo entry point: print the brute-force parity of a sample value."""
    parity = Parity(127)
    parity.parity_bruteforce()


if __name__ == "__main__":
    main()
1728931 | # Author: bbrighttaer
# Project: masdcop
# Date: 5/13/2021
# Time: 4:34 AM
# File: env.py
from masdcop.agent import SingleVariableAgent
from masdcop.algo import PseudoTree, SyncBB
import numpy as np
class FourQueens:
    """4-queens problem modeled as a DCOP solved with synchronous
    branch-and-bound over a pseudo-tree of single-variable agents."""

    def __init__(self, num_agents):
        # Maximum allowed cost for SyncBB (0 = only violation-free solutions).
        self._max_cost = 0
        # Each variable's domain: every cell of the 4x4 board as (row, col).
        self.domain = [(i, j) for i in range(0, 4) for j in range(0, 4)]
        # One agent per queen; agent ids start at 1.
        self.agents = [SingleVariableAgent(i + 1, self.domain) for i in range(num_agents)]
        self.pseudo_tree = PseudoTree(self.agents)
        self.algo = SyncBB(self.check_constraints, self.pseudo_tree, self._max_cost)
        # Occupancy grid: cell value = number of queens on that cell.
        self.grid = np.zeros((4, 4))
        # Last assigned position per agent id, so a reassignment can be undone.
        self.history = {}

    def check_constraints(self, *args) -> bool:
        """Apply the partial assignment *args* (pairs of (agent_id, cell))
        to the grid and return True iff no row/column/diagonal constraint
        is violated."""
        # update grid and history
        cleared = []
        for var in args:
            # If this agent had a previous position, clear it first
            # (but only once per cell, hence the ``cleared`` list).
            if var[0] in self.history and self.history[var[0]] not in cleared:
                self.grid[self.history[var[0]]] = 0
                cleared.append(self.history[var[0]])
            self.grid[var[1]] += 1
            self.history[var[0]] = var[1]

        # check for violations: any row sum > 1, column sum > 1, or a
        # diagonal/anti-diagonal with more than one queen fails.
        if (not np.sum(np.sum(self.grid, 1) > 1) == 0) or \
                (not np.sum(np.sum(self.grid, 0) > 1) == 0) or \
                not _diags_check(self.grid) or not _diags_check(np.fliplr(self.grid)):
            return False
        return True

    def resolve(self, verbose=False):
        """Run SyncBB message passing until the algorithm terminates or
        no current agent remains."""
        self.algo.initiate(self.pseudo_tree.next())
        while not self.algo.terminate:
            agent = self.pseudo_tree.getCurrentAgent()
            if agent:
                self.algo.receive(agent)
                self.algo.sendMessage(agent)
            else:
                break
def _diags_check(m) -> bool:
"""
Checks for diagonals constraint violation.
:return: bool
True if all constraints are satisfied else False
"""
for i in range(m.shape[0]):
if np.trace(m, i) > 1 or np.trace(m, -1) > 1:
return False
return True
| StarcoderdataPython |
3356702 | import json
import unittest
from tests.constants import TESTING_KEYSTORE_FOLDER, TESTING_TEMP_FOLDER
from raiden_installer.account import Account
from raiden_installer.ethereum_rpc import make_web3_provider
from raiden_installer.network import Network
class AccountBaseTestCase(unittest.TestCase):
    """Shared fixture: creates a fresh keystore account per test and
    removes its keyfile afterwards."""

    def setUp(self):
        self.passphrase = "<PASSWORD>"
        self.account = Account.create(TESTING_KEYSTORE_FOLDER, self.passphrase)

    def tearDown(self):
        # The keyfile may already have been deleted by the test itself.
        try:
            self.account.keystore_file_path.unlink()
        except FileNotFoundError:
            pass
class AccountTestCase(AccountBaseTestCase):
    """Keystore lookup and basic account behavior."""

    def test_account_can_get_address(self):
        self.assertIsNotNone(self.account.address)

    def test_finding_keystore_file_path(self):
        path = Account.find_keystore_file_path(self.account.address, TESTING_KEYSTORE_FOLDER)
        self.assertEqual(path, self.account.keystore_file_path)

    def test_cannot_find_keystore_in_non_existent_directory(self):
        path = TESTING_KEYSTORE_FOLDER.joinpath("non", "existent", "path")
        keystore_file_path = Account.find_keystore_file_path(
            self.account.address, path
        )
        self.assertIsNone(keystore_file_path)

    def test_cannot_find_non_existent_keyfile(self):
        self.account.keystore_file_path.unlink()
        keystore_file_path = Account.find_keystore_file_path(
            self.account.address, TESTING_KEYSTORE_FOLDER
        )
        self.assertIsNone(keystore_file_path)

    def test_cannot_find_keyfile_without_read_permission(self):
        # chmod(0) removes all permissions, making the keyfile unreadable.
        self.account.keystore_file_path.chmod(0)
        keystore_file_path = Account.find_keystore_file_path(
            self.account.address, TESTING_KEYSTORE_FOLDER
        )
        self.assertIsNone(keystore_file_path)

    def test_cannot_find_keyfile_with_invalid_content(self):
        # Valid JSON, but not a valid keyfile schema.
        with self.account.keystore_file_path.open("w") as keyfile:
            json.dump(dict(invalid="keyfile"), keyfile)
        keystore_file_path = Account.find_keystore_file_path(
            self.account.address, TESTING_KEYSTORE_FOLDER
        )
        self.assertIsNone(keystore_file_path)

    def test_cannot_find_keyfile_with_non_json_content(self):
        with self.account.keystore_file_path.open("w") as keyfile:
            keyfile.write("This is no JSON")
        keystore_file_path = Account.find_keystore_file_path(
            self.account.address, TESTING_KEYSTORE_FOLDER
        )
        self.assertIsNone(keystore_file_path)

    def test_has_no_content_when_keystore_file_does_not_exist(self):
        path = TESTING_KEYSTORE_FOLDER.joinpath("non", "existent", "path")
        account = Account(path)
        self.assertIsNone(account.content)

    def test_can_get_web3_provider(self):
        web3_provider = make_web3_provider("http://localhost:8545", self.account)
        self.assertIsNotNone(web3_provider)

    def test_cannot_run_funding_on_mainnet(self):
        # Faucet funding is only implemented for test networks.
        network = Network.get_by_name("mainnet")
        with self.assertRaises(NotImplementedError):
            network.fund(self.account)
class LockedAccountTestCase(AccountBaseTestCase):
    """Unlock semantics of an account re-loaded from its keystore file."""

    def setUp(self):
        super().setUp()
        # Re-open the keystore without the passphrase: a locked account.
        keystore_file_path = self.account.keystore_file_path
        self.locked_account = Account(keystore_file_path)

    def test_cannot_get_private_key_without_passphrase(self):
        with self.assertRaises(ValueError):
            self.locked_account.private_key

    def test_can_unlock_private_key(self):
        self.locked_account.unlock(self.passphrase)
        try:
            self.locked_account.private_key
        except ValueError:
            self.fail("should have unlocked private key")

    def test_cannot_unlock_with_wrong_password(self):
        with self.assertRaises(ValueError):
            self.locked_account.unlock("wrong" + self.passphrase)
class AccountCreationTestCase(unittest.TestCase):
    """Account creation without an explicit passphrase."""

    def setUp(self):
        # No passphrase given: a random one should be generated.
        self.account = Account.create(TESTING_KEYSTORE_FOLDER)

    def test_create_account_with_random_password(self):
        self.assertIsNotNone(self.account.passphrase)
        self.assertGreater(len(self.account.passphrase), 0)

    def tearDown(self):
        self.account.keystore_file_path.unlink()
| StarcoderdataPython |
3303410 | <reponame>bdemin/M113_Visualization
from vtk import vtkTextActor
def draw_text(_input):
text_actor = vtkTextActor()
text_actor.SetInput(_input)
text_prop = text_actor.GetTextProperty()
text_prop.SetFontFamilyToArial()
text_prop.SetFontSize(34)
text_prop.SetColor(1,1,1)
text_actor.SetDisplayPosition(80,900)
return text_actor | StarcoderdataPython |
114498 | <reponame>Microsoft/SkillsExtractorCognitiveSearch
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from collections import defaultdict
import itertools
from pathlib import Path
import srsly
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.language import Language
from spacy.pipeline import EntityRuler
class SkillsExtractor:
    """Extracts skills from text using SpaCy's EntityRuler Component"""

    def __init__(self, nlp: Language, data_path: Path = Path("data")):
        self.nlp = nlp
        self.data_path = data_path
        # Skill catalog keyed by skill id, loaded from data/skills.json.
        self.skills = self._get_skills()
        patterns = self._build_patterns(self.skills)
        extra_patterns = self._get_extra_skill_patterns()
        ruler = EntityRuler(nlp, overwrite_ents=True)
        # Combine generated and user-supplied patterns lazily.
        ruler.add_patterns(itertools.chain(patterns, extra_patterns))
        if not self.nlp.has_pipe("skills_ruler"):
            self.nlp.add_pipe(ruler, name="skills_ruler")

    def _get_skills(self):
        """Query skills from skills collection"""
        skills_path = self.data_path/"skills.json"
        skills = srsly.read_json(skills_path)
        return skills

    def _get_extra_skill_patterns(self):
        """Load extra user added skill patterns"""
        extra_patterns_path = self.data_path/"extra_skill_patterns.jsonl"
        extra_skill_patterns = srsly.read_jsonl(extra_patterns_path)
        return extra_skill_patterns

    def _skill_pattern(self, skill: str, split_token: str = None):
        """Create a single skill pattern"""
        pattern = []
        if split_token:
            split = skill.split(split_token)
        else:
            split = skill.split()
        for b in split:
            if b:
                # NOTE(review): b.upper() == skill only matches when the
                # token equals the whole (all-caps) skill name — i.e.
                # single-token acronyms get exact-case matching, everything
                # else is matched case-insensitively. Confirm this is the
                # intended behavior for multi-token acronyms.
                if b.upper() == skill:
                    pattern.append({"TEXT": b})
                else:
                    pattern.append({"LOWER": b.lower()})
        return pattern

    def _build_patterns(self, skills: list, create: bool = False):
        """Build all matcher patterns"""
        # Patterns are cached on disk; pass create=True to force a rebuild.
        patterns_path = self.data_path/"skill_patterns.jsonl"
        if not patterns_path.exists() or create:
            """Build up lists of spacy token patterns for matcher"""
            patterns = []
            # Tokens that commonly embed in skill names (e.g. "node.js",
            # "ci/cd"); a variant pattern is emitted per occurrence.
            split_tokens = [".", "/", "-"]
            for skill_id, skill_info in skills.items():
                aliases = skill_info['aliases']
                sources = skill_info['sources']
                # Collect every known surface form of the skill.
                skill_names = set()
                for al in aliases:
                    skill_names.add(al)
                for source in sources:
                    if "displayName" in source:
                        skill_names.add(source["displayName"])
                for name in skill_names:
                    # Keep all-caps names (acronyms) verbatim; lowercase
                    # everything else.
                    if name.upper() == name:
                        skill_name = name
                    else:
                        skill_name = name.lower().strip()
                    if skill_name not in STOP_WORDS:
                        pattern = self._skill_pattern(skill_name)
                        if pattern:
                            # The label encodes the skill id for later
                            # recovery in extract_skills().
                            label = f"SKILL|{skill_id}"
                            patterns.append({"label": label, "pattern": pattern})
                            for t in split_tokens:
                                if t in skill_name:
                                    patterns.append(
                                        {
                                            "label": label,
                                            "pattern": self._skill_pattern(
                                                skill_name, t
                                            ),
                                        }
                                    )
            srsly.write_jsonl(patterns_path, patterns)
            return patterns
        else:
            patterns = srsly.read_jsonl(patterns_path)
            return patterns

    def extract_skills(self, text: str):
        """Extract skills from text unstructured text"""
        doc = self.nlp(text)
        # skill_id -> {"matches": [...], displayName, short/longDescription,
        #              "sources": [...]}
        found_skills = defaultdict(lambda: defaultdict(list))
        for ent in doc.ents:
            if "|" in ent.label_:
                ent_label, skill_id = ent.label_.split("|")
                if ent_label == "SKILL" and skill_id:
                    found_skills[skill_id]["matches"].append(
                        {
                            "start": ent.start_char,
                            "end": ent.end_char,
                            "label": ent_label,
                            "text": ent.text,
                        }
                    )
                    try:
                        skill_info = self.skills[skill_id]
                        sources = skill_info['sources']

                        # Some sources have better Skill Descriptions than others.
                        # This is a simple heuristic for cascading through the sources
                        # to pick the best description available per skill
                        main_source = sources[0]
                        for source in sources:
                            if source["sourceName"] == "Github Topics":
                                main_source = source
                                break
                            elif source["sourceName"] == "Microsoft Academic Topics":
                                main_source = source
                                break
                            elif source["sourceName"] == "Stackshare Skills":
                                main_source = source
                                break
                    except KeyError:
                        # This happens when a pattern defined in data/extra_skill_patterns.jsonl
                        # is matched. The skill is not added to data/skills.json so there's no
                        # extra metadata about the skill from an established source.
                        sources = []
                        main_source = {
                            "displayName": ent.text,
                            "shortDescription": "",
                            "longDescription": ""
                        }

                    keys = ["displayName", "shortDescription", "longDescription"]
                    for k in keys:
                        found_skills[skill_id][k] = main_source[k]

                    found_skills[skill_id]["sources"] = [
                        {"name": s["sourceName"], "url": s["url"]} for s in sources
                    ]
        return found_skills
| StarcoderdataPython |
69362 | <reponame>Lilja/moto<gh_stars>0
import os

from .models import sns_backends
from ..core.models import base_decorator

# Default region used to select the module-level convenience backend.
region_name = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
sns_backend = sns_backends[region_name]
# Decorator/context manager that mocks SNS for all regions.
mock_sns = base_decorator(sns_backends)
| StarcoderdataPython |
100849 | <reponame>jamesjiang52/Bitwise<gh_stars>0
import bitwise as bw
class TestWire:
    """Smoke test for the bitwise Wire primitive."""

    def test_Wire(self):
        wire = bw.wire.Wire()
        # A new wire defaults to 0 and reflects value assignments.
        assert wire.value == 0
        wire.value = 1
        assert wire.value == 1
        wire.value = 0
        assert wire.value == 0
        print(wire.__doc__)
        print(wire)
        # Wires are also callable setters: wire(value=1) == wire.value = 1.
        wire(value=1)
        assert wire.value == 1
28309 | <filename>bin/vigilance-server.py
#!/usr/bin/python3
from prometheus_client import start_http_server, Gauge
import urllib.request
import random
from datetime import datetime
import re
import time
# When True, read a local fixture file instead of fetching the live feed.
test = False
# Risk phenomenon names, indexed by (phenomene code - 1) from the feed.
risks = ["vent violent", "pluie-inondation", "orages", "inondation", "neige-verglas", "canicule", "grand-froid", "avalanches", "vagues-submersion"]
# Maps a (dept, risk, startZ, endZ) tuple to the round in which it was last set
cache = {}
# Create metrics to track time spent and requests made.
gauge_full = Gauge('meteorological_risk_full', 'Weather risk', ['dept', 'risk', 'startZ', 'endZ'])
gauge = Gauge('meteorological_risk', 'Weather risk', ['dept', 'risk'])
def getTimeHash():
    """Return an integer that changes every minute (used as a cache-busting URL suffix).

    The value is a rough minute count (365-day years, 30-day months) — it only
    needs to be distinct from one minute to the next, not calendar-accurate.
    """
    now = datetime.now()
    day_number = now.year * 365 + now.month * 30 + now.day
    return day_number * 24 * 60 + now.hour * 60 + now.minute
def getStream():
    """Return a readable stream over the vigilance XML, or None on download failure.

    When the module-level *test* flag is set, a local fixture file is opened
    instead of the live vigimeteo feed.
    """
    url = "http://www.vigimeteo.com/data/NXFR49_LFPW_.xml?{}".format(getTimeHash())
    if test:
        return open('test/jaune-vent-violent+littoral-vagues.xml')
    try:
        return urllib.request.urlopen(url)
    except urllib.error.URLError as e:
        print(f'Error fetching URL: {e}')
        return None
def getVigilanceData():
    """Parse PHENOMENE entries from the vigilance feed.

    Returns a list of dicts with string keys dept/risk/level/start/end;
    returns an empty list when the feed could not be fetched.
    """
    pattern = re.compile(r'<PHENOMENE departement="(?P<dept>\w+)" phenomene="(?P<risk>\d+)" couleur="(?P<level>\d)" dateDebutEvtTU="(?P<start>\d{14})" dateFinEvtTU="(?P<end>\d{14})"/>')
    stream = getStream()
    if stream is None:
        return []
    entries = []
    for raw in stream:
        try:
            line = raw.decode('utf-8')
        except AttributeError:
            # Already str (the local test fixture is opened in text mode).
            line = raw
        match = pattern.match(line)
        if match:
            entries.append(match.groupdict())
    return entries
def latestVigilanceMetrics(gauge, cacheRound):
    """Fetch the current vigilance data and publish it to the Prometheus gauges.

    Every event window is exported on the module-level ``gauge_full`` (one series
    per (dept, risk, startZ, endZ)); the per-(dept, risk) ``gauge`` argument gets
    the maximum level across windows. Expired events (end <= now, UTC) are
    published with level 0. Each seen key is stamped into the module-level
    ``cache`` with *cacheRound* so checkDeadCacheEntries can zero dropped series.

    Fix: the original signature used the *type objects* ``Gauge`` and ``int`` as
    default values (``gauge=Gauge, cacheRound=int``) — calling with defaults
    would crash; both parameters are now required, matching every call site.
    """
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    deptRiskLevelMap = {}
    for result in getVigilanceData():
        # Expired windows are exported as level 0 so dashboards clear.
        level = int(result['level']) if result['end'] > now else 0
        risk = risks[int(result['risk']) - 1]
        dept = result['dept']
        key = (dept, risk, result['start'], result['end'])
        cache[key] = cacheRound
        gauge_full.labels(dept=dept, risk=risk, startZ=result['start'], endZ=result['end']).set(level)
        # Aggregate gauge keeps only the highest level per (dept, risk).
        current = deptRiskLevelMap.get((dept, risk))
        if current is None or level > current:
            deptRiskLevelMap[(dept, risk)] = level
            gauge.labels(dept=dept, risk=risk).set(level)
        print(f'{key!r} --> {level}, added to cache with round {cacheRound}')
def checkDeadCacheEntries(gauge, cacheRound):
    '''
    Checks if a particular combination has been dropped from the output
    produced by vigimeteo. We need to zero these entries else they will stay stuck
    at whatever their last value was.

    Entries whose stored round differs from *cacheRound* were not refreshed this
    pass: their series is set to 0 and the cache entry removed.

    Fix: the original signature used the *type objects* ``Gauge`` and ``int`` as
    default values (``gauge=Gauge, cacheRound=int``) — calling with defaults
    would crash; both parameters are now required, matching every call site.
    '''
    # Iterate over a snapshot because entries are deleted while looping.
    for key, lastRound in list(cache.items()):
        if lastRound != cacheRound:
            print(f'{key!r} --> {0}, deleting cache entry')
            gauge.labels(dept=key[0], risk=key[1], startZ=key[2], endZ=key[3]).set(0)
            del cache[key]
if __name__ == '__main__':
    # Start up the server to expose the metrics.
    start_http_server(9696)
    # cacheRound alternates between 0 and 1 each pass; entries not re-stamped
    # with the current round are recognised as stale by checkDeadCacheEntries.
    cacheRound = 0
    while True:
        cacheRound = 1 - cacheRound
        print(f'Starting new round… (index {cacheRound})')
        latestVigilanceMetrics(gauge, cacheRound)
        checkDeadCacheEntries(gauge, cacheRound)
        print('Round completed.')
        # Refresh hourly.
        time.sleep(3600)
| StarcoderdataPython |
111304 | from datetime import datetime
import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.orm import sessionmaker
from srs_sqlite import db
from srs_sqlite.databases import SrsRecord
from srs_sqlite.util import get_url_images_in_text
# Engine over the on-disk source SQLite database (path resolved relative to CWD).
engine = create_engine('sqlite:///' + os.path.abspath('../user/PathoKnowledge.db'))
# Separate declarative base for the source table, independent of the app's `db`.
Base = declarative_base()
# Session factory bound to the source database.
Session = sessionmaker(bind=engine)
def get_image_only():
    """Copy records whose front contains an image URL into the app database.

    Reads every row of the source ``srs`` table; each record whose *front*
    text contains at least one image URL is inserted as an ``SrsRecord`` and
    committed individually (so a failure keeps earlier copies).

    Fix: the source session is now closed in a ``finally`` block — the
    original leaked the source-DB connection.
    """
    db.create_all()
    session = Session()
    try:
        for record in session.query(_SrsRecord):
            if not get_url_images_in_text(record.front):
                continue
            db.session.add(SrsRecord(
                id=record.id,
                front=record.front,
                back=record.back,
                keywords=record.keywords,
                tags=record.tags,
                created=record.created,
                modified=record.modified,
                srs_level=record.srs_level,
                next_review=record.next_review,
            ))
            db.session.commit()
    finally:
        session.close()
class _SrsRecord(Base):
    """Read-only mapping of the ``srs`` table in the source SQLite database.

    Mirrors the app-side ``SrsRecord`` model (plus a ``data`` column that is
    not copied by :func:`get_image_only`).
    """
    __tablename__ = 'srs'
    id = Column(Integer, primary_key=True, nullable=False, unique=True, autoincrement=True)
    front = Column(String, nullable=False, unique=True)  # card front; scanned for image URLs
    back = Column(String)
    data = Column(String)
    keywords = Column(String)
    tags = Column(String)
    created = Column(DateTime, default=datetime.now)   # naive local time default
    modified = Column(DateTime, default=datetime.now)  # naive local time default
    srs_level = Column(Integer)
    next_review = Column(DateTime)
if __name__ == '__main__':
    # Script entry point: run the one-off copy of image-bearing records.
    get_image_only()
| StarcoderdataPython |
12303 | <filename>py3canvas/tests/shared_brand_configs.py
"""SharedBrandConfigs API Tests for Version 1.0.
This is a testing template for the generated SharedBrandConfigsAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.shared_brand_configs import SharedBrandConfigsAPI
from py3canvas.apis.shared_brand_configs import Sharedbrandconfig
class TestSharedBrandConfigsAPI(unittest.TestCase):
    """Tests for the SharedBrandConfigsAPI."""

    def setUp(self):
        """Create an API client against the configured Canvas instance."""
        self.client = SharedBrandConfigsAPI(
            secrets.instance_address, secrets.access_token
        )

    def test_share_brandconfig_theme(self):
        """Integration test for the SharedBrandConfigsAPI.share_brandconfig_theme method."""
        # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
        pass

    def test_update_shared_theme(self):
        """Integration test for the SharedBrandConfigsAPI.update_shared_theme method."""
        # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.
        pass

    def test_un_share_brandconfig_theme(self):
        """Integration test for the SharedBrandConfigsAPI.un_share_brandconfig_theme method."""
        # Fix: the local was named `id`, shadowing the builtin; the unused `r`
        # binding is also dropped. No assertion is made on the response yet.
        theme_id = None  # Change me!!
        self.client.un_share_brandconfig_theme(theme_id)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.