id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9654711 | <filename>cogs/meta.py
import discord
import psutil
from discord.ext import commands
import textwrap
from utils.paginator import HelpPaginator
class Meta:
    """Meta commands: paginated help, user/server profiles, bot info and latency.

    Note: written against pre-1.0 discord.py (plain-class cogs, ``member.game``).
    """

    def __init__(self, bot):
        self.bot = bot
        # Drop the default help command; replaced by the paginated `_help` below.
        bot.remove_command('help')

    @staticmethod
    async def __error(ctx, error):
        """Sends the Error."""
        if isinstance(error, commands.BadArgument):
            await ctx.send(error)

    @commands.command(name='help')
    async def _help(self, ctx, *, command: str = None):
        """Shows help about a command or the bot"""
        try:
            if command is None:
                p = await HelpPaginator.from_bot(ctx)
            else:
                # The argument may name either a cog (category) or a command.
                entity = self.bot.get_cog(command) or self.bot.get_command(command)
                if entity is None:
                    # Zero-width space neutralises @everyone/@here mentions.
                    clean = command.replace('@', '@\u200b')
                    return await ctx.send(f'Command or category "{clean}" not found.')
                elif isinstance(entity, commands.Command):
                    p = await HelpPaginator.from_command(ctx, entity)
                else:
                    p = await HelpPaginator.from_cog(ctx, entity)
            await p.paginate()
        except Exception as e:
            await ctx.send(e)

    @commands.command(aliases=["user_info"])
    async def userinfo(self, ctx, member: discord.Member = None):
        """Shows a profile. Defaults to you."""
        if member is None:
            member = ctx.message.author
        userinfo_embed = discord.Embed(
            title=f"{member.name}'s Profile",
            color=member.color
        )
        userinfo_embed.add_field(
            name="User:",
            value=str(member)
        )
        # Only show a nickname field when one is actually set.
        if member.display_name != member.name:
            userinfo_embed.add_field(
                name="Nickname:",
                value=member.display_name
            )
        userinfo_embed.add_field(
            name="Status:",
            value=str(member.status).title()
        )
        userinfo_embed.add_field(
            name="Playing:",
            value=str(member.game)
        )
        userinfo_embed.add_field(
            name="ID:",
            value=str(member.id)
        )
        userinfo_embed.add_field(
            name="Account Created At:",
            value=f"{member.created_at} UTC"
        )
        userinfo_embed.add_field(
            name="Joined Guild At:",
            value=f"{member.joined_at} UTC"
        )
        # Highest role first; the @everyone role mention is not renderable,
        # so display it as plain text instead.
        roles_list = [r.mention.replace(f'<@&{ctx.guild.id}>', '@everyone') for r in
                      reversed(sorted(member.roles, key=lambda role: role.position))]
        roles = ', '.join(roles_list)
        userinfo_embed.add_field(
            name="Roles",
            value=roles
        )
        userinfo_embed.set_thumbnail(url=member.avatar_url)
        # Bug fix: the footer previously used a triple-quoted string that
        # embedded a newline plus source indentation into the rendered text.
        userinfo_embed.set_footer(
            text=f"{member}'s Profile | Requested by: {ctx.message.author}",
            icon_url=ctx.message.author.avatar_url
        )
        await ctx.send(embed=userinfo_embed)

    @commands.command(aliases=["guild", "guildinfo", "serverinfo"])
    async def server(self, ctx):
        """Displays Server Info.
        Roles Credit: GiovanniMCMXCIX (Gio#0335 or GiovanniMCMXCIX#1211)"""
        # Bug fix: `emotes` was only assigned when the guild had custom
        # emotes, raising NameError below for guilds without any.
        if ctx.guild.emojis:
            emotes = ''.join(str(x) for x in ctx.guild.emojis)
        else:
            emotes = 'None'
        server_embed = discord.Embed(
            title=f"The {ctx.guild.name} Server"
        )
        server_embed.add_field(
            name="Server ID:",
            value=str(ctx.guild.id)
        )
        text_count = len(ctx.guild.text_channels)
        voice_count = len(ctx.guild.voice_channels)
        # Bug fix: the hidden count is displayed as hidden *text* channels but
        # previously iterated over all channels, voice included.
        text_hid = sum(
            1 for c in ctx.guild.text_channels
            if c.overwrites_for(ctx.guild.default_role).read_messages is False)
        server_embed.add_field(
            name="Channels",
            value=f"{text_count} Text ({text_hid}) Hidden / {voice_count} Voice"
        )
        server_embed.add_field(
            name="Owner:",
            value=ctx.guild.owner.mention
        )
        server_embed.add_field(
            name="Region:",
            value=ctx.guild.region
        )
        server_embed.add_field(
            name="Created:",
            value=f"{ctx.guild.created_at} UTC"
        )
        server_embed.add_field(
            name="Emotes:",
            value=f"{emotes}"
        )
        server_embed.add_field(
            name="Server Members:",
            value=str(ctx.guild.member_count)
        )
        roles_list = [r.mention.replace(f'<@&{ctx.guild.id}>', '@everyone') for r in
                      reversed(sorted(ctx.guild.roles, key=lambda role: role.position))]
        roles = ', '.join(roles_list)
        server_embed.add_field(
            name="Roles",
            value=roles
        )
        server_embed.set_thumbnail(url=ctx.guild.icon_url)
        # Same footer fix as in userinfo (no embedded newline/indentation).
        server_embed.set_footer(
            text=f"The {ctx.guild.name} Server Information | Requested by: {ctx.message.author}",
            icon_url=ctx.message.author.avatar_url
        )
        await ctx.send(embed=server_embed)

    @commands.command(
        name="about",
        aliases=["whoareyou"]
    )
    async def _about(self, ctx):
        """Tells you about me."""
        # TODO : Figure out how to get other fields working.
        # TODO : Get uptime of bot
        about_embed = discord.Embed(
            description=textwrap.dedent("""
                Discord bot designed for the Area 11 server
                Check out the GitHub Repository [here](http://github.com/avinch/Cassandra)!
                """),
            color=discord.Color.gold()
        )
        # USS ("unique set size"): memory freed if the process exited now.
        memory_usage = psutil.Process().memory_full_info().uss / 1024**2
        about_embed.add_field(
            name='Memory Usage',
            value='{:.2f} MiB'.format(memory_usage)
        )
        about_embed.set_thumbnail(
            url=ctx.bot.user.avatar_url
        )
        about_embed.set_footer(
            text='Made with discord.py',
            icon_url='http://i.imgur.com/5BFecvA.png'
        )
        await ctx.send(embed=about_embed)

    @commands.command(aliases=["Ping"])
    async def ping(self, ctx):
        """Time the websocket takes to respond."""
        pong = discord.Embed(
            title='Pong!',
            colour=discord.Color.dark_gold()
        )
        pong.set_thumbnail(url='http://i.imgur.com/SKEmkvf.png')
        pong.add_field(name="Response Time:", value=f'{self.bot.latency * 1000}ms')
        await ctx.send(embed=pong)
def setup(bot):
    """Extension entry point used by discord.py's ``bot.load_extension``."""
    bot.add_cog(Meta(bot))
| StarcoderdataPython |
1879054 | __version__ = '0.8.0'
def get_version():
    """Return the package version string (see module-level ``__version__``)."""
    return __version__
| StarcoderdataPython |
5181560 | <reponame>SiliconLabs/mltk
from re import L
import typer
from mltk import cli
@cli.root_cli.command('compile')
def compile_model_command(
    model: str = typer.Argument(...,
        help='''\b
One of the following:
- Name of MLTK model
- Path to trained model's archive (.mltk.zip)
- Path to MLTK model's python script
- Path to .tflite model
''',
        metavar='<model>'
    ),
    accelerator: str = typer.Option(..., '--accelerator', '-a',
        help='Name of accelerator',
        metavar='<name>'
    ),
    verbose: bool = typer.Option(False, '--verbose', '-v',
        help='Enable verbose console logs'
    ),
    output: str = typer.Option(None, '--output', '-o',
        help='''\b
One of the following:
- Path to generated output .tflite file
- Directory where output .tflite is generated
- If omitted, .tflite is generated in the same directory as the given model and the model archive is updated (if an mltk model is provided)''',
        metavar='<path>'
    ),
):
    """Compile a model for the specified accelerator

    Resolves the given model reference (MLTK model name/archive/script or a
    raw .tflite path), then delegates to ``mltk.core.compile_model``.
    """
    # Import all required packages here instead of at top
    # to help improve the CLI's responsiveness
    from mltk.core import (
        compile_model,
        load_mltk_model
    )

    logger = cli.get_logger(verbose=verbose)

    # A raw .tflite path is passed straight through; anything else must be
    # resolved to an MLTK model object first.
    if not model.endswith('.tflite'):
        try:
            model = load_mltk_model(
                model,
                print_not_found_err=True
            )
        except Exception as e:
            # handle_exception logs and exits the CLI with an error status.
            cli.handle_exception('Failed to load model', e)

    try:
        tflite_path = compile_model(
            model,
            accelerator=accelerator,
            output=output
        )
    except Exception as e:
        cli.handle_exception('Failed to compile model', e)

    # Only log the path for an explicit --output; otherwise compile_model
    # reports/updates the model archive itself.
    if output:
        logger.info(f'Generated model at {tflite_path}')
| StarcoderdataPython |
1805282 | <filename>modes/mission_control/code/mission_control.py
# Mission Control mode file for STTNG
import random
import inspect #329
from mpf.system.modes import Mode
class MissionControl(Mode):
    """MPF mode managing STTNG-style mission selection, rotation and lights.

    Missions are indexed 0-7 (see player_init); index 7 is Final Frontier,
    which becomes available once all seven regular missions are complete.
    """

    def mode_init(self):
        """One-time initialisation: light/event tables and player-add handler."""
        self.player = None
        self.running_script = None  # currently running flashing-light script
        # Index-aligned with mission_events and player.missions_complete.
        self.mission_lights = ['l_shipMode1', 'l_shipMode2', 'l_shipMode3',
                               'l_shipMode4', 'l_shipMode5', 'l_shipMode6',
                               'l_shipMode7', 'l_finalFrontier']
        self.mission_events = ['start_mode_time_rift', 'start_mode_worm_hole',
                               'start_mode_search_the_galaxy',
                               'start_mode_battle_simulation',
                               'start_mode_qs_challenge', 'start_mode_rescue',
                               'start_mode_asteroid_threat',
                               'start_mode_final_frontier']
        self.flash_start_mission = None  # script flashing l_startMission
        self.add_mode_event_handler('player_add_success', self.player_init)
        ### REMOVE THIS AFTER TESTING MODES
        # self.add_mode_event_handler('sw_buyIn', self.mission_shortcut)

    #def mission_shortcut(self):
    #    self.machine.events.post('start_mode_time_rift')

    def player_init(self, player, **kwargs):
        """Reset per-player mission progress and pick the initially lit mission."""
        # We're gonna go left to right here, so:
        # 0 = Time Rift
        # 1 = Worm Hole
        # 2 = Search the Galaxy
        # 3 = Battle Simulation
        # 4 = Q's Challenge
        # 5 = Rescue
        # 6 = Asteroid Threat
        # 7 = Final Frontier
        # Need to add provisions in the rotator for re-doing missions, but this
        # is fine for now. Re-doing missions is only a Command Decision thing
        # anyway.
        player.missions_complete = [0]*7
        # Need to add competition logic here whenever that becomes a thing
        # NOTE(review): randrange(0, 6) never selects index 6 (Asteroid
        # Threat) as the starting mission — looks like an off-by-one; confirm.
        player.mission_lit = random.randrange(0,6,1)

    def mode_start(self):
        """Hook return/completion handlers and enter mission-select."""
        #self.player = self.machine.game.player
        # NOTE(review): self.player is only ever assigned None in mode_init;
        # the assignment above is commented out — confirm where self.player
        # is expected to be populated before the handlers below fire.
        self.add_mode_event_handler('return_to_mission_control',
                                    self.enable_mission_select)
        self.add_mode_event_handler('final_frontier_complete',
                                    self.reset_after_final_frontier)
        self.enable_mission_select()
        # We probably need something else that runs when a mission ends and this
        # mode is started up again. We need it to automatically rotate to the
        # next mission (light, list changed to 1, etc...) as well as to set the
        # last mission played to 2
        #
        # This is probably the thing that also keeps track of Final Frontier. So
        # maybe this goes into the mission_rotator method?

    def mode_stop(self):
        """Turn the start-mission flasher off when the mode stops."""
        #self.machine.events.remove_handler(self.mission_started)
        #self.machine.events.remove_handler(self.command_decision)
        #self.machine.events.remove_handler(self.mission_rotate)
        #self.machine.events.remove_handler(self.enable_mission_select)
        #self.machine.events.remove_handler(self.reset_after_final_frontier)
        self.light_start_mission(False)

    def reset_after_final_frontier(self):
        """Wipe mission progress and restart selection after Final Frontier."""
        self.player.missions_complete = None
        self.player.mission_lit = None
        self.player_init(self.player)
        self.enable_mission_select()

    def mission_rotate(self, direction='right', allow_completed=False):
        """Advance the lit mission left/right, optionally skipping completed ones.

        If every regular mission is complete, lights Final Frontier (index 7).
        """
        if self.player.missions_complete.count(0):
            index = self.player.mission_lit
            if direction == 'right':
                index += 1
                if index == 7:
                    index = 0  # wrap around past the last regular mission
                if not allow_completed:
                    # Keep stepping until an incomplete mission is found.
                    while self.player.missions_complete[index]:
                        index += 1
                        if index == 7:
                            index = 0
            else:
                index -= 1
                if index == -1:
                    index = 6
                if not allow_completed:
                    while self.player.missions_complete[index]:
                        index -= 1
                        if index == -1:
                            index = 6
        else:
            # No incomplete missions left: Final Frontier.
            index = 7
        self.update_lit(index)

    def update_lit(self, index):
        """Record the newly lit mission and refresh the lights."""
        self.player.mission_lit = index
        self.update_lights()

    def update_lights(self):
        """Solid-light completed missions and flash the currently lit one."""
        #self.log.info('UDL old running script: %s:', self.running_script)
        if self.running_script:
            #self.log.info('UDL stopping current running script')
            self.running_script.stop()
        for index, value in enumerate(self.player.missions_complete):
            if value:
                #self.log.info("I'm turning ON light %s",
                #              self.mission_lights[index])
                self.machine.lights[self.mission_lights[index]].on()
            else:
                #self.log.info("I'm turning off light %s",
                #              self.mission_lights[index])
                self.machine.lights[self.mission_lights[index]].off()
        self.running_script = self.machine.show_controller.run_script(
            lightname=self.mission_lights[self.player.mission_lit],
            script=self.machine.show_controller.light_scripts['flash'],
            tps='2'
        )
        #self.log.info('UDL new running script: %s', self.running_script)

    def mission_started(self):
        """Launch the lit mission's mode and mark it as played."""
        self.light_start_mission(False)
        self.machine.events.post(self.mission_events[self.player.mission_lit])
        # Final Frontier (7) has no completion counter slot.
        if not self.player.mission_lit == 7:
            self.player.missions_complete[self.player.mission_lit] += 1
        self.machine.events.remove_handler(self.mission_started)
        self.machine.events.remove_handler(self.command_decision)
        self.machine.events.remove_handler(self.mission_rotate)
        #self.machine.events.post('mission_control_mission_started')
        # Need the code here to:
        #
        # Find the list item that contains a 1 and start the mode associated
        # with that mission
        #
        # I guess that means we need a lookup table or set of if-thens to say:
        # "if player.mission_states = 1, post an event that starts worm hole"
        #
        # Additionally, we have to check for Final Frontier. So perhaps that
        # runs first, like: If Final Frontier is enabled, post an event to start
        # that mode. Else, find the index in mission_states that = 1 and start
        # that.

    def light_start_mission(self, lit=True):
        """Flash (lit=True) or extinguish (lit=False) the start-mission light."""
        if lit:
            self.flash_start_mission = self.machine.show_controller.run_script(
                lightname='l_startMission',
                script=self.machine.show_controller.light_scripts['flash'],
                tps=8
            )
        else:
            self.flash_start_mission.stop()
            self.machine.lights['l_startMission'].off()

    def enable_mission_select(self):
        """Arm the selection switches and rotate to a valid lit mission."""
        # is this the final frontier event?
        # self.player_init(player=self.player)
        # if not,
        self.add_mode_event_handler('sw_start_mission',
                                    self.mission_started)
        self.add_mode_event_handler('sw_command_decision',
                                    self.command_decision)
        self.add_mode_event_handler('sw_advance_mission',
                                    self.mission_rotate)
        self.light_start_mission()
        self.mission_rotate()

    def command_decision(self):
        """Placeholder for the mission-choice menu (not implemented)."""
        pass
        # This is here as an extra challenge that we don't need to address
        # currently. In STTNG, when Command Decision is lit and hit, the player
        # gets a menu to choose which mission they want to play. Consider this
        # extra credit for later.
| StarcoderdataPython |
11382999 | <filename>lib/sampleapiclient/masking/Masking.py<gh_stars>10-100
import json
from authenticationsdk.util.GlobalLabelParameters import *
# This method reads the items to be masked and accordingly masks the response from the server
def masking(r):
    """Mask the sensitive payment/PII fields in a JSON response string.

    Returns the masked JSON string, or the input unchanged when it is not
    valid JSON.
    """
    sensitive_fields = (
        "expirationMonth", "expirationYear", "email", "firstName", "lastName",
        "phoneNumber", "number", "securityCode", "type",
    )
    try:
        parsed = json.loads(r)
        return json.dumps(remove_key(parsed, *sensitive_fields))
    except ValueError:
        # Not JSON — nothing to mask, hand the raw body back.
        return r
# This function replaces the value of the items to be masked to "XXXXX"
def remove_key(obj, *keys):
    """Recursively replace the values of *keys* with the masking value.

    Walks nested dicts (and, as a fix, lists) in place and substitutes
    ``GlobalLabelParameters.MASKING_VALUE`` for every value whose key is in
    *keys*. Non-container values are returned unchanged.
    """
    if type(obj) is dict:
        for k, v in list(obj.items()):
            if k in keys:
                obj[k] = GlobalLabelParameters.MASKING_VALUE
            else:
                obj[k] = remove_key(v, *keys)
    elif type(obj) is list:
        # Bug fix: sensitive values nested inside JSON arrays were never
        # masked before; recurse into list elements as well.
        for i, item in enumerate(obj):
            obj[i] = remove_key(item, *keys)
    return obj
| StarcoderdataPython |
11347532 | <reponame>Borda/kaggle_iMet-collection<gh_stars>1-10
import os
import pytest
import torch
from PIL import Image
from torch import tensor
from kaggle_imet.data import IMetDataset, IMetDM
_PATH_HERE = os.path.dirname(__file__)

# File-name stems of the sample images shipped under tests/data (order matters:
# it must match the CSV row order checked in test_dataset).
_TEST_IMAGE_NAMES = (
    '1cc66a822733a3c3a1ce66fe4be60a6f',
    '09fe6ff247881b37779bcb386c26d7bb',
    '258e4a904729119efd85faaba80c965a',
    '11a87738861970a67249592db12f2da1',
    '12c80004e34f9102cad72c7312133529',
    '0d5b8274de10cd73836c858c101266ea',
    '14f3fa3b620d46be00696eacda9df583',
)
# Every distinct label id appearing in the sample CSV.
_TEST_UNIQUE_LABELS = (
    '124', '1660', '2281', '233', '2362', '262', '2941', '3192', '3193', '3235', '3334', '341', '3465', '370', '507',
    '782', '783', '784', '792', '946', '96'
)
# Expected multi-hot label encodings, one row per image in _TEST_IMAGE_NAMES,
# columns ordered as _TEST_UNIQUE_LABELS.
_TEST_LABELS_BINARY = [
    tensor([1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]),
    tensor([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]),
    tensor([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0]),
    tensor([0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0]),
    tensor([0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]),
    tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]),
    tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0])
]
@pytest.mark.parametrize("phase", ['train', 'valid'])
def test_dataset(phase, root_path=_PATH_HERE):
    """IMetDataset yields PIL images with the expected names and labels in both splits."""
    dataset = IMetDataset(
        df_data=os.path.join(root_path, "data", "train-from-kaggle.csv"),
        path_img_dir=os.path.join(root_path, "data", "train-1", "train-1"),
        # split=1.0 places all samples in 'train'; 0.0 places all in 'valid',
        # so the full 7-sample fixture is visible in either phase.
        split=1.0 if phase == 'train' else 0.0,
        mode=phase,
        random_state=42,
    )
    assert len(dataset) == 7

    img, _ = dataset[0]
    assert isinstance(img, Image.Image)

    # Image file stems must line up with the dataframe ids.
    _img_names = [os.path.splitext(im)[0] for im in dataset.img_names]
    assert tuple(_img_names) == tuple(dataset.data['id']) == _TEST_IMAGE_NAMES
    assert dataset.labels_unique == _TEST_UNIQUE_LABELS

    # Labels come back multi-hot encoded in labels_unique order.
    lbs = [tensor(dataset[i][1]) for i in range(len(dataset))]
    # mm = lambda lb: np.array([i for i, l in enumerate(lb) if l])
    # lb_names = [np.array(dataset.labels_unique)[mm(lb)] for lb in lbs]
    assert all(torch.equal(a, b) for a, b in zip(_TEST_LABELS_BINARY, lbs))
def test_datamodule(root_path=_PATH_HERE):
    """Smoke-test IMetDM: setup, label bookkeeping and one batch per split."""
    data_module = IMetDM(
        path_csv="train-from-kaggle.csv",
        base_path=os.path.join(root_path, "data"),
        batch_size=2,
        split=0.6,
    )
    data_module.setup()

    expected_num_labels = len(_TEST_UNIQUE_LABELS)
    assert data_module.num_classes == expected_num_labels
    assert data_module.labels_unique == _TEST_UNIQUE_LABELS
    assert len(data_module.lut_label) == expected_num_labels
    # assert isinstance(dm.label_histogram, Tensor)

    # Pull a single batch from each dataloader to prove iteration works.
    for images, labels in data_module.train_dataloader():
        assert len(images)
        assert len(labels)
        break
    for images, labels in data_module.val_dataloader():
        assert len(images)
        assert len(labels)
        break
    for images, names in data_module.test_dataloader():
        assert len(images)
        assert len(names)
        # The test split carries image names instead of labels.
        assert isinstance(names[0], str)
        break
| StarcoderdataPython |
def array_count9(nums):
    """Return how many elements of *nums* equal 9."""
    return sum(1 for value in nums if value == 9)
| StarcoderdataPython |
6581231 | <reponame>8Avalon8/pisces_af<filename>tasks/mainstory15.py
# -*- coding: utf-8 -*-
# Automated main-story task for levels 15+ (STTNG UI-automation DSL).
# Flow: level the character via GM commands, then loop: detect the current UI
# state (equipment/item popups, story dialogue, main-quest tracker), dispatch
# to the matching handler step, auto-battle when a fight starts, and finish
# once the level-69 quest image ("luoyangbaoxun") appears in the tracker.
task = Task("MainStory15",desc = u"自动主线15级后",pretask = ["MainStory"])
#task.addSetupActionSet("RefreshGame",tag="pre1",desc="RefreshGame")
tasksuit.addTask(task)

# Step 0.5: log in and raise the character to level 69 via GM commands.
step = Step("step0.5",u"Login")
task.addStep(step)
#step.addActionSet("InputNamePsd",tag = "login",desc = "login input username and password", mp={'username':'autotest1','password':'<PASSWORD>'})
#step.addActionSet("TypeCommand",tag = "0.5",desc = u"获得万劫不复技能", mp = {'command':'$setskill 2099 1'})
step.addActionSet("TypeCommand",tag = "0.5",desc = u"升一级", mp = {'command':'$addexp 1000000000'})
step.addActionSet("TypeCommand",tag = "0.6",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.7",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.8",desc = u"升一级", mp = {'command':'$addexp 1'})
step.addActionSet("TypeCommand",tag = "0.9",desc = u"升一级", mp = {'command':'$r who.Base.m_Grade = 69;who.Base.m_Exp=0;who.CalculateProp();who.SendPropChange();'})
#action1
# Initialise the idle counter used by the main loop's $cleartask fallback.
act = Exec(tag = "1", desc = u"清理任务计数器开始", exp = "cleartasktimer = 0")
step.addAction(act)

#step1
# Step 1: main loop — probe the screen and jump to the handler steps.
step = Step("step1",u"主循环")
task.addStep(step)
#action1
arg = { "detectRegion" : GetRegionFromGrid(87, 88), "imagePattern" : "jujuemiaomiao.png",
        "loopWaitingTime" : 0 , "failResponse" : "Ignore" }
act = ClickAction(tag = "1.6", desc = u"拒绝喵喵", **arg)
step.addAction(act)
#act1.7
arg = { "detectRegion": GetRegionFromGrid(13,30) ,"imagePattern" : Pattern("btn_close_welfare_center.png").similar(0.80),
        "failResponse" : "Ignore" ,"loopWaitingTime": 0 }
act = ClickAction(tag = "1.7",desc=u"关闭人物信息(防止误操作)", **arg)
step.addAction(act)
#act2
arg = { "detectRegion": gl.AREA_BTN_USEITEM ,"imagePattern" : "btn_equip.png",
        "loopWaitingTime": 0 ,"successNext" : ["step","step2"],
        "failResponse" : "Ignore" ,"loopSleepTime" : 0.1,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "2", desc = u"是否有装备窗口", **arg)
step.addAction(act)
#act3
arg = { "detectRegion": gl.AREA_BTN_USEITEM ,"imagePattern" : "btn_useitem.png",
        "loopWaitingTime": 0 ,"successNext" : ["step","step2"],
        "failResponse" : "Ignore" ,"loopSleepTime" : 0.1,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "3", desc = u"是否有使用道具窗口", **arg)
step.addAction(act)
#act4
arg = { "detectRegion": gl.AREA_BTN_SKIPSTORY ,"imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
        "loopWaitingTime": 0 ,"successNext" : ["step","step3"],
        "failResponse" : "Ignore","loopSleepTime" : 0.1,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "4", desc = u"是否在剧情或对话中", **arg)
step.addAction(act)
#act4
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
        "loopWaitingTime": 0 ,"successNext" : ["step","step4"],
        "failResponse" : "Ignore","loopSleepTime" : 0.1,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "5", desc = u"任务栏是否有主线", **arg)
step.addAction(act)
# Count idle passes: after 20 loop iterations with nothing detected, jump to
# the $cleartask branch (action "9") to drop unrelated quests.
exp = """
cleartasktimer += 1
print cleartasktimer
"""
act = Exec(tag = "6", desc = u"任务计数器加1", exp = exp)
step.addAction(act)
act = Jump(tag = "7", desc = u"清除其他任务", target = ["action","9"], cond = "cleartasktimer == 20")
step.addAction(act)
#action3
act = Jump(tag = "8", desc = u"返回主循环最开始",target=["step","step1"])
step.addAction(act)
#action4
step.addActionSet("TypeCommand",tag = "9",desc = u"清除无关任务", mp = {'command':'$cleartask'})
#action5
act = Exec(tag = "10", desc = u"清理任务计数器重置", exp = "cleartasktimer = 0")
step.addAction(act)
#action6
act = Jump(tag = "11", desc = u"返回主循环最开始",target=["step","step1"])
step.addAction(act)

#Step2
# Step 2: click through equip / use-item popups, then return to the main loop.
step = Step("step2",u"处理道具装备")
task.addStep(step)
#action1
arg = { "detectRegion" : gl.AREA_BTN_USEITEM, "imagePattern" : "btn_useitem.PNG",
        "loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
        "loopRegion": gl.AREA_BTN_USEITEM ,"loopPattern" :"btn_useitem.PNG",
        "loopTime" : 5 ,"loopType" : 0 ,
        "loopSleepTime" : 0.1 ,"saveImage" : True}
act = ClickAction(tag = "1", desc = u"使用道具", **arg)
step.addAction(act)
#action2
arg = { "detectRegion" : gl.AREA_BTN_USEITEM, "imagePattern" : "btn_equip.PNG",
        "loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
        "loopRegion": gl.AREA_BTN_USEITEM ,"loopPattern" :"btn_equip.PNG",
        "loopSleepTime" : 0.1 ,"saveImage" : True,
        "loopTime" : 5 ,"loopType" : 0 }
act = ClickAction(tag = "2", desc = u"使用装备", **arg)
step.addAction(act)
#action3
act = Jump(tag = "3", desc = u"返回主循环", target = ["step","step1"])
step.addAction(act)

#Step3
# Step 3: in-story handling — skip dialogue, enter battles, follow the quest.
step = Step("step3", desc = u"处理剧情中")
task.addStep(step)
#action1
arg = { "detectRegion": gl.AREA_BTN_SKIPSTORY ,"imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
        "loopWaitingTime": 0 ,"failNext" : ["step","step1"],
        "failResponse" : "Ignore","loopSleepTime" : 0.1,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "1", desc = u"是否在剧情或对话中,不在则返回主循环Step1", **arg)
step.addAction(act)
#action1.6
# The "refuse miaomiao" dialog sometimes appears while the main quest is
# still visible; because the mere presence of the main-quest text would loop
# forever here, detect and click "refuse" at this point as well.
arg = { "detectRegion" : GetRegionFromGrid(87, 88), "imagePattern" : "jujuemiaomiao.png",
        "loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
        "saveImage" : True}
act = ClickAction(tag = "1.6", desc = u"拒绝喵喵", **arg)
step.addAction(act)
#action2
arg = { "detectRegion": GetRegionFromGrid(76, 112) ,"imagePattern" : "enterbattle.png",
        "loopWaitingTime": 0 ,"next" : ["step","step6"],
        "failResponse" : "Ignore" ,"loopSleepTime" : 0.1,
        "saveImage" : True}
act = ClickAction(tag = "2", desc = u"如果有开始战斗则点击直到消失,并进入战斗Step", **arg)
step.addAction(act)
#action3
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
        "loopWaitingTime": 0 ,"next" : ["step","step4"],
        "failResponse" : "Ignore","loopSleepTime" : 0.1 ,"saveImage" : True,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "3", desc = u"如果有主线则点主线", **arg)
step.addAction(act)
#action 4
# Clicking "skip" can hit the character-info popup when the scene changes, so
# loopSleepTime is kept a bit longer here (checking for and closing the
# character-info window on every pass would be the more robust alternative).
arg = { "detectRegion" : gl.AREA_BTN_SKIPSTORY, "imagePattern" : Pattern("btn_skip_story.png").similar(0.60),
        "loopWaitingTime" : 0 , "failResponse" : "Ignore" ,
        "loopSleepTime" : 0.3, "saveImage" : False,
        "loopRegion": gl.AREA_BTN_SKIPSTORY ,"loopPattern" :Pattern("btn_skip_story.png").similar(0.60),
        "loopTime" : 8 ,"loopType" : 0 }
act = ClickAction(tag = "4",desc=u"点击跳过", **arg)
step.addAction(act)
#action 5
arg = { "time":1}
act = SleepAction(tag = "5",desc=u"sleep 1s", **arg)
step.addAction(act)
#action 6
act = Jump(tag = "6", desc = u"返回继续处理剧情", target = ["action","1"])
step.addAction(act)

#Step4
# Step 4: click the main-quest tracker until the next story beat starts.
step = Step("step4",u"处理剧情追踪")
task.addStep(step)
#act0.5
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("main_story.png").similar(0.60),
        "loopWaitingTime": 0 ,"saveImage" : True,
        "failNext" : ["step","step3"] ,"failResponse" : "Ignore",
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "0.5", desc = u"任务栏是否有主线", **arg)
step.addAction(act)
#action0.6
arg = { "detectRegion": GetRegionFromGrid(60, 112) ,"imagePattern" : "special_zhuxian.png",
        "loopWaitingTime": 0 ,
        "failResponse" : "Ignore"}
act = ClickAction(tag = "0.6", desc = u"特殊主线", **arg)
step.addAction(act)
#act1
# Terminal condition: the level-69 quest "luoyangbaoxun" ends the task.
arg = { "detectRegion": GetRegionFromGrid(45, 128) ,"imagePattern" : Pattern("luoyangbaoxun.png").similar(0.70),
        "loopWaitingTime": 0 ,"successNext" : ["step","step7"],
        "failResponse" : "Ignore","loopSleepTime" : 0.1,
        "loopTime":1 ,"loopType" : 1}
act = DetectAction(tag = "1", desc = u"是否达到69级下一个任务是洛阳报讯", **arg)
step.addAction(act)
#action2
arg = { "detectRegion" : GetRegionFromGrid(45, 128), "imagePattern" : Pattern("main_story.png").similar(0.60),
        "loopWaitingTime" : 1 , "failResponse" : "Ignore" ,
        "loopSleepTime" : 0.3, "saveImage" : False,
        "loopRegion": GetRegionFromGrid(45, 128) ,"loopPattern" :Pattern("main_story.png").similar(0.60),
        "loopTime" : 5 ,"loopType" : 0 }
act = ClickAction(tag = "2", desc = u"循环点击主线直到消失", **arg)
step.addAction(act)
#action3
act = Jump(tag = "3", desc = u"jump to step4 action0.5",target=["action","0.5"])
step.addAction(act)

#Step6
# Step 6: delegate to the shared auto-battle action set, then loop back.
step = Step("step6",u"自动战斗")
task.addStep(step)
step.addActionSet("AutoBattle",tag = "1",desc = u"自动战斗actionset", mp = {})
act = Jump(tag = "1", desc = u"jump to step1", target=['step','step1'])
step.addAction(act)

#Step7
#Step7
# Step 7: terminal step — short sleep, then the task ends.
step = Step("step7",u"结束Task")
task.addStep(step)
arg = {'time':1}
act = SleepAction(tag="end sleep",desc = u"准备结束该任务",**arg)
step.addAction(act)
| StarcoderdataPython |
183797 | <filename>bin/iamonds/hexiamonds-4x12-stacked-hexagons.py
#!/usr/bin/env python

# $Id$

"""51 solutions"""

import puzzler
from puzzler.puzzles.hexiamonds import Hexiamonds4x12StackedHexagons

# Solve the "4x12 stacked hexagons" hexiamonds puzzle and print solutions.
puzzler.run(Hexiamonds4x12StackedHexagons)
| StarcoderdataPython |
6533890 | <filename>tests/browser/pages/external/govuk_article_page.py
# -*- coding: utf-8 -*-
"""GOV.UK - Generic article page."""
from selenium.webdriver.remote.webdriver import WebDriver
from directory_tests_shared import URLs
from directory_tests_shared.enums import PageType, Service
from directory_tests_shared.utils import check_url_path_matches_template
from pages.common_actions import go_to_url
NAME = "Brexit related article"
SERVICE = Service.GOVUK
TYPE = PageType.ARTICLE
# URL template matching any GOV.UK path; individual articles fill in `slug`.
URL = URLs.GOVUK_WILDCARD.absolute_template
SELECTORS = {}  # no page-specific selectors: article pages are only URL-checked

SubURLs = {
    "How to export goods to the EU after Brexit": URL.format(
        slug="starting-to-export/within-eu"
    ),
    "How to export goods outside of the EU after Brexit": URL.format(
        slug="starting-to-export/outside-eu"
    ),
    "Providing services and travelling to EEA and EFTA countries": URL.format(
        slug="government/collections/providing-services-to-eea-and-efta-countries-after-eu-exit"
    ),
}
# Keys are stored lower-cased so lookups can be case-insensitive.
SubURLs = {key.lower(): val for key, val in SubURLs.items()}
NAMES = list(SubURLs.keys())
def visit(driver: WebDriver, *, page_name: str = None):
    """Navigate *driver* to the GOV.UK article identified by *page_name*.

    ``SubURLs`` keys are stored lower-cased, so the lookup is normalised
    here as well (a raw, mixed-case page name previously raised KeyError).
    """
    url = SubURLs[page_name.lower()]
    go_to_url(driver, url, NAME)
def should_be_here(driver: WebDriver, *, page_name: str = None):
    """Assert the current URL matches the GOV.UK article URL template.

    *page_name* is accepted for interface parity with other page objects but
    is unused: all articles share the same wildcard template.
    """
    check_url_path_matches_template(URL, driver.current_url)
| StarcoderdataPython |
258147 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import tqdm
from scipy import interpolate
from mouse_detection.tracker import EuclideanDistTracker
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high-frequency noise while preserving
    the original shape of the signal better than moving-average techniques:
    each point is replaced by a least-squares polynomial fit over an odd-sized
    window centred on it.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv : int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : int
        sample rate used to scale the derivative coefficients.

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).

    References
    ----------
    .. [1] Savitzky, Golay, Smoothing and Differentiation of Data by
           Simplified Least Squares Procedures. Analytical Chemistry, 1964,
           36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
           W.H. Press et al., Cambridge University Press ISBN-13: 9780521880688
    """
    import numpy as np
    from math import factorial

    # Bug fix: np.int and np.mat were removed in NumPy >= 1.24; use the
    # builtin int and plain ndarrays instead (numerically identical).
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError as msg:
        raise ValueError("window_size and order have to be of type int") from msg
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients (Vandermonde design matrix, pseudo-inverse row
    # `deriv` gives the convolution kernel for the requested derivative)
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself (mirrored about the end points)
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def write_video(filepath, shape, fps=30):
    """Create a colour MJPG cv2.VideoWriter for frames of the given shape.

    :param filepath: output video file path
    :param shape: (height, width, channels) of the frames to be written
    :param fps: frames per second of the output video
    :return: an opened cv2.VideoWriter
    """
    height, width, _channels = shape
    codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # Note cv2 expects (width, height), the reverse of the array shape order.
    return cv2.VideoWriter(filepath, codec, fps, (width, height), isColor=True)
def read_video(video_path, block=False, num_blocks=None, index=None):
    '''
    Read video in blocks or directly in memory, if block mode is selected reads only block by index
    :param video_path: path to the video
    :param block: allow block reading
    :param num_blocks: number of blocks. eg. 10
    :param index: index of the block. eg. 2 for the third block
    :return: np.array of frames as uint8 type
    '''
    print('Reading video: ', video_path)
    cap = cv2.VideoCapture(video_path)
    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('video props: (frameCount, frameHeight, frameWidth)=', (frameCount, frameHeight, frameWidth))
    fc = 0
    ret = True
    if not block:
        # Whole video into one preallocated uint8 buffer (frames, H, W, 3).
        buf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype('uint8'))
        while fc < frameCount and ret:
            ret, ff = cap.read()
            if ret:
                buf[fc] = ff
                fc += 1
        cap.release()
    else:
        # calculate block indices:
        block_length = frameCount // num_blocks
        a = index * block_length  # first frame of the requested block
        b = a + block_length      # one past the last frame of the block
        if index == num_blocks - 1:
            # The last block absorbs the remainder frames.
            b += frameCount % num_blocks
        buf = np.empty((b - a, frameHeight, frameWidth, 3), np.dtype('uint8'))
        cnt = 0
        # NOTE(review): frames before the block are still decoded (no seek is
        # performed), and `cap` is not released in this branch — presumably
        # relying on garbage collection; confirm.
        while (fc < frameCount and ret and fc < b):
            ret, frame = cap.read()
            if fc < b and fc >= a:
                buf[cnt] = frame
                cnt += 1
            fc += 1
    return buf
class BackgroundSubtractorTH:
    """Simple threshold-based background subtractor.

    Optionally subtracts a fixed reference frame, then binarises the
    grayscale result at ``threshold * 255``, returning a 3-channel mask.
    """

    # morphology filters
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel10 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))

    def __init__(self, init_frame=None, threshold=0.93):
        # Reference/background frame to subtract before thresholding (or None).
        self.init_frame = init_frame
        self._track_window = None
        # Fraction of full intensity (0-1) used as the binarisation cut-off.
        self.threshold = threshold

    def apply(self, frame):
        """Return a 3-channel binary foreground mask for *frame*."""
        if self.init_frame is not None:
            # NOTE(review): uint8 subtraction wraps around on underflow
            # (e.g. 10 - 20 == 246) rather than saturating — confirm this is
            # the intended behaviour vs. cv2.absdiff/cv2.subtract.
            frame = frame - self.init_frame
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        value = int(self.threshold * 255)
        ret, th1 = cv2.threshold(frame_gray, value, 255, cv2.THRESH_BINARY)
        # Replicate the mask to 3 channels so it matches the input layout.
        frame = np.stack([th1, th1, th1], axis=-1)
        cv2.normalize(frame, frame, 0, 255, cv2.NORM_MINMAX)
        return frame
def createBackgroundSubtractorTH(init_image=None, bkg_threshold=0.93):
    """Factory mirroring cv2's ``createBackgroundSubtractor*`` naming."""
    return BackgroundSubtractorTH(init_frame=init_image, threshold=bkg_threshold)
def _remove_blackchannel(img):
    """Brighten dark regions of a BGR image via a dark-channel-prior pass.

    Works on the inverted image (dehazing the inverse brightens shadows in
    the original, a standard low-light-enhancement trick). Returns a float
    array clipped to [0, 255].
    """
    w = 0.98    # haze weight in the transmission estimate
    miu = 0.95  # attenuation factor applied to the raw dark channel

    ##
    # Calculate A (global atmospheric light) on the inverted image.
    I_inv = 255.0 - img
    I_min = np.min(I_inv, axis=2)
    kernel = np.ones((3, 3), np.float32) / 9

    def medfilt2(_img):
        # 3x3 box filter (named after the MATLAB original's medfilt2 call).
        return cv2.filter2D(_img, -1, kernel)

    I_min_med = medfilt2(I_min)
    A = np.max(I_min_med)

    # %%
    # Calculate per-channel atmospheric light Ac (BGR: index 2 = red, 0 = blue).
    I_inv_r = I_inv[:, :, 2]
    A_r = np.max(medfilt2(I_inv_r))
    I_inv_g = I_inv[:, :, 1]
    A_g = np.max(medfilt2(I_inv_g))
    I_inv_b = I_inv[:, :, 0]
    A_b = np.max(medfilt2(I_inv_b))

    # Normalise each channel by its own atmospheric light.
    # Bug fix: the original assigned I_inv_b / A_{r,g,b} to all three planes,
    # so the red and green data never entered the dark-channel computation.
    I_inv_A = np.empty_like(img, dtype='float32')
    I_inv_A[:, :, 2] = I_inv_r / A_r
    I_inv_A[:, :, 1] = I_inv_g / A_g
    I_inv_A[:, :, 0] = I_inv_b / A_b

    ##
    # Smoothed dark channel: remove fine detail from the per-pixel minimum.
    I_dark_til = np.min(I_inv_A, axis=2)
    I_med = medfilt2(I_dark_til)
    I_detail = medfilt2(np.abs(I_med - I_dark_til))
    I_smooth = I_med - I_detail
    I_dark_cal = np.empty((img.shape[0], img.shape[1], 2))
    I_dark_cal[:, :, 0] = miu * I_dark_til
    I_dark_cal[:, :, 1] = I_smooth
    I_dark = np.min(I_dark_cal, 2)

    # Transmission map and recovery, then invert back to the original domain.
    t = 1.0 - w * I_dark
    J_inv = (I_inv - A) / np.stack([t, t, t], axis=-1) + A
    J = 255.0 - J_inv
    return np.clip(J, 0, 255)
class MouseVideo:
    """In-memory video of a mouse experiment with background removal,
    detection and ROI-tracking utilities.

    Frames are loaded eagerly via ``read_video`` as a
    (num_frames, H, W, 3) uint8 array. ``bkg_method`` selects either
    OpenCV's MOG2 subtractor ('MOG') or the threshold-based
    ``BackgroundSubtractorTH`` (any other value).
    """
    # morphology filters
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel10 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    def __init__(self, vpath, bkg_method='MOG', bkg_threshold=0.93, roi_dims=(260, 260)):
        """
        :param vpath: path to an existing video file
        :param bkg_method: 'MOG' for MOG2, anything else for the TH subtractor
        :param bkg_threshold: cutoff for the TH subtractor
        :param roi_dims: (height, width) of the region of interest
        """
        assert os.path.exists(vpath), "Input video path is non-existing or bad argument {}".format(vpath)
        self.vpath = vpath
        self.frames = read_video(vpath)
        self.num_frames = self.frames.shape[0]
        self._frames_no_bkg = None
        self._bkg_method = bkg_method
        self.bkg_threshold = bkg_threshold
        self.roi_dims = roi_dims
        self.tracker = EuclideanDistTracker()
    def remove_darkchannel(self, inplace = False):
        """Apply dark-channel haze removal to every frame.

        :param inplace: when True, also overwrite ``self.frames``
        :return: array of processed frames
        """
        darkframes = np.empty_like(self.frames)
        for i, f in tqdm.tqdm(enumerate(self.frames),desc="Removing darkchannel"):
            darkframes[i] = _remove_blackchannel(f)
        if inplace:
            self.frames = darkframes
        return darkframes
    def remove_background(self):
        """Compute background-subtracted frames for the whole video.

        :return: array of 3-channel mask frames, same shape as ``frames``
        """
        if self._frames_no_bkg is None:
            self._frames_no_bkg = np.empty_like(self.frames)
        if self._bkg_method == 'MOG':
            bg_substractor = cv2.createBackgroundSubtractorMOG2()
            for i in range(self.num_frames):
                # Replicate the single-channel mask into 3 channels (H, W, 3).
                no_bkg_frame = np.tile(bg_substractor.apply(self.frames[i]), (3, 1, 1)).transpose(1, 2, 0)
                self._frames_no_bkg[i] = no_bkg_frame
        else:
            bg_substractor = createBackgroundSubtractorTH(bkg_threshold=self.bkg_threshold)
            # Frames are inverted before thresholding.
            inverted_frames = 255 - self.frames
            for i in range(self.num_frames):
                no_bkg_frame = bg_substractor.apply(inverted_frames[i])
                self._frames_no_bkg[i] = no_bkg_frame
        return self._frames_no_bkg
    def track_mouse(self):
        """Detect the mouse in every frame and return smoothed centers.

        Frames where detection fails are recorded as NaN, filled by
        linear interpolation over valid frames, and both coordinate
        series are smoothed with a Savitzky-Golay filter (window 65,
        polynomial order 3).

        :return: list of (x, y) int centers, one per frame
        """
        self.coords = []
        for frame_index in range(self.num_frames):
            try:
                xy1, xy2 = self.detect_mouse(frame_index)
                cX, cY = (xy1[0] + xy2[0])//2 , (xy1[1] + xy2[1])//2
            except ValueError as e:
                # No detection for this frame; NaN placeholder for interpolation.
                cX, cY = np.nan, np.nan
                # raise e
            self.coords.append((cX, cY))
        # Keep only valid samples and their frame indices.
        xx = np.array([x[0] for x in self.coords if not np.isnan(x).sum()>0])
        yy = np.array([x[1] for x in self.coords if not np.isnan(x).sum()>0])
        ii = np.array([i for i, x in enumerate(self.coords) if not np.isnan(x).sum()>0])
        fx = interpolate.interp1d(ii, xx, bounds_error=False, fill_value=(xx[0], xx[-1]))
        fy = interpolate.interp1d(ii, yy, bounds_error=False, fill_value=(yy[0], yy[-1]))
        xx = fx(np.arange(0, len(self.coords)))
        yy = fy(np.arange(0, len(self.coords)))
        xx = savitzky_golay(xx, 65, 3)
        yy = savitzky_golay(yy, 65, 3)
        self.coords=[]
        for x, y in zip(xx, yy):
            self.coords.append((x.astype(int),y.astype(int)))
        return self.coords
    def detect_mouse(self, frame_index, plot=False, crop=False):
        """
        Calculate bounding box containing the mouse location.
        :param plot: activate calculation of frame and text on the output image
        :param crop: when plotting, return a zero-padded ROI crop instead of the full frame
        :param frame_index: index of the frame in the video
        :return: list of four values formed by bottom left corner and top right corner cords
        :raises ValueError: when no detection is found in the frame
        """
        assert frame_index < self.num_frames, f' {frame_index} < {self.num_frames}'
        no_background_frame = self.frames_no_bkg[frame_index]
        if self._bkg_method == "TH":
            # Center of mass of the thresholded mask via image moments.
            gray_image = cv2.cvtColor(no_background_frame, cv2.COLOR_BGR2GRAY)
            ret, th1 = cv2.threshold(gray_image, 127, 255, 0)
            centroid = cv2.moments(th1)
            # calculate x,y coordinate of center
            try:
                cX = int(centroid["m10"] / centroid["m00"])
                cY = int(centroid["m01"] / centroid["m00"])
            except Exception as e:
                # m00 == 0 means an empty mask (zero division) - re-raise as-is.
                raise e
        else:
            # Contour-based detection on the MOG2 foreground mask.
            mask = no_background_frame[...,0]
            _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
            mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, self.kernel)
            mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self.kernel)
            mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, self.kernel10)
            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            detections = []
            for cnt in contours:
                area = cv2.contourArea(cnt)
                frame_detections = []
                # Only contours larger than 400 px^2 are considered mouse candidates.
                if area > 400:
                    x, y, h, w = cv2.boundingRect(cnt)
                    frame_detections.append([x, y, h, w])
                if len(frame_detections) > 0:
                    detections_avg = np.mean(frame_detections, axis=0)
                    detections.append(detections_avg.astype(int).tolist())
            # detection
            boxes_ids = self.tracker.update(detections)
            frame_coords = []
            for box_id in boxes_ids:
                x, y, w, h, id = box_id
                frame_coords.append([x, y, w, h])
            if len(frame_coords) > 0:
                # Average all tracked boxes and take the box center.
                x, y ,h , w = np.mean(frame_coords, axis=0)
                cX, cY = int(x + x + w)//2, int(y + y + h)//2
            else:
                raise ValueError('Frame has not detections')
        if plot:
            frame_plot, roi_coords = self.calculate_roi(frame_index, cX, cY, plot=plot, crop=crop)
            return frame_plot, roi_coords
        else:
            roi_coords = self.calculate_roi(frame_index, cX, cY, plot=plot, crop=crop)
            return roi_coords
    def calculate_roi(self, frame_index, cX, cY, plot=False, crop=False):
        """Compute an ROI of ``roi_dims`` centered on (cX, cY), clamped to
        the frame borders.

        :return: corner coords, optionally preceded by an annotated full
            frame (plot) or a zero-padded crop (plot and crop)
        """
        # put text and highlight the center
        frame = self.frames[frame_index]
        shift_y, shift_x = (self.roi_dims[0] // 2, self.roi_dims[1] // 2)
        epsx = 1 if self.roi_dims[0] % 2 == 0 else 0  # if odd no shift and need only one if even you need one more pixel
        epsy = 1 if self.roi_dims[1] % 2 == 0 else 0  # same for y
        down_left_x = 0 if cX - shift_x < 0 else cX - shift_x + epsx
        down_left_y = 0 if cY - shift_y < 0 else cY - shift_y + epsy
        up_right_x = frame.shape[1] if cX + shift_x >= frame.shape[1] else cX + shift_x + 1
        up_right_y = frame.shape[0] if cY + shift_y >= frame.shape[0] else cY + shift_y + 1
        roi_cords = (down_left_x, down_left_y), (up_right_x, up_right_y)
        if plot and not crop:
            cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
            cv2.putText(frame, "ROI", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            return cv2.rectangle(frame, (down_left_x, down_left_y), (up_right_x, up_right_y), 255, 2), roi_cords
        elif plot and crop:
            crop_dims = list(self.roi_dims) + [frame.shape[-1]] if len(frame.shape) == 3 else self.roi_dims
            crop = np.zeros(crop_dims, dtype=frame.dtype)
            # this doesn't depend on the center even or odd as it is on the left side.
            # Doesn't need +1 as B-Cx=Delta is counting abs from 1 i.e. includes +1 implicitly
            down_left_x_roi = 0 if cX - shift_x >= 0 else shift_x - cX - epsx
            down_left_y_roi = 0 if cY - shift_y >= 0 else shift_y - cY - epsy
            # if even then we have one more pixel in the up-right as the center is an even number in the first quadrant
            # that is because we are using int division, which equivalent to use ceil(x/y)
            # gamma = frame.shape[0] - 1 - cX from cx to the end of frame the rest is zeros
            epsx = self.roi_dims[0] % 2  # if odd no shift and need only one if even you need one more pixel
            epsy = self.roi_dims[1] % 2  # same for y
            up_right_x_roi = self.roi_dims[0] if cX + shift_x < frame.shape[1] else shift_x + frame.shape[1] - 1 - cX + epsx
            up_right_y_roi = self.roi_dims[1] if cY + shift_y < frame.shape[0] else shift_y + frame.shape[0] - 1 - cY + epsy
            crop[down_left_y_roi:up_right_y_roi, down_left_x_roi:up_right_x_roi] = \
                frame[down_left_y:up_right_y, down_left_x:up_right_x]
            return crop, roi_cords
        else:
            return roi_cords
    @property
    def frames_no_bkg(self):
        return self._frames_no_bkg
    @frames_no_bkg.getter
    def frames_no_bkg(self):
        # NOTE(review): this getter overrides the one above and re-runs the
        # full background subtraction on every attribute access (the None
        # check in remove_background only skips the array allocation) -
        # confirm the duplicate property definition is intended.
        self._frames_no_bkg = self.remove_background()
        return self._frames_no_bkg
    @frames_no_bkg.setter
    def frames_no_bkg(self, value):
        # Setting is only allowed with None and forces a recomputation.
        assert value is None, 'Set none to recalculate'
        self._frames_no_bkg = None
        self._frames_no_bkg = self.remove_background()
    def invert(self):
        '''
        invert frames stored
        :return:
        '''
        for i in range(self.num_frames):
            self.frames[i] = 255 - self.frames[i]
    def save(self, filepath, fps=30, no_background=False):
        """Write frames (or the background-subtracted frames) to a video file."""
        writer = write_video(filepath, self.frames[0].shape, fps=fps)
        if not no_background:
            for frame in self.frames:
                writer.write(frame.astype('uint8'))
        else:
            for frame in self._frames_no_bkg:
                writer.write(frame.astype('uint8'))
        writer.release()
    def save_rois(self, filepath, fps=30):
        """Track the mouse and write the per-frame cropped ROIs to a video."""
        coords = self.track_mouse()
        frame = self.frames[0]
        crop_dims = list(self.roi_dims) + [frame.shape[-1]] if len(frame.shape) == 3 else self.roi_dims
        writer = write_video(filepath, crop_dims, fps=fps)
        for index in range(len(coords)): # range(self.mouse_video.num_frames):
            cX, cY = coords[index]
            frame, roi = self.calculate_roi(index, cX, cY, plot=True, crop=True)
            writer.write(frame.astype('uint8'))
        writer.release()
| StarcoderdataPython |
1927977 | import functools
from dataclasses import dataclass
def singleton(cls):
    """Class decorator restricting *cls* to a single instance.

    The first call constructs the instance; every later call returns
    that same object and silently ignores its arguments.
    """
    instances = []

    # BUGFIX: functools.wraps must be given the wrapped callable; the
    # original bare `@functools.wraps` replaced `wrapper` with a partial
    # of update_wrapper, so calling the decorated class crashed.
    @functools.wraps(cls)
    def wrapper(*args, **kwargs):
        if not instances:
            instances.append(cls(*args, **kwargs))
        return instances[0]
    return wrapper
@singleton
@dataclass
class Person:
    """Simple person record; @singleton shares one instance process-wide."""
    # Field order defines the generated __init__(name, age) signature.
    name: str
    age: int
# Demo: because Person is wrapped by @singleton, only the first call is
# intended to construct an object; later calls should return that same
# instance with their arguments ignored, so all three prints show p1.
p1 = Person('Gosho', 19)
p2 = Person('Pesho', 29)
p3 = Person('Tony', 39)
print(p1)
print(p2)
print(p3)
| StarcoderdataPython |
3201709 | import tweepy
import logging
import time
# Log INFO and above; the root logger is shared by all helpers below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def follow_followers(api):
    """Follow back every follower of the authenticated account."""
    logger.info("Retrieving and following followers")
    for follower in tweepy.Cursor(api.followers).items():
        # Skip accounts we already follow.
        if follower.following:
            continue
        logger.info(f"Following {follower.name}")
        follower.follow()
def main(api):
    """Run one follow-back pass for the given tweepy API client."""
    follow_followers(api)
    # NOTE(review): nothing follows this log line; a sleep/loop driver was
    # presumably intended (the file imports `time` but never uses it) - confirm.
    logger.info("Waiting...")
| StarcoderdataPython |
3379056 | <filename>app.py
# This is a PyThon Flask app to listen for PR updates in the GitHub
# PyTorch repository. Webhooks PRs that satisfy the conditions set
# below trigger an Azure Pipelines run for running PyTorch custom
# tests on the PR's appropriate artifact(s).
import os
import sys
import requests
import json
from flask import Flask, request, abort, jsonify, render_template
from datetime import datetime
app = Flask(__name__)
# Azure DevOps WebHook trigger variables
AZURE_DEVOPS_BASE_WEBHOOK_URL = "https://dev.azure.com/aiinfra/_apis/public/distributedtask/webhooks/"
AZURE_DEVOPS_TRIGGER_NAME = "GitHubPyTorchPRTrigger"
AZURE_DEVOPS_TRIGGER_ADD_ON = "?api-version=6.0-preview"
TRIGGER_URL = AZURE_DEVOPS_BASE_WEBHOOK_URL + AZURE_DEVOPS_TRIGGER_NAME + AZURE_DEVOPS_TRIGGER_ADD_ON
# List of GitHub PyTorch branches we are currently tracking for custom tests.
# BUGFIX: ("master") is just a parenthesized string, so `ref in ...` matched
# any substring of "master" (e.g. "mas"); a one-element tuple restores
# exact branch-name membership tests.
GITHUB_PYTORCH_TRACKED_BRANCHES = ("master",)
# Submitted payloads to Azure DevOps (in-memory history for the web views).
submitted_payloads_history = []
jsons_of_triggered_prs = {}
@app.route("/")
def index():
    """Health-check endpoint."""
    return "Running..."
@app.route("/prwebhook", methods=['POST'])
def github_webhook_endpoint():
    """Validate a GitHub PR webhook payload and trigger an Azure DevOps build.

    Returns a short status string describing the action taken; aborts
    with HTTP 400 when the payload is malformed.
    """
    # Parse GitHub WebHook data, check if received JSON is valid.
    github_webhook_data = request.get_json()
    if github_webhook_data is None:
        abort(400, "Received JSON is NoneType")
    if "pull_request" not in github_webhook_data:
        abort(400, "JSON does not contain PR details")
    pull_request = github_webhook_data["pull_request"]
    if "number" not in pull_request:
        abort(400, "JSON does not contain PR number details")
    if "base" not in pull_request:
        abort(400, "JSON does not contain PR base details")
    if "ref" not in pull_request["base"]:
        abort(400, "JSON does not contain PR base ref details")
    # Upon setting a WebHook in a GitHub repo, GitHub sends a first
    # test payload. The test payload does not have an 'action' field.
    if "action" not in github_webhook_data:
        return "JSON does not contain PR action data. This may be a GitHub test payload. Exiting..."
    # Only newly opened or updated PRs are of interest.
    if github_webhook_data["action"] not in ("opened", "synchronize"):
        return "PR WebHook update is not that of a new opened or updated PR. Exiting..."
    # Draft PRs are ignored. (.get avoids a KeyError on payloads
    # missing the optional "draft" field.)
    if pull_request.get("draft"):
        return "PR is marked as draft. Exiting..."
    pr_base_ref = pull_request["base"]["ref"]
    pr_number = pull_request["number"]
    target_commit = pull_request["head"]["sha"]
    # Only PRs targeting tracked branches trigger builds.
    if pr_base_ref not in GITHUB_PYTORCH_TRACKED_BRANCHES:
        return "PR does not target a targeted PyTorch branch. Exiting..."
    # If the PR is an internal PR (pytorch/pytorch branch --> pytorch/pytorch master),
    # then report full branch name as target branch to check. Else, report PR number
    # in CircleCI format (i.e. refs/pull/12345/head)
    if pull_request["head"]["repo"]["full_name"] == "pytorch/pytorch":
        target_branch_to_check = pull_request["head"]["ref"]
    else:
        target_branch_to_check = "refs/pull/{0}/head".format(pr_number)
    # Build and send HTTP POST Trigger (response intentionally unchecked).
    s = requests.Session()
    s.headers.update({"Authorization": "None"})
    s.post(TRIGGER_URL, json={
        "repositoryName": "pytorch_tests",
        "PR_NUMBER": pr_number,
        "TARGET_COMMIT": target_commit,
        "TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR": target_branch_to_check
    })
    # Record the trigger for the /pulls and /jsons views.
    submitted_payloads_history.append({
        "datetime": datetime.now().strftime("%m/%d/%Y %H:%M:%S") + " PDT",
        "pr_number": pr_number,
        "target_branch": target_branch_to_check,
        "target_commit": target_commit,
    })
    jsons_of_triggered_prs[pr_number] = github_webhook_data
    return "Build submitted for PR #{0} for CircleCI branch: {1} and commit {2}.".format(pr_number, target_branch_to_check, target_commit[:7])
@app.route("/pulls", methods=['GET'])
def display_submitted_payloads_history():
    """Render the history of build triggers submitted to Azure DevOps."""
    return render_template('pulls_view.html', submitted_payloads_history=submitted_payloads_history)
@app.route("/jsons/<int:pr_number>", methods=['GET'])
def display_submitted_jsons_history(pr_number):
    """Return the raw webhook JSON recorded for a triggered PR."""
    if pr_number in jsons_of_triggered_prs:
        return jsons_of_triggered_prs[pr_number]
    return "JSON of PR #{0} not found".format(pr_number)
| StarcoderdataPython |
239920 | <reponame>sturzl/guet
from unittest import TestCase
from unittest.mock import Mock
from guet.commands.command_factory import CommandFactoryMethod
from guet.commands.strategies.print_strategy import PrintCommandStrategy
from guet.commands.decorators.version_decorator import VersionDecorator
from guet.settings.settings import Settings
class TestVersionDecorator(TestCase):
    """Behavior of VersionDecorator around a wrapped command factory."""

    def test_build_returns_decorated_build_when_no_version_flags_exist(self):
        wrapped_factory: CommandFactoryMethod = Mock()
        subject = VersionDecorator(wrapped_factory)
        settings: Settings = Mock()
        args = ['not', 'a', 'version', 'flag']
        subject.build(args, settings)
        # Without a version flag the call is delegated untouched.
        wrapped_factory.build.assert_called_with(args, settings)

    def test_build_returns_print_strategy_if_dash_v_present(self):
        wrapped_factory: CommandFactoryMethod = Mock()
        subject = VersionDecorator(wrapped_factory)
        settings: Settings = Mock()
        command = subject.build(['other', '-v', 'other'], settings)
        wrapped_factory.build.assert_not_called()
        self.assertIsInstance(command.strategy, PrintCommandStrategy)

    def test_build_returns_returns_print_strategy_if_dash_dash_version_present(self):
        wrapped_factory: CommandFactoryMethod = Mock()
        subject = VersionDecorator(wrapped_factory)
        settings: Settings = Mock()
        command = subject.build(['other', '--version', 'other'], settings)
        wrapped_factory.build.assert_not_called()
        self.assertIsInstance(command.strategy, PrintCommandStrategy)
| StarcoderdataPython |
1745389 | #
# import json
#
#
# def get_task_name_from_id(task_id):
# """
# Translate the task id (e.g. 'T0') into abbreviated text (e.g. 'features')
#
# Args:
# task_id (str): task id of Galaxy Zoo question e.g. 'T0'
#
# Returns:
# (str) abbreviated text name of Galaxy Zoo question e.g. 'features'
#
# """
# mapping = {
# 'T0': 'features',
# 'T1': 'edge',
# 'T2': 'has_bar',
# 'T3': 'has_spiral',
# 'T4': 'prominent_bulge',
# 'T5': 'merging',
# 'T6': 'odd',
# 'T7': 'rounded',
# 'T8': 'has_bulge',
# 'T9': 'tight_arms',
# 'T10': 'count_arms'
# }
#     return mapping[task_id]
#
#
# def create_response(classification):
# annotations = json.loads(classification['annotations'])
# clean_annotations = get_clean_annotations(annotations)
# return {
# 'annotations': clean_annotations,
# 'user_id': classification['user_id']
# }
#
#
# def aggregate_responses(responses):
# """
#
# Args:
# responses (list): in form { user_id: 'user_a', annotations: [{task: 'T0', value: 'smooth'}] }
#
# Returns:
# dict of form {T0: {responses: {'smooth': ['user_a', 'user_b']}
#
# """
#
# aggregated_responses = {}
# tasks = set([response['task'] for response in responses])
# for task in tasks:
#
# # get all the responses to that task
# task_responses = []
# for response in responses:
# for annotation in response['annotations']:
# if annotation['task'] == task:
# task_responses.append({
# 'user_id': response['user_id'],
# 'response': annotation['value']
# })
#
# # get all the possible answers to that task
# answer_types = set([task_response['response'] for task_response in task_responses])
#
# # list users who gave each answer
#
# # aggregated_responses[task] = {'responses': } | StarcoderdataPython |
5185295 | #!/usr/bin/env python3
from pathlib import Path
# import argparse
import ast
from graphviz import Digraph
# import jupytext
import os
# import fire
# opt = argparse.ArgumentParser("Function grapher")
# opt.add_argument("-d", "--dir", help="Enter directory")
# opt.add_argument(
# "-f", type=bool, help="Generate for functions. True by default", default=True)
# opt.add_argument(
# "-c", type=bool, help="Generate for classes. True by default", default=True)
# opt.add_argument(
# "-fo", help="Format to save. Default pdf. Choose between svg/pdf/png etc.", default="png")
# args = opt.parse_args()
#
def top_level_functions(body):
    """Yield the FunctionDef nodes found directly in *body*."""
    for node in body:
        if isinstance(node, ast.FunctionDef):
            yield node
def top_level_classes(body):
    """Return an iterator over the ClassDef nodes directly in *body*."""
    return filter(lambda node: isinstance(node, ast.ClassDef), body)
def get_all_functions(filename):
    """Parse *filename* and list its top-level class and function names.

    :return: dict with "classes" and "functions" name lists
    """
    with open(str(filename), 'r') as handle:
        module = ast.parse(handle.read())
    class_names = [node.name for node in module.body if isinstance(node, ast.ClassDef)]
    function_names = [node.name for node in module.body if isinstance(node, ast.FunctionDef)]
    return {"classes": class_names, "functions": function_names}
def get_files(file_path):
    """Collect all .py files under *file_path* and map each to its
    top-level function/class names.

    Notebooks (.ipynb) are first converted to .py via jupytext, scanned
    like regular files, and the generated .py files are deleted at the end.

    :return: dict {Path: {"classes": [...], "functions": [...]}} (values
        stay an empty list for files that failed to parse)
    """
    print("[INFO] Creating list of files")
    all_files = Path(file_path)
    try:
        ipynb_files = [path for path in all_files.rglob("*.ipynb")]
        # Convert notebooks to .py so they can be parsed with ast.
        tmp = [os.system(f"jupytext --to py {fil}") for fil in ipynb_files]
    except:
        # NOTE(review): bare except silently disables notebook support on
        # any failure (e.g. jupytext missing) - consider narrowing.
        ipynb_files = []
    py_files = {path: [] for path in all_files.rglob("*.py")}
    fils = list(py_files.keys())
    for fil in fils:
        try:
            py_files[fil] = get_all_functions(fil)
        except SyntaxError:
            # Unparseable file: keep the empty placeholder and move on.
            print(f"Could not read file: {fil}")
    print("[INFO] Done creating list of files")
    # Remove the .py files generated from notebooks (checkpoints excluded).
    for fil in ipynb_files:
        if "checkpoint" not in str(fil):
            os.remove(str(fil).replace(".ipynb", ".py"))
    return py_files
def graph_creator(dictionary, dir, fo, retType="functions"):
    """Render a graph linking each file to its functions or classes.

    :param dictionary: output of get_files()
    :param dir: directory where the rendered graph is written
    :param fo: graphviz output format (e.g. "pdf", "png", "svg")
    :param retType: which member list to graph: "functions" or "classes"
    """
    # Create the graph
    dot = Digraph("Project")
    dot.format = fo
    # Create node names
    for file in dictionary:
        dot.node(file.name)
        # Add functions/classes
        try:
            for methods in dictionary[file][retType]:
                dot.edge(file.name, methods)
        except TypeError:
            # Entry was never populated (still the [] placeholder from
            # get_files) - skip its edges.
            pass
    print(dot.source)
    # Save graphs in required location
    dot.render(Path.joinpath(Path(dir), retType))
    # render() leaves the intermediate DOT source file behind; delete it.
    Path.unlink(Path.joinpath(Path(dir), retType))
    print("[INFO] Saved Graph")
def mainrunner(dir=".", fo="pdf"):
    """Generate both the functions and the classes graph for a project.

    :param dir: project root to scan (also where the graphs are written)
    :param fo: graphviz output format
    """
    processed = get_files(dir)
    graph_creator(processed, dir, fo, "functions")
    graph_creator(processed, dir, fo, "classes")
    print("[INFO] Please check the project directory for the graphs")
# if __name__ == '__main__':
# fire.Fire(mainrunner)
| StarcoderdataPython |
11310332 | # Enable type hinting for static methods
from __future__ import annotations
from typing import Optional, Annotated, Any
from dataclasses import dataclass
from requests import Response
@dataclass
class RoundData:
    """One player's participation record for a single game round.

    Field names mirror the Scrubby API JSON keys so instances can be
    built directly from a decoded response object.
    """
    roundID: int
    job: Optional[str]
    timestamp: Annotated[str, "ISO 8601, YYYY-MM-DDThh:mm:ss.ffffZ"]
    connectedTime: Annotated[str, "hh:mm:ss.fffffff"]
    roundStartPlayer: bool
    playedInRound: bool
    antagonist: bool
    roundStartSuicide: bool
    isSecurity: bool
    firstSuicide: bool
    firstSuicideEvidence: Optional[Any]
    name: Optional[str]
    server: str

    @staticmethod
    def from_scrubby_response(r: Response) -> list[RoundData]:
        """Decode a Scrubby HTTP response into a list of RoundData.

        The ``object_hook`` converts every JSON object in the payload
        into a RoundData instance while the body is being parsed.
        """
        return r.json(object_hook=lambda d: RoundData(**d))
1898331 | from sqlalchemy.exc import SQLAlchemyError, IntegrityError
from convergence.utils import exceptions
from convergence.utils import logger
from convergence.data.repo import Store
from convergence.data.models import User
class UserStore(Store):
    """Repository for ``User`` rows; wraps session queries and commits."""

    def __init__(self, session=None):
        super().__init__(session)

    def get_user_by_email(self, email):
        """
        Return user for provided email address
        :param email: email address
        :return: User object, or None if no match
        """
        return self.session.query(User).filter_by(email=email).first()

    def get_user_by_id(self, user_id):
        """
        Return user for provided user id
        :param user_id: user id
        :return: User object, or None if no match
        """
        return self.session.query(User).get(user_id)

    def get_users_by_ids(self, user_ids):
        """
        Return users for provided user ids
        :param user_ids: list of user ids
        :return: list of User objects
        """
        return self.session.query(User).filter(User.id.in_(user_ids)).all()

    def get_users_by_emails(self, emails):
        """
        Return users with given set of usernames (emails).
        Resulting list might be equal or less in length
        than the requested usernames.
        :param emails: list of user names (emails)
        :return: list of User objects
        """
        return self.session.query(User).filter(User.email.in_(emails)).all()

    def add_user(self, user):
        """
        Add user to database
        :param user: User object
        :raises InputError: if the email address is already in use
        :raises ServerError: on any other database failure
        """
        self.session.add(user)
        try:
            self.session.commit()
        except IntegrityError:
            # Unique constraint on email: surface as a client input error.
            self.session.rollback()
            raise exceptions.InputError("Email address in use.")
        except SQLAlchemyError as e:
            self.session.rollback()
            logger.log_error(f"Database Error: {str(e)}")
            raise exceptions.ServerError("Error adding user")

    def delete_user(self, user):
        """
        Delete user from database
        :param user: User object
        :raises ServerError: on database failure
        """
        self.session.delete(user)
        try:
            self.session.commit()
        except SQLAlchemyError as e:
            self.session.rollback()
            logger.log_error(f"Database Error: {str(e)}")
            raise exceptions.ServerError("Error deleting user")

    def commit_changes(self):
        """
        Commit changes in session object to database
        :raises ServerError: on database failure
        """
        try:
            self.session.commit()
        except SQLAlchemyError as e:
            # BUGFIX: roll back like the other mutators so the session is
            # not left in a failed state after a commit error.
            self.session.rollback()
            logger.log_error(f"Database Error: {str(e)}")
            raise exceptions.ServerError("Error updating user info")
| StarcoderdataPython |
301496 | <filename>orangecontrib/OasysWiser/widgets/optical_elements/ow_plane_mirror.py
import numpy
from syned.widget.widget_decorator import WidgetDecorator
from syned.beamline.shape import Plane
from wiselib2 import Optics
from wofrywise2.beamline.optical_elements.wise_plane_mirror import WisePlaneMirror
from orangecontrib.wise2.widgets.gui.ow_optical_element import OWOpticalElement
class OWPlaneMirror(OWOpticalElement, WidgetDecorator):
    """Orange widget wrapping a Wise2 plane-mirror optical element."""
    name = "PlaneMirror"
    id = "PlaneMirror"
    description = "Plane Mirror"
    icon = "icons/plane_mirror.png"
    priority = 1

    def after_change_workspace_units(self):
        # A plane mirror adds no unit-dependent fields beyond the base class.
        super(OWPlaneMirror, self).after_change_workspace_units()

    def build_mirror_specific_gui(self, container_box):
        # Plane mirrors have no shape-specific parameters to expose.
        pass

    def get_inner_wise_optical_element(self):
        """Build the underlying wiselib2 plane-mirror optics object."""
        return Optics.MirrorPlane(L=self.length * self.workspace_units_to_m,
                                  AngleGrazing=numpy.deg2rad(self.alpha))

    def get_optical_element(self, inner_wise_optical_element):
        """Wrap the wiselib2 mirror into a WisePlaneMirror beamline element."""
        return WisePlaneMirror(name=self.oe_name,
                               plane_mirror=inner_wise_optical_element,
                               position_directives=self.get_PositionDirectives())

    def receive_specific_syned_data(self, optical_element):
        # No extra syned fields are consumed for a plane mirror.
        pass

    def check_syned_shape(self, optical_element):
        """Validate that incoming syned data describes a plane surface."""
        if not isinstance(optical_element._surface_shape, Plane):
            # BUGFIX: the message previously said "Elliptical" (copy-paste
            # from another mirror widget) although the check is for Plane.
            raise Exception("Syned Data not correct: Mirror Surface Shape is not Plane")
| StarcoderdataPython |
11297096 | <filename>withpty.py
#!/usr/bin/python
# Run the command given on the command line inside a new pseudo-terminal;
# pty.spawn() copies I/O between the child process and this process.
import pty, sys; pty.spawn(sys.argv[1:])
| StarcoderdataPython |
9769608 | """Trainer for OCR CTC model."""
import paddle.fluid as fluid
from utility import add_arguments, print_arguments, to_lodtensor, get_feeder_data
from crnn_ctc_model import ctc_train_net
import ctc_reader
import argparse
import functools
import sys
import time
import os
import numpy as np
# Command-line interface: every flag is registered through the add_arg
# helper (utility.add_arguments) bound to this parser.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('total_step', int, 720000, "Number of training iterations.")
add_arg('log_period', int, 1000, "Log period.")
add_arg('save_model_period', int, 15000, "Save model period. '-1' means never saving the model.")
add_arg('eval_period', int, 15000, "Evaluate period. '-1' means never evaluating the model.")
add_arg('save_model_dir', str, "./models", "The directory the model to be saved to.")
add_arg('init_model', str, None, "The init model file of directory.")
add_arg('use_gpu', bool, True, "Whether use GPU to train.")
add_arg('min_average_window',int, 10000, "Min average window.")
add_arg('max_average_window',int, 12500, "Max average window. It is proposed to be set as the number of minibatch in a pass.")
add_arg('average_window', float, 0.15, "Average window.")
add_arg('parallel', bool, False, "Whether use parallel training.")
# yapf: enable
def train(args, data_reader=ctc_reader):
    """OCR CTC training.

    Builds the CRNN-CTC network, optionally loads an initial model,
    then trains until ``args.total_step`` iterations, logging, testing
    and checkpointing at the configured periods. (Python 2 / legacy
    PaddlePaddle Fluid API.)
    """
    num_classes = None
    train_images = None
    train_list = None
    test_images = None
    test_list = None
    # Fall back to the reader's class count when not explicitly given.
    num_classes = data_reader.num_classes(
    ) if num_classes is None else num_classes
    data_shape = data_reader.data_shape()
    # define network
    images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
    label = fluid.layers.data(
        name='label', shape=[1], dtype='int32', lod_level=1)
    sum_cost, error_evaluator, inference_program, model_average = ctc_train_net(
        images, label, args, num_classes)
    # data reader
    train_reader = data_reader.train(
        args.batch_size,
        train_images_dir=train_images,
        train_list_file=train_list)
    test_reader = data_reader.test(
        test_images_dir=test_images, test_list_file=test_list)
    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    # load init model
    if args.init_model is not None:
        # init_model may be a directory or a single parameter file.
        model_dir = args.init_model
        model_file_name = None
        if not os.path.isdir(args.init_model):
            model_dir = os.path.dirname(args.init_model)
            model_file_name = os.path.basename(args.init_model)
        fluid.io.load_params(exe, dirname=model_dir, filename=model_file_name)
        print "Init model from: %s." % args.init_model
    train_exe = exe
    error_evaluator.reset(exe)
    if args.parallel:
        train_exe = fluid.ParallelExecutor(
            use_cuda=True, loss_name=sum_cost.name)
    fetch_vars = [sum_cost] + error_evaluator.metrics
    def train_one_batch(data):
        # Run one minibatch; parallel results are summed across devices.
        var_names = [var.name for var in fetch_vars]
        if args.parallel:
            results = train_exe.run(var_names,
                                    feed=get_feeder_data(data, place))
            results = [np.array(result).sum() for result in results]
        else:
            results = exe.run(feed=get_feeder_data(data, place),
                              fetch_list=fetch_vars)
            results = [result[0] for result in results]
        return results
    def test(iter_num):
        # Evaluate sequence error over the whole test set.
        error_evaluator.reset(exe)
        for data in test_reader():
            exe.run(inference_program, feed=get_feeder_data(data, place))
        _, test_seq_error = error_evaluator.eval(exe)
        print "\nTime: %s; Iter[%d]; Test seq error: %s.\n" % (
            time.time(), iter_num, str(test_seq_error[0]))
    def save_model(args, exe, iter_num):
        # Persist parameters as a single file named after the iteration.
        filename = "model_%05d" % iter_num
        fluid.io.save_params(
            exe, dirname=args.save_model_dir, filename=filename)
        print "Saved model to: %s/%s." % (args.save_model_dir, filename)
    iter_num = 0
    while True:
        total_loss = 0.0
        total_seq_error = 0.0
        # train a pass
        for data in train_reader():
            iter_num += 1
            if iter_num > args.total_step:
                return
            results = train_one_batch(data)
            total_loss += results[0]
            total_seq_error += results[2]
            # training log
            if iter_num % args.log_period == 0:
                print "\nTime: %s; Iter[%d]; Avg Warp-CTC loss: %.3f; Avg seq err: %.3f" % (
                    time.time(), iter_num,
                    total_loss / (args.log_period * args.batch_size),
                    total_seq_error / (args.log_period * args.batch_size))
                sys.stdout.flush()
                total_loss = 0.0
                total_seq_error = 0.0
            # evaluate
            if iter_num % args.eval_period == 0:
                # Evaluate with averaged weights when model averaging is on.
                if model_average:
                    with model_average.apply(exe):
                        test(iter_num)
                else:
                    test(iter_num)
            # save model
            if iter_num % args.save_model_period == 0:
                if model_average:
                    with model_average.apply(exe):
                        save_model(args, exe, iter_num)
                else:
                    save_model(args, exe, iter_num)
def main():
    """Parse CLI arguments, echo them, and launch training."""
    args = parser.parse_args()
    print_arguments(args)
    train(args, data_reader=ctc_reader)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9731558 | <filename>src/graph_transpiler/webdnn/backend/code_generator/command_buffer.py
from typing import List, Tuple
from webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector
from webdnn.util import flags
class CommandBuffer:
    """Ordered buffer of code-generation commands.

    Each public method appends a tagged tuple to :attr:`codes`; a code
    generator later walks this list to emit source text. Data registered
    through :meth:`load` is handed to the ``BufferInjector`` under a
    freshly generated unique key.
    """

    def __init__(self, buffer_injector: "BufferInjector"):
        self.codes = []  # type: List[Tuple]
        self.buffer_injector = buffer_injector
        self._unique_counter = 0  # suffix for generated unique names

    def _generate_unique_name(self, prefix: str = "") -> str:
        """Return *prefix* followed by a fresh counter value."""
        self._unique_counter += 1
        return prefix + str(self._unique_counter)

    def declare(self, varname: str, typename: str, initial_value: str = None, const: bool = False):
        """Declare new variable"""
        self.codes.append(("declare", typename, varname, initial_value, const))

    def load(self, varname: str, value: "Any", typename: str = None, const: bool = False):
        """Load data from buffer (Internally, %%LOAD_BUFFER%% is called)"""
        buffer_key = self._generate_unique_name()
        # BUGFIX: _generate_unique_name() already advances the counter; the
        # original extra `self._unique_counter += 1` here only left gaps in
        # the generated key sequence.
        if flags.DEBUG:
            self.comment(f"load: {value}")
        self.codes.append(("load", typename, varname, buffer_key, const))
        self.buffer_injector.register({
            buffer_key: value
        })

    def exec(self, expression: str):
        """Execute any expression"""
        self.codes.append(("exec", expression))

    def enterFor(self, counter: str, initial_val: str, max_val: str, step_value: str):
        """Enter for-loop"""
        self.codes.append(("enterFor", counter, initial_val, max_val, step_value))

    def exitFor(self):
        """Exit for-loop"""
        self.codes.append(("exitFor",))

    def enterBlockScope(self):
        """Enter new block scope. All declared variable in this scope has no effect for outside of the scope."""
        self.codes.append(("enterBlockScope",))

    def exitBlockScope(self):
        """Exit block scope."""
        self.codes.append(("exitBlockScope",))

    def comment(self, text):
        """Add comment."""
        self.codes.append(("comment", text))
| StarcoderdataPython |
3436597 | <filename>tests/client/test_builds.py
import json
import pytest
import requests_mock
from fl33t.exceptions import InvalidBuildIdError
from fl33t.models import Build
def test_get_build(fl33t_client):
    """Fetching a build by id returns a Build bound to its train."""
    build_id = 'mnbv'
    train_id = 'vbnm'
    payload = {
        'build': {
            'build_id': build_id,
            'download_url': 'https://build.example.com/some/build/path',
            'filename': 'build.tar',
            'md5sum': '14758f1afd44c09b7992073ccf00b43d',
            'released': False,
            'size': 194503,
            'status': 'created',
            'train_id': train_id,
            'upload_tstamp': '2018-05-30T22:31:08.836406Z',
            'upload_url': None,
            'version': '5.4.1'
        }
    }
    url = '/'.join((fl33t_client.base_team_url, 'build', build_id))
    with requests_mock.Mocker() as mock:
        mock.get(url, text=json.dumps(payload))
        result = fl33t_client.get_build(build_id)
        assert isinstance(result, Build)
        assert result.train_id == train_id
def test_fail_get_build_invalid_id(fl33t_client):
    """A 400 response raises InvalidBuildIdError; the follow-up 404 does not."""
    build_id = 'asdf'
    url = '/'.join((fl33t_client.base_team_url, 'build', build_id))
    responses = [
        {'status_code': 400, 'text': 'Invalid build ID'},
        {'status_code': 404, 'text': 'Page not found'}
    ]
    with requests_mock.Mocker() as mock:
        mock.get(url, responses)
        # First request consumes the 400 and must raise.
        with pytest.raises(InvalidBuildIdError):
            fl33t_client.get_build(build_id)
        # Second request consumes the 404; no exception is expected here.
        fl33t_client.get_build(build_id)
| StarcoderdataPython |
133002 | #!/usr/bin/env python
"""
Export a history to an archive file using attribute files.
usage: %prog history_attrs dataset_attrs job_attrs out_file
-G, --gzip: gzip archive file
"""
from __future__ import print_function
import optparse
import os
import shutil
import sys
from galaxy.model.store import tar_export_directory
def create_archive(export_directory, out_file, gzip=False):
    """Build a history archive at *out_file* from *export_directory*.

    Returns 0 on success; on any failure prints the error to stderr and
    returns 1 (used as the process exit status by callers).
    """
    try:
        tar_export_directory(export_directory, out_file, gzip)
        # Status line for the job log.
        print('Created history archive.')
        return 0
    except Exception as exc:
        print('Error creating history archive: %s' % str(exc), file=sys.stderr)
        return 1
def main(argv=None):
    """Parse arguments and create the history archive.

    Supports two historical invocation styles:
      * Galaxy 19.01 / pre-18.0X: history_attrs dataset_attrs job_attrs out_file
      * Galaxy 19.05+: export_directory out_file
    """
    # Parse command line.
    parser = optparse.OptionParser()
    parser.add_option('-G', '--gzip', dest='gzip', action="store_true", help='Compress archive using gzip.')
    parser.add_option('--galaxy-version', dest='galaxy_version', help='Galaxy version that initiated the command.', default=None)
    (options, args) = parser.parse_args(argv)
    galaxy_version = options.galaxy_version
    if galaxy_version is None:
        # Heuristic: the old style passed 4 positional args, the new one 2.
        galaxy_version = "19.01" if len(args) == 4 else "19.05"
    gzip = bool(options.gzip)
    if galaxy_version == "19.01":
        # This job was created pre 18.0X with old argument style.
        out_file = args[3]
        temp_directory = os.path.dirname(args[0])
    else:
        assert len(args) >= 2
        # We have a 19.0X directory argument instead of individual arguments.
        temp_directory = args[0]
        out_file = args[1]
    if galaxy_version == "19.01":
        # Normalize old-style individual attribute files into the directory
        # layout that tar_export_directory expects.
        history_attrs = os.path.join(temp_directory, 'history_attrs.txt')
        dataset_attrs = os.path.join(temp_directory, 'datasets_attrs.txt')
        job_attrs = os.path.join(temp_directory, 'jobs_attrs.txt')
        shutil.move(args[0], history_attrs)
        shutil.move(args[1], dataset_attrs)
        provenance_path = args[1] + ".provenance"
        if os.path.exists(provenance_path):
            shutil.move(provenance_path, dataset_attrs + ".provenance")
        shutil.move(args[2], job_attrs)
    # Create archive.
    return create_archive(temp_directory, out_file, gzip=gzip)
if __name__ == "__main__":
    # Script entry point. main()'s return value (0/1) is not propagated as an
    # exit code here.
    main()
| StarcoderdataPython |
5043784 | <reponame>srinivasdabbeeru/cisco_python<gh_stars>0
#!/usr/bin/python3
import netmiko
#multi-vendor library
#Device IPs I am connecting to today
#192.168.90.146
#192.168.90.147
#192.168.90.148
# Connection parameters for the first target device.
device1 = {
'username' : 'root',
'password' : '<PASSWORD>',
'device_type' : 'cisco_ios',
'host' : '192.168.90.146'
}
# to connect target device
# by checking couple of things connecthandler will allow you to connect
'''
. device_type
'''
# Open the SSH session; ConnectHandler dispatches on 'device_type'.
device_connect = netmiko.ConnectHandler(**device1)
# Introspection aid: list the send_* methods available on the connection.
print([i for i in dir(device_connect) if 'send' in i])
# now sending command
cmd = ["show ip int br", "show run | inc domain"]
for i in cmd:
    print("sending command ",i)
    print ("------------------")
    output = device_connect.send_command(i)
print(output) | StarcoderdataPython |
5197373 | # coding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from future.utils import raise_
from future.utils import raise_with_traceback
from future.utils import raise_from
from future.utils import iteritems
import os
import logging
import psutil
from multiprocessing import cpu_count
from sensesagent.collectors.collector import Collector
class SystemStatsCollector(Collector):
    """
    Collects system statistics: 1/5/15-minute load averages and CPU
    utilisation percentage.
    """

    def collect_metrics(self):
        """Gather load-average and CPU metrics into this collector.

        Fills the metrics object via Collector.add_metric().
        """
        load_1_minute, load_5_minute, load_15_minute = os.getloadavg()
        self.add_metric("load_1_minute", load_1_minute)
        self.add_metric("load_5_minute", load_5_minute)
        self.add_metric("load_15_minute", load_15_minute)
        # Percentage since the previous psutil.cpu_percent() call.
        self.add_metric("cpu_percent", psutil.cpu_percent())
if __name__ == "__main__":
    # Ad-hoc manual check: build a collector and print its repr.
    ssc = SystemStatsCollector()
    print(ssc)
| StarcoderdataPython |
3200787 | <reponame>dailishan/pachong
# coding:utf-8
# 下载豆瓣爱情的电影封面
import requests
import json
# Download a single cover image.
def download(url, title):
    """Fetch *url* and save its body to './<title>.jpg'.

    Prints the title on success; on connection failure prints a notice
    (kept in Chinese, matching the rest of the script's output).
    """
    # Renamed from 'dir' so the builtin dir() is not shadowed.
    path = './' + title + '.jpg'
    try:
        pic = requests.get(url)
        # Context manager guarantees the file is closed even if write() raises.
        with open(path, 'wb') as fp:
            fp.write(pic.content)
        print(title)
    except requests.exceptions.ConnectionError:
        print('图片无法下载')
# Page through Douban's search API 20 results at a time.
for num in range(0, 1000, 20):
    # Build the page URL; paging is controlled by 'start=', tag=电影 (movie),
    # genres=爱情 (romance). Changing 'start' fetches a different page.
    url = 'https://movie.douban.com/j/new_search_subjects?sort=U&range=0,10&tags=%E7%94%B5%E5%BD%B1&start='\
          + str(num)+'&genres=%E7%88%B1%E6%83%85'
    print(url)
    html = requests.get(url).text
    # Decode the JSON payload.
    # NOTE(review): the 'encoding' kwarg to json.loads was removed in
    # Python 3.9 -- confirm the target interpreter version.
    res = json.loads(html, encoding='utf-8')
    for result in res['data']:
        cover = result['cover']
        title = result['title']
download(cover, title) | StarcoderdataPython |
201491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
sys.path.insert(0, os.path.abspath('..'))
from helpers import unittest
from pycaustic import Scraper
from pycaustic.errors import InvalidInstructionError
FILE_PATH = os.path.abspath(__file__)
class TestSetup(object):
    """Shared no-op fixture base mixed into the scraper test cases."""

    def setUp(self):
        # Intentionally empty: present so subclasses share a common hook.
        pass
class StubMixin():
    # NOTE(review): 'headers' uses a mutable default ({}); harmless while the
    # method is an unimplemented stub, but fix before fleshing it out.
    def mockRequest(self, url, method='get', status=200, headers={},
            content=''):
        """
        Mock an HTTP request.
        """
        # Stub only -- no HTTP interception is actually performed.
        pass
class TestScraper(TestSetup, StubMixin, unittest.TestCase):
def test_simple_find(self):
"""
Test scraping data from predefined input.
"""
instruction = {
'find': 'foobar',
}
resp = Scraper().scrape(instruction, input="foobar baz boo")
self.assertEquals('found', resp.status)
results = resp.results
# Should have one result
self.assertEquals(1, len(results))
result = results[0]
# It should have the value from regex and no children
self.assertEquals('foobar', result.value)
self.assertIsNone(result.children)
def test_filesystem_json(self):
"""
Test loading in some JSON
"""
instruction = 'fixtures/find-foobar.json'
resp = Scraper().scrape(instruction, input="foobar baz boo",
uri= FILE_PATH)
self.assertEquals('found', resp.status)
self.assertEquals({
'value': 'foobar'
}, resp.results[0].as_dict())
def test_filesystem_ref(self):
"""
Test loading in some JSON by reference in extends
"""
instruction = 'fixtures/find-foobar-by-extension.json'
resp = Scraper().scrape(instruction, input="foobar baz boo",
uri= FILE_PATH)
self.assertEquals('found', resp.status)
self.assertEquals({
'value': 'foobar'
}, resp.results[0].as_dict())
def test_object_extends(self):
"""
We should be able to get a valid scraper using extends.
"""
instruction = {
"extends": {
"find": r"foo\w+"
},
"match": 1
}
resp = Scraper().scrape(instruction, input="foobar foobaz")
# results should only be the second 'foo'
self.assertEquals('found', resp.status)
self.assertEquals([{
'value': 'foobaz'
}], [r.as_dict() for r in resp.results])
def test_array_extends(self):
"""
We should be able to get a valid scraper using an array of extends
objects.
"""
instruction = {
"extends": [{
"find": r"foo\w+"
},{
"match": 1
}]
}
resp = Scraper().scrape(instruction, input="foobar foobaz")
# results should only be the second 'foo'
self.assertEquals('found', resp.status)
self.assertEquals([{
'value': 'foobaz'
}], [r.as_dict() for r in resp.results])
def test_object_extends_update(self):
"""
We should be able to use extends to update certain keys (posts!) in the
original.
"""
resp = Scraper().scrape({
'load': 'http://httpbin.org/post',
'posts': {
'roses': 'red'
},
'extends': {
'posts': {
'violets': 'blue'
}
}
}, force=True)
self.assertEquals('loaded', resp.status)
bin_content = json.loads(resp.results[0].value)
self.assertEquals({
'roses': 'red',
'violets': 'blue'
}, bin_content['form'])
def xtest_simple_google_request(self):
"""
Test a very straightforward request to Google to look for "I'm feeling
lucky".
"""
instruction = {
"load" : "http://www.google.com",
"then" : {
"find" : "Feeling\\s[\\w]*",
"name" : "Feeling?",
"match": 0
}
}
resp = Scraper().scrape(instruction, force=True)
# Outer level
self.assertEquals('loaded', resp.status)
# Should have one result from page load
results = resp.results
self.assertEquals(1, len(results))
result = results[0]
# This result should have one child
children = result.children
self.assertEquals(1, len(children))
child = children[0]
# This child should have been successful with the name specified in
# instruction
self.assertEquals('found', child.status)
self.assertEquals('Feeling?', child.name)
# The child should have one result
results = child.results
self.assertEquals(1, len(results))
result = results[0]
# Whose value should be the word after "Feeling"
self.assertEquals('Feeling Lucky', result.value)
# And which has no children
self.assertIsNone(result.children)
def test_then_reference_string(self):
"""
Test that the proper URI is maintained for resolving "then"
"""
resp = Scraper().scrape('fixtures/then-link.json',
uri=FILE_PATH,
input="foobaz foobar")
self.assertEquals(resp.status, 'found')
self.assertEquals('foobar', resp.results[1].value)
self.assertEquals('foobar', resp.results[1].children[0].results[0].value)
def test_then_reference_list(self):
"""
Test that the proper URI is maintained for resolving "then" in
a list
"""
resp = Scraper().scrape('fixtures/then-links.json',
uri=FILE_PATH,
input="foobaz foobar")
self.assertEquals(resp.status, 'found')
self.assertEquals('foobar', resp.results[1].value)
self.assertEquals('foobar', resp.results[1].children[0].results[0].value)
self.assertEquals('foobar', resp.results[1].children[1].results[0].value)
def test_nonexistent_file(self):
"""
Test that an InvalidInstructionError is thrown for unknown files.
"""
with self.assertRaises(InvalidInstructionError):
Scraper().scrape('/does/not/exist')
def test_nested_files(self):
"""
Each URI should be resolved relative to the current URL.
"""
resp = Scraper().scrape('fixtures/nested.json',
uri=FILE_PATH,
input="there are some russian dolls")
self.assertEquals(resp.status, 'found')
self.assertEquals('some russian dolls', resp.results[0].value)
self.assertEquals('russian dolls', resp.results[0].children[0].results[0].value)
self.assertEquals('dolls', resp.results[0].children[0].results[0].children[0].results[0].value)
def test_flattened_values_all_one_to_one(self):
"""
Flattened values
"""
resp = Scraper().scrape({
"find": "^.*$",
"name": "everything",
"match": 0,
"then": [{
'name': 'roses',
'find': 'red'
}, {
'name': 'violets',
'find': 'blue'
}]
}, input='red blue')
self.assertEquals({
'everything': 'red blue',
'roses': 'red',
'violets': 'blue'
}, resp.flattened_values)
def test_flattened_values_with_one_to_many(self):
"""
Flattened values
"""
resp = Scraper().scrape({
"find": "^.*$",
"name": "everything",
"match": 0,
"then": [{
'name': 'roses',
'find': 'red'
}, {
'name': 'violets',
'find': 'blue'
}]
}, input='red blue blue')
self.assertEquals({
'everything': 'red blue blue',
'roses': 'red',
'violets': [{'violets': 'blue' },
{'violets': 'blue' } ]
}, resp.flattened_values)
def test_nested_flattened_values(self):
"""
Flattened values
"""
resp = Scraper().scrape({
"find": "^.*$",
"name": "everything",
"match": 0,
"then": {
'name': 'sentences',
'find': r'\s?([^.]+)\.',
'replace': '$1',
'then': [{
'name': 'first word',
'find': r'^\w+'
}, {
'name': 'last word',
'find': r'\w+$'
}]
}
}, input='roses are red. violets are blue.')
self.assertEquals({
'everything': 'roses are red. violets are blue.',
'sentences': [{
'sentences': 'roses are red',
'first word': "roses",
'last word': 'red'
}, {
'sentences': 'violets are blue',
'first word': 'violets',
'last word': 'blue'
}]
}, resp.flattened_values)
def test_flattened_overwrite(self):
"""
Should prefer deeply-nested values
"""
resp = Scraper().scrape({
"find": "^.*$",
"name": "roses",
"match": 0,
"then": {
"name": "roses",
"find": "^(.*)$",
"replace": "$1 foobar"
}
}, input='red')
self.assertEquals({
'roses': 'red foobar'
}, resp.flattened_values)
def test_sibling_accessibility(self):
"""
One-to-one sibling tags should be accessible.
"""
resp = Scraper().scrape({
"find": r'\w+ \w+ \w+',
"name": "three words",
"then": [{
"find": r'\w+',
"match": 0,
"name": "first"
}, {
"find": r'\w+',
"match": 1,
"name": "second"
}, {
"find": r'\w+',
"match": 2,
"name": "third"
}, {
"find": ".*",
"name": "backwards",
"match": 0,
"replace": "{{third}} {{second}} {{first}}"
}]
}, input='the quick brown fox jumped over the lazy dog')
self.assertEquals([{
'three words': 'the quick brown',
'first': 'the',
'second': 'quick',
'third': 'brown',
'backwards': 'brown quick the'
}, {
'three words': 'fox jumped over',
'first': 'fox',
'second': 'jumped',
'third': 'over',
'backwards': 'over jumped fox'
}, {
'three words': 'the lazy dog',
'first': 'the',
'second': 'lazy',
'third': 'dog',
'backwards': 'dog lazy the'
}], resp.flattened_values)
def test_input_instruction(self):
"""
Possible to specify input in instruction.
"""
resp = Scraper().scrape({
"find": r'(roses|violets)',
"name": "flower",
"input": "roses are red"
}, input="violets are blue")
self.assertEquals({
"flower": "roses"
}, resp.flattened_values)
def test_input_instruction_template(self):
"""
Possible to parameterize input instruction.
"""
resp = Scraper().scrape({
"find": r'(roses|violets)',
"name": "flower",
"input": "{{flowers}} are red"
}, input="violets are blue", tags = {
"flowers": "roses"
})
self.assertEquals({
"flower": "roses"
}, resp.flattened_values)
def test_match_substitution(self):
"""
Should be possible to use templates in match.
"""
resp = Scraper().scrape({
"find": r'\w+',
"name": "president",
"match": "{{which}}"
}, input="<NAME>", tags = {
"which": "2"
})
self.assertEquals({
"president": "jefferson"
}, resp.flattened_values)
def test_match_substitution_min_max(self):
"""
Should be possible to use templates in min_match and max_match.
"""
resp = Scraper().scrape({
"find": r'\w+',
"name": "president",
"min_match": "{{begin}}",
"max_match": "{{end}}"
}, input="<NAME>", tags = {
"begin": "1",
"end": "2"
})
self.assertEquals([{
"president": "adams"
}, {
"president": "jefferson"
}], resp.flattened_values)
def test_tags_in_instruction(self):
"""
Should be possible to place tags directly in instruction.
"""
resp = Scraper().scrape({
"find": r'{{{flower}}}',
"name": "flower",
"tags": {
"flower": "petunias"
}
}, input="violets roses petunias")
self.assertEquals({
"flower": "petunias"
}, resp.flattened_values)
def test_capture_match_numbers_replace(self):
"""
Should be possible to capture the number of a match in the replace string.
"""
resp = Scraper().scrape({
"find": r'(\w+)',
"name": "president",
"tag_match": "which",
"replace": "$1 was {{which}}"
}, input="<NAME>")
self.assertEquals([{
"president": "<NAME> 0"
}, {
"president": "<NAME> 1"
}, {
"president": "<NAME> 2"
}], resp.flattened_values)
def test_capture_match_numbers_in_tags(self):
"""
Children should have access to the tag_match, too.
"""
resp = Scraper().scrape({
"find": r'\w+',
"tag_match": "which",
"name": "president",
"then": {
"find": r'(\w+)',
"name": "sentence",
"input": "first second third",
"match": "{{which}}",
"replace": "{{{president}}} was $1"
}
}, input="<NAME>")
self.assertEquals([{
"president": "washington",
"sentence": "washington was first"
}, {
"president": "adams",
"sentence": "adams was second"
}, {
"president": "jefferson",
"sentence": "<NAME> third"
}], resp.flattened_values)
def test_replace_tag(self):
"""
Should be able to place arbitrary tags in replace.
"""
resp = Scraper().scrape({
"find": r'(\w+)',
"name": "flower",
"replace": "$1 are {{{adjective}}}",
"tags": {
"adjective": "beautiful"
}
}, input="roses")
self.assertEquals({
"flower": "roses are beautiful"
}, resp.flattened_values)
def test_replace_self(self):
"""
Should be able to modify a tag in-place.
"""
resp = Scraper().scrape({
"find": r'\w+',
"name": "flower",
"then": [{
"find": "^",
"name": "flower",
"replace": "{{{flower}}} forever"
}]
}, input="roses violets")
self.assertEquals([{
"flower": "roses forever"
}, {
"flower": "violets forever"
}], resp.flattened_values)
def test_ascii_in(self):
"""
ASCII string in, ascii string out.
"""
resp = Scraper().scrape({
"find": r'\w+',
"name": "flowers"
}, input="roses violets")
self.assertIsInstance(resp.flattened_values[0]['flowers'], str)
def test_utf_8_in(self):
"""
UTF-8 bytestring in, UTF-8 bytestring out. Should match words
characters as expected.
"""
resp = Scraper().scrape({
"find": r'\S+',
"name": "first name",
"match": 0
}, input='jos\xc3\xa9 alejandro')
self.assertEquals({
"first name": 'jos\xc3\xa9'
}, resp.flattened_values)
def test_no_unicode_in(self):
"""
Matching on unicode is slow. Please use bytestrings already encoded
in UTF-8.
"""
with self.assertRaises(TypeError):
Scraper().scrape({
"find": r'\w+',
"name": "first name",
"match": 0
}, input=u'jos\xe9 alejandro')
def xtest_security_exception(self):
"""
Test that we get a security exception when going from remote to local
URI
"""
# TODO: no "file:" scheme support, don't see any incentive for adding
# it
pass
def test_join(self):
"""
For multi-match, is possible to join along some value.
"""
resp = Scraper().scrape({
"find": r"\w+",
"name": "joined",
"join": ", and "
}, input="<NAME>")
self.assertEquals({
"joined": "peter, and paul, and mary"
}, resp.flattened_values)
def test_else(self):
"""
If no matches, execute 'else' in find.
"""
resp = Scraper().scrape({
"find": r"\d+",
"name": "numbers",
"else": {
"name": "words",
"find": r"\w+"
}
}, input="<NAME>")
self.assertEquals( [{
"words": "peter"
}, {
"words": "paul"
}, {
"words": "mary"
}], resp.flattened_values)
def test_xpath_instruction(self):
"""
Possible to locate content using xpath.
"""
resp = Scraper().scrape({
"name": "second verse",
"xpath": "//p[2]"
}, input="<p>first verse</p><p>same as the first</p>")
self.assertEquals({
"second verse": "same as the first"
}, resp.flattened_values)
def test_xpath_invalid_expression(self):
"""
Handle failure on xpath expression.
"""
invalid = "*sd8g88**"
resp = Scraper().scrape({
"name": "second verse",
"xpath": "*sd8g88**"
}, input="<p>first verse</p><p>same as the first</p>")
self.assertEquals('failed', resp.status)
self.assertEquals("'" + invalid + "' failed because of Invalid expression", resp.reason)
def test_xpath_instruction_nomatch(self):
"""
Handle failure on xpath not matching.
"""
resp = Scraper().scrape({
"name": "second verse",
"xpath": "//p[3]"
}, input="<p>first verse</p><p>same as the first</p>")
self.assertEquals('failed', resp.status)
self.assertEquals("No matches for '//p[3]', evaluated to '//p[3]'", resp.reason)
def test_xpath_invalid_source(self):
"""
Handles garbage (kind of) gracefully.
"""
resp = Scraper().scrape({
"name": "huh",
"xpath": "//attr"
}, input="<attr= f'''oo<This ??> could < not be attr='html? !!> no way")
self.assertEquals({
"huh": " could no way"
}, resp.flattened_values)
def test_xpath_multiple(self):
"""
Xpath can provide multiple values.
"""
resp = Scraper().scrape({
"name": "bullet",
"xpath": "//ul/li"
}, input="<ul><li>first<li>second<li>third</ul>")
self.assertEquals([{
"bullet": "first"
}, {
"bullet": "second"
}, {
"bullet": "third"
}], resp.flattened_values)
def test_xpath_multiple_ranged(self):
"""
Xpath can provide multiple values, limited by min/max.
"""
resp = Scraper().scrape({
"name": "bullet",
"xpath": "//ul/li",
"min_match": 1,
"max_match": 2
}, input="<ul><li>first<li>second<li>third<li>fourth</ul>")
self.assertEquals([{
"bullet": "second"
}, {
"bullet": "third"
}], resp.flattened_values)
def test_xpath_multiple_nested(self):
"""
Xpath can provide multiple values, which could then be nested.
"""
resp = Scraper().scrape({
"name": "bullet",
"xpath": "//ul/li",
"then": {
"find": r"^\w",
"match": 0,
"name": "first letter"
}
}, input="<ul><li>first<li>second<li>third</ul>")
self.assertEquals([{
"first letter": "f",
"bullet": "first"
}, {
"first letter": "s",
"bullet": "second"
}, {
"first letter": "t",
"bullet": "third"
}], resp.flattened_values)
def test_jsonpath_expr(self):
"""
Can find values via jsonpath.
"""
resp = Scraper().scrape({
"name": "foo_value",
"jsonpath": "$.foo"
}, input=json.dumps({"foo":"bar"}))
self.assertEquals({
"foo_value": "bar"
}, resp.flattened_values)
def test_jsonpath_expr_multiple(self):
"""
Can find multiple values via jsonpath.
"""
resp = Scraper().scrape({
"name": "bar_value",
"jsonpath": "foo[*].baz"
}, input=json.dumps({'foo': [{'baz': 1}, {'baz': 2}]}))
self.assertEquals([{
"bar_value": "1"
}, {
"bar_value": "2"
}], resp.flattened_values)
def test_jsonpath_expr_multiple_range(self):
"""
Can find multiple values via jsonpath, limited by range.
"""
resp = Scraper().scrape({
"name": "bar_value",
"jsonpath": "foo[*].baz",
"min_match": 1,
"max_match": 2
}, input=json.dumps({'foo': [{'baz': 1},
{'baz': 2},
{'baz': 3},
{'baz': 4}]}))
self.assertEquals([{
"bar_value": "2"
}, {
"bar_value": "3"
}], resp.flattened_values)
def test_jsonpath_bad_expr(self):
"""
Fails gracefully on bad jsonpath expression
"""
bad_expr = "??dklskdjglks<<CVJ"
resp = Scraper().scrape({
"name": "foo_value",
"jsonpath": bad_expr
}, input=json.dumps({"foo":"bar"}))
self.assertEquals('failed', resp.status)
self.assertEquals("'" + bad_expr +
"' failed because it is not a valid jsonpath expression", resp.reason)
def test_jsonpath_bad_input(self):
"""
Fails gracefully on bad jsonpath expression
"""
bad_input = "[[gidjs kj AINT JSON"
resp = Scraper().scrape({
"name": "foo_value",
"jsonpath": "$.foo"
}, input=bad_input)
self.assertEquals('failed', resp.status)
self.assertEquals("'$.foo' failed because its input '" + bad_input + "' was not JSON",
resp.reason)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
3416399 | <filename>module1a/10.py
import sys
# allows import of project files (idk how else to do this)
sys.path.insert(1, '..')
from utils.webassign import array_from_shitstring
from stats import median
from utils.helpers import round_to_nearest_interval
# Parse the raw whitespace-separated readings into a list of floats.
actual_pressure = array_from_shitstring(
    "128.6 137.8 148.4 140.0 123.7 132.0 118.3 141.5 143.2 ")
actual_pressure.sort()
print("Actual pressure: {}".format(actual_pressure))
# Round every reading to the nearest multiple of 5.
rounded_pressure = round_to_nearest_interval(actual_pressure, 5)
print("Rounded pressure: {0}".format(rounded_pressure))
# Median of the rounded values.
median_rounded_pressure = median(rounded_pressure)
print("Median (rounded pressure): {0}".format(median_rounded_pressure))
| StarcoderdataPython |
6462369 | """ Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
PoolManager = urllib3.poolmanager.PoolManager
class SSLAdapter(HTTPAdapter):
    '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''

    def __init__(self, ssl_version=None, **kwargs):
        # Remember the requested protocol so init_poolmanager can forward it.
        self.ssl_version = ssl_version
        super(SSLAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        """Build the urllib3 PoolManager, forwarding ssl_version when supported."""
        urllib_ver = urllib3.__version__.split('-')[0]
        kwargs = {
            'num_pools': connections,
            'maxsize': maxsize,
            'block': block
        }
        # ssl_version is only understood by urllib3 > 1.5 (or a dev build).
        # Fix: the previous condition used 'and', which either raised
        # ValueError on StrictVersion('dev') or was always False for released
        # versions, so ssl_version was never forwarded.
        if urllib_ver == 'dev' or \
                StrictVersion(urllib_ver) > StrictVersion('1.5'):
            kwargs['ssl_version'] = self.ssl_version
        self.poolmanager = PoolManager(**kwargs)
3291612 | class ConnectionError(Exception): pass
| StarcoderdataPython |
1698652 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
import datetime
# Demo of the time/datetime modules: current epoch seconds, struct_time,
# and two strftime formats.
print(time.time())
print(time.localtime())
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print(time.strftime('%Y%m%d'))
print(datetime.datetime.now())
# timedelta arithmetic: ten minutes from now.
new_time = datetime.timedelta(minutes=10)
print(datetime.datetime.now() + new_time)
# Ten days after a fixed date.
one_day = datetime.datetime(2008, 5, 27)
new_date = datetime.timedelta(days=10)
print(one_day + new_date) | StarcoderdataPython |
3563489 | <gh_stars>1-10
import datetime
import dateutil.parser
import json
import os
import subprocess
from prometheus_client import Gauge
class FileBackup():
    """A single borg archive entry: its name plus creation timestamp."""

    def parse(self, dict):
        """Populate this backup from a borg archive dict; returns self."""
        self._name = dict['name']
        self._time = dateutil.parser.isoparse(dict['time'])
        return self

    def getName(self):
        """Return the archive name."""
        return self._name

    def inject(self, list):
        """Append this backup to *list*; returns self for chaining."""
        list.append(self)
        return self

    def setTimestamp(self, gauge):
        """Write this backup's creation time (epoch seconds) into *gauge*."""
        gauge.set(self._time.timestamp())
        return self

    def print(self):
        """Print a one-line human-readable summary of this backup."""
        print('- ' + self._name + ' made at ' + str(self._time))
class Borg:
    def __init__(self):
        # NOTE(review): 23 is the telnet port; SSH conventionally uses 22.
        # Confirm this default is intentional (callers can override via .port()).
        self._port = 23
        # Repository passphrase from the environment, if set.
        self._borgPassword = os.environ.get('BORG_PASSWORD')
        # Encryption mode used when initialising new repositories.
        self._encryptionMode = 'repokey-blake2'
    # Fluent builder-style setters: each stores its argument on the instance
    # and returns self so configuration calls can be chained.
    def resort(self, resort):
        self._resort = resort
        return self
    def user(self, user):
        self._user = user
        return self
    def host(self, host):
        self._host = host
        return self
    def port(self, port):
        self._port = port
        return self
    def path(self, path):
        self._path = path
        return self
    def keyFilePath(self, keyFilePath):
        self._keyFilePath = keyFilePath
        return self
    def borgPassword(self, borgPassword):
        self._borgPassword = borgPassword
        return self
def init(self, copies):
repositoryNumber = 1
while repositoryNumber <= copies:
self.__createRepository(repositoryNumber)
repositoryNumber += 1
print("Created")
repositoryNumber = 1
while repositoryNumber <= copies:
self.__initRepository(repositoryNumber)
repositoryNumber += 1
    def __createRepository(self, repositoryNumber):
        """Create the backing folder repo<N> via the resort's file adapter."""
        try:
            fileRepository = 'repo'+str(repositoryNumber)
            self._resort.adapter('files').createFolder(fileRepository)
            print("Folder for Repository "+str(repositoryNumber)+" created")
        except OSError:
            # An existing folder is treated as success: creation is idempotent.
            print("Folder for Repository "+str(repositoryNumber)+" already exists")
def __initRepository(self, repositoryNumber):
repo = self.__makeRepo(repositoryNumber)
print("Initializing Repository "+str(repositoryNumber))
completedProcess = self.command([
'init',
'-e',
self._encryptionMode,
repo
], repositoryNumber)
if completedProcess.returncode != 0:
print("Process did not return success:")
print("Code: "+ str(completedProcess.returncode))
print( str(completedProcess.stdout) )
print( str(completedProcess.stderr) )
return self
print("Initialized Repository "+str(repositoryNumber)+" successfully")
return self
def remove(self, name, repositoryNumber):
self.__findBackup(name, repositoryNumber)
print("Removing backup "+name+" from Repository "+str(repositoryNumber))
completedProcess = self.command([
'delete',
'::'+name,
], repositoryNumber)
if completedProcess.returncode != 0:
print("Process did not return success:")
print("Code: "+ str(completedProcess.returncode))
print( completedProcess.stdout.decode('utf-8') )
print( completedProcess.stderr.decode('utf-8') )
return self
print("Removal of "+name+" from Repository "+str(repositoryNumber)+" finished successfully")
return self
def backup(self, name, target, repositoryNumber):
print("Backing up "+target+" to Repository "+str(repositoryNumber))
completedProcess = self.command([
'create',
'::'+name,
'.'
], repositoryNumber, directory=target, capture_output=False)
print("Backup of "+target+" to Repository "+str(repositoryNumber)+" finished successfully")
return self
    def umount(self, target):
        """Unmount a FUSE-mounted backup from *target*; returns self.

        Failures are reported on stdout but not raised.
        """
        completedProcess = subprocess.run([
            'fusermount',
            '-u',
            target
        ],
        capture_output=True
        )
        if completedProcess.returncode != 0:
            print("Failed to unmount "+target)
            print("Code: "+ str(completedProcess.returncode))
            print( completedProcess.stdout.decode('utf-8') )
            print( completedProcess.stderr.decode('utf-8') )
        return self
    def mount(self, name, target, repositoryNumber):
        """FUSE-mount archive *name* at *target*; returns self.

        Runs `borg mount -f` in the foreground (for Docker friendliness), so
        this call blocks until the user interrupts it.
        """
        print("Mounting backup "+name+" from Repository "+str(repositoryNumber)+' to '+target)
        print("The borg mount is run in foreground to facilitate usage inside Docker")
        print("Please cancel the program with an interrupt (control+c) after you are done.")
        completedProcess = self.command([
            'mount',
            '-f',
            '::'+name,
            target
        ], repositoryNumber, check=False)
        if completedProcess.returncode != 0:
            print("Process did not return success:")
            print("Code: "+ str(completedProcess.returncode))
            print( completedProcess.stdout.decode('utf-8') )
            print( completedProcess.stderr.decode('utf-8') )
            return self
        print("Mounted backup "+name+" from Repository "+str(repositoryNumber)+' to '+target)
        return self
    def list(self, repositoryNumber):
        """Return all backups in the repository as FileBackups, oldest first.

        Raises ValueError when `borg list` exits non-zero.
        """
        completedProcess = self.command([
            'list',
            '--json',
            '::'
        ], repositoryNumber, check=False)
        if completedProcess.returncode != 0:
            print("Process did not return success:")
            print("Code: "+ str(completedProcess.returncode))
            print( completedProcess.stdout.decode('utf-8') )
            print( completedProcess.stderr.decode('utf-8') )
            raise ValueError("List process failed")
        output = completedProcess.stdout.decode('utf-8')
        decodedOutput = json.loads(output)
        return self.__archivesToBackups(decodedOutput['archives'])
def __archivesToBackups(self, list):
backups = []
for archive in list:
backup = FileBackup().parse(archive)
backup.inject(backups)
sortedBackups = sorted(backups, key=lambda backup : backup._time)
return sortedBackups
def restore(self, name, target, repositoryNumber):
backup = self.__findBackup(name, repositoryNumber)
print("Restoring backup "+backup.getName()+" from Repository "+str(repositoryNumber)+' to '+target)
completedProcess = self.command([
'extract',
'::'+backup.getName()
], repositoryNumber, directory=target)
def prune(self, repositoryNumber):
repo = self.__makeRepo(repositoryNumber)
print("Pruning file backups in repository "+repositoryNumber)
completedProcess = self.command([
'prune',
'--keep-daily=14',
'--keep-weekly=6',
'--keep-monthly=6',
'::'
], repositoryNumber)
if completedProcess.returncode != 0:
print("Process did not return success:")
print("Code: "+ str(completedProcess.returncode))
print( completedProcess.stdout.decode('utf-8') )
print( completedProcess.stderr.decode('utf-8') )
return self
return completedProcess.stdout.decode('utf-8')
    def command(self, args, repoNumber, directory=None, check=True,
            capture_output=True):
        """Run `borgbackup <args>` against repository *repoNumber*.

        The repository URL and passphrase are supplied via environment
        variables; *directory* defaults to the current working directory.
        check=True raises CalledProcessError on non-zero exit.
        """
        if directory is None:
            directory = os.getcwd()
        # NOTE(review): env= REPLACES the child environment entirely (no
        # PATH/HOME inherited); confirm borg and its ssh helper still behave
        # as expected without them.
        return subprocess.run(
            ['borgbackup'] + args,
            capture_output=capture_output,
            env={
                'BORG_NEW_PASSPHRASE': self._borgPassword,
                'BORG_PASSPHRASE': self._borgPassword,
                'BORG_REPO': self.__makeRepo(repoNumber),
                'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK', ''),
                'BORG_RSH': "ssh -o StrictHostKeyChecking=accept-new -i "+self._keyFilePath
            }, cwd=directory, check=check)
def __makeRepo(self, number):
return 'ssh://'+self._user+'@'+self._host+':'+str(self._port)+'/.'+self._path+'/repo'+str(number)
    def __findBackup(self, target, repositoryNumber):
        # Resolve 'latest' to the newest archive in the repository.
        # NOTE(review): the return type is inconsistent -- a FileBackup for
        # 'latest', but the plain string otherwise. Callers must handle both;
        # unifying this requires coordinated changes in remove()/restore().
        if target == 'latest':
            return self.list(repositoryNumber)[-1]
        return target
def getRepositories(self):
repos = []
for directoryName in self._resort.adapter('files').listFolders():
start = len('repo')
end = len(directoryName)
repos.append( directoryName[start:end] )
return repos
    def scrape(self, gauge):
        """Export the newest backup timestamp per repository into *gauge*.

        Repositories that contain no backups report 0. Returns self.
        """
        for repositoryNumber in self.getRepositories():
            try:
                backup = self.__findBackup('latest', repositoryNumber)
            except IndexError:
                # Empty repository: list() returned [], so [-1] raised.
                gauge.labels(self._resort._name, 'repository_'+str(repositoryNumber)).set(0)
                continue
            backup.setTimestamp( gauge.labels(self._resort._name, 'repository_'+str(repositoryNumber)) )
        return self
| StarcoderdataPython |
9782384 | <filename>train_Cycle_Gan.py
#!/usr/bin/python3
import argparse
import itertools
import os
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.nn as nn
import torch
from mymodels import Generator_resnet
from mymodels import Discriminator
from utils import ReplayBuffer
from utils import LambdaLR
from utils import Logger
from utils import weights_init_normal
from datasets import ImageDataset
# breast/ neuroendocrine / GLAS
# Pin all work to the second physical GPU before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', type=str, default='/home/zhangbc/Mydataspace/LST/neuroendocrine/datasetX20_288/Train_PN_3_1', help='root directory of the dataset')
parser.add_argument('--modelroot', type=str, default='/home/zhangbc/Mydataspace/LST/neuroendocrine/mymodelx288/Cycle_GAN_PN_3_1', help='root directory of the model')
parser.add_argument('--epoch', type=int, default=1, help='starting epoch')
parser.add_argument('--n_epochs', type=int, default=10, help='number of epochs of training')
parser.add_argument('--batchSize', type=int, default=2, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate')
parser.add_argument('--decay_epoch', type=int, default=2, help='epoch to start linearly decaying the learning rate to 0')
parser.add_argument('--size', type=int, default=288, help='size of the data crop (squared assumed)')
parser.add_argument('--input_nc', type=int, default=3, help='number of channels of input data')
parser.add_argument('--output_nc', type=int, default=3, help='number of channels of output data')
# NOTE(review): type=bool is a known argparse pitfall -- bool('False') is True,
# so any non-empty value given on the command line enables these flags.
# action='store_true' would be the conventional fix; left unchanged here.
parser.add_argument('--cuda', type=bool, default=True, help='use GPU computation')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--continue_train', type=bool, default=False, help='load model and continue trainning')
parser.add_argument('--loadroot', type=str, default='/home/zhangbc/Mydataspace/LST/neuroendocrine/mymodelx288/Cycle_GAN_PN_3_1/temp', help='continue train root directory of the model')
opt = parser.parse_args()
print(opt)
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
###### Definition of variables ######
# Networks: two generators (A->B and B->A) plus one discriminator per domain.
netG_A2B = Generator_resnet(opt.input_nc, opt.output_nc, 10, False)
netG_B2A = Generator_resnet(opt.output_nc, opt.input_nc, 10, False)
netD_A = Discriminator(opt.input_nc)
netD_B = Discriminator(opt.output_nc)
if opt.cuda:
    netG_A2B.cuda()
    netG_B2A.cuda()
    netD_A.cuda()
    netD_B.cuda()
netG_A2B.apply(weights_init_normal)
netG_B2A.apply(weights_init_normal)
netD_A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)
# Losses: MSE for the LSGAN adversarial terms, L1 for cycle and identity terms.
criterion_GAN = nn.MSELoss()
criterion_cycle = nn.L1Loss()
criterion_identity = nn.L1Loss()
# Optimizers & LR schedulers (both generators share a single Adam optimizer).
optimizer_G = torch.optim.Adam(itertools.chain(netG_A2B.parameters(), netG_B2A.parameters()),
                               lr=opt.lr, betas=(0.5, 0.999))
optimizer_D_A = torch.optim.Adam(netD_A.parameters(), lr=opt.lr, betas=(0.5, 0.999))
optimizer_D_B = torch.optim.Adam(netD_B.parameters(), lr=opt.lr, betas=(0.5, 0.999))
lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)
lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(optimizer_D_A, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)
lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(optimizer_D_B, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)
# Inputs & targets memory allocation (reused every iteration via copy_).
Tensor = torch.cuda.FloatTensor if opt.cuda else torch.Tensor
input_A = Tensor(opt.batchSize, opt.input_nc, opt.size, opt.size)
input_B = Tensor(opt.batchSize, opt.output_nc, opt.size, opt.size)
# Constant labels for the MSE-based adversarial objective.
target_real = Variable(Tensor(opt.batchSize).fill_(1.0), requires_grad=False)
target_fake = Variable(Tensor(opt.batchSize).fill_(0.0), requires_grad=False)
# Replay buffers feed the discriminators a history of previously generated images.
fake_A_buffer = ReplayBuffer()
fake_B_buffer = ReplayBuffer()
transforms_ = transforms.Compose([
    transforms.RandomCrop(opt.size),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
dataloader = DataLoader(ImageDataset(opt.dataroot, transforms_=transforms_, batch_size=opt.batchSize, unaligned=True),
                        batch_size=opt.batchSize, shuffle=True, num_workers=opt.n_cpu)
###################################
start_epoch = opt.epoch
if opt.continue_train:
    # Resume: the netG_B2A checkpoint also carries the shared generator
    # optimizer, scheduler and last finished epoch.
    netG_A2B_checkpoint = torch.load(os.path.join(opt.loadroot, 'netG_A2B.pth'))  # load checkpoint
    netG_A2B.load_state_dict(netG_A2B_checkpoint['model'])  # load learnable parameters
    netG_B2A_checkpoint = torch.load(os.path.join(opt.loadroot, 'netG_B2A.pth'))  # load checkpoint
    netG_B2A.load_state_dict(netG_B2A_checkpoint['model'])  # load learnable parameters
    optimizer_G.load_state_dict(netG_B2A_checkpoint['optimizer'])  # load optimizer state
    lr_scheduler_G.load_state_dict(netG_B2A_checkpoint['lr_schedule'])  # load lr_scheduler state
    start_epoch = netG_B2A_checkpoint['epoch']+1
    netD_A_checkpoint = torch.load(os.path.join(opt.loadroot, 'netD_A.pth'))  # load checkpoint
    netD_A.load_state_dict(netD_A_checkpoint['model'])  # load learnable parameters
    optimizer_D_A.load_state_dict(netD_A_checkpoint['optimizer'])  # load optimizer state
    lr_scheduler_D_A.load_state_dict(netD_A_checkpoint['lr_schedule'])  # load lr_scheduler state
    netD_B_checkpoint = torch.load(os.path.join(opt.loadroot, 'netD_B.pth'))  # load checkpoint
    netD_B.load_state_dict(netD_B_checkpoint['model'])  # load learnable parameters
    optimizer_D_B.load_state_dict(netD_B_checkpoint['optimizer'])  # load optimizer state
    lr_scheduler_D_B.load_state_dict(netD_B_checkpoint['lr_schedule'])  # load lr_scheduler state
# Loss plot
logger = Logger(opt.n_epochs, len(dataloader), start_epoch)
###### Training ######
for epoch in range(start_epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Set model input (copy the batch into the pre-allocated device tensors).
        real_A = Variable(input_A.copy_(batch['HE']))
        real_B = Variable(input_B.copy_(batch['Ki67']))
        # Generators A2B and B2A
        optimizer_G.zero_grad()
        # Identity loss
        # G_A2B(B) should equal B if real B is fed
        same_B, _ = netG_A2B(real_B)
        loss_identity_B = criterion_identity(same_B, real_B)
        # G_B2A(A) should equal A if real A is fed
        same_A, _ = netG_B2A(real_A)
        loss_identity_A = criterion_identity(same_A, real_A)
        # GAN loss
        fake_B, _ = netG_A2B(real_A)
        pred_fake = netD_B(fake_B)
        loss_GAN_A2B = criterion_GAN(pred_fake, target_real)
        fake_A, _ = netG_B2A(real_B)
        pred_fake = netD_A(fake_A)
        loss_GAN_B2A = criterion_GAN(pred_fake, target_real)
        # Cycle loss
        recovered_A, _ = netG_B2A(fake_B)
        loss_cycle_ABA = criterion_cycle(recovered_A, real_A)
        recovered_B, _ = netG_A2B(fake_A)
        loss_cycle_BAB = criterion_cycle(recovered_B, real_B)
        # Total loss (weights: identity 5.0, adversarial 1.0, cycle 10.0)
        loss_G = 5.0 * (loss_identity_A + loss_identity_B) + \
                 1.0 * (loss_GAN_A2B + loss_GAN_B2A) + \
                 10.0 * (loss_cycle_ABA + loss_cycle_BAB)
        loss_G.backward()
        optimizer_G.step()
        ###################################
        # Discriminator A
        optimizer_D_A.zero_grad()
        # Real loss
        pred_real = netD_A(real_A)
        loss_D_real = criterion_GAN(pred_real, target_real)
        # Fake loss (sample from the replay buffer, detached from the generator graph)
        fake_Ad = fake_A_buffer.push_and_pop(fake_A)
        pred_fake = netD_A(fake_Ad.detach())
        loss_D_fake = criterion_GAN(pred_fake, target_fake)
        # Total loss
        loss_D_A = (loss_D_real + loss_D_fake)*0.5
        loss_D_A.backward()
        optimizer_D_A.step()
        ###################################
        # Discriminator B
        optimizer_D_B.zero_grad()
        # Real loss
        pred_real = netD_B(real_B)
        loss_D_real = criterion_GAN(pred_real, target_real)
        # Fake loss
        fake_Bd = fake_B_buffer.push_and_pop(fake_B)
        pred_fake = netD_B(fake_Bd.detach())
        loss_D_fake = criterion_GAN(pred_fake, target_fake)
        # Total loss
        loss_D_B = (loss_D_real + loss_D_fake)*0.5
        loss_D_B.backward()
        optimizer_D_B.step()
        ###################################
        # Progress report (http://localhost:8097)
        logger.log({'loss_G': loss_G,
                    'loss_G_identity': (loss_identity_A + loss_identity_B),
                    'loss_G_GAN': (loss_GAN_A2B + loss_GAN_B2A),
                    'loss_G_cycle': (loss_cycle_ABA + loss_cycle_BAB),
                    'loss_D': (loss_D_A + loss_D_B)},
                   images={'real_cycleGAN_A': real_A, 'real_cycleGAN_B': real_B,
                           'fake_cycleGAN_A': fake_A, 'fake_cycleGAN_B': fake_B})
        # Save resumable checkpoints roughly five times per epoch
        # (the // 5 + 1 divisor also guards against tiny dataloaders).
        if (i+1) % (dataloader.__len__()//5 + 1) == 0:
            saveroot = os.path.join(opt.modelroot, 'temp')
            if not os.path.exists(saveroot):
                os.makedirs(saveroot)
            # Save models checkpoints
            netG_A2B_checkpoints = {
                "model": netG_A2B.state_dict()
            }
            torch.save(netG_A2B_checkpoints, os.path.join(saveroot, 'netG_A2B.pth'))
            # The B2A checkpoint also carries the shared generator optimizer,
            # scheduler and current epoch so training can be resumed from it.
            netG_B2A_checkpoints = {
                "model": netG_B2A.state_dict(),
                'optimizer': optimizer_G.state_dict(),
                "epoch": epoch,
                'lr_schedule': lr_scheduler_G.state_dict()
            }
            torch.save(netG_B2A_checkpoints, os.path.join(saveroot, 'netG_B2A.pth'))
            netD_A_checkpoints = {
                "model": netD_A.state_dict(),
                'optimizer': optimizer_D_A.state_dict(),
                'lr_schedule': lr_scheduler_D_A.state_dict()
            }
            torch.save(netD_A_checkpoints, os.path.join(saveroot, 'netD_A.pth'))
            netD_B_checkpoints = {
                "model": netD_B.state_dict(),
                'optimizer': optimizer_D_B.state_dict(),
                'lr_schedule': lr_scheduler_D_B.state_dict()
            }
            torch.save(netD_B_checkpoints, os.path.join(saveroot, 'netD_B.pth'))
    # Update learning rates
    lr_scheduler_G.step()
    lr_scheduler_D_A.step()
    lr_scheduler_D_B.step()
    # Persist a plain state_dict snapshot of all four networks per epoch.
    saveroot = os.path.join(opt.modelroot, 'epoch'+str(epoch))
    if not os.path.exists(saveroot):
        os.makedirs(saveroot)
    # Save models checkpoints
    torch.save(netG_A2B.state_dict(), os.path.join(saveroot, 'netG_A2B.pth'))
    torch.save(netG_B2A.state_dict(), os.path.join(saveroot, 'netG_B2A.pth'))
    torch.save(netD_A.state_dict(), os.path.join(saveroot, 'netD_A.pth'))
    torch.save(netD_B.state_dict(), os.path.join(saveroot, 'netD_B.pth'))
| StarcoderdataPython |
283832 | <filename>parser/fase2/team14/Entorno/Simbolo.py
from Entorno.TipoSimbolo import TipoSimbolo
class Simbolo:
    """Symbol-table entry rendered as HTML <TR> rows for report output.

    Rendered column order: nombre, tipoSym, baseDatos, tabla, valor.
    """
    def __init__(self, tipo="", nombre="", valor=None, linea=0):
        self.tipo = tipo
        self.nombre = nombre
        self.valor = valor
        self.linea = linea
        self.atributos = {}
        self.baseDatos = ""
        self.tabla = ""
        self.indexId = ""
    def toString(self):
        """Render this symbol (tables, constraints, enums, indexes, functions) as HTML rows."""
        cadena: str = ""
        # Columns emitted: nombre, tipoSym, baseDatos, tabla, valor
        if self.nombre != None:
            if self.tipo == TipoSimbolo.TABLA:
                columnas: Simbolo = []
                columnas = self.valor
                cadena += "<TR><TD rowspan='" + str(len(columnas)) + "'>" + self.nombre.split('_')[
                    0] + "</TD><TD rowspan='" + str(len(columnas)) + "'>TABLA</TD><TD rowspan='" + str(
                    len(columnas)) + "'>" + self.baseDatos + "</TD><TD rowspan='" + str(len(columnas)) + "'>"
                cadena += self.tabla + "</TD><TD>" + columnas[0].nombre + ":" + columnas[0].tipo.tipo + "</TD></TR>\n"
                for col in range(1, len(columnas), 1):
                    cadena += "<TR><TD>" + columnas[col].nombre + ":" + columnas[col].tipo.tipo + "</TD></TR>\n"
            elif self.tipo == TipoSimbolo.CONSTRAINT_UNIQUE:
                cadena += "<TR><TD>" + self.nombre + "</TD><TD>UNIQUE</TD><TD>" + self.baseDatos + "</TD><TD>"
                cadena += self.tabla + "</TD><TD>" + self.valor + "</TD></TR>\n\n"
            elif self.tipo == TipoSimbolo.CONSTRAINT_CHECK:
                cond: str = self.valor.simbolo
                # NOTE(review): this escaping block looks mangled. `cond in ">"`
                # tests whether cond is a substring of ">" (`">" in cond` or an
                # equality test was probably intended), the replace() calls are
                # no-ops (HTML entities such as &gt; appear to have been lost),
                # and the "<>" branch replaces ">=" instead of "<>". Code left
                # unchanged here.
                if cond in ">":
                    cond = cond.replace(">", ">")
                if cond in "<":
                    cond = cond.replace("<", "<")
                if cond in "<=":
                    cond = cond.replace("<=", "<=")
                if cond in ">=":
                    cond = cond.replace(">=", ">=")
                if cond in "<>":
                    cond = cond.replace(">=", "<>")
                cadena += "<TR><TD>" + self.nombre + "</TD><TD>CONSTRAINT CHECK</TD><TD>" + self.baseDatos + "</TD><TD>"
                cadena += self.tabla + "</TD><TD>" + str(self.valor.exp1.valor) + " " + cond + " " + str(
                    self.valor.exp2.valor) + "</TD></TR>\\n"
            elif self.tipo == TipoSimbolo.CONSTRAINT_FOREIGN:
                cadena += "<TR><TD>" + self.nombre + "</TD><TD>CONSTRAINT FORANEA</TD><TD>" + self.baseDatos + "</TD><TD>"
                cadena += self.tabla + "</TD><TD></TD></TR>"
            elif self.tipo == TipoSimbolo.CONSTRAINT_PRIMARY:
                cadena += "<TR><TD>" + self.nombre + "</TD><TD>CONSTRAINT PRIMARIA</TD><TD>" + self.baseDatos + "</TD><TD>"
                cadena += self.tabla + "</TD><TD>" + str(self.valor) + "</TD></TR>"
            elif self.tipo == TipoSimbolo.TYPE_ENUM:
                columnas: Simbolo = []
                columnas = self.valor
                cadena += "<TR><TD rowspan='" + str(len(columnas)) + "'>" + self.nombre.split('_')[
                    2] + "</TD><TD rowspan='" + str(len(columnas)) + "'>ENUM</TD><TD rowspan='" + str(
                    len(columnas)) + "'>" + self.baseDatos + "</TD><TD rowspan='" + str(len(columnas)) + "'>"
                cadena += self.tabla + "</TD><TD>" + columnas[0].valor + "</TD></TR>\n"
                for col in range(1, len(columnas), 1):
                    cadena += "<TR><TD>" + columnas[col].valor + "</TD></TR>\n"
            elif self.tipo == TipoSimbolo.INDEX:
                # tam counts the rowspan: one base row plus one per present option.
                un = self.valor.get('unique')
                orden = self.valor.get('orden')
                hsh = self.valor.get('hash')
                tam: int = 1
                aux: str = ""
                if un != None:
                    tam += 1
                    aux += "<TR><TD>unique</TD></TR>\n"
                if orden != None:
                    tam += 1
                    aux += "<TR><TD>orden: " + orden + "</TD></TR>\n"
                if hsh != None:
                    tam += 1
                    aux += "<TR><TD>using hash</TD></TR>\n"
                cadena += "<TR><TD rowspan='" + str(tam) + "'>" + self.valor['id'] + "</TD><TD rowspan='" + str(
                    tam) + "'>INDEX</TD><TD rowspan='" + str(tam) + "'>" + self.baseDatos + "</TD><TD rowspan='" + str(
                    tam) + "'>"
                cadena += self.tabla + "</TD><TD> columna : " + self.valor['columna'] + "</TD></TR>\n"
                cadena += aux
            elif self.nombre[:2] == "_f":
                # Function symbols are stored with a "_f" name prefix;
                # valor[0] holds the parameter list (or None).
                parametros = self.valor[0]
                if parametros!=None:
                    tamano = len(parametros)
                    cadena += "<TR><TD rowspan='" + str(tamano) + "'>" + self.nombre[2:] + "</TD><TD rowspan='" + str(tamano) + "'>FUNCION : " + str(self.tipo.tipo) + "</TD><TD rowspan='" + str(tamano) + "'></TD><TD rowspan='" + str(tamano) + "'>"
                    cadena += "</TD><TD>" + str(parametros[0].nombre) + ":" + str(parametros[0].tipo.tipo) + "</TD></TR>\n"
                    for x in range(1,len(parametros),1):
                        cadena += "<TR><TD>" + str(parametros[x].nombre) + ":" + str(parametros[x].tipo.tipo) + "</TD></TR>\n"
                else:
                    cadena += "<TR><TD>" + self.nombre[2:] + "</TD><TD>FUNCION : " + str(self.tipo.tipo) + "</TD><TD></TD><TD>"
                    cadena += "</TD><TD></TD></TR>\n"
        return cadena
    def proc(self):
        """Render a stored-procedure symbol (name prefixed with "_P") as HTML rows."""
        cadena: str = ""
        if self.nombre != None:
            if self.nombre[:2] == "_P":
                parametros = self.valor[0]
                if parametros!=None:
                    tamano = len(parametros)
                    cadena += "<TR><TD rowspan='" + str(tamano) + "'>" + self.nombre[2:] + "</TD><TD rowspan='" + str(tamano) + "'>PROCEDURE</TD><TD rowspan='" + str(tamano) + "'></TD><TD rowspan='" + str(tamano) + "'>"
                    cadena += "</TD><TD>" + str(parametros[0].nombre) + ":" + str(parametros[0].tipo.tipo) + "</TD></TR>\n"
                    for x in range(1,len(parametros),1):
                        cadena += "<TR><TD>" + str(parametros[x].nombre) + ":" + str(parametros[x].tipo.tipo) + "</TD></TR>\n"
                else:
                    cadena += "<TR><TD>" + self.nombre[2:] + "</TD><TD>PROCEDURE</TD><TD></TD><TD>"
                    cadena += "</TD><TD></TD></TR>\n"
        return cadena
5479 | <reponame>Jeans212/codility-dev-training
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
'''
Rotate an array A to the right by a given number of steps K.
Covert the array to a deque
Apply the rotate() method the rotate the deque in positive K steps
Convert the deque to array
'''
from collections import deque
def solution(A, K):
    """Return a copy of list A rotated to the right by K steps.

    K may be zero, larger than len(A), or negative; an empty list
    rotates to an empty list.
    """
    if not A:
        return []
    shift = K % len(A)
    return A[-shift:] + A[:-shift]
| StarcoderdataPython |
3410902 | <reponame>AshKelly/PyAutoLens
import os
from autofit import conf
from autofit.optimize import non_linear as nl
from autofit.mapper import prior
from autolens.data import ccd
from autolens.model.galaxy import galaxy, galaxy_model as gm
from autolens.pipeline import phase as ph
from autolens.pipeline import pipeline as pl
from autolens.model.profiles import light_profiles as lp
from autolens.model.profiles import mass_profiles as mp
from test.integration import tools
# Identify this integration test and anchor its paths relative to this file.
test_type = 'sensitivity'
test_name = "sensitivity_profile_via_multinest"
path = '{}/../../'.format(os.path.dirname(os.path.realpath(__file__)))
output_path = path+'output/'+test_type
config_path = path+'config'
# Point autofit's global config at the test config and output directories.
conf.instance = conf.Config(config_path=config_path, output_path=output_path)
def pipeline():
    """Simulate a lens+subhalo CCD image, reload it, and run the sensitivity pipeline."""
    lens_mass = mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6)
    # Subhalo with zero Einstein radius: present in the model but deflecting nothing.
    lens_subhalo = mp.SphericalIsothermal(centre=(1.0, 1.0), einstein_radius=0.0)
    source_light = lp.SphericalSersic(centre=(0.0, 0.0), intensity=1.0, effective_radius=0.5, sersic_index=1.0)
    lens_galaxy = galaxy.Galaxy(mass=lens_mass, subhalo=lens_subhalo)
    source_galaxy = galaxy.Galaxy(light=source_light)
    # Clear any previous output for this test before regenerating the data.
    tools.reset_paths(test_name=test_name, output_path=output_path)
    tools.simulate_integration_image(test_name=test_name, pixel_scale=0.1, lens_galaxies=[lens_galaxy],
                                     source_galaxies=[source_galaxy], target_signal_to_noise=30.0)
    ccd_data = ccd.load_ccd_data_from_fits(image_path=path + '/data/' + test_name + '/image.fits',
                                           psf_path=path + '/data/' + test_name + '/psf.fits',
                                           noise_map_path=path + '/data/' + test_name + '/noise_map.fits',
                                           pixel_scale=0.1)
    pipeline = make_pipeline(test_name=test_name)
    pipeline.run(data=ccd_data)
def make_pipeline(test_name):
    """Build a one-phase sensitivity pipeline with the macro model held fixed."""
    class SensitivePhase(ph.SensitivityPhase):
        def pass_priors(self, previous_results):
            # Fix lens and source parameters to their simulated values so only
            # the subhalo centre is free during the sensitivity analysis.
            self.lens_galaxies.lens.mass.centre_0 = 0.0
            self.lens_galaxies.lens.mass.centre_1 = 0.0
            self.lens_galaxies.lens.mass.einstein_radius = 1.6
            self.source_galaxies.source.light.centre_0 = 0.0
            self.source_galaxies.source.light.centre_1 = 0.0
            self.source_galaxies.source.light.intensity = 1.0
            self.source_galaxies.source.light.effective_radius = 0.5
            self.source_galaxies.source.light.sersic_index = 1.0
            self.sensitive_galaxies.subhalo.mass.centre_0 = prior.GaussianPrior(mean=0.0, sigma=1.0)
            self.sensitive_galaxies.subhalo.mass.centre_1 = prior.GaussianPrior(mean=0.0, sigma=1.0)
            self.sensitive_galaxies.subhalo.mass.kappa_s = 0.1
            self.sensitive_galaxies.subhalo.mass.scale_radius = 5.0
    phase1 = SensitivePhase(lens_galaxies=dict(lens=gm.GalaxyModel(mass=mp.SphericalIsothermal)),
                            source_galaxies=dict(source=gm.GalaxyModel(light=lp.SphericalSersic)),
                            sensitive_galaxies=dict(subhalo=gm.GalaxyModel(mass=mp.SphericalNFW)),
                            optimizer_class=nl.MultiNest, phase_name="{}/phase1".format(test_name))
    phase1.optimizer.const_efficiency_mode = True
    return pl.PipelineImaging(test_name, phase1)
# Run the integration pipeline when executed directly as a script.
if __name__ == "__main__":
    pipeline()
| StarcoderdataPython |
4942716 | import functools
import os
import os.path as osp
from collections import OrderedDict
from math import cos, pi
import torch
from torch import distributed as dist
from .dist import get_dist_info, master_only
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series."""

    def __init__(self, apply_dist_reduce=False):
        # When True, get_val/get_avg report the statistic averaged across all
        # distributed workers rather than the local value.
        self.apply_dist_reduce = apply_dist_reduce
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def dist_reduce(self, val):
        """Average *val* over every worker; pass-through for single-process runs."""
        rank, world_size = get_dist_info()
        if world_size == 1:
            return val
        if not isinstance(val, torch.Tensor):
            val = torch.tensor(val, device='cuda')
        dist.all_reduce(val)
        return val.item() / world_size

    def get_val(self):
        """Latest value, optionally reduced across workers."""
        return self.dist_reduce(self.val) if self.apply_dist_reduce else self.val

    def get_avg(self):
        """Running average, optionally reduced across workers."""
        return self.dist_reduce(self.avg) if self.apply_dist_reduce else self.avg

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
# Epoch counts from 0 to N-1
def cosine_lr_after_step(optimizer, base_lr, epoch, step_epoch, total_epochs, clip=1e-6):
    """Hold base_lr until step_epoch, then cosine-anneal it down towards clip.

    Epochs count from 0 to total_epochs - 1; the new rate is written into
    every parameter group of *optimizer*.
    """
    if epoch < step_epoch:
        new_lr = base_lr
    else:
        progress = (epoch - step_epoch) / (total_epochs - step_epoch)
        new_lr = clip + 0.5 * (base_lr - clip) * (1 + cos(pi * progress))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def is_power2(num):
    """Return True when *num* is a positive power of two."""
    # A power of two has exactly one set bit, so num & (num - 1) clears it to zero.
    return bool(num) and not (num & (num - 1))
def is_multiple(num, multiple):
    """Return True when *num* is a non-zero multiple of *multiple*."""
    if num == 0:
        return False
    return num % multiple == 0
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights, typically on GPU.

    Returns:
        OrderedDict: The same keys with every tensor moved to cpu.
    """
    return OrderedDict((key, tensor.cpu()) for key, tensor in state_dict.items())
@master_only
def checkpoint_save(epoch, model, optimizer, work_dir, save_freq=16):
    """Save model/optimizer state for *epoch*, refresh the 'latest.pth' symlink,
    and prune the previous epoch's checkpoint unless its number is a power of
    two or a multiple of save_freq. Runs on the master rank only.
    """
    if hasattr(model, 'module'):
        model = model.module  # unwrap DataParallel/DistributedDataParallel wrappers
    f = os.path.join(work_dir, f'epoch_{epoch}.pth')
    checkpoint = {
        'net': weights_to_cpu(model.state_dict()),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch
    }
    torch.save(checkpoint, f)
    # Replace the 'latest' symlink so it always points at the newest checkpoint.
    if os.path.exists(f'{work_dir}/latest.pth'):
        os.remove(f'{work_dir}/latest.pth')
    os.system(f'cd {work_dir}; ln -s {osp.basename(f)} latest.pth')
    # remove previous checkpoints unless they are a power of 2 or a multiple of save_freq
    epoch = epoch - 1
    f = os.path.join(work_dir, f'epoch_{epoch}.pth')
    if os.path.isfile(f):
        if not is_multiple(epoch, save_freq) and not is_power2(epoch):
            os.remove(f)
def load_checkpoint(checkpoint, logger, model, optimizer=None, strict=False):
    """Load weights (and optionally optimizer state) from *checkpoint* into *model*.

    Tensors whose shapes do not match the current model are dropped before
    loading, which allows reusing pretrained weights with a modified
    architecture; every skipped/missing/unexpected key is logged.

    Returns:
        int: the epoch to resume from (stored epoch + 1, or 1 when absent).
    """
    if hasattr(model, 'module'):
        model = model.module  # unwrap DataParallel/DistributedDataParallel wrappers
    device = torch.cuda.current_device()
    state_dict = torch.load(checkpoint, map_location=lambda storage, loc: storage.cuda(device))
    src_state_dict = state_dict['net']
    target_state_dict = model.state_dict()
    skip_keys = []
    # skip mismatch size tensors in case of pretraining
    for k in src_state_dict.keys():
        if k not in target_state_dict:
            continue
        if src_state_dict[k].size() != target_state_dict[k].size():
            skip_keys.append(k)
    for k in skip_keys:
        del src_state_dict[k]
    missing_keys, unexpected_keys = model.load_state_dict(src_state_dict, strict=strict)
    if skip_keys:
        logger.info(
            f'removed keys in source state_dict due to size mismatch: {", ".join(skip_keys)}')
    if missing_keys:
        logger.info(f'missing keys in source state_dict: {", ".join(missing_keys)}')
    if unexpected_keys:
        logger.info(f'unexpected key in source state_dict: {", ".join(unexpected_keys)}')
    # load optimizer
    if optimizer is not None:
        assert 'optimizer' in state_dict
        optimizer.load_state_dict(state_dict['optimizer'])
    if 'epoch' in state_dict:
        epoch = state_dict['epoch']
    else:
        epoch = 0
    return epoch + 1
def get_max_memory():
    """Return peak CUDA memory allocated in MB (max across ranks when distributed,
    reduced onto rank 0)."""
    mem = torch.cuda.max_memory_allocated()
    mem_mb = torch.tensor([int(mem) // (1024 * 1024)], dtype=torch.int, device='cuda')
    _, world_size = get_dist_info()
    if world_size > 1:
        dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
    return mem_mb.item()
def cuda_cast(func):
    """Decorator that moves every torch.Tensor argument (positional or keyword)
    onto the GPU before invoking *func*; other arguments pass through untouched."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        moved_args = [a.cuda() if isinstance(a, torch.Tensor) else a for a in args]
        moved_kwargs = {name: value.cuda() if isinstance(value, torch.Tensor) else value
                        for name, value in kwargs.items()}
        return func(*moved_args, **moved_kwargs)
    return wrapper
| StarcoderdataPython |
75845 | <reponame>seberg/scipy
import numpy as np
from numpy import array, poly1d
from scipy.interpolate import interp1d
from scipy.special import beta
# The following code was used to generate the Pade coefficients for the
# Tukey Lambda variance function. Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
#
# def mpvar(lam):
# if lam == 0:
# v = mp.pi**2 / three
# else:
# v = (two / lam**2) * (one / (one + two*lam) -
# mp.beta(lam + one, lam + one))
# return v
#
# t = mp.taylor(mpvar, 0, 8)
# p, q = mp.pade(t, 4, 4)
# print "p =", [mp.fp.mpf(c) for c in p]
# print "q =", [mp.fp.mpf(c) for c in q]
#---------------------------------------------------------------------------
# Pade coefficients for the Tukey Lambda variance function.
_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
-0.5370742306855439, 0.17292046290190008,
-0.02371146284628187]
_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
1.7660926747377275, 0.2643989311168465]
# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda variance.
_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])
def tukeylambda_variance(lam):
"""Variance of the Tukey Lambda distribution.
Parameters
----------
lam : array_like
The lambda values at which to compute the variance.
Returns
-------
v : ndarray
The variance. For lam < -0.5, the variance is not defined, so
np.nan is returned. For lam = 0.5, np.inf is returned.
Notes
-----
In an interval around lambda=0, this function uses the [4,4] Pade
approximation to compute the variance. Otherwise it uses the standard
formula (http://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
Pade approximation is used because the standard formula has a removable
discontinuity at lambda = 0, and does not produce accurate numerical
results near lambda = 0.
"""
lam = np.asarray(lam)
shp = lam.shape
lam = np.atleast_1d(lam).astype(np.float64)
# For absolute values of lam less than threshold, use the Pade
# approximation.
threshold = 0.075
# Play games with masks to implement the conditional evaluation of
# the distribution.
# lambda < -0.5: var = nan
low_mask = lam < -0.5
# lambda == -0.5: var = inf
neghalf_mask = lam == -0.5
# abs(lambda) < threshold: use Pade approximation
small_mask = np.abs(lam) < threshold
# else the "regular" case: use the explicit formula.
reg_mask = ~(low_mask | neghalf_mask | small_mask)
# Get the 'lam' values for the cases where they are needed.
small = lam[small_mask]
reg = lam[reg_mask]
# Compute the function for each case.
v = np.empty_like(lam)
v[low_mask] = np.nan
v[neghalf_mask] = np.inf
if small.size > 0:
# Use the Pade approximation near lambda = 0.
v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
if reg.size > 0:
v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
beta(reg + 1, reg + 1))
v.shape = shp
return v
# The following code was used to generate the Pade coefficients for the
# Tukey Lambda kurtosis function. Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
# three = mp.mpf(3)
# four = mp.mpf(4)
#
# def mpkurt(lam):
# if lam == 0:
# k = mp.mpf(6)/5
# else:
# numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
# three*mp.beta(two*lam+one, two*lam+one))
# denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
# k = numer / denom - three
# return k
#
# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
# # taylor function and we request a degree 9 Taylor polynomial, we actually
# # get degree 8.
# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
# t = [mp.chop(c, tol=1e-15) for c in t]
# p, q = mp.pade(t, 4, 4)
# print "p =", [mp.fp.mpf(c) for c in p]
# print "q =", [mp.fp.mpf(c) for c in q]
#---------------------------------------------------------------------------
# Pade coefficients for the Tukey Lambda kurtosis function.
_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
0.20601184383406815, 4.59796302262789]
_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
0.43075235247853005, -2.789746758009912]
# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda kurtosis.
_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])
def tukeylambda_kurtosis(lam):
"""Kurtosis of the Tukey Lambda distribution.
Parameters
----------
lam : array_like
The lambda values at which to compute the variance.
Returns
-------
v : ndarray
The variance. For lam < -0.25, the variance is not defined, so
np.nan is returned. For lam = 0.25, np.inf is returned.
"""
lam = np.asarray(lam)
shp = lam.shape
lam = np.atleast_1d(lam).astype(np.float64)
# For absolute values of lam less than threshold, use the Pade
# approximation.
threshold = 0.055
# Use masks to implement the conditional evaluation of the kurtosis.
# lambda < -0.25: kurtosis = nan
low_mask = lam < -0.25
# lambda == -0.25: kurtosis = inf
negqrtr_mask = lam == -0.25
# lambda near 0: use Pade approximation
small_mask = np.abs(lam) < threshold
# else the "regular" case: use the explicit formula.
reg_mask = ~(low_mask | negqrtr_mask | small_mask)
# Get the 'lam' values for the cases where they are needed.
small = lam[small_mask]
reg = lam[reg_mask]
# Compute the function for each case.
k = np.empty_like(lam)
k[low_mask] = np.nan
k[negqrtr_mask] = np.inf
if small.size > 0:
k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
if reg.size > 0:
numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
3 * beta(2 * reg + 1, 2 * reg + 1))
denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2
k[reg_mask] = numer / denom - 3
# The return value will be a numpy array; resetting the shape ensures that
# if `lam` was a scalar, the return value is a 0-d array.
k.shape = shp
return k
| StarcoderdataPython |
1720117 | <reponame>arensdj/data-structures-and-algorithms
from tree import BinarySearchTree
def test_preorder_traversal():
tree = BinarySearchTree()
tree.add(25)
tree.add(15)
tree.add(35)
tree.add(8)
tree.add(19)
tree.add(30)
tree.add(45)
expected = [25, 15, 8, 19, 35, 30, 45]
result = []
result = tree.get_pre_order_data()
assert result == expected
def test_breadth_order_traversal():
tree = BinarySearchTree()
tree.add(25)
tree.add(15)
tree.add(35)
tree.add(8)
tree.add(19)
tree.add(30)
tree.add(45)
expected = [25, 15, 35, 8, 19, 30, 45]
result = []
# result = tree.binary_tree.breadth_first_traversal(None)
result = tree.get_breadth_order_data()
assert result == expected
| StarcoderdataPython |
11299017 | from torch.utils.data import Dataset
class ChunkDataset(Dataset):
"""
Class implementing chunk-based loading.
"""
def __init__(self, cfg, mode='train'):
super(ChunkDataset, self).__init__()
self.cfg = cfg
self.mode = mode
self.chunk_idx = 0 # Next chunk index to load
def shuffle_index(self):
"""
Shuffle indices for re-sampling chunk.
"""
raise NotImplementedError
def load_chunk(self):
"""
Load a chunk to RAM.
"""
raise NotImplementedError
def restart_chunk(self):
self.chunk_idx = 0
def free_chunk(self):
"""
Free all reserved chunks to save RAM space.
"""
raise NotImplementedError
| StarcoderdataPython |
9780580 | <filename>yadi/datalog2sql/ast2sql/ast2sqlconverter.py
from .safety_checker import *
from .preprocessor import *
from .sql_generator import *
from ...sql_engine.db_state_tracker import DBStateTracker
from ...interpreter.syntax_highlighter import SyntaxHighlight
class Ast2SqlConverter:
def __init__(self, db_state_tracker):
self.db_state_tracker = db_state_tracker
def convertAst2Sql(self, query):
print('Original query before processing: ' + \
SyntaxHighlight().highlight(str(query)), end="")
# Preprocess the query
new_query = QueryPreprocessor().preprocess(query)
print('Query transformed into: ' + SyntaxHighlight().highlight(str(new_query)))
# Check the safety of the query. Throws an exception if not safe
SafetyChecker().check_for_safety(new_query)
# Generates sql code from the preprocessed query
sql = SQLGenerator(self.db_state_tracker).get_SQL_code(new_query,query)
return sql
| StarcoderdataPython |
5176986 | # Generated by Django 3.0.8 on 2020-08-11 07:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MlModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model_name', models.CharField(max_length=100)),
('heroku_url', models.CharField(max_length=100)),
('model_description', models.TextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
],
),
]
| StarcoderdataPython |
9674952 | <filename>climber/__init__.py
__version__ = '0.1.4'
import requests
import re
import json
from bs4 import BeautifulSoup
# TODO: def see_also() => makes a whole set of related thhings to the topic
# chosen
# TODO:
# def chossy() => parse disambiguation pages can be called
# when the page reached durign climb or
# any given method in the class and it hits a "chossy page"
# one that cannot be parsed in this custiomary
# method ie a disambiguation page or otherwise
# TODO:
# def flash() => grab directly a section of the overall page when supplied
# a set of context levels and/or a bit of text that it can match
# climb links should build based on a depth choice and and builds graph of
# links to help determine later searches
# TODO: add comments to this
# TODO: bolts should also allow for optional images.
# TODO:
# climb should have options (object) passed in to allow it to include images
# in route or to include graph of links with given
# level of depth
# TODO:
# You are creating context and subcontexts, text, links => Bolt() object
# and loading into an Array building structure to the wiki itself
# (or any large text based information page) that can be accessed
# parsed as such. Later should incorporate other checks to find titles and
# context that are more universal.
# TODO:
# Should also work with any amount of headers
# fix the h1 - ?? checks so they are extensible rather than hard coded
# this so it matches the h# set up and loops to
# decide on depth or just inputs the number found
# as the hash for the entry (headers define amounts of context)
# TODO: create overall function that sanitizes the strings for printing them
# "pretty"
# TODO: Replace complex words with definitions you find in the underlying link
# or using dictionary.
# TODO: Build some test harnesses for API and Restful-API.
# TODO: Return related topics and souroundign topics using wikis dropdowns,
# as part of climb or as separate API function.
def check_text(text):
    """Return *text* unchanged unless it is empty or the "Contents"
    heading, in which case return None (falsy) so callers can skip it."""
    if text in ("", "Contents"):
        return None
    return text
def chossy():
    """Return the standard error payload for disambiguation ("chossy") pages."""
    message = "This is a Disambiguation Page...\n\n"
    return {"error": message}
class Bolt():
    """One paragraph of text plus the header "contexts" it sits under."""

    def __init__(self, text):
        self.contexts = {}  # level name ("one".."four") -> header text
        self.text = text
        self.images = None  # reserved for future image support

    def belay(self, context, level=None):
        """Attach *context* at *level*; with no level, reset and store as "one"."""
        if level:
            self.contexts[level] = context
        else:
            self.contexts = {"one": context}

    def encode(self):
        """Return a plain dict suitable for JSON serialisation."""
        return {"text": self.text, "contexts": self.contexts}

    def __str__(self):
        lines = ["Text: " + self.text, "Context:"]
        for key, value in self.contexts.items():
            lines.append("lvl" + key + ": " + value)
        return "\n".join(lines)
class Climber():
    """Scrape a Wikipedia article ("climb" it) into structured Bolt records.

    Recognised options: "depth" and "summary" (stored but currently unused
    by scaffold_basic) and "images" (include scraped image URLs in the
    climb() payload).
    """

    def __init__(self, options=None):
        self.options = {} if not options else options

    def climb(self, topic):
        """Fetch the wiki page for *topic* and return the parsed JSON payload.

        Returns None when topic is None, and a chossy() error dict when the
        page turns out to be a disambiguation page.
        """
        self.depth = self.options.get("depth")
        self.summary = self.options.get("summary")
        if topic is None:
            return None
        url = 'http://en.wikipedia.org/?title=%s' % topic
        content = requests.get(url)
        self.soup = BeautifulSoup(content.text, "html.parser")
        # A non-empty match means the page carries a disambiguation box.
        check = self.soup.find_all(id="disambigbox")
        return self.get_scaffold(check)

    @staticmethod
    def _image_url(src):
        """Normalise an <img> src into an absolute https URL.

        BUG FIX: Wikipedia serves protocol-relative sources ("//upload...");
        the original "https://" + src produced "https:////upload...".
        """
        if src.startswith("//"):
            return "https:" + src
        return "https://" + src

    def climb_images(self, topic=None):
        """Return a JSON list of image URLs from the current (or fetched) page.

        BUG FIX: the disambiguation check was inverted relative to
        get_scaffold() -- a non-empty "disambigbox" match means the page IS
        a disambiguation page and must yield chossy(), not be scraped.
        """
        if topic is not None:
            url = 'http://en.wikipedia.org/?title=%s' % topic
            content = requests.get(url)
            self.soup = BeautifulSoup(content.text, "html.parser")
            if self.soup.find_all(id="disambigbox"):
                return chossy()
        images = [self._image_url(image["src"])
                  for image in self.soup.findAll("img")]
        return json.dumps(images)

    def get_scaffold(self, check):
        """Assemble the JSON payload, or a chossy() error when *check* (the
        disambiguation-box matches from climb()) is non-empty."""
        if len(check):
            # TODO: return the disambiguation alternatives instead.
            return chossy()
        wiki_parsed = self.scaffold_basic(self.summary, self.depth)
        images_list = None
        if "images" in self.options.keys():
            # NOTE: climb_images() returns a JSON *string* which is then
            # re-encoded below -- preserved from the original behaviour.
            images_list = self.climb_images()
        if images_list is None:
            return json.dumps({"data": wiki_parsed})
        return json.dumps({"data": wiki_parsed,
                           "images": images_list})

    def scaffold_basic(self, summary, depth):
        """Walk headers and paragraphs, bolting each paragraph to its
        enclosing h1-h4 context. *summary* and *depth* are accepted for
        future use.
        """
        selected = []
        h = ["", "", "", ""]
        levels = {"h1": 0, "h2": 1, "h3": 2, "h4": 3}
        for section in self.soup.find_all(["h1", "h2", "h3", "h4", "p"]):
            try:
                if section.name in levels:
                    text = section.get_text()
                    if check_text(text):
                        idx = levels[section.name]
                        h[idx] = text
                        if idx:
                            # A new h2/h3 header invalidates any deeper,
                            # stale context (matches the original resets;
                            # h1 deliberately leaves lower levels alone).
                            for deeper in range(idx + 1, 4):
                                h[deeper] = ""
                else:
                    # Paragraph: strip "[n]" citation markers and bolt it
                    # to the current header context.
                    string = section.get_text()
                    if string != "":
                        string = re.sub(r"\[\d+\]", "", string)
                        bolt = Bolt(string)
                        bolt.belay(h[0], "one")
                        bolt.belay(h[1], "two")
                        bolt.belay(h[2], "three")
                        bolt.belay(h[3], "four")
                        selected.append(bolt.encode())
            except Exception as e:
                # BUG FIX: was the Python 2 statement "print e", a syntax
                # error under Python 3.
                print(e)
                continue
        return selected

    # TODO: climb_links(topic, options) -- build a graph of article links to
    # a configurable depth (previously sketched as commented-out code).
# Builds map of links with given search depth option as parameter.
# def climb_links(self, topic, options):
# if(not len(check)):
# link_query = 'div#mw-content-text a'
# links = [a.get('href') for a in self.soup.select(link_query)]
# return json.dumps(links)
# else:
# return chossy()
| StarcoderdataPython |
8165589 | import numpy as np
import json
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
import time
import sys
# Command-line interface: <train.jsonl> <test.jsonl> <predictions.txt>
train_file = sys.argv[1]
test_file = sys.argv[2]
output_file = sys.argv[3]
# Each input file holds one JSON review per line with (at least) "text"
# and "stars" fields.
# NOTE(review): the file handles opened inline here are never closed.
train_data = [json.loads(line) for line in open(train_file, 'r')]
test_data = [json.loads(line) for line in open(test_file, 'r')]
m_train = len(train_data)
m_test = len(test_data)
reviews_train = [train_data[i]['text'] for i in range(m_train)]
stars_train = [train_data[i]['stars'] for i in range(m_train)]
reviews_test = [test_data[i]['text'] for i in range(m_test)]
stars_test = [test_data[i]['stars'] for i in range(m_test)]
# Hold out 10% of the training reviews for model selection.
x_train, x_val, y_train, y_val = train_test_split(reviews_train, stars_train, test_size=0.1)
# Filled in by LIBLINEAR(): one entry per candidate C value.
valAcc = []
classifiers = []
def Accuracy(clf, x, y):
    """Return (predictions, fraction of predictions equal to y) for clf on x."""
    predictions = clf.predict(x)
    accuracy = np.mean(y == predictions)
    return predictions, accuracy
def LIBLINEAR():
    """Grid-search LinearSVC over C on bag-of-words features.

    Fits one pipeline per candidate C, records each fitted model and its
    validation accuracy in the module-level `classifiers` / `valAcc`
    lists, and returns (best_classifier, best_C) by validation accuracy.
    """
    candidate_cs = [10, 5, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
    for c in candidate_cs:
        print("C = ", c)
        model = Pipeline([('vect', CountVectorizer()), ('clf', svm.LinearSVC(C=c))])
        fit_started = time.time()
        model.fit(x_train, y_train)
        fit_elapsed = time.time() - fit_started
        _, train_acc = Accuracy(model, x_train, y_train)
        _, val_acc = Accuracy(model, x_val, y_val)
        valAcc.append(val_acc)
        classifiers.append(model)
        print("Training Time = ", fit_elapsed)
        print("Training Accuracy = ", train_acc)
        print("Val Accuracy = ", val_acc)
    best_index = np.argmax(np.array(valAcc))
    print("Best C = ", candidate_cs[best_index])
    return classifiers[best_index], candidate_cs[best_index]
# Evaluate the selected model on the held-out test set and write one
# integer star prediction per line to the output file.
clf, C = LIBLINEAR()
test_pred, test_acc = Accuracy(clf, reviews_test, stars_test)
print("Test Accuracy = ", test_acc)
# Fixes: close the output file deterministically (it was left open) and
# drop the C-style "(int)(pred)" cast in favour of a plain int() call.
with open(output_file, 'w') as f:
    for pred in test_pred:
        print(int(pred), file=f)
def make_example_image_1():
    """Render the single-comparison boxplot example to docs/_static/images."""
    from playfair.compare import add_comparisons_to_axes, Comparison, stars
    from matplotlib import pyplot as plt
    import numpy as np

    # Two synthetic populations for the boxplot.
    left = np.linspace(1, 2, 55)
    right = np.linspace(2, 2.5, 34)

    # Significance marker joining boxplot positions 1 and 2.
    marker = Comparison("$p < 0.01$", left, right, 1, 2)

    fig, ax = plt.subplots(1)
    ax.boxplot([left, right], labels=["Left", "Right"])
    add_comparisons_to_axes(ax, [marker])
    # Widen the y-range by hand so the marker fits inside the axes --
    # matplotlib's autoscaling does not account for it.
    ax.set_ylim(0, 3.5)
    fig.savefig('docs/_static/images/example1.png')
def make_example_image_2():
    """Render the two-comparison boxplot example to docs/_static/images."""
    from playfair.compare import add_comparisons_to_axes, Comparison, stars
    from matplotlib import pyplot as plt
    import numpy as np

    # Generate some data
    d1 = np.linspace(1, 2, 55)
    d2 = np.linspace(2, 2.5, 34)
    d3 = np.linspace(1.35, 1.70, 55)

    # Create a comparison marker between populations at the positions 1 and 2.
    comparison_marker_1 = Comparison("$p < 0.01$", d1, d2, 1, 2)
    # Create a comparison marker between populations at the positions 1 and 3.
    # BUG FIX: this marker spans boxplot positions 1 and 3, but the original
    # passed (d1, d2); it now passes (d1, d3) to match the plotted data.
    comparison_marker_2 = Comparison("$p < 0.05$", d1, d3, 1, 3)

    fig, ax = plt.subplots(1)
    ax.boxplot([d1, d2, d3], labels=["A", "B", "C"])
    add_comparisons_to_axes(ax, [comparison_marker_1, comparison_marker_2])
    # Set the ylims manually because matplotlib isn't smart enough
    # to scale things such that the markers fit in the plot
    ax.set_ylim(0, 4)
    fig.savefig('docs/_static/images/example2.png')
if __name__ == '__main__':
    # Regenerate both documentation example images.
    make_example_image_1()
    make_example_image_2()
| StarcoderdataPython |
6685141 | import os
def list_dir():
    '''Print out working directory path, as well as the subdirectories
    and files, indented four spaces per directory level.
    '''
    print("You are here: " + os.getcwd() + "\n")
    for root, dirs, files in os.walk("."):
        # Depth below the starting directory. FIX: the original stripped
        # every "." character before counting, which never changes the
        # separator count but silently mangles dotted directory names --
        # counting os.sep directly is equivalent and not misleading.
        level = root.count(os.sep)
        indent = ' ' * 4 * (level)
        print('{}{}/'.format(indent, os.path.basename(root)))
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print('{}{}'.format(subindent, f))
12831226 | <filename>hmmer_reader/_click.py<gh_stars>1-10
import click
def command(either=None):
    """Build a click.Command subclass that rejects invocations in which
    more than one option from any group in *either* was supplied."""
    groups = [] if either is None else either

    class CommandOptionsTogether(click.Command):
        def invoke(self, ctx):
            for group in groups:
                supplied = [opt for opt in group if ctx.params[opt] is not None]
                if len(supplied) > 1:
                    names = ", ".join(f"--{o}" for o in group)
                    raise click.ClickException(
                        f"The options [{names}] are mutually exclusive."
                    )
            super().invoke(ctx)

    return CommandOptionsTogether
| StarcoderdataPython |
3298154 | <reponame>ktaranov/HPI<gh_stars>1-10
from pathlib import Path
from my.core.common import get_files
import pytest # type: ignore
def test_single_file():
    """
    Regular file path is just returned as is.
    """
    "Exception if it doesn't exist"
    # NOTE(review): only asserts that *some* Exception is raised --
    # presumably a missing-file error; confirm the exact type.
    with pytest.raises(Exception):
        get_files("/tmp/hpi_test/file.ext")
    create("/tmp/hpi_test/file.ext")
    """
    Couple of things:
    1. Return type is a tuple, it's friendlier for hashing/caching
    2. It always return pathlib.Path instead of plain strings
    """
    assert get_files("/tmp/hpi_test/file.ext") == (Path("/tmp/hpi_test/file.ext"),)
    "if the path starts with ~, we expand it"
    # NOTE(review): appears to assume ~/.bashrc exists on the test machine.
    assert get_files("~/.bashrc") == (Path("~").expanduser() / ".bashrc",)
def test_multiple_files():
    """
    If you pass a directory/multiple directories, it flattens the contents
    """
    create("/tmp/hpi_test/dir1/")
    create("/tmp/hpi_test/dir1/zzz")
    create("/tmp/hpi_test/dir1/yyy")
    # create('/tmp/hpi_test/dir1/whatever/') # TODO not sure about this... should really allow extra dirs
    # dir2 is created but deliberately NOT passed below: unrelated
    # directories must be ignored by get_files.
    create("/tmp/hpi_test/dir2/")
    create("/tmp/hpi_test/dir2/mmm")
    create("/tmp/hpi_test/dir2/nnn")
    create("/tmp/hpi_test/dir3/")
    create("/tmp/hpi_test/dir3/ttt")
    assert get_files(
        [
            Path("/tmp/hpi_test/dir3"),  # it takes in Path as well as str
            "/tmp/hpi_test/dir1",
        ]
    ) == (
        # the paths are always returned in sorted order (unless you pass sort=False)
        Path("/tmp/hpi_test/dir1/yyy"),
        Path("/tmp/hpi_test/dir1/zzz"),
        Path("/tmp/hpi_test/dir3/ttt"),
    )
def test_explicit_glob():
    """
    You can pass a glob to restrict the extensions
    """
    create("/tmp/hpi_test/file_3.zip")
    create("/tmp/hpi_test/file_2.zip")
    # "ignoreme" and "file.zip" must both be excluded by "file_*.zip".
    create("/tmp/hpi_test/ignoreme")
    create("/tmp/hpi_test/file.zip")
    # todo walrus operator would be great here...
    expected = (
        Path("/tmp/hpi_test/file_2.zip"),
        Path("/tmp/hpi_test/file_3.zip"),
    )
    assert get_files("/tmp/hpi_test", "file_*.zip") == expected
    "named argument should work too"
    assert get_files("/tmp/hpi_test", glob="file_*.zip") == expected
def test_implicit_glob():
    """
    Asterisc in the path results in globing too.
    """
    # todo hopefully that makes sense? dunno why would anyone actually rely on asteriscs in names..
    # this is very convenient in configs, so people don't have to use some special types
    create("/tmp/hpi_test/123/")
    create("/tmp/hpi_test/123/dummy")
    create("/tmp/hpi_test/123/file.zip")
    create("/tmp/hpi_test/456/")
    create("/tmp/hpi_test/456/dummy")
    create("/tmp/hpi_test/456/file.zip")
    # "dummy" files prove the *.zip component filters by extension.
    assert get_files(["/tmp/hpi_test/*/*.zip"]) == (
        Path("/tmp/hpi_test/123/file.zip"),
        Path("/tmp/hpi_test/456/file.zip"),
    )
def test_no_files():
    """
    Test for empty matches. They work, but should result in warning
    """
    # NOTE(review): pytest.warns(None) is deprecated in newer pytest
    # releases; this may need updating when pytest is upgraded.
    with pytest.warns(None) as record:
        assert get_files("") == ()
    # todo test these for warnings?
    assert get_files([]) == ()
    assert get_files("bad*glob") == ()
    assert len(record) == 2
    assert "no paths were matched against" in str(record[0].message)
# TODO not sure if should uniquify if the filenames end up same?
# TODO not sure about the symlinks? and hidden files?
# Scratch directory shared by every test above; created fresh in setup()
# and removed in teardown().
test_path = Path("/tmp/hpi_test")
def setup():
    """Start each test from a fresh, empty scratch directory."""
    teardown()  # drop any leftovers from a previous run first
    test_path.mkdir()
def teardown():
    """Remove the scratch directory (and all its contents) if present."""
    import shutil

    if test_path.is_dir():
        shutil.rmtree(test_path)
def create(f: str) -> None:
    """Create an empty file at *f*, or a directory when *f* ends with "/"."""
    path = Path(f)
    if f.endswith("/"):
        path.mkdir()
    else:
        path.touch()
| StarcoderdataPython |
9714197 | from setuptools import setup
# Read the pinned runtime dependencies so setup() and requirements.txt
# cannot drift apart.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()
# NOTE(review): the author/author_email fields hold anonymisation
# placeholders ("<NAME>"/"<EMAIL>") and should be restored before release.
setup(
    name='InstagramAPI',
    version='1.0.2',
    description='Unofficial instagram API, give you access to ALL instagram features (like, follow, upload photo and video and etc)! Write on python.',
    url='https://github.com/LevPasha/Instagram-API-python/',
    author='<NAME>',
    author_email='<EMAIL>',
    license='GNU',
    packages=['InstagramAPI'],
    zip_safe=False,
    install_requires=requirements,
    python_requires='>=3.6'
)
| StarcoderdataPython |
261733 | """ Xbox 360 controller support for Python
11/9/13 - <NAME>
This class module supports reading a connected Xbox controller under Python 2 and 3.
You'll need to first install xboxdrv:
sudo apt-get install xboxdrv
See http://pingus.seul.org/~grumbel/xboxdrv/ for details on xboxdrv
Example usage:
import xbox
joy = xbox.Joystick() #Initialize joystick
if joy.A(): #Test state of the A button (1=pressed, 0=not pressed)
        print('A button pressed')
x_axis = joy.leftX() #X-axis of the left stick (values -1.0 to 1.0)
(x,y) = joy.leftStick() #Returns tuple containing left X and Y axes (values -1.0 to 1.0)
trigger = joy.rightTrigger() #Right trigger position (values 0 to 1.0)
joy.close() #Cleanup before exit
All controller buttons are supported. See code for all functions.
"""
import subprocess
import select
import time
class Joystick:
    """Reads an Xbox 360 controller via a spawned 'xboxdrv' subprocess.

    xboxdrv prints one fixed-width, 140-character status line per event;
    every accessor below slices a field out of the most recent line (kept
    in ``self.reading``).  The refreshRate caps how often new events are
    polled from xboxdrv's stdout; any accessor triggers a refresh once
    refreshDelay has elapsed, so call one at least ~once per second to
    keep the pipe drained.

    Usage:
        joy = xbox.Joystick()

    REFACTOR: the twenty near-identical button/axis accessors now delegate
    to the private ``_field``/``_axis`` helpers instead of repeating the
    refresh-then-slice pattern; the public interface is unchanged.
    """

    def __init__(self, refreshRate=30):
        self.proc = subprocess.Popen(
            ['xboxdrv', '--no-uinput', '--detach-kernel-driver'],
            stdout=subprocess.PIPE, bufsize=0)
        self.pipe = self.proc.stdout
        self.connectStatus = False  # latches True once a valid event line arrives
        self.reading = '0' * 140    # most recent 140-char status line
        self.refreshTime = 0        # absolute time when the next refresh may occur
        self.refreshDelay = 1.0 / refreshRate
        # Read xboxdrv's output for up to 2 seconds, looking for the
        # controller/receiver to respond.
        found = False
        waitTime = time.time() + 2
        while waitTime > time.time() and not found:
            readable, writeable, exception = select.select([self.pipe], [], [], 0)
            if readable:
                response = self.pipe.readline()
                # Hard fail if xboxdrv reports no hardware at all.
                if response[0:7] == b'No Xbox':
                    raise IOError('No Xbox controller/receiver found')
                # The banner line means xboxdrv started successfully.
                if response[0:12].lower() == b'press ctrl-c':
                    found = True
                # A full 140-char line means we are already seeing events.
                if len(response) == 140:
                    found = True
                    self.connectStatus = True
                    self.reading = response
        if not found:
            self.close()
            raise IOError('Unable to detect Xbox controller/receiver - Run python as sudo')

    def refresh(self):
        """Drain pending xboxdrv output (rate-limited) and keep the latest line.

        Used by every accessor; if a valid event line is found the
        controller is flagged as connected.
        """
        if self.refreshTime < time.time():
            self.refreshTime = time.time() + self.refreshDelay
            readable, writeable, exception = select.select([self.pipe], [], [], 0)
            if readable:
                # Consume every buffered line; only the newest one matters.
                while readable:
                    response = self.pipe.readline()
                    # A zero-length read means the USB device vanished.
                    if len(response) == 0:
                        raise IOError('Xbox controller disconnected from USB')
                    readable, writeable, exception = select.select([self.pipe], [], [], 0)
                if len(response) == 140:
                    self.connectStatus = True
                    self.reading = response
                else:
                    # Anything else: wireless dropout or dead batteries.
                    self.connectStatus = False

    def connected(self):
        """True while valid controller events are being received.

        Readings freeze at their last values when the connection drops, so
        act on inputs only while connected() holds (e.g. stop motors on
        "not connected()").  The status resumes automatically once the
        fault is corrected.
        """
        self.refresh()
        return self.connectStatus

    # ---- internal helpers ------------------------------------------------
    def _field(self, start, end):
        """Refresh, then return int(reading[start:end])."""
        self.refresh()
        return int(self.reading[start:end])

    def _axis(self, start, end, deadzone):
        """Refresh, then return the deadzone-corrected axis in [-1.0, 1.0]."""
        return self.axisScale(self._field(start, end), deadzone)

    # ---- analog sticks (values -1.0 to 1.0) ------------------------------
    def leftX(self, deadzone=4000):
        """Left stick X: -1.0 (left) to 1.0 (right)."""
        return self._axis(3, 9, deadzone)

    def leftY(self, deadzone=4000):
        """Left stick Y: -1.0 (down) to 1.0 (up)."""
        return self._axis(13, 19, deadzone)

    def rightX(self, deadzone=4000):
        """Right stick X: -1.0 (left) to 1.0 (right)."""
        return self._axis(24, 30, deadzone)

    def rightY(self, deadzone=4000):
        """Right stick Y: -1.0 (down) to 1.0 (up)."""
        return self._axis(34, 40, deadzone)

    def axisScale(self, raw, deadzone):
        """Scale raw -32768..+32767 to -1.0..1.0 with a centered deadzone.

        Values within +/-deadzone of center read as exactly 0.0.
        """
        if abs(raw) < deadzone:
            return 0.0
        if raw < 0:
            return (raw + deadzone) / (32768.0 - deadzone)
        return (raw - deadzone) / (32767.0 - deadzone)

    # ---- digital buttons (1 = pressed, 0 = not pressed) ------------------
    def dpadUp(self):
        return self._field(45, 46)

    def dpadDown(self):
        return self._field(50, 51)

    def dpadLeft(self):
        return self._field(55, 56)

    def dpadRight(self):
        return self._field(60, 61)

    def Back(self):
        return self._field(68, 69)

    def Guide(self):
        return self._field(76, 77)

    def Start(self):
        return self._field(84, 85)

    def leftThumbstick(self):
        return self._field(90, 91)

    def rightThumbstick(self):
        return self._field(95, 96)

    def A(self):
        return self._field(100, 101)

    def B(self):
        return self._field(104, 105)

    def X(self):
        return self._field(108, 109)

    def Y(self):
        return self._field(112, 113)

    def leftBumper(self):
        return self._field(118, 119)

    def rightBumper(self):
        return self._field(123, 124)

    # ---- analog triggers (0.0 to 1.0) ------------------------------------
    def leftTrigger(self):
        """Left trigger position, 0.0 to 1.0."""
        return self._field(129, 132) / 255.0

    def rightTrigger(self):
        """Right trigger position, 0.0 to 1.0."""
        return self._field(136, 139) / 255.0

    # ---- convenience tuples ----------------------------------------------
    def leftStick(self, deadzone=4000):
        """(x, y) tuple for the left stick, each -1.0 to 1.0."""
        self.refresh()
        return (self.leftX(deadzone), self.leftY(deadzone))

    def rightStick(self, deadzone=4000):
        """(x, y) tuple for the right stick, each -1.0 to 1.0."""
        self.refresh()
        return (self.rightX(deadzone), self.rightY(deadzone))

    def close(self):
        """Cleanup before exit: terminate the xboxdrv subprocess."""
        self.proc.kill()
| StarcoderdataPython |
5053322 | #!/usr/bin/env python3
# Update the ValidatingWebhookConfiguration with the contents of the Service CA.
from kubernetes import client, config
import os
import argparse
import copy
import base64
# CLI: -a names the annotation whose value points at "<namespace>/<configmap>"
# holding the CA bundle (PEM under the "service-ca.crt" key by default).
parser = argparse.ArgumentParser(description="Options to Program")
parser.add_argument('-a', default="managed.openshift.io/inject-cabundle-from", dest='annotation_name', help='What is the annotation that has a reference to a namespace/configmap for the caBundle. The cert must be stored in pem format in a key called service-ca.crt')
parsed = parser.parse_args()
# Assumes the script runs inside a cluster pod (in-cluster service account).
config.load_incluster_config()
admission_client = client.AdmissionregistrationV1beta1Api()
cm_client = client.CoreV1Api()
def get_cert_from_configmap(client, namespace, configmap_name, key="service-ca.crt"):
    """Return the trimmed PEM cert stored under *key* in the given ConfigMap,
    or None if the ConfigMap cannot be read or lacks the key.

    FIX: the original bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; narrowed to ``except Exception`` around the API call only.
    """
    try:
        configmap = client.read_namespaced_config_map(configmap_name, namespace)
    except Exception:
        return None
    value = configmap.data.get(key) if configmap.data else None
    return value.rstrip() if value is not None else None
def encode_cert(cert):
    """Base64-encode a UTF-8 certificate string, returning a str."""
    raw = cert.encode("UTF-8")
    return base64.b64encode(raw).decode("UTF-8")
def get_validating_webhook_configuration_objects_with_annotation(client, annotation):
    """Return every ValidatingWebhookConfiguration carrying *annotation*."""
    matching = []
    for cfg in client.list_validating_webhook_configuration().items:
        annotations = cfg.metadata.annotations
        if annotations is not None and annotation in annotations:
            matching.append(cfg)
    return matching
# Inject the CA bundle into every annotated ValidatingWebhookConfiguration.
for vwc in get_validating_webhook_configuration_objects_with_annotation(admission_client, parsed.annotation_name):
    ns, cm_name = vwc.metadata.annotations[parsed.annotation_name].split('/')
    cert = get_cert_from_configmap(cm_client, ns, cm_name)
    if cert is None:
        print("WARNING: Skipping validatingwebhookconfiguration/{}: Couldn't find a cert from {}/{} ConfigMap. \n".format(vwc.metadata.name, ns, cm_name))
        continue
    encoded_cert = encode_cert(cert)
    new_vwc = copy.deepcopy(vwc)
    for hook in new_vwc.webhooks:
        # BUG FIX: the original used an identity test ("is not encoded_cert"),
        # which is true for any distinct str object; compare values so
        # already-up-to-date hooks are not rewritten and logged every run.
        if hook.client_config.service is not None and hook.client_config.ca_bundle != encoded_cert:
            hook.client_config.ca_bundle = encoded_cert
            print("validatingwebhookconfiguration/{}: Injecting caBundle from {}/{}, for hook name {}, to service/{}/{}\n".format(new_vwc.metadata.name, ns, cm_name, hook.name, hook.client_config.service.namespace, hook.client_config.service.name))
    try:
        admission_client.patch_validating_webhook_configuration(name=new_vwc.metadata.name, body=new_vwc)
    except Exception as err:
        # BUG FIXES: the error message used str.format placeholders with
        # print-style comma arguments, and os.exit() does not exist
        # (AttributeError at the worst possible moment).
        print("ERROR: Couldn't save validatingwebhookconfiguration/{}: {}\n".format(new_vwc.metadata.name, err))
        raise SystemExit(1)
| StarcoderdataPython |
9753786 | <reponame>Indigo-Uliv/indigo-cli<gh_stars>0
"""
Indigo Command Line Interface -- multiple put.
Copyright 2015 Archive Analytics Solutions
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sqlite3
import time
from Queue import Queue
from threading import Thread
import os.path
from requests import ConnectionError
# Start
# We have two functions, the outer one is just to manage the status of the operation in the database
# the child function ( the worker ) actually puts the file
#
def file_putter(q, client, cnx, cache = None , db_queue = None ) :
    """
    Pull local (source) file and remote ( target ) object paths and send them, and then
    update the database that tracks the files....
    :param q: Queue of (source_path, target_path, row_id) tuples
    :param client: IndigoClient
    :param cnx: sqlite3.Connection (or a path string) used to update the file status on completion
    :param cache: .utils._dirmgmt -- directory-existence cache shared with the worker
    :param db_queue: Queue -- when given, status rows are queued here instead of written directly
    :return: N/A (loops forever; the parent thread joins the queue)
    """
    # NOTE(review): this is Python 2 code ("basestring", print statement);
    # it will not run unmodified under Python 3.
    ### Set everything up ... primarily database connection
    _stmt1 = '''UPDATE transfer SET state = ? ,start_time=? , end_time = ? Where row_id = ?'''
    cs = None
    if cnx :
        # Accept either an already-open connection or a database path.
        if isinstance(cnx,basestring) :
            cnx = sqlite3.connect(cnx)
        if not isinstance(cnx,sqlite3.Connection) :
            raise ValueError("don't know what to do with {} for database connection".format(cnx))
        cs = cnx.cursor()
    ### Now loop on the queue entry ... which will continue until the parent thread 'joins'
    while True:
        src, target, row_id = q.get()
        T0 = time.time()
        ret = file_putter_worker(src,target , client, cache = cache )
        T1 = time.time()
        q.task_done()
        # ret is a dict with 'ok' (and 'msg' on failure), or None.
        if ret and ret['ok'] : status = 'DONE'
        else :
            status = 'FAIL'
            if ret : print ret['msg']
        # Prefer the shared db_queue (single writer); fall back to a direct
        # UPDATE when this thread owns a cursor.
        if db_queue :
            db_queue.put((row_id,status,T0,T1))
        elif cs :
            try:
                cs.execute(_stmt1, (status, T0, T1, row_id))
                cs.connection.commit()
            except sqlite3.OperationalError as e :
                # Locked/busy database: status updates are best-effort and
                # deliberately dropped rather than crashing the worker.
                pass
def file_putter_worker(src, target , client, cache = None ):
    """
    Upload one local file to one remote CDMI object path.
    :param src: basestring -- local file path
    :param target: basestring -- remote object path
    :param client: IndigoClient
    :param cache: .util._dirmgmt -- container cache; when present, missing parent
        containers are created first
    :return: dict with 'ok' (and 'msg' on failure).
        NOTE(review): falls through and returns None when client.put()
        succeeds but res.ok() is False -- callers treat falsy as failure
        without a message; confirm that is intended.
    """
    ### Handle directory creation here...
    ### Note that the cache object recursively creates containers... walking up the tree until it finds a container
    ### and then walking down creating as it goes ...
    ###
    if cache is not None : # Cache may be empty, or it may be not present, so be precise.
        tgtdir,nm = os.path.split(target)
        if not cache.getdir(tgtdir, client):
            return {'ok': False, 'msg': 'Failed to Create {} or one of its parents'.format(tgtdir)}
    with open(src, 'rb') as fh:
        try:
            res = client.put(target, fh)
            if res.ok() :
                print 'put ',str(target)
                return {'ok' : True }
        except ConnectionError as e:
            return {'ok': False, 'msg': 'Connection Error'}
        except Exception as e:
            return {'ok': False, 'msg': u'failed to put {} to {} [{} / {}]'.format(src, target,type(e), e)}
def thread_setup(N, cnx, client, target=file_putter, cache=None, db_queue=None):
    """Create a bounded work queue plus N daemon worker threads for *target*.

    NOTE: the threads are returned un-started (the original t.start() call
    was commented out) -- the caller is expected to start them.

    :param N: int -- number of worker threads
    :param cnx: sqlite3.Connection -- database connection object
    :param client: IndigoClient -- appears thread-safe, so it is shared
    :param target: function run by each thread
    :param cache: _dirmgmt -- cache of found container names
    :return: [queue, [threads]]
    """
    work_queue = Queue(4096)
    workers = []
    for _ in range(N):
        worker = Thread(target=target, args=(work_queue, client, cnx, cache, db_queue))
        worker.setDaemon(True)
        workers.append(worker)
    return [work_queue, workers]
| StarcoderdataPython |
11293783 | <reponame>lawi21/escriptorium<gh_stars>1-10
# Generated by Django 2.2.23 on 2021-06-11 08:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.23): adds a nullable, self-referential
    # "parent" FK to OcrModel; SET_NULL preserves child models when their
    # parent is deleted.

    dependencies = [
        ('core', '0049_auto_20210526_1517'),
    ]
    operations = [
        migrations.AddField(
            model_name='ocrmodel',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.OcrModel'),
        ),
    ]
| StarcoderdataPython |
3344804 | from pprint import pprint
from configparser import ConfigParser
from powerbi.client import PowerBiClient
# Demo script: exercises the Power BI Dashboards service end-to-end.
# NOTE(review): the workspace/dashboard/tile GUIDs below are hard-coded to a
# specific tenant and must be replaced before running elsewhere; every call
# below performs a live API request.
# Initialize the Parser.
config = ConfigParser()
# Read the file.
config.read('config/config.ini')
# Get the specified credentials.
client_id = config.get('power_bi_api', 'client_id')
redirect_uri = config.get('power_bi_api', 'redirect_uri')
client_secret = config.get('power_bi_api', 'client_secret')
# Initialize the Client.
power_bi_client = PowerBiClient(
    client_id=client_id,
    client_secret=client_secret,
    scope=['https://analysis.windows.net/powerbi/api/.default'],
    redirect_uri=redirect_uri,
    credentials='config/power_bi_state.jsonc'
)
# Initialize the `Dashboards` service.
dashboard_service = power_bi_client.dashboards()
# Add a dashboard to our Workspace.
dashboard_service.add_dashboard(name='tradingRobot')
# Get all the dashboards in our Org.
pprint(dashboard_service.get_dashboards())
# Grab all the dashboards for a specific workspace.
pprint(
    dashboard_service.get_dashboard(
        dashboard_id='bf2c7d16-ec7b-40a2-ab56-f8797fdc5fb8'
    )
)
# Add a dashboard to a specific workspace.
pprint(
    dashboard_service.add_dashboard_in_group(
        name='my_new_dashboard',
        group_id='f78705a2-bead-4a5c-ba57-166794b05c78'
    )
)
# Grab all the dashboards for a specific workspace.
pprint(
    dashboard_service.get_group_dashboards(
        group_id='f78705a2-bead-4a5c-ba57-166794b05c78'
    )
)
# Grab a specific dashboard from a specific workspace.
pprint(
    dashboard_service.get_group_dashboard(
        group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
        dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358'
    )
)
# Grab all the tiles from a dashboard.
pprint(
    dashboard_service.get_tiles(
        dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358'
    )
)
# Grab all the tiles from a specific dashboard from a specific workspace.
pprint(
    dashboard_service.get_group_tiles(
        group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
        dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358'
    )
)
# Grab a specific tile from a specific dashboard.
pprint(
    dashboard_service.get_tile(
        dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
        tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70'
    )
)
# Grab a specific tile from a specific dashboard in a specific workspace.
pprint(
    dashboard_service.get_group_tile(
        group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
        dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
        tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70'
    )
)
# Clone a specific tile.
pprint(
    dashboard_service.clone_tile(
        dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
        tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70',
        target_dashboard_id='86cb0a0e-612d-4822-9a29-d83478e21199'
    )
)
# Clone a specific tile from a specific workspace.
pprint(
dashboard_service.clone_group_tile(
group_id='f78705a2-bead-4a5c-ba57-166794b05c78',
dashboard_id='1a0a15d9-67d1-4e97-b7bd-4f0ed4ec8358',
tile_id='093bfb85-828e-4705-bcf8-0126dd2d5d70',
target_dashboard_id='86cb0a0e-612d-4822-9a29-d83478e21199'
)
) | StarcoderdataPython |
12807504 | #!/usr/bin/env python3
from zencad import *
from globals import *
class Room(zencad.assemble.unit):
    """CAD model of the robot's enclosure ("room").

    Relies on wildcard imports: geometry primitives (box, cylinder, move,
    unify, stolb, ...) come from zencad, and T / ROOF_R / BORDER_T /
    LIGHT_HOLES / NUT_HOLE2 / cables_hole come from the project's globals
    module.  Numeric literals are raw dimensions -- presumably millimetres;
    TODO confirm against the rest of the project.
    """

    # Hole for the motor shaft, flattened on one side by the half-space cut.
    motor_hole = cylinder(r=5.5, h=T) - halfspace().rotateX(deg(90)).moveY(5)
    def __init__(self):
        super().__init__()
        self.t = T              # wall thickness
        self.roof_r = ROOF_R    # roof disc radius
        self.border_t = BORDER_T
        self.add(self.model())
    def model(self):
        """Build the full enclosure: walls, connector panel, roof, StereoPi posts."""
        y=100
        t=T
        x_ext = 6*t
        add_z_to_roof = 10
        add_z_to_down = 1
        panel = self.panel()
        # Overall box dimensions are derived from the panel's bounding box.
        z = panel.bbox().ymax - panel.bbox().ymin + 2*t + add_z_to_roof + add_z_to_down
        x = panel.bbox().xmax + x_ext
        k = panel.bbox().ymax - panel.bbox().ymin - 5 - 1.6 + t + add_z_to_roof
        # Hollow shell: outer box minus inner cavity (one wall left open).
        base = box(x,y,z) - box(x-2*t,y-2*t,z-t).move(t,t,t)
        base = base.moveZ(-t)
        panel = (panel.rotateX(deg(-90)).move(x_ext/2,y-t,z-t + panel.bbox().ymin- add_z_to_down))
        roof_trans = move(x/2, y/2, -t)
        # mounting posts for the StereoPi board
        #print(k)
        srad=2.5   # post outer radius
        ir = 1.8   # post screw-hole radius
        spi_kreps = union([
            stolb(srad,ir,7,k,angles=[deg(0),deg(180),deg(270)]).move(x_ext/2+srad,y-3-t*1-1,0),
            stolb(srad,ir,7,k,angles=[deg(0),deg(180),deg(270)]).move(x-x_ext/2-srad,y-3-t*1-1,0),
            stolb(srad,ir,7,k).move(x_ext/2+srad,y-3-t*1-1-35,0),
            stolb(srad,ir,7,k).move(x-x_ext/2-srad,y-3-t*1-1-35,0)
        ])
        # Cut the panel opening, insert the panel, replace the roof opening
        # with the detailed roof() part and add the mounting posts.
        f = unify(
            base - panel.bbox().shape() + panel
            - roof_trans(cylinder(r=ROOF_R, h=t))
            + roof_trans(self.roof())
            + spi_kreps
        )
        f = f.move(-x/2,-y/2,self.t).rotX(deg(180)).movZ(z)
        self.socket = zencad.assemble.unit(parent=self, location=moveZ(z))
        return f
    def panel(self):
        """2D connector cut-out plate, extruded to wall thickness T.

        NOTE(review): fw, fh, wl, wr, hb and ht are assigned but unused.
        """
        w=90
        h=18 + 2
        fw=95
        fh=32
        wl=4
        wr=4
        hb=5
        ht=3
        g = 5
        tz = 1.6
        # Base rectangle minus four connector windows (positions/sizes are
        # expressed as end-start differences along X).
        m= (
            rectangle(w, h+g).move(0,-g)
            - (
                rectangle(14-5, 6-tz).moveX(5)
                + rectangle(47-32, 15-tz).moveX(32)
                + rectangle(65-50, 18-tz).moveX(50)
                + rectangle(82-68, 8-tz).moveX(68)
            )
            .moveY(tz)
        )
        return unify(m).extrude(T)
    def roof(self):
        """Roof disc with light/motor/nut/cable holes plus a motor bracket.

        NOTE(review): r1 and c_h are assigned but unused.
        """
        r = self.roof_r + self.border_t
        r1 = r + 5
        t = self.t
        c_h = 10
        base = (cylinder(r=r, h=t)
            - LIGHT_HOLES
            - self.motor_hole
            - NUT_HOLE2
            - cables_hole()
        )
        # Motor-holder bracket: cross bar with two screw bosses (r=2.5)
        # drilled r=1 at +/- 15.5 mm from center.
        mh = (
            box(31,1,2,center=True)
            + box(1,15,2,center=True).moveY(7.5)
            + cylinder(r=2.5,h=2,center=True).moveX(31/2)
            + cylinder(r=2.5,h=2,center=True).moveX(-31/2)
            - cylinder(r=1,h=2,center=True).moveX(31/2)
            - cylinder(r=1,h=2,center=True).moveX(-31/2)
        ).up(t+1).moveY(5+0.5)
        return unify(base + mh)
if __name__ == "__main__":
room = Room()
disp(room)
show() | StarcoderdataPython |
9625816 | <gh_stars>10-100
# JN 2015-05-08 adding docstrings to this old, useful code
"""
Simple signal filtering for spike extraction
"""
from __future__ import absolute_import, division
import numpy as np
from scipy.signal import ellip, filtfilt
# pylint: disable=invalid-name, unbalanced-tuple-unpacking, E1101
# Pass-band edges in Hz for the two spike filters (see Quiroga et al. style
# spike sorting: a narrower band for detection, a wider band for extraction).
DETECT_LOW = 300  # default 300
DETECT_HIGH = 1000  # default 1000
EXTRACT_LOW = 300  # default 300
EXTRACT_HIGH = 3000  # default 3000


class DefaultFilter(object):
    """Elliptic band-pass/band-stop filters for spike detection/extraction.

    All filters are order-2 elliptic designs applied with ``filtfilt``
    (forward-backward), i.e. zero-phase filtering.
    """

    def __init__(self, timestep):
        """``timestep`` is the sampling interval in seconds."""
        self.sampling_rate = int(1. / timestep)
        self.timestep = timestep
        # Band edges are normalized to the Nyquist frequency: 2 * timestep * f.
        self.c_detect = ellip(2, .1, 40,
                              (2 * timestep * DETECT_LOW,
                               2 * timestep * DETECT_HIGH),
                              'bandpass')
        self.c_extract = ellip(2, .1, 40,
                               (2 * timestep * EXTRACT_LOW,
                                2 * timestep * EXTRACT_HIGH),
                               'bandpass')
        # Narrow band-stop around 2 kHz to suppress line-noise harmonics.
        self.c_notch = ellip(2, .5, 20,
                             (2 * timestep * 1999, 2 * timestep * 2001),
                             'bandstop')

    def _apply(self, coeffs, x):
        """Zero-phase filter ``x`` with the ``(b, a)`` pair ``coeffs``."""
        b, a = coeffs
        return filtfilt(b, a, x)

    def filter_detect(self, x):
        """
        filter for spike detection (band-pass DETECT_LOW..DETECT_HIGH Hz)
        """
        return self._apply(self.c_detect, x)

    def filter_extract(self, x):
        """
        filter for spike extraction (band-pass EXTRACT_LOW..EXTRACT_HIGH Hz)
        """
        return self._apply(self.c_extract, x)

    def filter_denoise(self, x):
        """
        notch filter to remove higher harmonics of 50/60 cycle
        """
        return self._apply(self.c_notch, x)
def nonlinear(x):
    """Smoothed nonlinear energy of ``x`` for spike detection.

    Computes ``y[n] = x[n]**2 + x[n-1] * x[n+1]`` for the interior samples
    and smooths the result by convolution with a 12-point Bartlett window.

    NOTE(review): the classic nonlinear (Teager) energy operator is
    ``x[n]**2 - x[n-1] * x[n+1]``; this implementation has always used
    ``+`` and that behavior is preserved here -- worth confirming.
    """
    # int32 arithmetic (with its wrap-around semantics) matches the
    # original scalar implementation.
    xo = np.asarray(x, dtype=np.int32)
    # Vectorized form of [xo[n]**2 + xo[n-1]*xo[n+1] for n in 1..len-2].
    y = xo[1:-1] ** 2 + xo[:-2] * xo[2:]
    window = np.bartlett(12)
    return np.convolve(y, window)
| StarcoderdataPython |
8087674 | <reponame>kevin-ci/janeric2
from django.test import TestCase, RequestFactory
from django_libs.tests.mixins import ViewTestMixin
from products.forms import ProductForm, ProductFamilyForm
from products.models import Category, Product_Family, Product
from .factories import (
CategoryFactory,
Product_FamilyFactory,
ProductFactory,
)
class ProductFormTestCase(ViewTestMixin, TestCase):
    """Validation tests for ``ProductForm``."""

    def test_form_valid(self):
        """The form validates when name, category and family are given."""
        product = ProductFactory()
        data = {
            'name': product.name,
            'category': product.category,
            'product_family': product.product_family,
        }
        form = ProductForm(data=data)
        self.assertTrue(form.is_valid())

    def test_form_not_valid(self):
        """The form is rejected when the required name is blank."""
        product = ProductFactory()
        data = {
            'name': "",
            'category': product.category,
            'product_family': product.product_family,
        }
        form = ProductForm(data=data)
        self.assertFalse(form.is_valid())
class ProductFamilyFormTestCase(ViewTestMixin, TestCase):
    """Validation tests for ``ProductFamilyForm``."""

    def test_form_valid(self):
        """The form validates when both name and brand name are given."""
        product_family = Product_FamilyFactory()
        data = {
            'name': product_family.name,
            'brand_name': product_family.brand_name,
        }
        form = ProductFamilyForm(data=data)
        self.assertTrue(form.is_valid())

    def test_form_not_valid(self):
        """The form is rejected when the required name is blank."""
        product_family = Product_FamilyFactory()
        data = {
            'name': "",
            'brand_name': product_family.brand_name,
        }
        form = ProductFamilyForm(data=data)
        self.assertFalse(form.is_valid())
| StarcoderdataPython |
from config import get_config
from geoserver.catalog import Catalog
# REST client for the GeoServer instance described in the app config.
geoserver = Catalog(
    get_config('geoserver.restUrl'),
    get_config('geoserver.user'),
    get_config('geoserver.password'),
)
# Create the workspace if it does not exist; a workspace is mandatory
# to work with GeoServer.
workspace_name = get_config('geoserver.workspace')
workspace = geoserver.get_workspace(workspace_name)
if workspace is None:
    geoserver.create_workspace(
        workspace_name,
        get_config('geoserver.hostUrl') + workspace_name
    )
    # Refresh the catalog so the new workspace is visible on this connection.
    geoserver.reload()
# Module-level handles imported by the rest of the application.
# NOTE(review): workspace_obj stays None when the workspace was just
# created above -- confirm callers handle that (or re-fetch it here).
geoserver_connection = geoserver
workspace_obj = workspace
from scenes.leve1.main_scene import MainScene
# class ScenesManager:
# def __init__(self, wind):
# self.win = wind
# self.scenes = [MainScene(wind)]
# self.current_scene = self.scenes[0]
#
# def draw(self):
# self.current_scene.action()
# self.current_scene.draw()
#
# def eventAction(self, ev):
# if ev.type == self.enemy_creat:
# self.enemys.append(EnemyPlance(self.scene)) | StarcoderdataPython |
4984027 |
class SqlaJsonTranscoder(object):
    """Abstract base: encodes/decodes an object to a "flat" JSON form.

    The flat form matches SQL tables supported by SQLAlchemy: the dict
    produced by ``object_to_flat_jdata`` feeds ``insert().values(...)``
    in insertion queries, and ``flat_jdata_to_object`` turns a query
    result row back into a python object. Subclasses must override both
    methods (``NotImplementedError`` is still caught by callers that
    caught the previous generic ``Exception``).
    """

    def object_to_flat_jdata(self, dto):
        """Convert ``dto`` to a flat dict that maps to a relational table.

        Arrays of primitives are allowed as values, so long as the
        corresponding table column in the database is of type ARRAY.
        """
        raise NotImplementedError(
            "SqlaJsonTranscoder.object_to_flat_jdata must be implemented "
            "by a subclass")

    def flat_jdata_to_object(self, jdata):
        """Rebuild a python object from the flat ``jdata`` dict."""
        raise NotImplementedError(
            "SqlaJsonTranscoder.flat_jdata_to_object must be implemented "
            "by a subclass")
# AUTOGENERATED! DO NOT EDIT! File to edit: 08_contrastive_loss.ipynb (unless otherwise specified).
__all__ = ['TripletLoss', 'ContrastiveLoss', 'CosineContrastiveLoss', 'batched_labels', 'XentOldContrastiveLoss',
'XentLoss', 'XentContrastiveLoss', 'XentContrastiveLoss2', 'BatchContrastiveLoss']
# Cell
from .imports import *
# Cell
class TripletLoss(nn.Module):
    """Margin-based triplet loss on squared Euclidean distances.

    Takes embeddings of an anchor, a positive and (optionally) a negative
    sample; when no negatives are given, the positives in reverse batch
    order are used as negatives.
    """

    def __init__(self, margin=0.5):
        super(TripletLoss, self).__init__()
        self.margin = margin

    def forward(self, anchor, positive, negative=None, size_average=True):
        if negative is None:
            # Default negatives: the positive batch flipped end-to-end.
            negative = positive.flip(dims=[0])
        pos_dist = (anchor - positive).pow(2).sum(1)
        neg_dist = (anchor - negative).pow(2).sum(1)
        hinge = F.relu(pos_dist - neg_dist + self.margin)
        if size_average:
            return hinge.mean()
        return hinge.sum()
# Cell
# https://github.com/adambielski/siamese-triplet/blob/master/losses.py
class ContrastiveLoss(nn.Module):
    """Classic contrastive loss over embedding pairs.

    Takes embeddings of two samples and a target label: 1 when both come
    from the same class (pull together), 0 otherwise (push apart up to
    ``margin``).
    """

    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        self.eps = 1e-9  # keeps sqrt differentiable at zero distance

    def forward(self, output1, output2, target, size_average=True):
        sq_dist = (output2 - output1).pow(2).sum(-1)
        pull = target.float() * sq_dist
        gap = F.relu(self.margin - (sq_dist + self.eps).sqrt())
        push = (1 - target).float() * gap.pow(2)
        losses = pull + push
        if size_average:
            return losses.mean()
        return losses.sum()
# Cell
# https://stackoverflow.com/questions/47107589/how-do-i-use-a-bytetensor-in-a-contrastive-cosine-loss-function
class CosineContrastiveLoss(nn.Module):
    """Contrastive loss on cosine similarity instead of Euclidean distance.

    Positive pairs (label == 1) pay ``(1 - cos)^2 / 4``; negative pairs
    pay ``cos^2`` whenever the similarity is below ``margin``.
    """

    def __init__(self, margin=2.0):
        super().__init__()
        self.margin = margin

    def forward(self, output1, output2, label):
        sim = F.cosine_similarity(output1, output2, dim=-1)
        pos_term = label * torch.div(torch.pow(1.0 - sim, 2), 4)
        # torch.lt(...) masks out negatives already separated past the margin.
        neg_term = (1 - label) * torch.pow(sim * torch.lt(sim, self.margin), 2)
        return torch.mean(pos_term + neg_term)
# Cell
def batched_labels(output1, output2, onehot=True):
    """Tile ``output1`` for all-pairs comparison against ``output2``.

    Returns ``(o1, output2, labels)`` where ``o1`` has shape
    ``(bs, *output1.shape)`` with ``o1[i, j] == output1[i]``, and
    ``labels`` is the identity matrix (``onehot=True``) or the integer
    range ``0..bs-1``.
    """
    bs = output1.shape[0]
    tiled = output1.unsqueeze(1).expand(bs, bs, *output1.shape[1:]).contiguous()
    if onehot:
        labels = torch.eye(bs, device=output1.device)
    else:
        labels = torch.arange(bs, device=output1.device)
    return tiled, output2, labels
# Cell
# https://arxiv.org/pdf/2002.05709.pdf
class XentOldContrastiveLoss(nn.Module):
    """Cross-entropy over temperature-scaled cosine similarities.

    Expects integer class labels (one per row of the similarity matrix),
    as produced by ``batched_labels(..., onehot=False)``.
    """

    def __init__(self, temp=0.5):
        super().__init__()
        self.temp = temp

    def forward(self, output1, output2, labels):
        logits = F.cosine_similarity(output1, output2, dim=-1) / self.temp
        return F.cross_entropy(logits, labels.long())
# Cell
from pytorch_metric_learning import losses
class XentLoss(losses.NTXentLoss):
    """Thin adapter around pytorch-metric-learning's NT-Xent loss.

    Takes two aligned embedding batches (row i of ``output1`` and row i
    of ``output2`` form a positive pair), stacks them and builds the
    matching integer labels expected by ``NTXentLoss``.
    """
    def forward(self, output1, output2):
        # Rows i and i + batch_size share label i, marking them positives.
        stacked = torch.cat((output1, output2), dim=0)
        labels = torch.arange(output1.shape[0]).repeat(2)
        return super().forward(stacked, labels, None)
# Cell
# https://arxiv.org/pdf/2002.05709.pdf
class XentContrastiveLoss(nn.Module):
    """NT-Xent-style contrastive loss over one-hot labels.

    For each row, the log-ratio of every pair's exp-similarity to the sum
    over its negative pairs is accumulated at the positive positions.
    (Fixed: removed a leftover debug ``print`` of the per-row losses.)
    """

    def __init__(self, temp=0.5):
        super().__init__()
        self.temp = temp

    def forward(self, output1, output2, labels):
        cos_sim = F.cosine_similarity(output1, output2, dim=-1) / self.temp
        cexp = torch.exp(cos_sim)
        # Denominator: exp-similarities of the negative pairs only.
        neg_denom = (cexp * (1 - labels)).sum(dim=-1)
        lsoft = torch.log(cexp / neg_denom)
        lsoft = torch.sum(-labels * lsoft, dim=-1)
        return lsoft.mean()
# Cell
class XentContrastiveLoss2(nn.Module):
    """Variant of :class:`XentContrastiveLoss` with a pooled denominator.

    Per row, the positive exp-similarities are summed into ``x`` and
    compared against the sum over all remaining (negative) pairs.
    (Fixed: removed a leftover debug ``print`` of the per-row losses.)
    """

    def __init__(self, temp=0.5):
        super().__init__()
        self.temp = temp

    def forward(self, output1, output2, labels):
        cos_sim = F.cosine_similarity(output1, output2, dim=-1) / self.temp
        cexp = torch.exp(cos_sim)
        x = (cexp * labels).sum(dim=-1)
        # Everything that is not a positive pair forms the denominator.
        denom = cexp.sum(dim=-1) - x
        lsoft = -torch.log(x / denom)
        return lsoft.mean()
# Cell
class BatchContrastiveLoss(nn.Module):
    """Apply a pairwise contrastive loss to every pair in a batch.

    Tiles the first embedding batch via ``batched_labels`` and forwards
    the all-pairs tensors plus labels to the wrapped loss.
    """

    def __init__(self, loss_func):
        super().__init__()
        self.loss_func = loss_func
        # XentOldContrastiveLoss expects integer class labels; every other
        # wrapped loss gets one-hot label rows.
        self.onehot = not isinstance(loss_func, XentOldContrastiveLoss)

    def forward(self, output1, output2):
        tiled, other, labels = batched_labels(output1, output2, self.onehot)
        return self.loss_func(tiled, other, labels)
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
"""
This file defines the LSTM cell class (BasicLSTMUnit).
"""
import numpy as np
from paddle import fluid
from paddle.fluid import dygraph
from paddle.fluid import layers
class BasicLSTMUnit(dygraph.Layer):
    """Single LSTM cell built from basic Paddle fluid operators.

    The computation per step is:

    .. math::

        i_t &= \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + b_i)

        f_t &= \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + b_f + forget\_bias)

        o_t &= \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + b_o)

        \\tilde{c_t} &= tanh(W_{cx}x_t + W_{ch}h_{t-1} + b_c)

        c_t &= f_t \odot c_{t-1} + i_t \odot \\tilde{c_t}

        h_t &= o_t \odot tanh(c_t)

    where :math:`i, f, o, c` are the input, forget, output gates and cell
    state, all sized like the hidden state :math:`h`; :math:`\odot` is the
    element-wise product. A single weight matrix of shape
    ``[input_size + hidden_size, 4 * hidden_size]`` holds all four gates.

    Args:
        hidden_size (int): hidden state size of the cell.
        input_size (int): size of each input step.
        param_attr (ParamAttr|None): attribute for the learnable weight
            matrix; when None the default initializer (Xavier) is used.
        bias_attr (ParamAttr|None): attribute for the bias; when None the
            bias is zero-initialized.
        gate_activation (callable|None): gate activation, defaults to
            ``fluid.layers.sigmoid``.
        activation (callable|None): cell activation, defaults to
            ``fluid.layers.tanh``.
        forget_bias (float): constant added to the forget gate pre-activation
            (default 1.0, the usual "remember by default" trick).
        dtype (str): data type used in this unit.

    TODO:
        Replace this class with the official implementation.
    """
    def __init__(self,
                 hidden_size,
                 input_size,
                 param_attr=None,
                 bias_attr=None,
                 gate_activation=None,
                 activation=None,
                 forget_bias=1.0,
                 dtype='float32'):
        super(BasicLSTMUnit, self).__init__(dtype)
        # NOTE: '_hiden_size' typo is kept -- the attribute name is used below.
        self._hiden_size = hidden_size
        self._param_attr = param_attr
        self._bias_attr = bias_attr
        self._gate_activation = gate_activation or layers.sigmoid
        self._activation = activation or layers.tanh
        # forget_bias is stored as a learnable-graph constant tensor.
        self._forget_bias = layers.fill_constant([1], dtype=dtype, value=forget_bias)
        self._forget_bias.stop_gradient = False
        self._dtype = dtype
        self._input_size = input_size
        # One fused weight for all four gates: [in + hidden, 4 * hidden].
        self._weight = self.create_parameter(attr=self._param_attr,
                                             shape=[self._input_size + self._hiden_size, 4 * self._hiden_size],
                                             dtype=self._dtype)
        self._bias = self.create_parameter(attr=self._bias_attr,
                                           shape=[4 * self._hiden_size],
                                           dtype=self._dtype,
                                           is_bias=True)
    def forward(self, input, pre_hidden, pre_cell):
        """One LSTM step; returns ``(new_hidden, new_cell)``."""
        # Fused gate pre-activations: [x_t, h_{t-1}] @ W + b.
        concat_input_hidden = layers.concat([input, pre_hidden], 1)
        gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)
        gate_input = layers.elementwise_add(gate_input, self._bias)
        # Split into input (i), candidate (j), forget (f) and output (o) gates.
        i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
        # c_t = sigmoid(f + forget_bias) * c_{t-1} + sigmoid(i) * tanh(j)
        new_cell = layers.elementwise_add(
            layers.elementwise_mul(pre_cell, layers.sigmoid(layers.elementwise_add(f, self._forget_bias))),
            layers.elementwise_mul(layers.sigmoid(i), layers.tanh(j)))
        # h_t = tanh(c_t) * sigmoid(o)
        new_hidden = layers.tanh(new_cell) * layers.sigmoid(o)
        return (new_hidden, new_cell)
| StarcoderdataPython |
5079548 | <reponame>anton-sidelnikov/openstacksdk<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class Limit(resource.Resource):
    """Absolute limits and current usage for a project's shares.

    Read-only, list-only resource: each entry reports the maximum allowed
    and currently used counts/gigabytes for shares, snapshots, replicas
    and share networks.
    """
    resources_key = "limits"
    base_path = "/limits"
    # capabilities: the API only supports listing limits.
    allow_create = False
    allow_fetch = False
    allow_commit = False
    allow_delete = False
    allow_list = True
    allow_head = False
    #: Properties
    #: The maximum number of replica gigabytes that are allowed
    #: in a project.
    maxTotalReplicaGigabytes = resource.Body(
        "maxTotalReplicaGigabytes", type=int)
    #: The total maximum number of shares that are allowed in a project.
    maxTotalShares = resource.Body("maxTotalShares", type=int)
    #: The total maximum number of share gigabytes that are allowed in a
    #: project.
    maxTotalShareGigabytes = resource.Body(
        "maxTotalShareGigabytes", type=int)
    #: The total maximum number of share-networks that are allowed in a
    #: project.
    maxTotalShareNetworks = resource.Body(
        "maxTotalShareNetworks", type=int)
    #: The total maximum number of share snapshots that are allowed in a
    #: project.
    maxTotalShareSnapshots = resource.Body(
        "maxTotalShareSnapshots", type=int)
    #: The maximum number of share replicas that is allowed.
    maxTotalShareReplicas = resource.Body(
        "maxTotalShareReplicas", type=int)
    #: The total maximum number of snapshot gigabytes that are allowed
    #: in a project.
    maxTotalSnapshotGigabytes = resource.Body(
        "maxTotalSnapshotGigabytes", type=int)
    #: The total number of replica gigabytes used in a project by
    #: share replicas.
    totalReplicaGigabytesUsed = resource.Body(
        "totalReplicaGigabytesUsed", type=int)
    #: The total number of gigabytes used in a project by shares.
    totalShareGigabytesUsed = resource.Body(
        "totalShareGigabytesUsed", type=int)
    #: The total number of created shares in a project.
    totalSharesUsed = resource.Body(
        "totalSharesUsed", type=int)
    #: The total number of created share-networks in a project.
    totalShareNetworksUsed = resource.Body(
        "totalShareNetworksUsed", type=int)
    #: The total number of created share snapshots in a project.
    totalShareSnapshotsUsed = resource.Body(
        "totalShareSnapshotsUsed", type=int)
    #: The total number of gigabytes used in a project by snapshots.
    totalSnapshotGigabytesUsed = resource.Body(
        "totalSnapshotGigabytesUsed", type=int)
    #: The total number of created share replicas in a project.
    totalShareReplicasUsed = resource.Body(
        "totalShareReplicasUsed", type=int)
| StarcoderdataPython |
6605984 | <reponame>andrewp-as-is/django-postgres-drop-index.py
from django.core.management.base import BaseCommand
from django_postgres_drop_index.utils import drop_schema_indexes
class Command(BaseCommand):
    """Management command: drop indexes in the given PostgreSQL schemas."""

    def add_arguments(self, parser):
        # Accept one or more schema names on the command line.
        parser.add_argument('schemaname', nargs='+')

    def handle(self, *args, **options):
        for schema in options['schemaname']:
            drop_schema_indexes(schema)
| StarcoderdataPython |
import data_fetcher
import os
from skimage import io
# You need to implement a function that defines how the data is prepared,
# this function will be called in the data thread, and a arg will be passed when it's called,
# so even if you don't need any args, keep an args variable for the function,
# if you have mutiple args, pack them in a tuple or list.
def prep_func(args):
    """Load one image for the data-fetcher thread.

    ``args`` is a ``(index, image_path)`` pair; returns ``(index, data)``
    with the image loaded from disk, or ``(None, None)`` when either part
    is None (the end-of-stream sentinel passed by the main loop).
    """
    index, img_path = args[0], args[1]
    if index is None or img_path is None:
        # Remember to consider the ending cases.
        return None, None
    return index, io.imread(img_path)
# Commonly it is better to let the data preparing thread start before the main thread, so the main thread need not to wait for data when it starts
# you can put the params needed in a list, the data thread will call the prepare function with each of the params in the list.
pre_count = 3 # how many items are prefetched in advance
# Seed the fetcher with the first `pre_count` (index, path) work items so the
# data thread starts before the main loop needs its first result.
init_params = []
img_lst = ['./imgs/' + x for x in os.listdir('./imgs/')]
for i in range(pre_count):
    index = i
    data = img_lst[i]
    init_params.append((index,data)) # pack multiple args in a tuple
df = data_fetcher.data_fetcher(prep_func,mute = True) # mute controls whether the data_fetcher prints information
df.start(init_params)# start the data thread with the initial params
try:
    for i in range(100):
        if i < 100 - pre_count:
            # Fetch a prepared item and queue the args for item i + pre_count.
            index,data = df.get((i + pre_count,img_lst[i + pre_count]))
        else:
            index,data = df.get((None,None))# handle the ending case if necessary
        print index, data.shape # do some job with the data fetched.
finally:
    df.over() # call over to notify the data thread to exit
1632167 | """
Issue:
NOTE using Python 2.4, this results in an exe about 4Mb in size.
NOTE using Python 2.6, this results in an exe about 5.5Mb in size.
E:\Python24\python.exe p2_setup.py py2exe
c:\python24\python p2_setup.py py2exe
setup.py py2exe
Quick-N-Dirty create win32 binaries and zip file script.
Zero error checking.
TODO inject 'py2exe' into sys.argv?
"""
import os
import sys
import glob
import shutil
from distutils.core import setup
try:
    import py2exe
except ImportError:
    # either non-Windows or py2exe just not installed
    py2exe = None
# Clean temp Python/Jython files
delete_list = glob.glob('simplejson/*.pyc') + glob.glob('simplejson/*$py.class')
for x in delete_list:
    os.remove(x)
# Remove any previous build output; WindowsError means it was not there.
try:
    shutil.rmtree('dist')
except WindowsError, info:
    # assume directory does not exist
    pass
print 'nasty copy hack'
shutil.copy2(os.path.join('rbtools', 'postreview.py'), 'postreview.py')
# With no explicit command, default to 'py2exe' when available, else 'sdist'.
if len(sys.argv) == 1:
    if py2exe:
        print 'defaulting to creating py2exe'
        sys.argv += ['py2exe']
    else:
        print 'py2exe not available'
        sys.argv += ['sdist']
# disable optimization- we _may_ need docs strings, specifically "copyright"
setup(
    options={"py2exe": {
        #"includes": ["decimal"],
        "optimize": 1, # 1 and NOT 2 because I use the __doc__ string as the usage string. 2 optimises out the doc strings
        'bundle_files': 1,
        ## options to reduce size of final exe
        #~ 'ascii': True, # Exclude encodings
        'excludes': [
            '_ssl', # Exclude _ssl
            'pyreadline', # 'difflib',
            'doctest', # 'locale',
            #'optparse',
            'pickle', # 'calendar',# Exclude standard library
            #'re',
        ],
    }
    },
    zipfile=None, # try and make a single exe, if do not want this loose this and the 'bundle_files' option
    console=['postreview.py']
)
# Bundle the exe, helper binaries and sources into one distributable zip.
zipfilename = 'distribute_me.zip'
zipfilelist = ['p2_readme.txt', '__main__.py', 'postreview.py', os.path.join('win32bin', 'diff.exe'), os.path.join('win32bin', 'p.exe')] + glob.glob('rbtools/*.py') + glob.glob('rbtools/*/*.py') + glob.glob('simplejson/*') + glob.glob('dist/*')
import zipfile
z = zipfile.ZipFile(zipfilename, 'w')
for x in zipfilelist:
    z.write(x)
z.close()
print 'Created:', zipfilename
3241214 | <reponame>hashnfv/hashnfv-functest<filename>functest/tests/unit/openstack/tempest/test_conf_utils.py<gh_stars>0
#!/usr/bin/env python
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
import logging
import unittest
import mock
from functest.opnfv_tests.openstack.tempest import tempest, conf_utils
from functest.utils.constants import CONST
from snaps.openstack.os_credentials import OSCreds
class OSTempestConfUtilsTesting(unittest.TestCase):
    """Unit tests for tempest conf_utils and TempestResourcesManager.

    External calls (snaps deploy utils, subprocess, file I/O, ConfigParser)
    are mocked throughout; each test checks either the raised error message
    or that the expected mocked call happened.
    """
    def setUp(self):
        # Minimal credentials object reused by resource-creation tests.
        self.os_creds = OSCreds(
            username='user', password='<PASSWORD>',
            auth_url='http://foo.com:5000/v3', project_name='bar')
    @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
                return_value=None)
    @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
                return_value=mock.Mock())
    def test_create_tempest_resources_missing_network_dic(self, *mock_args):
        """create() must fail when the private network cannot be created."""
        tempest_resources = tempest.TempestResourcesManager(os_creds={})
        with self.assertRaises(Exception) as context:
            tempest_resources.create()
        msg = 'Failed to create private network'
        self.assertTrue(msg in context.exception)
    @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
                return_value=None)
    def test_create_tempest_resources_missing_image(self, *mock_args):
        """create() must fail when the image cannot be created."""
        tempest_resources = tempest.TempestResourcesManager(os_creds={})
        # NOTE(review): 'tempest_use_custom_imagess' looks misspelled
        # (double 's') -- confirm which CONST key the code under test reads.
        CONST.__setattr__('tempest_use_custom_imagess', True)
        with self.assertRaises(Exception) as context:
            tempest_resources.create()
        msg = 'Failed to create image'
        self.assertTrue(msg in context.exception, msg=str(context.exception))
        CONST.__setattr__('tempest_use_custom_imagess', False)
        with self.assertRaises(Exception) as context:
            tempest_resources.create(use_custom_images=True)
        msg = 'Failed to create image'
        self.assertTrue(msg in context.exception, msg=str(context.exception))
    @mock.patch('snaps.openstack.utils.deploy_utils.create_project',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_user',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_network',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.utils.deploy_utils.create_image',
                return_value=mock.Mock())
    @mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create',
                return_value=None)
    def test_create_tempest_resources_missing_flavor(self, *mock_args):
        """create() must fail when the custom flavor cannot be created."""
        tempest_resources = tempest.TempestResourcesManager(
            os_creds=self.os_creds)
        CONST.__setattr__('tempest_use_custom_images', True)
        CONST.__setattr__('tempest_use_custom_flavors', True)
        with self.assertRaises(Exception) as context:
            tempest_resources.create()
        msg = 'Failed to create flavor'
        self.assertTrue(msg in context.exception, msg=str(context.exception))
        CONST.__setattr__('tempest_use_custom_images', True)
        CONST.__setattr__('tempest_use_custom_flavors', False)
        with self.assertRaises(Exception) as context:
            tempest_resources.create(use_custom_flavors=True)
        msg = 'Failed to create flavor'
        self.assertTrue(msg in context.exception, msg=str(context.exception))
    def test_get_verifier_id_missing_verifier(self):
        """get_verifier_id() raises when rally reports no verifier."""
        CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.subprocess.Popen') as mock_popen, \
                self.assertRaises(Exception):
            mock_stdout = mock.Mock()
            attrs = {'stdout.readline.return_value': ''}
            mock_stdout.configure_mock(**attrs)
            mock_popen.return_value = mock_stdout
            conf_utils.get_verifier_id(),
    def test_get_verifier_id_default(self):
        """get_verifier_id() returns the id read from rally's stdout."""
        CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.subprocess.Popen') as mock_popen:
            mock_stdout = mock.Mock()
            attrs = {'stdout.readline.return_value': 'test_deploy_id'}
            mock_stdout.configure_mock(**attrs)
            mock_popen.return_value = mock_stdout
            self.assertEqual(conf_utils.get_verifier_id(),
                             'test_deploy_id')
    def test_get_verifier_deployment_id_missing_rally(self):
        """get_verifier_deployment_id() raises when rally output is empty."""
        CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.subprocess.Popen') as mock_popen, \
                self.assertRaises(Exception):
            mock_stdout = mock.Mock()
            attrs = {'stdout.readline.return_value': ''}
            mock_stdout.configure_mock(**attrs)
            mock_popen.return_value = mock_stdout
            conf_utils.get_verifier_deployment_id(),
    def test_get_verifier_deployment_id_default(self):
        """get_verifier_deployment_id() returns the id from rally stdout."""
        CONST.__setattr__('tempest_deployment_name', 'test_deploy_name')
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.subprocess.Popen') as mock_popen:
            mock_stdout = mock.Mock()
            attrs = {'stdout.readline.return_value': 'test_deploy_id'}
            mock_stdout.configure_mock(**attrs)
            mock_popen.return_value = mock_stdout
            self.assertEqual(conf_utils.get_verifier_deployment_id(),
                             'test_deploy_id')
    def test_get_verifier_repo_dir_default(self):
        """get_verifier_repo_dir('') builds the path via os.path.join."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.os.path.join',
                        return_value='test_verifier_repo_dir'), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.get_verifier_id') as m:
            self.assertEqual(conf_utils.get_verifier_repo_dir(''),
                             'test_verifier_repo_dir')
            self.assertTrue(m.called)
    def test_get_verifier_deployment_dir_default(self):
        """get_verifier_deployment_dir() resolves both ids then joins."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.os.path.join',
                        return_value='test_verifier_repo_dir'), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.get_verifier_id') as m1, \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.get_verifier_deployment_id') as m2:
            self.assertEqual(conf_utils.get_verifier_deployment_dir('', ''),
                             'test_verifier_repo_dir')
            self.assertTrue(m1.called)
            self.assertTrue(m2.called)
    def test_backup_tempest_config_default(self):
        """backup_tempest_config() creates the dir if needed, then copies."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.os.path.exists',
                        return_value=False), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.os.makedirs') as m1, \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.shutil.copyfile') as m2:
            conf_utils.backup_tempest_config('test_conf_file')
            self.assertTrue(m1.called)
            self.assertTrue(m2.called)
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.os.path.exists',
                        return_value=True), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.shutil.copyfile') as m2:
            conf_utils.backup_tempest_config('test_conf_file')
            self.assertTrue(m2.called)
    def test_configure_tempest_default(self):
        """configure_tempest() delegates to configure_tempest_update_params."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.configure_verifier',
                        return_value='test_conf_file'), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.configure_tempest_update_params') as m1:
            conf_utils.configure_tempest('test_dep_dir')
            self.assertTrue(m1.called)
    def test_configure_tempest_defcore_default(self):
        """configure_tempest_defcore() writes image/flavor refs to the conf."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.configure_verifier',
                        return_value='test_conf_file'), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.configure_tempest_update_params'), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.ConfigParser.RawConfigParser.'
                           'set') as mset, \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.ConfigParser.RawConfigParser.'
                           'read') as mread, \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.ConfigParser.RawConfigParser.'
                           'write') as mwrite, \
                mock.patch('__builtin__.open', mock.mock_open()), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.generate_test_accounts_file'), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.shutil.copyfile'):
            conf_utils.configure_tempest_defcore(
                'test_dep_dir', 'test_image_id', 'test_flavor_id',
                'test_image_alt_id', 'test_flavor_alt_id', 'test_tenant_id')
            mset.assert_any_call('compute', 'image_ref', 'test_image_id')
            mset.assert_any_call('compute', 'image_ref_alt',
                                 'test_image_alt_id')
            mset.assert_any_call('compute', 'flavor_ref', 'test_flavor_id')
            mset.assert_any_call('compute', 'flavor_ref_alt',
                                 'test_flavor_alt_id')
            self.assertTrue(mread.called)
            self.assertTrue(mwrite.called)
    def test_generate_test_accounts_file_default(self):
        """generate_test_accounts_file() dumps YAML for the tenant."""
        with mock.patch("__builtin__.open", mock.mock_open()), \
            mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
                       'yaml.dump') as mock_dump:
            conf_utils.generate_test_accounts_file('test_tenant_id')
            self.assertTrue(mock_dump.called)
    def _test_missing_param(self, params, image_id, flavor_id):
        """Shared helper: run configure_tempest_update_params with the given
        image/flavor ids and assert the (section, key, value) triple in
        ``params`` was written to the tempest config."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.ConfigParser.RawConfigParser.'
                        'set') as mset, \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.ConfigParser.RawConfigParser.'
                           'read') as mread, \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.ConfigParser.RawConfigParser.'
                           'write') as mwrite, \
                mock.patch('__builtin__.open', mock.mock_open()), \
                mock.patch('functest.opnfv_tests.openstack.tempest.'
                           'conf_utils.backup_tempest_config'), \
                mock.patch('functest.utils.functest_utils.yaml.safe_load',
                           return_value={'validation': {'ssh_timeout': 300}}):
            CONST.__setattr__('OS_ENDPOINT_TYPE', None)
            conf_utils.\
                configure_tempest_update_params('test_conf_file',
                                                image_id=image_id,
                                                flavor_id=flavor_id)
            mset.assert_any_call(params[0], params[1], params[2])
            self.assertTrue(mread.called)
            self.assertTrue(mwrite.called)
    def test_configure_tempest_update_params_missing_image_id(self):
        """image_ref is taken from the explicit image_id argument."""
        CONST.__setattr__('tempest_use_custom_images', True)
        self._test_missing_param(('compute', 'image_ref',
                                  'test_image_id'), 'test_image_id',
                                 None)
    def test_configure_tempest_update_params_missing_image_id_alt(self):
        """image_ref_alt falls back to the module-level IMAGE_ID_ALT."""
        CONST.__setattr__('tempest_use_custom_images', True)
        conf_utils.IMAGE_ID_ALT = 'test_image_id_alt'
        self._test_missing_param(('compute', 'image_ref_alt',
                                  'test_image_id_alt'), None, None)
    def test_configure_tempest_update_params_missing_flavor_id(self):
        """flavor_ref is taken from the explicit flavor_id argument."""
        CONST.__setattr__('tempest_use_custom_flavors', True)
        self._test_missing_param(('compute', 'flavor_ref',
                                  'test_flavor_id'), None,
                                 'test_flavor_id')
    def test_configure_tempest_update_params_missing_flavor_id_alt(self):
        """flavor_ref_alt falls back to the module-level FLAVOR_ID_ALT."""
        CONST.__setattr__('tempest_use_custom_flavors', True)
        conf_utils.FLAVOR_ID_ALT = 'test_flavor_id_alt'
        self._test_missing_param(('compute', 'flavor_ref_alt',
                                  'test_flavor_id_alt'), None,
                                 None)
    def test_configure_verifier_missing_temp_conf_file(self):
        """configure_verifier() raises when tempest.conf was not generated."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.os.path.isfile',
                        return_value=False), \
            mock.patch('functest.opnfv_tests.openstack.tempest.'
                       'conf_utils.ft_utils.execute_command') as mexe, \
                self.assertRaises(Exception) as context:
            conf_utils.configure_verifier('test_dep_dir')
            mexe.assert_any_call("rally verify configure-verifier")
            msg = ("Tempest configuration file 'test_dep_dir/tempest.conf'"
                   " NOT found.")
            self.assertTrue(msg in context)
    def test_configure_verifier_default(self):
        """configure_verifier() returns the conf path and calls rally."""
        with mock.patch('functest.opnfv_tests.openstack.tempest.'
                        'conf_utils.os.path.isfile',
                        return_value=True), \
            mock.patch('functest.opnfv_tests.openstack.tempest.'
                       'conf_utils.ft_utils.execute_command') as mexe:
            self.assertEqual(conf_utils.configure_verifier('test_dep_dir'),
                             'test_dep_dir/tempest.conf')
            mexe.assert_any_call("rally verify configure-verifier "
                                 "--reconfigure")
if __name__ == "__main__":
    # Silence log output while the unit tests run.
    logging.disable(logging.CRITICAL)
    unittest.main(verbosity=2)
| StarcoderdataPython |
321471 | import argparse
import sys
import time
from itertools import izip
from string import ascii_uppercase
import numpy as np
from .volume import Volume
from .structure import Ligand, Structure
from .transformer import Transformer
from .solvers import QPSolver, MIQPSolver
from .validator import Validator
def parse_args():
    """Parse command-line arguments for the occupancy-solver script.

    Side effect: ``args.info`` is replaced by an open, writable stream
    (stdout by default, or the file named via ``--info``).
    """
    p = argparse.ArgumentParser(description="Determine occupancies from ligand set.")
    p.add_argument("xmap", help="CCP4 map file with P1 symmetry.")
    p.add_argument("resolution", type=float, help="Map resolution in angstrom.")
    p.add_argument("ligands", nargs="+", help="PDB files containing ligand.")
    # Map preparation routines
    #p.add_argument("-ns", "--no-scale", action="store_true",
    #        help="Do not scale the density.")
    p.add_argument("-r", "--receptor", type=str, default=None,
            help="PDB file with receptor used to scale and prepare the density.")
    p.add_argument("-dc", "--density-cutoff", type=float, default=0.0,
            help="Density value to use as cutoff in sigma.")
    p.add_argument("-c", "--cardinality", type=int, default=5,
            help="Cardinality constraint during MIQP.")
    p.add_argument("-t", "--threshold", type=float, default=0.2,
            help="Threshold constraint during MIQP.")
    p.add_argument("-o", "--output", default="solve.pdb",
            help="Name of output PDB file.")
    p.add_argument("-i", "--info", default=None,
            help="File to write info results to.")
    args = p.parse_args()
    if args.info is None:
        args.info = sys.stdout
    else:
        # NOTE(review): this handle is never explicitly closed.
        args.info = open(args.info, 'w')
    return args
def scale_map(args, xmap, rmask):
    """Scale the experimental map against a model density, in place.

    A model density is generated under the footprint of the receptor (or,
    when no receptor is given, the first ligand); a least-squares scale
    factor between experimental and model density inside that footprint
    is then applied to ``xmap``. Returns the modified ``xmap``.
    """
    if args.receptor is not None:
        footprint = Structure.fromfile(args.receptor).select('record', 'ATOM')
    else:
        footprint = Structure.fromfile(args.ligands[0])
    model_map = Volume.zeros_like(xmap)
    transformer = Transformer(footprint, model_map, simple=True, rmax=3)
    transformer.mask(rmask)
    # Voxels covered by the footprint mask.
    mask = model_map.array > 0
    transformer.reset()
    transformer.initialize()
    transformer.density()
    if args.density_cutoff is not None:
        # NOTE(review): --density-cutoff defaults to 0.0 (see parse_args),
        # so this branch is effectively always taken.
        mean = xmap.array.mean()
        std = xmap.array.std()
        cutoff_mask = ((xmap.array - mean) / std) < args.density_cutoff
    xmap_masked = xmap.array[mask]
    model_masked = model_map.array[mask]
    model_masked_mean = model_masked.mean()
    xmap_masked_mean = xmap_masked.mean()
    # Center both densities before computing the least-squares scale factor.
    model_masked -= model_masked_mean
    xmap_masked -= xmap_masked_mean
    scaling_factor = ((model_masked * xmap_masked).sum() /
                      (xmap_masked * xmap_masked).sum())
    args.info.write('Scaling factor: {:.2f}\n'.format(scaling_factor))
    xmap.array -= xmap_masked_mean
    xmap.array *= scaling_factor
    xmap.array += model_masked_mean
    if args.density_cutoff is not None:
        # Zero voxels below the sigma cutoff.
        xmap.array[cutoff_mask] = 0
    if args.receptor is not None:
        # Subtract the receptor's model density, leaving (mostly) ligand signal.
        xmap.array -= model_map.array
    model_map.array.fill(0)
    return xmap
def solve():
    """Entry point: select and weight ligand conformers against the map.

    Pipeline: load map and conformers, scale the map, rank conformers by
    real-space cross-correlation (RSCC), prune low-RSCC / duplicate /
    drifted conformers, then iterate MIQP occupancy fitting and a
    Fisher-z acceptance test until the conformer set is stable.
    """
    args = parse_args()
    # Expand the crystallography map to fill the whole unit cell
    xmap = Volume.fromfile(args.xmap).fill_unit_cell()
    xmap.set_spacegroup("P1")
    resolution = args.resolution
    conformers = [Ligand.fromfile(fname) for fname in args.ligands]
    args.info.write("Initial number of conformers: {}\n".format(len(conformers)))
    # Resolution-dependent mask radius. NOTE(review): the 0.7 / 0.6 / 3.0
    # constants look empirical -- confirm against the upstream reference.
    if resolution < 3.0:
        rmask = 0.7 + (resolution - 0.6) / 3.0
    else:
        rmask = 0.5 * resolution
    # Scale the map under the footprint of the receptor
    xmap = scale_map(args, xmap, rmask)
    # Analyze ligand for rscc and rmsd metrics
    validator = Validator(xmap, resolution)
    for fname, conformer in izip(args.ligands, conformers):
        conformer.rscc = validator.rscc(conformer, rmask=1.5)
        conformer.fname = fname
    conformers_sorted = sorted(conformers, key=lambda conformer: conformer.rscc, reverse=True)
    line = '{fname} {rscc:.3f}\n'
    for conformer in conformers_sorted:
        args.info.write(line.format(fname=conformer.fname, rscc=conformer.rscc))
    best_rscc = conformers_sorted[0].rscc
    # Remove conformers with a significantly lower rscc
    rscc_cutoff = 0.1 * best_rscc
    conformers = [conformer for conformer in conformers_sorted
                  if (best_rscc - conformer.rscc) < rscc_cutoff]
    # Boolean index of non-hydrogen atoms, used for distance comparisons.
    noH = np.logical_not(conformers[0].select('e', 'H', return_ind=True))
    args.info.write('Number of conformers after rscc cutoff: {}\n'.format(len(conformers)))
    coor_set = [conformers[0].coor]
    # Remove geometrically similar ligands
    filtered_conformers = [conformers[0]]
    for conformer in conformers[1:]:
        # Smallest maximum per-atom displacement to any already-kept conformer.
        max_dist = min([np.abs(
            np.linalg.norm(conformer.coor[noH] - coor[noH], axis=1).max()
            ) for coor in coor_set])
        if max_dist < 1.5:
            continue
        coor_set.append(conformer.coor)
        filtered_conformers.append(conformer)
    conformers = filtered_conformers
    args.info.write('Number of conformers after removing duplicates: {}\n'.format(len(conformers)))
    # Remove conformers that have drifted off
    best_conformer = conformers[0]
    filtered_conformers = [best_conformer]
    center = best_conformer.coor.mean(axis=0)
    for conformer in conformers[1:]:
        rmsd = conformer.rmsd(best_conformer)
        if rmsd > 6:
            continue
        shift = np.linalg.norm(conformer.coor.mean(axis=0) - center)
        if shift > 3:
            continue
        filtered_conformers.append(conformer)
    conformers = filtered_conformers
    args.info.write('Number of conformers after removing drifters: {}\n'.format(len(conformers)))
    # Now do QP/MIQP with the remaining conformers. Check if rscc increases
    # substantially by including each conformer. Keep repeating till it is
    # consistent
    ligand_template = Ligand.fromfile(args.ligands[0])
    ligand_template.data['q'].fill(1)
    smax = 1.0 / (2.0 * resolution)
    model_map = Volume.zeros_like(xmap)
    transformer = Transformer(ligand_template, model_map, smax=smax, rmax=3)
    while True:
        # Create mask
        for conformer in conformers:
            ligand_template.coor[:] = conformer.coor
            transformer.mask(rmask)
        mask = model_map.array > 0
        model_map.array.fill(0)
        # Create densities
        nvalues = mask.sum()
        target = xmap.array[mask]
        models = np.zeros((len(conformers), nvalues))
        for n, conformer in enumerate(conformers):
            transformer.reset()
            ligand_template.coor[:] = conformer.coor
            transformer.density()
            models[n,:] = model_map.array[mask]
            model_map.array.fill(0)
        # Do MIQP
        miqpsolver = MIQPSolver(target, models)
        miqpsolver(maxfits=args.cardinality, threshold=args.threshold)
        # Drop conformers whose fitted occupancy is (numerically) zero.
        filtered_conformers = []
        for q, conformer in izip(miqpsolver.occupancies, conformers):
            if q > 0.0001:
                conformer.data['q'].fill(q)
                filtered_conformers.append(conformer)
        conformers = filtered_conformers
        # Check fisher z correlation
        conformers[0].zscore = -1
        multiconformer = conformers[0]
        multiconformer.data['altloc'].fill('A')
        nconformers = 1
        filtered_conformers = [conformers[0]]
        for conformer in conformers[1:]:
            conformer.data['altloc'].fill(ascii_uppercase[nconformers])
            new_multiconformer = multiconformer.combine(conformer)
            diff = validator.fisher_z_difference(
                multiconformer, new_multiconformer, rmask=1.5, simple=True
            )
            # Keep the conformer only if it does not lower the Fisher-z score.
            if diff < 0.0:
                continue
            multiconformer = new_multiconformer
            conformer.zscore = diff
            filtered_conformers.append(conformer)
            nconformers += 1
        # Converged when no conformer was dropped in this round.
        if len(filtered_conformers) == len(conformers):
            conformers = filtered_conformers
            break
        conformers = filtered_conformers
    line = "{fname}\t{rscc:.3f}\t{zscore:.3f}\t{occupancy:.2f}\n"
    for conformer in conformers:
        args.info.write(line.format(
            fname=conformer.fname, rscc=conformer.rscc,
            zscore=conformer.zscore, occupancy=conformer.q[0]
        ))
    multiconformer.tofile(args.output)
| StarcoderdataPython |
11311137 | <gh_stars>0
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.output(11, True)
print "ON"
time.sleep(10)
GPIO.output(11, False)
print "OFF"
| StarcoderdataPython |
168072 | """
This problem was asked by Stripe.
Given an array of integers, find the first missing positive integer in linear time and constant space. In other words,
find the lowest positive integer that does not exist in the array. The array can contain duplicates and
negative numbers as well.
For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
You can modify the input array in-place.
"""
# i really had no clue how to do it in linear time and constant space, here are some answers, though:
# https://stackoverflow.com/questions/51346136/given-an-array-of-integers-find-the-first-missing-positive-integer-in-linear-ti
# using the indices does the trick
def lowest_integer(numbers):
    """Return the first missing positive integer in *numbers*.

    Runs in O(n) time and O(1) extra space by reusing the input list
    (the list is modified in place, as the problem statement allows):
    1. partition positives to the front (``end`` = count of positives);
    2. mark the presence of value ``v`` (1 <= v <= end) by negating
       ``numbers[v - 1]``;
    3. the first still-positive slot gives the answer.

    >>> lowest_integer([3, 4, -1, 1])
    2
    >>> lowest_integer([1, 2, 0])
    3
    """
    if not numbers:
        return 1
    # Step 1: move all positive values to the front.
    end = 0
    for i in range(len(numbers)):
        if numbers[i] > 0:
            numbers[i], numbers[end] = numbers[end], numbers[i]
            end += 1
    # Step 2: for each positive value v in range, flag slot v-1 negative.
    for i in range(end):
        value = abs(numbers[i])
        if value <= end and numbers[value - 1] > 0:
            numbers[value - 1] = -numbers[value - 1]
    # Step 3: first unflagged slot i means i+1 is missing.
    for i in range(end):
        if numbers[i] > 0:
            return i + 1
    return end + 1
"""
kudos to pmcarpan from stackoverflow:
Assuming the array can be modified,
We divide the array into 2 parts such that the first part consists of only positive numbers. Say we have the starting index as 0 and the ending index as end(exclusive).
We traverse the array from index 0 to end. We take the absolute value of the element at that index - say the value is x.
If x > end we do nothing.
If not, we make the sign of the element at index x-1 negative. (Clarification: We do not toggle the sign. If the value is positive, it becomes negative. If it is negative, it remains negative. In pseudo code, this would be something like if (arr[x-1] > 0) arr[x-1] = -arr[x-1] and not arr[x-1] = -arr[x-1].)
Finally, we traverse the array once more from index 0 to end. In case we encounter a positive element at some index, we output index + 1. This is the answer. However, if we do not encounter any positive element, it means that integers 1 to end occur in the array. We output end + 1.
It can also be the case that all the numbers are non-positive making end = 0. The output end + 1 = 1 remains correct.
All the steps can be done in O(n) time and using O(1) space.
Example:
Initial Array: 1 -1 -5 -3 3 4 2 8
Step 1 partition: 1 8 2 4 3 | -3 -5 -1, end = 5
In step 2 we change the signs of the positive numbers to keep track of which integers have already occurred. For example, here array[2] = -2 < 0, it suggests that 2 + 1 = 3 has already occurred in the array. Basically, we change the value of the element having index i to negative if i+1 is in the array.
Step 2 Array changes to: -1 -8 -2 -4 3 | -3 -5 -1
In step 3, if some value array[index] is positive, it means that we did not find any integer of value index + 1 in step 2.
Step 3: Traversing from index 0 to end, we find array[4] = 3 > 0
The answer is 4 + 1 = 5
"""
| StarcoderdataPython |
11278192 | <gh_stars>0
__author__ = 'bengt'

# Marker values used on the board / by the game logic.
BOARD, WHITE, BLACK, MOVE = 'BOARD', 'WHITE', 'BLACK', 'MOVE'
WIDTH, HEIGHT = 8, 8

# Index offsets for each compass direction on the flat WIDTH*HEIGHT board
# (row-major: moving one row up/down changes the index by HEIGHT).
NORTH = -HEIGHT
NORTHEAST = -HEIGHT + 1
EAST = 1
SOUTHEAST = HEIGHT + 1
SOUTH = HEIGHT
SOUTHWEST = HEIGHT - 1
WEST = - 1
NORTHWEST = -HEIGHT - 1
DIRECTIONS = (NORTH, NORTHEAST, EAST, SOUTHEAST, SOUTH, SOUTHWEST, WEST, NORTHWEST)
def chunks(l, n):
    """Lazily yield successive slices of *l*, each at most *n* items long."""
    for start in range(0, len(l), n):
        stop = start + n
        yield l[start:stop]
def get_opponent(player):
    """Return the opposing colour for *player*.

    Raises ValueError for anything other than WHITE or BLACK.
    """
    opponents = {WHITE: BLACK, BLACK: WHITE}
    try:
        return opponents[player]
    except KeyError:
        raise ValueError
class NoMovesError(Exception):
    # Signals that no moves are available (see the class name; raised by
    # callers outside this excerpt).
    pass
def outside_board(tile, direction):
    """Return True when stepping from *tile* in *direction* leaves the board."""
    column = tile % WIDTH
    on_top_row = 0 <= tile <= 7
    on_bottom_row = 56 <= tile <= 63
    on_right_column = column == 7
    on_left_column = column == 0
    if on_top_row and direction in (NORTH, NORTHEAST, NORTHWEST):
        return True
    if on_bottom_row and direction in (SOUTH, SOUTHWEST, SOUTHEAST):
        return True
    if on_right_column and direction in (NORTHEAST, EAST, SOUTHEAST):
        return True
    return on_left_column and direction in (NORTHWEST, WEST, SOUTHWEST)
| StarcoderdataPython |
299463 | ##
# Contains TranscriptomeIndexListView, TranscriptomeIndexDetailView, and needed serializer
##
from django.utils.decorators import method_decorator
from rest_framework import filters, generics, serializers
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from data_refinery_api.exceptions import InvalidFilters
from data_refinery_api.utils import check_filters
from data_refinery_common.models import OrganismIndex
class OrganismIndexSerializer(serializers.ModelSerializer):
    # Render the related organism via its string representation.
    organism_name = serializers.StringRelatedField(source="organism", read_only=True)
    # Computed per object by get_download_url() below.
    download_url = serializers.SerializerMethodField()

    class Meta:
        model = OrganismIndex
        fields = (
            "id",
            "assembly_name",
            "organism_name",
            "database_name",
            "release_version",
            "index_type",
            "salmon_version",
            "download_url",
            "result_id",
            "last_modified",
        )
        # Every exposed field is read-only.
        read_only_fields = fields

    def get_download_url(self, obj):
        """Return the S3 URL of the index's computed file, or None if absent."""
        computed_file = obj.get_computed_file()
        if computed_file is not None:
            return computed_file.s3_url
        return None
# Swagger documentation for the supported query parameters of GET.
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="organism__name",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Organism name. Eg. `MUS_MUSCULUS`",
            ),
            openapi.Parameter(
                name="length",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Short hand for `index_type` Eg. `short` or `long`",
            ),
            openapi.Parameter(
                name="salmon_version",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Eg. `salmon 0.13.1`",
            ),
            openapi.Parameter(
                name="index_type",
                in_=openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                description="Eg. `TRANSCRIPTOME_LONG`",
            ),
        ]
    ),
)
class TranscriptomeIndexListView(generics.ListAPIView):
    """
    List all Transcriptome Indices. These are a special type of process result,
    necessary for processing other SRA samples.
    """

    serializer_class = OrganismIndexSerializer
    filter_backends = (
        DjangoFilterBackend,
        filters.OrderingFilter,
    )
    # salmon_version / index_type are handled by the filter backend; the
    # remaining query params are handled manually in get_queryset().
    filterset_fields = ["salmon_version", "index_type"]
    ordering_fields = ("created_at", "salmon_version")
    ordering = ("-created_at",)

    def get_queryset(self):
        # Reject unrecognized query parameters; the special_filters listed
        # here are applied by hand below rather than via filterset_fields.
        invalid_filters = check_filters(
            self, special_filters=["organism__name", "result_id", "length"]
        )
        if invalid_filters:
            raise InvalidFilters(invalid_filters=invalid_filters)
        queryset = OrganismIndex.public_objects.all()
        # Upper-case the supplied organism name before filtering.
        organism_name = self.request.query_params.get("organism__name", None)
        if organism_name is not None:
            queryset = queryset.filter(organism__name=organism_name.upper())
        # https://github.com/AlexsLemonade/refinebio/issues/2459
        # It looks like when we set `result_id` as a filterset field,
        # django_forms goes nuts and tries to call __str__ on every single
        # computational result in our database trying to find all of the
        # different possible computational_results. So let's just take care of
        # this one ourselves.
        result_id = self.request.query_params.get("result_id", None)
        if result_id is not None:
            queryset = queryset.filter(result_id=result_id)
        # `length` is shorthand: "short"/"long" expands to
        # index_type TRANSCRIPTOME_SHORT / TRANSCRIPTOME_LONG.
        length = self.request.query_params.get("length", None)
        if length is not None:
            index_type = "TRANSCRIPTOME_{}".format(length.upper())
            queryset = queryset.filter(index_type=index_type)
        return queryset
# Swagger documentation for the `id` path parameter of GET.
@method_decorator(
    name="get",
    decorator=swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                name="id",
                in_=openapi.IN_PATH,
                type=openapi.TYPE_NUMBER,
                description="Transcriptome Index Id eg `1`",
            ),
        ]
    ),
)
class TranscriptomeIndexDetailView(generics.RetrieveAPIView):
    """
    Gets the S3 url associated with the organism and length, along with other metadata about
    the transcriptome index we have stored.
    """

    serializer_class = OrganismIndexSerializer
    # Look the object up by its numeric primary key from the URL.
    lookup_field = "id"
    queryset = OrganismIndex.public_objects.all()
| StarcoderdataPython |
3481183 | <filename>leetcodeOct2020/oct1.py
# You have a RecentCounter class which counts the number of recent requests within a certain time frame.
# Implement the RecentCounter class:
# RecentCounter() Initializes the counter with zero recent requests.
# int ping(int t) Adds a new request at time t, where t represents some time in milliseconds, and returns the number of requests that has happened in the past 3000 milliseconds (including the new request). Specifically, return the number of requests that have happened in the inclusive range [t - 3000, t].
# It is guaranteed that every call to ping uses a strictly larger value of t than the previous call.
# Example 1:
# Input
# ["RecentCounter", "ping", "ping", "ping", "ping"]
# [[], [1], [100], [3001], [3002]]
# Output
# [null, 1, 2, 3, 3]
# Explanation
# RecentCounter recentCounter = new RecentCounter();
# recentCounter.ping(1); // requests = [1], range is [-2999,1], return 1
# recentCounter.ping(100); // requests = [1, 100], range is [-2900,100], return 2
# recentCounter.ping(3001); // requests = [1, 100, 3001], range is [1,3001], return 3
# recentCounter.ping(3002); // requests = [1, 100, 3001, 3002], range is [2,3002], return 3
# Constraints:
# 1 <= t <= 104
# Each test case will call ping with strictly increasing values of t.
# At most 104 calls will be made to ping.
class RecentCounter:
    """Counts requests that happened within the last 3000 milliseconds."""

    def __init__(self):
        # Local import: this script has no module-level import section.
        from collections import deque
        # Timestamps of pings inside the 3000 ms window, oldest first.
        # A deque gives O(1) popleft(); the previous list.pop(0) was O(n)
        # per expired entry.
        self.request_count = deque()

    def ping(self, t: int) -> int:
        """Record a ping at time *t* (ms) and return how many pings fall in
        the inclusive window [t - 3000, t].

        Assumes calls arrive with strictly increasing *t* (per the problem
        statement above).
        """
        self.request_count.append(t)
        while self.request_count and t - self.request_count[0] > 3000:
            self.request_count.popleft()
        return len(self.request_count)
# Exercise the counter with the example sequence; expected output: 1 2 3 3.
ob = RecentCounter()
print(ob.ping(1))
print(ob.ping(100))
print(ob.ping(3001))
print(ob.ping(3002))
# Code to work upon
# class RecentCounter:
# def __init__(self):
# self.input_string1 = input("1")
# self.input_string2 = input("2")
# self.counter = 0
# self.input_list = self.input_string2.strip("").split(",")
# def ping(self, t):
# counter = self.counter
# self.t = t
# print(self.input_list)
# for i in self.input_list[1: len(self.input_list)]:
# if int(i.strip("][ ")) in range(t-3000, t+1):
# counter += 1
# return counter
# recentCounter = RecentCounter()
# print(recentCounter.ping(1))
# print(recentCounter.ping(100))
# print(recentCounter.ping(3001))
# print(recentCounter.ping(3002)) | StarcoderdataPython |
1925681 | <reponame>meyerweb/wpt
def main(request, response):
    """Respond with the status code named by the ``status`` query parameter.

    When a ``tao_value`` parameter is present, echo it back in a
    ``timing-allow-origin`` response header.
    """
    response.status = (request.GET.first(b'status'), b"")
    tao = request.GET.first(b'tao_value') if b'tao_value' in request.GET else None
    if tao is not None:
        response.headers.set(b'timing-allow-origin', tao)
9624204 | <filename>day07/00/solution.py
import util
def test():
    """Smoke test: the sample input's tree root should be named 'tknk'."""
    #test_vals =
    assert run(util.TEST_VALS) == 'tknk'
def run(in_val):
    """Parse *in_val* into a tree and return the root node's name attribute."""
    tree = util.parse_to_tree(in_val)
    return(tree.getroot().get(util.NAME))
| StarcoderdataPython |
1894491 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from backbones import utils
resnet_arg_scope = utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
    """Pre-activation bottleneck residual unit (scope 'bottleneck_v2').

    Args:
        inputs: 4-D input tensor.
        depth: output channel count of the unit.
        depth_bottleneck: channel count of the inner 3x3 convolution.
        stride: stride applied in the middle convolution (and shortcut).
        rate: dilation rate for the 3x3 convolution.
    """
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Pre-activation: BN + ReLU applied before the convolutions.
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if depth == depth_in:
            # Same width in and out: the shortcut only needs (sub)sampling.
            shortcut = utils.subsample(inputs, stride, 'shortcut')
        else:
            # Width changes: project the pre-activated input with a 1x1 conv.
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None, activation_fn=None, scope='shortcut')
        # 1x1 reduce -> 3x3 (stride / dilation) -> 1x1 expand.
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')
        output = shortcut + residual
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              return_raw=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
    """Generator for pre-activation ResNet (v2) models.

    Returns (net, end_points). When return_raw is True the features are
    returned *before* the final batch-norm / pooling / logits head.
    """
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, bottleneck, utils.stack_blocks_dense], outputs_collections=end_points_collection):
            with slim.arg_scope([slim.batch_norm], is_training=is_training):
                net = inputs
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError('The output_stride needs to be a multiple of 4.')
                        # The root block already downsamples by 4.
                        output_stride /= 4
                    # Root: plain 7x7/2 conv (no BN/ReLU -- v2 units apply
                    # pre-activation themselves) followed by 3x3/2 max-pool.
                    with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                        net = utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
                    net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
                net = utils.stack_blocks_dense(net, blocks, output_stride)
                end_points = slim.utils.convert_collection_to_dict(end_points_collection)
                if return_raw:
                    return net, end_points
                # Final pre-activation head.
                net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
                end_points[sc.name + '/postnorm'] = net
                if global_pool:
                    net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
                    end_points['global_pool'] = net
                if num_classes:
                    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')
                    end_points[sc.name + '/logits'] = net
                    if spatial_squeeze:
                        net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                        end_points[sc.name + '/spatial_squeeze'] = net
                    end_points['predictions'] = slim.softmax(net, scope='predictions')
                return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
    """Build a resnet_v2 bottleneck Block description.

    The first num_units - 1 units use stride 1; only the final unit applies
    *stride*. Each unit's output width is four times the bottleneck depth.
    """
    def unit_args(unit_stride):
        return {
            'depth': base_depth * 4,
            'depth_bottleneck': base_depth,
            'stride': unit_stride
        }
    unit_list = [unit_args(1)] * (num_units - 1) + [unit_args(stride)]
    return utils.Block(scope, bottleneck, unit_list)
resnet_v2.default_image_size = 224
def resnet_v2_50(inputs,
                 num_classes=None,
                 is_training=True,
                 return_raw=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='resnet_v2_50'):
    """ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
    # Stage layout: 3, 4, 6, 3 bottleneck units with strides 2, 2, 2, 1.
    blocks = [
        resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
        resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
        resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
        resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
    ]
    return resnet_v2(inputs, blocks, num_classes, is_training=is_training, return_raw=return_raw, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size
def resnet_v2_101(inputs,
                  num_classes=None,
                  is_training=True,
                  return_raw=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_101'):
    """ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
    # Stage layout: 3, 4, 23, 3 bottleneck units with strides 2, 2, 2, 1.
    blocks = [
        resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
        resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
        resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
        resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
    ]
    return resnet_v2(inputs, blocks, num_classes, is_training=is_training, return_raw=return_raw, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size
def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=True,
                  return_raw=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_152'):
    """ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
    # Stage layout: 3, 8, 36, 3 bottleneck units with strides 2, 2, 2, 1.
    blocks = [
        resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
        resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
        resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
        resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
    ]
    return resnet_v2(inputs, blocks, num_classes, is_training=is_training, return_raw=return_raw, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
def resnet_v2_200(inputs,
                  num_classes=None,
                  is_training=True,
                  return_raw=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_200'):
    """ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
    # Stage layout: 3, 24, 36, 3 bottleneck units with strides 2, 2, 2, 1.
    blocks = [
        resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
        resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
        resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
        resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
    ]
    return resnet_v2(inputs, blocks, num_classes, is_training=is_training, return_raw=return_raw, global_pool=global_pool, output_stride=output_stride, include_root_block=True, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
| StarcoderdataPython |
11361412 | <filename>TMbidimensional.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 14:56:30 2019
@author: fernando
"""
#Use esta Máquina de Turing responsablemente (y bajo su propio riesgo)
import copy
def ConstruccTM():
    """Interactively build a Turing machine as a list of quadruples.

    Each quadruple read from stdin is [current state, current symbol,
    symbol-or-movement to apply ("R"/"L" or a tape symbol), next state].
    """
    TM = []
    numInstrucc = int(input("Elija la cantidad de instrucciones de la máquina. "))
    for i in range(numInstrucc):
        cuad = []
        print("\nIntroduzca una cuádrupla")
        cuad.append(input("Introduzca el estado ACTUAL: "))
        cuad.append(input("Introduzca el símbolo ACTUAL: "))
        cuad.append(input("Introduzca el símbolo o movimiento A CAMBIAR: "))
        cuad.append(input("Introduzca el estado A CAMBIAR: "))
        TM.append(cuad)
    return TM
def VerificacionTM(TM):
    """Check the machine is deterministic.

    Two quadruples conflict when they share the same (state, symbol) pair
    but are not identical. The first conflicting pair found is printed and
    False is returned; otherwise the machine is valid and True is returned.
    """
    for indice, cuad in enumerate(TM):
        for otra in TM[indice + 1:]:
            conflicto = (cuad[0] == otra[0] and cuad[1] == otra[1]
                         and cuad != otra)
            if conflicto:
                print("La instrucciones", cuad, otra, "no son válidas.")
                return False
    return True
def FuncionamientoDeLaTM(cinta,TM):
    """Run the Turing machine *TM* on tape *cinta*, printing each step.

    The tape is a list of one-character symbols; "B" is the blank symbol,
    "R"/"L" in a quadruple move the head, anything else is written to the
    tape. Execution starts in state 1 and halts when no quadruple matches
    the current (state, symbol) pair. Finally the number of '1' symbols
    left on the tape is reported (all messages printed in Spanish).
    """
    # Keep an untouched copy of the input tape for the final report.
    cintaAux = copy.deepcopy(cinta)
    cintaAux2 = []
    # Visual cursor row: a full block marks the head position.
    cintaCursor = [u"\u2588"]
    print("\n-------\n")
    detencion = False
    posicionCursor = 0
    cursorCinta = 0
    estadoMaquina = 1
    numeroUnos = 0
    print("La posición inicial del programa es: ", posicionCursor,
          "\n el estado: ", estadoMaquina,
          "\n marcando el símbolo: ", cinta[cursorCinta], "\n")
    print(cintaCursor)
    print(cinta)
    print("\n-------\n")
    while(not(detencion)):
        cintaAux2 = []
        cuadActual = []
        # Find the first quadruple matching the current state and symbol.
        for cuad in TM:
            a0 = int(cuad[0])
            if((a0 == estadoMaquina) and (cuad[1] == cinta[cursorCinta])):
                cuadActual.extend(cuad)
                break
        # No matching quadruple: the machine halts.
        if(cuadActual == []):
            detencion = True
        if(not(detencion)):
            # Handle right (R) and left (L) head movements.
            if(cuadActual[2] == "R"):
                cintaCursor[cursorCinta] = "-"
                posicionCursor = posicionCursor + 1
                cursorCinta = cursorCinta + 1
                if(len(cintaCursor)-1<cursorCinta):
                    cintaCursor.append(u"\u2588")
                else:
                    cintaCursor[cursorCinta] = u"\u2588"
            elif(cuadActual[2] == "L"):
                # Moving left at index 0 extends the tape with a blank.
                if(cursorCinta==0):
                    cintaAux2.append("B")
                    for elemento in cinta:
                        cintaAux2.append(elemento)
                    cinta = cintaAux2
                    posicionCursor = posicionCursor - 1
                else:
                    cintaCursor[cursorCinta] = "-"
                    posicionCursor = posicionCursor - 1
                    cursorCinta = cursorCinta - 1
                    cintaCursor.append(u"\u2588")
            # Not a movement: write the symbol onto the tape.
            if(cuadActual[2]!="R" and cuadActual[2]!="L"):
                cinta[cursorCinta] = cuadActual[2]
            # Grow the tape with a blank if the head moved past the end.
            if(len(cinta)-1<cursorCinta):
                cinta.append("B")
            # Update the machine state and print an exclusive step report!
            estadoMaquina = int(cuadActual[3])
            print("La posición del programa es: ", posicionCursor,
                  "\n el estado: ", estadoMaquina,
                  "\n marcando el símbolo: ", cinta[cursorCinta],
                  "\n donde el cursorCinta está en: ", cursorCinta)
            print("\n")
            print(cintaCursor)
            print(cinta)
            print("\n-------\n")
        else:
            break
    print("El programa se detuvo en\n la posición: ", posicionCursor,
          "\n el estado: ", estadoMaquina,
          "\n marcando el símbolo: ", cinta[posicionCursor],
          "\n donde el cursorCinta está en: ", cursorCinta)
    print("\n")
    print(cintaCursor)
    print(cinta)
    # Count the '1' symbols left on the tape for the final summary.
    for i in range(len(cinta)):
        if(cinta[i] == "1"):
            numeroUnos = numeroUnos + 1
    print("\nEl número de unos que produce la máquina en el input\n", cintaAux, "es", numeroUnos)
    print("\n-------\n")
# --- Script entry: build a machine, verify it, then run it on user input. ---
cinta = []
TM = ConstruccTM()
# NOTE(review): the verification result is ignored -- the machine runs even
# when VerificacionTM reports conflicting quadruples; confirm intended.
VerificacionTM(TM)
cintaEntrada = input("Introduzca un input: ")
for simbolo in cintaEntrada:
    cinta.append(simbolo)
FuncionamientoDeLaTM(cinta,TM)
5113597 | <reponame>Rahuum/glooey<filename>tests/drawing/demo_outline.py
#!/usr/bin/env python3
import pyglet
import glooey
import run_demos
from vecrec import Vector, Rect
window = pyglet.window.Window()
batch = pyglet.graphics.Batch()
full = Rect.from_pyglet_window(window)
left = Rect(full.left, full.bottom, full.width/2, full.height)
right = Rect(full.left, full.bottom, full.width/2, full.height)
right.left = left.right
left.shrink(50)
right.shrink(50)
@run_demos.on_space(window, batch)
def test_outline():
a = glooey.drawing.Outline(left, batch=batch)
b = glooey.drawing.Outline(right, batch=batch)
yield "Show two unconnected outlines."
a.hide()
b.hide()
c = glooey.drawing.Outline(full, batch=batch)
yield "Put an outline just inside the window."
c.hide()
@window.event
def on_draw():
window.clear()
batch.draw()
pyglet.app.run()
| StarcoderdataPython |
101264 | <reponame>superseeker13/Sain
import time
import copy
import socket
import sys
import traceback
import color_hex
import remoteAPI
PreRead = 0
PostRead = 1
PreWrite = 2
PostWrite = 3
PreExecute = 4
PostExecute = 5
_Activate = 1
_Deactivate = 3
_Stop = 5
_Access = 9
_Controllers = 11
_Frame = 13
_Scanline = 15
_ScanlineCycle = 17
_SpriteZero = 19
_Status = 21
_EVENT_TYPES = [ _Activate, _Deactivate, _Stop, _Access, _Controllers,
_Frame, _Scanline, _ScanlineCycle, _SpriteZero, _Status ]
_EVENT_REQUEST = 0xFF
_EVENT_RESPONSE = 0xFE
_HEARTBEAT = 0xFD
_READY = 0xFC
_RETRY_SECONDS = 1
_BLOCK_SIZE = 4096
_ARRAY_LENGTH = 1024
_host = None
_port = -1
_remoteAPI = None
def _isNotBlank(s):
return s and s.strip()
def initRemoteAPI(host, port):
global _host
global _port
_host = host
_port = port
def getAPI():
global _remoteAPI
if _remoteAPI == None and _isNotBlank(_host):
_remoteAPI = remoteAPI._RemoteAPI(_host, _port)
return _remoteAPI
class _AccessPoint(object):
def __init__(self, listener, accessPointType, minAddress, maxAddress = -1,
bank = -1):
self.listener = listener;
self.accessPointType = accessPointType;
self.bank = bank;
if maxAddress < 0:
self.minAddress = self.maxAddress = minAddress
elif minAddress <= maxAddress:
self.minAddress = minAddress
self.maxAddress = maxAddress
else:
self.minAddress = maxAddress
self.maxAddress = minAddress
class _ScanlineCyclePoint(object):
def __init__(self, listener, scanline, scanlineCycle):
self.listener = listener
self.scanline = scanline
self.scanlineCycle = scanlineCycle
class _ScanlinePoint(object):
def __init__(self, listener, scanline):
self.listener = listener
self.scanline = scanline
class _DataStream(object):
  """Buffered reader/writer implementing the wire protocol over a socket.

  Integers are big-endian 32-bit; strings are a 32-bit length followed by one
  byte per character (so only code points < 256 round-trip).  Writes are
  queued in a buffer until flush(); reads block until enough bytes arrive.
  """

  def __init__(self, sock):
    self._readBuffer = bytearray()   # bytes received but not yet consumed
    self._writeBuffer = bytearray()  # bytes queued until flush()
    self._sock = sock

  def _close(self):
    # Fixed: socket.shutdown() requires a "how" argument; the original call
    # without one raised TypeError.  Shut down both directions, then close.
    self._sock.shutdown(socket.SHUT_RDWR)
    self._sock.close()

  def _fillReadBuffer(self, count):
    """Block until at least count bytes are buffered; IOError on disconnect."""
    while len(self._readBuffer) < count:
      block = self._sock.recv(_BLOCK_SIZE)
      if len(block) == 0:
        raise IOError("Disconnected.")
      self._readBuffer.extend(block)

  def _read(self):
    # Caller must have ensured the buffer is non-empty via _fillReadBuffer.
    return self._readBuffer.pop(0)

  def writeByte(self, value):
    self._writeBuffer.append(value & 0xFF)

  def readByte(self) -> int:
    self._fillReadBuffer(1)
    return self._read()

  def writeInt(self, value):
    # Big-endian 32-bit integer.
    self.writeByte(value >> 24)
    self.writeByte(value >> 16)
    self.writeByte(value >> 8)
    self.writeByte(value)

  def readInt(self) -> int:
    self._fillReadBuffer(4)
    value = self._read() << 24
    value |= self._read() << 16
    value |= self._read() << 8
    value |= self._read()
    return value

  def writeIntArray(self, array):
    self.writeInt(len(array))
    for element in array:
      self.writeInt(element)

  def readIntArray(self, array) -> int:
    """Read ints into the preallocated array; return the count read."""
    length = self.readInt()
    if length < 0 or length > len(array):
      self._close()
      raise IOError("Invalid array length: %d" % length)
    for i in range(length):
      array[i] = self.readInt()
    return length

  def writeBoolean(self, value):
    self.writeByte(1 if value else 0)

  def readBoolean(self) -> bool:
    return self.readByte() != 0

  def writeChar(self, value):
    # Only the first character is sent, as a single byte.
    self.writeByte(ord(value[0]))

  def readChar(self) -> str:
    # Annotation fixed: chr is a function, not a type; a char is a 1-char str.
    return chr(self.readByte())

  def writeCharArray(self, array):
    self.writeString(array)

  def readCharArray(self, array) -> int:
    # Fixed: "self" was missing from the parameter list, so this method
    # raised a TypeError/NameError whenever it was called.
    length = self.readInt()
    if length < 0 or length > len(array):
      self._close()
      raise IOError("Invalid array length: %d" % length)
    for i in range(length):
      array[i] = self.readChar()
    return length

  def writeString(self, value):
    self.writeInt(len(value))
    for ch in value:
      self.writeByte(ord(ch))

  def readString(self) -> str:
    length = self.readInt()
    if length < 0 or length > _ARRAY_LENGTH:
      self._close()
      raise IOError("Invalid array length: %d" % length)
    cs = bytearray()
    for _ in range(length):
      cs.append(self.readByte())
    # Fixed: str(bytearray) on Python 3 yields "bytearray(b'...')".  latin-1
    # maps each byte back to exactly the code point writeString emitted.
    return cs.decode("latin-1")

  def writeStringArray(self, array):
    self.writeInt(len(array))
    for element in array:
      self.writeString(element)

  def readStringArray(self, array) -> int:
    length = self.readInt()
    if length < 0 or length > len(array):
      self._close()
      raise IOError("Invalid array length: %d" % length)
    for i in range(length):
      array[i] = self.readString()
    return length

  def readDynamicStringArray(self) -> list:
    """Read a string array of server-chosen length into a new list."""
    length = self.readInt()
    if length < 0 or length > _ARRAY_LENGTH:
      self._close()
      raise IOError("Invalid array length: %d" % length)
    return [self.readString() for _ in range(length)]

  def flush(self):
    """Send all buffered output and clear the write buffer."""
    self._sock.sendall(self._writeBuffer)
    del self._writeBuffer[:]
class _RemoteBase(object):
  """Client for the remote listener protocol.

  Keeps a socket connection to host:port, registers listeners (keyed by an
  integer event type) with the remote side, and dispatches incoming events
  to the registered Python callables.  run() services events forever and
  reconnects after failures.
  """

  def __init__(self, host, port):
    self._host = host
    self._port = port
    self._stream = None     # _DataStream while connected, else None
    self._nextID = 0        # monotonically increasing listener ID
    self._listenerIDs = {}  # listener -> listenerID
    self._running = False
    # eventType -> listenerID -> listenerObject(listener)
    self._listenerObjects = { eventType : {} for eventType in _EVENT_TYPES }

  def run(self):
    """Connect and service events forever, retrying after disconnects."""
    if self._running:
      return
    self._running = True
    while True:
      self._fireStatusChanged("Connecting to %s:%d..."
          % (self._host, self._port))
      sock = None
      try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self._host, self._port))
        self._stream = _DataStream(sock)
      except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit escape.
        self._fireStatusChanged("Failed to establish connection.")
      if self._stream is not None:
        try:
          self._fireStatusChanged("Connection established.")
          self._sendListeners()
          self._sendReady()
          while True:
            self._probeEvents()
        except IOError:
          # Normal disconnect path.
          self._fireDeactivated()
          self._fireStatusChanged("Disconnected.")
        except Exception:
          # Unexpected error in a listener or in protocol handling: print the
          # traceback, then treat it like a disconnect and retry.
          ex_type, ex_value, ex_traceback = sys.exc_info()
          trace_back = traceback.extract_tb(ex_traceback)
          print("%s: %s" % (ex_type.__name__, ex_value))
          for trace in trace_back:
            print(" File \"%s\", line %d, in %s"
                % (trace[0], trace[1], trace[2]))
            print(" %s" % trace[3])
          self._fireDeactivated()
          self._fireStatusChanged("Disconnected.")
        finally:
          self._stream = None
      time.sleep(_RETRY_SECONDS)

  def _fireDeactivated(self):
    """Invoke every registered deactivate listener."""
    # Snapshot the items first: listeners may (un)register during the loop.
    # Fixed: copy.copy() of a dict_items view raises TypeError on Python 3.
    for _listenerID, listener in list(self._listenerObjects[_Deactivate]
        .items()):
      listener()

  def _fireStatusChanged(self, message):
    """Deliver a status message to every registered status listener."""
    # Same snapshot fix as _fireDeactivated.
    for _listenerID, listener in list(self._listenerObjects[_Status]
        .items()):
      listener(message)

  def _sendReady(self):
    # Best effort; run() will notice a dead connection on the next probe.
    if self._stream is not None:
      try:
        self._stream.writeByte(_READY)
        self._stream.flush()
      except Exception:
        pass

  def _sendListeners(self):
    """(Re)register every known listener with the remote side."""
    for eventType, listObjs in self._listenerObjects.items():
      for listenerID, listenerObject in listObjs.items():
        self._sendListener(listenerID, eventType, listenerObject)

  def _probeEvents(self):
    """Request one event from the server and dispatch it.

    Raises IOError on disconnect or on an unknown listener type.
    """
    self._stream.writeByte(_EVENT_REQUEST)
    self._stream.flush()
    eventType = self._stream.readByte()
    if eventType == _HEARTBEAT:
      self._stream.writeByte(_EVENT_RESPONSE)
      self._stream.flush()
      return
    listenerID = self._stream.readInt()
    obj = self._listenerObjects[eventType].get(listenerID, None)
    if obj is not None:
      if eventType == _Access:
        # Access events carry a payload and expect the listener's result back.
        accessPointType = self._stream.readInt()
        address = self._stream.readInt()
        value = self._stream.readInt()
        result = obj.listener(accessPointType, address, value)
        self._stream.writeByte(_EVENT_RESPONSE)
        self._stream.writeInt(result)
      else:
        if ( eventType == _Activate
            or eventType == _Deactivate
            or eventType == _Stop
            or eventType == _Controllers
            or eventType == _Frame):
          # For these types the stored object is the bare callable itself.
          obj()
        elif eventType == _Scanline:
          obj.listener(self._stream.readInt())
        elif eventType == _ScanlineCycle:
          obj.listener(self._stream.readInt(), self._stream.readInt(),
              self._stream.readInt(), self._stream.readBoolean())
        elif eventType == _SpriteZero:
          obj.listener(self._stream.readInt(), self._stream.readInt())
        elif eventType == _Status:
          obj.listener(self._stream.readString())
        else:
          raise IOError("Unknown listener type: %d" % eventType)
        self._stream.writeByte(_EVENT_RESPONSE)
      self._stream.flush()

  def _sendListener(self, listenerID, eventType, listenerObject):
    """Best effort: tell the remote side about one listener registration."""
    if self._stream is not None:
      try:
        self._stream.writeByte(eventType)
        self._stream.writeInt(listenerID)
        if eventType == _Access:
          self._stream.writeInt(listenerObject.accessPointType)
          self._stream.writeInt(listenerObject.minAddress)
          self._stream.writeInt(listenerObject.maxAddress)
          self._stream.writeInt(listenerObject.bank)
        elif eventType == _Scanline:
          self._stream.writeInt(listenerObject.scanline)
        elif eventType == _ScanlineCycle:
          self._stream.writeInt(listenerObject.scanline)
          self._stream.writeInt(listenerObject.scanlineCycle)
        self._stream.flush()
      except Exception:
        pass

  def _addListener(self, listener, eventType):
    if listener is not None:
      self._sendListener(self._addListenerObject(listener, eventType),
          eventType, listener)

  def _removeListener(self, listener, eventType, methodValue):
    # methodValue is the remote opcode for the matching removeXxx call.
    if listener is not None:
      listenerID = self._removeListenerObject(listener, eventType)
      if listenerID >= 0 and self._stream is not None:
        try:
          self._stream.writeByte(methodValue)
          self._stream.writeInt(listenerID)
          self._stream.flush()
        except Exception:
          pass

  def _addListenerObject(self, listener, eventType, listenerObject = None):
    """Register listener locally; returns the new listener ID."""
    if listenerObject is None:
      listenerObject = listener
    listenerID = self._nextID
    self._nextID += 1
    self._listenerIDs[listener] = listenerID
    self._listenerObjects[eventType][listenerID] = listenerObject
    return listenerID

  def _removeListenerObject(self, listener, eventType) -> int:
    """Unregister listener locally; returns its ID, or -1 if unknown."""
    listenerID = self._listenerIDs.pop(listener, None)
    if listenerID is not None:
      self._listenerObjects[eventType].pop(listenerID)
      return listenerID
    return -1

  def addActivateListener(self, listener):
    self._addListener(listener, _Activate)

  def removeActivateListener(self, listener):
    self._removeListener(listener, _Activate, 2)

  def addDeactivateListener(self, listener):
    self._addListener(listener, _Deactivate)

  def removeDeactivateListener(self, listener):
    self._removeListener(listener, _Deactivate, 4)

  def addStopListener(self, listener):
    self._addListener(listener, _Stop)

  def removeStopListener(self, listener):
    self._removeListener(listener, _Stop, 6)

  def addAccessPointListener(self, listener, accessPointType, minAddress,
      maxAddress = -1, bank = -1):
    if listener is not None:
      point = _AccessPoint(listener, accessPointType, minAddress, maxAddress,
          bank)
      self._sendListener(self._addListenerObject(listener, _Access, point),
          _Access, point)

  def removeAccessPointListener(self, listener):
    self._removeListener(listener, _Access, 10)

  def addControllersListener(self, listener):
    self._addListener(listener, _Controllers)

  def removeControllersListener(self, listener):
    self._removeListener(listener, _Controllers, 12)

  def addFrameListener(self, listener):
    self._addListener(listener, _Frame)

  def removeFrameListener(self, listener):
    self._removeListener(listener, _Frame, 14)

  def addScanlineListener(self, listener, scanline):
    if listener is not None:
      point = _ScanlinePoint(listener, scanline)
      self._sendListener(self._addListenerObject(listener, _Scanline, point),
          _Scanline, point)

  def removeScanlineListener(self, listener):
    self._removeListener(listener, _Scanline, 16)

  def addScanlineCycleListener(self, listener, scanline, scanlineCycle):
    if listener is not None:
      point = _ScanlineCyclePoint(listener, scanline, scanlineCycle)
      self._sendListener(self._addListenerObject(listener, _ScanlineCycle,
          point), _ScanlineCycle, point)

  def removeScanlineCycleListener(self, listener):
    self._removeListener(listener, _ScanlineCycle, 18)

  def addSpriteZeroListener(self, listener):
    self._addListener(listener, _SpriteZero)

  def removeSpriteZeroListener(self, listener):
    self._removeListener(listener, _SpriteZero, 20)

  def addStatusListener(self, listener):
    self._addListener(listener, _Status)

  def removeStatusListener(self, listener):
    self._removeListener(listener, _Status, 22)

  def getPixels(self, pixels):
    """Best effort: fetch the current frame into the preallocated int array."""
    try:
      self._stream.writeByte(119)  # remote opcode for getPixels
      self._stream.flush()
      self._stream.readIntArray(pixels)
    except Exception:
      pass
| StarcoderdataPython |
3236171 | <reponame>rs992214/keanu
## This is a generated file. DO NOT EDIT.
from typing import Collection, Optional
from py4j.java_gateway import java_import
from keanu.context import KeanuContext
from .base import Vertex, Double, Integer, Boolean, vertex_constructor_param_types
from keanu.vertex.label import _VertexLabel
from keanu.vartypes import (
tensor_arg_types,
shape_types
)
from .vertex_casting import (
do_vertex_cast,
do_inferred_vertex_cast,
cast_to_double_tensor,
cast_to_integer_tensor,
cast_to_boolean_tensor,
cast_to_double,
cast_to_integer,
cast_to_boolean,
cast_to_long_array,
cast_to_boolean_array,
cast_to_int_array,
cast_to_vertex_array,
)
# Shared gateway context; its jvm_view() exposes the Java vertex classes
# registered by the java_import calls below.
context = KeanuContext()
def cast_to_double_vertex(input: vertex_constructor_param_types) -> Vertex:
    """Cast *input* to a vertex via do_vertex_cast, using ConstantDouble for non-vertex values."""
    return do_vertex_cast(ConstantDouble, input)
def cast_to_integer_vertex(input: vertex_constructor_param_types) -> Vertex:
    """Cast *input* to a vertex via do_vertex_cast, using ConstantInteger for non-vertex values."""
    return do_vertex_cast(ConstantInteger, input)
def cast_to_boolean_vertex(input: vertex_constructor_param_types) -> Vertex:
    """Cast *input* to a vertex via do_vertex_cast, using ConstantBoolean for non-vertex values."""
    return do_vertex_cast(ConstantBoolean, input)
def cast_to_vertex(input: vertex_constructor_param_types) -> Vertex:
    """Cast *input* to a vertex, choosing the constant type (Boolean/Integer/Double) from the Python value type."""
    return do_inferred_vertex_cast({bool: ConstantBoolean, int: ConstantInteger, float: ConstantDouble}, input)
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.BroadcastVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.DiagPartVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.DiagVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.FillTriangularVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.PermuteVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.ReshapeVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.SliceVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.StridedSliceVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.TakeVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.TriLowerVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.TriUpperVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.TrianglePartVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.WhereVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.BooleanProxyVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.CastNumberToBooleanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.CastToBooleanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.ConstantBooleanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.AndBinaryVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.OrBinaryVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.XorBinaryVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.EqualsVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.GreaterThanOrEqualVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.GreaterThanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.LessThanOrEqualVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.LessThanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.NotEqualsVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.binary.compare.NumericalEqualsVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.multiple.BooleanConcatenationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.multiple.BooleanToDoubleMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.multiple.BooleanToIntegerMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.AllFalseVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.AllTrueVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.AnyFalseVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.AnyTrueVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.IsFiniteVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.IsInfiniteVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.IsNaNVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.IsNegativeInfinityVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.IsPositiveInfinityVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.NotBinaryVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.nonprobabilistic.operators.unary.NotNaNVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.bool.probabilistic.BernoulliVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.generic.nonprobabilistic.PrintVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.CastNumberToIntegerVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.ConstantIntegerVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.IntegerProxyVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.operators.multiple.IntegerConcatenationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.operators.unary.ArgMaxVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.operators.unary.ArgMinVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.operators.unary.NaNArgMaxVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.nonprobabilistic.operators.unary.NaNArgMinVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.probabilistic.BinomialVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.probabilistic.GeometricVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.probabilistic.MultinomialVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.probabilistic.PoissonVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.intgr.probabilistic.UniformIntVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.fixed.operators.unary.ModVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.nonprobabilistic.CastNumberToDoubleVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.nonprobabilistic.ConstantDoubleVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.nonprobabilistic.DoubleProxyVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.nonprobabilistic.operators.multiple.ConcatenationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.BetaVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.CauchyVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.ChiSquaredVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.DirichletVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.ExponentialVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.GammaVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.GaussianVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.HalfCauchyVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.HalfGaussianVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.InverseGammaVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.KDEVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.LaplaceVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.LogNormalVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.LogisticVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.MultivariateGaussianVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.ParetoVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.StudentTVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.TriangularVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.dbl.probabilistic.UniformVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.binary.ArcTan2Vertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.binary.LogAddExp2Vertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.binary.LogAddExpVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.binary.SafeLogTimesVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ArcCosVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ArcCoshVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ArcSinVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ArcSinhVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ArcTanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ArcTanhVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.CeilVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.CholeskyDecompositionVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.CholeskyInverseVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.CosVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.CoshVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.DigammaVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.Exp2Vertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ExpM1Vertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ExpVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.FloorVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.Log10Vertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.Log1pVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.Log2Vertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.LogGammaVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.LogVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.MatrixDeterminantVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.MatrixInverseVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.MeanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.ReplaceNaNVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.RoundVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.SigmoidVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.SinVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.SinhVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.StandardDeviationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.StandardizeVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.TanVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.TanhVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.floating.operators.unary.TrigammaVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.AdditionVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.DifferenceVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.DivisionVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.GreaterThanMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.GreaterThanOrEqualToMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.LessThanMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.LessThanOrEqualToMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.MatrixMultiplicationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.MaxVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.MinVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.MultiplicationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.PowerVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.binary.TensorMultiplicationVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.ternary.SetWithMaskVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.AbsVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.CumProdVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.CumSumVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.MaxUnaryVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.MinUnaryVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.ProductVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.SignVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.tensor.number.operators.unary.SumVertex")
java_import(context.jvm_view(), "io.improbable.keanu.vertices.utility.AssertVertex")
def Broadcast(input_vertex: vertex_constructor_param_types, to_shape: Collection[int], label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``BroadcastVertex``: *input_vertex* with target shape *to_shape*."""
    return Vertex(context.jvm_view().BroadcastVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(to_shape))
def DiagPart(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``DiagPartVertex`` around *input_vertex*."""
    return Vertex(context.jvm_view().DiagPartVertex, label, cast_to_vertex(input_vertex))
def Diag(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``DiagVertex`` around *input_vertex*."""
    return Vertex(context.jvm_view().DiagVertex, label, cast_to_vertex(input_vertex))
def FillTriangular(input_vertex: vertex_constructor_param_types, fill_upper: bool, fill_lower: bool, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``FillTriangularVertex`` with boolean fill_upper/fill_lower flags."""
    return Vertex(context.jvm_view().FillTriangularVertex, label, cast_to_vertex(input_vertex), cast_to_boolean(fill_upper), cast_to_boolean(fill_lower))
def Permute(input_vertex: vertex_constructor_param_types, rearrange: Collection[int], label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``PermuteVertex``: *rearrange* gives the dimension order."""
    return Vertex(context.jvm_view().PermuteVertex, label, cast_to_vertex(input_vertex), cast_to_int_array(rearrange))
def Reshape(input_vertex: vertex_constructor_param_types, proposed_shape: Collection[int], label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``ReshapeVertex``: *input_vertex* reshaped to *proposed_shape*."""
    return Vertex(context.jvm_view().ReshapeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(proposed_shape))
def Slice(input_vertex: vertex_constructor_param_types, dimension: int, index: int, label: Optional[str]=None) -> Vertex:
    """
    Takes the slice along a given dimension and index of a vertex
    :param input_vertex: the input vertex
    :param dimension: the dimension to extract along
    :param index: the index of extraction
    :param label: optional label for the resulting vertex
    """
    return Vertex(context.jvm_view().SliceVertex, label, cast_to_vertex(input_vertex), cast_to_integer(dimension), cast_to_integer(index))
def StridedSlice(input_vertex: vertex_constructor_param_types, start: Collection[int], end: Collection[int], stride: Collection[int], ellipsis: int, upper_bound_stop: Collection[bool], drop_dimension: Collection[bool], label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``StridedSliceVertex`` with per-dimension start/end/stride and flag arrays."""
    return Vertex(context.jvm_view().StridedSliceVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(start), cast_to_long_array(end), cast_to_long_array(stride), cast_to_integer(ellipsis), cast_to_boolean_array(upper_bound_stop), cast_to_boolean_array(drop_dimension))
def Take(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``TakeVertex``: *index* selects one element of *input_vertex*."""
    return Vertex(context.jvm_view().TakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index))
def TriLower(input_vertex: vertex_constructor_param_types, k: int, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``TriLowerVertex`` with diagonal offset *k*."""
    return Vertex(context.jvm_view().TriLowerVertex, label, cast_to_vertex(input_vertex), cast_to_integer(k))
def TriUpper(input_vertex: vertex_constructor_param_types, k: int, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``TriUpperVertex`` with diagonal offset *k*."""
    return Vertex(context.jvm_view().TriUpperVertex, label, cast_to_vertex(input_vertex), cast_to_integer(k))
def TrianglePart(input_vertex: vertex_constructor_param_types, upper_part: bool, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``TrianglePartVertex``; *upper_part* selects which triangle."""
    return Vertex(context.jvm_view().TrianglePartVertex, label, cast_to_vertex(input_vertex), cast_to_boolean(upper_part))
def Where(predicate: vertex_constructor_param_types, thn: vertex_constructor_param_types, els: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``WhereVertex``: select *thn* where *predicate*, else *els*."""
    return Vertex(context.jvm_view().WhereVertex, label, cast_to_boolean_vertex(predicate), cast_to_vertex(thn), cast_to_vertex(els))
def BooleanProxy(shape: Collection[int], label: str) -> Vertex:
    """Wrap the Java ``BooleanProxyVertex``; *label* is mandatory and becomes the vertex's label."""
    return Boolean(context.jvm_view().BooleanProxyVertex, label, cast_to_long_array(shape), _VertexLabel(label))
def CastNumberToBoolean(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``CastNumberToBooleanVertex`` around a numeric vertex."""
    return Boolean(context.jvm_view().CastNumberToBooleanVertex, label, cast_to_vertex(input_vertex))
def CastToBoolean(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``CastToBooleanVertex`` around a boolean vertex."""
    return Boolean(context.jvm_view().CastToBooleanVertex, label, cast_to_boolean_vertex(input_vertex))
def ConstantBoolean(constant: tensor_arg_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``ConstantBooleanVertex`` around a boolean tensor value."""
    return Boolean(context.jvm_view().ConstantBooleanVertex, label, cast_to_boolean_tensor(constant))
def AndBinary(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``AndBinaryVertex``: logical AND of two boolean vertices."""
    return Boolean(context.jvm_view().AndBinaryVertex, label, cast_to_boolean_vertex(a), cast_to_boolean_vertex(b))
def OrBinary(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``OrBinaryVertex``: logical OR of two boolean vertices."""
    return Boolean(context.jvm_view().OrBinaryVertex, label, cast_to_boolean_vertex(a), cast_to_boolean_vertex(b))
def XorBinary(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``XorBinaryVertex``: logical XOR of two boolean vertices."""
    return Boolean(context.jvm_view().XorBinaryVertex, label, cast_to_boolean_vertex(a), cast_to_boolean_vertex(b))
def Equals(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``EqualsVertex`` comparing vertices *a* and *b*."""
    return Boolean(context.jvm_view().EqualsVertex, label, cast_to_vertex(a), cast_to_vertex(b))
def GreaterThanOrEqual(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``GreaterThanOrEqualVertex`` comparing vertices *a* and *b*."""
    return Boolean(context.jvm_view().GreaterThanOrEqualVertex, label, cast_to_vertex(a), cast_to_vertex(b))
def GreaterThan(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Wrap the Java ``GreaterThanVertex`` comparing vertices *a* and *b*."""
    return Boolean(context.jvm_view().GreaterThanVertex, label, cast_to_vertex(a), cast_to_vertex(b))
def LessThanOrEqual(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().LessThanOrEqualVertex, label, cast_to_vertex(a), cast_to_vertex(b))
def LessThan(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().LessThanVertex, label, cast_to_vertex(a), cast_to_vertex(b))
def NotEquals(a: vertex_constructor_param_types, b: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().NotEqualsVertex, label, cast_to_vertex(a), cast_to_vertex(b))
def NumericalEquals(a: vertex_constructor_param_types, b: vertex_constructor_param_types, epsilon: float, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().NumericalEqualsVertex, label, cast_to_vertex(a), cast_to_vertex(b), cast_to_double(epsilon))
def BooleanConcatenation(dimension: int, input: Collection[Vertex], label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().BooleanConcatenationVertex, label, cast_to_integer(dimension), cast_to_vertex_array(input))
def BooleanToDoubleMask(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().BooleanToDoubleMaskVertex, label, cast_to_boolean_vertex(input_vertex))
def BooleanToIntegerMask(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Integer(context.jvm_view().BooleanToIntegerMaskVertex, label, cast_to_boolean_vertex(input_vertex))
def AllFalse(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().AllFalseVertex, label, cast_to_boolean_vertex(input_vertex))
def AllTrue(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().AllTrueVertex, label, cast_to_boolean_vertex(input_vertex))
def AnyFalse(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().AnyFalseVertex, label, cast_to_boolean_vertex(input_vertex))
def AnyTrue(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().AnyTrueVertex, label, cast_to_boolean_vertex(input_vertex))
def IsFinite(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().IsFiniteVertex, label, cast_to_vertex(input_vertex))
def IsInfinite(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().IsInfiniteVertex, label, cast_to_vertex(input_vertex))
def IsNaN(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().IsNaNVertex, label, cast_to_vertex(input_vertex))
def IsNegativeInfinity(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().IsNegativeInfinityVertex, label, cast_to_vertex(input_vertex))
def IsPositiveInfinity(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().IsPositiveInfinityVertex, label, cast_to_vertex(input_vertex))
def NotBinary(a: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().NotBinaryVertex, label, cast_to_boolean_vertex(a))
def NotNaN(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Boolean(context.jvm_view().NotNaNVertex, label, cast_to_vertex(input_vertex))
# --- Boolean-valued distribution and debug vertices ---
def Bernoulli(prob_true: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of probTrue to
    a matching shaped Bernoulli.
    :param prob_true: probTrue with same shape as desired Bernoulli tensor or scalar
    """
    return Boolean(context.jvm_view().BernoulliVertex, label, cast_to_double_vertex(prob_true))
def Print(parent: vertex_constructor_param_types, message: str, print_data: bool, label: Optional[str]=None) -> Vertex:
    # Pass-through vertex that prints `message` (and the data, when
    # `print_data` is true) when evaluated; value mirrors `parent`.
    return Vertex(context.jvm_view().PrintVertex, label, cast_to_vertex(parent), message, cast_to_boolean(print_data))
# --- Integer vertex constructors and integer-valued distributions ---
def CastNumberToInteger(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().CastNumberToIntegerVertex, label, cast_to_vertex(input_vertex))
def ConstantInteger(constant: tensor_arg_types, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().ConstantIntegerVertex, label, cast_to_integer_tensor(constant))
# Proxy vertex: a placeholder of the given shape, resolved later by `label`.
def IntegerProxy(shape: Collection[int], label: str) -> Vertex:
    return Integer(context.jvm_view().IntegerProxyVertex, label, cast_to_long_array(shape), _VertexLabel(label))
# NOTE(review): `input` shadows the builtin; kept -- it is part of the
# generated binding's keyword interface.
def IntegerConcatenation(dimension: int, input: Collection[Vertex], label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().IntegerConcatenationVertex, label, cast_to_integer(dimension), cast_to_vertex_array(input))
# Index-of-extremum reductions along `axis`; NaN* variants presumably follow
# NaN-propagating semantics of the underlying JVM vertex -- confirm.
def ArgMax(input_vertex: vertex_constructor_param_types, axis: int, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().ArgMaxVertex, label, cast_to_vertex(input_vertex), cast_to_integer(axis))
def ArgMin(input_vertex: vertex_constructor_param_types, axis: int, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().ArgMinVertex, label, cast_to_vertex(input_vertex), cast_to_integer(axis))
def NaNArgMax(input_vertex: vertex_constructor_param_types, axis: int, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().NaNArgMaxVertex, label, cast_to_vertex(input_vertex), cast_to_integer(axis))
def NaNArgMin(input_vertex: vertex_constructor_param_types, axis: int, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().NaNArgMinVertex, label, cast_to_vertex(input_vertex), cast_to_integer(axis))
# Integer-valued distributions.
def Binomial(p: vertex_constructor_param_types, n: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().BinomialVertex, label, cast_to_double_vertex(p), cast_to_integer_vertex(n))
def Geometric(p: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().GeometricVertex, label, cast_to_double_vertex(p))
def Multinomial(n: vertex_constructor_param_types, p: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().MultinomialVertex, label, cast_to_integer_vertex(n), cast_to_double_vertex(p))
def Poisson(mu: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of mu to
    a matching shaped Poisson.
    :param mu: mu with same shape as desired Poisson tensor or scalar
    """
    return Integer(context.jvm_view().PoissonVertex, label, cast_to_double_vertex(mu))
# NOTE(review): `min`/`max` shadow builtins; kept for interface compatibility.
def UniformInt(min: vertex_constructor_param_types, max: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Integer(context.jvm_view().UniformIntVertex, label, cast_to_integer_vertex(min), cast_to_integer_vertex(max))
# --- Double vertex constructors and continuous distributions ---
def Mod(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().ModVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def CastNumberToDouble(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().CastNumberToDoubleVertex, label, cast_to_vertex(input_vertex))
def ConstantDouble(constant: tensor_arg_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().ConstantDoubleVertex, label, cast_to_double_tensor(constant))
# Proxy vertex: a placeholder of the given shape, resolved later by `label`.
def DoubleProxy(shape: Collection[int], label: str) -> Vertex:
    return Double(context.jvm_view().DoubleProxyVertex, label, cast_to_long_array(shape), _VertexLabel(label))
def Concatenation(dimension: int, operands: Collection[Vertex], label: Optional[str]=None) -> Vertex:
    """
    A vertex that can concatenate any amount of vertices along a given dimension.
    :param dimension: the dimension to concatenate on. This is the only dimension in which sizes may be different. Negative
    dimension indexing is not supported.
    :param operands: the operands vertices to concatenate
    """
    return Double(context.jvm_view().ConcatenationVertex, label, cast_to_integer(dimension), cast_to_vertex_array(operands))
def Beta(alpha: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some tensorShape of alpha and beta to
    a matching tensorShaped Beta.
    :param alpha: the alpha of the Beta with either the same tensorShape as specified for this vertex or a scalar
    :param beta: the beta of the Beta with either the same tensorShape as specified for this vertex or a scalar
    """
    return Double(context.jvm_view().BetaVertex, label, cast_to_double_vertex(alpha), cast_to_double_vertex(beta))
def Cauchy(location: vertex_constructor_param_types, scale: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().CauchyVertex, label, cast_to_double_vertex(location), cast_to_double_vertex(scale))
def ChiSquared(k: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of k to
    a matching shaped ChiSquared.
    :param k: the number of degrees of freedom
    """
    return Double(context.jvm_view().ChiSquaredVertex, label, cast_to_integer_vertex(k))
def Dirichlet(concentration: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Matches a vector of concentration values to a Dirichlet distribution
    :param concentration: the concentration values of the dirichlet
    """
    return Double(context.jvm_view().DirichletVertex, label, cast_to_double_vertex(concentration))
def Exponential(rate: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of rate to matching shaped exponential.
    :param rate: the rate of the Exponential with either the same shape as specified for this vertex or scalar
    """
    return Double(context.jvm_view().ExponentialVertex, label, cast_to_double_vertex(rate))
def Gamma(theta: vertex_constructor_param_types, k: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of theta and k to matching shaped gamma.
    :param theta: the theta (scale) of the Gamma with either the same shape as specified for this vertex
    :param k: the k (shape) of the Gamma with either the same shape as specified for this vertex
    """
    return Double(context.jvm_view().GammaVertex, label, cast_to_double_vertex(theta), cast_to_double_vertex(k))
def Gaussian(mu: vertex_constructor_param_types, sigma: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().GaussianVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(sigma))
def HalfCauchy(scale: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().HalfCauchyVertex, label, cast_to_double_vertex(scale))
def HalfGaussian(sigma: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().HalfGaussianVertex, label, cast_to_double_vertex(sigma))
def InverseGamma(alpha: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of alpha and beta to
    a matching shaped Inverse Gamma.
    :param alpha: the alpha of the Inverse Gamma with either the same shape as specified for this vertex or a scalar
    :param beta: the beta of the Inverse Gamma with either the same shape as specified for this vertex or a scalar
    """
    return Double(context.jvm_view().InverseGammaVertex, label, cast_to_double_vertex(alpha), cast_to_double_vertex(beta))
# Kernel density estimate built from observed samples with the given bandwidth.
def KDE(samples: tensor_arg_types, bandwidth: float, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().KDEVertex, label, cast_to_double_tensor(samples), cast_to_double(bandwidth))
def Laplace(mu: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of mu and beta to
    a matching shaped Laplace.
    :param mu: the mu of the Laplace with either the same shape as specified for this vertex or a scalar
    :param beta: the beta of the Laplace with either the same shape as specified for this vertex or a scalar
    """
    return Double(context.jvm_view().LaplaceVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(beta))
def LogNormal(mu: vertex_constructor_param_types, sigma: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().LogNormalVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(sigma))
def Logistic(mu: vertex_constructor_param_types, s: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().LogisticVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(s))
def MultivariateGaussian(mu: vertex_constructor_param_types, covariance: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Matches a mu and full covariance matrix of some shape to a Multivariate Gaussian distribution. Mu should
    be shape (batchShape, N) where N is the number of dimensions and batchShape can be any shape that is broadcastable
    with the covariance batchShape if it is also batched. The covariance matrix should be shape (batchShape, N, N) where
    the batchShape must be broadcastable with the batchShape of mu. Only the lower triangle of the covariance matrix
    is used due to it being assumed to be a symmetric matrix. The upper triangle will be ignored.
    :param mu: the mu of the Multivariate Gaussian
    :param covariance: the covariance matrix of the Multivariate Gaussian
    """
    return Double(context.jvm_view().MultivariateGaussianVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(covariance))
def Pareto(location: vertex_constructor_param_types, scale: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().ParetoVertex, label, cast_to_double_vertex(location), cast_to_double_vertex(scale))
def StudentT(v: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Double(context.jvm_view().StudentTVertex, label, cast_to_integer_vertex(v))
def Triangular(x_min: vertex_constructor_param_types, x_max: vertex_constructor_param_types, c: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of xMin, xMax and c to a matching shaped triangular.
    :param x_min: the xMin of the Triangular with either the same shape as specified for this vertex or a scalar
    :param x_max: the xMax of the Triangular with either the same shape as specified for this vertex or a scalar
    :param c: the c of the Triangular with either the same shape as specified for this vertex or a scalar
    """
    return Double(context.jvm_view().TriangularVertex, label, cast_to_double_vertex(x_min), cast_to_double_vertex(x_max), cast_to_double_vertex(c))
def Uniform(x_min: vertex_constructor_param_types, x_max: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of x_min and x_max to
    a matching shaped Uniform Vertex
    :param x_min: the inclusive lower bound of the Uniform with either the same shape as specified for this vertex or a scalar
    :param x_max: the exclusive upper bound of the Uniform with either the same shape as specified for this vertex or a scalar
    """
    return Double(context.jvm_view().UniformVertex, label, cast_to_double_vertex(x_min), cast_to_double_vertex(x_max))
# --- Element-wise math operator vertices (generic Vertex results) ---
def ArcTan2(x: vertex_constructor_param_types, y: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates the signed angle, in radians, between the positive x-axis and a ray to the point (x, y) from the origin
    :param x: x coordinate
    :param y: y coordinate
    """
    return Vertex(context.jvm_view().ArcTan2Vertex, label, cast_to_vertex(x), cast_to_vertex(y))
def LogAddExp2(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().LogAddExp2Vertex, label, cast_to_vertex(left), cast_to_vertex(right))
def LogAddExp(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().LogAddExpVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def SafeLogTimes(x: vertex_constructor_param_types, y: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().SafeLogTimesVertex, label, cast_to_vertex(x), cast_to_vertex(y))
def ArcCos(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse cosine of a vertex, Arccos(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcCosVertex, label, cast_to_vertex(input_vertex))
def ArcCosh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcCoshVertex, label, cast_to_vertex(input_vertex))
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse sin of a vertex, Arcsin(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcSinVertex, label, cast_to_vertex(input_vertex))
def ArcSinh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcSinhVertex, label, cast_to_vertex(input_vertex))
def ArcTan(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse tan of a vertex, Arctan(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcTanVertex, label, cast_to_vertex(input_vertex))
def ArcTanh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcTanhVertex, label, cast_to_vertex(input_vertex))
def Ceil(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the Ceiling operator to a vertex.
    This maps a vertex to the smallest integer greater than or equal to its value
    :param input_vertex: the vertex to be ceil'd
    """
    return Vertex(context.jvm_view().CeilVertex, label, cast_to_vertex(input_vertex))
def CholeskyDecomposition(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CholeskyDecompositionVertex, label, cast_to_vertex(input_vertex))
def CholeskyInverse(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CholeskyInverseVertex, label, cast_to_vertex(input_vertex))
def Cos(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the cosine of a vertex, Cos(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CosVertex, label, cast_to_vertex(input_vertex))
def Cosh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CoshVertex, label, cast_to_vertex(input_vertex))
def Digamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().DigammaVertex, label, cast_to_vertex(input_vertex))
def Exp2(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Exp2Vertex, label, cast_to_vertex(input_vertex))
def ExpM1(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ExpM1Vertex, label, cast_to_vertex(input_vertex))
def Exp(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates the exponential of an input vertex
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ExpVertex, label, cast_to_vertex(input_vertex))
def Floor(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the Floor operator to a vertex.
    This maps a vertex to the biggest integer less than or equal to its value
    :param input_vertex: the vertex to be floor'd
    """
    return Vertex(context.jvm_view().FloorVertex, label, cast_to_vertex(input_vertex))
def Log10(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Log10Vertex, label, cast_to_vertex(input_vertex))
def Log1p(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Log1pVertex, label, cast_to_vertex(input_vertex))
def Log2(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Log2Vertex, label, cast_to_vertex(input_vertex))
def LogGamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns the log of the gamma of the inputVertex
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().LogGammaVertex, label, cast_to_vertex(input_vertex))
def Log(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns the natural logarithm, base e, of a vertex
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().LogVertex, label, cast_to_vertex(input_vertex))
# --- Matrix and statistics unary operator vertices ---
def MatrixDeterminant(vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().MatrixDeterminantVertex, label, cast_to_vertex(vertex))
def MatrixInverse(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().MatrixInverseVertex, label, cast_to_vertex(input_vertex))
def Mean(input_vertex: vertex_constructor_param_types, over_dimensions: Collection[int], label: Optional[str]=None) -> Vertex:
    """
    Performs a mean across specified dimensions. Negative dimension indexing is not supported.
    :param input_vertex: the vertex to have its values averaged
    :param over_dimensions: dimensions to average over
    """
    return Vertex(context.jvm_view().MeanVertex, label, cast_to_vertex(input_vertex), cast_to_int_array(over_dimensions))
def ReplaceNaN(input_vertex: vertex_constructor_param_types, replace_with_value: float, label: Optional[str]=None) -> Vertex:
    # Replaces NaN elements with `replace_with_value`.
    return Vertex(context.jvm_view().ReplaceNaNVertex, label, cast_to_vertex(input_vertex), cast_to_double(replace_with_value))
def Round(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the Rounding operator to a vertex.
    This maps a vertex to the nearest integer value
    :param input_vertex: the vertex to be rounded
    """
    return Vertex(context.jvm_view().RoundVertex, label, cast_to_vertex(input_vertex))
def Sigmoid(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the sigmoid function to a vertex.
    The sigmoid function is a special case of the Logistic function.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().SigmoidVertex, label, cast_to_vertex(input_vertex))
def Sin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the sine of a vertex. Sin(vertex).
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().SinVertex, label, cast_to_vertex(input_vertex))
def Sinh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().SinhVertex, label, cast_to_vertex(input_vertex))
def StandardDeviation(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().StandardDeviationVertex, label, cast_to_vertex(input_vertex))
def Standardize(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().StandardizeVertex, label, cast_to_vertex(input_vertex))
def Tan(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the tangent of a vertex. Tan(vertex).
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().TanVertex, label, cast_to_vertex(input_vertex))
def Tanh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().TanhVertex, label, cast_to_vertex(input_vertex))
def Trigamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().TrigammaVertex, label, cast_to_vertex(input_vertex))
# --- Binary arithmetic, mask and tensor-product operator vertices ---
def Addition(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Adds one vertex to another
    :param left: a vertex to add
    :param right: a vertex to add
    """
    return Vertex(context.jvm_view().AdditionVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def Difference(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().DifferenceVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def Division(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Divides one vertex by another
    :param left: the vertex to be divided
    :param right: the vertex to divide
    """
    return Vertex(context.jvm_view().DivisionVertex, label, cast_to_vertex(left), cast_to_vertex(right))
# Comparison masks: numeric 0/1 result of comparing `left` to `right`.
def GreaterThanMask(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().GreaterThanMaskVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def GreaterThanOrEqualToMask(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().GreaterThanOrEqualToMaskVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def LessThanMask(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().LessThanMaskVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def LessThanOrEqualToMask(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().LessThanOrEqualToMaskVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def MatrixMultiplication(left: vertex_constructor_param_types, right: vertex_constructor_param_types, transpose_left: bool, transpose_right: bool, label: Optional[str]=None) -> Vertex:
    """
    Matrix multiplies one vertex by another. C = AB
    :param left: vertex A
    :param right: vertex B
    :param transpose_left: transpose the left operand before multiply
    :param transpose_right: transpose the right operand before multiply
    """
    return Vertex(context.jvm_view().MatrixMultiplicationVertex, label, cast_to_vertex(left), cast_to_vertex(right), cast_to_boolean(transpose_left), cast_to_boolean(transpose_right))
def Max(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Finds the maximum between two vertices
    :param left: one of the vertices to find the maximum of
    :param right: one of the vertices to find the maximum of
    """
    return Vertex(context.jvm_view().MaxVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def Min(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Finds the minimum between two vertices
    :param left: one of the vertices to find the minimum of
    :param right: one of the vertices to find the minimum of
    """
    return Vertex(context.jvm_view().MinVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def Multiplication(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Multiplies one vertex by another
    :param left: vertex to be multiplied
    :param right: vertex to be multiplied
    """
    return Vertex(context.jvm_view().MultiplicationVertex, label, cast_to_vertex(left), cast_to_vertex(right))
def Power(base: vertex_constructor_param_types, exponent: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Raises a vertex to the power of another
    :param base: the base vertex
    :param exponent: the exponent vertex
    """
    return Vertex(context.jvm_view().PowerVertex, label, cast_to_vertex(base), cast_to_vertex(exponent))
def TensorMultiplication(left: vertex_constructor_param_types, right: vertex_constructor_param_types, dims_left: Collection[int], dims_right: Collection[int], label: Optional[str]=None) -> Vertex:
    """
    Tensor multiplies one vertex by another. C = AB.
    :param left: the left vertex for operand
    :param right: the right vertex for operand
    :param dims_left: The dimensions of the left for multiplying. The left shape at these dimensions must align with the
    shape of the corresponding right vertex at its specified dimensions.
    :param dims_right: The dimensions of the right for multiplying. The right shape at these dimensions must align with the
    shape of the corresponding left vertex at its specified dimensions.
    """
    return Vertex(context.jvm_view().TensorMultiplicationVertex, label, cast_to_vertex(left), cast_to_vertex(right), cast_to_int_array(dims_left), cast_to_int_array(dims_right))
# Writes `set_value` into `operand` wherever `mask` is set.
def SetWithMask(operand: vertex_constructor_param_types, mask: vertex_constructor_param_types, set_value: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    return Vertex(context.jvm_view().SetWithMaskVertex, label, cast_to_vertex(operand), cast_to_vertex(mask), cast_to_vertex(set_value))
def Abs(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Takes the absolute of a vertex
:param input_vertex: the vertex
"""
return Vertex(context.jvm_view().AbsVertex, label, cast_to_vertex(input_vertex))
def CumProd(input_vertex: vertex_constructor_param_types, requested_dimension: int, label: Optional[str]=None) -> Vertex:
return Vertex(context.jvm_view().CumProdVertex, label, cast_to_vertex(input_vertex), cast_to_integer(requested_dimension))
def CumSum(input_vertex: vertex_constructor_param_types, requested_dimension: int, label: Optional[str]=None) -> Vertex:
return Vertex(context.jvm_view().CumSumVertex, label, cast_to_vertex(input_vertex), cast_to_integer(requested_dimension))
def MaxUnary(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Vertex(context.jvm_view().MaxUnaryVertex, label, cast_to_vertex(input_vertex))
def MinUnary(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Vertex(context.jvm_view().MinUnaryVertex, label, cast_to_vertex(input_vertex))
def Product(input_vertex: vertex_constructor_param_types, over_dimensions: Collection[int], label: Optional[str]=None) -> Vertex:
return Vertex(context.jvm_view().ProductVertex, label, cast_to_vertex(input_vertex), cast_to_int_array(over_dimensions))
def Sign(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """Take the element-wise sign of a vertex.

    :param input_vertex: the vertex to transform
    """
    vertex_class = context.jvm_view().SignVertex
    return Vertex(vertex_class, label, cast_to_vertex(input_vertex))
def Sum(input_vertex: vertex_constructor_param_types, over_dimensions: Collection[int], label: Optional[str]=None) -> Vertex:
    """Sum a vertex across the specified dimensions.

    Negative dimension indexing is not supported.

    :param input_vertex: the vertex to have its values summed
    :param over_dimensions: the dimensions to sum over
    """
    vertex_class = context.jvm_view().SumVertex
    return Vertex(vertex_class, label, cast_to_vertex(input_vertex),
                  cast_to_int_array(over_dimensions))
def Assert(predicate: vertex_constructor_param_types, error_message: str, label: Optional[str]=None) -> Vertex:
    """Build a vertex that asserts a {@link BooleanVertex} is all true on calculation.

    :param predicate: the predicate to evaluate
    :param error_message: a message to include in the {@link AssertionError}
    """
    vertex_class = context.jvm_view().AssertVertex
    return Boolean(vertex_class, label, cast_to_boolean_vertex(predicate),
                   error_message)
| StarcoderdataPython |
3381616 | <gh_stars>0
import os
import sys
import itertools

# Clear the terminal in a platform-dependent way before the interactive menu.
if sys.platform == "linux" or sys.platform == "linux2":
    os.system("clear")
elif sys.platform == "win32":
    os.system("cls")

# Module-level state shared by the functions below:
# strings      - the character/word pool fed to itertools.product
# user_repeats - the current combination length
# user_wdlist  - the output wordlist file name
strings = []
user_repeats = ""
user_wdlist = "<PASSWORD>"
def strings_control():
    """Populate the global character pool used for the default (non-custom) run.

    The pool is lower/upper case letters plus a few symbols; note the
    ordering deliberately matches the original list ('z' before 'y'),
    which determines the order of generated candidates.
    """
    global strings
    lowercase = "abcdefghijklmnopqrstuvwxzy"
    symbols = ['*', '&', '.', ',', '/']
    strings = list(lowercase) + list(lowercase.upper()) + symbols
def user_custom():
    """Interactively configure a custom run.

    Mutates the module globals: ``user_repeats`` (combination length),
    ``user_wdlist`` (output file name) and appends user-supplied words to
    ``strings``. Exits the program when no output file name is entered.
    """
    global user_repeats, user_wdlist
    user_repeats = int(input("\033[1;32m Enter Length: \033[1;m"))
    user_wdlist = input("\033[1;32m wordlist: \033[1;m")
    if not user_wdlist:
        print("\033[1;32m Error wordlist not Entered\033[1;m")
        sys.exit()
    while True:
        word = input("\033[1;32m Enter Words then enter --done when your finished: \033[1;m")
        if word == "--done":
            break
        strings.append(word)
def mainfunc(userrange, userrange1, userchoice=0):
    """Generate candidate strings and append them to the wordlist file.

    Runs one pass per value in ``range(userrange, userrange1)``. When
    ``userchoice`` is non-zero, the global length ``user_repeats`` is
    incremented before each pass, so successive passes emit progressively
    longer strings.

    Reads the module globals ``strings``, ``user_wdlist`` and
    ``user_repeats``; writes one candidate per line to ``user_wdlist``.

    :param userrange: start of the pass range
    :param userrange1: end (exclusive) of the pass range
    :param userchoice: non-zero => grow ``user_repeats`` on each pass
    """
    global user_wdlist, user_repeats
    for _ in range(userrange, userrange1):
        if userchoice != 0:
            user_repeats += 1
        # 'with' guarantees the file handle is closed even if the run is
        # interrupted mid-pass (the original left the handle open on error).
        with open(user_wdlist, 'a+') as opnr:
            for combo in itertools.product(''.join(strings), repeat=user_repeats):
                chars = ''.join(combo)
                opnr.write(chars + "\n")
                # Progress banner (fixed the '[+}' typo to '[+]').
                sys.stdout.write("\r \033[1;32m [+]Writing %s: %s Length: %s\033[1;m"
                                 % (user_wdlist, chars, user_repeats))
                sys.stdout.flush()
def user_select():
    """Top-level menu: choose between the default run and a custom wordlist."""
    global user_repeats
    choice = input("\033[1;32m Do you want custom wordlist y/n \033[1;m")
    if choice == "y":
        user_custom()
        mainfunc(0, 1)
    elif choice == "n":
        user_repeats = 2
        strings_control()
        mainfunc(2, 11, 1)
# Run the interactive menu only when executed as a script.
# (Removed the dataset-artifact text that was fused onto this line and
# made the file unparsable.)
if __name__ == "__main__":
    user_select()
9766384 | <gh_stars>0
import argparse
import numpy as np
from scipy import ndimage
import h5py
class Clefts:
    """Distance-based comparison of a predicted vs. a ground-truth cleft volume.

    Both inputs are label volumes in which 0 means background and any
    non-zero value marks a cleft voxel. Euclidean distance transforms are
    precomputed once (with the anisotropic sampling below) so that the
    false-positive/negative counts are cheap threshold queries.
    """

    def __init__(self, test, truth):
        # Physical sampling per axis used by the distance transforms
        # (presumably (z, y, x) voxel size in nm -- confirm with the data).
        self.resolution = (40.0, 8.0, 8.0)
        # Binary background masks: 1 where there is NO cleft.
        self.test_clefts_mask = (test == 0).astype(int)
        self.truth_clefts_mask = (truth == 0).astype(int)
        # For each voxel, distance to the nearest predicted / true cleft voxel.
        self.test_clefts_edt = ndimage.distance_transform_edt(
            self.test_clefts_mask, sampling=self.resolution)
        self.truth_clefts_edt = ndimage.distance_transform_edt(
            self.truth_clefts_mask, sampling=self.resolution)

    def count_false_positives(self, threshold=200):
        """Count predicted-cleft voxels farther than *threshold* from any true cleft."""
        predicted = 1 - self.test_clefts_mask
        far_from_truth = self.truth_clefts_edt > threshold
        return np.sum(predicted * far_from_truth)

    def count_false_negatives(self, threshold=200):
        """Count true-cleft voxels farther than *threshold* from any predicted cleft."""
        actual = 1 - self.truth_clefts_mask
        far_from_prediction = self.test_clefts_edt > threshold
        return np.sum(actual * far_from_prediction)
def get_args():
    """Parse command-line arguments: prediction and ground-truth file paths."""
    cli = argparse.ArgumentParser(description='Training Synapse Detection Model')
    # I/O
    cli.add_argument('-p', '--prediction', type=str, help='prediction path')
    cli.add_argument('-g', '--groundtruth', type=str, help='groundtruth path')
    return cli.parse_args()
def main():
    """Load prediction and ground-truth volumes, binarize them, and report
    distance-thresholded false positive/negative cleft counts."""
    args = get_args()
    print('0. load data')
    # Load prediction, crop a border margin, rescale to uint8 and binarize
    # at intensity 200 (values below 200 are treated as background).
    test = h5py.File(name=args.prediction, mode='r', libver='latest')['main']
    test = np.array(test)[14:-14, 200:-200, 200:-200]
    test = (test*255).astype(np.uint8)
    test[test < 200] = 0
    test = (test != 0).astype(np.uint8)
    # Ground truth: transpose axes to match the prediction layout
    # (presumably stored (x, y, z) -> (z, y, x); confirm with the data),
    # then crop the same margin and binarize.
    truth = h5py.File(name=args.groundtruth, mode='r', libver='latest')['main']
    truth = np.array(truth)
    truth = truth.transpose((2,1,0))
    truth = truth[14:-14, 200:-200, 200:-200]
    truth = (truth != 0).astype(np.uint8)
    assert (test.shape == truth.shape)
    print('volume shape:', test.shape)
    print('volume dtype:', test.dtype)
    print('number of pixels:', np.prod(test.shape))
    print('1. start evaluation')
    # Clefts precomputes the distance transforms; the counts use the
    # default 200 (nm) distance threshold.
    clefts_evaluation = Clefts(test, truth)
    false_positive_count = clefts_evaluation.count_false_positives()
    false_negative_count = clefts_evaluation.count_false_negatives()
    print('Clefts')
    print('======')
    print('\tfalse positives: ' + str(false_positive_count))
    print('\tfalse negatives: ' + str(false_negative_count))
    # false_positive_stats = clefts_evaluation.acc_false_positives()
    # false_negative_stats = clefts_evaluation.acc_false_negatives()
    # print('\tdistance to ground truth: ' + str(false_positive_stats))
    # print('\tdistance to proposal    : ' + str(false_negative_stats))

if __name__ == "__main__":
    main()
| StarcoderdataPython |
6581022 | #!/usr/bin/python3
import logging
import os

from db_manager.database import managed_connection
import db_manager.dimCountry
import db_manager.dimManuscript
import db_manager.dimManuscriptVersion
import db_manager.dimManuscriptVersionHistory
import db_manager.dimPerson
import db_manager.dimPersonRole
# Log everything at INFO and above to a scratch file, overwriting it on
# each run (filemode='w').
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)-40s %(levelname)-8s %(message)s',
                    datefmt='%H:%M:%S',
                    filename='/tmp/dbmanager.log',
                    filemode='w')
LOGGER = logging.getLogger(__name__)
with managed_connection() as conn:
    # Rebuild the schema from scratch: run the teardown script, then create.
    sqlTearDown = open("sql_scripts/teardown.sql", "r").read()
    sqlCreate = open("sql_scripts/create.sql", "r").read()
    with conn.cursor() as c:
        c.execute(sqlTearDown)
        c.execute(sqlCreate)
    conn.commit()

    ###
    ### Example staging csv's and applying changes
    ###
    ################################################################################
    # A Manuscript is composed of it's versions and VersionHistory too.
    #
    # This means that those must all be staged before applying changes
    # (which will be cascaded down to the child entities)
    #
    # Applying changes to a manuscript without loading the Version information is
    # equivalent to explicitly stating that a manuscript has no Versions.
    ################################################################################
    LOGGER.info("NEW FILES...")
    db_manager.dimManuscript.stage_csv(
        conn,
        "dummy_csv/1526868166_manuscripts.csv"
    )
    db_manager.dimManuscriptVersion.stage_csv(
        conn,
        "dummy_csv/1526868166_versions.csv"
    )
    db_manager.dimManuscriptVersionHistory.stage_csv(
        conn,
        "dummy_csv/1526868166_stages.csv"
    )
    db_manager.dimManuscript.applyChanges(conn, None)

    ### relabel countries with different externalReferences, to have the same label
    LOGGER.info("NEW FILES...")
    db_manager.dimCountry.stage_csv(conn, "dummy_csv/1526868166_country_relabel.csv")
    db_manager.dimCountry.applyChanges(conn, None)

    ### load persons
    LOGGER.info("NEW FILES...")
    db_manager.dimPerson.stage_csv(
        conn,
        "dummy_csv/1500000000_persons.csv"
    )
    db_manager.dimPersonRole.stage_csv(
        conn,
        "dummy_csv/1500000000_person_roles.csv"
    )
    db_manager.dimPerson.applyChanges(conn, None)

    # NOTE(review): the next stanza repeats the 1500000000 load verbatim --
    # possibly a deliberate idempotency exercise; confirm intent.
    LOGGER.info("NEW FILES...")
    db_manager.dimPerson.stage_csv(
        conn,
        "dummy_csv/1500000000_persons.csv"
    )
    db_manager.dimPersonRole.stage_csv(
        conn,
        "dummy_csv/1500000000_person_roles.csv"
    )
    db_manager.dimPerson.applyChanges(conn, None)

    # Load the later (1600000000) person snapshot on top of the first.
    LOGGER.info("NEW FILES...")
    db_manager.dimPerson.stage_csv(
        conn,
        "dummy_csv/1600000000_persons.csv"
    )
    db_manager.dimPersonRole.stage_csv(
        conn,
        "dummy_csv/1600000000_person_roles.csv"
    )
    db_manager.dimPerson.applyChanges(conn, None)
276142 | #!/usr/bin/env python
# coding: utf-8
# In given array find the duplicate odd number .
#
# Note: There is only one duplicate odd number
#
# <b> Ex [1,4,6,3,1] should return 1 </b>
# In[3]:
def dup_odd_num(num):
    """Return the odd number that appears more than once in *num*.

    Returns False when no odd number is duplicated (including for an
    empty or all-even input). The original implementation returned the
    second odd number *seen* rather than a genuinely duplicated value,
    so [1, 4, 6, 3, 1] wrongly yielded 3 instead of 1.
    """
    counts = {}
    for value in num:
        if value % 2 != 0:
            counts[value] = counts.get(value, 0) + 1
            # First value to reach two occurrences is the duplicate.
            if counts[value] > 1:
                return value
    return False

print(dup_odd_num([1, 3, 2, 3]))
# In[ ]:
def dup_odd_num(num):
    """Return the duplicated odd number in *num*, or False if there is none.

    The original dict-based version stored only the *last* odd number it
    saw and returned it unconditionally (and raised KeyError when the
    input contained no odd number at all); this version tallies odd
    occurrences and returns the first value seen twice.
    """
    seen = {}
    for value in num:
        if value % 2 != 0:
            seen[value] = seen.get(value, 0) + 1
            if seen[value] > 1:
                return value
    return False

# In[28]:
print(dup_odd_num([3, 4, 6, 8]))
| StarcoderdataPython |
1940597 | from tensorflow.test import TestCase
from groco.groups import wallpaper_group_dict
import tensorflow as tf
from groco.utils import test_equivariance
class TestWallpaperGroup(TestCase):
    """Algebraic and equivariance sanity checks for the wallpaper groups.

    Fix in this revision: ``test_action_shape`` compared against
    ``(group.order) + signal.shape`` -- a plain int, not a 1-tuple -- unlike
    the sibling ``test_action_on_subgroup_shape`` which correctly uses
    ``(group.order, )``. The missing comma has been added.
    """

    def test_inverse_comp(self):
        """
        The composition attribute gives the composition of an inverse with another group element,
        so for the inverse to be correct the diagonal needs to be the identity.
        """
        for group in wallpaper_group_dict.values():
            identities = tf.reshape(
                tf.concat([group.composition[r][c] for r, c in enumerate(group.inverses)], axis=0),
                (group.order,))
            self.assertAllEqual(identities, tf.zeros(group.order, dtype=tf.int32))

    def test_subgroups(self):
        """
        A subset H of a group G is a subgroup if and only if for all g, h in H, g^-1 h is also in H.
        """
        for group in wallpaper_group_dict.values():
            for subgroup_name, subgroup_indices in group.subgroup.items():
                subgroup_invs = [group.inverses[i] for i in subgroup_indices]
                subgroup_composition = tf.gather(group.composition, axis=0, indices=subgroup_invs)
                subgroup_composition = tf.gather(subgroup_composition, axis=1, indices=subgroup_indices)
                elements, _ = tf.unique(tf.reshape(subgroup_composition, [-1]))
                elements = tf.sort(elements)
                msg = f'Subgroup {subgroup_name} not closed in group {group.name}'
                self.assertAllEqual(elements, tf.sort(subgroup_indices), msg=msg)

    def test_cosets_identity(self):
        """Cosets contain identity."""
        for group in wallpaper_group_dict.values():
            for coset_name, coset in group.cosets.items():
                msg = f'Coset {coset_name} of group {group.name} does not contain identity.'
                self.assertEqual(tf.reduce_min(coset), tf.constant([0]), msg=msg)

    def test_cosets_size(self):
        """Number of cosets times corresponding group order equals the full group order."""
        for group in wallpaper_group_dict.values():
            for coset_name, coset in group.cosets.items():
                subgroup = wallpaper_group_dict[coset_name]
                msg = f'Cosets of subgroup {coset_name} of group {group.name} not the right size.'
                self.assertEqual(group.order, subgroup.order * len(coset), msg=msg)

    def test_cosets_unique(self):
        """
        Check that multiplying the subgroup with its cosets recovers the full group.
        """
        for group in wallpaper_group_dict.values():
            for coset_name, coset in group.cosets.items():
                subgroup_indices = group.subgroup[coset_name]
                products = tf.gather(group.composition, axis=1, indices=coset)
                subgroup_inverses = tf.gather(group.inverses, axis=0, indices=subgroup_indices)
                products = tf.gather(products, axis=0, indices=subgroup_inverses)
                products = tf.sort(tf.reshape(products, [-1]))
                msg = f'Subgroup {coset_name} multiplied with its cosets does not recover full group {group.name}.'
                self.assertAllEqual(tf.range(group.order), products, msg=msg)

    def test_action_composition(self):
        """Acting twice on a grid signal matches the group's composition table."""
        signal = tf.random.normal((28, 28, 3), seed=42)
        for group in wallpaper_group_dict.values():
            g_signal = group.action(signal, spatial_axes=[0, 1], new_group_axis=2, domain_group=None)
            for gi in range(group.order):
                gi_signal = tf.gather(g_signal, axis=2, indices=[gi])
                gi_signal = tf.reshape(gi_signal, (28, 28, 3))
                h_gi_signal = group.action(gi_signal, spatial_axes=[0, 1], new_group_axis=2, domain_group=None)
                h_gi = tf.reshape(tf.gather(group.composition, axis=1, indices=[gi]), (group.order))
                h_gi_at_signal = group.action(signal, spatial_axes=[0, 1], new_group_axis=2, domain_group=None)
                h_gi_at_signal = tf.gather(h_gi_at_signal, axis=2, indices=h_gi)
                msg = f'Action of {group.name} not compatible with its composition.'
                self.assertAllEqual(h_gi_signal, h_gi_at_signal, msg=msg)

    def test_action_shape(self):
        """Acting with the full group prepends a group axis of size group.order."""
        signal = tf.random.normal((28, 28, 3), seed=42)
        for group in wallpaper_group_dict.values():
            g_signal = group.action(signal, spatial_axes=[0, 1], new_group_axis=0, domain_group=None)
            # (group.order,) -- fixed: the original '(group.order)' was an
            # int, not a tuple, so no shape concatenation took place.
            self.assertEqual(g_signal.shape, (group.order,) + signal.shape)

    def test_action_on_subgroup_shape(self):
        """Acting on a subgroup-valued signal prepends a full-group axis."""
        for group in wallpaper_group_dict.values():
            for subgroup_name, subgroup_indices in group.subgroup.items():
                signal = tf.random.normal((28, 28, len(subgroup_indices), 3))
                g_signal = group.action(signal, spatial_axes=[0, 1], new_group_axis=0, domain_group=subgroup_name,
                                        acting_group=group.name, group_axis=2)
                self.assertEqual(g_signal.shape, (group.order, ) + signal.shape)

    def test_action_on_signal_composition(self):
        """Same composition check as above, with the new group axis placed last."""
        signal = tf.random.normal((28, 28, 3), seed=42)
        new_group_axis = 3
        for group in wallpaper_group_dict.values():
            g_signal = group.action(signal, new_group_axis=new_group_axis, spatial_axes=[0, 1], domain_group=None)
            for gi in range(group.order):
                gi_signal = tf.reshape(tf.gather(g_signal, axis=new_group_axis, indices=[gi]), signal.shape)
                h_gi_signal = group.action(gi_signal, new_group_axis=new_group_axis, spatial_axes=[0, 1], domain_group=None)
                h_gi = tf.reshape(tf.gather(group.composition, axis=1, indices=[gi]), (group.order))
                h_gi_at_signal = tf.gather(
                    group.action(signal, new_group_axis=new_group_axis, spatial_axes=[0, 1], domain_group=None),
                    axis=new_group_axis, indices=h_gi)
                msg = f'Action of {group.name} not compatible with its composition.'
                self.assertAllEqual(h_gi_signal, h_gi_at_signal, msg=msg)

    def test_action_on_group_composition(self):
        """Composition check for signals that already carry a group axis."""
        new_group_axis = 3
        for group in wallpaper_group_dict.values():
            signal = tf.random.normal((28, 28, group.order, 3), seed=42)
            g_signal = group.action(signal, spatial_axes=[0, 1], group_axis=2, new_group_axis=new_group_axis)
            for gi in range(group.order):
                gi_signal = tf.reshape(tf.gather(g_signal, axis=new_group_axis, indices=[gi]), signal.shape)
                h_gi_signal = group.action(
                    gi_signal, spatial_axes=[0, 1], group_axis=2, new_group_axis=new_group_axis)
                h_gi = tf.reshape(tf.gather(group.composition, axis=1, indices=[gi]), (group.order))
                h_gi_at_signal = tf.gather(
                    group.action(signal, spatial_axes=[0, 1], group_axis=2, new_group_axis=new_group_axis),
                    axis=new_group_axis, indices=h_gi)
                msg = f'Action of {group.name} not compatible with its composition.'
                self.assertAllEqual(h_gi_signal, h_gi_at_signal, msg=msg)

    def test_subgroup_action_on_grid(self):
        """A subgroup's action equals the matching slice of the full group's action."""
        signal = tf.random.normal((28, 28, 3))
        for group in wallpaper_group_dict.values():
            g_signal = group.action(signal, spatial_axes=[0, 1], new_group_axis=0, domain_group=None)
            for subgroup_name, subgroup_indices in group.subgroup.items():
                subgroup = wallpaper_group_dict[subgroup_name]
                h_signal = subgroup.action(signal, spatial_axes=[0, 1], new_group_axis=0, domain_group=None)
                g_signal_sub = tf.gather(g_signal, axis=0, indices=subgroup_indices)
                msg = f'Action of subgroup {subgroup_name} on signal on grid not the same as corresponding indices in action of full group {group.name}'
                self.assertAllEqual(h_signal, g_signal_sub, msg=msg)

    def test_subgroups_cosets(self):
        """Test only if the keys are the same."""
        for group in wallpaper_group_dict.values():
            self.assertAllEqual(set(group.subgroup.keys()), set(group.cosets.keys()))

    def test_upsample_downsample(self):
        """Upsampling a subgroup signal and re-gathering its indices round-trips."""
        for group in wallpaper_group_dict.values():
            for subgroup_name, subgroup_indices in group.subgroup.items():
                subgroup_signal = tf.random.normal((1, 28, 28, len(subgroup_indices), 3))
                group_signal = group.upsample(subgroup_signal, group_axis=3, domain_group=subgroup_name)
                subgroup_signal_reconstructed = tf.gather(group_signal, axis=3, indices=group.subgroup[subgroup_name])
                msg = f"Upsampling with zeroes from subgroup {subgroup_name} to {group.name} and back doesn't give the same"
                self.assertAllEqual(subgroup_signal, subgroup_signal_reconstructed, msg=msg)

    def test_domain_group_action(self):
        """A subgroup acting on its own signal equals the parent-group call with matching arguments."""
        for group in wallpaper_group_dict.values():
            for subgroup_name, subgroup_indices in group.subgroup.items():
                subgroup_signal = tf.random.normal((1, 28, 28, len(subgroup_indices), 3))
                subgroup = wallpaper_group_dict[subgroup_name]
                action_1 = subgroup.action(subgroup_signal, spatial_axes=(1, 2), group_axis=3, new_group_axis=0)
                action_2 = group.action(subgroup_signal, spatial_axes=(1, 2), group_axis=3, new_group_axis=0,
                                        domain_group=subgroup_name, acting_group=subgroup_name)
                self.assertAllEqual(action_1, action_2)

    def test_upsample_equiv(self):
        """Upsampling from each subgroup to the full group is equivariant."""
        for group in wallpaper_group_dict.values():
            for subgroup_name, subgroup_indices in group.subgroup.items():
                subgroup_signal = tf.random.normal((1, 28, 28, len(subgroup_indices), 3))
                layer = lambda s: group.upsample(s, group_axis=3, domain_group=subgroup_name)
                equiv_diff = test_equivariance(
                    layer, subgroup_signal, group_axis=3, spatial_axes=(1, 2),
                    group=group, domain_group=subgroup_name, target_group=group.name, acting_group=subgroup_name)
                self.assertAllLess(equiv_diff, 1e-4)
# Run the test suite. (Removed the dataset-artifact text that was fused
# onto this line and made the file unparsable.)
tf.test.main()
12848300 | <filename>meiduo_mall/apps/orders/views.py
import json
from datetime import datetime
from decimal import Decimal
from django import http
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection
from apps.goods.models import SKU
from apps.orders.models import OrderInfo, OrderGoods
from apps.users.models import Address, User
from meiduo_mall.settings.dev import logger
from utils.response_code import RETCODE
class OrderSettlementView(LoginRequiredMixin,View):
    """Render the order-settlement ("place order") page from the user's
    Redis cart, showing only the items marked as selected."""

    def get(self, request):
        user = request.user
        # Fetch the user's non-deleted addresses; fall back to None so the
        # template can still render when the lookup fails.
        try:
            addresses = Address.objects.filter(user=user,is_deleted=False)
        except Exception as e:
            addresses = None
        # Cart hash in Redis: field = sku id (bytes), value = JSON blob
        # with at least 'count' and 'selected'.
        redis_client = get_redis_connection('carts')
        carts_data = redis_client.hgetall(user.id)
        carts_dict = {}
        for key,value in carts_data.items():
            sku_key = int(key.decode())
            sku_dict = json.loads(value.decode())
            # Only selected items take part in settlement.
            if sku_dict["selected"]:
                carts_dict[sku_key] = sku_dict
        skus = SKU.objects.filter(id__in = carts_dict.keys())
        total_count = 0
        total_amount = Decimal('0.00')
        for sku in skus:
            # Attach per-item count/amount for the template.
            sku.count = carts_dict[sku.id]['count']
            sku.amount = sku.price * sku.count
            total_count += sku.count
            total_amount += sku.price * sku.count
        # Flat freight charge.
        freight = Decimal('10.00')
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': freight,
            'payment_amount': total_amount + freight,
            'default_address_id': user.default_address_id
        }
        return render(request, 'place_order.html', context)
class OrderCommitView(LoginRequiredMixin,View):
    """Create an order (and its order-goods rows) from the selected items
    in the user's Redis cart, inside a DB transaction with optimistic
    locking on SKU stock."""

    def post(self,request):
        # Receive parameters from the JSON request body.
        # NOTE(review): the body is decoded three times; json_dict is unused.
        json_dict = json.loads(request.body.decode())
        address_id = json.loads(request.body.decode())['address_id']
        pay_method = json.loads(request.body.decode())['pay_method']
        user = request.user
        # Validate the address and payment method.
        try:
            address = Address.objects.get(id=address_id)
        except Address.DoesNotExist:
            return http.HttpResponseForbidden('WUXIAO')
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'],OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('不支持')
        # Order number: timestamp + 9-digit zero-padded user id.
        # user = request.user
        order_id = datetime.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
        # Run the whole commit in a transaction.
        from django.db import transaction
        with transaction.atomic():
            # -------- transaction savepoint --------
            save_id = transaction.savepoint()
            try:
                # Order header; status depends on the payment method
                # (online payment -> UNPAID, cash -> UNSEND).
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user = user,
                    address = address,
                    total_count = 0,
                    total_amount = Decimal('0.00'),
                    freight = Decimal("10.00"),
                    pay_method = pay_method,
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else
                    OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                # Collect the selected cart items from Redis.
                redis_client = get_redis_connection('carts')
                carts_data = redis_client.hgetall(user.id)
                carts_dict = {}
                for key,value in carts_data.items():
                    sku_id = int(key.decode())
                    sku_dict = json.loads(value.decode())
                    if sku_dict['selected']:
                        carts_dict[sku_id] = sku_dict
                sku_ids = carts_dict.keys()
                for sku_id in sku_ids:
                    # Optimistic-lock retry loop: re-read the SKU until the
                    # conditional stock update succeeds or stock runs out.
                    while True:
                        sku = SKU.objects.get(id=sku_id)
                        # sku.stock -= cart_count
                        # sku.sales += cart_count
                        # sku.sava()
                        original_stock = sku.stock
                        original_sales = sku.sales
                        # Check stock availability.
                        cart_count = carts_dict[sku_id]['count']
                        if cart_count > sku.stock:
                            transaction.savepoint_rollback(save_id)
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
                        import time
                        # time.sleep(10)
                        new_stock = original_stock - cart_count
                        new_sales = original_sales + cart_count
                        # Conditional update: only applies if stock is still
                        # what we read; result == 0 means another writer won.
                        result = SKU.objects.filter(id=sku_id, stock=original_stock).update(stock=new_stock,sales=new_sales)
                        if result == 0:
                            continue
                        sku.stock -= cart_count
                        sku.sales += cart_count
                        sku.save()
                        sku.spu.sales += cart_count
                        sku.spu.save()
                        # Create the order-goods record.
                        OrderGoods.objects.create(
                            order_id = order_id,
                            sku = sku,
                            count = cart_count,
                            price = sku.price,
                        )
                        # Accumulate total count and amount (freight excluded).
                        order.total_count += cart_count
                        order.total_amount += sku.price * cart_count
                        # Exit the retry loop once this SKU is committed.
                        break
                # Add freight to the total amount.
                order.total_amount += order.freight
                order.save()
            except Exception as e :
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
            transaction.savepoint_commit(save_id)
        # Clear the cart (currently disabled).
        # redis_client.hdel(user.id, *carts_dict)
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '下单成功', 'order_id': order.order_id})
class OrderSuccessView(View):
    """Render the order-success page, echoing back the query parameters."""

    def get(self, request):
        """Read order_id / pay_method / payment_amount from the query string."""
        params = request.GET
        context = {
            "order_id": params.get("order_id"),
            "pay_method": params.get("pay_method"),
            "payment_amount": params.get("payment_amount"),
        }
        return render(request, 'order_success.html', context)
class OrderShowView(LoginRequiredMixin,View):
    """Render the user's order-history page.

    NOTE(review): this view looks unfinished -- ``page_orders`` is built
    but never populated, the ``page_num`` argument is overwritten with 1,
    and the aggregation code is largely commented out. Confirm intended
    behavior before relying on it.
    """

    def get(self,request,page_num):
        # Identify the user from the username cookie (not request.user).
        username = request.COOKIES.get('username')
        user = User.objects.get(username=username)
        user_id = user.id
        order_data = OrderInfo.objects.all()
        goods_data = OrderGoods.objects.all()
        order_ids = order_data.filter(user_id=user_id).values('order_id')
        # order_ids = OrderInfo.objects.filter(user_id=user_id)
        page_orders = {}
        # List of all order numbers (deduplicated below).
        order_list = []
        order_id_count = goods_data.values('order_id', 'count')
        order_id_set = set()
        for order_data_co in order_id_count:
            a = order_data_co['order_id']
            order_list.append(a)
        order_list =list(set(order_list))
        print(order_list)
        for order_id in order_ids:
            order_id = order_id['order_id'] # order number
            time_old = order_data.filter(order_id=order_id).values('create_time') # creation time
            time = str(time_old[0]['create_time'])
            time_new = time[0:16] # timestamp truncated to minutes
            freight = time_old.values('freight')[0]['freight'] # freight
            """<QuerySet [{'address_id': 1, 'user_id': 19, 'total_count': 1,
            'order_id': '20190927003440000000019',
            'status': 1, 'pay_method': 2,
            'create_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 214624, tzinfo=<UTC>),
            'update_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 235034, tzinfo=<UTC>),
            'freight': Decimal('10.00'), 'total_amount': Decimal('6698.00')}]>
            """
            # if total_amount-freight == 0.00 or total_amount == 0.00:
            #     continue
            #
            # page_orders = {}
            # for Goods in goods_data:
            #     page_orders.setdefault(order_id,[time,freight,]).append(Goods)
        # NOTE(review): overwrites the page_num URL argument.
        page_num = 1
        """
        下单时间 订单号
        商品信息 数量 单价 总价 运费 支付方式 订单状态 """
        context = {
            "page_orders": page_orders,
            # # # total number of pages
            # # 'total_page': total_page,
            # # # current page
            'page_num': page_num,
        }
        return render(request,'user_center_order.html',context)
| StarcoderdataPython |
1783464 | <reponame>MithunNallana/Time-Scaled-Collision-Cone
from tscc.model.updatestate import updatestate
def computecoefficients(stateObst, stateRobo, radiusObst, radiusRobo, deltaT):
    ''' Compute collision cone constraint coefficients.

    Returns the coefficients (A, B, C) of the quadratic collision-cone
    constraint between one robot and one obstacle, built from a one-step
    (deltaT) look-ahead of both agents. States are unpacked as
    [x, y, theta, velocity(, omega)]; the obstacle is propagated with a
    zero turn rate.

    Raises ValueError when the look-ahead positions are already closer
    than the combined radius R.
    '''
    clearanceR = 0.0
    # Combined collision radius (obstacle + robot + safety clearance).
    R = radiusObst + radiusRobo + clearanceR
    # unrolling state and control data of robot and obstacle
    xRobo = stateRobo[0]
    yRobo = stateRobo[1]
    thetaRobo = stateRobo[2]
    velRobo = stateRobo[3]
    omegaRobo = stateRobo[4]
    xObst = stateObst[0]
    yObst = stateObst[1]
    thetaObst = stateObst[2]
    velObst = stateObst[3]
    # updating state of robot and obstacle, and deriving finite-difference
    # velocities over the look-ahead interval
    xRoboNxt, yRoboNxt, _ = updatestate(xRobo, yRobo, thetaRobo, velRobo,
                                        omegaRobo, deltaT)
    xdotRobo = (xRoboNxt-xRobo)/deltaT
    ydotRobo = (yRoboNxt-yRobo)/deltaT
    xObstNxt, yObstNxt, _ = updatestate(xObst, yObst, thetaObst,
                                        velObst, 0, deltaT)
    xdotObst = (xObstNxt-xObst)/deltaT
    ydotObst = (yObstNxt-yObst)/deltaT
    if ((xRoboNxt - xObstNxt)**2 + (yRoboNxt - yObstNxt)**2 < R**2):
        raise ValueError("Your lookahead is already in collision. \
                          Try reducing deltaT")
    # coefficients of quadratic collision cone constraints
    # detailed in additional_material.pdf
    m = (xdotRobo*(xRoboNxt-xObstNxt)) + (ydotRobo*(yRoboNxt-yObstNxt))
    n = (xdotObst*(xObstNxt-xRoboNxt)) + (ydotObst*(yObstNxt-yRoboNxt))
    u = -(xRoboNxt-xObstNxt)**2 - (yRoboNxt-yObstNxt)**2 + R**2
    p = xdotRobo**2 + ydotRobo**2
    q = -2*(xdotRobo*xdotObst + ydotRobo*ydotObst)
    r = xdotObst**2 + ydotObst**2
    A = m**2 + (u*p)
    B = 2*m*n + (u*q)
    C = n**2 + (u*r)
    return A, B, C
def computeconstraints(stateObst, stateRobo, radiusObst, radiusRobo, deltaT):
    '''Collect the (A, B, C) collision-cone coefficients for every obstacle.

    Returns three parallel lists, one entry per obstacle in ``stateObst``
    (with the matching radius taken from ``radiusObst``).
    '''
    a_coeffs = []
    b_coeffs = []
    c_coeffs = []
    for idx in range(len(stateObst)):
        A, B, C = computecoefficients(stateObst[idx], stateRobo,
                                      radiusObst[idx], radiusRobo,
                                      deltaT)
        a_coeffs.append(A)
        b_coeffs.append(B)
        c_coeffs.append(C)
    return a_coeffs, b_coeffs, c_coeffs
| StarcoderdataPython |
6500854 | """
@brief Wrapper interface for pyLikelihood.Composite2 to provide
more natural semantics for use in Python alongside other analysis classes.
@author <NAME> <<EMAIL>>
"""
#
# $Header: /nfs/slac/g/glast/ground/cvs/ScienceTools-scons/pyLikelihood/python/Composite2.py,v 1.5 2010/07/10 17:01:49 jchiang Exp $
#
import pyLikelihood as pyLike
from SrcModel import SourceModel
class Composite2(object):
    """Pythonic wrapper around pyLikelihood.Composite2, a joint likelihood
    over several components with optionally tied parameters."""

    def __init__(self, optimizer='Minuit'):
        # Underlying SWIG composite and the Python-side component list.
        self.composite = pyLike.Composite2()
        self.components = []
        self.tolType = pyLike.ABSOLUTE
        self.tol = 1e-2
        # Covariance matrix from the last fit (valid only when
        # covar_is_current is True).
        self.covariance = None
        self.covar_is_current = False
        # Optimizer object from the last _errors() call (used by minosError).
        self.optObject = None
        self.optimizer = optimizer

    def addComponent(self, like):
        """Add a likelihood component (its logLike joins the composite)."""
        self.composite.addComponent(like.logLike)
        self.components.append(like)

    def tieParameters(self, pars):
        """Tie parameters across components.

        pars: sequence of (component, srcname, parname) triples.
        """
        my_pars = tuple([(x[0].logLike, x[0].par_index(x[1], x[2]))
                         for x in pars])
        self.composite.tieParameters(my_pars)

    def __call__(self):
        """Return the negative composite log-likelihood."""
        return -self.composite.value()

    def fit(self, verbosity=3, tol=None, optimizer=None,
            covar=False, optObject=None):
        """Minimize and return the resulting -log(likelihood)."""
        if tol is None:
            tol = self.tol
        errors = self._errors(optimizer, verbosity, tol, covar=covar,
                              optObject=optObject)
        return self()

    def optimize(self, verbosity=3, tol=None, optimizer=None):
        """Minimize only (no uncertainty estimation)."""
        self.composite.syncParams()
        if optimizer is None:
            optimizer = self.optimizer
        if tol is None:
            tol = self.tol
        optFactory = pyLike.OptimizerFactory_instance()
        myOpt = optFactory.create(optimizer, self.composite)
        myOpt.find_min_only(verbosity, tol, self.tolType)

    def minosError(self, component, srcname, parname, level=1):
        """Run MINOS on one parameter; restores free-parameter values after.

        Returns the (lower, upper) errors, or None if MINOS fails
        (a message is printed in that case). Requires a prior fit so
        that self.optObject is set.
        """
        freeParams = pyLike.ParameterVector()
        self.composite.getFreeParams(freeParams)
        saved_values = [par.getValue() for par in freeParams]
        indx = self._compositeIndex(component, srcname, parname)
        if indx == -1:
            raise RuntimeError("Invalid parameter specification.")
        try:
            errors = self.optObject.Minos(indx, level)
            self.composite.setFreeParamValues(saved_values)
            return errors
        except RuntimeError as message:
            print ("Minos error encountered for parameter %i." % indx)
            self.composite.setFreeParamValues(saved_values)

    def _compositeIndex(self, target_component, target_src, target_par):
        """Map a (component, src, par) triple to the composite parameter index."""
        indx = target_component.par_index(target_src, target_par)
        return self.composite.findIndex(target_component.logLike, indx)

    def _errors(self, optimizer=None, verbosity=0, tol=None,
                useBase=False, covar=False, optObject=None):
        """Minimize and compute parameter uncertainties (and optionally the
        covariance matrix); propagates the errors back to the components."""
        self.composite.syncParams()
        if optimizer is None:
            optimizer = self.optimizer
        if tol is None:
            tol = self.tol
        if optObject is None:
            optFactory = pyLike.OptimizerFactory_instance()
            myOpt = optFactory.create(optimizer, self.composite)
        else:
            myOpt = optObject
        self.optObject = myOpt
        myOpt.find_min(verbosity, tol, self.tolType)
        errors = myOpt.getUncertainty(useBase)
        if covar:
            self.covariance = myOpt.covarianceMatrix()
            self.covar_is_current = True
        else:
            self.covar_is_current = False
        self._set_errors(errors)
        return errors

    def _set_errors(self, errors):
        """Push errors into the composite and refresh each component's model."""
        my_errors = list(errors)
        self.composite.setErrors(my_errors)
        for component in self.components:
            component.model = SourceModel(component.logLike)

    def __getattr__(self, attrname):
        # Delegate unknown attributes to the underlying SWIG composite.
        return getattr(self.composite, attrname)

    def __repr__(self):
        my_string = []
        for i, component in enumerate(self.components):
            my_string.append("\n")
            my_string.append("Component %i:\n" % i)
            my_string.append(str(component.model))
        return "\n".join(my_string)
| StarcoderdataPython |
6656891 | #
# Copyright 2014-2016 CloudVelox Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the implementation of the 'dhcp' command
"""
import getopt
import common
from common import CommandError
from common import DisplayOptions
from common import CommandOutput
from common import ResourceSelector
class DHCPCommand(common.BaseCommand):
    """Implements the 'dhcp' command (list, delete and associate DHCP
    option sets).
    """
    @staticmethod
    def __dhcp_display(dhcp_opt, disp, pg):
        """Display dhcp info for a single option set, honoring the
        requested display mode (long/extended/default) and tag flag.
        """
        if disp.display == DisplayOptions.LONG:
            pg.prt("%-20s", dhcp_opt.id)
        elif disp.display == DisplayOptions.EXTENDED:
            pg.prt("%s", dhcp_opt.id)
            for option_name in dhcp_opt.options:
                option_value = dhcp_opt.options[option_name]
                # Multi-valued options are shown comma-separated.
                if isinstance(option_value, list):
                    disp_str = ", ".join(option_value)
                else:
                    disp_str = option_value
                pg.prt("%25s : %s", option_name, disp_str)
            if disp.display_tags:
                common.display_tags(dhcp_opt.tags, pg)
        else:
            pg.prt("%s", dhcp_opt.id)
            if disp.display_tags:
                common.display_tags(dhcp_opt.tags, pg)

    def __dhcp_list_cmd(self, region, selector, disp):
        """Implements the list function of the dhcp command
        """
        if not selector.has_selection():
            return
        vpc_conn = self.get_vpc_conn(region)
        dhcp_opt_list = vpc_conn.get_all_dhcp_options(
            dhcp_options_ids=selector.resource_id_list)
        # Remember the ids for later command completion / caching.
        self.cache_insert(region, [dhcp_opt.id for dhcp_opt in dhcp_opt_list])
        with CommandOutput() as pg:
            for dhcp_opt in dhcp_opt_list:
                self.__dhcp_display(dhcp_opt, disp, pg)

    def __dhcp_delete_cmd(self, region, dhcp_opt_id_list):
        """Implements the delete function of the dhcp command
        """
        vpc_conn = self.get_vpc_conn(region)
        for dhcp_opt_id in dhcp_opt_id_list:
            # Only evict from the cache when the deletion succeeded.
            if vpc_conn.delete_dhcp_options(dhcp_opt_id):
                self.cache_remove(region, [dhcp_opt_id])

    def __dhcp_associate_cmd(self, region, arg_list):
        """Implements the associate function of the dhcp command.

        Expects one DHCP option id ('default' or 'dopt-*') and one VPC id
        ('vpc-*'), in either order.
        """
        dhcp_opt_id = None
        vpc_id = None
        for arg in arg_list:
            if arg == "default" or arg.startswith("dopt-"):
                dhcp_opt_id = arg
            elif arg.startswith("vpc-"):
                vpc_id = arg
            else:
                raise CommandError("Unexpected argument: %s" % (arg,))
        if vpc_id is None:
            raise CommandError("No VPC ID specified")
        if dhcp_opt_id is None:
            raise CommandError("No DHCP OPT ID specified")
        vpc_conn = self.get_vpc_conn(region)
        vpc_conn.associate_dhcp_options(dhcp_opt_id, vpc_id)

    def __dhcp_cmd(self, argv):
        """Implements the dhcp command: parse options, then dispatch to
        delete, associate, or list.
        """
        disp = DisplayOptions()
        selector = ResourceSelector()
        region = None
        cmd_delete = False
        cmd_associate = False
        opt_list, args = getopt.getopt(argv, "aDlr:Stx")
        if opt_list:
            for opt in opt_list:
                if opt[0] == '-a':
                    selector.select_all = True
                elif opt[0] == '-D':
                    cmd_delete = True
                elif opt[0] == '-l':
                    disp.display = DisplayOptions.LONG
                elif opt[0] == '-r':
                    region = opt[1]
                elif opt[0] == '-S':
                    cmd_associate = True
                elif opt[0] == '-t':
                    disp.display_tags = True
                elif opt[0] == '-x':
                    disp.display = DisplayOptions.EXTENDED
        if cmd_delete:
            self.__dhcp_delete_cmd(region, args)
        elif cmd_associate:
            self.__dhcp_associate_cmd(region, args)
        else:
            selector.resource_id_list = args
            self.__dhcp_list_cmd(region, selector, disp)

    def do_dhcp(self, ln):
        """
        dhcp [std-options] [list-options] [-D] [-S]
    Options:
        -S dhcp-opt-id vpc-id : associate dhcp-opt-id with vpc-id; use
                    'default' to set the DHCP options of the
                    specified VPC to the default dhcp options
        -D dhcp-opt-id : delete the specified DHCP options
        """
        self.dispatch(self.__dhcp_cmd, ln)
| StarcoderdataPython |
4827453 | <filename>setup.py
#!/usr/bin/python3
import setuptools
import os
import suid_sudo
#with open("README.md", "r") as fh:
# long_description = fh.read()
# Read the package version from the VERSION file (first line, e.g. "1.0.0").
with open("VERSION", "r") as fh:
    version = fh.readline().strip()

setuptools.setup(
    name="suid_sudo",
    version=version,
    author="<NAME>",
    author_email="<EMAIL>",
    description="Library for emulating setuid by sudo",
    # The module docstring doubles as the long description.
    long_description=suid_sudo.__doc__,
    long_description_content_type="text/markdown",
    license="Apache License, Version 2.0",
    url="https://github.com/yoiwa_personal/suid_sudo/",
    py_modules=['suid_sudo'],
    data_files=[('doc', ['README.md', 'doc/APIs.md'])],
    # setuptools expects a *list* of classifiers; "Operating System :: Linux"
    # is not a registered trove classifier -- the correct one is
    # "Operating System :: POSIX :: Linux".
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 2",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
    ],
)
| StarcoderdataPython |
6567351 | from .middlewares import AutoExtractMiddleware # noqa: F401
| StarcoderdataPython |
1859782 | <reponame>Fluke667/catcher<filename>catcher/steps/http.py
import json
from typing import Union
import requests
from requests import request
from catcher.steps.step import Step, update_variables
from catcher.utils.file_utils import read_file
from catcher.utils.logger import debug
from catcher.utils.misc import fill_template, fill_template_str
class Http(Step):
    """
    :Input:

    :<method>: http method. See https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html for details

    - headers: Dictionary with custom headers. *Optional*
    - url: url to call
    - response_code: Code to await. *Optional* default is 200.
    - body: body to send (only for methods which support it).
    - body_from_file: File can be used as data source. *Optional* Either `body` or `body_from_file` should present.
    - verify: Verify SSL Certificate in case of https. *Optional*. Default is true.
    - should_fail: true, if this request should fail, f.e. to test connection refused. Will fail the test if no errors.

    :Examples:

    Post data to server with custom header
    ::

        http:
          post:
            headers: {Content-Type: 'application/json', Authorization: '{{ token }}'}
            url: 'http://test.com?user_id={{ user_id }}'
            body: {'foo': bar}

    Post file to remote server
    ::

        http:
          post:
            url: 'http://test.com'
            body_from_file: "data/answers.json"

    SSL without verification
    ::

        http:
          post:
            url: 'https://my_server.de'
            body: {'user':'test'}
            verify: false

    Json body from a variable:
    ::

        http:
          post:
            url: 'http://test.com?user_id={{ user_id }}'
            body: '{{ var |tojson }}'

    Test disconnected service:
    ::

        steps:
            - docker:
                disconnect:
                    hash: '{{ my_container }}'
            - http:
                get:
                    url: '{{ my_container_url }}'
                    should_fail: true

    """

    def __init__(self, response_code=200, **kwargs) -> None:
        super().__init__(**kwargs)
        method = Step.filter_predefined_keys(kwargs)  # get/post/put...
        self.method = method.lower()
        conf = kwargs[method]
        self.url = conf['url']
        self.headers = conf.get('headers', {})
        self.body = None
        self.verify = conf.get('verify', True)
        self._should_fail = conf.get('should_fail', False)
        if not self.verify:
            # User explicitly opted out of certificate checks; silence the
            # warning urllib3 would otherwise emit on every request.
            import urllib3
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self.code = response_code
        if self.method != 'get':
            self.body = conf.get('body', None)
            if self.body is None:
                self.file = conf['body_from_file']

    @update_variables
    def action(self, includes: dict, variables: dict) -> Union[tuple, dict]:
        """Perform the configured HTTP request and return (variables, response).

        The response is parsed as JSON when possible, otherwise returned as
        raw text. Raises RuntimeError when the status code mismatches or when
        a request marked `should_fail` unexpectedly succeeds.
        """
        url = fill_template(self.url, variables)
        try:
            r = request(self.method, url, **self._form_request(url, variables))
            if self._should_fail:  # fail expected
                raise RuntimeError('Request expected to fail, but it doesn\'t')
        except requests.exceptions.ConnectionError as e:
            debug(str(e))
            if self._should_fail:  # fail expected
                return variables
            # Bugfix: previously fell through with r unset (None) and crashed
            # with AttributeError on r.text; propagate the real error instead.
            raise
        debug(r.text)
        if r.status_code != self.code:
            raise RuntimeError('Code mismatch: ' + str(r.status_code) + ' vs ' + str(self.code))
        try:
            response = r.json()
        except ValueError:
            response = r.text
        return variables, response

    def _form_request(self, url, variables: dict) -> dict:
        """Build the kwargs dict passed to requests.request().

        Fills templates in headers, decides between `json=` and `data=`
        depending on the body type and the Content-Type header.
        """
        headers = dict([(fill_template_str(k, variables), fill_template_str(v, variables))
                        for k, v in self.headers.items()])
        rq = dict(verify=self.verify, headers=headers)
        isjson, body = self.__form_body(variables)
        debug('http ' + str(self.method) + ' ' + str(url) + ', ' + str(headers) + ', ' + str(body))
        content_type = self.__get_content_type(headers)
        if isjson or isinstance(body, dict):  # contains tojson or dict supplied
            if isinstance(body, dict) and content_type == 'application/json':
                # json body formed manually via python dict
                rq['json'] = body
            else:  # json string or form-data dict
                rq['data'] = body
        else:  # raw body (or body is None)
            rq['data'] = body
        return rq

    @staticmethod
    def __get_content_type(headers):
        """Return the Content-Type header value, case-tolerant, or None."""
        content_type = headers.get('Content-Type')
        if content_type is None:
            content_type = headers.get('content-type')
        return content_type

    def __form_body(self, variables) -> str or dict:
        """Resolve the request body (inline or from file) and fill templates.

        Returns a tuple (isjson, body) where isjson flags usage of the
        `tojson` filter inside the body template.
        """
        if self.method == 'get':
            return False, None
        body = self.body
        if body is None:
            body = read_file(fill_template_str(self.file, variables))
        if isinstance(body, dict):  # dump body to json to be able fill templates in
            body = json.dumps(body)
        isjson = 'tojson' in body
        return isjson, fill_template(body, variables, isjson=isjson)
| StarcoderdataPython |
3584955 | # The MIT License (MIT)
# Copyright (c) 2018 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import glob
import logging
import os
import time
from typing import Any, Dict, List, Optional
import fiona
import numpy as np
import pandas as pd
import s3fs
import xarray as xr
import zarr
from . import __version__
from .cache import MemoryCacheStore, Cache, FileCacheStore
from .defaults import DEFAULT_CMAP_CBAR, DEFAULT_CMAP_VMIN, \
DEFAULT_CMAP_VMAX, TRACE_PERF, MEM_TILE_CACHE_CAPACITY, FILE_TILE_CACHE_CAPACITY, FILE_TILE_CACHE_PATH, \
FILE_TILE_CACHE_ENABLED, API_PREFIX, DEFAULT_NAME
from .errors import ServiceConfigError, ServiceError, ServiceBadRequestError, ServiceResourceNotFoundError
from .logtime import log_time
from .reqparams import RequestParams
COMPUTE_DATASET = 'compute_dataset'
ALL_FEATURES = "all"
_LOG = logging.getLogger('xcube')
Config = Dict[str, Any]
# noinspection PyMethodMayBeStatic
class ServiceContext:
    """Holds configuration and cached resources (open datasets, tile caches,
    feature collections) for one running xcube service instance."""

    def __init__(self,
                 name: str = DEFAULT_NAME,
                 base_dir: str = None,
                 config: Config = None):
        self._name = name
        self.base_dir = os.path.abspath(base_dir or '')
        self._config = config or dict()
        self.dataset_cache = dict()  # contains tuples of form (ds, ds_descriptor, tile_grid_cache)
        # TODO by forman: move pyramid_cache, mem_tile_cache, rgb_tile_cache into dataset_cache values
        self.pyramid_cache = dict()
        self.mem_tile_cache = Cache(MemoryCacheStore(),
                                    capacity=MEM_TILE_CACHE_CAPACITY,
                                    threshold=0.75)
        if FILE_TILE_CACHE_ENABLED:
            # Cache rendered PNG tiles on disk, keyed per xcube version.
            tile_cache_dir = os.path.join(FILE_TILE_CACHE_PATH, 'v%s' % __version__, 'tiles')
            self.rgb_tile_cache = Cache(FileCacheStore(tile_cache_dir, ".png"),
                                        capacity=FILE_TILE_CACHE_CAPACITY,
                                        threshold=0.75)
        else:
            self.rgb_tile_cache = None
        self._feature_collection_cache = dict()

    @property
    def config(self) -> Config:
        return self._config

    @config.setter
    def config(self, config: Config):
        """Replace the configuration, closing cached datasets that are no
        longer configured (or all of them if 'Datasets' was removed)."""
        if self._config:
            old_dataset_descriptors = self._config.get('Datasets')
            new_dataset_descriptors = config.get('Datasets')
            if not new_dataset_descriptors:
                for ds, _, _ in self.dataset_cache.values():
                    ds.close()
                self.dataset_cache.clear()
            if new_dataset_descriptors and old_dataset_descriptors:
                ds_names = list(self.dataset_cache.keys())
                for ds_name in ds_names:
                    dataset_descriptor = self.find_dataset_descriptor(new_dataset_descriptors, ds_name)
                    if dataset_descriptor is None:
                        ds, _, _ = self.dataset_cache[ds_name]
                        ds.close()
                        del self.dataset_cache[ds_name]
        self._config = config

    def get_service_url(self, base_url, *path: str):
        """Build a full service URL from the base URL and path components."""
        return base_url + '/' + self._name + API_PREFIX + '/' + '/'.join(path)

    def get_dataset_and_variable(self, ds_name: str, var_name: str):
        """Return (dataset, variable); 404 if the variable does not exist."""
        dataset = self.get_dataset(ds_name)
        if var_name in dataset:
            return dataset, dataset[var_name]
        raise ServiceResourceNotFoundError(f'Variable "{var_name}" not found in dataset "{ds_name}"')

    def get_dataset_descriptors(self):
        """Return the configured 'Datasets' list; error if none configured."""
        dataset_descriptors = self.config.get('Datasets')
        if not dataset_descriptors:
            raise ServiceConfigError(f"No datasets configured")
        return dataset_descriptors

    def get_dataset_descriptor(self, ds_name: str) -> Dict[str, str]:
        """Return the descriptor for *ds_name*; 404 if unknown."""
        dataset_descriptors = self.get_dataset_descriptors()
        if not dataset_descriptors:
            raise ServiceConfigError(f"No datasets configured")
        dataset_descriptor = self.find_dataset_descriptor(dataset_descriptors, ds_name)
        if dataset_descriptor is None:
            raise ServiceResourceNotFoundError(f'Dataset "{ds_name}" not found')
        return dataset_descriptor

    def get_color_mapping(self, ds_name: str, var_name: str):
        """Return (colorbar, vmin, vmax) for a variable, falling back to
        defaults (with a warning) when no style/color mapping is configured."""
        dataset_descriptor = self.get_dataset_descriptor(ds_name)
        style_name = dataset_descriptor.get('Style', 'default')
        styles = self.config.get('Styles')
        if styles:
            style = None
            for s in styles:
                if style_name == s['Identifier']:
                    style = s
            # TODO: check color_mappings is not None
            if style:
                color_mappings = style.get('ColorMappings')
                if color_mappings:
                    # TODO: check color_mappings is not None
                    color_mapping = color_mappings.get(var_name)
                    if color_mapping:
                        cmap_cbar = color_mapping.get('ColorBar', DEFAULT_CMAP_CBAR)
                        cmap_vmin, cmap_vmax = color_mapping.get('ValueRange', (DEFAULT_CMAP_VMIN, DEFAULT_CMAP_VMAX))
                        return cmap_cbar, cmap_vmin, cmap_vmax
        _LOG.warning(f'color mapping for variable {var_name!r} of dataset {ds_name!r} undefined: using defaults')
        return DEFAULT_CMAP_CBAR, DEFAULT_CMAP_VMIN, DEFAULT_CMAP_VMAX

    def get_dataset(self, ds_name: str) -> xr.Dataset:
        """Open (or fetch from cache) the dataset named *ds_name*.

        Supports three file systems: 'obs' (zarr on S3-compatible object
        storage), 'local' (NetCDF or zarr on disk) and 'computed' (a Python
        script providing a callable that returns an xarray.Dataset).
        """
        if ds_name in self.dataset_cache:
            ds, _, _ = self.dataset_cache[ds_name]
        else:
            dataset_descriptor = self.get_dataset_descriptor(ds_name)

            path = dataset_descriptor.get('Path')
            if not path:
                raise ServiceConfigError(f"Missing 'path' entry in dataset descriptor {ds_name}")

            # time.clock() was removed in Python 3.8; perf_counter() is the
            # recommended replacement for interval measurements.
            t1 = time.perf_counter()

            fs_type = dataset_descriptor.get('FileSystem', 'local')
            if fs_type == 'obs':
                data_format = dataset_descriptor.get('Format', 'zarr')
                if data_format != 'zarr':
                    raise ServiceConfigError(f"Invalid format={data_format!r} in dataset descriptor {ds_name!r}")
                client_kwargs = {}
                if 'Endpoint' in dataset_descriptor:
                    client_kwargs['endpoint_url'] = dataset_descriptor['Endpoint']
                if 'Region' in dataset_descriptor:
                    client_kwargs['region_name'] = dataset_descriptor['Region']
                s3 = s3fs.S3FileSystem(anon=True, client_kwargs=client_kwargs)
                store = s3fs.S3Map(root=path, s3=s3, check=False)
                # LRU-cache chunks in memory (256 MiB) to avoid re-fetching.
                cached_store = zarr.LRUStoreCache(store, max_size=2 ** 28)
                with log_time(f"opened remote dataset {path}"):
                    ds = xr.open_zarr(cached_store)
            elif fs_type == 'local':
                if not os.path.isabs(path):
                    path = os.path.join(self.base_dir, path)
                data_format = dataset_descriptor.get('Format', 'nc')
                if data_format == 'nc':
                    with log_time(f"opened local NetCDF dataset {path}"):
                        ds = xr.open_dataset(path)
                elif data_format == 'zarr':
                    with log_time(f"opened local zarr dataset {path}"):
                        ds = xr.open_zarr(path)
                else:
                    raise ServiceConfigError(f"Invalid format={data_format!r} in dataset descriptor {ds_name!r}")
            elif fs_type == 'computed':
                if not os.path.isabs(path):
                    path = os.path.join(self.base_dir, path)
                with open(path) as fp:
                    python_code = fp.read()

                local_env = dict()
                global_env = None
                try:
                    # NOTE: executes user-provided code from the configured
                    # script path; config files are assumed to be trusted.
                    exec(python_code, global_env, local_env)
                except Exception as e:
                    raise ServiceError(f"Failed to compute dataset {ds_name!r} from {path!r}: {e}") from e

                callable_name = dataset_descriptor.get('Function', COMPUTE_DATASET)
                callable_args = dataset_descriptor.get('Args', [])

                callable_obj = local_env.get(callable_name)
                if callable_obj is None:
                    raise ServiceConfigError(f"Invalid dataset descriptor {ds_name!r}: "
                                             f"no callable named {callable_name!r} found in {path!r}")
                elif not callable(callable_obj):
                    raise ServiceConfigError(f"Invalid dataset descriptor {ds_name!r}: "
                                             f"object {callable_name!r} in {path!r} is not callable")

                args = list()
                for arg_value in callable_args:
                    # '@other_dataset@' arguments are resolved to datasets.
                    if isinstance(arg_value, str) and len(arg_value) > 2 \
                            and arg_value.startswith('@') and arg_value.endswith('@'):
                        ref_ds_name = arg_value[1:-1]
                        if not self.get_dataset_descriptor(ref_ds_name):
                            raise ServiceConfigError(f"Invalid dataset descriptor {ds_name!r}: "
                                                     f"argument {arg_value!r} of callable {callable_name!r} "
                                                     f"must reference another dataset")
                        args.append(self.get_dataset(ref_ds_name))
                    else:
                        args.append(arg_value)

                try:
                    with log_time(f"created computed dataset {ds_name}"):
                        ds = callable_obj(*args)
                except Exception as e:
                    raise ServiceError(f"Failed to compute dataset {ds_name!r} "
                                       f"from function {callable_name!r} in {path!r}: {e}") from e
                if not isinstance(ds, xr.Dataset):
                    raise ServiceError(f"Failed to compute dataset {ds_name!r} "
                                       f"from function {callable_name!r} in {path!r}: "
                                       f"expected an xarray.Dataset but got a {type(ds)}")
            else:
                raise ServiceConfigError(f"Invalid fs={fs_type!r} in dataset descriptor {ds_name!r}")

            tile_grid_cache = dict()
            self.dataset_cache[ds_name] = ds, dataset_descriptor, tile_grid_cache

            t2 = time.perf_counter()  # see note at t1: time.clock() is gone

            if TRACE_PERF:
                print(f'PERF: opening {ds_name!r} took {t2-t1} seconds')

        return ds

    def get_legend_label(self, ds_name: str, var_name: str):
        """Return the 'units' attribute of a variable, used as legend label."""
        dataset = self.get_dataset(ds_name)
        if var_name in dataset:
            ds = self.get_dataset(ds_name)
            units = ds[var_name].units
            return units
        raise ServiceResourceNotFoundError(f'Variable "{var_name}" not found in dataset "{ds_name}"')

    def get_feature_collections(self) -> List[Dict]:
        """Return id/title dicts for every configured feature collection."""
        features_configs = self._config.get("Features", [])
        feature_collections = []
        for features_config in features_configs:
            feature_collections.append(dict(id=features_config.get("Identifier"),
                                            title=features_config.get("Title")))
        return feature_collections

    def get_feature_collection(self, collection_name: str = ALL_FEATURES) -> Dict:
        """Return a GeoJSON FeatureCollection by name (or all features).

        On first use, every configured collection is loaded via fiona,
        features get sequential string ids, and results are cached.
        """
        if ALL_FEATURES not in self._feature_collection_cache:
            features_configs = self._config.get("Features", [])
            all_features = []
            feature_index = 0
            for features_config in features_configs:
                curr_collection_name = features_config.get("Identifier")
                if not curr_collection_name:
                    raise ServiceError("Missing 'Identifier' entry in 'Features'")
                if curr_collection_name == ALL_FEATURES:
                    raise ServiceError("Invalid 'Identifier' entry in 'Features'")
                curr_collection_wc = features_config.get("Path")
                if not curr_collection_wc:
                    raise ServiceError("Missing 'Path' entry in 'Features'")
                if not os.path.isabs(curr_collection_wc):
                    curr_collection_wc = os.path.join(self.base_dir, curr_collection_wc)

                features = []
                collection_files = glob.glob(curr_collection_wc)
                for collection_file in collection_files:
                    with fiona.open(collection_file) as feature_collection:
                        for feature in feature_collection:
                            # Replace any source-file ids by our own sequence.
                            self._remove_feature_id(feature)
                            feature["id"] = str(feature_index)
                            feature_index += 1
                            features.append(feature)

                self._feature_collection_cache[curr_collection_name] = dict(type="FeatureCollection",
                                                                            features=features)
                all_features.extend(features)

            self._feature_collection_cache[ALL_FEATURES] = dict(type="FeatureCollection",
                                                                features=all_features)

        if collection_name not in self._feature_collection_cache:
            raise ServiceResourceNotFoundError(f'Feature collection "{collection_name}" not found')

        return self._feature_collection_cache[collection_name]

    @classmethod
    def _remove_feature_id(cls, feature: Dict):
        """Strip id/ID keys from a feature and its properties."""
        cls._remove_id(feature)
        if "properties" in feature:
            cls._remove_id(feature["properties"])

    @classmethod
    def _remove_id(cls, properties: Dict):
        """Delete 'id' and 'ID' keys from a dict, if present."""
        if "id" in properties:
            del properties["id"]
        if "ID" in properties:
            del properties["ID"]

    def get_dataset_and_coord_variable(self, ds_name: str, dim_name: str):
        """Return (dataset, coordinate variable) for a dimension; 404 if the
        dimension has no coordinates."""
        ds = self.get_dataset(ds_name)
        if dim_name not in ds.coords:
            raise ServiceResourceNotFoundError(f'Dimension {dim_name!r} has no coordinates in dataset {ds_name!r}')
        return ds, ds.coords[dim_name]

    @classmethod
    def get_var_indexers(cls,
                         ds_name: str,
                         var_name: str,
                         var: xr.DataArray,
                         dim_names: List[str],
                         params: RequestParams) -> Dict[str, Any]:
        """Build a dict of dimension indexers from request query parameters.

        Missing parameters select the first coordinate value; the special
        value 'current' selects the last one. Values are converted to the
        coordinate dtype (float, int or datetime).
        """
        var_indexers = dict()
        for dim_name in dim_names:
            if dim_name not in var.coords:
                raise ServiceBadRequestError(
                    f'dimension {dim_name!r} of variable {var_name!r} of dataset {ds_name!r} has no coordinates')
            coord_var = var.coords[dim_name]
            dim_value_str = params.get_query_argument(dim_name, None)
            try:
                if dim_value_str is None:
                    var_indexers[dim_name] = coord_var.values[0]
                elif dim_value_str == 'current':
                    var_indexers[dim_name] = coord_var.values[-1]
                elif np.issubdtype(coord_var.dtype, np.floating):
                    var_indexers[dim_name] = float(dim_value_str)
                elif np.issubdtype(coord_var.dtype, np.integer):
                    var_indexers[dim_name] = int(dim_value_str)
                elif np.issubdtype(coord_var.dtype, np.datetime64):
                    var_indexers[dim_name] = pd.to_datetime(dim_value_str)
                else:
                    raise ValueError(f'unable to convert dimension value {dim_value_str!r} to {coord_var.dtype!r}')
            except ValueError as e:
                raise ServiceBadRequestError(
                    f'{dim_value_str!r} is not a valid value for dimension {dim_name!r} '
                    f'of variable {var_name!r} of dataset {ds_name!r}') from e
        return var_indexers

    @classmethod
    def find_dataset_descriptor(cls,
                                dataset_descriptors: List[Dict[str, Any]],
                                ds_name: str) -> Optional[Dict[str, Any]]:
        """Return the first descriptor whose Identifier matches, else None."""
        # TODO: optimize by dict/key lookup
        return next((dsd for dsd in dataset_descriptors if dsd['Identifier'] == ds_name), None)
| StarcoderdataPython |
6443990 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
from typing import Dict, List, Tuple, Any
from netaddr import IPAddress
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_NAME = 'Public DNS Feed'
class Client:
    """HTTP client for downloading the public DNS nameserver feed.

    NOTE(review): the ``insecure`` parameter is stored as ``_verify`` and
    passed straight to requests' ``verify=``. The caller (``main``) already
    passes the *verify* value (``not insecure``) here, so behavior is
    correct but the parameter name is misleading — confirm before renaming.
    """
    def __init__(self, feed_url: str, tags: Optional[list] = None,
                 tlp_color: Optional[str] = None, insecure: bool = False):
        self._feed_url: str = feed_url
        # Certificate-verification flag for requests (see class NOTE).
        self._verify: bool = insecure
        self._proxies = handle_proxy(proxy_param_name='proxy', checkbox_default_value=False)
        # Tags and TLP color attached to every indicator built from this feed.
        self.Tags = [] if tags is None else tags
        self.Tlp_color = tlp_color

    def build_iterator(self) -> List:
        """Retrieves all entries from the feed.

        Returns:
            A list of objects, containing the indicators.

        Raises:
            Exception: with a user-facing message on SSL, connection or HTTP
                errors (the original error is embedded in the message).
        """
        feed_url = self._feed_url
        try:
            response = requests.get(
                url=feed_url,
                verify=self._verify,
                proxies=self._proxies,
            )
            response.raise_for_status()
            data = response.text
            # The feed is plain text with one nameserver address per line.
            indicators = data.split('\n')
        except requests.exceptions.SSLError as err:
            demisto.debug(str(err))
            raise Exception(f'Connection error in the API call to {INTEGRATION_NAME}.\n'
                            f'Check your not secure parameter.\n\n{err}')
        except requests.ConnectionError as err:
            demisto.debug(str(err))
            raise Exception(f'Connection error in the API call to {INTEGRATION_NAME}.\n'
                            f'Check your Server URL parameter.\n\n{err}')
        except requests.exceptions.HTTPError as err:
            demisto.debug(str(err))
            raise Exception(f'Connection error in the API call to {INTEGRATION_NAME}.\n')
        return indicators
def test_module(client: Client) -> Tuple[str, Dict[Any, Any], Dict[Any, Any]]:
    """Verify the feed is reachable by attempting to download it once.

    Args:
        client: configured feed client.

    Returns:
        The standard ('ok', {}, {}) tuple on success; build_iterator raises
        a descriptive Exception otherwise.
    """
    client.build_iterator()  # raises on connectivity/SSL/HTTP problems
    return 'ok', {}, {}
def fetch_indicators(client: Client, limit: int = -1) -> List[Dict]:
    """Download the feed and convert each entry into an indicator dict.

    Args:
        client: configured feed client.
        limit: maximum number of entries to convert (non-positive = all).

    Returns:
        List of indicator dictionaries ready for XSOAR ingestion.
    """
    entries = client.build_iterator()
    if limit > 0:
        entries = entries[:limit]

    indicators = []
    for raw_value in entries:
        # NOTE(review): 'Ip' is an unusual indicator type string — XSOAR feed
        # integrations typically use 'IP'; confirm against FeedIndicatorType.
        indicator_type = 'IPv6' if IPAddress(raw_value).version == 6 else 'Ip'
        indicators.append({
            'value': raw_value,
            'type': indicator_type,
            'rawJSON': {'value': raw_value, 'type': indicator_type},
            'fields': {'tags': client.Tags, 'trafficlightprotocol': client.Tlp_color}
        })
    return indicators
def get_indicators_command(client: Client) -> Tuple[str, Dict[Any, Any], Dict[Any, Any]]:
    """War-room command: fetch a limited batch of indicators and render them.

    Args:
        client: configured feed client.

    Returns:
        (markdown table, entry context, raw response) tuple.
    """
    args = demisto.args()
    limit = int(args.get('limit')) if 'limit' in args else 10
    indicators = fetch_indicators(client, limit)
    table = tableToMarkdown(f'{INTEGRATION_NAME}:', indicators,
                            headers=['value', 'type'], removeNull=True)
    return table, {'Indicator': indicators}, {'raw_response': indicators}
def fetch_indicators_command(client: Client) -> List[Dict]:
    """Fetch-indicators command: return every indicator in the feed.

    Args:
        client: configured feed client.

    Returns:
        List of indicator dictionaries (no limit applied).
    """
    return fetch_indicators(client)
def main():
    """Entry point: read integration params and dispatch the invoked command."""
    params = demisto.params()
    feed_url = params.get('url', 'https://public-dns.info/nameservers-all.txt')
    feed_tags = argToList(params.get('feedTags'))
    tlp_color = params.get('tlp_color')
    # The checkbox is "insecure"; the client wants the verify flag.
    verify_certificate = not params.get('insecure', False)

    command = demisto.command()
    demisto.info(f'Command being called is {command}')
    try:
        client = Client(feed_url, feed_tags, tlp_color, verify_certificate)
        command_map: Dict = {
            'test-module': test_module,
            'public-dns-get-indicators': get_indicators_command
        }
        if command in command_map:
            return_outputs(*command_map[command](client))
        elif command == 'fetch-indicators':
            # Push indicators to the server in chunks to bound memory usage.
            for chunk in batch(fetch_indicators_command(client), batch_size=2000):
                demisto.createIndicators(chunk)
        else:
            raise NotImplementedError(f'Command {command} is not implemented.')
    except Exception as err:
        return_error(f'Error in {INTEGRATION_NAME} Integration. [{err}]')


if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
| StarcoderdataPython |
1984730 | """ This function calculates similarity scores with different methods
It calculates similarity scores with :
- difflib library to find matching sequences.
- Jaccard Similarity
- words counting,
- overlapping words
"""
import difflib
from utils import remove_numbers, remove_stop_words, lemmatize
def difflib_overlap(word_token1: list, word_token2: list) -> float:
    """Similarity percentage based on difflib's SequenceMatcher ratio.

    The ratio counts matching sub-sequences between the two token lists and
    is scaled to a percentage, rounded to 3 decimals.
    """
    matcher = difflib.SequenceMatcher(a=word_token1, b=word_token2)
    return round(100 * matcher.ratio(), 3)
def calculate_overlap(word_token1: list, word_token2: list) -> float:
    """Percentage of tokens in *word_token1* that also occur in *word_token2*.

    Duplicates in word_token1 are counted once per occurrence, matching the
    original behavior.

    Args:
        word_token1: tokens of the first text (basis of the percentage).
        word_token2: tokens of the second text.

    Returns:
        Overlap percentage rounded to 3 decimals; 0.0 for an empty
        word_token1 (previously this raised ZeroDivisionError).
    """
    if not word_token1:
        return 0.0
    overlapping_words = [word for word in word_token1 if word in word_token2]
    overlap_percentage = len(overlapping_words) / len(word_token1) * 100
    return round(overlap_percentage, 3)
def calculate_jaccard(word_tokens1: list, word_tokens2: list) -> float:
    """Jaccard similarity (intersection over union) of two token lists.

    Tokens are normalized first: numbers and stop words are removed and the
    remainder is lemmatized (helpers from utils).

    Args:
        word_tokens1: tokens of the first text.
        word_tokens2: tokens of the second text.

    Returns:
        Jaccard score in [0, 1], rounded to 3 decimals; 0.0 when both lists
        are empty after cleaning (previously this raised ZeroDivisionError).
    """
    list1, list2 = remove_numbers(word_tokens1), remove_numbers(word_tokens2)
    list1, list2 = remove_stop_words(list1), remove_stop_words(list2)
    list1, list2 = lemmatize(list1), lemmatize(list2)

    # Combine both tokens to find union
    both_tokens = list1 + list2
    union = set(both_tokens)
    if not union:
        # Nothing survived the cleaning steps in either text; treat as
        # no similarity instead of dividing by zero.
        return 0.0

    # Calculate intersection
    intersection = set()
    for word in list1:
        if word in list2:
            intersection.add(word)

    jaccard_score = len(intersection) / len(union)
    return round(jaccard_score, 3)
| StarcoderdataPython |
148839 | <reponame>wharvey31/project-diploid-assembly<filename>scripts/utilities/version_checker.py<gh_stars>0
#!/usr/bin/env python
import os
import sys
import argparse
import re
def main():
    """Scan tool output on stdin for a version number and verify it meets a
    required minimum.

    Writes progress to --logfile; on success touches --outfile with the
    confirmed version and returns 0, otherwise returns 1.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--outfile', '-o', type=str, dest='outfile')
    parser.add_argument('--at-least', '-a', type=str, dest='atleast')
    parser.add_argument('--logfile', '-l', type=str, dest='logfile')
    args = parser.parse_args()

    outfile = args.outfile
    logfile = args.logfile
    try:
        os.makedirs(os.path.dirname(os.path.abspath(outfile)), exist_ok=True)
        os.makedirs(os.path.dirname(os.path.abspath(logfile)), exist_ok=True)
    except TypeError:
        # since Conda environments (or the Singularity module on Hilbert)
        # only support Python2 (...), exist_ok may cause an exception
        # Ignore that and hope that Snakemake creates everything...
        pass

    req_version = tuple(int(v) for v in args.atleast.split('.'))
    version_pattern = re.compile('[0-9]+\\.[0-9]+(\\.[0-9]+)?')

    match_found = False
    tool_version = ()
    with open(logfile, 'w') as log:
        _ = log.write('Minimum version required: {}\n'.format(args.atleast))
        for line in sys.stdin.readlines():
            _ = log.write('Processing line: {}\n'.format(line.strip()))
            mobj = version_pattern.search(line.strip())
            if mobj is None:
                continue
            version_info = mobj.group(0)
            _ = log.write('Potential version info found: {}\n'.format(version_info))
            tool_version = tuple(int(v) for v in version_info.split('.'))
            # Lexicographic tuple comparison correctly handles versions of
            # unequal length and -- unlike the previous per-component check,
            # which required a strictly greater component -- accepts a tool
            # version exactly equal to the required minimum.
            if tool_version >= req_version:
                _ = log.write('Minimum version matched...\n')
                match_found = True
                break
            _ = log.write('Version info did not match...\n')

        if match_found:
            exit_code = 0
            with open(outfile, 'w') as touch:
                _ = touch.write('Version confirmed: {}\n'.format('.'.join(str(v) for v in tool_version)))
        else:
            exit_code = 1
            _ = log.write('No match found')

    return exit_code


if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
1668130 | <reponame>djconly85/PPA2_0_code
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 13:13:38 2020
@author: dconly
https://www.reportlab.com/documentation/tutorial/#json-to-pdf-invoice-tutorial
"""
| StarcoderdataPython |
1928792 | <filename>indico/modules/events/contributions/forms.py
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import timedelta
from flask import request
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields import BooleanField, HiddenField, SelectField, StringField, TextAreaField
from wtforms.validators import DataRequired, ValidationError
from indico.core.db import db
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.abstracts.settings import BOASortField
from indico.modules.events.contributions.fields import (ContributionPersonLinkListField,
SubContributionPersonLinkListField)
from indico.modules.events.contributions.models.references import ContributionReference, SubContributionReference
from indico.modules.events.contributions.models.types import ContributionType
from indico.modules.events.fields import ReferencesField
from indico.modules.events.util import check_permissions
from indico.util.date_time import get_day_end
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import (HiddenFieldList, IndicoDateTimeField, IndicoEnumSelectField, IndicoLocationField,
IndicoProtectionField, IndicoTagListField)
from indico.web.forms.fields.datetime import IndicoDurationField
from indico.web.forms.fields.principals import PermissionsField
from indico.web.forms.validators import DateTimeRange, MaxDuration
from indico.web.forms.widgets import SwitchWidget
class ContributionForm(IndicoForm):
    """Form for creating/editing a contribution of an event."""

    title = StringField(_("Title"), [DataRequired()])
    description = TextAreaField(_("Description"))
    start_dt = IndicoDateTimeField(_("Start date"),
                                   [DataRequired(),
                                    DateTimeRange(earliest=lambda form, field: form._get_earliest_start_dt(),
                                                  latest=lambda form, field: form._get_latest_start_dt())],
                                   allow_clear=False,
                                   description=_("Start date of the contribution"))
    duration = IndicoDurationField(_('Duration'), [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))
    type = QuerySelectField(_("Type"), get_label='name', allow_blank=True, blank_text=_("No type selected"))
    person_link_data = ContributionPersonLinkListField(_("People"))
    location_data = IndicoLocationField(_("Location"))
    keywords = IndicoTagListField(_('Keywords'))
    references = ReferencesField(_("External IDs"), reference_class=ContributionReference,
                                 description=_("Manage external resources for this contribution"))
    board_number = StringField(_("Board Number"))
    code = StringField(_('Programme code'))

    @generated_data
    def render_mode(self):
        # Contribution descriptions are always stored as markdown.
        return RenderMode.markdown

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        self.contrib = kwargs.pop('contrib', None)
        self.session_block = kwargs.get('session_block')
        self.timezone = self.event.timezone
        to_schedule = kwargs.pop('to_schedule', False)
        super().__init__(*args, **kwargs)
        self.type.query = self.event.contribution_types
        if self.event.type != 'conference':
            # Outside conferences contribution persons are labeled as speakers.
            self.person_link_data.label.text = _("Speakers")
        if not self.type.query.count():
            # The event defines no contribution types -> hide the field.
            del self.type
        if not to_schedule and (self.contrib is None or not self.contrib.is_scheduled):
            # Only scheduled (or to-be-scheduled) contributions get a start date.
            del self.start_dt

    def _get_earliest_start_dt(self):
        # A contribution cannot start before its session block / the event.
        return self.session_block.start_dt if self.session_block else self.event.start_dt

    def _get_latest_start_dt(self):
        # A contribution cannot start after its session block / event ends.
        return self.session_block.end_dt if self.session_block else self.event.end_dt

    def validate_duration(self, field):
        """Reject durations that extend past the block/event end date."""
        start_dt = self.start_dt.data if self.start_dt else None
        if start_dt:
            end_dt = start_dt + field.data
            if self.session_block and end_dt > self.session_block.end_dt:
                raise ValidationError(_("With the current duration the contribution exceeds the block end date"))
            if end_dt > self.event.end_dt:
                raise ValidationError(_('With the current duration the contribution exceeds the event end date'))

    @property
    def custom_field_names(self):
        # Names of dynamically added custom contribution fields ('custom_*').
        return tuple(field_name for field_name in self._fields if field_name.startswith('custom_'))
class ContributionProtectionForm(IndicoForm):
    """Form for editing the protection settings of a contribution."""

    permissions = PermissionsField(_("Permissions"), object_type='contribution')
    protection_mode = IndicoProtectionField(_('Protection mode'), protected_object=lambda form: form.protected_object,
                                            acl_message_url=lambda form: url_for('contributions.acl_message',
                                                                                 form.protected_object))

    def __init__(self, *args, **kwargs):
        self.protected_object = contribution = kwargs.pop('contrib')
        self.event = contribution.event
        super().__init__(*args, **kwargs)

    def validate_permissions(self, field):
        """Reject permission sets that are invalid for this event."""
        except_msg = check_permissions(self.event, field)
        if except_msg:
            raise ValidationError(except_msg)
class SubContributionForm(IndicoForm):
    """Form to create or edit a sub-contribution."""

    title = StringField(_('Title'), [DataRequired()])
    description = TextAreaField(_('Description'))
    duration = IndicoDurationField(_('Duration'),
                                   [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))
    speakers = SubContributionPersonLinkListField(_('Speakers'),
                                                  allow_submitters=False,
                                                  allow_authors=False,
                                                  description=_('The speakers of the subcontribution'))
    references = ReferencesField(_("External IDs"),
                                 reference_class=SubContributionReference,
                                 description=_("Manage external resources for this sub-contribution"))
    code = StringField(_('Programme code'))

    @generated_data
    def render_mode(self):
        # Descriptions of sub-contributions are always rendered as Markdown.
        return RenderMode.markdown

    def __init__(self, *args, **kwargs):
        # `event` is required; `subcontrib` is the one being edited
        # (None when creating a new sub-contribution).
        self.event = kwargs.pop('event')
        self.subcontrib = kwargs.pop('subcontrib', None)
        super().__init__(*args, **kwargs)
class ContributionStartDateForm(IndicoForm):
    """Form to change only the start date of a scheduled contribution."""

    start_dt = IndicoDateTimeField(_('Start date'),
                                   [DataRequired(),
                                    DateTimeRange(earliest=lambda form, field: form.event.start_dt,
                                                  latest=lambda form, field: form.event.end_dt)],
                                   allow_clear=False)

    def __init__(self, *args, **kwargs):
        # `contrib` is required; event and timezone are derived from it so
        # the datetime field can localize correctly.
        self.contrib = kwargs.pop('contrib')
        self.event = self.contrib.event
        self.timezone = self.event.timezone
        super().__init__(*args, **kwargs)

    def validate_start_dt(self, field):
        """Ensure the contribution still ends within its day (or the event)."""
        event = self.contrib.event
        # The day boundary is taken from the *current* start date, not the
        # newly submitted one.  NOTE(review): confirm this is intended —
        # moving the contribution to another day still validates against
        # the old day's end.
        day = self.contrib.start_dt.astimezone(event.tzinfo).date()
        if day == event.end_dt_local.date():
            # On the event's last day the hard limit is the event end itself.
            latest_dt = event.end_dt
            error_msg = _("With this time, the contribution would exceed the event end time.")
        else:
            latest_dt = get_day_end(day, tzinfo=event.tzinfo)
            error_msg = _("With this time, the contribution would exceed the current day.")
        if field.data + self.contrib.duration > latest_dt:
            raise ValidationError(error_msg)
class ContributionDurationForm(IndicoForm):
    """Form to change only the duration of a contribution."""

    duration = IndicoDurationField(_('Duration'),
                                   [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))

    def __init__(self, *args, **kwargs):
        self.contrib = kwargs.pop('contrib')
        super().__init__(*args, **kwargs)

    def validate_duration(self, field):
        """Reject durations pushing a scheduled contribution past its day/event end."""
        if field.errors:
            # The field itself already failed validation; don't pile on.
            return
        if not self.contrib.is_scheduled:
            # Unscheduled contributions have no timetable constraints.
            return
        event = self.contrib.event
        day = self.contrib.start_dt.astimezone(event.tzinfo).date()
        if day == event.end_dt_local.date():
            # On the event's last day the hard limit is the event end itself.
            latest_dt = event.end_dt
            error_msg = _("With this duration, the contribution would exceed the event end time.")
        else:
            latest_dt = get_day_end(day, tzinfo=event.tzinfo)
            error_msg = _("With this duration, the contribution would exceed the current day.")
        if self.contrib.start_dt + field.data > latest_dt:
            raise ValidationError(error_msg)
class ContributionDefaultDurationForm(IndicoForm):
    """Form to edit the default duration applied to new contributions."""

    duration = IndicoDurationField(_('Duration'),
                                   [DataRequired(), MaxDuration(timedelta(hours=24))],
                                   default=timedelta(minutes=20))
class ContributionTypeForm(IndicoForm):
    """Form to create or edit a ContributionType."""

    name = StringField(_("Name"), [DataRequired()])
    is_private = BooleanField(_("Private"), widget=SwitchWidget(),
                              description=_("If selected, this contribution type cannot be chosen by users "
                                            "submitting an abstract."))
    description = TextAreaField(_("Description"))

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        # `get` (not `pop`): `obj` is also consumed by the parent WTForms
        # form to populate field defaults.
        self.contrib_type = kwargs.get('obj')
        super().__init__(*args, **kwargs)

    def validate_name(self, field):
        """Enforce case-insensitive uniqueness of the name within the event."""
        clashes = self.event.contribution_types.filter(
            db.func.lower(ContributionType.name) == field.data.lower())
        if self.contrib_type:
            # When editing, the type may of course keep its own name.
            clashes = clashes.filter(ContributionType.id != self.contrib_type.id)
        if clashes.count():
            raise ValidationError(_("A contribution type with this name already exists"))
class ContributionExportTeXForm(IndicoForm):
    """Form for TeX-based export selection."""

    format = SelectField(_('Format'), default='PDF')
    sort_by = IndicoEnumSelectField(_('Sort by'), enum=BOASortField, default=BOASortField.abstract_title,
                                    sorted=True)
    contribution_ids = HiddenFieldList()
    submitted = HiddenField()

    def __init__(self, *args, **kwargs):
        # `get` (not `pop`) deliberately leaves 'contribs' in kwargs for the
        # parent form; WTForms ignores kwargs without a matching field.
        # NOTE(review): siblings use `pop` for such extras — confirm which
        # convention is intended here.
        self.contribs = kwargs.get('contribs')
        super().__init__(*args, **kwargs)
        # Pre-select every passed contribution.  Guard against a missing
        # `contribs` kwarg: iterating None previously raised a TypeError.
        if self.contribs is not None and not self.contribution_ids.data:
            self.contribution_ids.data = [c.id for c in self.contribs]

    def is_submitted(self):
        # The form may share a page with other forms; only treat it as
        # submitted when its own hidden marker is present in the request.
        return super().is_submitted() and 'submitted' in request.form
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.