11544115
|
from error import Error
class Token():
def __init__(self, type, value):
self.type = type
self.value = value
class Lexer():
def __init__(self, code):
"""Initializes an instance of Lexer
Arguments:
code {str} -- The raw text written by the user
"""
self.code = code
self.line_number = 1
self.position = 0
self.current_char = code[self.position]
        # Contains all words (tokens) recognized by this language
        self.tokens = {
            'KEYWORD': ['INPUT', 'OUTPUT', 'DECLARE', 'OF', 'IF', 'THEN', 'ELSEIF',
                        'ELSE', 'ENDIF', 'FOR', 'TO', 'STEP', 'ENDFOR', 'REPEAT',
                        'UNTIL', 'WHILE', 'ENDWHILE', 'CASE', 'OTHERWISE', 'ENDCASE',
                        'PROCEDURE', 'ENDPROCEDURE', 'FUNCTION', 'ENDFUNCTION',
                        'RETURN', 'CALL', 'BYVAL', 'BYREF', 'OPENFILE', 'READFILE',
                        'WRITEFILE', 'CLOSEFILE', 'TYPE', 'ENDTYPE', 'CONSTANT'
                        ],
'BUILTIN_FUNCTION': ['CHR', 'ASC', 'LENGTH', 'LEFT', 'RIGHT', 'MID',
'CONCAT', 'INT', 'LCASE', 'UCASE', 'TONUM', 'TOSTRING', 'SUBSTR', 'ONECHAR', 'CHARACTERCOUNT', 'EOF'
],
'OPERATION': ['+', '-', '/', '*', 'DIV', 'MOD', '^'
],
'PARENTHESIS': ['(', ')', '{', '}', '[', ']'
],
'COMPARISON': ['>', '<', '='
],
'BOOLEAN': ['TRUE', 'FALSE'
],
'LOGICAL': ['AND', 'OR', 'NOT'
],
'FILE_MODE': ['READ', 'WRITE', 'APPEND'
]
}
def next_token(self):
"""Returns the next token in the text
Returns:
Token -- The token made from the current characters in the raw text
"""
while self.current_char != 'EOF':
if self.current_char.isspace():
self.advance()
elif self.current_char.isalpha():
return self.make_word()
elif self.current_char.isnumeric():
return self.make_number()
elif self.current_char == '"':
self.advance()
token = Token('STRING', self.make_string())
self.advance()
return token
elif self.current_char in self.tokens['OPERATION']:
token = Token('OPERATION', self.current_char)
self.advance()
return token
elif self.current_char in self.tokens['PARENTHESIS']:
token = Token('PARENTHESIS', self.current_char)
self.advance()
return token
elif self.current_char == '<' and self.peek() == '-':
self.advance()
self.advance()
return Token('ASSIGNMENT', '<-')
elif self.current_char == '.' and self.peek() == '.':
self.advance()
self.advance()
return Token('RANGE', '..')
elif self.current_char == '#':
token = self.ignore_line()
return token
elif self.current_char == '.':
self.advance()
return Token('PERIOD', '.')
elif self.current_char == ':':
token = Token('COLON', self.current_char)
self.advance()
return token
elif self.current_char == ',':
token = Token('COMMA', self.current_char)
self.advance()
return token
elif self.current_char in self.tokens['COMPARISON']:
token = Token('COMPARISON', self.make_comparison())
self.advance()
return token
else:
Error().syntax_error(self.current_char, self.line_number)
return Token('EOF', 'EOF')
def advance(self):
"""Advances the Lexer instance by one character
"""
self.position += 1
if self.position > len(self.code) - 1:
# The end of the file has been reached
self.current_char = 'EOF'
else:
self.current_char = self.code[self.position]
    def peek(self):
        """Peeks at the next character in the code without actually advancing
        Returns:
            str -- The next character in the code
        """
        peek_position = self.position + 1
        if peek_position > len(self.code) - 1:
            # The end of the file has been reached
            return 'EOF'
        return self.code[peek_position]
def make_string(self):
"""Makes a string out of the characters following the " before the next "
Returns:
str -- The string contained inside " and "
"""
string = ''
while self.current_char != '"' and self.current_char != 'EOF':
string += self.current_char
self.advance()
return string
def make_word(self):
"""Checks the current word against the Lexer word dictionary
Returns:
Token -- the word formed and its type
"""
word = ''
while self.current_char.isalnum() and self.current_char != 'EOF':
word += self.current_char
self.advance()
if word in self.tokens['KEYWORD']:
return Token('KEYWORD', word)
elif word in self.tokens['BUILTIN_FUNCTION']:
return Token('BUILTIN_FUNCTION', word)
elif word in self.tokens['OPERATION']:
return Token('OPERATION', word)
elif word in self.tokens['LOGICAL']:
return Token('LOGICAL', word)
elif word in self.tokens['BOOLEAN']:
return Token('BOOLEAN', word)
elif word in self.tokens['FILE_MODE']:
return Token('FILE_MODE', word)
elif word == 'EOL':
self.line_number += 1
return self.next_token()
else:
return Token('VARIABLE', word)
def make_number(self):
"""Forms a number
Returns:
Token -- the number formed and its type
"""
number = ''
while self.current_char.isnumeric() or (self.current_char == '.' and self.peek() != '.'):
number += self.current_char
self.advance()
if number.find('.') == -1:
return Token('INTEGER', int(number))
else:
return Token('REAL', float(number))
def make_comparison(self):
"""Forms a comparison operator
Returns:
Token -- the operator formed and its type
"""
char = self.current_char
if char == '=':
if self.peek() == '<' or self.peek() == '>':
self.advance()
char += self.current_char
elif char == '<':
if self.peek() == '>' or self.peek() == '=':
self.advance()
char += self.current_char
elif char == '>':
if self.peek() == '=':
self.advance()
char += self.current_char
return char
def ignore_line(self):
"""Ignores all tokens until an EOL is seen
Returns:
Token -- The next non-commented Token formed by the code
"""
line = self.line_number
# Line changes after a new line
while self.line_number == line:
self.advance()
token = self.next_token()
return token
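# A minimal usage sketch (added for illustration, not part of the original
# module); it assumes the companion `error` module is importable and tokenizes
# a short snippet of the pseudocode dialect recognized above.
if __name__ == '__main__':
    lexer = Lexer('DECLARE x : INTEGER EOL x <- 3 + 4 EOL')
    token = lexer.next_token()
    while token.type != 'EOF':
        print(token.type, token.value)
        token = lexer.next_token()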
|
11544116
|
import pykd
def get_address(localAddr):
res = pykd.dbgCommand("x " + localAddr)
result_count = res.count("\n")
if result_count == 0:
        print(localAddr + " not found.")
return None
if result_count > 1:
print "[-] Warning, more than one result for", localAddr
return res.split()[0]
class handle_allocate_heap(pykd.eventHandler):
def __init__(self):
addr = get_address("ntdll!RtlAllocateHeap")
        if addr is None:
            return
self.bp_init = pykd.setBp(int(addr, 16), self.enter_call_back)
self.bp_end = None
def enter_call_back(self,bp):
print "RtlAllocateHeap called."
if self.bp_end == None:
disas = pykd.dbgCommand("uf ntdll!RtlAllocateHeap").split('\n')
for i in disas:
if 'ret' in i:
self.ret_addr = i.split()[0]
break
self.bp_end = pykd.setBp(int(self.ret_addr, 16), self.return_call_back)
return False
def return_call_back(self,bp):
print "RtlAllocateHeap returned."
return False
handle_allocate_heap()
pykd.go()
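# Added note: this script is intended to run inside a pykd-enabled debugger
# session (e.g. WinDbg after ".load pykd", via "!py script.py"); pykd.go()
# resumes the target so the RtlAllocateHeap breakpoints can fire.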
|
11544147
|
from pydantic.types import ConstrainedInt, ConstrainedFloat
class PositiveInt(ConstrainedInt):
# These ensure mypy knows what is going on with constrained types
# Directly using confloat, conint works, but mypy gets unhappy.
# https://github.com/samuelcolvin/pydantic/issues/239
ge = 0
class UnitFloat(ConstrainedFloat):
ge = 0.0
le = 1.0
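# A small usage sketch (added; `Config` and its fields are hypothetical): the
# subclasses validate like conint(ge=0) / confloat(ge=0, le=1) at runtime while
# giving mypy concrete types to check against.
from pydantic import BaseModel, ValidationError

class Config(BaseModel):
    retries: PositiveInt
    dropout: UnitFloat

Config(retries=3, dropout=0.5)  # passes validation
try:
    Config(retries=-1, dropout=1.5)  # both fields out of range
except ValidationError as exc:
    print(exc)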
|
11544203
|
from setuptools import setup
with open('README.rst') as f:
long_description = f.read()
setup(
name='pytest-faulthandler',
version="2.0.1",
url='https://github.com/pytest-dev/pytest-faulthandler',
license='MIT',
install_requires=['pytest>=5.0'],
author='<NAME>',
author_email='<EMAIL>',
description='py.test plugin that activates the fault handler module for tests (dummy package)',
long_description=long_description,
keywords='pytest faulthandler',
classifiers=[
'Development Status :: 6 - Mature',
'Framework :: Pytest',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Testing',
]
)
|
11544216
|
import datetime
from sdcclient._common import _SdcCommon
class ActivityAuditDataSource:
CMD = "command"
NET = "connection"
KUBE_EXEC = "kubernetes"
FILE = "fileaccess"
_seconds_to_nanoseconds = 10 ** 9
class ActivityAuditClientV1(_SdcCommon):
def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None):
super(ActivityAuditClientV1, self).__init__(token, sdc_url, ssl_verify, custom_headers)
self.product = "SDS"
def list_events(self, from_date=None, to_date=None, scope_filter=None, limit=0,
data_sources=None):
"""
List the events in the Activity Audit.
Args:
from_date (datetime.datetime): the start of the time range from which to get events. The default value is yesterday.
to_date (datetime.datetime): the end of the time range from which to get events. The default value is now.
            scope_filter (List): a list of Sysdig Monitor-like filters (e.g. `processName in ("ubuntu")`).
limit (int): max number of events to retrieve. A limit of 0 or negative will retrieve all events.
data_sources (List): a list of data sources to retrieve events from. None or an empty list retrieves all events.
Examples:
>>> client = ActivityAuditClientV1(token=SECURE_TOKEN)
>>>
>>> now = datetime.datetime.now()
>>> three_days_ago = now - datetime.timedelta(days=3)
>>> max_event_number_retrieved = 50
>>> data_sources = [ActivityAuditDataSource.CMD, ActivityAuditDataSource.KUBE_EXEC]
>>>
>>> ok, events = client.list_events(from_date=three_days_ago,
>>> to_date=now,
>>> limit=max_event_number_retrieved,
>>> data_sources=data_sources)
Returns:
A list of event objects from the Activity Audit.
"""
number_of_events_per_query = 50
if from_date is None:
from_date = datetime.datetime.now() - datetime.timedelta(days=1)
if to_date is None:
to_date = datetime.datetime.now()
filters = scope_filter if scope_filter else []
if data_sources:
quoted_data_sources = [f'"{data_source}"' for data_source in data_sources]
data_source_filter = f'type in ({",".join(quoted_data_sources)})'
filters.append(data_source_filter)
query_params = {
"from": int(from_date.timestamp()) * _seconds_to_nanoseconds,
"to": int(to_date.timestamp()) * _seconds_to_nanoseconds,
"limit": number_of_events_per_query,
"filter": " and ".join(filters),
}
res = self.http.get(self.url + '/api/v1/activityAudit/events', headers=self.hdrs, verify=self.ssl_verify,
params=query_params)
ok, res = self._request_result(res)
if not ok:
return False, res
events = []
# Pagination required by Secure API
while "page" in res and \
"total" in res["page"] and \
res["page"]["total"] > number_of_events_per_query:
events = events + res["data"]
            if 0 < limit <= len(events):
                events = events[:limit]
                break
paginated_query_params = {
"limit": number_of_events_per_query,
"filter": " and ".join(filters),
"cursor": res["page"]["prev"]
}
res = self.http.get(self.url + '/api/v1/activityAudit/events', headers=self.hdrs, verify=self.ssl_verify,
params=paginated_query_params)
ok, res = self._request_result(res)
if not ok:
return False, res
else:
events = events + res["data"]
return True, events
def list_trace(self, traceable_event):
"""
Lists the events from an original traceable event.
Args:
            traceable_event (object): an event retrieved from the list_events method. The event must be traceable,
                that is, it must have the "traceable" key set to true.
Examples:
>>> client = ActivityAuditClientV1(token=SECURE_TOKEN)
>>>
>>> ok, events = client.list_events()
>>> if not ok:
>>> return
>>> traceable_events = [event for event in events if event["traceable"]]
>>>
>>> ok, trace = client.list_trace(traceable_events[0])
>>> if not ok:
>>> return
>>>
>>> for event in trace:
>>> print(event)
Returns:
All the related events that are the trace of the given event.
"""
if not traceable_event or not traceable_event["traceable"]:
return False, "a traceable event must be provided"
endpoint = f'/api/v1/activityAudit/events/{traceable_event["type"]}/{traceable_event["id"]}/trace'
res = self.http.get(self.url + endpoint, headers=self.hdrs, verify=self.ssl_verify)
ok, res = self._request_result(res)
if not ok:
return False, res
return True, res["data"]
|
11544231
|
from django.db import models, IntegrityError
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from modeltests.delete.models import (R, RChild, S, T, U, A, M, MR, MRNull,
create_a, get_default_r, User, Avatar, HiddenUser, HiddenUserProfile)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.o2o_setnull)
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').rel.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
        self.assertTrue(MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertEqual(None, obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertEqual(None, a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
        def log_pre_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))
        def log_post_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
t1 = T.objects.create(pk=1, s=s1)
t2 = T.objects.create(pk=2, s=s2)
r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
        )
        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to delete the avatar
# The important thing is that when we can defer constraint checks there
# is no need to do an UPDATE on User.avatar to null it out.
self.assertNumQueries(3, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
@skipIfDBFeature("can_defer_constraint_checks")
def test_cannot_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to null out user.avatar, because we can't defer the constraint
# 1 query to delete the avatar
self.assertNumQueries(4, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_hidden_related(self):
r = R.objects.create()
h = HiddenUser.objects.create(r=r)
p = HiddenUserProfile.objects.create(user=h)
r.delete()
self.assertEqual(HiddenUserProfile.objects.count(), 0)
|
11544234
|
from enum import Enum, auto
from typing import Tuple
class EndpointType(Enum):
# endpoint / collection types
GCP = auto()
GCSV5_ENDPOINT = auto()
GUEST_COLLECTION = auto()
MAPPED_COLLECTION = auto()
SHARE = auto()
NON_GCSV5_ENDPOINT = auto() # most likely GCSv4, but not necessarily
@classmethod
def collections(cls) -> Tuple["EndpointType", ...]:
return (cls.GUEST_COLLECTION, cls.MAPPED_COLLECTION)
@classmethod
def traditional_endpoints(cls) -> Tuple["EndpointType", ...]:
return (cls.GCP, cls.SHARE, cls.NON_GCSV5_ENDPOINT)
@classmethod
def non_collection_types(cls) -> Tuple["EndpointType", ...]:
return tuple(x for x in cls if x not in cls.collections())
@classmethod
def gcsv5_types(cls) -> Tuple["EndpointType", ...]:
return tuple(
x for x in cls if (x is cls.GCSV5_ENDPOINT or x in cls.collections())
)
@classmethod
def nice_name(cls, eptype: "EndpointType") -> str:
return {
cls.GCP: "Globus Connect Personal",
cls.GCSV5_ENDPOINT: "Globus Connect Server v5 Endpoint",
cls.GUEST_COLLECTION: "Guest Collection",
cls.MAPPED_COLLECTION: "Mapped Collection",
cls.SHARE: "Shared Endpoint",
cls.NON_GCSV5_ENDPOINT: "GCSv4 Endpoint",
}.get(eptype, "UNKNOWN")
@classmethod
def determine_endpoint_type(cls, ep_doc: dict) -> "EndpointType":
"""
Given an endpoint document from transfer, determine what type of
endpoint or collection it is for
"""
if ep_doc.get("is_globus_connect") is True:
return EndpointType.GCP
if ep_doc.get("non_functional") is True:
return EndpointType.GCSV5_ENDPOINT
shared = ep_doc.get("host_endpoint_id") is not None
if ep_doc.get("gcs_version"):
try:
major, _minor, _patch = ep_doc["gcs_version"].split(".")
except ValueError: # split -> unpack didn't give 3 values
major = None
gcsv5 = major == "5"
else:
gcsv5 = False
if gcsv5:
if shared:
return EndpointType.GUEST_COLLECTION
else:
return EndpointType.MAPPED_COLLECTION
elif shared:
return EndpointType.SHARE
return EndpointType.NON_GCSV5_ENDPOINT
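# A small illustrative check (added; the endpoint documents are hypothetical
# but use only the fields inspected above):
if __name__ == "__main__":
    assert EndpointType.determine_endpoint_type({"is_globus_connect": True}) is EndpointType.GCP
    assert (EndpointType.determine_endpoint_type({"gcs_version": "5.4.10", "host_endpoint_id": "abc"})
            is EndpointType.GUEST_COLLECTION)
    assert EndpointType.determine_endpoint_type({}) is EndpointType.NON_GCSV5_ENDPOINT
    print(EndpointType.nice_name(EndpointType.SHARE))  # -> Shared Endpoint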
|
11544240
|
from ztag.annotation import *
class DLinkVoIPRouter(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
def process(self, obj, meta):
if obj["title"].strip() == "D-Link VoIP Router":
meta.global_metadata.manufacturer = Manufacturer.DLINK
meta.global_metadata.product = "VoIP Router"
meta.global_metadata.device_type = Type.SOHO_ROUTER
meta.tags.add("embedded")
return meta
class DLinkWirelessRouter(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
def process(self, obj, meta):
if obj["title"].strip() == "D-LINK SYSTEMS, INC. | WIRELESS ROUTER":
meta.global_metadata.manufacturer = Manufacturer.DLINK
meta.global_metadata.product = "Wireless Router"
meta.global_metadata.device_type = Type.SOHO_ROUTER
meta.tags.add("embedded")
return meta
|
11544248
|
from fastapi import APIRouter
from fastdash.api.routes import fastdash
router = APIRouter()
router.include_router(fastdash.router, tags=["fastdash"], prefix="/fastdash")
|
11544255
|
from xml.dom.minidom import Document
import cv2
import os
import glob
import shutil
import numpy as np
from tools.convert_utils import build_voc_dirs
def generate_xml(img_name, lines, img_size, class_sets):
doc = Document()
def append_xml_node_attr(child, parent=None, text=None):
ele = doc.createElement(child)
if not text is None:
text_node = doc.createTextNode(text)
ele.appendChild(text_node)
parent = doc if parent is None else parent
parent.appendChild(ele)
return ele
cls = 'text'
# create header
annotation = append_xml_node_attr('annotation')
append_xml_node_attr('folder', parent=annotation, text='text')
append_xml_node_attr('filename', parent=annotation, text=img_name)
source = append_xml_node_attr('source', parent=annotation)
append_xml_node_attr('database', parent=source, text='coco_text_database')
append_xml_node_attr('annotation', parent=source, text='text')
append_xml_node_attr('image', parent=source, text='text')
append_xml_node_attr('flickrid', parent=source, text='000000')
owner = append_xml_node_attr('owner', parent=annotation)
append_xml_node_attr('name', parent=owner, text='ms')
size = append_xml_node_attr('size', annotation)
append_xml_node_attr('width', size, str(img_size[1]))
append_xml_node_attr('height', size, str(img_size[0]))
append_xml_node_attr('depth', size, str(img_size[2]))
append_xml_node_attr('segmented', parent=annotation, text='0')
# create objects
objs = []
for line in lines:
splitted_line = line.strip().lower().split()
obj = append_xml_node_attr('object', parent=annotation)
occlusion = int(0)
x1, y1, x2, y2 = int(float(splitted_line[0])), int(float(splitted_line[1])), \
int(float(splitted_line[2])), int(float(splitted_line[3]))
        truncation = 0.0
        difficult = 0
        truncated = 0 if truncation < 0.5 else 1
        append_xml_node_attr('name', parent=obj, text=cls)
        append_xml_node_attr('pose', parent=obj, text='none')
        append_xml_node_attr('truncated', parent=obj, text=str(truncated))
append_xml_node_attr('difficult', parent=obj, text=str(int(difficult)))
bb = append_xml_node_attr('bndbox', parent=obj)
append_xml_node_attr('xmin', parent=bb, text=str(x1))
append_xml_node_attr('ymin', parent=bb, text=str(y1))
append_xml_node_attr('xmax', parent=bb, text=str(x2))
append_xml_node_attr('ymax', parent=bb, text=str(y2))
o = {'class': cls, 'box': np.asarray([x1, y1, x2, y2], dtype=float),
'truncation': truncation, 'difficult': difficult, 'occlusion': occlusion}
objs.append(o)
return doc, objs
def _is_hard(cls, truncation, occlusion, x1, y1, x2, y2):
    # Hard if small and occluded, heavily occluded, or heavily truncated.
    return ((y2 - y1 < 25 and occlusion >= 2)
            or occlusion >= 3
            or truncation > 0.8)
if __name__ == '__main__':
outdir = '/home/cwq/data/ICDAR13/icdar13_voc'
dest_label_dir, dest_img_dir, dest_set_dir = build_voc_dirs(outdir)
for dset in ['train']:
_labeldir = '/home/cwq/data/ICDAR13/Challenge2_Training_Task1_GT_splited'
_imagedir = '/home/cwq/data/ICDAR13/Challenge2_Training_Task12_Images_splited'
class_sets = ('text', 'dontcare')
class_sets_dict = dict((k, i) for i, k in enumerate(class_sets))
fs = [open(os.path.join(dest_set_dir, cls + '_' + dset + '.txt'), 'w') for cls in class_sets]
ftrain = open(os.path.join(dest_set_dir, dset + '.txt'), 'w')
files = glob.glob(os.path.join(_labeldir, '*.txt'))
files.sort()
for file in files:
path, basename = os.path.split(file)
stem, ext = os.path.splitext(basename)
img_id = stem.split('_')[1]
img_name = img_id + '.jpg'
stem = "icdar13_" + img_id
with open(file, 'r') as f:
lines = f.readlines()
img_file = os.path.join(_imagedir, img_name)
print(img_file)
img = cv2.imread(img_file)
img_size = img.shape
save_img_name = "icdar13_" + img_name
doc, objs = generate_xml(save_img_name, lines, img_size, class_sets=class_sets)
cv2.imwrite(os.path.join(dest_img_dir, save_img_name), img)
xmlfile = os.path.join(dest_label_dir, stem + '.xml')
with open(xmlfile, 'w') as f:
f.write(doc.toprettyxml(indent=' '))
ftrain.writelines(stem + '\n')
        for f in fs:
            f.close()
ftrain.close()
|
11544264
|
class stacki:
    def __init__(self):
        # Instance attributes; class-level attributes would be shared
        # (and mutated) across all instances.
        self.size = 0
        self.capacity = 1000
        self.li = [0] * self.capacity
def empty(self):
return self.size==0
def full(self):
return self.size==self.capacity
def push(self,a):
if self.size==self.capacity:
print("Stack full")
else:
self.li[self.size]=a
self.size+=1
def pop(self):
if self.size==0:
print("Stack empty")
else:
self.size-=1
def top(self):
return self.li[self.size-1]
def precedence(c):
if c=="^":
return 3
if c=="*" or c=="/":
return 2
if c=="+" or c=="-":
return 1
return 0
def convert(s):
sed=stacki()
ans=""
for i in range(len(s)):
if s[i]=="+" or s[i]=="-" or s[i]=="*" or s[i]=="/" or s[i]=="^":
while not sed.empty() and precedence(s[i])<=precedence(sed.top()):
ans+=sed.top()
sed.pop()
sed.push(s[i])
elif s[i]=="(":
sed.push(s[i])
elif s[i]==")":
while not sed.empty() and sed.top()!="(":
ans+=sed.top()
sed.pop()
sed.pop()
else:
ans+=s[i]
while not sed.empty():
ans+=sed.top()
sed.pop()
return ans
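# Worked example (added note): convert("a+b*(c-d)") returns "abcd-*+" --
# operands pass straight through, "*" waits on the stack until the
# parenthesized group is emitted, and "+" is popped last.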
s=input()
print(convert(s))
|
11544405
|
from django.db import models
from django.utils import timezone
class DoorStatus(models.Model):
datetime = models.DateTimeField()
status = models.BooleanField(default=False)
name = models.CharField(max_length=20)
def __str__(self):
return self.name
@staticmethod
def get_door_by_name(name):
# Creates the object if it does not exist
try:
door = DoorStatus.objects.get(name=name)
return door
except DoorStatus.DoesNotExist:
door = DoorStatus.objects.create(name=name, datetime=timezone.now())
return door
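    # Added note: this helper mirrors Django's get_or_create pattern; an
    # equivalent call (an assumption, not from the original) would be
    # DoorStatus.objects.get_or_create(name=name, defaults={"datetime": timezone.now()})[0]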
class Meta:
verbose_name_plural = "Door Statuses"
class OpenData(models.Model):
opened = models.DateTimeField()
closed = models.DateTimeField()
def __str__(self):
return str(self.opened)
|
11544422
|
from services.VideoRehabService.ConfigManager import ConfigManager
config_man = ConfigManager()
redis_client = None
api_user_token_key = None
api_device_token_key = None
api_device_static_token_key = None
api_participant_token_key = None
api_participant_static_token_key = None
|
11544425
|
import astropy.units as astropy_units
import numpy as np
from past.utils import old_div
import astromodels.functions.numba_functions as nb_func
from astromodels.core.units import get_units
from astromodels.functions.function import (Function1D, FunctionMeta,
ModelAssertionViolation)
def get_polynomial(order: int) -> Function1D:
    """
    get a polynomial function of the given order
    :param order: the order of the polynomial (0 through 4)
    :type order: int
    :returns: a polynomial Function1D of the requested order
    """
    return [Constant(), Line(), Quadratic(), Cubic(), Quartic()][order]
class Constant(Function1D, metaclass=FunctionMeta):
r"""
description :
Return k
latex : $ k $
parameters :
k :
desc : Constant value
initial value : 0
"""
def _set_units(self, x_unit, y_unit):
self.k.unit = y_unit
def evaluate(self, x, k):
return k * np.ones(np.shape(x))
class Line(Function1D, metaclass=FunctionMeta):
r"""
description :
A linear function
latex : $ b * x + a $
parameters :
a :
desc : intercept
initial value : 0
b :
desc : coeff
initial value : 1
"""
def _set_units(self, x_unit, y_unit):
# a has units of y_unit / x_unit, so that a*x has units of y_unit
self.a.unit = y_unit
# b has units of y
self.b.unit = y_unit / x_unit
def evaluate(self, x, a, b):
return b * x + a
class Quadratic(Function1D, metaclass=FunctionMeta):
r"""
description :
A Quadratic function
latex : $ a + b \cdot x + c \cdot x^2 $
parameters :
a :
desc : coefficient
initial value : 1
b :
desc : coefficient
initial value : 1
c :
desc : coefficient
initial value : 1
"""
def _set_units(self, x_unit, y_unit):
# a has units of y_unit / x_unit, so that a*x has units of y_unit
self.a.unit = y_unit
# b has units of y
self.b.unit = y_unit / x_unit
self.c.unit = y_unit / (x_unit) ** 2
def evaluate(self, x, a, b, c):
return a + b * x + c * x * x
class Cubic(Function1D, metaclass=FunctionMeta):
r"""
description :
A cubic function
latex : $ a + b \cdot x + c \cdot x^2 + d \cdot x^3$
parameters :
a :
desc : coefficient
initial value : 1
b :
desc : coefficient
initial value : 1
c :
desc : coefficient
initial value : 1
d :
desc : coefficient
initial value : 1
"""
def _set_units(self, x_unit, y_unit):
# a has units of y_unit / x_unit, so that a*x has units of y_unit
self.a.unit = y_unit
# b has units of y
self.b.unit = y_unit / x_unit
self.c.unit = y_unit / (x_unit) ** 2
self.d.unit = y_unit / (x_unit) ** 3
def evaluate(self, x, a, b, c, d):
x2 = x * x
x3 = x2 * x
return a + b * x + c * x2 + d * x3
class Quartic(Function1D, metaclass=FunctionMeta):
r"""
description :
A quartic function
latex : $ a + b \cdot x + c \cdot x^2 + d \cdot x^3 + e \cdot x^4$
parameters :
a :
desc : coefficient
initial value : 1
b :
desc : coefficient
initial value : 1
c :
desc : coefficient
initial value : 1
d :
desc : coefficient
initial value : 1
e :
desc : coefficient
initial value : 1
"""
def _set_units(self, x_unit, y_unit):
# a has units of y_unit / x_unit, so that a*x has units of y_unit
self.a.unit = y_unit
# b has units of y
self.b.unit = y_unit / x_unit
self.c.unit = y_unit / (x_unit) ** 2
self.d.unit = y_unit / (x_unit) ** 3
self.e.unit = y_unit / (x_unit) ** 4
def evaluate(self, x, a, b, c, d, e):
x2 = x * x
x3 = x2 * x
x4 = x3 * x
return a + b * x + c * x2 + d * x3 + e * x4
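# A small usage sketch (added; assumes astromodels' usual interface where
# parameters are set via attribute access and the function is callable):
if __name__ == "__main__":
    quad = get_polynomial(2)  # a Quadratic instance
    quad.a.value, quad.b.value, quad.c.value = 1.0, 2.0, 3.0
    print(quad(np.array([0.0, 1.0, 2.0])))  # -> [ 1.  6. 17.]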
|
11544455
|
import pandas as pd
import numpy as np
from surprise import Reader, Dataset
from surprise.model_selection import cross_validate
import surprise as sp
train_data = pd.read_csv('train.csv', parse_dates=["date"])
test_data = pd.read_csv('test.csv', parse_dates=["date"])
# Prepare data
reader = Reader()
data = Dataset.load_from_df(train_data[['userId', 'itemId', 'rating']], reader)
# Use matrix factorization to implement collaborative filtering
alg = sp.SVD()
cross_validate(alg, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
# Fit on the full training set before predicting; cross_validate alone only
# fits on the individual CV folds.
alg.fit(data.build_full_trainset())
# Prediction
test_data["prediction"] = test_data.apply(lambda x: alg.predict(x["userId"], x["itemId"]).est, axis=1)
test_data["prediction"] = np.clip(test_data["prediction"], 1, 5)
test_data["prediction"].to_csv('output.csv', index=False)
|
11544462
|
import logging
from dataclasses import dataclass, field
from datetime import datetime, timezone, timedelta
from typing import Optional
import coloredlogs
import requests
from dataclasses_json import dataclass_json
from dateutil.parser import parse, parserinfo
from pyquery import PyQuery as pq
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', logger=logger)
tz = timezone(timedelta(hours=+8))
class CustomParserInfo(parserinfo):
HMS = [('時')]
JUMP = ['迄', '起', ':', '(', ')', '-']
@dataclass_json
@dataclass
class ReservoirData:
name: str = field(hash=False, repr=True, compare=False, default=None)
capavailable: float = field(hash=False, repr=True, compare=False, default=None)
statisticTimeS: datetime = field(hash=False, repr=True, compare=False, default=None)
statisticTimeE: datetime = field(hash=False, repr=True, compare=False, default=None)
rainFall: Optional[float] = field(hash=False, repr=True, compare=False, default=None)
inFlow: Optional[float] = field(hash=False, repr=True, compare=False, default=None)
outFlow: float = field(hash=False, repr=True, compare=False, default=None)
waterlevediff: float = field(hash=False, repr=True, compare=False, default=None)
recordTime: datetime = field(hash=False, repr=True, compare=False, default=None)
caplevel: float = field(hash=False, repr=True, compare=False, default=None)
currcap: float = field(hash=False, repr=True, compare=False, default=None)
currcapper: float = field(hash=False, repr=True, compare=False, default=None)
class ReservoirCrawler:
def __init__(self):
self.url = 'http://fhy.wra.gov.tw/ReservoirPage_2011/StorageCapacity.aspx'
self.search = {0: '防汛重點水庫', 1: '所有水庫', 2: '水庫及攔河堰'}
self.rowlist = ['name', 'capavailable', 'statisticTimeS', 'statisticTimeE', 'rainFall', 'inFlow', 'outFlow',
'waterlevediff', 'recordTime', 'caplevel', 'currcap', 'currcapper']
# self.query = pq(self.req)('table#ctl00_cphMain_gvList.list.nowrap')('tr')
def fetch_jsp(self):
r = requests.get(self.url)
self.viewstate = pq(r.text)('input#__VIEWSTATE').attr('value')
self.viewstategenerator = pq(r.text)('input#__VIEWSTATEGENERATOR').attr('value')
self.hiddenfield = pq(r.text)('input#ctl00_ctl02_HiddenField').attr('value')
def fetch_data(self, date: datetime, search: int = 0):
# search
# 0 -> 防汛重點水庫
# 1 -> 所有水庫
# 2 -> 水庫及攔河堰
payload = {'ctl00$ctl02': 'ctl00$cphMain$ctl00|ctl00$cphMain$cboSearch',
'ctl00_ctl02_HiddenField': self.hiddenfield,
'__EVENTTARGET': 'ctl00$cphMain$cboSearch',
'__EVENTARGUMENT': '',
'__LASTFOCUS': '',
'__VIEWSTATE': self.viewstate,
'__VIEWSTATEGENERATOR': self.viewstategenerator,
'ctl00$cphMain$ucDate$cboYear': date.year,
'ctl00$cphMain$ucDate$cboMonth': date.month,
'ctl00$cphMain$ucDate$cboDay': date.day,
'__ASYNCPOST': True}
payload.update({'ctl00$cphMain$cboSearch': self.search[search]})
        # ASP.NET form postbacks must be POSTed; a GET ignores the body.
        fetch_data = requests.post(self.url, data=payload)
if fetch_data.status_code != 200:
logger.critical(f'HTTP {fetch_data.status_code}')
return fetch_data
def clean_horizon(self, horizon_: list):
for x in range(len(horizon_)):
if '--' in horizon_[x]:
horizon_[x] = None
elif '起' in horizon_[x] or '迄' in horizon_[x] or '時' in horizon_[x]:
horizon_[x] = parse(horizon_[x], parserinfo=CustomParserInfo())
return horizon_
def parse_data(self, data):
query = pq(data.text)('table#ctl00_cphMain_gvList.list.nowrap')('tr')
reservoir_list = list()
for reservoir_row in list(query.items())[2:-1]:
horizon = reservoir_row.text().split('\n')
horizon = self.clean_horizon(horizon)
reservoir_list.append(
ReservoirData(
name=horizon[0], capavailable=horizon[1],
statisticTimeS=horizon[2], statisticTimeE=horizon[3],
rainFall=horizon[4], inFlow=horizon[5], outFlow=horizon[6],
waterlevediff=horizon[7], recordTime=horizon[8],
caplevel=horizon[9], currcap=horizon[10], currcapper=horizon[11]
)
)
return reservoir_list
    def fetch(self, date: Optional[datetime] = None):
        # A default of datetime.now(tz) would be evaluated once at import time.
        if date is None:
            date = datetime.now(tz)
        self.fetch_jsp()
        data = self.fetch_data(date)
        return self.parse_data(data)
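# A minimal usage sketch (added; assumes the upstream WRA page is reachable):
if __name__ == '__main__':
    crawler = ReservoirCrawler()
    for reservoir in crawler.fetch():
        print(reservoir.name, reservoir.currcapper)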
|
11544534
|
from django.urls import path
from . import views
urlpatterns = [
path('site_topology/', views.SiteTopologyView.as_view(), name='site_topology'),
path('topology/', views.TopologyView.as_view(), name='topology'),
]
|
11544565
|
from .base import BaseConfiguration
class Local(BaseConfiguration):
# Apps
INSTALLED_APPS = BaseConfiguration.INSTALLED_APPS
INSTALLED_APPS += ["debug_toolbar"]
# Debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#internal-ips
INTERNAL_IPS = ["127.0.0.1"]
# Check if we are inside a Docker container
if BaseConfiguration.env.bool("USING_DOCKER", False):
import socket
# Add the container IP to INTERNAL_IPS
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1" for ip in ips]
# Email
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
11544588
|
from pytest import raises
from sqlalchemy_continuum import (
ClassNotVersioned,
transaction_class,
versioning_manager
)
from tests import TestCase
class TestTransactionClass(TestCase):
def test_with_versioned_class(self):
assert (
transaction_class(self.Article) ==
versioning_manager.transaction_cls
)
def test_with_unknown_type(self):
with raises(ClassNotVersioned):
transaction_class(None)
|
11544641
|
import numpy as np
from geopandas import GeoDataFrame
from shapely.geometry import Polygon
from region.tests.util import region_list_from_array, convert_from_geodataframe
from region.util import dataframe_to_dict
# The following data describe the max-p-regions problem depicted in figure 2 on
# p. 402 in [DAR2012]_. They are commented out because the test cases take too
# long on Travis CI.
#attr = np.array([350.2, 400.5, 430.8,
# 490.4, 410.9, 450.4,
# 560.1, 500.7, 498.6])
#spatially_extensive_attr = np.array([30, 25, 31,
# 28, 32, 30,
# 35, 27, 33])
#threshold = 120
#optimal_clustering = region_list_from_array(np.array([0, 0, 0,
# 1, 0, 0,
# 1, 1, 1]))
#
#attr_str = "attr"
#spatially_extensive_attr_str = "spatially_extensive_attr"
#gdf = GeoDataFrame(
# {attr_str: attr,
# spatially_extensive_attr_str: spatially_extensive_attr},
# geometry=[Polygon([(x, y),
# (x, y+1),
# (x+1, y+1),
# (x+1, y)]) for y in range(3) for x in range(3)]
#)
# New, simpler data (fewer areas than in figure 2 on p. 402 in [DAR2012]_).
# With these data the tests run much faster than with the data from [DAR2012]_.
attr = np.array([0, 0,
1, 1])
# The following two lines mean that each region must consist of two areas.
# Since we have four areas, the optimal solution of the max-p problem
# will have 2 regions.
spatially_extensive_attr = np.ones(4)
threshold = 2
optimal_clustering = region_list_from_array(np.array([0, 0,
1, 1]))
attr_str = "attr"
spatially_extensive_attr_str = "spatially_extensive_attr"
gdf = GeoDataFrame(
{attr_str: attr,
spatially_extensive_attr_str: spatially_extensive_attr},
geometry=[Polygon([(x, y),
(x, y+1),
(x+1, y+1),
(x+1, y)]) for y in range(2) for x in range(2)]
)
# for tests with scalar attr & spatially_extensive_attr per area
attr = attr.reshape(-1, 1)
spatially_extensive_attr = spatially_extensive_attr.reshape(-1, 1)
adj, graph, neighbors_dict, w = convert_from_geodataframe(gdf)
attr_dict = dataframe_to_dict(gdf, attr_str)
spatially_extensive_attr_dict = dataframe_to_dict(gdf,
spatially_extensive_attr_str)
# for tests where attr & spatially_extensive_attr are vectors in each area
double_attr = np.column_stack((attr, attr))
double_spatially_extensive_attr = np.column_stack((spatially_extensive_attr,
spatially_extensive_attr))
double_threshold = np.hstack((threshold, threshold))
double_attr_dict = dataframe_to_dict(gdf, [attr_str] * 2)
double_spatially_extensive_attr_dict = dataframe_to_dict(
gdf, [spatially_extensive_attr_str] * 2)
|
11544660
|
import sys
import os
from github import Github
from dotenv import load_dotenv
load_dotenv()
path = os.getenv("FILEPATH")
username = os.getenv("USERNAME")
password = os.getenv("PASSWORD")
def create():
    folderName = str(sys.argv[1])
    folderpath = os.path.join(path, folderName)
    if os.path.exists(folderpath):
        print("Folder already exists.. Link to the path - " + folderpath)
        return
    os.makedirs(folderpath)
    user = Github(username, password).get_user()
    user.create_repo(folderName)
    print("Successfully created repository {}".format(folderName))
if __name__ == "__main__":
create()
|
11544662
|
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
class SwitchToWindow():
def test(self):
baseUrl = "https://letskodeit.teachable.com/pages/practice"
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(baseUrl)
# Find parent handle -> Main Window
parentHandle = driver.current_window_handle
print("Parent Handle: " + parentHandle)
# Find open window button and click it
driver.find_element(By.ID, "openwindow").click()
time.sleep(2)
    # Find all handles; there should be two handles after clicking the open window button
handles = driver.window_handles
# Switch to window and search course
for handle in handles:
print("Handle: " + handle)
        if handle != parentHandle:
driver.switch_to.window(handle)
print("Switched to window:: " + handle)
searchBox = driver.find_element(By.ID, "search-courses")
searchBox.send_keys("python")
time.sleep(2)
driver.close()
break
# Switch back to the parent handle
driver.switch_to.window(parentHandle)
driver.find_element(By.ID, "name").send_keys("Test Successful")
ff = SwitchToWindow()
ff.test()
|
11544686
|
import sys
from ralph.settings import * # noqa
# for dhcp agent test
sys.path.append(os.path.join(BASE_DIR, '..', '..', 'contrib', 'dhcp_agent'))
DEBUG = False
TEST_DB_ENGINE = os.environ.get('TEST_DB_ENGINE', 'mysql')
if TEST_DB_ENGINE == 'psql':
DATABASES['default'].update({
'ENGINE': 'transaction_hooks.backends.postgresql_psycopg2',
'PORT': os.environ.get('DATABASE_PORT', 5432),
'OPTIONS': {},
})
elif TEST_DB_ENGINE == 'mysql':
DATABASES['default']['TEST'].update({
'CHARSET': 'utf8',
'COLLATION': 'utf8_general_ci',
})
INSTALLED_APPS += (
'ralph.lib.mixins',
'ralph.tests',
'ralph.lib.custom_fields.tests',
'ralph.lib.permissions.tests',
'ralph.lib.polymorphic.tests',
'ralph.lib.mixins.tests',
)
USE_CACHE = False
PASSWORD_HASHERS = ('<PASSWORD>',)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
ROOT_URLCONF = 'ralph.urls.test'
# specify all url modules to reload during specific tests
# see `ralph.tests.mixins.ReloadUrlsMixin` for details
URLCONF_MODULES = ['ralph.urls.base', ROOT_URLCONF]
# Uncomment lines below if you want some additional output from loggers
# during tests.
# LOGGING['loggers']['ralph'].update(
# {'level': 'DEBUG', 'handlers': ['console']}
# )
RQ_QUEUES['ralph_job_test'] = dict(ASYNC=False, **REDIS_CONNECTION)
RQ_QUEUES['ralph_async_transitions']['ASYNC'] = False
RALPH_INTERNAL_SERVICES.update({
'JOB_TEST': {
'queue_name': 'ralph_job_test',
'method': 'ralph.lib.external_services.tests.test_job_func',
}
})
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
},
'template_fragments': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
SKIP_MIGRATIONS = os.environ.get('SKIP_MIGRATIONS', None)
if SKIP_MIGRATIONS:
print('skipping migrations')
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return "notmigrations"
MIGRATION_MODULES = DisableMigrations()
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
ENABLE_EMAIL_NOTIFICATION = True
ENABLE_HERMES_INTEGRATION = True
HERMES['ENABLED'] = ENABLE_HERMES_INTEGRATION
|
11544688
|
from .general_r2 import GeneralOnR2
from .rot2d_on_r2 import Rot2dOnR2
from .fliprot2d_on_r2 import FlipRot2dOnR2
from .flip2d_on_r2 import Flip2dOnR2
from .trivial_on_r2 import TrivialOnR2
__all__ = [
"GeneralOnR2",
"Rot2dOnR2",
"FlipRot2dOnR2",
"Flip2dOnR2",
"TrivialOnR2",
]
|
11544714
|
from mod_pywebsocket import common
from mod_pywebsocket import stream
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
payload1 = b'Invalid continuation frame to be ignored.'
payload2 = b'Valid frame after closing should be disposed.'
request.connection.write(
stream.create_header(common.OPCODE_CONTINUATION,
len(payload1), 1, 0, 0, 0, 0) + payload1)
request.connection.write(
stream.create_header(common.OPCODE_TEXT, len(payload2), 1, 0, 0, 0, 0) +
payload2)
|
11544722
|
from wandb.plot.bar import bar
from wandb.plot.confusion_matrix import confusion_matrix
from wandb.plot.histogram import histogram
from wandb.plot.line import line
from wandb.plot.line_series import line_series
from wandb.plot.pr_curve import pr_curve
from wandb.plot.roc_curve import roc_curve
from wandb.plot.scatter import scatter
__all__ = [
"line",
"histogram",
"scatter",
"bar",
"roc_curve",
"pr_curve",
"confusion_matrix",
"line_series",
]
|
11544731
|
from .single_level import SingleRoIExtractor
from .single_level_straight3d import SingleRoIStraight3DExtractor
__all__ = [
'SingleRoIExtractor', 'SingleRoIStraight3DExtractor'
]
|
11544743
|
from orchestra.contrib.settings import Setting
ORDERS_BILLING_BACKEND = Setting('ORDERS_BILLING_BACKEND',
'orchestra.contrib.orders.billing.BillsBackend',
validators=[Setting.validate_import_class],
help_text="Pluggable backend for bill generation.",
)
ORDERS_SERVICE_MODEL = Setting('ORDERS_SERVICE_MODEL',
'services.Service',
validators=[Setting.validate_model_label],
help_text="Pluggable service class.",
)
ORDERS_EXCLUDED_APPS = Setting('ORDERS_EXCLUDED_APPS',
(
'orders',
'admin',
'contenttypes',
'auth',
'migrations',
'sessions',
'orchestration',
'bills',
'services',
'mailer',
'issues',
),
help_text="Prevent inspecting these apps for service accounting."
)
ORDERS_METRIC_ERROR = Setting('ORDERS_METRIC_ERROR',
0.05,
help_text=("Only account for significative changes.<br>"
"metric_storage new value: <tt>lastvalue*(1+threshold) > currentvalue or lastvalue*threshold < currentvalue</tt>."),
)
ORDERS_BILLED_METRIC_CLEANUP_DAYS = Setting('ORDERS_BILLED_METRIC_CLEANUP_DAYS',
40,
help_text=("Number of days after a billed stored metric is deleted."),
)
|
11544745
|
from libsaas import http, parsers
from libsaas.services import base
from . import resource
class Replies(resource.PaginatedDeskResource):
path = 'replies'
class Reply(resource.DeskResource):
path = 'replies'
class CasesBase(resource.DeskResource):
path = 'cases'
class Cases(CasesBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
@base.apimethod
def get(self, embed=None, fields=None, per_page=None, page=None):
"""
Retrieve a paginated list of all cases.
Upstream documentation: http://dev.desk.com/API/cases#list
"""
params = base.get_params(None, locals())
return http.Request('GET', self.get_url(), params), parsers.parse_json
@base.apimethod
def search(self, name=None, first_name=None, last_name=None, email=None,
phone=None, company=None, twitter=None, labels=None,
case_id=None, subject=None, description=None,
status=None, priority=None, assigned_group=None,
assigned_user=None, channels=None, notes=None, attachments=None,
created=None, updated=None, since_created_at=None,
max_created_at=None, since_updated_at=None, max_updated_at=None,
since_id=None, max_id=None, per_page=None, page=None,
embed=None, fields=None, **case_custom_fields):
"""
Search cases based on a combination of parameters with pagination.
Upstream documentation: http://dev.desk.com/API/cases#search
"""
store = locals()
store.update(store.pop('case_custom_fields'))
params = base.get_params(None, store)
url = '{0}/{1}'.format(self.get_url(), 'search')
return http.Request('GET', url, params), parsers.parse_json
class Case(CasesBase):
def __init__(self, parent, object_id, is_external=False):
case_id = 'e-%s' % object_id if is_external else object_id
super(Case, self).__init__(parent, case_id)
@base.apimethod
def message(self):
"""
Retrieve the original message for this case.
Upstream documentation: http://dev.desk.com/API/cases#message-show
"""
url = '{0}/{1}'.format(self.get_url(), 'message')
return http.Request('GET', url), parsers.parse_json
@base.apimethod
def history(self, per_page=None, page=None):
"""
The case history endpoint will display a paginated list of all
events/actions that have happened to the case
Upstream documentation: http://dev.desk.com/API/cases#history
"""
params = base.get_params(None, locals())
url = '{0}/{1}'.format(self.get_url(), 'history')
return http.Request('GET', url, params), parsers.parse_json
@base.resource(Replies)
def replies(self):
"""
Return the resource corresponding to the case replies
"""
return Replies(self)
@base.resource(Reply)
def reply(self, reply_id):
"""
Return the resource corresponding to a single reply
"""
return Reply(self, reply_id)
|
11544781
|
import array
from PIL import Image
SLICES = 361
size = (100, 100, 100)
imout = Image.new("L", (size[0], size[1]*size[2]))
for i in range(size[2]):
    z = i * SLICES // size[2]
filename = "bunny/"+str(z+1)+".png"
im = Image.open(filename)
im = im.resize((110, 110))
region = im.crop((3, 7, 103, 107))
box = (0, i*size[1], size[0], (i+1)*size[1])
imout.paste(region, box)
print "processed slice "+str(z)+" : "+filename
imout.save("test.png", "PNG")
|
11544813
|
from setuptools import setup, find_packages
def get_readme_rst():
import subprocess
try:
proc = subprocess.Popen(
['pandoc', 'README.md', '--to', 'rst'],
stdout=subprocess.PIPE
)
description = proc.communicate()[0].decode()
except OSError:
with open('README.md') as fp:
description = fp.read()
return description
def get_version():
import os
data = {}
fname = os.path.join('ipyaml', '__init__.py')
exec(compile(open(fname).read(), fname, 'exec'), data)
return data.get('__version__')
install_requires = ['pyyaml', 'nbformat']
tests_require = ['pytest']
classes = """
Development Status :: 3 - Alpha
Environment :: Console
Framework :: IPython
Intended Audience :: Developers
Intended Audience :: Education
Intended Audience :: End Users/Desktop
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Topic :: Software Development :: Libraries
Topic :: Utilities
"""
classifiers = [x.strip() for x in classes.splitlines() if x]
setup(
name='ipyaml',
version=get_version(),
author='<NAME>',
author_email='<EMAIL>',
description='Convert IPython notebooks to YAML and vice-versa',
long_description=get_readme_rst(),
license="BSD",
url='https://github.com/prabhuramachandran/ipyaml',
classifiers=classifiers,
packages=find_packages(),
install_requires=install_requires,
tests_require=tests_require,
package_dir={'ipyaml': 'ipyaml'},
entry_points="""
[console_scripts]
ipyaml = ipyaml.cli:main
"""
)
|
11544831
|
def error(x, y):
    if y != '(' and y != ')' and y != '0' and y != '1' and y != '*':
        print("Invalid symbol", y)
    elif x == 0 and y == '(':
        print("Expected symbol (")
    elif x == 1 and y == ')':
        print("The number of ones does not satisfy the condition. Expected symbol 1")
    elif x == 1:
        print("Expected symbol )")
    elif x == 4 and y == '1':
        print("The number of ones does not satisfy the condition! Expected end of string.")
    elif x == 5 and y == '*':
        print("The number of ones does not satisfy the condition! Expected symbol 1")
    elif x == 4 and y != '*':
        print("Expected end of string")
    elif x == 5 and y != '*':
        print("Expected end of string")
stack = []
step = 0  # state
iterator = 0
string = input('Enter a string: ')
string += '*'  # end-of-string marker
for symbol in string:
    iterator += 1  # records the position of the first mismatch
if step == 0 and symbol == '(' and len(stack) == 0 :
step = 0
stack.append(symbol)
elif step == 0 and symbol == '0' and len(stack) != 0 and stack[len(stack) - 1] == '(':
step = 0
stack.append(symbol)
stack.append(symbol)
elif step == 0 and symbol == ')' and len(stack) != 0 and stack[len(stack) - 1] == '(':
step = 4
stack.pop()
elif step == 0 and symbol == '0' and len(stack) != 0 and stack[len(stack) - 1] == '0':
step = 0
stack.append(symbol)
stack.append(symbol)
elif step == 0 and symbol == '1' and len(stack) != 0 and stack[len(stack) - 1] == '0':
step = 1
stack.pop()
elif step == 1 and symbol == '1' and len(stack) != 0 and stack[len(stack) - 1] == '0':
step = 1
stack.pop()
elif step == 1 and symbol == ')' and len(stack) != 0 and stack[len(stack) - 1] == '(' :
step = 4
stack.pop()
elif step == 4 and symbol == '*' and len(stack) == 0 :
step = 3
        print('The input string belongs to the language.')
elif step == 4 and symbol == '0' and len(stack) == 0 :
step = 4
stack.append(symbol)
elif step == 4 and symbol == '0' and len(stack) != 0 and stack[len(stack) - 1] == '0' :
step = 4
stack.append(symbol)
elif step == 4 and symbol == '1' and len(stack) > 1 and stack[len(stack) - 1] == '0' :
step = 5
stack.pop()
stack.pop()
elif step == 5 and symbol == '1' and len(stack) > 1 and stack[len(stack) - 1] == '0' :
step = 5
stack.pop()
stack.pop()
elif step == 5 and symbol == '*' and len(stack) == 0 :
step = 3
        print('The input string belongs to the language.')
    else:
        print('The input string does not belong to the language.')
        print('The first mismatch with the language is at position', iterator)
        print('The symbol is', symbol)
        error(step, symbol)
        break
|
11544832
|
from typing import Type
from .charybdis import CharybdisController
class SolanumController(CharybdisController):
software_name = "Solanum"
binary_name = "solanum"
def get_irctest_controller_class() -> Type[SolanumController]:
return SolanumController
|
11544853
|
import timeit
from random import randint
def stooge_sort(collection, left, right, counter):
if left >= right:
return
if collection[right] < collection[left]:
collection[left], collection[right] = collection[right], collection[left]
counter += 1
print("Step %i -->" % counter, collection)
if (right - left + 1) > 2:
list_part = (right - left + 1) // 3
counter = stooge_sort(collection, left, right - list_part, counter)
counter = stooge_sort(collection, left + list_part, right, counter)
counter = stooge_sort(collection, left, right - list_part, counter)
return counter
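# Note (added): the three overlapping two-thirds recursions give stooge sort a
# running time of O(n^(log 3 / log 1.5)) ~ O(n^2.71), so it is purely didactic.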
def visualization():
counter = 0
length_list = 10
collection = [randint(0, length_list) for _ in range(length_list)]
print("Initial list:", collection)
print("Visualization of algorithm work.")
counter = stooge_sort(collection, 0, length_list - 1, counter)
print("Final list:", collection)
print("Total numbers of passages:", counter)
def main():
elapsed_time = timeit.timeit(visualization, number=1)
print("Elapsed time: ", round(elapsed_time, 7), "sec.")
if __name__ == '__main__':
main()
|
11544904
|
from typing import List

class Solution:
def numSubseq(self, nums: List[int], target: int) -> int:
nums.sort()
if nums[0] * 2 > target:
return 0
MOD = 10 ** 9 + 7
left = 0
right = len(nums) - 1
ret = 0
while left <= right:
if nums[left] + nums[right] <= target:
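                # nums[left] is the subsequence minimum; any subset of the
                # right - left elements after it (up to and including
                # nums[right]) can be added, giving 2**(right - left) choices.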
ret += 2 ** (right - left)
ret %= MOD
left += 1
else:
right -= 1
return ret
|
11544908
|
from django.urls import path
import qatrack.reports.views as views
urlpatterns = [
path('', views.select_report, name="reports"),
path('filter/', views.get_filter, name="reports-filter"),
path('preview/', views.report_preview, name="reports-preview"),
path('save/', views.save_report, name="reports-save"),
path('load/', views.load_report, name="reports-load"),
path('delete/', views.delete_report, name="reports-delete"),
path('saved-reports/', views.saved_reports_datatable, name="reports-saved"),
path('schedule/<int:report_id>/', views.report_schedule_form, name="reports-schedule-form"),
path('schedule/delete/', views.delete_schedule, name="reports-schedule-delete"),
path('schedule/', views.schedule_report, name="reports-schedule"),
]
|
11544923
|
def count_provider(count: int):
    total = 0  # avoid shadowing the built-in sum()
    for i in range(count):
        total = total + i
    return total
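# Added note: the loop sums 0 + 1 + ... + (count - 1), so an equivalent closed
# form is count * (count - 1) // 2, e.g. count_provider(5) == 10.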
|
11544944
|
import os
import sys
from io import open
import torch
from torch import nn
import numpy as np
from GPG.models.model_utils import init_wt_normal
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config, position_embeddings_flag=False):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.bert_input_size, padding_idx=0)
self.position_embeddings = None
self.position_embeddings_flag = position_embeddings_flag
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
if (self.position_embeddings_flag):
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.position_emb_size)
self.emb_size = config.bert_input_size + config.position_emb_size
else:
self.emb_size = config.bert_input_size
self.LayerNorm = torch.nn.LayerNorm(self.emb_size, eps=config.layer_norm_eps)
self.LayerNorm_no_position = torch.nn.LayerNorm(config.bert_input_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, position_ids=None):
words_embeddings = self.word_embeddings(input_ids)
        if torch.isnan(torch.sum(words_embeddings)).item():
            print("words_embeddings contains NaN")
            print(words_embeddings)
if(self.position_embeddings_flag and (position_ids is not None)):
# position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
# position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
position_embeddings = self.position_embeddings(position_ids)
            if torch.isnan(torch.sum(position_embeddings)).item():
                print("position_embeddings contains NaN")
                print(position_embeddings)
embeddings = torch.cat([words_embeddings, position_embeddings], 2)
embeddings = self.LayerNorm(embeddings)
else:
embeddings = words_embeddings
embeddings = self.LayerNorm_no_position(embeddings)
embeddings = self.dropout(embeddings)
        if torch.isnan(torch.sum(embeddings)).item():
            print("embeddings contain NaN")
            print(embeddings)
emb_dim = self.emb_size
return embeddings, emb_dim
class GloveEmbeddings(nn.Module):
def __init__(self, config, vocab):
super(GloveEmbeddings, self).__init__()
# self.vocab = vocab
# self.config = config
self.emb_dim = config.emb_dim # 300
self.position_embeddings = None
self.position_embeddings_flag = config.position_embeddings_flag
self.word_embeddings = self.share_embedding(vocab, config.emb_file, self.emb_dim)
if (self.position_embeddings_flag):
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.position_emb_size)
# self.position_embeddings.weight.requires_grad = False
self.emb_dim = self.emb_dim + config.position_emb_size
self.LayerNorm = torch.nn.LayerNorm(self.emb_dim, eps=config.layer_norm_eps)
self.LayerNorm_no_position = torch.nn.LayerNorm(config.bert_input_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, position_ids=None):
words_embeddings = self.word_embeddings(input_ids)
        if self.position_embeddings_flag and position_ids is not None:
position_embeddings = self.position_embeddings(position_ids)
embeddings = torch.cat([words_embeddings, position_embeddings], 2)
embeddings = self.LayerNorm(embeddings)
else:
embeddings = words_embeddings
embeddings = self.LayerNorm_no_position(embeddings)
embeddings = self.dropout(embeddings)
emb_dim = self.emb_dim
return embeddings, emb_dim
def gen_embeddings(self, vocab, emb_dim, emb_file):
embeddings = np.random.randn(vocab.voc_size, emb_dim) * 0.01
print('Embeddings: %d x %d' % (vocab.voc_size, emb_dim))
if emb_file is not None:
print('Loading embedding file: %s' % emb_file)
pre_trained = 0
for line in open(emb_file).readlines():
sp = line.split()
if(len(sp) == emb_dim + 1):
if sp[0] in vocab._word2id:
pre_trained += 1
embeddings[vocab._word2id[sp[0]]] = [float(x) for x in sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / vocab.voc_size))
return embeddings
    def share_embedding(self, vocab, emb_file, emb_dim=300, pretrain=True):
embedding = nn.Embedding(vocab.voc_size, emb_dim)
init_wt_normal(embedding.weight)
if(pretrain):
pre_embedding = self.gen_embeddings(vocab, emb_dim, emb_file)
embedding.weight.data.copy_(torch.FloatTensor(pre_embedding))
embedding.weight.requires_grad = False
return embedding
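# A minimal smoke test for BertEmbeddings, assuming only the config attributes
# referenced above; SimpleNamespace stands in for the real (unknown) config class.
if __name__ == "__main__":
    from types import SimpleNamespace
    _cfg = SimpleNamespace(vocab_size=100, bert_input_size=32,
                           max_position_embeddings=16, position_emb_size=8,
                           layer_norm_eps=1e-12, hidden_dropout_prob=0.1)
    _emb = BertEmbeddings(_cfg, position_embeddings_flag=True)
    _ids = torch.zeros(2, 5, dtype=torch.long)
    _pos = torch.arange(5).unsqueeze(0).expand(2, 5)
    _out, _dim = _emb(_ids, _pos)
    print(_out.shape, _dim)  # expected: torch.Size([2, 5, 40]) 40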
|
11544961
|
import PikaStdDevice
# kernel
# TODO
# hal
class GPIO(PikaStdDevice.GPIO):
def platformHigh():
pass
def platformLow():
pass
def platformEnable():
pass
def platformDisable():
pass
def platformSetMode():
pass
def platformRead():
pass
|
11544983
|
import pkg_resources
# These test files need to be available to the rest of the test suite
# They are included in MANIFEST.in
example_data_dir = pkg_resources.resource_filename(__name__, 'data')
|
11545024
|
from django.core.management.base import BaseCommand
from onadata.apps.logger.models import XForm, Instance
from onadata.apps.fsforms.models import XformHistory, FInstance
from onadata.settings.local_settings import XML_VERSION_MAX_ITER
import os
import re
class Command(BaseCommand):
    help = 'Check whether XForm and XformHistory contain the version of each FInstance'
def add_arguments(self, parser):
parser.add_argument('username', type=str)
def handle(self, *args, **options):
n = XML_VERSION_MAX_ITER
batchsize = options.get("batchsize", 50)
username = options['username']
stop = False
offset = 0
while stop is not True:
limit = offset + batchsize
finstances = FInstance.objects.filter(instance__xform__user__username=username)[offset:limit]
inst = list(finstances)
if finstances:
self.stdout.write("Checking version in xform from #{} to #{}\n".format(
inst[0].id,
inst[-1].id))
for finstance in finstances:
finstance_version = finstance.version
if finstance.project_fxf:
xml = finstance.project_fxf.xf.xml
else:
xml = finstance.site_fxf.xf.xml
                    xform_version = None
                    # check for version in the tag <(id) id="" version="">
                    pattern = re.compile('version="(.*)">')
m = pattern.search(xml)
if m:
xform_version = m.group(1)
else:
                        # second-priority version labels:
                        # _version__006 takes priority over _version__005
                        for i in range(n, 0, -1):
                            # for old version labels (containing both letters and digits)
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version__00{0}" """.format(i))
m = p.search(xml)
if m:
xform_version = m.group(1)
print('Version found')
else:
                                # for old version labels (containing only numbers)
p = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/_version__00{0}" """.format(i))
m1 = p.search(xml)
if m1:
xform_version = m1.group(1)
if xform_version:
pass
else:
                            # next-priority version label
                            # for old version labels
p = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/_version_" """)
m = p.search(xml)
if m:
xform_version = m.group(1)
else:
                                # for old version labels
                                p1 = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/_version_" """)
                                m1 = p1.search(xml)
if m1:
xform_version = m1.group(1)
else:
                                    # next-priority version label
                                    # for new version labels
p1 = re.compile("""<bind calculate="\'(.*)\'" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
xform_version = m1.group(1)
else:
                                        # for new version labels
p1 = re.compile("""<bind calculate="(.*)" nodeset="/(.*)/__version__" """)
m1 = p1.search(xml)
if m1:
xform_version = m1.group(1)
if finstance_version == xform_version:
continue
elif XformHistory.objects.filter(xform__user__username=username, version=finstance_version).exists():
continue
else:
xform_hist = XformHistory()
if finstance.project_fxf:
xform_hist.xform = finstance.project_fxf.xf
xform_hist.title = finstance.project_fxf.xf.title
xform_hist.uuid = finstance.project_fxf.xf.uuid
else:
xform_hist.xform = finstance.site_fxf.xf
xform_hist.title = finstance.site_fxf.xf.title
xform_hist.uuid = finstance.site_fxf.xf.uuid
xform_hist.version = finstance.version
xform_hist.save()
                        print('FInstance version does not match XForm or XformHistory:', finstance.id)
else:
stop = True
offset += batchsize
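# Usage sketch (the command name is this module's file name, which is not shown here):
#   python manage.py <command_module_name> <username>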
|
11545053
|
import json
import logging
from pocket import Pocket, PocketException
from .logger import Log
logger = Log.get_logger(__name__)
class PocketAPIClient:
pocket_client = None
def __init__(self, consumer_key, access_token):
self.pocket_client = Pocket(consumer_key, access_token)
def get_articles_data(self, *args, **kwargs):
# Fetch the articles
try:
# For list of optional parameters the API supports - https://getpocket.com/developer/docs/v3/retrieve
response, headers = self.pocket_client.get(*args, **kwargs)
return response.get('list')
except PocketException as e:
            logger.error(e)
def add_tags_to_articles(self, articles_with_tags, replace=False):
try:
total_articles = len(articles_with_tags.items())
if total_articles == 0:
return
pocket_instance = self.pocket_client
# Start a bulk operation
for id, data in articles_with_tags.items():
pocket_instance = pocket_instance.tags_add(id, data['tags'])
# and commit
response, headers = self.pocket_client.commit()
logger.info('Added the tags to articles.')
except PocketException as e:
logger.error(e)
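# Minimal usage sketch (credentials and tag values are placeholders):
# client = PocketAPIClient('<consumer_key>', '<access_token>')
# articles = client.get_articles_data(state='unread', count=10) or {}
# client.add_tags_to_articles(
#     {item_id: {'tags': 'python,to-read'} for item_id in articles})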
|
11545057
|
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.views.decorators.http import require_safe
from django.http import HttpResponseForbidden
from core.csv_export import CSV_EXPORT_FORMATS, csv_response
from core.models import Event
from ..models import EventSurvey, EventSurveyResult, GlobalSurvey, GlobalSurveyResult
@require_safe
def survey_export_view(request, event_slug='', survey_slug='', format='xlsx'):
if event_slug:
event = get_object_or_404(Event, slug=event_slug)
survey = get_object_or_404(EventSurvey, event=event, slug=survey_slug, is_active=True)
SurveyResult = EventSurveyResult
slug = f'{event.slug}-{survey.slug}'
else:
event = None
survey = get_object_or_404(GlobalSurvey, slug=survey_slug, is_active=True)
SurveyResult = GlobalSurveyResult
slug = survey.slug
if not (request.user.is_superuser or request.user == survey.owner):
return HttpResponseForbidden()
results = SurveyResult.objects.filter(survey=survey).order_by('created_at')
timestamp = now().strftime('%Y%m%d%H%M%S')
filename = f'{slug}-results-{timestamp}.{format}'
return csv_response(
event,
SurveyResult,
results,
filename=filename,
dialect=CSV_EXPORT_FORMATS[format],
m2m_mode='comma_separated'
)
|
11545065
|
import click
from prog.cli import delete
from prog.cli import set
from prog.cli import show
from prog import client
from prog import output
from prog import utils
import time
def _list_profile_display_format(rule):
rule["type"] = client.CfgTypeDisplay[rule["cfg_type"]]
rule["modified"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(rule["last_modified_timestamp"]))
rule["created"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(rule["created_timestamp"]))
f = "name"
if f not in rule:
rule[f] = ""
f = "path"
if f not in rule:
rule[f] = ""
f = "user"
if f not in rule:
rule[f] = ""
@show.group("process")
@click.pass_obj
def show_process(data):
"""Show process profile."""
@show_process.group("profile", invoke_without_command=True)
@click.option('--scope', default="all", type=click.Choice(['fed', 'local', 'all']),
help="Show federal, local or all profiles")
@click.option("--page", default=5, type=click.IntRange(1), help="list page size, default=5")
@click.option('--sort_dir', type=click.Choice(['asc', 'desc']), default='asc', help="sort direction.")
@click.pass_obj
@click.pass_context
def show_process_profile(ctx, data, scope, page, sort_dir):
"""Show process profile."""
if ctx.invoked_subcommand is not None:
return
args = {'start': 0, 'limit': page}
if scope == 'fed' or scope == 'local':
args['scope'] = scope
# args = {'sort': "group", 'sort_dir': sort_dir, 'start': 0, 'limit': page}
while True:
pfs = data.client.list("process_profile", "process_profile", **args)
for p in pfs:
click.echo("Group: %s, Mode: %s, Baseline: %s" % (p["group"], p["mode"], p["baseline"]))
columns = ("name", "path", "user", "action", "type", "uuid", "allow_update")
for r in p["process_list"]:
_list_profile_display_format(r)
output.list(columns, p["process_list"])
if args["limit"] > 0 and len(pfs) < args["limit"]:
break
click.echo("Press <esc> to exit, press other key to continue ...")
c = utils.keypress()
if ord(c) == 27:
break
args["start"] += page
@show_process_profile.command("")
@click.argument("group")
@click.pass_obj
def group(data, group):
"""Show process profile."""
profile = data.client.show("process_profile", "process_profile", group)
if not profile:
return
for r in profile["process_list"]:
_list_profile_display_format(r)
click.echo("Mode: %s" % profile["mode"])
if profile["baseline"] != "":
click.echo("Baseline: %s" % profile["baseline"])
columns = ("name", "path", "user", "action", "type", "uuid", "allow_update")
output.list(columns, profile["process_list"])
@set.group("process")
@click.pass_obj
def set_process(data):
"""Set process profile. """
@set_process.command("profile")
@click.argument('group')
@click.option("--name", help="process name")
@click.option("--path", default="", help="process path")
@click.option("--user", default="", help="allowed user")
@click.option("--action", type=click.Choice(['allow', 'deny']), help="process action")
@click.option("--disable_alert", type=click.Choice(['true', 'false']), help="disable_alert")
@click.option("--baseline", type=click.Choice(['basic', 'zero-drift']), help="profile baseline")
@click.option("--allow_update", default='false', type=click.Choice(['true', 'false']),
help="allow modified executable file")
@click.pass_obj
def set_process_profile(data, group, path, name, user, action, disable_alert, baseline, allow_update):
"""Set process profile. """
cfg = {"group": group}
    if name is None and disable_alert is None and baseline is None:
click.echo("Missing config")
return
    if name is not None:
        if action is None:
click.echo("Rule must have an action")
return
cfg["process_change_list"] = [
{"name": name, "path": path, "user": user, "action": action, "allow_update": allow_update == "true", }]
    if disable_alert is not None:
cfg["alert_disabled"] = (disable_alert == "true")
    if baseline is not None:
cfg["baseline"] = baseline
# if enable_hash != None:
# cfg["hash_enabled"] = (enable_hash=="true")
data.client.config("process_profile", group, {"process_profile_config": cfg})
@delete.group("process")
@click.pass_obj
def delete_process(data):
"""Delete process profile. """
@delete_process.command("profile")
@click.argument('group')
@click.option("--name", help="process name")
@click.option("--path", default="", help="process path")
@click.option("--user", default="", help="allowed user")
@click.pass_obj
def delete_process_profile(data, group, path, name, user):
"""Delete process profile. """
    if name is not None:
cfg = {"group": group, "process_delete_list": [{"name": name, "path": path, "user": user}]}
else:
click.echo("Invalid config!")
return
data.client.config("process_profile", group, {"process_profile_config": cfg})
@show_process.command("rule")
@click.argument("uuid")
@click.pass_obj
def show_process_rule(data, uuid):
"""Show process from uuid."""
args = {'limit': 1}
rules = data.client.list("process_rules/%s" % uuid, "process_rule", **args)
if len(rules) != 1:
click.echo("\nNot found: %s" % uuid)
return
_list_profile_display_format(rules[0]["rule"])
click.echo("\nGroup: %s, Active: %d" % (rules[0]["group"], rules[0]["active"]))
columns = ("uuid", "name", "path", "user", "action", "type", "update_alert", "created", "modified")
output.show(columns, rules[0]["rule"])
|
11545103
|
import json
import sys
def load(path):
with open(path, "r") as f:
return f.read().strip()
args = sys.argv[1:]
assert len(args) % 3 == 0, "arguments must come in (name, hash, version) triplets, got: %d" % len(args)
version_cache = {}
for name_file, hash_file, version_file in zip(args[0::3], args[1::3], args[2::3]):
name = load(name_file)
assert name not in version_cache, "collision between two packages with the name: %s" % name
version_cache[name] = {
"hash": load(hash_file),
"version": load(version_file),
}
print(json.dumps(version_cache))
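# Usage sketch (hypothetical file names; each input file holds one stripped value):
#   python collect_versions.py pkg.name pkg.hash pkg.version > versions.json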
|
11545113
|
from .app import SparkCeleryApp
from .task import SparkCeleryTask
from .cache import cache
from .main import main
|
11545118
|
from unittest import TestCase
from office365.sharepoint.client_context import ClientContext
from office365.sharepoint.directory.SPHelper import SPHelper
from office365.sharepoint.directory.directory_session import DirectorySession
from tests import test_user_credentials, test_site_url
class TestDirectorySession(TestCase):
session = None # type: DirectorySession
@classmethod
def setUpClass(cls):
super(TestDirectorySession, cls).setUpClass()
cls.client = ClientContext(test_site_url).with_credentials(test_user_credentials)
cls.session = DirectorySession(cls.client)
def test_1_init_session(self):
session = self.__class__.session.get().execute_query()
self.assertIsInstance(session, DirectorySession)
def test_2_get_me(self):
me = self.__class__.session.me.get().execute_query()
self.assertIsNotNone(me.resource_path)
#def test_3_get_my_groups(self):
# result = self.__class__.session.me.get_my_groups().execute_query()
# self.assertIsNotNone(result.value)
def test_4_check_site_availability(self):
result = SPHelper.check_site_availability(self.client, test_site_url).execute_query()
self.assertIsNotNone(result.value)
|
11545144
|
from abc import ABC, abstractmethod
class Factory(ABC):
@abstractmethod
def instance(self):
pass
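# A hypothetical concrete factory, to illustrate the contract:
# class ListFactory(Factory):
#     def instance(self):
#         return []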
|
11545152
|
from followthemoney.dedupe.judgement import Judgement
import structlog
from itertools import combinations
from opensanctions.core.dataset import Dataset
from opensanctions.core.context import Context
from opensanctions.core.loader import Database
from opensanctions.core.entity import Entity
from opensanctions.core.http import get_session
log = structlog.getLogger(__name__)
NOMINATIM = "https://nominatim.openstreetmap.org/search.php"
EXPIRE_CACHE = 86400 * 200  # 200 days, in seconds
def query_nominatim(address: Entity):
session = get_session()
for full in address.get("full"):
params = {
"q": full,
"countrycodes": address.get("country"),
"format": "jsonv2",
"accept-language": "en",
"addressdetails": 1,
}
res = session.request(
"GET", NOMINATIM, params=params, expire_after=EXPIRE_CACHE
)
results = res.json()
if not res.from_cache:
log.info(
"OpenStreetMap/Nominatim geocoded",
address=address.caption,
results=len(results),
)
for result in results:
yield result
def xref_geocode(dataset: Dataset):
context = Context(dataset)
resolver = context.resolver
db = Database(dataset, resolver)
loader = db.view(dataset)
nodes = {}
entities = {}
try:
for entity in loader:
if not entity.schema.is_a("Address"):
continue
# log.info("Dedupe", address=entity.caption)
for result in query_nominatim(entity):
# osm_id = result.get("osm_id")
osm_id = result.get("display_name")
if osm_id not in entities:
entities[osm_id] = set()
nodes[osm_id] = {
"name": result.get("display_name"),
"importance": result.get("importance"),
"forms": set(),
}
entities[osm_id].add(entity.id)
nodes[osm_id]["forms"].add(entity.caption)
# context.pprint(result)
except KeyboardInterrupt:
pass
resolver.prune()
for osm_id, ids in entities.items():
if len(ids) < 2:
continue
data = nodes[osm_id]
for (a, b) in combinations(ids, 2):
if not resolver.check_candidate(a, b):
continue
judgement = resolver.get_judgement(a, b)
if judgement == Judgement.NO_JUDGEMENT:
resolver.suggest(a, b, data["importance"])
log.info("Suggested match", address=data["name"], forms=data["forms"])
resolver.save()
|
11545157
|
SECURITY_BYPASS_HEADERS = {
'Connection': 'keep-alive',
'Tele2-User-Agent': '"mytele2-app/3.17.0"; "unknown"; "Android/9"; "Build/12998710"',
'X-API-Version': '1',
'User-Agent': 'okhttp/4.2.0'
}
MAIN_API = 'https://my.tele2.ru/api/subscribers/'
SMS_VALIDATION_API = 'https://my.tele2.ru/api/validation/number/'
TOKEN_API = 'https://my.tele2.ru/auth/realms/tele2-b2c/protocol/openid-connect/token/'
|
11545160
|
class DestList:
def __init__(self):
self.destList = []
def getDestList(self):
return self.destList
def setDestList(self, destList):
self.destList = destList
|
11545187
|
from pytest import mark, skip
from sys import version_info
from argparse import OPTIONAL, ZERO_OR_MORE, ONE_OR_MORE, REMAINDER
@mark.parametrize('action', ['store', 'append', 'extend'])
def test_actions_with_argument(empty_parser, autocomplete_and_compare, action):
if action == 'extend' and version_info.minor < 8:
skip('The extend action is supported from python >= 3.8')
empty_parser.add_argument('arg', action=action)
autocomplete_and_compare(empty_parser, [r':arg:_files'])
@mark.parametrize('action', ['store_const', 'append_const'])
def test_actions_without_argument_requiring_const(empty_parser,
autocomplete_and_compare,
action):
empty_parser.add_argument('arg', action=action, const=1)
autocomplete_and_compare(empty_parser, [r''])
@mark.parametrize('action', ['store_true', 'store_false', 'count'])
def test_actions_without_argument_requiring_no_const(empty_parser,
autocomplete_and_compare,
action):
empty_parser.add_argument('arg', action=action)
autocomplete_and_compare(empty_parser, [r''])
@mark.parametrize('nargs', [None, 1])
def test_one_subargument(empty_parser, autocomplete_and_compare, nargs):
empty_parser.add_argument('arg', nargs=nargs)
autocomplete_and_compare(empty_parser, [r':arg:_files'])
def test_optional_subargument(empty_parser, autocomplete_and_compare):
empty_parser.add_argument('arg', nargs=OPTIONAL)
autocomplete_and_compare(empty_parser, [r'::arg:_files'])
@mark.parametrize('nargs', [2, 10])
def test_multiple_subarguments(empty_parser, autocomplete_and_compare, nargs):
empty_parser.add_argument('arg', nargs=nargs)
autocomplete_and_compare(empty_parser, nargs * [r':arg:_files'])
@mark.parametrize('nargs', [ZERO_OR_MORE, ONE_OR_MORE, REMAINDER])
def test_variable_subarguments(empty_parser, autocomplete_and_compare, nargs):
empty_parser.add_argument('arg', nargs=nargs)
autocomplete_and_compare(empty_parser, [r'*:arg:_files'])
@mark.parametrize('arg_type', [int, float, complex])
def test_types_to_not_complete(empty_parser, autocomplete_and_compare,
arg_type):
empty_parser.add_argument('arg', type=arg_type)
autocomplete_and_compare(empty_parser, [r':arg: '])
@mark.parametrize('choices', [[], ['choice1'], ['choice1', 'choice2'],
['A choice that needs escaping because it has '
'spaces and :']])
def test_choices(empty_parser, autocomplete_and_compare, choices):
choices_as_str = ' '.join([choice.replace(r':', r'\:').replace(r' ', r'\ ')
for choice in choices])
empty_parser.add_argument('arg', choices=choices)
autocomplete_and_compare(
empty_parser, [r':arg:({})'.format(choices_as_str)])
def test_empty_help(empty_parser, autocomplete_and_compare):
empty_parser.add_argument('arg', help='')
autocomplete_and_compare(empty_parser, [r':arg:_files'])
@mark.parametrize('help', ['Help about argument', 'help: argument description',
'help with\nnewlines'])
def test_help(empty_parser, autocomplete_and_compare, help):
help_as_str = help.replace(r':', r'\:').replace('\n', ' ')
empty_parser.add_argument('arg', help=help)
autocomplete_and_compare(
empty_parser, [r':arg - {}:_files'.format(help_as_str)])
def test_formatted_help(empty_parser, autocomplete_and_compare):
empty_parser.add_argument('arg', nargs=1, const=None, default='default',
type=str, choices=['choice1', 'choice2'],
metavar='metavar',
help='%(nargs)s %(const)s %(default)s %(type)s '
'%(choices)s %(required)s %(metavar)s %(dest)s '
'%(help)s')
autocomplete_and_compare(empty_parser, [
r":metavar - 1 None default <class 'str'> ['choice1', 'choice2'] True "
r'metavar arg %(nargs)s %(const)s %(default)s %(type)s %(choices)s '
r'%(required)s %(metavar)s %(dest)s %(help)s:(choice1 choice2)'
])
def test_metavar(empty_parser, autocomplete_and_compare):
empty_parser.add_argument('arg', metavar='name')
autocomplete_and_compare(empty_parser, [r':name:_files'])
|
11545200
|
from distutils.core import setup
setup(
name='typy',
version='0.2.0',
author='<NAME>',
author_email='<EMAIL>',
packages=('typy',),
py_modules=('typy',),
url='http://www.github.com/cyrus-/typy',
license='MIT',
description='A programmable static type system, embedded into Python.',
long_description='',
install_requires=('astunparse','six','ordereddict')
)
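# Note: distutils is deprecated since Python 3.10 and removed in 3.12;
# setuptools.setup is a drop-in replacement for this call.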
|
11545204
|
COMPANIES_HOUSE_COMPANY = 'companies-house-company'
NON_COMPANIES_HOUSE_COMPANY = 'non-companies-house-company'
NOT_COMPANY = 'not-company'
OVERSEAS_COMPANY = 'overseas-company'
# Companies whose Companies House numbers start with the prefixes below have no address in Companies House
COMPANY_NUMBER_PREFIXES_INCOMPLETE_INFO = (
'IP', # Industrial & Provident Company
'SP', # Scottish Industrial/Provident Company
'IC', # ICVC
'SI', # Scottish ICVC
'RS', # Registered Society
'NP', # Northern Ireland Industrial/Provident Company or Credit Union
'NV', # Northern Ireland ICVC
'RC', # Royal Charter Company
'SR', # Scottish Royal Charter Company
'NR', # Northern Ireland Royal Charter Company
'CS', # Scottish charitable incorporated organisation
'CE', # Charitable incorporated organisation
)
# Enrollment view constants
SESSION_KEY_ENROL_KEY = 'ENROL_KEY'
SESSION_KEY_ENROL_KEY_COMPANY_DATA = 'ENROL_KEY_COMPANY_DATA'
SESSION_KEY_INGRESS_ANON = 'ANON_INGRESS'
SESSION_KEY_COMPANY_CHOICE = 'COMPANY_CHOICE'
SESSION_KEY_COMPANY_DATA = 'ENROL_KEY_COMPANY_DATA'
SESSION_KEY_REFERRER = 'REFERRER_URL'
SESSION_KEY_BUSINESS_PROFILE_INTENT = 'BUSINESS_PROFILE_INTENT'
SESSION_KEY_BACKFILL_DETAILS_INTENT = 'BACKFILL_DETAILS_INTENT'
SESSION_KEY_EXPORT_OPPORTUNITY_INTENT = 'EXPORT_OPPORTUNITY_INTENT'
SESSION_KEY_INVITE_KEY = 'INVITE_KEY'
PROGRESS_STEP_LABEL_USER_ACCOUNT = 'Enter your business email address and set a password'
PROGRESS_STEP_LABEL_INDIVIDUAL_USER_ACCOUNT = 'Enter your email address and set a password'
PROGRESS_STEP_LABEL_VERIFICATION = 'Enter your confirmation code'
PROGRESS_STEP_LABEL_RESEND_VERIFICATION = 'Resend verification'
PROGRESS_STEP_LABEL_PERSONAL_INFO = 'Enter your personal details'
PROGRESS_STEP_LABEL_BUSINESS_TYPE = 'Select your business type'
PROGRESS_STEP_LABEL_BUSINESS_DETAILS = 'Enter your business details'
RESEND_VERIFICATION = 'resend'
USER_ACCOUNT = 'user-account'
VERIFICATION = 'verification'
COMPANY_SEARCH = 'company-search'
ADDRESS_SEARCH = 'address-search'
BUSINESS_INFO = 'business-details'
PERSONAL_INFO = 'personal-details'
FINISHED = 'finished'
FAILURE = 'failure'
INVITE_EXPIRED = 'invite-expired'
|
11545207
|
import json
import os
import shlex
import subprocess
from jinja2 import Environment, BaseLoader
import pytest
## Uncomment following lines for running in shell
# os.environ['TEST_PROFILE_DIR'] = 'profiles/webapp'
# os.environ['PIPDEPTREE_EXE'] = 'profiles/webapp/.env_python3.6_pip-latest/bin/pipdeptree'
test_profile_dir = os.environ['TEST_PROFILE_DIR']
pipdeptree_path = os.environ['PIPDEPTREE_EXE']
def load_test_spec():
test_spec_path = os.path.join(test_profile_dir, 'test_spec.json')
with open(test_spec_path) as f:
return json.load(f)
test_spec = load_test_spec()
def final_command(s):
tmpl = Environment(loader=BaseLoader).from_string(s)
return tmpl.render(pipdeptree=pipdeptree_path)
def _test_cmp_with_file_contents(spec):
p = subprocess.Popen(shlex.split(spec['command']),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
assert spec['expected_returncode'] == p.returncode
if spec['expected_output_file'] is not None:
exp_output_file = os.path.join(test_profile_dir,
spec['expected_output_file'])
with open(exp_output_file, 'rb') as f:
expected_output = f.read()
assert expected_output == out
else:
assert out == b''
if spec['expected_err_file'] is not None:
exp_err_file = os.path.join(test_profile_dir,
spec['expected_err_file'])
with open(exp_err_file, 'rb') as f:
expected_err = f.read()
assert expected_err == err
else:
assert err == b''
@pytest.mark.parametrize('spec', test_spec)
def test_all_tests_in_profile(spec):
spec['command'] = final_command(spec['command'])
if spec['method'] == 'cmp_with_file_contents':
_test_cmp_with_file_contents(spec)
|
11545236
|
from . import color_space, generators, pc_io, quality_eval, parallel_process, grid
__all__ = ['color_space', 'generators', 'pc_io', 'quality_eval', 'parallel_process', 'grid']
|
11545243
|
from typing import List
class Solution:
def preorder(self, root: 'Node') -> List[int]:
# base_case:
traverse_stack = [root]
path = []
# general case:
while traverse_stack:
current = traverse_stack.pop()
if current:
                # Traverse the current node in preorder:
path.append( current.val )
if not current.children:
continue
# Traverse children with preorder:
                # The left part has higher priority than the right part,
                # so push the right part before the left part.
for i in range( len(current.children)-1, -1, -1 ):
traverse_stack.append( current.children[i] )
return path
# n : the number of nodes in the n-ary tree
## Time Complexity: O( n )
#
# The overhead in time is the DFS traversal of the n-ary tree, which is O( n ).
## Space Complexity: O( n )
#
# The overhead in space is the storage for the DFS traversal path, which is O( n ).
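# Example (assuming LeetCode's Node class with .val and .children):
# root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
# Solution().preorder(root)  # -> [1, 3, 5, 6, 2, 4]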
|
11545244
|
from abc import ABC, abstractmethod
from typing import Union
from discord import User
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.commands import Cog
from .classes import Letter
class MixinMeta(ABC):
def __init__(self, *_args):
self.bot: Red
self.config: Config
@staticmethod
@abstractmethod
def format_list(letters: dict, *, add_was_read: bool = True):
raise NotImplementedError()
@abstractmethod
async def alert_new_letter(self, user_id: int, letter: Letter, letter_id: int):
raise NotImplementedError()
@abstractmethod
async def add_new_letter_in_letterbox(self, receiver_id: int, letter: Letter) -> int:
raise NotImplementedError()
@abstractmethod
async def get_letter_in_letterbox(self, user_id: int, letter_id: int) -> Letter:
raise NotImplementedError()
@abstractmethod
async def delete_letter_in_letterbox(self, receiver_id: int, letter_id: int):
raise NotImplementedError()
@abstractmethod
async def allow_service(self, user: Union[User, int]):
raise NotImplementedError()
class CompositeMetaClass(type(Cog), type(ABC)):
"""
Allows the metaclass used for proper type detection to coexist with discord.py's metaclass.
    Credit to https://github.com/Cog-Creators/Red-DiscordBot (the mod cog) for all the mixin machinery;
    see the credit note at the top of its "base.py" file.
"""
pass
|
11545263
|
import pickle
import sys
from typing import Dict
import elasticsearch
import time
import tqdm
sys.setrecursionlimit(10000)
from preprocessing.pipeline_job import PipelineJob
def some_query_terms_entity_pair(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
def _internal_func(q1, field):
result = es.search(
index=INDEX_NAME,
size=hits,
body={"query": {"bool": {"must": [{"match": {field: w}} for w in filter_stopwords(q1.split())]}}},
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1 = triple[0][0]
r = triple[0][1]
q2 = triple[0][2]
all_pairs = []
q1_stack = [q1]
q2_stack = [q2]
if triple[1][0] is not None and triple[1][0] in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][0]])
if triple[1][1] is not None and triple[1][1] in entity_mentions_map_filtered_low_count_implicits_dict:
q2_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][1]])
for q1_mention in q1_stack:
for q2_mention in q2_stack:
all_pairs.append(
(" ".join(filter_stopwords(q1_mention) + filter_stopwords(q2_mention)), "subject_mention_filt")
)
all_pairs.append(
(" ".join(filter_stopwords(q1_mention) + filter_stopwords(q2_mention)), "object_mention_filt")
)
all_pairs = set(all_pairs)
result = list()
for q1, field in all_pairs:
_inter_func_res = _internal_func(q1, field)
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
def some_query_match_entity_pair(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
def _internal_func(q1, q2):
result = es.search(
index=INDEX_NAME,
size=hits,
body={
"query": {
"bool": {
"must": [
{"match_phrase": {"subject_mention_filt": q1}},
{"match_phrase": {"object_mention_filt": q2}},
]
}
}
},
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1 = triple[0][0]
r = triple[0][1]
q2 = triple[0][2]
all_pairs = []
q1_stack = [q1]
q2_stack = [q2]
if triple[1][0] is not None and triple[1][0] in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][0]])
if triple[1][1] is not None and triple[1][1] in entity_mentions_map_filtered_low_count_implicits_dict:
q2_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][1]])
for q1_mention in q1_stack:
for q2_mention in q2_stack:
all_pairs.append((" ".join(filter_stopwords(q1_mention)), " ".join(filter_stopwords(q2_mention))))
all_pairs.append((" ".join(filter_stopwords(q2_mention)), " ".join(filter_stopwords(q1_mention))))
all_pairs = set(all_pairs)
result = list()
for q1, q2 in all_pairs:
_inter_func_res = _internal_func(q1, q2)
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
def some_query_match_entity_pair_in_relation(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
def _internal_func(q1, q2, filt_field):
result = es.search(
index=INDEX_NAME,
size=hits,
body={
"query": {
"bool": {"must": [{"match_phrase": {filt_field: q1}}, {"match_phrase": {"relation_filt": q2}}]}
}
},
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1 = triple[0][0]
r = triple[0][1]
q2 = triple[0][2]
all_pairs = []
q1_stack = [q1]
q2_stack = [q2]
if triple[1][0] is not None and triple[1][0] in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][0]])
if triple[1][1] is not None and triple[1][1] in entity_mentions_map_filtered_low_count_implicits_dict:
q2_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][1]])
for q1_mention in q1_stack:
for q2_mention in q2_stack:
all_pairs.append((" ".join(filter_stopwords(q1_mention)), " ".join(filter_stopwords(q2_mention))))
all_pairs.append((" ".join(filter_stopwords(q2_mention)), " ".join(filter_stopwords(q1_mention))))
all_pairs = set(all_pairs)
result = list()
for q1, q2 in all_pairs:
_inter_func_res = _internal_func(q1, q2, "subject_mention_filt")
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
_inter_func_res = _internal_func(q1, q2, "object_mention_filt")
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
def some_query_match_entity_relation_pair(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
def _internal_func(q1, r, filt_field="subject_mention_filt"):
result = es.search(
index=INDEX_NAME,
size=hits,
body={
"query": {
"bool": {"must": [{"match_phrase": {filt_field: q1}}, {"match_phrase": {"relation_filt": r}}]}
}
},
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1, r, q2 = triple[0]
all_pairs = []
q1_stack = [q1, q2]
r_stack = [r]
e1, e2 = triple[1]
if e1 is not None and e1 in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[e1])
if e2 is not None and e2 in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[e2])
for q1_mention in q1_stack:
for r_mention in r_stack:
all_pairs.append(
(" ".join(filter_stopwords(q1_mention)), " ".join(filter_stopwords(r_mention)), "subject_mention_filt")
)
all_pairs.append(
(" ".join(filter_stopwords(q1_mention)), " ".join(filter_stopwords(r_mention)), "object_mention_filt")
)
all_pairs = set(all_pairs)
result = list()
for q1, r, field in all_pairs:
_inter_func_res = _internal_func(q1, r, field)
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
def some_query_match_entity(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
def _internal_func(q1, filt_field):
print(q1, filt_field)
result = es.search(
index=INDEX_NAME, size=hits, body={"query": {"bool": {"must": [{"match_phrase": {filt_field: q1}},]}}}
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1, r, q2 = triple[0]
all_pairs = []
q1_stack = [q1, q2]
e1, e2 = triple[1]
if e1 is not None and e1 in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[e1])
if e2 is not None and e2 in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[e2])
for q1_mention in q1_stack:
all_pairs.append((" ".join(filter_stopwords(q1_mention)), "subject_mention_filt"))
all_pairs.append((" ".join(filter_stopwords(q1_mention)), "object_mention_filt"))
all_pairs = set(all_pairs)
result = list()
print(all_pairs)
for q1, field in all_pairs:
_inter_func_res = _internal_func(q1, field)
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
def some_query_exact_entity_pair(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
def _internal_func(q1, q2):
result = es.search(
index=INDEX_NAME,
size=hits,
body={
"query": {
"bool": {"must": [{"term": {"subject_mention_exact": q1}}, {"term": {"object_mention_exact": q2}}]}
}
},
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1 = triple[0][0]
r = triple[0][1]
q2 = triple[0][2]
all_pairs = []
q1_stack = [q1]
q2_stack = [q2]
if triple[1][0] is not None and triple[1][0] in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][0]])
if triple[1][1] is not None and triple[1][1] in entity_mentions_map_filtered_low_count_implicits_dict:
q2_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][1]])
for q1_mention in q1_stack:
for q2_mention in q2_stack:
all_pairs.append((" ".join(filter_stopwords(q1_mention)), " ".join(filter_stopwords(q2_mention))))
all_pairs.append((" ".join(filter_stopwords(q2_mention)), " ".join(filter_stopwords(q1_mention))))
all_pairs = set(all_pairs)
result = list()
for q1, q2 in all_pairs:
_inter_func_res = _internal_func(q1, q2)
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
def some_query_full_triple(
triple, hits, es, INDEX_NAME, filter_stopwords, entity_mentions_map_filtered_low_count_implicits_dict
):
# logger.info(q1, '#' ,q2)
r = " ".join(triple[0][1])
def _internal_func(q1, q2):
result = es.search(
index=INDEX_NAME,
size=hits,
body={
"query": {
"bool": {
"must": [
{"term": {"subject_mention_exact": q1}},
{"match": {"relation": r}},
{"term": {"object_mention_exact": q2}},
]
}
}
},
)
return [
(
h["_source"]["subject_mention"],
h["_source"]["relation"],
h["_source"]["object_mention"],
h["_source"]["triple_id"],
)
for h in sorted(result["hits"]["hits"], key=lambda x: x["_score"], reverse=True)
]
q1 = triple[0][0]
q2 = triple[0][2]
all_pairs = []
q1_stack = [q1]
q2_stack = [q2]
if triple[1][0] is not None and triple[1][0] in entity_mentions_map_filtered_low_count_implicits_dict:
q1_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][0]])
if triple[1][1] is not None and triple[1][1] in entity_mentions_map_filtered_low_count_implicits_dict:
q2_stack.extend(entity_mentions_map_filtered_low_count_implicits_dict[triple[1][1]])
for q1_mention in q1_stack:
for q2_mention in q2_stack:
all_pairs.append((" ".join(filter_stopwords(q1_mention)), " ".join(filter_stopwords(q2_mention))))
all_pairs.append((" ".join(filter_stopwords(q2_mention)), " ".join(filter_stopwords(q1_mention))))
all_pairs = set(all_pairs)
result = list()
for q1, q2 in all_pairs:
_inter_func_res = _internal_func(q1, q2)
if len(_inter_func_res) > 0:
result.extend(_inter_func_res)
return set(result)
class CreateTrainingData(PipelineJob):
"""
"""
def __init__(self, preprocess_jobs: Dict[str, PipelineJob], opts):
super().__init__(
requires=[
f"data/versions/{opts.data_version_name}/elasticsearch_index_created",
f"data/versions/{opts.data_version_name}/indexes/triples_list_lc_unique_most_popular_links_most_common_mentions_and_relations.pickle",
f"data/versions/{opts.data_version_name}/indexes/entity_mentions_map_filtered_low_count_implicits_dict.pickle",
f"data/versions/{opts.data_version_name}/indexes/mention_token_dict_most_common_dict.pickle",
f"data/versions/{opts.data_version_name}/indexes/relation_token_dict_most_common_dict.pickle",
f"data/versions/{opts.data_version_name}/indexes/validation_data.pickle",
f"data/versions/{opts.data_version_name}/indexes/validation_linked_data.pickle",
f"data/versions/{opts.data_version_name}/indexes/test_data.pickle",
f"data/versions/{opts.data_version_name}/indexes/validation_data_ids.pickle",
f"data/versions/{opts.data_version_name}/indexes/validation_linked_data_ids.pickle",
f"data/versions/{opts.data_version_name}/indexes/test_data_ids.pickle",
],
provides=[
f"data/versions/{opts.data_version_name}/train_data_simple.txt",
f"data/versions/{opts.data_version_name}/train_data_basic.txt",
f"data/versions/{opts.data_version_name}/train_data_thorough.txt",
f"data/versions/{opts.data_version_name}/validation_data.txt",
f"data/versions/{opts.data_version_name}/validation_data_linked.txt",
f"data/versions/{opts.data_version_name}/validation_data_linked_no_mention.txt",
f"data/versions/{opts.data_version_name}/test_data.txt",
],
preprocess_jobs=preprocess_jobs,
opts=opts,
)
def _run(self):
with open(
f"data/versions/{self.opts.data_version_name}/indexes/triples_list_lc_unique_most_popular_links_most_common_mentions_and_relations.pickle",
"rb",
) as f:
triples_list_lc_unique_most_popular_links_most_common_mentions_and_relations = pickle.load(f)
with open(
f"data/versions/{self.opts.data_version_name}/indexes/entity_mentions_map_filtered_low_count_implicits_dict.pickle",
"rb",
) as f:
entity_mentions_map_filtered_low_count_implicits_dict = pickle.load(f)
with open(
f"data/versions/{self.opts.data_version_name}/indexes/mention_token_dict_most_common_dict.pickle", "rb"
) as f:
mention_token_dict_most_common_dict = pickle.load(f)
with open(
f"data/versions/{self.opts.data_version_name}/indexes/relation_token_dict_most_common_dict.pickle", "rb"
) as f:
relation_token_dict_most_common_dict = pickle.load(f)
with open(f"data/versions/{self.opts.data_version_name}/indexes/validation_data.pickle", "rb",) as f:
validation_data = pickle.load(f)
with open(f"data/versions/{self.opts.data_version_name}/indexes/validation_linked_data.pickle", "rb",) as f:
validation_linked_data = pickle.load(f)
with open(f"data/versions/{self.opts.data_version_name}/indexes/test_data.pickle", "rb",) as f:
test_data = pickle.load(f)
with open(f"data/versions/{self.opts.data_version_name}/indexes/validation_data_ids.pickle", "rb",) as f:
validation_data_ids = pickle.load(f)
with open(f"data/versions/{self.opts.data_version_name}/indexes/validation_linked_data_ids.pickle", "rb",) as f:
validation_linked_data_ids = pickle.load(f)
with open(f"data/versions/{self.opts.data_version_name}/indexes/test_data_ids.pickle", "rb",) as f:
test_data_ids = pickle.load(f)
stopwords = list(mention_token_dict_most_common_dict.keys())[:25]
stopwords += list(relation_token_dict_most_common_dict.keys())[:25]
stopwords = set(stopwords)
def filter_stopwords(toks):
result = tuple([t for t in toks if t not in stopwords])
if len(result) > 0:
return result
return toks
INDEX_NAME = "training_triples_mentions"
es = elasticsearch.Elasticsearch([{"host": self.opts.create_elasticsearch_index__host, "port": 9200}])
filter_ids_for_train_data_simple = set()
filter_ids_for_train_data_thorough = set()
for data in [test_data, validation_data, validation_linked_data]:
for j, ((s, r, o), (se, oe)) in enumerate(tqdm.tqdm(data)):
# easy
for _, _, _, triple_id in some_query_full_triple(
((s, r, o), (se, oe)),
1000,
es,
INDEX_NAME,
filter_stopwords,
entity_mentions_map_filtered_low_count_implicits_dict,
):
filter_ids_for_train_data_simple.add(triple_id)
res = some_query_match_entity_pair(
((s, r, o), (se, oe)),
1000,
es,
INDEX_NAME,
filter_stopwords,
entity_mentions_map_filtered_low_count_implicits_dict,
)
for _, _, _, triple_id in res:
filter_ids_for_train_data_thorough.add(triple_id)
res = some_query_terms_entity_pair(
((s, r, o), (se, oe)),
1000,
es,
INDEX_NAME,
filter_stopwords,
entity_mentions_map_filtered_low_count_implicits_dict,
)
if len(res) < 1000:
for _, _, _, triple_id in res:
filter_ids_for_train_data_thorough.add(triple_id)
res = some_query_match_entity_pair_in_relation(
((s, r, o), (se, oe)),
1000,
es,
INDEX_NAME,
filter_stopwords,
entity_mentions_map_filtered_low_count_implicits_dict,
)
if len(res) < 1000:
for _, _, _, triple_id in res:
filter_ids_for_train_data_thorough.add(triple_id)
evaluation_ids = test_data_ids
evaluation_ids.extend(validation_data_ids)
evaluation_ids.extend(validation_linked_data_ids)
evaluation_ids = set(evaluation_ids)
# # ############# Create training data ###############
# # logger.info("Create training data")
train_data_simple = list()
train_data_basic = list()
train_data_thorough = list()
# start = time.time()
for i, ((s, r, o), (se, oe)) in enumerate(
tqdm.tqdm(triples_list_lc_unique_most_popular_links_most_common_mentions_and_relations)
):
if i not in evaluation_ids:
train_data_simple.append(((s, r, o), (se, oe)))
if i not in filter_ids_for_train_data_simple:
train_data_basic.append(((s, r, o), (se, oe)))
if i not in filter_ids_for_train_data_thorough and i not in filter_ids_for_train_data_simple:
train_data_thorough.append(((s, r, o), (se, oe)))
with open(f"data/versions/{self.opts.data_version_name}/train_data_simple.txt", "w") as f:
for ((s, r, o), (se, oe)) in train_data_simple:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(" ".join(s), " ".join(r), " ".join(o), " ".join(s), " ".join(o),)
)
with open(f"data/versions/{self.opts.data_version_name}/train_data_basic.txt", "w") as f:
for ((s, r, o), (se, oe)) in train_data_basic:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(" ".join(s), " ".join(r), " ".join(o), " ".join(s), " ".join(o),)
)
with open(f"data/versions/{self.opts.data_version_name}/train_data_thorough.txt", "w") as f:
for ((s, r, o), (se, oe)) in train_data_thorough:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(" ".join(s), " ".join(r), " ".join(o), " ".join(s), " ".join(o),)
)
def get_mentions_for_entity(e, m_default):
return (
list(
set(
[" ".join(m) for m, c in entity_mentions_map_filtered_low_count_implicits_dict[e].items()]
+ [" ".join(m_default)]
)
)
if e in entity_mentions_map_filtered_low_count_implicits_dict
and len(entity_mentions_map_filtered_low_count_implicits_dict[e]) > 0
else [" ".join(m_default)]
)
with open(f"data/versions/{self.opts.data_version_name}/validation_data.txt", "w") as f:
for ((s, r, o), (se, oe)) in validation_data:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(" ".join(s), " ".join(r), " ".join(o), " ".join(s), " ".join(o),)
)
with open(f"data/versions/{self.opts.data_version_name}/validation_data_linked.txt", "w") as f:
for ((s, r, o), (se, oe)) in validation_linked_data:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(
" ".join(s),
" ".join(r),
" ".join(o),
"|||".join(get_mentions_for_entity(se, s)),
"|||".join(get_mentions_for_entity(oe, o)),
)
)
with open(f"data/versions/{self.opts.data_version_name}/validation_data_linked_no_mention.txt", "w") as f:
for ((s, r, o), (se, oe)) in validation_linked_data:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(" ".join(s), " ".join(r), " ".join(o), " ".join(s), " ".join(o),)
)
with open(f"data/versions/{self.opts.data_version_name}/test_data.txt", "w") as f:
for ((s, r, o), (se, oe)) in test_data:
f.writelines(
"{}\t{}\t{}\t{}\t{}\n".format(
" ".join(s),
" ".join(r),
" ".join(o),
"|||".join(get_mentions_for_entity(se, s)),
"|||".join(get_mentions_for_entity(oe, o)),
)
)
# with open(f"data/versions/{self.opts.data_version_name}/elasticsearch_index_created", "w",) as f:
# f.writelines(["SUCCESS"])
|
11545269
|
from wordcloud import WordCloud, STOPWORDS
def cwordcloud(words, filename='output.png', height=2000, width=4000):
    """Render a word cloud from a semicolon- or space-separated string of words."""
    words = words.replace(';', ' ')
word_cloud = WordCloud(stopwords=STOPWORDS, background_color='white', height=height, width=width).generate(words)
word_cloud.to_file(filename)
return 1
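# Example (hypothetical input; writes demo.png to the working directory):
# cwordcloud('python;data;cloud;python data', filename='demo.png')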
|
11545284
|
from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import time
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train (default: 1)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--mixf', action='store_true', default=False,
help='enables using mixed float precision')
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(2048, 2048, kernel_size=1)
def forward(self, x):
x = F.relu(self.conv1(x))
return x
model = Net()
if args.mixf:
model.cuda().half()
else:
model.cuda()
ITERS = 300
def train(epoch):
model.train()
# dummy dataset the same size as imagenet
data_ = torch.FloatTensor(np.random.randn(4096, 2048, 1, 1))
    # keep the host-to-device copy time out of the timed conv loop:
if args.mixf:
data = data_.cuda().half()
else:
data = data_.cuda()
    # time the whole loop, with proper CUDA synchronization
torch.cuda.synchronize()
start = time.time()
for batch_idx in range(ITERS):
output = model(Variable(data))
torch.cuda.synchronize()
print("Time / iteration: ", (time.time()-start)/ITERS)
for epoch in range(1, args.epochs + 1):
train(epoch)
|
11545311
|
import re
import typing
from typing import (
AbstractSet,
AsyncContextManager,
AsyncGenerator,
AsyncIterator,
Callable,
ChainMap,
Collection,
Container,
Counter,
Deque,
Dict,
FrozenSet,
ItemsView,
Iterable,
Iterator,
KeysView,
List,
Mapping,
Match,
MutableMapping,
MutableSet,
OrderedDict,
Pattern,
Reversible,
Sequence,
Set,
Type,
Union,
ValuesView,
)
def very_complex_function(
a: typing.Tuple[List[Dict[Set[FrozenSet[Type, Deque]]]]],
b: typing.DefaultDict[OrderedDict[Counter, ChainMap]],
c: typing.Awaitable,
d: typing.Coroutine,
e: typing.AsyncIterable[AsyncIterator[AsyncGenerator[Iterable, Iterator]]],
f: typing.Generator[Reversible],
g: typing.Union[Callable, AbstractSet],
h: typing.Union[MutableSet[Mapping, MutableMapping], Sequence],
i: typing.MutableSequence[Container[Collection]],
j: typing.ByteString,
k: typing.MappingView[KeysView[ItemsView[ValuesView]]],
l: typing.ContextManager,
m: typing.Optional[AsyncContextManager],
) -> Union[Pattern, Match]:
return re.compile(f'{a},{b},{c},{d},{e},{f},{g},{h},{i},{j},{k},{l},{m}')
|
11545344
|
import os
import unittest
from collections import namedtuple
from celescope.tools.step import Step
class Tests(unittest.TestCase):
def setUp(self):
pass
@unittest.skip("tested")
def test_stat_to_metric(self):
os.chdir('/SGRNJ01/RD_dir/pipeline_test/zhouyiqi/multi_tests/rna')
args_dict = {
'sample': 'test1',
'assay': 'rna',
'thread': 1,
'outdir': 'test1/06.analysis',
'debug': True,
}
Args = namedtuple('Args', list(args_dict.keys()))
args = Args(**args_dict)
obj = Step(args, 'analysis')
obj.stat_to_metric()
print(obj.content_dict['metric'])
def test_test(self):
assert 0 == 0
|
11545350
|
import numpy as np
from random import randint
def flip(image, option_value):
"""
Args:
image : numpy array of image
option_value = random integer between 0 to 3
Return :
image : numpy array of flipped image
"""
if option_value == 0:
# vertical
image = np.flip(image, option_value)
elif option_value == 1:
# horizontal
image = np.flip(image, option_value)
elif option_value == 2:
# horizontally and vertically flip
image = np.flip(image, 0)
image = np.flip(image, 1)
else:
image = image
# no effect
return image
def add_gaussian_noise(image, mean=0, std=1):
"""
Args:
image : numpy array of image
mean : pixel mean of image
standard deviation : pixel standard deviation of image
Return :
image : numpy array of image with gaussian noise added
"""
gaus_noise = np.random.normal(mean, std, image.shape)
image = image.astype("int16")
    noise_img = image + gaus_noise
    noise_img = ceil_floor_image(noise_img)
    return noise_img
def add_uniform_noise(image, low=-10, high=10):
"""
Args:
image : numpy array of image
low : lower boundary of output interval
high : upper boundary of output interval
Return :
image : numpy array of image with uniform noise added
"""
uni_noise = np.random.uniform(low, high, image.shape)
image = image.astype("int16")
    noise_img = image + uni_noise
    noise_img = ceil_floor_image(noise_img)
    return noise_img
def change_brightness(image, value):
"""
Args:
image : numpy array of image
value : brightness
Return :
image : numpy array of image with brightness added
"""
image = image.astype("int16")
image = image + value
image = ceil_floor_image(image)
return image
def ceil_floor_image(image):
"""
Args:
image : numpy array of image in datatype int16
Return :
image : numpy array of image in datatype uint8 with ceilling(maximum 255) and flooring(minimum 0)
"""
image[image > 255] = 255
image[image < 0] = 0
image = image.astype("uint8")
return image
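# Note: this is equivalent to np.clip(image, 0, 255).astype("uint8").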
def approximate_image(image):
"""
Args:
image : numpy array of image in datatype int16
Return :
image : numpy array of image in datatype uint8 only with 255 and 0
"""
image[image > 127.5] = 255
image[image < 127.5] = 0
image = image.astype("uint8")
return image
def normalization2(image, max, min):
"""Normalization to range of [min, max]
Args :
image : numpy array of image
mean :
Return :
image : numpy array of image with values turned into standard scores
"""
image_new = (image - np.min(image))*(max - min)/(np.max(image)-np.min(image)) + min
return image_new
def stride_size(image_len, crop_num, crop_size):
"""return stride size
Args :
image_len(int) : length of one size of image (width or height)
crop_num(int) : number of crop in certain direction
crop_size(int) : size of crop
Return :
stride_size(int) : stride size
"""
return int((image_len - crop_size)/(crop_num - 1))
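# e.g. stride_size(512, 2, 256) == 256: two 256-px crops exactly tile a 512-px side.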
# def multi_cropping(image, crop_size, crop_num1, crop_num2):
# """crop the image and pad it to in_size
# Args :
# images : numpy arrays of images
# crop_size(int) : size of cropped image
# crop_num2 (int) : number of crop in horizontal way
# crop_num1 (int) : number of crop in vertical way
# Return :
# cropped_imgs : numpy arrays of stacked images
# """
# img_height, img_width = image.shape[0], image.shape[1]
# assert crop_size*crop_num1 >= img_width and crop_size * \
# crop_num2 >= img_height, "Whole image cannot be sufficiently expressed"
# assert crop_num1 <= img_width - crop_size + 1 and crop_num2 <= img_height - \
# crop_size + 1, "Too many number of crops"
# cropped_imgs = []
# # int((img_height - crop_size)/(crop_num1 - 1))
# dim1_stride = stride_size(img_height, crop_num1, crop_size)
# # int((img_width - crop_size)/(crop_num2 - 1))
# dim2_stride = stride_size(img_width, crop_num2, crop_size)
# for i in range(crop_num1):
# for j in range(crop_num2):
# cropped_imgs.append(cropping(image, crop_size,
# dim1_stride*i, dim2_stride*j))
# return np.asarray(cropped_imgs)
# def cropping(image, vert_crop_size, hort_crop_size, dim1, dim2):
# """crop the image and pad it to in_size
# Args :
# images : numpy array of images
# crop_size(int) : size of cropped image
# dim1(int) : vertical location of crop
# dim2(int) : horizontal location of crop
# Return :
# cropped_img: numpy array of cropped image
# """
# cropped_img = image[dim1:dim1+vert_crop_size, dim2:dim2+hort_crop_size]
# return cropped_img
def add_padding(image, in_size, out_size, mode):
"""Pad the image to in_size
Args :
images : numpy array of images
in_size(int) : the input_size of model
out_size(int) : the output_size of model
mode(str) : mode of padding
Return :
padded_img: numpy array of padded image
"""
pad_size = int((in_size - out_size)/2)
padded_img = np.pad(image, pad_size, mode=mode)
return padded_img
def division_array(crop_size, crop_num1, crop_num2, dim1, dim2):
"""Make division array
Args :
crop_size(int) : size of cropped image
crop_num2 (int) : number of crop in horizontal way
crop_num1 (int) : number of crop in vertical way
dim1(int) : vertical size of output
dim2(int) : horizontal size_of_output
Return :
div_array : numpy array of numbers of 1,2,4
"""
div_array = np.zeros([dim1, dim2]) # make division array
one_array = np.ones([crop_size, crop_size]) # one array to be added to div_array
dim1_stride = stride_size(dim1, crop_num1, crop_size) # vertical stride
dim2_stride = stride_size(dim2, crop_num2, crop_size) # horizontal stride
for i in range(crop_num1):
for j in range(crop_num2):
# add ones to div_array at specific position
div_array[dim1_stride*i:dim1_stride*i + crop_size,
dim2_stride*j:dim2_stride*j + crop_size] += one_array
return div_array
def image_concatenate(image, crop_num1, crop_num2, dim1, dim2):
"""concatenate images
Args :
image : output images (should be square)
crop_num2 (int) : number of crop in horizontal way (2)
crop_num1 (int) : number of crop in vertical way (2)
dim1(int) : vertical size of output (512)
dim2(int) : horizontal size_of_output (512)
Return :
        empty_array : numpy array of the reassembled (summed) output image
"""
crop_size = image.shape[1] # size of crop
empty_array = np.zeros([dim1, dim2]).astype("float64") # to make sure no overflow
dim1_stride = stride_size(dim1, crop_num1, crop_size) # vertical stride
dim2_stride = stride_size(dim2, crop_num2, crop_size) # horizontal stride
index = 0
for i in range(crop_num1):
for j in range(crop_num2):
# add image to empty_array at specific position
empty_array[dim1_stride*i:dim1_stride*i + crop_size,
dim2_stride*j:dim2_stride*j + crop_size] += image[index]
index += 1
return empty_array
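# Round-trip usage sketch (not part of the original module). It assumes square
# tiles produced with the same stride_size() as below, e.g. by the commented-out
# multi_cropping() helper; dividing the stitched sum by division_array() then
# recovers the original values in the overlapping regions.
if __name__ == "__main__":
    full = np.random.rand(512, 512)
    crop_size, crop_num = 256, 3
    stride = stride_size(512, crop_num, crop_size)
    tiles = np.asarray([full[i * stride:i * stride + crop_size,
                             j * stride:j * stride + crop_size]
                        for i in range(crop_num) for j in range(crop_num)])
    stitched = image_concatenate(tiles, crop_num, crop_num, 512, 512)
    averaged = stitched / division_array(crop_size, crop_num, crop_num, 512, 512)
    assert np.allclose(averaged, full)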
|
11545358
|
import sqlite3
from .db import DB
class DailySummary(DB):
def __init__(self):
super().__init__()
self.table_name = 'KL_daily_summary'
self.table_desc = 'Summary of new COVID-19 cases (last 24 hours)'
self.cols = self.getcolumns()
def getcolumns(self):
cols = {
'date': 'DATE NOT NULL PRIMARY KEY',
'positive_cases': 'INT',
'recovered': 'INT',
'new_persons_in_surveillance': 'INT',
'new_persons_in_home_ins_isolation': 'INT',
'new_persons_in_hospital_isolation': 'INT',
'daily_deaths': 'INT',
'deaths_declared_as_per_appeal': 'INT',
'pending_deaths': 'INT'
}
return cols
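# Illustrative sketch only: the DB base class is not shown here, so how it
# consumes `cols` is an assumption. A mapping like the one above translates
# naturally into a CREATE TABLE statement, e.g.:
#
#   cols = DailySummary().getcolumns()
#   ddl = "CREATE TABLE IF NOT EXISTS KL_daily_summary ({})".format(
#       ", ".join("{} {}".format(name, ctype) for name, ctype in cols.items()))
#   sqlite3.connect("covid.db").execute(ddl)  # "covid.db" is a hypothetical path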
|
11545369
|
import logging
from typing import Callable
from ..helpers import Helpers
class StreamHandler(object):
def __init__(self, event: str, invocation_id: str):
self.event = event
self.invocation_id = invocation_id
self.logger = Helpers.get_logger()
self.next_callback =\
lambda _: self.logger.warning(
"next stream handler fired, no callback configured")
self.complete_callback =\
lambda _: self.logger.warning(
"next complete handler fired, no callback configured")
self.error_callback =\
lambda _: self.logger.warning(
"next error handler fired, no callback configured")
def subscribe(self, subscribe_callbacks: dict):
error =\
" subscribe object must be a dict like {0}"\
.format({
"next": None,
"complete": None,
"error": None
})
if subscribe_callbacks is None or\
type(subscribe_callbacks) is not dict:
raise TypeError(error)
if "next" not in subscribe_callbacks or\
"complete" not in subscribe_callbacks \
or "error" not in subscribe_callbacks:
raise KeyError(error)
if not callable(subscribe_callbacks["next"])\
or not callable(subscribe_callbacks["next"]) \
or not callable(subscribe_callbacks["next"]):
raise ValueError("Suscribe callbacks must be functions")
self.next_callback = subscribe_callbacks["next"]
self.complete_callback = subscribe_callbacks["complete"]
self.error_callback = subscribe_callbacks["error"]
class InvocationHandler(object):
def __init__(self, invocation_id: str, complete_callback: Callable):
self.invocation_id = invocation_id
self.complete_callback = complete_callback
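# Usage sketch (illustrative, not from the original project). subscribe()
# requires a dict with callable "next", "complete" and "error" entries:
#
#   handler = StreamHandler(event="stream_item", invocation_id="abc-123")
#   handler.subscribe({
#       "next": lambda item: print("next:", item),
#       "complete": lambda _: print("stream complete"),
#       "error": lambda err: print("stream error:", err),
#   })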
|
11545372
|
from collections import namedtuple
Performance = namedtuple(
'Performance', ['name', 'get', 'get_buy_and_hold', 'hline'])
sharpe_ratio_performance = Performance(
name='Sharpe ratio',
get=lambda environment: environment.get_sharpe_ratio(),
get_buy_and_hold=lambda environment: environment.get_sharpe_ratio_buy_and_hold(),
hline=0)
|
11545390
|
import json
import pytest
import requests_mock
from pubg_python.base import PUBG, Shard, APIClient
from pubg_python.domain.base import Match, Roster, Participant, Asset
from pubg_python.domain.telemetry.resources import GAME_MODE, SEASON_STATE
api = PUBG('apikey', Shard.STEAM)
BASE_URL = APIClient.BASE_URL
@pytest.fixture()
def mock():
with requests_mock.Mocker() as mock:
yield mock
@pytest.fixture()
def match_response():
with open('tests/match_response.json') as json_file:
yield json.load(json_file)
def test_match_get(mock, match_response):
match_id = 'f80126f4-9520-4c66-9198-57820d04bf00'
url = '{}shards/steam/matches/{}'.format(BASE_URL, match_id)
mock.get(url, json=match_response)
match = api.matches().get(match_id)
asset = match.assets[0]
roster = match.rosters[0]
participant = roster.participants[0]
print(match.title_id)
assert isinstance(match, Match)
assert isinstance(asset, Asset)
assert isinstance(roster, Roster)
assert isinstance(participant, Participant)
assert isinstance(match.created_at, str)
assert isinstance(match.duration, int)
assert isinstance(match.match_type, str)
assert match.game_mode in GAME_MODE
assert match.shard_id in Shard._value2member_map_
assert match.season_state in SEASON_STATE
assert match.id == match_id
|
11545401
|
from typing import List, Optional, Tuple
import cv2
import numpy as np
from l5kit.data.zarr_dataset import AGENT_DTYPE
from l5kit.data.filter import filter_agents_by_labels, filter_agents_by_track_id
from l5kit.geometry import rotation33_as_yaw # , transform_points
from l5kit.rasterization.rasterizer import EGO_EXTENT_HEIGHT, EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, Rasterizer
from l5kit.rasterization.render_context import RenderContext
from l5kit.rasterization.semantic_rasterizer import CV2_SHIFT, cv2_subpixel
from lib.utils.numba_utils import transform_points_nb
def get_ego_as_agent(frame: np.ndarray) -> np.ndarray: # TODO this can be useful to have around
"""
Get a valid agent with information from the frame AV. Ford Fusion extent is used
Args:
frame (np.ndarray): the frame we're interested in
Returns: an agent np.ndarray of the AV
"""
ego_agent = np.zeros(1, dtype=AGENT_DTYPE)
ego_agent[0]["centroid"] = frame["ego_translation"][:2]
ego_agent[0]["yaw"] = rotation33_as_yaw(frame["ego_rotation"])
ego_agent[0]["extent"] = np.asarray((EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))
return ego_agent
def draw_boxes(
raster_size: Tuple[int, int],
raster_from_world: np.ndarray,
agents: np.ndarray,
color: int,
) -> np.ndarray:
im = np.zeros((raster_size[1], raster_size[0]), dtype=np.uint8)
corners_base_coords = np.asarray([[-1, -1], [-1, 1], [1, 1], [1, -1]])
    # corners = corners_base_coords[np.newaxis, :] * agents["extent"][:, :2] / 2
corners = corners_base_coords[np.newaxis, :, :] * agents["extent"][:, np.newaxis, :2] / 2
# r_m_T = r_m.T;
# r_m = [
# [cos(yaw), -sin(yaw)],
# [sin(yaw), cos(yaw)],
# ]
r_m_T = np.zeros((len(agents), 2, 2))
r_m_T[:, 0, 0] = np.cos(agents["yaw"])
r_m_T[:, 1, 1] = np.cos(agents["yaw"])
r_m_T[:, 0, 1] = np.sin(agents["yaw"])
r_m_T[:, 1, 0] = -np.sin(agents["yaw"])
box_world_coords = np.sum(corners[:, :, :, np.newaxis] * r_m_T[:, np.newaxis, :, :], axis=-2)
box_world_coords = box_world_coords + agents["centroid"][:, np.newaxis, :2]
box_raster_coords = transform_points_nb(box_world_coords.reshape((-1, 2)), raster_from_world)
# fillPoly wants polys in a sequence with points inside as (x,y)
box_raster_coords = cv2_subpixel(box_raster_coords.reshape((-1, 4, 2)))
cv2.fillPoly(im, box_raster_coords, color=color, lineType=cv2.LINE_AA, shift=CV2_SHIFT)
return im
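# Sanity-check sketch (not part of the original module): the broadcasted sum in
# draw_boxes is equivalent to rotating each corner offset by the yaw rotation
# matrix and translating by the centroid. For one hypothetical agent:
#
#   yaw, extent, centroid = 0.3, np.array([4.0, 2.0]), np.array([10.0, 5.0])
#   corners = np.asarray([[-1, -1], [-1, 1], [1, 1], [1, -1]]) * extent / 2
#   r = np.array([[np.cos(yaw), -np.sin(yaw)],
#                 [np.sin(yaw),  np.cos(yaw)]])
#   expected = np.array([r @ c for c in corners]) + centroid
#   got = np.sum(corners[:, :, None] * r.T[None, :, :], axis=-2) + centroid
#   assert np.allclose(expected, got)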
class TunedBoxRasterizer(Rasterizer):
@staticmethod
def from_cfg(cfg, data_manager=None):
raster_cfg = cfg["raster_params"]
render_context = RenderContext(
raster_size_px=np.array(raster_cfg["raster_size"]),
pixel_size_m=np.array(raster_cfg["pixel_size"]),
center_in_raster_ratio=np.array(raster_cfg["ego_center"]),
)
filter_agents_threshold = raster_cfg["filter_agents_threshold"]
history_num_frames = cfg["model_params"]["history_num_frames"]
rotate_yaw = raster_cfg.get("rotate_yaw", True)
return TunedBoxRasterizer(render_context, filter_agents_threshold, history_num_frames, rotate_yaw)
def __init__(
self, render_context: RenderContext, filter_agents_threshold: float, history_num_frames: int,
rotate_yaw: bool = True,
):
"""
Args:
render_context (RenderContext): Render context
filter_agents_threshold (float): Value between 0 and 1 used to filter uncertain agent detections
history_num_frames (int): Number of frames to rasterise in the past
"""
super(TunedBoxRasterizer, self).__init__()
self.render_context = render_context
self.raster_size = render_context.raster_size_px
self.filter_agents_threshold = filter_agents_threshold
self.history_num_frames = history_num_frames
self.rotate_yaw = rotate_yaw
self.raster_channels = (self.history_num_frames + 1) * 2
def rasterize(
self,
history_frames: np.ndarray,
history_agents: List[np.ndarray],
history_tl_faces: List[np.ndarray],
agent: Optional[np.ndarray] = None,
) -> np.ndarray:
# all frames are drawn relative to this one"
frame = history_frames[0]
if agent is None:
ego_translation_m = history_frames[0]["ego_translation"]
ego_yaw_rad = rotation33_as_yaw(frame["ego_rotation"]) if self.rotate_yaw else 0.
else:
ego_translation_m = np.append(agent["centroid"], history_frames[0]["ego_translation"][-1])
ego_yaw_rad = agent["yaw"] if self.rotate_yaw else 0.
raster_from_world = self.render_context.raster_from_world(ego_translation_m, ego_yaw_rad)
# this ensures we always end up with fixed size arrays, +1 is because current time is also in the history
out_shape = (self.raster_size[1], self.raster_size[0], 2 * (self.history_num_frames + 1))
ego_offset = (self.history_num_frames + 1)
out_im = np.zeros(out_shape, dtype=np.uint8)
for i, (frame, agents) in enumerate(zip(history_frames, history_agents)):
agents = filter_agents_by_labels(agents, self.filter_agents_threshold)
# note the cast is for legacy support of dataset before April 2020
av_agent = get_ego_as_agent(frame).astype(agents.dtype)
if agent is None:
agents_image = draw_boxes(self.raster_size, raster_from_world, agents, 255)
ego_image = draw_boxes(self.raster_size, raster_from_world, av_agent, 255)
else:
agent_ego = filter_agents_by_track_id(agents, agent["track_id"])
if len(agent_ego) == 0: # agent not in this history frame
agents_image = draw_boxes(self.raster_size, raster_from_world, np.append(agents, av_agent), 255)
ego_image = np.zeros_like(agents_image)
else: # add av to agents and remove the agent from agents
agents = agents[agents != agent_ego[0]]
agents_image = draw_boxes(self.raster_size, raster_from_world, np.append(agents, av_agent), 255)
ego_image = draw_boxes(self.raster_size, raster_from_world, agent_ego, 255)
out_im[..., i] = agents_image
out_im[..., ego_offset + i] = ego_image
return out_im.astype(np.float32) / 255
def to_rgb(self, in_im: np.ndarray, **kwargs: dict) -> np.ndarray:
"""
get an rgb image where agents further in the past have faded colors
Args:
in_im: the output of the rasterize function
kwargs: this can be used for additional customization (such as colors)
Returns: an RGB image with agents and ego coloured with fading colors
"""
hist_frames = in_im.shape[-1] // 2
in_im = np.transpose(in_im, (2, 0, 1))
# this is similar to the draw history code
out_im_agent = np.zeros((self.raster_size[1], self.raster_size[0], 3), dtype=np.float32)
agent_chs = in_im[:hist_frames][::-1] # reverse to start from the furthest one
agent_color = (0, 0, 1) if "agent_color" not in kwargs else kwargs["agent_color"]
for ch in agent_chs:
out_im_agent *= 0.85 # magic fading constant for the past
out_im_agent[ch > 0] = agent_color
out_im_ego = np.zeros((self.raster_size[1], self.raster_size[0], 3), dtype=np.float32)
ego_chs = in_im[hist_frames:][::-1]
ego_color = (0, 1, 0) if "ego_color" not in kwargs else kwargs["ego_color"]
for ch in ego_chs:
out_im_ego *= 0.85
out_im_ego[ch > 0] = ego_color
out_im = (np.clip(out_im_agent + out_im_ego, 0, 1) * 255).astype(np.uint8)
return out_im
|
11545420
|
import math
import sys
def cont():
print("Do you want to continue ")
print("Press Y/N to continue and exit respectively")
inputchar = input()
if inputchar == "Y" or inputchar == "y":
switch()
elif inputchar == "N" or inputchar == "n":
sys.exit()
def switch():
print("-" * 40)
print("1) Check Class of IPV4 Address")
print("2) Exit")
print("-" * 40)
option = int(input("your option : "))
if option == 1:
data = input("Enter the ip address to check its class:")
octet = data.split(".")
(first_byte, second_byte, third_byte, fourth_byte) = (
int(octet[0]),
int(octet[1]),
int(octet[2]),
int(octet[3]),
)
        if len(str(first_byte)) > 3:
            print("Incorrect length of IP in first octet")
            cont()
        elif len(str(second_byte)) > 3:
            print("Incorrect length of IP in second octet")
            cont()
        elif len(str(third_byte)) > 3:
            print("Incorrect length of IP in third octet")
            cont()
        elif len(str(fourth_byte)) > 3:
            print("Incorrect length of IP in fourth octet")
            cont()
        elif octet[4:]:
            print("Extra bits encountered, so give IP in format A.B.C.D")
            cont()
if first_byte == 0 or first_byte <= 127:
if second_byte == 0 or second_byte <= 255:
if third_byte == 0 or third_byte <= 255:
if fourth_byte == 0 or fourth_byte <= 255:
print("Class A IP")
cont()
else:
print("Invalid 4th octet of Class A IP")
cont()
else:
print("Invalid 3rd octet of Class A IP")
cont()
else:
print("Invalid 2nd octet of Class A IP")
cont()
elif first_byte == 128 or first_byte <= 191:
if second_byte == 0 or second_byte <= 255:
if third_byte == 0 or third_byte <= 255:
if fourth_byte == 0 or fourth_byte <= 255:
print("Class B IP")
cont()
else:
print("Invalid 4th octet of Class B IP")
cont()
else:
print("Invalid 3rd octet of Class B IP")
cont()
else:
print("Invalid 2nd octet of Class B IP")
cont()
elif first_byte == 192 or first_byte <= 223:
if second_byte == 0 or second_byte <= 255:
if third_byte == 0 or third_byte <= 255:
if fourth_byte == 0 or fourth_byte <= 255:
print("Class C IP")
cont()
else:
print("Invalid 4th octet of Class C IP")
cont()
else:
print("Invalid 3rd octet of Class C IP")
cont()
else:
print("Invalid 2nd octet of Class C IP")
cont()
elif first_byte == 224 or first_byte <= 239:
if second_byte == 0 or second_byte <= 255:
if third_byte == 0 or third_byte <= 255:
if fourth_byte == 0 or fourth_byte <= 255:
print("Class D IP")
cont()
else:
print("Invalid 4th octet of Class D IP")
cont()
else:
print("Invalid 3rd octet of Class D IP")
cont()
else:
print("Invalid 2nd octet of Class D IP")
cont()
elif first_byte == 240 or first_byte <= 255:
if second_byte == 0 or second_byte <= 255:
if third_byte == 0 or third_byte <= 255:
if fourth_byte == 0 or fourth_byte <= 255:
print("Class E IP")
cont()
else:
print("Invalid 4th octet of Class E IP")
cont()
else:
print("Invalid 3rd octet of Class E IP")
cont()
else:
print("Invalid 2nd octet of Class E IP")
cont()
else:
print("Invalid IP")
cont()
    elif option == 2:
        print("Exit")
        sys.exit()
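# A more compact alternative sketch (illustrative, not part of the original
# script). The elif chain above is correct only because earlier branches absorb
# the lower ranges; explicit range checks make the classful boundaries clearer:
#
#   def ip_class(first_byte):
#       for lo, hi, name in [(0, 127, "A"), (128, 191, "B"), (192, 223, "C"),
#                            (224, 239, "D"), (240, 255, "E")]:
#           if lo <= first_byte <= hi:
#               return name
#       return None
#
# Note that the script as written never calls switch(); a caller would normally
# invoke it under an `if __name__ == "__main__":` guard.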
|
11545450
|
import json
import requests
from utils.logger import log
from utils.cmd_args import args
class ArgsCheckException(Exception):
def __init__(self, _message):
self.message = _message
def __str__(self):
return self.message
user_name = list("aaaaaaaaaaaa")
def unifie_to_string(_arg):
if not isinstance(_arg, str):
prepared = str(_arg)
else:
prepared = _arg
prepared = prepared.strip()
prepared = prepared.replace(" ", "")
return prepared
def check_call_args(_call_args, _response, _arg_prefix):
splited_response = _response.split()
for idx, line in enumerate(splited_response):
if _arg_prefix in line:
key = line[line.find(_arg_prefix+".")+len(_arg_prefix+"."):-1]
#unifie to string
call = unifie_to_string(_call_args[key])
spli = unifie_to_string(splited_response[idx+1])
if call in spli:
_call_args.pop(key)
else:
log.error("Call arg `{0}` expected `{1}`".format(_call_args[key], str(splited_response[idx+1])))
raise ArgsCheckException("Incossisten value for `{0}` key".format(key))
if not _call_args:
break
if _call_args:
raise ArgsCheckException("No all values checked, those `{0}` still remains. ".format(_call_args))
def call_and_check(_func, _call_args, _arg_prefix):
response = _func(*_call_args.values())
log.info("Call response: {0}".format(response))
check_call_args(_call_args, response, _arg_prefix)
return response
def call_and_check_transaction(_func, _call_args, _arg_prefix, _broadcast):
response = _func(*_call_args.values(), _broadcast)
check_call_args(_call_args, response, _arg_prefix)
def last_message_as_json( _message):
if "message:" in _message:
_message = _message[_message.rfind("message:")+len("message:"):]
        _message = _message.strip()
o = 0
#lame... but works
for index, ch in enumerate(_message):
if str(ch) == "{":
o +=1
continue
if str(ch) == "}":
o -=1
if o == 0:
_message = _message[:index+1]
break
else:
_message = "{}"
return json.loads(_message)
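# Illustrative example (not part of the original module): the brace-matching
# loop extracts the last balanced JSON object following a "message:" marker.
#
#   >>> last_message_as_json('log line ... message: {"a": {"b": 1}} trailing')
#   {'a': {'b': 1}}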
def find_creator_proposals(_creator, _proposal_list):
proposals = []
if "result" in _proposal_list:
result = _proposal_list["result"]
for rs in result:
if rs["creator"] == _creator:
proposals.append(rs)
return proposals
def find_voter_proposals(_voter, _proposal_list):
if "result" in _proposal_list:
result = _proposal_list["result"]
for user, user_propsals in result.items():
if user == _voter:
return user_propsals
return []
def ws_to_http(_url):
pos = _url.find(":")
return "http" + _url[pos:]
def get_valid_steem_account_name():
http_url = ws_to_http(args.server_rpc_endpoint)
while True:
params = {"jsonrpc":"2.0", "method":"condenser_api.get_accounts", "params":[["".join(user_name)]], "id":1}
resp = requests.post(http_url, json=params)
data = resp.json()
if "result" in data:
if len(data["result"]) == 0:
return ''.join(user_name)
if ord(user_name[0]) >= ord('z'):
for i, _ in enumerate("".join(user_name)):
if user_name[i] >= 'z':
user_name[i] = 'a'
continue
user_name[i] = chr(ord(user_name[i]) + 1)
break
else:
user_name[0] = chr(ord(user_name[0]) + 1)
if len(set(user_name)) == 1 and user_name[0] == 'z':
break
def make_user_for_tests(_cli_wallet, _value_for_vesting = None, _value_for_transfer_tests = None, _value_for_transfer_tbd = None):
value_for_vesting = _value_for_vesting if _value_for_vesting else "20.000 TESTS"
value_for_transfer_tests = _value_for_transfer_tests if _value_for_transfer_tests else "20.000 TESTS"
value_for_transfer_tbd = _value_for_transfer_tbd if _value_for_transfer_tbd else "20.000 TBD"
creator = get_valid_steem_account_name()
_cli_wallet.create_account( args.creator, creator, "", "true")
receiver = get_valid_steem_account_name()
_cli_wallet.create_account( args.creator, receiver, "", "true")
_cli_wallet.transfer_to_vesting( args.creator, creator, value_for_vesting, "true")
_cli_wallet.transfer( args.creator, creator, value_for_transfer_tests, "initial transfer", "true" )
_cli_wallet.transfer( args.creator, creator, value_for_transfer_tbd, "initial transfer", "true")
return creator, receiver
|
11545514
|
import numpy as np
from scipy.stats.mstats import winsorize
from sklearn.linear_model import LinearRegression
from sklearn import linear_model
from sklearn.mixture import GaussianMixture
import scipy
from scipy.signal import find_peaks
DESCRIPTION = {
'S1': "High shaggy aEEG baseline (constantly at 4-200 mV).",
'S2': "Low aEEG baseline continually at low amplitude <= 2-3 mV.",
'S3': "aEEG baseline never falls to near-zero (< 1mV).",
'S4': "Abrupt, recurring and high amplitude (> 7.5 mV) spikes.",
'S5': "spiky aEEG with higher baseline but frequent and abrupt falls to near-zero"
}
class diagnoseEEG:
def __init__(self, EEG, filledNaNs, thresholds, labels, verbose=True, explain=True):
self.ABSTAIN = labels['ABSTAIN']
self.NORMAL = labels['NORMAL']
self.SUPPRESSED = labels['SUPPRESSED']
self.SUPPRESSED_WITH_ICTAL = labels['SUPPRESSED_WITH_ICTAL']
self.BURST_SUPRESSION = labels['BURST_SUPRESSION']
self.EEG = EEG
self.length_EEG = len(EEG)
self.filledNaNs = filledNaNs
filledNaNs = np.logical_and(self.EEG < 50, filledNaNs) # Maximum aEEG value 50 mV
if self.filledNaNs is not None:
self.EEG = self.EEG[~self.filledNaNs] # Keep only not NaN indices
else:
self.EEG = self.EEG
self.EEG = winsorize(a=self.EEG, limits=[0.01, 0.01], inplace=False) # To remove artefacts
self.thresholds = thresholds
self.NoBaseline = False
self.verbose = verbose
self.explain = explain
self.means = []
self.weights = []
self.bic = []
self.description = DESCRIPTION
self.slope, self.intial_EEG_baseline, _, self.y_pred = _fitRobustLine(self.EEG)
self.final_EEG_baseline = float(self.y_pred[-1])
self.average_EEG_baseline = np.sum(self.y_pred) / len(self.y_pred)
self.peaks, _ = find_peaks(self.EEG.reshape((-1,)), prominence=2, width=1)
self.prominences = self.EEG[self.peaks].reshape((-1,)) - self.y_pred.reshape((-1,))[self.peaks]
# Number of high and low amplitude peaks
if len(self.peaks) > 0: # There may be no peaks at all
self.n_peaks_VHA = len(self.peaks[self.prominences > self.thresholds['EEG__HIGH_15']])
self.n_peaks_HA = len(self.peaks[self.prominences > self.thresholds['EEG__HIGH_10']])
self.n_peaks_LA = len(self.peaks[self.prominences > self.thresholds['EEG__HIGH_5']]) - self.n_peaks_HA
else:
self.n_peaks_VHA = 0
self.n_peaks_HA = 0
self.n_peaks_LA = 0
self.many_high_amp_spikes = self.n_peaks_HA > self.thresholds['n_high_amplitude_peaks_per_hour']
self.many_low_amp_spikes = self.n_peaks_LA > self.thresholds['n_high_amplitude_peaks_per_hour']
self.low_baseline = self.average_EEG_baseline < self.thresholds['EEG__LOW']
self.dur_low_amplitude_EEG = len(self.EEG[self.EEG < self.thresholds['near_zero']])
# Fit Gaussian Mixtures
for n_components in [1, 2]:
obj = GaussianMixture(n_components=n_components)
obj.fit(self.EEG)
self.bic.append(obj.bic(self.EEG))
self.weights.append(obj.weights_.squeeze())
self.means.append(obj.means_.squeeze())
if self.verbose:
self.print_statistics()
def print_statistics(self):
"""
Prints decision making statistics
"""
print('\t #############')
print(
f'\t Slope: {np.around(float(self.slope), 3)} y-intercept: {np.around(float(self.intial_EEG_baseline), 3)}')
print(f'\t Average EEG baseline: {np.around(self.average_EEG_baseline, 3)}')
print(f'\t NaN time period: {np.sum(self.filledNaNs)}')
print(f'\t Peaks (> 5mV): {len(self.peaks)}')
print(f'\t 1-component GMM: means = {self.means[0]} | weights = {self.weights[0]} BIC = {self.bic[0]}')
print(f'\t 2-component GMM: means = {self.means[1]} | weights = {self.weights[1]} BIC = {self.bic[1]}')
print(f"\t Number of high amplitude (> {self.thresholds['EEG__HIGH_10']} mV) peaks {self.n_peaks_HA}")
print(f"\t Number of low amplitude ({self.thresholds['EEG__HIGH_5']} < _ < 10 mV) peaks {self.n_peaks_LA}")
print(f'\t Duration of near-zero aEEG amplitude (< 1mV): {self.dur_low_amplitude_EEG}')
print(f'\t Not-NaNs EEG signal length: {len(self.EEG)} \n\t Minimum EEG value: {min(self.EEG)}')
print('\t #############')
def high_aEEG_baseline_NORMAL(self, threshold_EEG_HIGH=10):
"""
High shaggy aEEG baseline constantly at an amplitude of around 10-20 mV, then NORMAL EEG.
"""
if ((self.average_EEG_baseline >= threshold_EEG_HIGH) and (not self.NoBaseline)):
return self.NORMAL
else:
return self.ABSTAIN
def unimodal_aEEG_NORMAL(self):
if (min(self.weights[1]) < 0.05) and (self.means[0] > self.thresholds['EEG__LOW']): # Unimodal distribution
return self.NORMAL
else:
return self.ABSTAIN
def unimodal_aEEG_SUPPRESSED(self):
if (min(self.weights[1]) < 0.05) and (self.means[0] < self.thresholds['EEG__LOW']): # Unimodal distribution
return self.SUPPRESSED
else:
return self.ABSTAIN
def bimodal_aEEG_SUPPRESSED(self):
if (min(self.weights[1]) > 0.05) and np.max(self.means[1]) < self.thresholds['EEG__LOW']:
return self.SUPPRESSED
else:
return self.ABSTAIN
def bimodal_aEEG_SUPPRESSED_WITH_ICTAL(self):
if (min(self.weights[1]) > 0.05) and (np.min(self.means[1]) < self.thresholds['EEG__LOW']) and (
np.max(self.means[1]) > self.thresholds['EEG__HIGH_5']):
return self.SUPPRESSED_WITH_ICTAL
else:
return self.ABSTAIN
def bimodal_aEEG_BURST_SUPRESSION(self):
if (min(self.weights[1]) > 0.05) and (np.min(self.means[1]) < self.thresholds['EEG__LOW']) and (
np.max(self.means[1]) < self.thresholds['EEG__HIGH_5']):
return self.BURST_SUPRESSION
else:
return self.ABSTAIN
def bimodal_aEEG_NORMAL(self):
if (min(self.weights[1]) > 0.05) and np.min(self.means[1]) > self.thresholds['EEG__LOW']:
return self.NORMAL
else:
return self.ABSTAIN
def bimodal_aEEG(self):
if min(self.weights[1]) > 0.05:
if np.max(self.means[1]) < self.thresholds['EEG__LOW']:
return self.SUPPRESSED
elif (np.min(self.means[1]) < self.thresholds['EEG__LOW']) and (
np.max(self.means[1]) > self.thresholds['EEG__HIGH_5']):
return self.SUPPRESSED_WITH_ICTAL
elif (np.min(self.means[1]) < self.thresholds['EEG__LOW']) and (
np.max(self.means[1]) < self.thresholds['EEG__HIGH_5']):
return self.BURST_SUPRESSION
elif np.min(self.means[1]) > self.thresholds['EEG__LOW']:
return self.NORMAL
else:
return self.ABSTAIN
else: # Unimodal distribution
if self.means[0] < self.thresholds['EEG__LOW']:
return self.SUPPRESSED
if self.means[0] > self.thresholds['EEG__LOW']:
return self.NORMAL
else:
return self.ABSTAIN
def aEEG_NOT_near_zero_NORMAL(self):
if (np.sum(self.EEG <= self.thresholds['near_zero']) < self.thresholds['near_zero_duration_tol']):
return self.NORMAL
else:
return self.ABSTAIN
def very_spiky_aEEG_SUPPRESSED_WITH_ICTAL(self):
"""
aEEF having spikes having > 15 mV more than once every minutes on an average is most probably ictal.
"""
if self.n_peaks_VHA > self.thresholds['n_high_amplitude_peaks_per_hour']:
return self.SUPPRESSED_WITH_ICTAL
else:
return self.ABSTAIN
def well_separated_aEEG_modes_SUPPRESSED_WITH_ICTAL(self):
"""
If aEEG values are well separated, i.e. their distribution has two peaks separated by atleast 4mV,
then the aEEG is more likey to be Supressed with ictal
"""
if abs(self.means[1][0] - self.means[1][1]) > self.thresholds['min_separation'] and min(self.weights[1]) > 0.05:
return self.SUPPRESSED_WITH_ICTAL
else:
return self.ABSTAIN
def low_baseline_SUPPRESSED_WITH_ICTAL(self):
if (not self.NoBaseline) and (self.low_baseline) and (self.many_high_amp_spikes):
return self.SUPPRESSED_WITH_ICTAL
else:
return self.ABSTAIN
def low_baseline_BURST_SUPRESSION(self):
if (not self.NoBaseline) and (self.low_baseline) and (self.many_low_amp_spikes):
            return self.BURST_SUPRESSION
else:
return self.ABSTAIN
def low_baseline_SUPPRESSED(self):
if (not self.NoBaseline) and (self.low_baseline) and (not self.many_high_amp_spikes) and (
not self.many_low_amp_spikes):
return self.SUPPRESSED
else:
return self.ABSTAIN
def low_baseline_aEEG(self):
if self.NoBaseline:
return self.ABSTAIN
if self.low_baseline:
if self.many_high_amp_spikes:
return self.SUPPRESSED_WITH_ICTAL
elif self.many_low_amp_spikes:
return self.BURST_SUPRESSION
else:
return self.SUPPRESSED
else:
return self.ABSTAIN
def high_baseline_infrequent_drops_NORMAL(self):
if (not self.NoBaseline) and (not self.low_baseline) and (
self.dur_low_amplitude_EEG <= self.thresholds['near_zero_duration_tol']):
return self.NORMAL
else:
return self.ABSTAIN
def high_baseline_frequent_drops_SUPPRESSED_WITH_ICTAL(self):
if (not self.NoBaseline) and (not self.low_baseline) and (
self.dur_low_amplitude_EEG > self.thresholds['near_zero_duration_tol']) and (
self.bimodal_aEEG_BURST_SUPRESSION() != self.BURST_SUPRESSION):
return self.SUPPRESSED_WITH_ICTAL
else:
return self.ABSTAIN
def high_baseline_frequent_drops_BURST_SUPRESSION(self):
if (not self.NoBaseline) and (not self.low_baseline) and (
self.dur_low_amplitude_EEG > self.thresholds['near_zero_duration_tol']) and (
self.bimodal_aEEG_BURST_SUPRESSION() == self.BURST_SUPRESSION):
return self.BURST_SUPRESSION
else:
return self.ABSTAIN
def high_baseline_frequent_drops(self):
if self.NoBaseline:
return self.ABSTAIN
if not self.low_baseline:
if self.dur_low_amplitude_EEG <= self.thresholds['near_zero_duration_tol']:
return self.NORMAL
elif self.dur_low_amplitude_EEG > self.thresholds[
'near_zero_duration_tol'] and self.bimodal_aEEG_BURST_SUPRESSION() == self.BURST_SUPRESSION:
return self.BURST_SUPRESSION
else:
return self.SUPPRESSED_WITH_ICTAL
else:
return self.ABSTAIN
def get_vote_vector(self):
return [self.bimodal_aEEG(), self.low_baseline_aEEG(), self.high_baseline_frequent_drops(),
self.very_spiky_aEEG_SUPPRESSED_WITH_ICTAL(), self.well_separated_aEEG_modes_SUPPRESSED_WITH_ICTAL(),
self.aEEG_NOT_near_zero_NORMAL(), self.high_aEEG_baseline_NORMAL(threshold_EEG_HIGH=4),
self.high_aEEG_baseline_NORMAL(threshold_EEG_HIGH=10), self.unimodal_aEEG_NORMAL(),
self.unimodal_aEEG_SUPPRESSED(), self.bimodal_aEEG_SUPPRESSED(),
self.bimodal_aEEG_SUPPRESSED_WITH_ICTAL(),
self.bimodal_aEEG_BURST_SUPRESSION(), self.bimodal_aEEG_NORMAL(),
self.low_baseline_SUPPRESSED_WITH_ICTAL(),
self.low_baseline_BURST_SUPRESSION(), self.low_baseline_SUPPRESSED(),
self.high_baseline_infrequent_drops_NORMAL(),
self.high_baseline_frequent_drops_SUPPRESSED_WITH_ICTAL(),
self.high_baseline_frequent_drops_BURST_SUPRESSION()]
@staticmethod
def get_LF_names():
return ['bimodal_aEEG', 'low_baseline_aEEG', 'high_baseline_frequent_drops',
'very_spiky_aEEG_SUPPRESSED_WITH_ICTAL',
'well_separated_aEEG_modes_SUPPRESSED_WITH_ICTAL', 'aEEG_NOT_near_zero_NORMAL',
'high_aEEG_baseline_NORMAL_4', 'high_aEEG_baseline_NORMAL_10',
'unimodal_aEEG_NORMAL', 'unimodal_aEEG_SUPPRESSED', 'bimodal_aEEG_SUPPRESSED',
'bimodal_aEEG_SUPPRESSED_WITH_ICTAL',
'bimodal_aEEG_BURST_SUPRESSION', 'bimodal_aEEG_NORMAL', 'low_baseline_SUPPRESSED_WITH_ICTAL',
'low_baseline_BURST_SUPRESSION', 'low_baseline_SUPPRESSED', 'high_baseline_infrequent_drops_NORMAL',
'high_baseline_frequent_drops_SUPPRESSED_WITH_ICTAL', 'high_baseline_frequent_drops_BURST_SUPRESSION']
def _fitRobustLine(T):
# T is a time series
"""
Fit the RANSAC robust linear regressor
Returns:
Coefficient, intercept, coefficient of determination (R^2) and predicted line
"""
ransac = linear_model.RANSACRegressor(base_estimator=linear_model.Ridge(alpha=1000))
# ransac = linear_model.RANSACRegressor()
    X = np.arange(len(T)).reshape((-1, 1))
    ransac.fit(X, T)
    y = ransac.predict(X)
    return ransac.estimator_.coef_, ransac.estimator_.intercept_, ransac.estimator_.score(X, T), y
## Labelling functions
# obj.bimodal_aEEG()
# obj.low_baseline_aEEG()
# obj.high_baseline_frequent_drops()
# obj.very_spiky_aEEG_SUPPRESSED_WITH_ICTAL()
# obj.well_separated_aEEG_modes_SUPPRESSED_WITH_ICTAL()
# obj.aEEG_NOT_near_zero_NORMAL()
# obj.high_aEEG_baseline_NORMAL(threshold_EEG_HIGH = 4)
# obj.high_aEEG_baseline_NORMAL(threshold_EEG_HIGH = 10)
# obj.unimodal_aEEG_NORMAL()
# obj.unimodal_aEEG_SUPPRESSED()
# obj.bimodal_aEEG_SUPPRESSED()
# obj.bimodal_aEEG_SUPPRESSED_WITH_ICTAL()
# obj.bimodal_aEEG_BURST_SUPRESSION()
# obj.bimodal_aEEG_NORMAL()
# obj.low_baseline_SUPPRESSED_WITH_ICTAL()
# obj.low_baseline_BURST_SUPRESSION()
# obj.low_baseline_SUPPRESSED()
# obj.high_baseline_infrequent_drops_NORMAL()
# obj.high_baseline_frequent_drops_SUPPRESSED_WITH_ICTAL()
# obj.high_baseline_frequent_drops_BURST_SUPRESSION()
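# Usage sketch (illustrative; the label and threshold values below are
# assumptions, not values from the original project):
#
#   labels = {'ABSTAIN': -1, 'NORMAL': 0, 'SUPPRESSED': 1,
#             'SUPPRESSED_WITH_ICTAL': 2, 'BURST_SUPRESSION': 3}
#   thresholds = {'EEG__LOW': 3, 'EEG__HIGH_5': 5, 'EEG__HIGH_10': 10,
#                 'EEG__HIGH_15': 15, 'near_zero': 1,
#                 'near_zero_duration_tol': 60,
#                 'n_high_amplitude_peaks_per_hour': 60, 'min_separation': 4}
#   eeg = np.abs(np.random.randn(3600, 1)) * 5   # fake hour of aEEG, in mV
#   obj = diagnoseEEG(eeg, filledNaNs=None, thresholds=thresholds,
#                     labels=labels, verbose=False)
#   votes = obj.get_vote_vector()                # one label per labelling function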
|
11545570
|
from setuptools import setup, find_packages
long_description = '''\
image-diet is a Django application for removing unnecessary bytes from image
files. It optimizes images without changing their look or visual quality
("losslessly").
It works on images in JPEG, GIF and PNG formats and will leave others
unchanged. It provides seamless integration with the easy_thumbnails app, but
can work with others too.'''
setup(
author="<NAME>",
author_email="<EMAIL>",
name='image-diet',
version='0.7.1',
description='Remove unnecessary bytes from images',
long_description=long_description,
url='https://github.com/samastur/image-diet/',
platforms=['OS Independent'],
license='MIT License',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Utilities',
],
install_requires=[
'django>=1.3',
],
include_package_data=True,
packages=find_packages(),
zip_safe=False
)
|
11545603
|
import subprocess
from django.conf import settings
def create_dict(local=None, field=None, **kwargs):
"""
以字典的形式从局部变量locals()中获取指定的变量
:param local: dict
:param field: str[] 指定需要从local中读取的变量名称
:param kwargs: 需要将变量指定额外名称时使用
:return: dict
"""
if field is None or local is None:
return {}
result = {k: v for k, v in local.items() if k in field}
result.update(**kwargs)
return result
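# Example (illustrative): pick selected locals and attach an extra entry.
#
#   def view(request):
#       page, size = 1, 20
#       return create_dict(locals(), ['page', 'size'], user=request.user)
#   # -> {'page': 1, 'size': 20, 'user': <user>}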
def push():
"""推送生成的静态页面到远程仓库"""
return subprocess.Popen('git pull && git add . && git commit -m "Auto commit by Maltose" && git push', cwd=settings.BLOG_REPOSITORIES, shell=True)
|
11545636
|
from brownie import *
from brownie.network.contract import InterfaceContainer
from brownie.network.state import _add_contract, _remove_contract
import shared
from munch import Munch
#todo only deploy one token, read the other
def deployTokens(acct):
print("Deploying test tokens.")
tokens = Munch()
tokens.susd = acct.deploy(TestToken, "SUSD", "SUSD", 18, 1e50)
if network.show_active() == "development":
tokens.wrbtc = acct.deploy(TestWrbtc) ## 0x3194cBDC3dbcd3E11a07892e7bA5c3394048Cc87
tokens.wrbtc.deposit({'value':10e18})#needed because of local swap impl or sovryn swap simulator
tokens.susd.mint(acct, 10000e18)
else:
tokens.wrbtc = acct.deploy(WRBTC)
return tokens
def deployWRBTC(acct, susdAddress):
tokens = Munch()
if network.show_active() == "development":
tokens.wrbtc = acct.deploy(TestWrbtc) ## 0x3194cBDC3dbcd3E11a07892e7bA5c3394048Cc87
tokens.wrbtc.deposit({'value':10e18})#needed because of local swap impl or sovryn swap simulator
else:
tokens.wrbtc = acct.deploy(WRBTC)
    tokens.wrbtc.deposit({'value':1e17}) # not needed for the sovryn protocol itself, but for the later swap deployment
    tokens.susd = Contract.from_abi("SUSD", address=susdAddress, abi=TestToken.abi, owner=acct)
    return tokens
def readTokens(owner, wrbtcAddress, susdAddress):
print("Reading test tokens.")
tokens = Munch()
if network.show_active() == "development":
tokens.wrbtc = Contract.from_abi("TestWrbtc", address=wrbtcAddress, abi=TestWrbtc.abi, owner=owner)
else:
tokens.wrbtc = Contract.from_abi("WRBTC", address=wrbtcAddress, abi=WRBTC.abi, owner=owner)
tokens.susd = Contract.from_abi("TestSUSD", address=susdAddress, abi=TestToken.abi, owner=owner)
return tokens
|
11545657
|
import git
import json
import psutil
def commit_info(path_to_repo):
repo = git.Repo.init(path_to_repo)
return repo.commit().committed_datetime.isoformat(), str(repo.commit())
def store_additional_data(rev, cdate, resources, filename):
data = {
'revision': rev,
'softmasking': True,
'commit_date': cdate,
'resources': resources
}
with open(filename, 'w') as file:
json.dump(data, file, indent=4)
def check_memory():
# startup memory check
m = psutil.virtual_memory()
print(f'Total memory: {m.total*10**(-9):.2f} GB')
print(f'Currently used memory: {m.used*10**(-9):.2f} GB')
|
11545750
|
in_global_scope = 'in_global_scope_value'
class SomeClass(object):
def method(self):
print('breakpoint here')
if __name__ == '__main__':
SomeClass().method()
    print('TEST SUCCEEDED')
|
11545764
|
from settree.set_data import *
from settree.set_tree import *
from settree.gbest import *
from settree.set_rf import *
from settree.operations import *
from settree.explainability import *
|
11545771
|
import time
import subprocess as sp
import json
import six
from dwencode.ffpath import get_ffprobe_path
def probe(vid_file_path, ffprobe_path=None):
"""
From https://stackoverflow.com/a/36743499/1442895
Give a json from ffprobe command line
@vid_file_path : The absolute (full) path of the video file, string.
"""
ffprobe_path = get_ffprobe_path(ffprobe_path)
print(ffprobe_path)
command = [
ffprobe_path, "-loglevel", "quiet", "-print_format", "json",
"-show_format", "-show_streams", vid_file_path]
shell = bool(six.PY2)
pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT, shell=shell)
out = pipe.communicate()[0]
if not six.PY2:
out = out.decode('ascii')
return json.loads(out)
def get_format(vid_file_path, ffprobe_path=None):
ffprobe_path = get_ffprobe_path(ffprobe_path)
data = probe(vid_file_path, ffprobe_path)
vid_stream = [s for s in data['streams'] if 'coded_width' in s][0]
return vid_stream['coded_width'], vid_stream['coded_height']
def _get_formats(vid_file_paths, ffprobe_path):
processes = dict()
for path in vid_file_paths:
command = [
ffprobe_path, "-loglevel", "quiet", "-print_format", "json",
"-show_format", "-show_streams", path]
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
processes[path] = p
formats = dict()
for path, p in processes.items():
try:
data = json.loads(p.communicate()[0])
vid_stream = [s for s in data['streams'] if 'coded_width' in s][0]
format_ = vid_stream['coded_width'], vid_stream['coded_height']
formats[path] = format_
except BaseException:
formats[path] = (0, 0)
return formats
def _chunks(list_, chunk_size):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(list_), chunk_size):
yield list_[i:i + chunk_size]
def get_formats(vid_file_paths, chunk_size=64, ffprobe_path=None):
# Cannot open infinite number of files at the same time, so cut the list
# into pieces:
ffprobe_path = get_ffprobe_path(ffprobe_path)
start_time = time.time()
formats = dict()
count = len(vid_file_paths)
for i, paths_chunk in enumerate(_chunks(vid_file_paths, chunk_size)):
        print('Getting movie formats: %i/%i' % (min((i + 1) * chunk_size, count), count))
formats.update(_get_formats(paths_chunk, ffprobe_path=ffprobe_path))
print(time.time() - start_time)
return formats
|
11545782
|
from pudzu.charts import *
df = pd.read_csv("datasets/flagsnewold.csv")
groups = list(remove_duplicates(df.group))
array = [[dict(r) for _,r in df.iterrows() if r.group == g] for g in groups]
COLS = 3
arrays = list(generate_batches(array, ceil(len(array) / COLS)))
datas = [pd.DataFrame(array) for array in arrays]
FONT = calibri or sans
fg, bg="black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
def process(d):
if not d: return None
description = get_non(d, 'description')
description = "({})".format(description) if description else " "
if get_non(d, 'image'):
flag = Image.from_url_with_cache(get_non(d, 'image', default_img)).to_rgba()
flag = flag.resize_fixed_aspect(height=198) if flag.width / flag.height < 1.3 else flag.resize((318,198))
flag = flag.pad(0 if "coat" in d['group'] else (1,1,0,1) if "Venice" in d['name'] else 1, "grey")
else:
flag = Rectangle((320,200), bg)
return Image.from_column([
Image.from_text_bounded(d['name'].replace(r"\n","\n"), (320,200), 32, partial(FONT, bold=True), beard_line=True, align="center", fg=fg),
Image.from_text(description, FONT(24, italics=True), fg=fg),
flag
], padding=2, bg=bg, equal_widths=True)
title = Image.from_text(f"'New' flags versus 'old'".upper(), FONT(180, bold=True), fg=fg, bg=bg).pad(40, bg)
footer = Image.from_text("""¹ New Mexico's name actually predates that of Mexico the country! ² New Caledonia's flag is widespread but unofficial; the official flag is the French Tricolore. ³ Unrelated to Zealand in Denmark""", FONT(40), fg=fg, bg=bg).pad(20, bg)
grids = [grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=(0.5,1,0.5)) for data in datas]
grid = Image.from_row(grids, padding=(60,0), bg=bg, yalign=0)
img = Image.from_column([title, grid, footer], bg=bg)
#img.place(Image.from_text("/u/Udzu", FONT(24), fg=fg, bg=bg, padding=5).pad((1,1,0,0), fg), align=1, padding=5, copy=False)
img.save("output/flagsnewold.png")
|
11545794
|
from collections import OrderedDict
from indy_common.constants import ROLE, CLAIM_DEF_SIGNATURE_TYPE, CLAIM_DEF_SCHEMA_REF
from plenum.common.constants import TXN_TYPE, TARGET_NYM, \
DATA, ENC, RAW, HASH, ALIAS, TXN_TIME, VERKEY
from plenum.common.types import f
def getTxnOrderedFields():
return OrderedDict([
(f.IDENTIFIER.nm, (str, str)),
(f.REQ_ID.nm, (str, int)),
(f.SIG.nm, (str, str)),
(TXN_TIME, (str, int)),
(TXN_TYPE, (str, str)),
(TARGET_NYM, (str, str)),
(VERKEY, (str, str)),
(DATA, (str, str)),
(ALIAS, (str, str)),
(RAW, (str, str)),
(ENC, (str, str)),
(HASH, (str, str)),
(ROLE, (str, str)),
(CLAIM_DEF_SCHEMA_REF, (str, str)),
(CLAIM_DEF_SIGNATURE_TYPE, (str, str))
])
|
11545798
|
import FWCore.ParameterSet.Config as cms
source = cms.Source("EmptySource")
generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(11),
MinEta = cms.double(0.0),
MaxEta = cms.double(0.0),
MinPhi = cms.double(1.57079632679),
MaxPhi = cms.double(1.57079632679),
MinE = cms.double(10.0),
MaxE = cms.double(10.0)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0)
)
# Don't smear our vertex!
VtxSmeared = cms.EDProducer("GaussEvtVtxGenerator",
src = cms.InputTag("generator","unsmeared"),
MeanX = cms.double(0.0),
MeanY = cms.double(-2.0),
MeanZ = cms.double(0.0),
SigmaX = cms.double(0.0),
SigmaY = cms.double(0.0),
SigmaZ = cms.double(0.0),
TimeOffset = cms.double(0.0)
)
|
11545811
|
import pytest
from padl.dumptools import packagefinder
@pytest.fixture
def ignore_padl_requirement(monkeypatch):
monkeypatch.setattr(packagefinder, '_ignore_requirements',
packagefinder._ignore_requirements + ['padl'])
|
11545835
|
from typing import Optional
from collections import namedtuple
import numpy as np
__all__ = ["calibration_bins", "multiclass_calibration_bins", "binary_calibration_bins", "average_confidence", "Bins"]
Bins = namedtuple("Bins", "accs confs counts edges")
def multiclass_calibration_bins(truth: np.ndarray, probs: np.ndarray, bins: int, class_weights: Optional[np.ndarray] = None) -> Bins:
"""Calculate the binned confidence and accuracy for a multiclass problem.
:param truth: A 1D array of the true labels for some examples.
    :param probs: A 2D array of probabilities from a model. Each row represents an example in the dataset
        and each column represents the probability assigned by the model to each class for that example.
:param bins: The number of bins to use when aggregating.
:param class_weights: A 1D array of scores that can add extra weight to examples of specific classes.
:returns: The metrics aggregated by bins
"""
preds = np.argmax(probs, axis=1)
pred_probs = np.max(probs, axis=1)
credit = truth == preds
if class_weights is not None:
weighted_labels = class_weights[truth]
credit = credit * weighted_labels
return binary_calibration_bins(credit, pred_probs, bins=bins)
calibration_bins = multiclass_calibration_bins
def binary_calibration_bins(credit: np.ndarray, probs: np.ndarray, bins: int) -> Bins:
"""Calculate the accuracy and confidence inside bins. This is similar to the sklearn function.
Note:
This is different than the multiclass function. This will look at the scores for a specific
class even if it is below the 1/num_classes threshold that will cause the multiclass
version to not select this class.
    :param credit: How much credit the model gets for this example: zero if it is wrong,
        one if it is right. You can also give different classes different weights by passing real values.
:param probs: The probabilities assigned to the positive class by the model.
:param bins: The number of bins to use when aggregating.
:returns: The metrics aggregated by bins
"""
bins = np.linspace(0.0, 1.0, num=bins + 1, endpoint=True)
bin_idx = np.digitize(probs, bins) - 1
bin_conf_sum = np.bincount(bin_idx, weights=probs, minlength=len(bins))
bin_acc_sum = np.bincount(bin_idx, weights=credit, minlength=len(bins))
bin_counts = np.bincount(bin_idx, minlength=len(bins))
mask = bin_counts == 0
denom = bin_counts + mask
bin_mean_conf = bin_conf_sum / denom
bin_mean_acc = bin_acc_sum / denom
return Bins(bin_mean_acc[:-1], bin_mean_conf[:-1], bin_counts[:-1], bins[:-1])
def average_confidence(probs: np.ndarray) -> float:
"""Calculate the average (maximum) confidence for a collection of predictions
:param probs: `[B, C]` A matrix of probabilities, each row is an example and
each column is a class.
"""
if probs.ndim == 1:
probs = np.expand_dims(probs, axis=-1)
return np.mean(np.max(probs, axis=1))
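# Usage sketch (illustrative): reliability statistics for random predictions.
#
#   rng = np.random.default_rng(0)
#   probs = rng.dirichlet(np.ones(3), size=1000)   # 1000 examples, 3 classes
#   truth = rng.integers(0, 3, size=1000)
#   b = multiclass_calibration_bins(truth, probs, bins=10)
#   # b.accs, b.confs, b.counts and b.edges each have one entry per bin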
|
11545840
|
import datetime
import imutils
import time
import dlib
import cv2, math
import numpy as np
from imutils import face_utils, rotate_bound
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
model = "filters/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(model) # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
video_capture = cv2.VideoCapture(0)
cv2.imshow('Video', np.empty((5,5),dtype=float))
#points are tuples in the form (x,y)
# returns angle between points in degrees
def calculate_inclination(point1, point2):
x1,x2,y1,y2 = point1[0], point2[0], point1[1], point2[1]
incl = -180/math.pi*math.atan((float(y2-y1))/(x2-x1))
return incl
def calculate_boundbox(list_coordinates):
x = min(list_coordinates[:,0])
y = min(list_coordinates[:,1])
w = max(list_coordinates[:,0]) - x
h = max(list_coordinates[:,1]) - y
return (x,y,w,h)
def get_face_boundbox(points, face_part):
if face_part == 1:
(x,y,w,h) = calculate_boundbox(points[17:22]) #left eyebrow
elif face_part == 2:
(x,y,w,h) = calculate_boundbox(points[22:27]) #right eyebrow
elif face_part == 3:
(x,y,w,h) = calculate_boundbox(points[36:42]) #left eye
elif face_part == 4:
(x,y,w,h) = calculate_boundbox(points[42:48]) #right eye
elif face_part == 5:
(x,y,w,h) = calculate_boundbox(points[29:36]) #nose
elif face_part == 6:
(x,y,w,h) = calculate_boundbox(points[48:68]) #mouth
return (x,y,w,h)
while cv2.getWindowProperty('Video', 0) >= 0:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects = detector(gray, 0)
# loop over the face detections
for rect in rects:
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
for i in range(1,7):
(x,y,w,h) = get_face_boundbox(shape, i)
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)
incl = calculate_inclination(shape[17], shape[26])
img = cv2.imread("./sprites/doggy_ears.png")
rows,cols = img.shape[0], img.shape[1]
M = cv2.getRotationMatrix2D((cols/2,rows/2),incl,1)
dst = cv2.warpAffine(img,M,(cols,rows))
dst = rotate_bound(img, incl)
cv2.imshow('sprite',dst)
print "Pixels distance points in mouth: ", shape[66][1] - shape[62][1]
x,y, w, h = rect.left(), rect.top(), rect.width(), rect.height()
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
# loop over the (x, y)-coordinates for the facial landmarks
# and draw them on the image
for (x, y) in shape:
cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
11545869
|
from __future__ import annotations
import logging
import typing
from typing import Any, Dict, List
from silex_client.action.command_base import CommandBase
from silex_client.utils.thread import execute_in_thread
if typing.TYPE_CHECKING:
from silex_client.action.action_query import ActionQuery
import os
import pathlib
import shutil
from silex_client.utils.parameter_types import ListParameterMeta
class Move(CommandBase):
"""
    Move files to a destination directory, overwriting it if necessary
"""
parameters = {
"src": {
"label": "File path",
"type": ListParameterMeta(pathlib.Path),
"value": None,
},
"dst": {
"label": "Destination directory",
"type": pathlib.Path,
"value": None,
},
}
@CommandBase.conform_command()
async def __call__(
self,
parameters: Dict[str, Any],
action_query: ActionQuery,
logger: logging.Logger,
):
src: List[str] = [str(source) for source in parameters["src"]]
dst: str = str(parameters["dst"])
def move(src, dst):
# remove if dst already exist
if os.path.isdir(dst):
# clean tree
shutil.rmtree(dst)
os.makedirs(dst)
if os.path.isfile(dst):
os.remove(dst)
logger.info(f"source : {src}")
logger.info(f"destination : {dst}")
# move folder or file
if os.path.isdir(src):
# move all file in dst folder
file_names = os.listdir(src)
for file_name in file_names:
shutil.move(os.path.join(src, file_name), dst)
else:
shutil.move(src, dst)
        # Check that the destination exists
if not os.path.exists(dst):
raise Exception(f"{dst} doesn't exist.")
for item in src:
            # Check that the source exists
if not os.path.exists(item):
raise Exception(f"{item} doesn't exist.")
# Execute the move in a different thread to not block the event loop
await execute_in_thread(move, item, dst)
|
11545945
|
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.template.loader import get_template
from core.prismriver.dashboard.plugins import pluginbase
from core.prismriver.settings import CUSTOM_MENU
from core.prismriver.dashboard.settings import APP_MENU
from core.prismriver.views import load_apps, load_custom_models
from copy import deepcopy
class AppList(pluginbase.DashboardPlugin):
def get_custom_menu(self, request):
apps = deepcopy(APP_MENU)
for app in apps:
app["models"], app["enabled"] = load_custom_models(request, app["items"])
c = Context({"apps": apps})
t = get_template('plugins/app_menu.html')
return t.render(c)
def get_menu(self, request):
current_url = request.path.replace(reverse('admin:index'), "").lower()
c = Context({"apps": load_apps(request)})
t = get_template('plugins/app_menu.html')
return t.render(c)
def render(self, request):
if CUSTOM_MENU:
return self.get_custom_menu(request)
else:
return self.get_menu(request)
|
11545955
|
import py, sys, subprocess
currpath = py.path.local(__file__).dirpath()
def setup_make(targetname):
if sys.platform == 'win32':
py.test.skip("win32 not supported so far")
import pypy.module._cppyy.capi.loadable_capi as lcapi
popen = subprocess.Popen(["make", targetname], cwd=str(currpath),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = popen.communicate()
if popen.returncode:
if '-std=c++11' in stdout:
py.test.skip("gcc does not seem to support -std=c++11")
raise OSError("'make' failed:\n%s" % (stdout,))
|
11545992
|
from datetime import timedelta
from celery.task.schedules import crontab
from django.utils import timezone
from orchestra.contrib.tasks import periodic_task
from . import settings
from .models import BackendLog
@periodic_task(run_every=crontab(hour=7, minute=0))
def backend_logs_cleanup():
days = settings.ORCHESTRATION_BACKEND_CLEANUP_DAYS
epoch = timezone.now()-timedelta(days=days)
return BackendLog.objects.filter(created_at__lt=epoch).only('id').delete()
|
11546014
|
import pandas as pd
import numpy as np
import streamlit as st
import plotly.express as px
import plotly.graph_objects as go
import matplotlib
import matplotlib.pyplot as plt
import pages.home
import csv
import scipy.stats as scs
def write():
with st.spinner("Loading Modelling ..."):
st.title('A/B Testing')
st.markdown('## Live Feedback A/B Testing')
st.markdown(
'''
            In this research study two different types of models have been examined: Agent Based Modelling and Compartmental Modelling. Compartmental Modelling
            represents the most traditional way (and gold standard) to model epidemic developments (Control Group), while Agent Based Modelling offers an alternative view on how to approach this
            type of problem (Treatment Group).
- **Compartmental Modelling:** SIR and SEIR Modelling, Advanced SEIR Modelling, Vaccination Modelling, Coronavirus Modelling.
- **Agent Based Modelling:** Population Modelling, Track and Trace, Central Hubs, Finance Simulation.
            Which of the two approaches do you think would make you feel most comfortable making a decision about possible interventions to apply (aiding your decision making)?
            You can express just a single vote; subsequent ones will be automatically discarded.
'''
)
data = pd.read_csv('src/pages/record.csv')
last_record = list(data.sum(axis=0))
ba = st.button('Compartmental Modelling')
if ba:
f = open("src/pages/vote.txt", "r")
status = int(f.read())
f.close()
if status == 0:
with open('src/pages/record.csv', 'a') as fd:
writer = csv.writer(fd)
writer.writerow([])
writer.writerow(['1', '0'])
data = pd.read_csv('src/pages/record.csv')
last_record = list(data.sum(axis=0))
f = open("src/pages/vote.txt", "w")
f.write("1")
f.close()
st.write(last_record[0])
bb = st.button('Agent Based Modelling')
if bb:
f = open("src/pages/vote.txt", "r")
status = int(f.read())
f.close()
if status == 0:
with open('src/pages/record.csv', 'a') as fd:
writer = csv.writer(fd)
writer.writerow([])
writer.writerow(['0', '1'])
data = pd.read_csv('src/pages/record.csv')
last_record = list(data.sum(axis=0))
f = open("src/pages/vote.txt", "w")
f.write("1")
f.close()
st.write(last_record[1])
st.write("Sample Size (logged responses): ",
round(sum(last_record), 3))
if ba == False and bb == False:
pass
else:
c_a = last_record[0]/sum(last_record)
c_b = last_record[1]/sum(last_record)
cr_uplift = (c_b - c_a) / c_a
se_a, se_b = np.sqrt((c_a * (1 - c_a)) / sum(last_record)
), np.sqrt((c_b * (1 - c_b)) / sum(last_record))
se_diff = np.sqrt(se_a**2 + se_b**2)
z_score = (c_b - c_a) / se_diff
p_value = 1 - scs.norm(0, 1).cdf(z_score)
sides = st.radio("Type of Hypotesys", ('One Sided', 'Two Sided'))
if sides == 'One Sided':
sided = 0
else:
sided = 1
interval = st.slider("Required Confidence: ",
min_value=0.0, max_value=1.0,
value=0.9, step=0.01)
x_a = np.linspace(last_record[0]-49, last_record[0]+50, 100)
y_a = scs.binom(sum(last_record), c_a).pmf(x_a)
x_b = np.linspace(last_record[1]-49, last_record[1]+50, 100)
y_b = scs.binom(sum(last_record), c_b).pmf(x_b)
fig = go.Figure()
fig.add_trace(go.Scatter(x=x_a, y=y_a,
mode='lines',
name='Control Group'))
fig.add_trace(go.Scatter(x=x_b, y=y_b,
mode='lines',
name='Treatment Group'))
fig.update_layout(
title_text="Binomial Distribution Representation of Control and Treatment Groups")
fig.update_xaxes(title="Count of Possible Outcomes")
fig.update_yaxes(title="Probability")
fig.update_layout(
autosize=False,
width=700,
height=500,
)
st.plotly_chart(fig)
st.write("Conversion Rate for Compartmental Modelling: ",
round(c_a*100, 3), "%")
st.write("Conversion Rate for Agent Based Modelling: ",
round(c_b*100, 3), "%")
st.write("Relative Uplift: ", round(cr_uplift*100, 3), "%")
st.write("Z Score: ", round(z_score, 3))
st.write("P Value: ", round(p_value, 3))
if ((p_value < (1 - interval) and sided == 0) or ((p_value > (interval + (1 - interval)/2) or p_value < (1 - interval - (1 - interval)/2)) and sided == 1)):
st.write("Statistically significant: ", True)
else:
st.write("Statistically significant: ", False)
|
11546038
|
import numpy as np

def v_star(y, α, β, μ):
"""
True value function
"""
c1 = np.log(1 - α * β) / (1 - β)
c2 = (μ + α * np.log(α * β)) / (1 - α)
c3 = 1 / (1 - β)
c4 = 1 / (1 - α * β)
return c1 + c2 * (c3 - c4) + c4 * np.log(y)
def σ_star(y, α, β):
"""
True optimal policy
"""
return (1 - α * β) * y
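# Example (illustrative parameter values) for the closed-form solution of the
# log-utility optimal growth problem:
#
#   y, α, β, μ = 2.0, 0.4, 0.96, 0.0
#   v_star(y, α, β, μ)   # true value function at y
#   σ_star(y, α, β)      # optimal consumption: (1 - αβ) * y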
|
11546065
|
from IPayment import IPayment
from CPU import CPU
from VGA import VGA
class CreditCard(IPayment):
def visit(self, component):
if type(component) is CPU:
print("Purchase CPU with Credit Card")
elif type(component) is VGA:
print("Purchase VGA with Credit Card")
|
11546067
|
import threading
import time
class ThreadManager:
# Define thread types for starting threads
SINGLE = 0 # One thread at once, block if already running
MULTIPLE = 1 # Multiple threads, name with counter, and run
KILLABLE = 2 # Thread can be killed with a flag
REPLACEABLE = 3 # Like SINGLE, but instead of blocking, kill and restart
def is_alive(self, thread_name):
"""Check if a thread by a given name is active.
Args:
thread_name (String): The name of the thread to check.
Returns:
            threading.Thread: The matching thread, if one is found and active.
            bool: False if no thread is found, or the thread is not active.
"""
for thread in threading.enumerate():
if thread.name == thread_name and thread.is_alive():
return thread
return False
def garbage_collect(self):
"""Remove threads from threadlist that aren't active anymore."""
self.threadlist = {name: thread for name, thread in self.threadlist.items() if thread['thread'].is_alive()}
self.progress_threads = [thread for thread in self.progress_threads if thread.is_alive()]
def __init__(self):
"""Create and manage threads for backup and operation."""
self.threadlist = {}
self.counter = 0
self.progress_threads = []
def thread_garbage_collect():
"""Periodically run garbage collection."""
while 1:
time.sleep(20)
self.garbage_collect()
self._gc_thread = threading.Thread(target=thread_garbage_collect, name='ThreadManager_GC', daemon=True)
self._gc_thread.start()
def start(self, thread_type, is_progress_thread=False, callback=None, *args, **kwargs):
"""Create and start a thread if one doesn't already exist.
Args:
thread_type (int): The constant corresponding to the thread type to create.
is_progress_thread (bool): Whether or not the thread controls the progress
bar (default: False).
            callback (Callable, optional): For KILLABLE and REPLACEABLE threads, the function to
run to kill the thread.
Returns:
String: If a thread is successfully created, the thread name is returned.
bool: False if an active thread exists with that name.
"""
def dummy():
"""A dummy function to pass as a default callback to KILLABLE threads."""
pass
# SINGLE: block if already running
# MULTIPLE: run again, and increment counter
# KILLABLE: Add flag to let it be killed
# REPLACEABLE: SINGLE thread, but instead of blocking, kill and restart
if thread_type == self.SINGLE or thread_type == self.KILLABLE or thread_type == self.REPLACEABLE:
if 'name' in kwargs:
thread_name = kwargs['name']
else:
self.counter += 1
thread_name = f"thread{self.counter}"
elif thread_type == self.MULTIPLE:
self.counter += 1
thread_name = f"{kwargs['name'] if 'name' in kwargs else 'thread'}_{self.counter}"
# If the thread either isn't in the list, or isn't active, create and run the thread
if thread_type == self.SINGLE and not self.is_alive(thread_name):
# if thread_name not in self.threadlist.keys() or not self.threadlist[thread_name]['thread'].is_alive():
self.threadlist[thread_name] = {
'type': thread_type,
'thread': threading.Thread(**kwargs)
}
# If thread controls progress bar, add it to list
if is_progress_thread:
self.progress_threads.append(self.threadlist[thread_name]['thread'])
self.threadlist[thread_name]['thread'].start()
return thread_name
elif thread_type == self.MULTIPLE and not self.is_alive(thread_name):
self.threadlist[thread_name] = {
'type': thread_type,
'thread': threading.Thread(**kwargs)
}
# If thread controls progress bar, add it to list
if is_progress_thread:
self.progress_threads.append(self.threadlist[thread_name]['thread'])
self.threadlist[thread_name]['thread'].start()
return thread_name
elif thread_type == self.KILLABLE and not self.is_alive(thread_name):
self.threadlist[thread_name] = {
'type': thread_type,
'thread': threading.Thread(**kwargs),
'killFlag': False,
'callback': callback if callback is not None else dummy
}
# If thread controls progress bar, add it to list
if is_progress_thread:
self.progress_threads.append(self.threadlist[thread_name]['thread'])
self.threadlist[thread_name]['thread'].start()
return thread_name
elif thread_type == self.REPLACEABLE:
# If thread is active already, kill it before starting a new thread
replaceable_thread = self.is_alive(thread_name)
if replaceable_thread:
self.kill(replaceable_thread)
                # Wait until the thread is killed; sleep briefly to avoid a busy-wait
                while self.is_alive(thread_name):
                    time.sleep(0.01)
self.threadlist[thread_name] = {
'type': thread_type,
'thread': threading.Thread(**kwargs),
'killFlag': False,
                'callback': callback if callback is not None else dummy
}
# If thread controls progress bar, add it to list
if is_progress_thread:
self.progress_threads.append(self.threadlist[thread_name]['thread'])
self.threadlist[thread_name]['thread'].start()
return thread_name
return False
def kill(self, name):
"""Kill a KILLABLE or REPLACEABLE thread by name.
Kills a thread by running the callback function defined during creation. This
only works on KILLABLE and REPLACEABLE threads.
Args:
name (String): The name of the thread, as set in threadlist.
"""
if (name in self.threadlist.keys()
and self.threadlist[name]['thread'].is_alive()
and (self.threadlist[name]['type'] == self.KILLABLE or self.threadlist[name]['type'] == self.REPLACEABLE)
and self.threadlist[name]['killFlag'] is not True):
# Thread exists, is active, is KILLABLE or REPLACEABLE, and has not been killed
self.threadlist[name]['killFlag'] = True
self.threadlist[name]['callback']()
def get_progress_threads(self):
"""List the progress-influencing threads that are running.
Returns:
list: The list of thread instances that control the progress bar.
"""
self.progress_threads = [thread for thread in self.progress_threads if thread.is_alive()]
return self.progress_threads
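# Hedged usage sketch: start a KILLABLE worker that polls its own kill flag,
# then stop it. The worker body and flag polling are illustrative assumptions;
# only start()/kill() come from the class above.
if __name__ == "__main__":
    manager = ThreadManager()

    def worker():
        while not manager.threadlist['worker']['killFlag']:
            time.sleep(0.1)

    name = manager.start(ThreadManager.KILLABLE, target=worker, name='worker')
    time.sleep(0.5)
    manager.kill(name)  # sets the kill flag and invokes the callback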
|
11546083
|
from models import Base, User
from flask import Flask, jsonify, request, url_for, abort
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
engine = create_engine('sqlite:///users.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
app = Flask(__name__)
@app.route('/api/users', methods = ['POST'])
def new_user():
username = request.json.get('username')
password = request.json.get('password')
if username is None or password is None:
abort(400) # missing arguments
if session.query(User).filter_by(username = username).first() is not None:
abort(400) # existing user
user = User(username = username)
user.hash_password(password)
session.add(user)
session.commit()
return jsonify({ 'username': user.username }), 201, {'Location': url_for('get_user', id = user.id, _external = True)}
@app.route('/api/users/<int:id>')
def get_user(id):
    user = session.query(User).filter_by(id=id).first()
    if not user:
        abort(400)
return jsonify({'username': user.username})
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
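# Hedged usage sketch (assumes the server above is running locally):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "secret"}' \
#        http://localhost:5000/api/users
#   curl http://localhost:5000/api/users/1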
|
11546104
|
import tensorflow as tf
def clip_boxes_to_img_boundaries(decode_boxes, img_shape):
    """Clip [xmin, ymin, xmax, ymax] boxes so they lie inside the image.

    Args:
        decode_boxes: float tensor of shape [N, 4], boxes as (xmin, ymin, xmax, ymax).
        img_shape: image shape as [batch, height, width, channels].

    Returns:
        Float tensor of shape [N, 4] with coordinates clipped to the image bounds.
    """
xmin = decode_boxes[:, 0]
ymin = decode_boxes[:, 1]
xmax = decode_boxes[:, 2]
ymax = decode_boxes[:, 3]
img_h, img_w = img_shape[1], img_shape[2]
img_h, img_w = tf.cast(img_h, tf.float32), tf.cast(img_w, tf.float32)
xmin = tf.maximum(tf.minimum(xmin, img_w-1.), 0.)
ymin = tf.maximum(tf.minimum(ymin, img_h-1.), 0.)
xmax = tf.maximum(tf.minimum(xmax, img_w-1.), 0.)
ymax = tf.maximum(tf.minimum(ymax, img_h-1.), 0.)
return tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
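# Hedged usage sketch: clip two out-of-bounds boxes against a 100x200 image.
# The box values and NHWC shape are illustrative assumptions.
if __name__ == "__main__":
    boxes = tf.constant([[-5., 10., 250., 90.],
                         [20., -3., 150., 120.]])
    clipped = clip_boxes_to_img_boundaries(boxes, img_shape=(1, 100, 200, 3))
    print(clipped)  # x in [0, 199], y in [0, 99]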
|
11546116
|
from typing import Any, Dict
from pytest_assert_utils import assert_dict_is_subset, assert_model_attrs
from pytest_common_subject import precondition_fixture
from pytest_lambda import lambda_fixture, static_fixture
from pytest_drf import (
APIViewTest,
Returns200,
Returns201,
Returns204,
UsesDeleteMethod,
UsesDetailEndpoint,
UsesGetMethod,
UsesListEndpoint,
UsesPatchMethod,
UsesPostMethod,
ViewSetTest,
)
from pytest_drf.util import pluralized, url_for
from tests.testapp.models import KeyValue
class DescribeQueryParams(
APIViewTest,
UsesGetMethod,
):
# NOTE: this view simply returns the request's query params as the response
url = lambda_fixture(lambda: url_for('views-query-params'))
# This fixture supports passing query params (e.g. ?key=val) with the requested URL
query_params = static_fixture({
'key': 'val',
'param': 'value',
'pink': 'floyd',
})
def it_passes_query_params(self, json, query_params):
expected = query_params
actual = json
assert expected == actual
class DescribeHeaders(
APIViewTest,
UsesGetMethod,
):
# NOTE: this view simply returns the request's headers as the response
url = lambda_fixture(lambda: url_for('views-headers'))
# This fixture supports passing headers (e.g. `Authorization: Api-Key 123`) in the request
headers = static_fixture({
'Custom-Header': 'abc',
'Head': 'Shoulders, Knees, Toes',
})
def it_passes_headers(self, json, headers):
expected = headers
actual = json
assert_dict_is_subset(expected, actual)
class DescribeData(
APIViewTest,
UsesPostMethod,
):
# NOTE: this view simply returns the request's POST data as the response
url = lambda_fixture(lambda: url_for('views-data'))
# This fixture supports passing POST data in the request
data = static_fixture({
'post': 'malone',
'fizzbuzz': 'zibbzuff',
})
def it_posts_data(self, json, data):
expected = data
actual = json
assert expected == actual
def express_key_value(kv: KeyValue) -> Dict[str, Any]:
"""Return the expected API representation of a KeyValue
Expression methods can be a handy tool for API scaffolding. Instead of
focusing on how to create a certain representation using serializers and
fields, an expression method enables one to simply consider which
information they'd like available from an endpoint, and implement it by
whatever means are convenient. After the expression method is created, one
can focus all their attention on writing a beautiful, maintainable serializer.
"""
return {
'id': kv.id,
'key': kv.key,
'value': kv.value,
}
###
# Expression methods only accept a single model/object. When dealing with list
# endpoints, we often have a list of models/objects. For this situation,
# the `pluralized` method/decorator converts our single-object expression
# method into a pluralized version that accepts lists of objects. The pluralized
# method will then call our expression method on each object in the passed list,
# and return the expressed versions as a list, in the same order.
#
express_key_values = pluralized(express_key_value)
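###
# Hedged usage sketch of the pluralized expression method (kv1 and kv2 are
# illustrative placeholders, not fixtures defined in this file):
#
#   express_key_values([kv1, kv2]) == [express_key_value(kv1),
#                                      express_key_value(kv2)]
#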
class DescribeKeyValueViewSet(ViewSetTest):
list_url = lambda_fixture(
lambda:
url_for('views-key-values-list'))
detail_url = lambda_fixture(
lambda key_value:
url_for('views-key-values-detail', pk=key_value.pk))
class DescribeList(
UsesGetMethod,
UsesListEndpoint,
Returns200,
):
        # Here, we create some rows in the DB to play with. We set autouse=True,
        # so the fixture is evaluated even though nothing explicitly requests it.
        # A mark like @pytest.mark.late (from pytest-fixture-order) can be used
        # to ensure the http request is run *after* all autouse fixtures.
        key_values = lambda_fixture(
            lambda: [
                KeyValue.objects.create(key=key, value=value)
                for key, value in {
                    'alpha': 'beta',
                    'delta': 'gamma',
                }.items()
            ],
            autouse=True,
        )
def it_returns_key_values_rows(self, key_values, results):
expected = express_key_values(key_values)
actual = results
assert expected == actual
class DescribeCreate(
UsesPostMethod,
UsesListEndpoint,
Returns201,
):
data = static_fixture({
'key': 'apple',
'value': 'π',
})
###
# precondition_fixture uses the pytest dependency graph to ensure that,
# if requested, this fixture is *always* evaluated before our HTTP request
# is made.
#
# Here, we record the existing KeyValue IDs, so we can verify that a
# new row was indeed created by our endpoint.
#
initial_key_value_ids = precondition_fixture(
lambda:
set(KeyValue.objects.values_list('pk', flat=True)))
def it_creates_key_value(self, initial_key_value_ids, json):
expected = initial_key_value_ids | {json['id']}
actual = set(KeyValue.objects.values_list('pk', flat=True))
assert expected == actual
def it_returns_key_value(self, json):
key_value = KeyValue.objects.get(pk=json['id'])
expected = express_key_value(key_value)
actual = json
assert expected == actual
def it_sets_model_fields(self, data, json):
key_value = KeyValue.objects.get(pk=json['id'])
expected = data
assert_model_attrs(key_value, expected)
class DescribeRetrieve(
UsesGetMethod,
UsesDetailEndpoint,
Returns200,
):
# NOTE: autouse=True is not used, because the detail_url requests this
# fixture
key_value = lambda_fixture(
lambda:
KeyValue.objects.create(
key='apple',
value='π',
))
def it_returns_key_value(self, key_value, json):
expected = express_key_value(key_value)
actual = json
assert expected == actual
class DescribeUpdate(
UsesPatchMethod,
UsesDetailEndpoint,
Returns200,
):
# NOTE: autouse=True is not used, because the detail_url requests this
# fixture
key_value = lambda_fixture(
lambda:
KeyValue.objects.create(
key='apple',
value='π',
))
data = static_fixture({
'key': 'banana',
'value': 'ρ',
})
###
# precondition_fixture uses the pytest dependency graph to ensure that,
# if requested, this fixture is *always* evaluated before our HTTP request
# is made.
#
# Here, we record the existing KeyValue IDs, so we can verify that no
# new rows are created by our endpoint. We request the `key_value` fixture,
# to ensure it's included in this set.
#
initial_key_value_ids = precondition_fixture(
lambda key_value:
set(KeyValue.objects.values_list('pk', flat=True)))
def it_updates_key_value(self, key_value, data):
# After updating, refreshing our DB row is vital — otherwise, it
# will appear as though our endpoint is not doing its job.
key_value.refresh_from_db()
expected = data
assert_model_attrs(key_value, expected)
def it_returns_key_value(self, key_value, json):
# After updating, refreshing our DB row is vital — otherwise, it
# will appear as though our endpoint is not doing its job.
key_value.refresh_from_db()
expected = express_key_value(key_value)
actual = json
assert expected == actual
def it_doesnt_create_or_destroy_rows(self, initial_key_value_ids):
expected = initial_key_value_ids
actual = set(KeyValue.objects.values_list('pk', flat=True))
assert expected == actual
class DescribeDestroy(
UsesDeleteMethod,
UsesDetailEndpoint,
Returns204,
):
# NOTE: autouse=True is not used, because the detail_url requests this
# fixture
key_value = lambda_fixture(
lambda:
KeyValue.objects.create(
key='apple',
value='π',
))
###
# precondition_fixture uses the pytest dependency graph to ensure that,
# if requested, this fixture is *always* evaluated before our HTTP request
# is made.
#
# Here, we record the existing KeyValue IDs, so we can verify that our
# endpoint actually deletes the row
#
initial_key_value_ids = precondition_fixture(
lambda:
set(KeyValue.objects.values_list('pk', flat=True)))
def it_deletes_key_value(self, key_value, initial_key_value_ids):
expected = initial_key_value_ids - {key_value.id}
actual = set(KeyValue.objects.values_list('pk', flat=True))
assert expected == actual
|
11546160
|
import sys, re
from optparse import OptionParser
usage = "usage: %prog [options] infile outfile"
parser = OptionParser(usage=usage)
parser.add_option("-f", "--from", dest="from_string", type="string",
help="The value to that will be replaced.")
parser.add_option("-t", "--to", dest="to_string", type="string",
help="The value used to replace the from string.")
parser.add_option("-v", "--verbose", action="store_true", default="false")
(options, args) = parser.parse_args()
if len(args) < 2:
    parser.print_usage()
    sys.exit(1)
infile = args[0]
outfile = args[1]
if options.verbose:
    print("infile: %s" % infile)
    print("outfile: %s" % outfile)
if options.from_string is not None and options.to_string is not None:
    print("Replacing '%s' with '%s'" % (options.from_string, options.to_string))
elif options.to_string is None:
    print("No replacement (missing --to argument)")
elif options.from_string is None:
    print("No replacement (missing --from argument)")
with open(infile, 'r') as f:
    lines = f.readlines()
with open(outfile, 'w') as o:
    for line in lines:
        if options.from_string is not None and options.to_string is not None:
            o.write(re.sub(options.from_string, options.to_string, line))
        else:
            o.write(line)
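# Hedged usage sketch (the script filename is an assumption):
#   python replace.py --from old --to new input.txt output.txt
# Note that --from/--to are passed to re.sub, so the from-string is treated
# as a regular expression.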
|
11546165
|
from typing import List
from boa3.builtin import public
from boa3.builtin.interop.runtime import Notification
from boa3.builtin.type import UInt160
from boa3_test.test_sc.interop_test.runtime.GetNotifications import with_param
@public
def main(args: list, key: UInt160) -> List[Notification]:
return with_param(args, key)
|
11546173
|
import nox
@nox.session(python=["3.7", "3.8", "3.9", "3.10"])
def tests(session):
session.install(".[all]")
session.install(".[tests]")
session.run("pytest", "--disable-warnings", "--asyncio-mode=auto")
|
11546184
|
import unittest
import json
import os
from jinja2 import TemplateNotFound
from PyStacks.PyStacks.template import getResources
from PyStacks.PyStacks.template import template
from PyStacks.PyStacks.template import writecompiled
from PyStacks.PyStacks.template import templateCF
from PyStacks.PyStacks.template import voltron
class TestTemplate(unittest.TestCase):
def test_getResources_maps_array(self):
actual = getResources(resources=[1, 2, 3, 4, 5])
self.assertEqual([1, 2, 3, 4, 5], actual)
def test_template_exists(self):
actual = template(template='./resources/ec2', ec2={})
self.assertEqual(5, len(actual))
def test_template_file_not_exist(self):
with self.assertRaises(TemplateNotFound):
template(template='./resources/SHOULDNOTEXIST', ec2={})
def test_writecompiled_file_checkvalue(self):
writecompiled(data='this is some data', name='test', region='aps2')
filedirectory = os.path.dirname(os.path.realpath(__file__))
data = None
filepath = os.path.join(filedirectory,
'../configs/user/compiled/aps2/test.json')
with open(filepath) as data_file:
data = json.load(data_file)
self.assertEqual('this is some data', data)
os.remove(filepath)
def test_writecompiled_file_array(self):
writecompiled(data=[1, 2, 3], name='test_array', region='aps2')
filedirectory = os.path.dirname(os.path.realpath(__file__))
data = None
filepath = os.path.join(
filedirectory, '../configs/user/compiled/aps2/test_array.json')
with open(filepath) as data_file:
data = json.load(data_file)
self.assertEqual([1, 2, 3], data)
os.remove(filepath)
def test_writecompiled_compile_folder_not_exist(self):
filedirectory = os.path.dirname(os.path.realpath(__file__))
directorypath = os.path.join(filedirectory,
'../configs/user/compiled/NOTAREGION/')
if os.path.exists(directorypath):
os.rmdir(directorypath)
writecompiled(
data='this is some data', name='test', region='NOTAREGION')
data = None
filepath = os.path.join(
filedirectory, '../configs/user/compiled/NOTAREGION/test.json')
with open(filepath) as data_file:
data = json.load(data_file)
self.assertEqual('this is some data', data)
os.remove(filepath)
if os.path.exists(directorypath):
os.rmdir(directorypath)
def test_templateCF_empty_string_empty_dict(self):
actual = templateCF(resources='', path='')
self.assertEqual({}, actual)
def test_templateCF_empty_resources_empty_dict(self):
actual = templateCF(resources=[], path=None)
self.assertEqual({}, actual)
# def test_templateCF(self):
# resources = {
# 's3': {
# 'S3Bucket': {
# 'name': 'stuff.holder',
# 'accesscontrol': 'PublicRead',
# 'versioning': True,
# 'tags': {
# 'Name': 'Api'
# }
# }
# },
# 's3_policies': {
# 'S3BucketPolicies': {
# 'policy': '"what": "on earth"'
# }
# }
# }
# expected = {
# 'S3BucketPolicies': {
# 'Type': 'AWS::S3::BucketPolicy',
# 'Properties': {
# 'what': 'on earth'
# }
# },
# 'S3Bucket': {
# 'Type': 'AWS::S3::Bucket',
# 'Properties': {
# 'AccessControl': 'PublicRead',
# 'VersioningConfiguration': {
# 'Status': 'Enabled'
# },
# 'BucketName': 'stuff.holder',
# 'Tags': [
# {
# 'Key': 'Name',
# 'Value': 'Api'
# }
# ]
# }
# }
# }
# actual = templateCF(resources, 'resources')
# self.assertDictEqual(actual, expected)
def test_voltron(self):
resources = {
's3': {
'S3Bucket': {
'name': 'stuff.holder',
'accesscontrol': 'PublicRead',
'versioning': True,
'tags': {
'Name': 'Api'
}
}
},
's3_policies': {
'S3BucketPolicies': {
'policy': '"what": "on earth"'
}
}
}
actual = voltron(
stack='MyLittleStack',
description='MyLittleDescription',
parameters='MyLittleParameter',
mappings='MyLittleMapping',
resources=resources,
outputs=resources
)
expected = {
'Description': 'MyLittleDescription',
'Parameters': 'MyLittleParameter',
'AWSTemplateFormatVersion': '2010-09-09',
'Outputs': {
'S3Bucket': {
'Description': 'S3Bucket Object',
'Export': {'Name': {'Fn::Sub': '${AWS::StackName}-S3-S3Bucket'}},
'Value': {'Ref': 'S3Bucket'}
},
"S3BucketARN": {
"Description": "S3Bucket ARN",
"Export": {
"Name": {
"Fn::Sub": "${AWS::StackName}-S3-S3Bucket-ARN"
}
},
"Value": {
"Fn::GetAtt": [
"S3Bucket",
"Arn"
]
}
},
'S3BucketDomainName': {
'Description': 'S3Bucket Domain Name',
'Export': {
'Name': {'Fn::Sub': '${AWS::StackName}-S3-S3Bucket-DomainName'}
},
'Value': {'Fn::GetAtt': ['S3Bucket', 'DomainName']}
}
},
'Resources': {
'S3BucketPolicies': {
'Type': 'AWS::S3::BucketPolicy',
'Properties': {
'what': 'on earth'
}
},
'S3Bucket': {
'Type': 'AWS::S3::Bucket',
'Properties': {
'AccessControl': 'PublicRead',
'VersioningConfiguration': {
'Status': 'Enabled'
},
'BucketName': 'stuff.holder',
'Tags': [
{
'Key': 'Name',
'Value': 'Api'
}
]
}
}
},
'Mappings': 'MyLittleMapping'
}
        print(json.dumps(actual, sort_keys=True, indent=4, separators=(',', ': ')))
        print(expected)
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|