class OptionPlotoptionsDumbbellSonificationContexttracksMappingNoteduration(Options):

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)

def prod_inline(p):
p = inline(p, 'producer(_)')
p = inline(p, 'consumer(_)')
p = fuse_at(p, 'f', 'g', p.find_loop('i #1'))
loop = p.find_loop('i')
p = store_at(p, 'f', 'g', loop)
p = fuse_at(p, 'f', 'g', p.find_loop('j #1'))
loop = p.find_loop('j')
p = store_at(p, 'f', 'g', loop)
p = p
p = unroll_loop(p, 'ji')
p = unroll_loop(p, 'ii')
for i in range(5):
p = inline_assign(p, p.find('g[_] = _').prev())
p = delete_buffer(p, 'f : _')
p = rename(p, 'p_inline')
    return p

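# Descriptive note (added; not part of the original source): this schedule inlines
# the producer/consumer wrappers, fuses the f and g loops at i and j, allocates the
# f buffer at the fused loops, unrolls the ji/ii tiles, forward-substitutes the
# five g stores, deletes the now-dead f buffer, and renames the result 'p_inline'.
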
class OptionSeriesVectorDataMarker(Options):

    @property
    def enabled(self):
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def enabledThreshold(self):
        return self._config_get(2)

    @enabledThreshold.setter
    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)

    @property
    def fillColor(self):
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def height(self):
        return self._config_get(None)

    @height.setter
    def height(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineColor(self):
        return self._config_get('#ffffff')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        return self._config_get(0)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def radius(self):
        return self._config_get(4)

    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)

    @property
    def states(self) -> 'OptionSeriesVectorDataMarkerStates':
        return self._config_sub_data('states', OptionSeriesVectorDataMarkerStates)

    @property
    def symbol(self):
        return self._config_get(None)

    @symbol.setter
    def symbol(self, text: str):
        self._config(text, js_type=False)

    @property
    def width(self):
        return self._config_get(None)

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)

class SelectOp(Node):
def forward(self, *args, **kwargs):
if (len(args[0]) <= args[1]):
return None
return args[0][args[1]]
def follow(self, *args, **kwargs):
l = args[0]
idx = args[1]
if ((l is None) and (idx is None)):
return None
elif ((l is not None) and (len(l) == (idx + 1))):
return fmap(('eos', None), ('*', l[idx]))
else:
return None
def final(self, ops, operands, result, **kwargs):
l = ops[0]
idx = ops[1]
if (idx != 'fin'):
return 'var'
if (l == 'fin'):
return 'fin'
if ((result is not None) and ((l == 'fin') or (l == 'inc'))):
return 'fin'
else:
            return 'var'

import sys


class CoinChanger(object):
def make_change(self, coins, total):
if ((coins is None) or (total is None)):
raise TypeError('coins or total cannot be None')
if ((not coins) or (total == 0)):
return 0
cache = {}
return self._make_change(coins, total, cache)
def _make_change(self, coins, total, cache):
if (total == 0):
return 0
if (total in cache):
return cache[total]
min_ways = sys.maxsize
for coin in coins:
if ((total - coin) < 0):
continue
ways = self._make_change(coins, (total - coin), cache)
if (ways < min_ways):
min_ways = ways
cache[total] = (min_ways + 1)
        return cache[total]

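# Minimal usage sketch (added; not part of the original source): the fewest coins
# totalling 5 from denominations {1, 2, 3} is 2 (2 + 3).
changer = CoinChanger()
assert changer.make_change([1, 2, 3], 5) == 2
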
@pytest.mark.parametrize('writer_schema,reader_schema', [('int', 'null'), ('long', 'null'), ('int', 'boolean'), ('null', 'int'), ('boolean', 'int'), ('long', 'int'), ('float', 'int'), ('double', 'int'), ('float', 'long'), ('double', 'long'), ('double', 'float'), ('string', 'double'), ('string', FIXED_4_BYTES), ('boolean', 'string'), ('int', 'string'), ('null', 'bytes'), ('int', 'bytes'), ('int', A_INT_RECORD), (LONG_ARRAY, INT_ARRAY), (INT_ARRAY, INT_MAP), (INT_MAP, INT_ARRAY), (LONG_MAP, INT_MAP), (ENUM2_AB, 'int'), ('int', ENUM2_AB), (FIXED_4_BYTES, FIXED_8_BYTES), (FIXED_8_BYTES, FIXED_4_BYTES)])
def test_schema_incompatibility(writer_schema, reader_schema):
bio = BytesIO()
writer_data = generate_one(writer_schema)
fastavro.writer(bio, writer_schema, [writer_data])
bio.seek(0)
with pytest.raises(SchemaResolutionError):
        list(fastavro.reader(bio, reader_schema))

class OptionSeriesBubbleDataMarker(Options):

    @property
    def enabled(self):
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def fillColor(self):
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        return self._config_get('#ffffff')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        return self._config_get(0)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def states(self) -> 'OptionSeriesBubbleDataMarkerStates':
        return self._config_sub_data('states', OptionSeriesBubbleDataMarkerStates)

    @property
    def symbol(self):
        return self._config_get(None)

    @symbol.setter
    def symbol(self, text: str):
        self._config(text, js_type=False)

@patch('ecs_deploy.cli.get_client')
def test_cron_without_credentials(get_client, runner):
get_client.return_value = EcsTestClient()
result = runner.invoke(cli.cron, (CLUSTER_NAME, TASK_DEFINITION_FAMILY_1, 'rule'))
assert (result.exit_code == 1)
    assert (u'Unable to locate credentials. Configure credentials by running "aws configure".\n\n' in result.output)

def create_saml_provider_config(provider_id, idp_entity_id, sso_url, x509_certificates, rp_entity_id, callback_url, display_name=None, enabled=None, app=None):
client = _get_client(app)
    return client.create_saml_provider_config(provider_id, idp_entity_id=idp_entity_id, sso_url=sso_url, x509_certificates=x509_certificates, rp_entity_id=rp_entity_id, callback_url=callback_url, display_name=display_name, enabled=enabled)

def extractOnemachineshowBlogspotCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
    tagmap = [('amdtvba', 'After My Death, The Villain Blackened Again', 'translated'), ('ebed', 'Quick Transmigration: Ex-Girlfriend Blackens Every Day', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False

class OptionSeriesAreaSonificationContexttracksMappingTremoloDepth(Options):

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)

def _load_single_track(cfg, track_repository, track_name, install_dependencies=False):
try:
track_dir = track_repository.track_dir(track_name)
reader = TrackFileReader(cfg)
current_track = reader.read(track_name, track_repository.track_file(track_name), track_dir)
tpr = TrackProcessorRegistry(cfg)
if install_dependencies:
_install_dependencies(current_track.dependencies)
has_plugins = load_track_plugins(cfg, track_name, register_track_processor=tpr.register_track_processor)
current_track.has_plugins = has_plugins
for processor in tpr.processors:
processor.on_after_load_track(current_track)
return current_track
except FileNotFoundError as e:
logging.getLogger(__name__).exception('Cannot load track [%s]', track_name)
raise exceptions.SystemSetupError(f'Cannot load track [{track_name}]. List the available tracks with [{PROGRAM_NAME} list tracks].') from e
except BaseException:
logging.getLogger(__name__).exception('Cannot load track [%s]', track_name)
        raise

class LiteEthEtherboneWishboneSlave(LiteXModule):
def __init__(self):
self.bus = bus = wishbone.Interface()
self.sink = sink = stream.Endpoint(eth_etherbone_mmap_description(32))
self.source = source = stream.Endpoint(eth_etherbone_mmap_description(32))
self.fsm = fsm = FSM(reset_state='IDLE')
fsm.act('IDLE', sink.ready.eq(1), If((bus.stb & bus.cyc), If(bus.we, NextState('SEND_WRITE')).Else(NextState('SEND_READ'))))
fsm.act('SEND_WRITE', source.valid.eq(1), source.last.eq(1), source.last_be.eq((1 << 3)), source.base_addr[2:].eq(bus.adr), source.count.eq(1), source.be.eq(bus.sel), source.we.eq(1), source.data.eq(bus.dat_w), If((source.valid & source.ready), bus.ack.eq(1), NextState('IDLE')))
fsm.act('SEND_READ', source.valid.eq(1), source.last.eq(1), source.last_be.eq((1 << 3)), source.base_addr.eq(0), source.count.eq(1), source.be.eq(bus.sel), source.we.eq(0), source.data[2:].eq(bus.adr), If((source.valid & source.ready), NextState('WAIT_READ')))
        fsm.act('WAIT_READ', sink.ready.eq(1), If((sink.valid & sink.we), bus.ack.eq(1), bus.dat_r.eq(sink.data), NextState('IDLE')))

def decrypt(data: bytes, password: str):
    import hashlib
    from Crypto.Cipher import AES
bs = AES.block_size
if (len(data) <= bs):
return data.decode()
unpad = (lambda s: s[0:(- ord(s[(- 1):]))])
iv = data[:bs]
padpass = hashlib.md5(password.encode()).hexdigest().encode()
cipher = AES.new(padpass, AES.MODE_CBC, iv)
data = unpad(cipher.decrypt(data[bs:]))
    return data.decode()

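# Hypothetical matching encryptor (added; not in the original source), assuming the
# layout the decryptor above expects: a random IV prefix, PKCS#7-style padding, and
# an AES key derived from the MD5 hexdigest of the password.
def encrypt(data: str, password: str) -> bytes:
    import hashlib
    import os
    from Crypto.Cipher import AES
    bs = AES.block_size
    pad_len = bs - (len(data) % bs)
    padded = data.encode() + bytes([pad_len]) * pad_len  # each pad byte encodes the pad length
    iv = os.urandom(bs)
    key = hashlib.md5(password.encode()).hexdigest().encode()  # 32-byte key (AES-256)
    return iv + AES.new(key, AES.MODE_CBC, iv).encrypt(padded)
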
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None):
if isinstance(hashfunc, string_types):
hashfunc = _hash_funcs[hashfunc]
elif (not hashfunc):
hashfunc = hashlib.sha1
data = to_bytes(data)
salt = to_bytes(salt)
if _has_native_pbkdf2:
_test_hash = hashfunc()
if (hasattr(_test_hash, 'name') and (_test_hash.name in _hash_funcs)):
return hashlib.pbkdf2_hmac(_test_hash.name, data, salt, iterations, keylen)
mac = hmac.HMAC(data, None, hashfunc)
if (not keylen):
keylen = mac.digest_size
def _pseudorandom(x, mac=mac):
h = mac.copy()
h.update(x)
return bytearray(h.digest())
buf = bytearray()
for block in range_type(1, ((- ((- keylen) // mac.digest_size)) + 1)):
rv = u = _pseudorandom((salt + _pack_int(block)))
for i in range_type((iterations - 1)):
u = _pseudorandom(bytes(u))
rv = bytearray(starmap(xor, izip(rv, u)))
buf.extend(rv)
    return bytes(buf[:keylen])

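# Sanity check (added; not from the original source): with the default SHA-1 hash,
# pbkdf2_bin should agree with the stdlib's PBKDF2 implementation.
import hashlib
assert pbkdf2_bin(b'secret', b'salt', iterations=1000, keylen=24) == \
    hashlib.pbkdf2_hmac('sha1', b'secret', b'salt', 1000, 24)
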
def _check_no_duplicates(plugins: List[EntryPoint]) -> None:
seen: Set[str] = set()
duplicate_plugins = [p for p in plugins if ((p.name in seen) or seen.add(p.name))]
error_msg = f'Found plugins with the same id: {pprint.pformat(duplicate_plugins)}'
    enforce((len(duplicate_plugins) == 0), error_msg, AEAPluginError)

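# Why the comprehension above works (illustrative, not from the source): set.add()
# returns None, so `(p.name in seen) or seen.add(p.name)` is truthy only for names
# already seen, while still recording first occurrences as a side effect.
seen_demo: set = set()
dupes = [n for n in ['a', 'b', 'a'] if (n in seen_demo) or seen_demo.add(n)]
assert dupes == ['a']
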
def test_plotting_vector_field():
mesh = UnitSquareMesh(10, 10)
V = VectorFunctionSpace(mesh, 'CG', 1)
f = Function(V)
x = SpatialCoordinate(mesh)
f.interpolate(as_vector(((- x[1]), x[0])))
(fig, axes) = plt.subplots()
contours = tricontourf(f, axes=axes)
assert (contours is not None)
    fig.colorbar(contours)

class NodesSortFilterProxyModel(QSortFilterProxyModel):
def filterAcceptsRow(self, sourceRow, sourceParent):
if (not self.filterRegExp().pattern()):
return True
if self.filterAcceptsRowItself(sourceRow, sourceParent):
return True
if self.hasAcceptedChildren(sourceRow, sourceParent):
return True
return False
def hasAcceptedChildren(self, sourceRow, sourceParent):
index = self.sourceModel().index(sourceRow, 0, sourceParent)
if (not index.isValid()):
return False
childCount = index.model().rowCount(index)
if (childCount == 0):
return False
for i in range(childCount):
if self.filterAcceptsRowItself(i, index):
return True
if self.hasAcceptedChildren(i, index):
return True
return False
def filterAcceptsRowItself(self, sourceRow, sourceParent):
index0 = self.sourceModel().index(sourceRow, 0, sourceParent)
item = self.sourceModel().data(index0)
if (item is not None):
if ('{' not in item):
regex = self.filterRegExp()
return (regex.indexIn(self.sourceModel().data(index0)) != (- 1))
        return False

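# Note on the proxy above (added; not from the original source): a row is accepted
# if it matches the filter itself or if any descendant matches, so parent nodes stay
# visible while their matching children are filtered in.
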
class ScoresBackend(SettingsBase):
settings = ()
favorite_teams = []
all_games = True
date = None
games = {}
scroll_order = []
last_update = 0
def init(self):
if (len(self.team_colors) != len(self._default_colors)):
self.logger.debug(f'Overriding {self.name} team colors with: {self.team_colors}')
new_colors = copy.copy(self._default_colors)
new_colors.update(self.team_colors)
self.team_colors = new_colors
self.logger.debug(f'{self.name} team colors: {self.team_colors}')
def api_request(self, url):
self.logger.debug(f'Making {self.name} API request to {url}')
try:
with urlopen(url) as content:
try:
if (content.url != url):
self.logger.debug(f'Request to {url} was redirected to {content.url}')
content_type = dict(content.getheaders())['Content-Type']
charset = re.search('charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
response_json = content.read().decode(charset).strip()
if (not response_json):
self.logger.debug(f'JSON response from {url} was blank')
return {}
try:
response = json.loads(response_json)
except json.decoder.JSONDecodeError as exc:
                    self.logger.exception('Error encountered while loading JSON')
self.logger.debug(f'Text that failed to load: {response_json}')
return {}
self.logger.log(5, f'API response: {response}')
return response
except HTTPError as exc:
self.logger.critical(f'Error {exc.code} ({exc.reason}) making request to {exc.url}')
return {}
except (ConnectionResetError, URLError) as exc:
self.logger.critical(f'Error making request to {url}: {exc}')
return {}
    @property
    def name(self):
return self.__class__.__name__
def get_api_date(self):
api_date = None
if ((self.date is not None) and (not isinstance(self.date, datetime))):
try:
api_date = datetime.strptime(self.date, '%Y-%m-%d')
except (TypeError, ValueError):
self.logger.warning(f"Invalid date '{self.date}'")
if (api_date is None):
utc_time = pytz.utc.localize(datetime.utcnow())
eastern = pytz.timezone('US/Eastern')
api_date = eastern.normalize(utc_time.astimezone(eastern))
if (api_date.hour < 10):
api_date -= timedelta(days=1)
self.date = api_date
    @staticmethod
    def add_ordinal(number):
try:
number = int(number)
except ValueError:
return number
if (4 <= number <= 20):
suffix = 'th'
else:
ord_map = {1: 'st', 2: 'nd', 3: 'rd'}
suffix = ord_map.get((number % 10), 'th')
return f'{number}{suffix}'
    @staticmethod
    def zero_fallback(value):
try:
int(value)
except (TypeError, ValueError):
return '0'
else:
return str(value)
def get_nested(self, data, expr, callback=None, default=''):
if (callback is None):
def callback(x):
return x
try:
for key in expr.split(':'):
if (key.isdigit() and isinstance(data, list)):
key = int(key)
data = data[key]
except (KeyError, IndexError, TypeError):
self.logger.debug(f'No {self.name} data found at {expr}, falling back to {repr(default)}')
return default
return callback(data)
def interpret_api_return(self, data, team_game_map):
favorite_games = []
for team in self.favorite_teams:
for id_ in team_game_map.get(team, []):
if (id_ not in favorite_games):
favorite_games.append(id_)
if self.all_games:
additional_games = [x for x in data if (x not in favorite_games)]
else:
additional_games = []
self.games = {}
for game_id in (favorite_games + additional_games):
self.games[game_id] = self.process_game(data[game_id])
self.scroll_order = [self.games[x]['id'] for x in favorite_games]
for status in self.display_order:
time_map = {x: self.games[x]['start_time'] for x in self.games if ((x not in favorite_games) and (self.games[x]['status'] == status))}
sorted_games = sorted(time_map.items(), key=operator.itemgetter(1))
self.scroll_order.extend([x[0] for x in sorted_games])
        self.scroll_order_revmap = {y: x for (x, y) in enumerate(self.scroll_order)}

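# Illustrative behaviour of add_ordinal above (added; not from the original source):
# 4-20 always take 'th'; otherwise the last digit picks the suffix, and non-numeric
# input is returned unchanged.
assert ScoresBackend.add_ordinal(2) == '2nd'
assert ScoresBackend.add_ordinal(11) == '11th'
assert ScoresBackend.add_ordinal(23) == '23rd'
assert ScoresBackend.add_ordinal('n/a') == 'n/a'
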
def CPluginInit():
tarr = []
filenames = glob.glob('_C*.py')
filenames.sort()
for fname in filenames:
tarr = [0, 0, 0]
tarr[0] = fname
with open(fname, 'r', encoding='utf8') as fcont:
for line in fcont:
if ('CONTROLLER_ID' in line):
tarr[1] = line[(line.find('=') + 1):].strip().replace('"', '')
if ('CONTROLLER_NAME' in line):
tarr[2] = line[(line.find('=') + 1):].strip().replace('"', '')
break
tarr[0] = tarr[0].replace('.py', '')
rpieGlobals.controllerselector.append(tarr)
print('Load controllers from file')
Settings.loadcontrollers()
for x in range(0, len(Settings.Tasks)):
try:
if (Settings.Tasks[x] and (type(Settings.Tasks[x]) is not bool)):
if Settings.Tasks[x].enabled:
Settings.Tasks[x].plugin_init(None)
for y in range(len(Settings.Tasks[x].senddataenabled)):
if Settings.Tasks[x].senddataenabled[y]:
if Settings.Controllers[y]:
if Settings.Controllers[y].enabled:
Settings.Tasks[x].controllercb[y] = Settings.Controllers[y].senddata
except Exception as e:
Settings.Tasks[x].enabled = False
misc.addLog(rpieGlobals.LOG_LEVEL_ERROR, ((('Task ' + str((x + 1))) + ' disabled! ') + str(e)))
for y in range(0, len(Settings.Controllers)):
if Settings.Controllers[y]:
try:
if Settings.Controllers[y].enabled:
Settings.Controllers[y].controller_init(None)
Settings.Controllers[y].setonmsgcallback(Settings.callback_from_controllers)
except Exception as e:
Settings.Controllers[y].enabled = False
misc.addLog(rpieGlobals.LOG_LEVEL_ERROR, ((('Controller ' + str((y + 1))) + ' disabled! ') + str(e)))
    return 0

class InputTypeDescriptionMap(Enum):
string = 'Please enter a string value:'
number = 'Please enter a number value:'
boolean = 'Please select or deselect the checkbox:'
file = 'Please select a file to use as input:'
inputpath = 'Please select an output from a parent:'
outputpath = None
    parameter = 'Please select a parameter to use as input:'

class Sensor(GenericSensor):
def setup_module(self) -> None:
from w1thermsensor import W1ThermSensor
from w1thermsensor.sensors import Sensor as SensorType
        sensor_types: Dict[str, SensorType] = {s.name: s for s in list(SensorType)}
self.sensor_type = sensor_types[self.config['type'].upper()]
self.sensor = W1ThermSensor(self.sensor_type, self.config['address'].lower())
def get_value(self, sens_conf: ConfigType) -> SensorValueType:
        return cast(float, self.sensor.get_temperature())

class TestSampleSheetPredictor(unittest.TestCase):
def setUp(self):
self.hiseq_sample_sheet_content = u'[Header]\nIEMFileVersion,4\nDate,06/03/2014\nWorkflow,GenerateFASTQ\nApplication,HiSeq FASTQ Only\nAssay,Nextera\nDescription,\nChemistry,Amplicon\n\n[Reads]\n101\n101\n\n[Settings]\nReverseComplement,0\nAdapter,CTGTCTCTTATACACATCT\n\n[Data]\nLane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\n1,PJB1-1579,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,\n1,PJB2-1580,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,\n2,PJB1-1579,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,\n2,PJB2-1580,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,\n'
self.miseq_sample_sheet_content = u'[Header]\nIEMFileVersion,4\nDate,4/11/2014\nWorkflow,Metagenomics\nApplication,Metagenomics 16S rRNA\nAssay,Nextera XT\nDescription,\nChemistry,Amplicon\n\n[Reads]\n150\n150\n\n[Settings]\nAdapter,CTGTCTCTTATACACATCT\n\n[Data]\nSample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\nA8,A8,,,N701,TAAGGCGA,S501,TAGATCGC,PJB,\nB8,B8,,,N702,CGTACTAG,S501,TAGATCGC,PJB,\n'
self.casava_sample_sheet_content = u'FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject\nDADA331XX,1,PhiX,PhiX control,CTGCCT,Control,,,Peter,Control\nDADA331XX,2,884-1,PB-884-1,AGTCAA,RNA-seq,,,Peter,AR\nDADA331XX,3,885-1,PB-885-1,AGTTCC,RNA-seq,,,Peter,AR\nDADA331XX,4,886-1,PB-886-1,ATGTCA,RNA-seq,,,Peter,AR\nDADA331XX,5,884-1,PB-884-1,AGTCAA,RNA-seq,,,Peter,AR\nDADA331XX,6,885-1,PB-885-1,AGTTCC,RNA-seq,,,Peter,AR\nDADA331XX,7,886-1,PB-886-1,ATGTCA,RNA-seq,,,Peter,AR\nDADA331XX,8,PhiX,PhiX control,CTGCCT,Control,,,Peter,Control\n'
self.hiseq_sample_sheet_id_and_name_differ_content = u'[Header]\nIEMFileVersion,4\nDate,06/03/2014\nWorkflow,GenerateFASTQ\nApplication,HiSeq FASTQ Only\nAssay,Nextera\nDescription,\nChemistry,Amplicon\n\n[Reads]\n101\n101\n\n[Settings]\nReverseComplement,0\nAdapter,CTGTCTCTTATACACATCT\n\n[Data]\nLane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\n1,PJB1,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,\n1,PJB2,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,\n2,PJB1,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,\n2,PJB2,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,\n'
self.hiseq_sample_sheet_name_no_id_content = u'[Header]\nIEMFileVersion,4\nDate,06/03/2014\nWorkflow,GenerateFASTQ\nApplication,HiSeq FASTQ Only\nAssay,Nextera\nDescription,\nChemistry,Amplicon\n\n[Reads]\n101\n101\n\n[Settings]\nReverseComplement,0\nAdapter,CTGTCTCTTATACACATCT\n\n[Data]\nLane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\n1,,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,\n1,,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,\n2,,PJB1-1579,,,N701,CGATGTAT,N501,TCTTTCCC,PeterBriggs,\n2,,PJB2-1580,,,N702,TGACCAAT,N502,TCTTTCCC,PeterBriggs,\n'
self.hiseq_sample_sheet_no_barcodes = u'[Header]\nIEMFileVersion,4\nDate,06/03/2014\nWorkflow,GenerateFASTQ\nApplication,HiSeq FASTQ Only\nAssay,Nextera\nDescription,\nChemistry,Amplicon\n\n[Reads]\n101\n101\n\n[Settings]\nReverseComplement,0\nAdapter,CTGTCTCTTATACACATCT\n\n[Data]\nLane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\n1,PJB1,PJB1,,,,,,,PeterBriggs,\n2,PJB2,PJB2,,,,,,,PeterBriggs,\n'
self.hiseq_sample_sheet_lanes_out_of_order = u'[Header]\nIEMFileVersion,4\nDate,06/03/2014\nWorkflow,GenerateFASTQ\nApplication,HiSeq FASTQ Only\nAssay,Nextera\nDescription,\nChemistry,Amplicon\n\n[Reads]\n101\n101\n\n[Settings]\nReverseComplement,0\nAdapter,CTGTCTCTTATACACATCT\n\n[Data]\nLane,Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\n2,AB1,AB1,,,N701,CGATGTAT,N501,TCTTTCCC,AlanBarclay,\n2,AB2,AB2,,,N702,TGACCAAT,N502,TCTTTCCC,AlanBarclay,\n1,CD3,CD3,,,N701,GTATCGAT,N501,TCTTTCCC,CarlDavis,\n1,CD4,CD4,,,N702,CAATTGAC,N502,TCTTTCCC,CarlDavis,\n'
self.miseq_sample_sheet_no_projects = u'[Header]\nIEMFileVersion,4\nDate,11/23/2015\nWorkflow,GenerateFASTQ\nApplication,FASTQ Only\nAssay,TruSeq HT\nDescription,\nChemistry,Amplicon\n\n[Reads]\n101\n101\n\n[Settings]\nReverseComplement,0\nAdapter,AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\nAdapterRead2,AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\n\n[Data]\nSample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,index,I5_Index_ID,index2,Sample_Project,Description\nSample1,Sample1,,,D701,CGTGTAGG,D501,GACCTGTA,,\nSample2,Sample2,,,D702,CGTGTAGG,D501,ATGTAACT,,\n'
def test_samplesheet_predictor_iem_with_lanes(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1-1579', 'PJB2-1580'])
sample1 = project.get_sample('PJB1-1579')
sample2 = project.get_sample('PJB2-1580')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=True, paired_end=False)
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_R1_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=False, paired_end=True)
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L001_R2_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L001_R2_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R2_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=True, paired_end=True)
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_R1_001.fastq.gz', 'PJB1-1579_S1_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_R1_001.fastq.gz', 'PJB2-1580_S2_R2_001.fastq.gz'])
predictor.set(package='casava', paired_end=False)
self.assertEqual(project.dir_name, 'Project_PeterBriggs')
self.assertEqual(sample1.dir_name, 'Sample_PJB1-1579')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_CGATGTAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'Sample_PJB2-1580')
self.assertEqual(sample2.fastqs(), ['PJB2-1580_TGACCAAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L002_R1_001.fastq.gz'])
predictor.set(package='casava', paired_end=True)
self.assertEqual(project.dir_name, 'Project_PeterBriggs')
self.assertEqual(sample1.dir_name, 'Sample_PJB1-1579')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_CGATGTAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L001_R2_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L002_R1_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L002_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'Sample_PJB2-1580')
self.assertEqual(sample2.fastqs(), ['PJB2-1580_TGACCAAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L001_R2_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L002_R1_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L002_R2_001.fastq.gz'])
def test_samplesheet_predictor_iem_no_lanes(self):
iem = SampleSheet(fp=io.StringIO(self.miseq_sample_sheet_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PJB'])
project = predictor.get_project('PJB')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['A8', 'B8'])
sample1 = project.get_sample('A8')
sample2 = project.get_sample('B8')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['TAAGGCGA-TAGATCGC'])
self.assertEqual(sample2.barcode_seqs, ['CGTACTAG-TAGATCGC'])
self.assertEqual(sample1.lanes('TAAGGCGA-TAGATCGC'), [])
self.assertEqual(sample2.lanes('CGTACTAG-TAGATCGC'), [])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project.dir_name, 'PJB')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['A8_S1_L001_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['B8_S2_L001_R1_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=True)
self.assertEqual(project.dir_name, 'PJB')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['A8_S1_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['B8_S2_R1_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=False, paired_end=True)
self.assertEqual(project.dir_name, 'PJB')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['A8_S1_L001_R1_001.fastq.gz', 'A8_S1_L001_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['B8_S2_L001_R1_001.fastq.gz', 'B8_S2_L001_R2_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=True, paired_end=True)
self.assertEqual(project.dir_name, 'PJB')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['A8_S1_R1_001.fastq.gz', 'A8_S1_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['B8_S2_R1_001.fastq.gz', 'B8_S2_R2_001.fastq.gz'])
predictor.set(package='bcl2fastq2', no_lane_splitting=False, lanes=(1, 2), paired_end=True)
self.assertEqual(project.dir_name, 'PJB')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['A8_S1_L001_R1_001.fastq.gz', 'A8_S1_L001_R2_001.fastq.gz', 'A8_S1_L002_R1_001.fastq.gz', 'A8_S1_L002_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['B8_S2_L001_R1_001.fastq.gz', 'B8_S2_L001_R2_001.fastq.gz', 'B8_S2_L002_R1_001.fastq.gz', 'B8_S2_L002_R2_001.fastq.gz'])
predictor.set(package='casava', lanes=None, paired_end=False)
self.assertEqual(project.dir_name, 'Project_PJB')
self.assertEqual(sample1.dir_name, 'Sample_A8')
self.assertEqual(sample1.fastqs(), ['A8_TAAGGCGA-TAGATCGC_L001_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'Sample_B8')
self.assertEqual(sample2.fastqs(), ['B8_CGTACTAG-TAGATCGC_L001_R1_001.fastq.gz'])
predictor.set(package='casava', paired_end=True)
self.assertEqual(project.dir_name, 'Project_PJB')
self.assertEqual(sample1.dir_name, 'Sample_A8')
self.assertEqual(sample1.fastqs(), ['A8_TAAGGCGA-TAGATCGC_L001_R1_001.fastq.gz', 'A8_TAAGGCGA-TAGATCGC_L001_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'Sample_B8')
self.assertEqual(sample2.fastqs(), ['B8_CGTACTAG-TAGATCGC_L001_R1_001.fastq.gz', 'B8_CGTACTAG-TAGATCGC_L001_R2_001.fastq.gz'])
predictor.set(package='casava', lanes=(1, 2), paired_end=True)
self.assertEqual(project.dir_name, 'Project_PJB')
self.assertEqual(sample1.dir_name, 'Sample_A8')
self.assertEqual(sample1.fastqs(), ['A8_TAAGGCGA-TAGATCGC_L001_R1_001.fastq.gz', 'A8_TAAGGCGA-TAGATCGC_L001_R2_001.fastq.gz', 'A8_TAAGGCGA-TAGATCGC_L002_R1_001.fastq.gz', 'A8_TAAGGCGA-TAGATCGC_L002_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'Sample_B8')
self.assertEqual(sample2.fastqs(), ['B8_CGTACTAG-TAGATCGC_L001_R1_001.fastq.gz', 'B8_CGTACTAG-TAGATCGC_L001_R2_001.fastq.gz', 'B8_CGTACTAG-TAGATCGC_L002_R1_001.fastq.gz', 'B8_CGTACTAG-TAGATCGC_L002_R2_001.fastq.gz'])
def test_samplesheet_predictor_casava(self):
casava = SampleSheet(fp=io.StringIO(self.casava_sample_sheet_content))
predictor = SampleSheetPredictor(sample_sheet=casava)
self.assertEqual(predictor.nprojects, 2)
self.assertEqual(predictor.project_names, ['AR', 'Control'])
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
project1 = predictor.get_project('Control')
project2 = predictor.get_project('AR')
self.assertEqual(project1.sample_ids, ['PhiX'])
self.assertEqual(project2.sample_ids, ['884-1', '885-1', '886-1'])
sample1 = project1.get_sample('PhiX')
sample2 = project2.get_sample('884-1')
sample3 = project2.get_sample('885-1')
sample4 = project2.get_sample('886-1')
self.assertRaises(KeyError, project1.get_sample, 'DoesntExist')
self.assertRaises(KeyError, project2.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CTGCCT'])
self.assertEqual(sample2.barcode_seqs, ['AGTCAA'])
self.assertEqual(sample3.barcode_seqs, ['AGTTCC'])
self.assertEqual(sample4.barcode_seqs, ['ATGTCA'])
self.assertEqual(sample1.lanes('CTGCCT'), [1, 8])
self.assertEqual(sample2.lanes('AGTCAA'), [2, 5])
self.assertEqual(sample3.lanes('AGTTCC'), [3, 6])
self.assertEqual(sample4.lanes('ATGTCA'), [4, 7])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
self.assertEqual(sample3.s_index, 3)
self.assertEqual(sample4.s_index, 4)
predictor.set(package='bcl2fastq2')
self.assertEqual(project1.dir_name, 'Control')
self.assertEqual(project2.dir_name, 'AR')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PhiX_S1_L001_R1_001.fastq.gz', 'PhiX_S1_L008_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['884-1_S2_L002_R1_001.fastq.gz', '884-1_S2_L005_R1_001.fastq.gz'])
self.assertEqual(sample3.dir_name, None)
self.assertEqual(sample3.fastqs(), ['885-1_S3_L003_R1_001.fastq.gz', '885-1_S3_L006_R1_001.fastq.gz'])
self.assertEqual(sample4.dir_name, None)
self.assertEqual(sample4.fastqs(), ['886-1_S4_L004_R1_001.fastq.gz', '886-1_S4_L007_R1_001.fastq.gz'])
predictor.set(package='casava')
self.assertEqual(project1.dir_name, 'Project_Control')
self.assertEqual(project2.dir_name, 'Project_AR')
self.assertEqual(sample1.dir_name, 'Sample_PhiX')
self.assertEqual(sample1.fastqs(), ['PhiX_CTGCCT_L001_R1_001.fastq.gz', 'PhiX_CTGCCT_L008_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'Sample_884-1')
self.assertEqual(sample2.fastqs(), ['884-1_AGTCAA_L002_R1_001.fastq.gz', '884-1_AGTCAA_L005_R1_001.fastq.gz'])
self.assertEqual(sample3.dir_name, 'Sample_885-1')
self.assertEqual(sample3.fastqs(), ['885-1_AGTTCC_L003_R1_001.fastq.gz', '885-1_AGTTCC_L006_R1_001.fastq.gz'])
self.assertEqual(sample4.dir_name, 'Sample_886-1')
self.assertEqual(sample4.fastqs(), ['886-1_ATGTCA_L004_R1_001.fastq.gz', '886-1_ATGTCA_L007_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_id_and_names_differ(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_id_and_name_differ_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1-1579', 'PJB2-1580'])
sample1 = project.get_sample('PJB1-1579')
sample2 = project.get_sample('PJB2-1580')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, 'PJB1')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'PJB2')
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz'])
predictor.set(package='casava')
self.assertEqual(project.dir_name, 'Project_PeterBriggs')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_CGATGTAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['PJB2-1580_TGACCAAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L002_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_force_sample_dir(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1-1579', 'PJB2-1580'])
sample1 = project.get_sample('PJB1-1579')
sample2 = project.get_sample('PJB2-1580')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2', force_sample_dir=True)
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, 'PJB1-1579')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, 'PJB2-1580')
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz'])
predictor.set(package='casava')
self.assertEqual(project.dir_name, 'Project_PeterBriggs')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_CGATGTAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['PJB2-1580_TGACCAAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L002_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_name_no_id(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_name_no_id_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1-1579', 'PJB2-1580'])
sample1 = project.get_sample('PJB1-1579')
sample2 = project.get_sample('PJB2-1580')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz'])
predictor.set(package='casava')
self.assertEqual(project.dir_name, 'Project_PeterBriggs')
self.assertEqual(sample1.fastqs(), ['PJB1-1579_CGATGTAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB1-1579_CGATGTAT-TCTTTCCC_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['PJB2-1580_TGACCAAT-TCTTTCCC_L001_R1_001.fastq.gz', 'PJB2-1580_TGACCAAT-TCTTTCCC_L002_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_no_barcodes(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_no_barcodes))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1', 'PJB2'])
sample1 = project.get_sample('PJB1')
sample2 = project.get_sample('PJB2')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['NoIndex'])
self.assertEqual(sample2.barcode_seqs, ['NoIndex'])
self.assertEqual(sample1.lanes(), [1])
self.assertEqual(sample2.lanes(), [2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.fastqs(), ['PJB1_S1_L001_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['PJB2_S2_L002_R1_001.fastq.gz'])
predictor.set(package='casava')
self.assertEqual(project.dir_name, 'Project_PeterBriggs')
self.assertEqual(sample1.fastqs(), ['PJB1_NoIndex_L001_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['PJB2_NoIndex_L002_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_lanes_out_of_order(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_lanes_out_of_order))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 2)
self.assertEqual(predictor.project_names, ['AlanBarclay', 'CarlDavis'])
project1 = predictor.get_project('AlanBarclay')
self.assertEqual(project1.sample_ids, ['AB1', 'AB2'])
sample1 = project1.get_sample('AB1')
sample2 = project1.get_sample('AB2')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [2])
self.assertEqual(sample1.s_index, 3)
self.assertEqual(sample2.s_index, 4)
project2 = predictor.get_project('CarlDavis')
self.assertEqual(project2.sample_ids, ['CD3', 'CD4'])
sample3 = project2.get_sample('CD3')
sample4 = project2.get_sample('CD4')
self.assertEqual(sample3.barcode_seqs, ['GTATCGAT-TCTTTCCC'])
self.assertEqual(sample4.barcode_seqs, ['CAATTGAC-TCTTTCCC'])
self.assertEqual(sample3.lanes('GTATCGAT-TCTTTCCC'), [1])
self.assertEqual(sample4.lanes('CAATTGAC-TCTTTCCC'), [1])
self.assertEqual(sample3.s_index, 1)
self.assertEqual(sample4.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project1.dir_name, 'AlanBarclay')
self.assertEqual(sample1.fastqs(), ['AB1_S3_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['AB2_S4_L002_R1_001.fastq.gz'])
self.assertEqual(project2.dir_name, 'CarlDavis')
self.assertEqual(sample3.fastqs(), ['CD3_S1_L001_R1_001.fastq.gz'])
self.assertEqual(sample4.fastqs(), ['CD4_S2_L001_R1_001.fastq.gz'])
predictor.set(package='casava')
self.assertEqual(project1.dir_name, 'Project_AlanBarclay')
self.assertEqual(sample1.fastqs(), ['AB1_CGATGTAT-TCTTTCCC_L002_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['AB2_TGACCAAT-TCTTTCCC_L002_R1_001.fastq.gz'])
self.assertEqual(project2.dir_name, 'Project_CarlDavis')
self.assertEqual(sample3.fastqs(), ['CD3_GTATCGAT-TCTTTCCC_L001_R1_001.fastq.gz'])
self.assertEqual(sample4.fastqs(), ['CD4_CAATTGAC-TCTTTCCC_L001_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_no_projects(self):
iem = SampleSheet(fp=io.StringIO(self.miseq_sample_sheet_no_projects))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, [''])
project = predictor.get_project('')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['Sample1', 'Sample2'])
sample1 = project.get_sample('Sample1')
sample2 = project.get_sample('Sample2')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGTGTAGG-GACCTGTA'])
self.assertEqual(sample2.barcode_seqs, ['CGTGTAGG-ATGTAACT'])
self.assertEqual(sample1.lanes('CGTGTAGG-GACCTGTA'), [])
self.assertEqual(sample2.lanes('CGTGTAGG-ATGTAACT'), [])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2')
self.assertEqual(project.dir_name, '')
self.assertEqual(sample1.fastqs(), ['Sample1_S1_L001_R1_001.fastq.gz'])
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample2.fastqs(), ['Sample2_S2_L001_R1_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
predictor.set(package='casava')
self.assertEqual(project.dir_name, '')
self.assertEqual(sample1.fastqs(), ['Sample1_CGTGTAGG-GACCTGTA_L001_R1_001.fastq.gz'])
self.assertEqual(sample2.fastqs(), ['Sample2_CGTGTAGG-ATGTAACT_L001_R1_001.fastq.gz'])
def test_samplesheet_predictor_iem_with_index_reads(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1-1579', 'PJB2-1580'])
sample1 = project.get_sample('PJB1-1579')
sample2 = project.get_sample('PJB2-1580')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2', paired_end=True, include_index_reads=True)
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_I1_001.fastq.gz', 'PJB1-1579_S1_L001_I2_001.fastq.gz', 'PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L001_R2_001.fastq.gz', 'PJB1-1579_S1_L002_I1_001.fastq.gz', 'PJB1-1579_S1_L002_I2_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R2_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_I1_001.fastq.gz', 'PJB2-1580_S2_L001_I2_001.fastq.gz', 'PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L001_R2_001.fastq.gz', 'PJB2-1580_S2_L002_I1_001.fastq.gz', 'PJB2-1580_S2_L002_I2_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R2_001.fastq.gz'])
def test_samplesheet_predictor_iem_with_custom_reads(self):
iem = SampleSheet(fp=io.StringIO(self.hiseq_sample_sheet_content))
predictor = SampleSheetPredictor(sample_sheet=iem)
self.assertEqual(predictor.nprojects, 1)
self.assertEqual(predictor.project_names, ['PeterBriggs'])
project = predictor.get_project('PeterBriggs')
self.assertRaises(KeyError, predictor.get_project, 'DoesntExist')
self.assertEqual(project.sample_ids, ['PJB1-1579', 'PJB2-1580'])
sample1 = project.get_sample('PJB1-1579')
sample2 = project.get_sample('PJB2-1580')
self.assertRaises(KeyError, project.get_sample, 'DoesntExist')
self.assertEqual(sample1.barcode_seqs, ['CGATGTAT-TCTTTCCC'])
self.assertEqual(sample2.barcode_seqs, ['TGACCAAT-TCTTTCCC'])
self.assertEqual(sample1.lanes('CGATGTAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample2.lanes('TGACCAAT-TCTTTCCC'), [1, 2])
self.assertEqual(sample1.s_index, 1)
self.assertEqual(sample2.s_index, 2)
predictor.set(package='bcl2fastq2', reads=('R1', 'R2', 'R3', 'I1'), include_index_reads=True)
self.assertEqual(project.dir_name, 'PeterBriggs')
self.assertEqual(sample1.dir_name, None)
self.assertEqual(sample1.fastqs(), ['PJB1-1579_S1_L001_I1_001.fastq.gz', 'PJB1-1579_S1_L001_R1_001.fastq.gz', 'PJB1-1579_S1_L001_R2_001.fastq.gz', 'PJB1-1579_S1_L001_R3_001.fastq.gz', 'PJB1-1579_S1_L002_I1_001.fastq.gz', 'PJB1-1579_S1_L002_R1_001.fastq.gz', 'PJB1-1579_S1_L002_R2_001.fastq.gz', 'PJB1-1579_S1_L002_R3_001.fastq.gz'])
self.assertEqual(sample2.dir_name, None)
        self.assertEqual(sample2.fastqs(), ['PJB2-1580_S2_L001_I1_001.fastq.gz', 'PJB2-1580_S2_L001_R1_001.fastq.gz', 'PJB2-1580_S2_L001_R2_001.fastq.gz', 'PJB2-1580_S2_L001_R3_001.fastq.gz', 'PJB2-1580_S2_L002_I1_001.fastq.gz', 'PJB2-1580_S2_L002_R1_001.fastq.gz', 'PJB2-1580_S2_L002_R2_001.fastq.gz', 'PJB2-1580_S2_L002_R3_001.fastq.gz'])

class ExperienceConfigCreate(ExperienceConfigSchema):
accept_button_label: str
component: ComponentType
description: str
reject_button_label: str
save_button_label: str
title: str
    @root_validator
    def validate_attributes(cls, values: Dict[str, Any]) -> Dict[str, Any]:
component: Optional[ComponentType] = values.get('component')
if (component == ComponentType.overlay):
required_overlay_fields = ['acknowledge_button_label', 'banner_enabled', 'privacy_preferences_link_label']
for field in required_overlay_fields:
if (not values.get(field)):
raise ValueError(f'The following additional fields are required when defining an overlay: {human_friendly_list(required_overlay_fields)}.')
        return values

@pytest.mark.parametrize('header', ['multipart/form-data; boundary=+++; charset=utf-8', 'multipart/form-data; charset=utf-8; boundary=+++', 'multipart/form-data; boundary=+++', 'multipart/form-data; boundary=+++ ;', 'multipart/form-data; boundary="+++"; charset=utf-8', 'multipart/form-data; charset=utf-8; boundary="+++"', 'multipart/form-data; boundary="+++"', 'multipart/form-data; boundary="+++" ;'])
def test_multipart_explicit_boundary(header: str) -> None:
    # NOTE: the transport handler and target URL were stripped from this snippet;
    # the values below are placeholders so the test remains syntactically valid.
    client = httpx.Client(transport=httpx.MockTransport(echo_request))
    files = {'file': io.BytesIO(b'<file content>')}
    headers = {'content-type': header}
    response = client.post('https://www.example.com/', files=files, headers=headers)
boundary_bytes = b'+++'
assert (response.status_code == 200)
assert (response.request.headers['Content-Type'] == header)
    assert (response.content == b''.join([((b'--' + boundary_bytes) + b'\r\n'), b'Content-Disposition: form-data; name="file"; filename="upload"\r\n', b'Content-Type: application/octet-stream\r\n', b'\r\n', b'<file content>\r\n', ((b'--' + boundary_bytes) + b'--\r\n')]))

def create_default_spider(coll_model, coll_spiders):
custom_field_author = coll_model.find_one({'field_class': 'custom', 'field_name': 'author'})
custom_field_abstract = coll_model.find_one({'field_class': 'custom', 'field_name': 'abstract'})
custom_field_tags = coll_model.find_one({'field_class': 'custom', 'field_name': 'tags'})
pprint(custom_field_tags)
    spider_config_form = {'_id': 'deprecated', '_xsrf': 'deprecated', 'notes': 'test configuration for debugging / developing purposes...', 'contact': '', 'name': 'test quote', 'page_url': 'http://quotes.toscrape.com', 'next_page': '//li[@class="next"]/a/@href', 'start_urls': ['http://quotes.toscrape.com'], 'item_xpath': '//div[@class="quote"]', 'page_needs_splash': 'false', 'parse_follow': 'false', str(custom_field_tags['_id']): './/div[@class="tags"]/a[@class="tag"]/text()', str(custom_field_author['_id']): './/small[@class="author"]/text()', str(custom_field_abstract['_id']): './span[@class="text"]/text()'}
contributor_object = SpiderConfig(form=spider_config_form, new_spider=True, user='admin')
contributor = contributor_object.full_config_as_dict()
pprint(contributor)
    coll_spiders.insert_one(contributor)

def playGame(modelName, trainedModelName):
neural_net = getattr(model_architectures, modelName)
neuralnet = neural_net()
    nn_location = os.path.join(os.getcwd(), 'trained_models', f'{trainedModelName}.pth')
if os.path.exists(nn_location):
neuralnet.load_state_dict(torch.load(nn_location))
neuralnet.eval()
else:
print('that trained model does not exist')
return
dataloader = CreateData()
for i in range(5, 0, (- 1)):
print(i)
time.sleep(1)
pause = False
with torch.no_grad():
while True:
while (not pause):
screen = dataloader.get_screen()
(road, minimap, speed) = (torch.tensor(screen[0]), torch.tensor(screen[1]), torch.tensor(screen[2]))
road = road[(None, None)]
minimap = minimap[(None, None)]
speed = speed[(None, None)]
output = neuralnet.forward((road / 255), (minimap / 255), (speed / 255))
print(calcProb(output))
index = torch.argmax(output)
if (index == 0):
PressKey(17)
time.sleep(0.3)
ReleaseKey(17)
print('forward')
elif (index == 1):
PressKey(30)
time.sleep(0.05)
ReleaseKey(30)
print('left')
elif (index == 2):
PressKey(32)
time.sleep(0.05)
ReleaseKey(32)
print('right')
elif (index == 3):
PressKey(17)
PressKey(30)
time.sleep(0.05)
ReleaseKey(17)
ReleaseKey(30)
print('forward left')
elif (index == 4):
PressKey(17)
PressKey(32)
time.sleep(0.05)
ReleaseKey(17)
ReleaseKey(32)
print('forward right')
elif (index == 5):
time.sleep(0.05)
print('do nothing')
if keyboard.is_pressed('q'):
return
                if keyboard.is_pressed('z'):
                    pause = True
            # Resume handling (nesting reconstructed; the original indentation was
            # lost in extraction): polled from the outer loop so 'z' can unpause
            # once the inner loop has exited.
            if pause and keyboard.is_pressed('z'):
                pause = False

def render(text: str, font: str, empty_char: str=' ', filters: list=[], alignment: str='l', width: int=None) -> str:
font_data = read_font(font)
boardh = font_data['height']
korsi = (font_data['korsi'] - 1)
glyph_data = font_data['glyphs']
substrings = split_into_directioned_substrings(text, glyph_data)
directioned_chars = []
for substring in substrings:
if (substring[1] == 1):
directioned_chars += substring[0]
else:
directioned_chars += substring[0][::(- 1)]
directioned_text = ''.join(directioned_chars)
text = directioned_text
after_n = list('()!?\n. \u200c:')
before_n = list('() \u200c:.!?\n')
fa = list(('' + '\u200d'))
if (width == None):
if (sys.stdout and sys.stdout.isatty()):
boardw = os.get_terminal_size().columns
else:
boardw = 80
else:
boardw = width
glyphs_width = {}
for character in glyph_data.keys():
try:
max_line_width = len(glyph_data[character][1].split('\n')[korsi])
except IndexError:
            print('There is an error in the font file. Is the korsi set correctly?', file=sys.stderr)
sys.exit(1)
glyphs_width[character] = max_line_width
board = [[empty_char for _ in range(boardw)] for _ in range(boardh)]
cursor = boardw
text = ((' ' + text) + ' ')
rendered_ascii_art = ''
i = 1
while (i < (len(text) - 1)):
substring = text[i]
variation = 0
if (text[i] in fa):
if ((text[(i + 1)] not in before_n) and (text[i] not in after_n)):
if (text[(i - 1)] not in after_n):
variation = 2
else:
variation = 1
elif (text[(i - 1)] not in after_n):
variation = 3
else:
variation = 4
(substring, variation) = find_longest_substring(text[i:], glyph_data.keys(), variation)
if ((substring, variation) in glyphs_width):
next_width = glyphs_width[(substring, variation)]
elif ((substring, 0) in glyphs_width):
next_width = glyphs_width[(substring, 0)]
else:
next_width = 0
if ((cursor <= next_width) or (text[i] in ['\n', '\r'])):
rendered_ascii_art += print_board(board, cursor, alignment=alignment)
rendered_ascii_art += '\n'
cursor = boardw
board = [[empty_char for _ in range(boardw)] for _ in range(boardh)]
if ((substring, variation) in glyph_data):
(board, lenc) = copyboard_glyph(glyph_data[(substring, variation)][1], cursor, board, korsi)
cursor -= next_width
i += (len(substring) if (len(substring) > 0) else 1)
rendered_ascii_art += print_board(board, cursor, alignment=alignment)
if (filters is not None):
for filter in filters:
rendered_ascii_art = apply_filter(rendered_ascii_art, filter)
    return rendered_ascii_art

class IPyIDAPlugIn(idaapi.plugin_t):
wanted_name = 'IPyIDA'
wanted_hotkey = 'Shift-.'
flags = idaapi.PLUGIN_FIX
comment = ''
help = 'Starts an IPython qtconsole in IDA Pro'
def init(self):
self.kernel = kernel.IPythonKernel()
self.kernel.start()
self.widget = None
monkey_patch_IDAPython_ExecScript()
return idaapi.PLUGIN_KEEP
def run(self, args):
if (self.widget is None):
self.widget = ida_qtconsole.IPythonConsole(self.kernel.connection_file)
self.widget.Show()
def term(self):
if self.widget:
self.widget.Close(0)
self.widget = None
if self.kernel:
            self.kernel.stop()

def returndatacopy(evm: Evm) -> None:
memory_start_index = pop(evm.stack)
return_data_start_position = pop(evm.stack)
size = pop(evm.stack)
words = (ceil32(Uint(size)) // 32)
copy_gas_cost = (GAS_RETURN_DATA_COPY * words)
extend_memory = calculate_gas_extend_memory(evm.memory, [(memory_start_index, size)])
charge_gas(evm, ((GAS_VERY_LOW + copy_gas_cost) + extend_memory.cost))
ensure(((Uint(return_data_start_position) + Uint(size)) <= len(evm.return_data)), OutOfBoundsRead)
evm.memory += (b'\x00' * extend_memory.expand_by)
value = evm.return_data[return_data_start_position:(return_data_start_position + size)]
memory_write(evm.memory, memory_start_index, value)
    evm.pc += 1

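# Worked gas example (illustrative, not from the source): copying size = 40 bytes
# spans ceil32(40) // 32 = 2 words, so the charge is GAS_VERY_LOW plus
# 2 * GAS_RETURN_DATA_COPY plus the cost of any memory extension needed for the write.
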
@from_grpc.register(isolate_proto.HostedRunResult)
def _from_grpc_hosted_run_result(message: isolate_proto.HostedRunResult) -> HostedRunResult[Any]:
if message.return_value.definition:
return_value = from_grpc(message.return_value)
else:
return_value = UNSET
    return HostedRunResult(message.run_id, from_grpc(message.status), logs=[from_grpc(log) for log in message.logs], result=return_value)

def test_success_with_all_filters(client, monkeypatch, elasticsearch_transaction_index, awards_and_transactions):
setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
resp = client.post('/api/v2/search/spending_by_category/naics', content_type='application/json', data=json.dumps({'filters': non_legacy_filters()}))
    assert (resp.status_code == status.HTTP_200_OK), 'Failed to return 200 Response'

class EntityManager():
def __init__(self, async_add_entities: AddEntitiesCallback):
self._existing_ids = set()
        self._pending_entities: dict[str, Entity] = {}
self._entity_appliance_map = {}
self._async_add_entities = async_add_entities
def add(self, entity: Entity) -> None:
if (entity and (entity.unique_id not in self._existing_ids) and (entity.unique_id not in self._pending_entities)):
self._pending_entities[entity.unique_id] = entity
def register(self) -> None:
new_ids = set(self._pending_entities.keys())
new_entities = list(self._pending_entities.values())
for entity in new_entities:
if (entity.haId not in self._entity_appliance_map):
self._entity_appliance_map[entity.haId] = set()
self._entity_appliance_map[entity.haId].add(entity.unique_id)
_LOGGER.debug('Registering new entities: %s', new_ids)
self._async_add_entities(new_entities)
self._existing_ids |= new_ids
self._pending_entities = {}
def remove_appliance(self, appliance: Appliance):
if (appliance.haId in self._entity_appliance_map):
self._existing_ids -= self._entity_appliance_map[appliance.haId]
del self._entity_appliance_map[appliance.haId] |
class DOM():
def __init__(self, instance):
self._dom = XDHqFaaS.DOM_FaaS(instance)
def get_action(self):
return self._dom.getAction()
getAction = get_action
def isQuitting(self):
return self._dom.isQuitting()
def _execute(self, type, script):
return self._dom.call('Execute_1', type, script)
def executeVoid(self, script):
return self._execute(_VOID, script)
execute_void = executeVoid
def executeString(self, script):
return self._execute(_STRING, script)
execute_string = executeString
def executeStrings(self, script):
return self._execute(_STRINGS, script)
execute_strings = executeStrings
def _raw_send(self, type, data):
return self._dom.call('RawSend_1', type, data)
def rawSendVoid(self, data):
return self._raw_send(_VOID, data)
raw_send_void = rawSendVoid
def rawSendString(self, data):
return self._raw_send(_STRING, data)
raw_send_string = rawSendString
def rawSendStrings(self, data):
return self._raw_send(_STRINGS, data)
raw_send_strings = rawSendStrings
def flush(self):
self._dom.call('Flush_1', _STRING)
def alert(self, message):
self._dom.call('Alert_1', _STRING, str(message))
def confirm(self, message):
return (self._dom.call('Confirm_1', _STRING, message) == 'true')
def _handleLayout(self, variant, id, xml, xsl):
self._dom.call('HandleLayout_1', _STRING, variant, id, (xml if isinstance(xml, str) else xml.toString()), xsl)
def prependLayout(self, id, html):
self._handleLayout('Prepend', id, html, '')
prepend_layout = prependLayout
def setLayout(self, id, html):
self._handleLayout('Set', id, html, '')
set_layout = setLayout
def appendLayout(self, id, html):
self._handleLayout('Append', id, html, '')
append_layout = appendLayout
def _handleLayoutXSL(self, variant, id, xml, xsl):
xslURL = ('data:text/xml;charset=utf-8,' + _encode(_readXSLAsset(xsl)))
self._handleLayout(variant, id, xml, xslURL)
def prependLayoutXSL(self, id, xml, xsl):
self._handleLayoutXSL('Prepend', id, xml, xsl)
prepend_layout_XSL = prependLayoutXSL
def setLayoutXSL(self, id, xml, xsl):
self._handleLayoutXSL('Set', id, xml, xsl)
set_layout_XSL = setLayoutXSL
def appendLayoutXSL(self, id, xml, xsl):
self._handleLayoutXSL('Append', id, xml, xsl)
append_layout_XSL = appendLayoutXSL
def _layout(self, variant, id, xml, xsl):
if xsl:
xsl = ('data:text/xml;charset=utf-8,' + _encode(_readXSLAsset(xsl)))
self._dom.call('HandleLayout_1', _STRING, variant, id, (xml if isinstance(xml, str) else xml.toString()), xsl)
def before(self, id, xml, xsl=''):
self._layout('beforebegin', id, xml, xsl)
def begin(self, id, xml, xsl=''):
self._layout('afterbegin', id, xml, xsl)
def inner(self, id, xml, xsl=''):
self._layout('inner', id, xml, xsl)
def end(self, id, xml, xsl=''):
self._layout('beforeend', id, xml, xsl)
def after(self, id, xml, xsl=''):
self._layout('afterend', id, xml, xsl)
def getContents(self, ids):
return _unsplit(ids, self._dom.call('GetContents_1', _STRINGS, ids))
get_contents = getContents
def getContent(self, id):
return self.getContents([id])[id]
get_content = getContent
def getValues(self, ids):
return _unsplit(ids, self._dom.call('GetValues_1', _STRINGS, ids))
get_values = getValues
def getValue(self, id):
return self.get_values([id])[id]
get_value = getValue
def getMarks(self, ids):
return _unsplit(ids, self._dom.call('GetMarks_1', _STRINGS, ids))
get_marks = getMarks
def getMark(self, id):
return self.get_marks([id])[id]
get_mark = getMark
def setContents(self, ids_and_contents):
[ids, contents] = _split(ids_and_contents)
self._dom.call('SetContents_1', _VOID, ids, contents)
set_contents = setContents
def setContent(self, id, content):
self.set_contents({id: content})
set_content = setContent
def setValues(self, ids_and_values):
[ids, values] = _split(ids_and_values)
self._dom.call('SetValues_1', _VOID, ids, values)
set_values = setValues
def setValue(self, id, value):
self.set_values({id: value})
set_value = setValue
def setMarks(self, ids_and_marks):
[ids, marks] = _split(ids_and_marks)
self._dom.call('SetMarks_1', _VOID, ids, marks)
set_marks = setMarks
def setMark(self, id, mark):
self.set_marks({id: mark})
set_mark = setMark
def _handleClasses(self, variant, idsAndClasses):
[ids, classes] = _split(idsAndClasses)
self._dom.call('HandleClasses_1', _VOID, variant, ids, classes)
def addClasses(self, ids_and_classes):
self._handleClasses('Add', ids_and_classes)
add_classes = addClasses
def removeClasses(self, ids_and_classes):
self._handleClasses('Remove', ids_and_classes)
remove_classes = removeClasses
def toggleClasses(self, ids_and_classes):
self._handleClasses('Toggle', ids_and_classes)
toggle_classes = toggleClasses
def addClass(self, id, clas):
self.addClasses({id: clas})
add_class = addClass
def removeClass(self, id, class_):
self.removeClasses({id: class_})
remove_class = removeClass
def toggleClass(self, id, clas):
self.toggleClasses({id: clas})
toggle_class = toggleClass
def enableElements(self, ids):
self._dom.call('EnableElements_1', _VOID, ids)
enable_elements = enableElements
def enableElement(self, id):
self.enableElements([id])
enable_element = enableElement
def disableElements(self, ids):
self._dom.call('DisableElements_1', _VOID, ids)
disable_elements = disableElements
def disableElement(self, id):
self.disableElements([id])
disable_element = disableElement
def setAttribute(self, id, name, value):
self._dom.call('SetAttribute_1', _VOID, id, name, str(value))
set_attribute = setAttribute
def getAttribute(self, id, name):
return self._dom.call('GetAttribute_1', _STRING, id, name)
get_attribute = getAttribute
def removeAttribute(self, id, name):
self._dom.call('RemoveAttribute_1', _VOID, id, name)
remove_attribute = removeAttribute
def setProperty(self, id, name, value):
self._dom.call('SetProperty_1', _VOID, id, name, value)
set_property = setProperty
def getProperty(self, id, name):
return self._dom.call('GetProperty_1', _STRING, id, name)
get_property = getProperty
def focus(self, id):
self._dom.call('Focus_1', _VOID, id)
def parent(self, id):
return self._dom.call('Parent_1', _STRING, id)
def firstChild(self, id):
return self._dom.call('FirstChild_1', _STRING, id)
first_child = firstChild
def lastChild(self, id):
return self._dom.call('LastChild_1', _STRING, id)
last_child = lastChild
def previousSibling(self, id):
return self._dom.call('PreviousSibling_1', _STRING, id)
previous_sibling = previousSibling
def nextSibling(self, id):
return self._dom.call('NextSibling_1', _STRING, id)
next_sibling = nextSibling
def scrollTo(self, id):
self._dom.call('ScrollTo_1', _VOID, id)
scroll_to = scrollTo
def debugLog(self, switch=True):
self._dom.call('DebugLog_1', _VOID, ('true' if switch else 'false'))
def log(self, message):
self._dom.call('Log_1', _VOID, message) |
class OptionPlotoptionsAreaStatesHover(Options):
def animation(self) -> 'OptionPlotoptionsAreaStatesHoverAnimation':
return self._config_sub_data('animation', OptionPlotoptionsAreaStatesHoverAnimation)
def enabled(self):
return self._config_get(True)
def enabled(self, flag: bool):
self._config(flag, js_type=False)
def halo(self) -> 'OptionPlotoptionsAreaStatesHoverHalo':
return self._config_sub_data('halo', OptionPlotoptionsAreaStatesHoverHalo)
def lineWidth(self):
return self._config_get(None)
def lineWidth(self, num: float):
self._config(num, js_type=False)
def lineWidthPlus(self):
return self._config_get(1)
def lineWidthPlus(self, num: float):
self._config(num, js_type=False)
def marker(self) -> 'OptionPlotoptionsAreaStatesHoverMarker':
return self._config_sub_data('marker', OptionPlotoptionsAreaStatesHoverMarker) |
class BaseChainPeerReporterRegistry(PeerReporterRegistry[BaseChainPeer]):
def reset_peer_meters(self, peer_id: int) -> None:
head_gauge = self._get_blockheight_gauge(peer_id)
td_gauge = self._get_td_gauge(peer_id)
head_gauge.set_value(0)
td_gauge.set_value(0)
def make_periodic_update(self, peer: BaseChainPeer, peer_id: int) -> None:
head_gauge = self._get_blockheight_gauge(peer_id)
td_gauge = self._get_td_gauge(peer_id)
head_info = peer.head_info
try:
td_gauge.set_value(head_info.head_td)
except PeerConnectionLost:
head_gauge.set_value(0)
td_gauge.set_value(0)
else:
try:
head_number = head_info.head_number
except AttributeError:
head_gauge.set_value(0)
else:
head_gauge.set_value(head_number)
def _get_blockheight_gauge(self, peer_id: int) -> SimpleGauge:
return self.metrics_registry.gauge(f'trinity.p2p/peer_{peer_id}_blockheight.gauge')
def _get_td_gauge(self, peer_id: int) -> SimpleGauge:
return self.metrics_registry.gauge(f'trinity.p2p/peer_{peer_id}_total_difficulty.gauge') |
class GetVersionsByRegexTextTests(unittest.TestCase):
def test_get_versions_by_regex_for_text(self):
text = '\n some release: 0.0.1\n some other release: 0.0.2\n The best release: 1.0.0\n '
regex = '\\d\\.\\d\\.\\d'
mock_project = mock.Mock(version_prefix='', version_filter=None)
versions = backends.get_versions_by_regex_for_text(text, 'url', regex, mock_project)
self.assertEqual(sorted(['0.0.1', '0.0.2', '1.0.0']), sorted(versions))
def test_get_versions_by_regex_for_text_tuples(self):
text = '\n some release: 0.0.1\n some other release: 0.0.2\n The best release: 1.0.0\n '
regex = '(\\d)\\.(\\d)\\.(\\d)'
mock_project = mock.Mock(version_prefix='', version_filter=None)
versions = backends.get_versions_by_regex_for_text(text, 'url', regex, mock_project)
self.assertEqual(sorted(['0.0.1', '0.0.2', '1.0.0']), sorted(versions))
self.assertEqual(3, len(re.findall(regex, '0.0.1')[0]))
def test_get_versions_by_regex_for_text_no_versions(self):
text = "This text doesn't have a release!"
regex = '(\\d)\\.(\\d)\\.(\\d)'
mock_project = mock.Mock(version_prefix='')
self.assertRaises(AnityaPluginException, backends.get_versions_by_regex_for_text, text, 'url', regex, mock_project) |
def push_config_to_hfhub(repo_id, exp_folder, best_ap50=None, input_size=640, task='object-detection', hf_token=None, private=False):
from huggingface_hub import upload_file, create_repo
import json
config = {'input_size': input_size, 'task': task, 'best_ap50': best_ap50}
config_path = (Path(exp_folder) / 'config.json')
with open(config_path, 'w') as file_object:
json.dump(config, file_object)
create_repo(repo_id=repo_id, token=hf_token, private=private, exist_ok=True)
upload_file(repo_id=repo_id, path_or_fileobj=str(config_path), path_in_repo=Path(config_path).name, commit_message='Add yolov5 config', token=hf_token, repo_type='model') |
class HierarchicalFilter():
def _query_string(cls, queryset, require, exclude):
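# Only codes without a parent in the combined require/exclude set become
# root nodes; descendants are presumably folded in by node.get_query().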
positive_nodes = [cls.node(code, True, require, exclude) for code in require if cls._has_no_parents(code, (require + exclude))]
negative_nodes = [cls.node(code, False, require, exclude) for code in exclude if cls._has_no_parents(code, (require + exclude))]
q = Q()
for node in positive_nodes:
if (node.code not in [neg_node.code for neg_node in negative_nodes]):
q |= node.get_query()
else:
q |= Q(pk__in=[])
for node in negative_nodes:
if (node.children or (node.code not in [pos_node.code for pos_node in positive_nodes])):
q |= node.get_query()
queryset = queryset.filter(q)
return queryset
def _has_no_parents(cls, code, other_codes):
return (not len([match for match in other_codes if cls.code_is_parent_of(match, code)]))
def code_is_parent_of(code, other):
return ((other[:len(code)] == code) and (len(code) < len(other)))
def node(code, positive, positive_codes, negative_codes):
pass |
def get_cli_properties() -> dict:
click_context = click.get_current_context()
if (click_context is None):
return dict()
params = click_context.params
if (params is None):
return dict()
target_path = params.get('target_path')
update_dbt_package = params.get('update_dbt_package')
full_refresh_dbt_package = params.get('full_refresh_dbt_package')
select = params.get('select')
days_back = params.get('days_back')
timezone = params.get('timezone')
group_by = params.get('group_by')
suppression_interval = params.get('suppression_interval')
override_dbt_project_config = params.get('override_dbt_project_config')
return {'target_path': target_path, 'update_dbt_package': update_dbt_package, 'full_refresh_dbt_package': full_refresh_dbt_package, 'select': select, 'days_back': days_back, 'timezone': timezone, 'group_by': group_by, 'suppression_interval': suppression_interval, 'override_dbt_project_config': override_dbt_project_config} |
class Comment(BasicObject):
attributes = {'TEXT': utils.vector}
def __init__(self, attic, lf):
super().__init__(attic, lf=lf)
def text(self):
return self['TEXT']
def describe_attr(self, buf, width, indent, exclude):
utils.describe_array(buf, self.text, width, indent) |
def main() -> None:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError('Could not import Django. Are you sure it is installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?') from exc
execute_from_command_line(sys.argv) |
@mock.patch('copr_backend.vm_alloc.ResallocConnection')
def test_ticket_with_args(rcon):
hf = ResallocHostFactory()
host = hf.get_host(sandbox='somesb', tags=['arch_x86_64'])
host.ticket.closed = False
host.ticket.ready = True
host.ticket.output = '1.1.1.1'
host.wait_ready()
assert (rcon.return_value.newTicket.call_args_list == [mock.call(['copr_builder', 'arch_x86_64'], 'somesb')])
assert (host.hostname == '1.1.1.1') |
class OptionPlotoptionsBoxplotSonificationTracksMappingTime(Options):
def mapFunction(self):
return self._config_get(None)
def mapFunction(self, value: Any):
self._config(value, js_type=False)
def mapTo(self):
return self._config_get(None)
def mapTo(self, text: str):
self._config(text, js_type=False)
def max(self):
return self._config_get(None)
def max(self, num: float):
self._config(num, js_type=False)
def min(self):
return self._config_get(None)
def min(self, num: float):
self._config(num, js_type=False)
def within(self):
return self._config_get(None)
def within(self, value: Any):
self._config(value, js_type=False) |
class Track(SoftDeletionModel):
__tablename__ = 'tracks'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
description = db.Column(db.Text)
color = db.Column(db.String, nullable=False)
sessions = db.relationship('Session', backref='track')
event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
position = db.Column(db.Integer, default=0, nullable=False)
def get_service_name():
return 'track'
def __repr__(self):
return ('<Track %r>' % self.name)
def font_color(self):
if self.color.startswith('#'):
h = self.color.lstrip('#')
a = (1 - ((((0.299 * int(h[0:2], 16)) + (0.587 * int(h[2:4], 16))) + (0.114 * int(h[4:6], 16))) / 255))
elif self.color.startswith('rgba'):
h = self.color.lstrip('rgba').replace('(', '', 1).replace(')', '', 1)
h = h.split(',')
a = (1 - ((((0.299 * int(h[0], 16)) + (0.587 * int(h[1], 16))) + (0.114 * int(h[2], 16))) / 255))
return ('#000000' if (a < 0.5) else '#ffffff') |
def get_db_session(postfix='', flask_sess_if_possible=True):
if (flags.IS_FLASK and flask_sess_if_possible):
return g.session
cpid = multiprocessing.current_process().name
ctid = threading.current_thread().name
csid = '{}-{}-{}'.format(cpid, ctid, postfix)
if (not (csid in SESSIONS)):
acquired = False
while True:
acquired = SESSION_LOCK.acquire(timeout=5)
if acquired:
break
else:
print('Error!')
print('Error!')
print('SESSION_LOCK TIMEOUT!')
print('Error!')
print('Error!')
print('Clearing lock as last-resort!')
SESSION_LOCK.release()
try:
if (csid in SESSIONS):
SESSIONS[csid][0] = time.time()
ret = SESSIONS[csid][1]
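# Poke the raw DBAPI connection; this raises if the connection has gone
# away, so a stale session is never handed back.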
_ = ret.connection().connection.isolation_level
assert (ret.connection().connection.closed == 0)
return ret
SESSIONS[csid] = [time.time(), scoped_session(sessionmaker(bind=get_engine(), autoflush=False, autocommit=False))()]
if (len(SESSIONS) > C_MAX_DB_SESSIONS):
log.info(('WARN: More then %s active sessions! Deleting oldest session to prevent session contention.' % C_MAX_DB_SESSIONS))
maxsz = sys.maxsize
to_delete = None
for (key, value) in SESSIONS.items():
if (value[0] < maxsz):
to_delete = key
maxsz = value[0]
if to_delete:
del SESSIONS[to_delete]
finally:
SESSION_LOCK.release()
SESSIONS[csid][0] = time.time()
return SESSIONS[csid][1] |
class group_insert_bucket(group_mod):
version = 6
type = 15
command = 3
def __init__(self, xid=None, group_type=None, group_id=None, command_bucket_id=None, buckets=None, properties=None):
if (xid != None):
self.xid = xid
else:
self.xid = None
if (group_type != None):
self.group_type = group_type
else:
self.group_type = 0
if (group_id != None):
self.group_id = group_id
else:
self.group_id = 0
if (command_bucket_id != None):
self.command_bucket_id = command_bucket_id
else:
self.command_bucket_id = 0
if (buckets != None):
self.buckets = buckets
else:
self.buckets = []
if (properties != None):
self.properties = properties
else:
self.properties = []
return
def pack(self):
packed = []
packed.append(struct.pack('!B', self.version))
packed.append(struct.pack('!B', self.type))
packed.append(struct.pack('!H', 0))
packed.append(struct.pack('!L', self.xid))
packed.append(struct.pack('!H', self.command))
packed.append(struct.pack('!B', self.group_type))
packed.append(('\x00' * 1))
packed.append(struct.pack('!L', self.group_id))
packed.append(struct.pack('!H', 0))
packed.append(('\x00' * 2))
packed.append(struct.pack('!L', self.command_bucket_id))
packed.append(loxi.generic_util.pack_list(self.buckets))
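# Backpatch the bucket-array length (slot 8) now that the buckets are serialized.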
packed[8] = struct.pack('!H', len(packed[(- 1)]))
packed.append(loxi.generic_util.pack_list(self.properties))
length = sum([len(x) for x in packed])
packed[2] = struct.pack('!H', length)
return ''.join(packed)
def unpack(reader):
obj = group_insert_bucket()
_version = reader.read('!B')[0]
assert (_version == 6)
_type = reader.read('!B')[0]
assert (_type == 15)
_length = reader.read('!H')[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read('!L')[0]
_command = reader.read('!H')[0]
assert (_command == 3)
obj.group_type = reader.read('!B')[0]
reader.skip(1)
obj.group_id = reader.read('!L')[0]
_bucket_array_len = reader.read('!H')[0]
reader.skip(2)
obj.command_bucket_id = reader.read('!L')[0]
obj.buckets = loxi.generic_util.unpack_list(reader.slice(_bucket_array_len), ofp.common.bucket.unpack)
obj.properties = loxi.generic_util.unpack_list(reader, ofp.common.group_prop.unpack)
return obj
def __eq__(self, other):
if (type(self) != type(other)):
return False
if (self.xid != other.xid):
return False
if (self.group_type != other.group_type):
return False
if (self.group_id != other.group_id):
return False
if (self.command_bucket_id != other.command_bucket_id):
return False
if (self.buckets != other.buckets):
return False
if (self.properties != other.properties):
return False
return True
def pretty_print(self, q):
q.text('group_insert_bucket {')
with q.group():
with q.indent(2):
q.breakable()
q.text('xid = ')
if (self.xid != None):
q.text(('%#x' % self.xid))
else:
q.text('None')
q.text(',')
q.breakable()
q.text('group_type = ')
value_name_map = {0: 'OFPGT_ALL', 1: 'OFPGT_SELECT', 2: 'OFPGT_INDIRECT', 3: 'OFPGT_FF'}
if (self.group_type in value_name_map):
q.text(('%s(%d)' % (value_name_map[self.group_type], self.group_type)))
else:
q.text(('%#x' % self.group_type))
q.text(',')
q.breakable()
q.text('group_id = ')
q.text(('%#x' % self.group_id))
q.text(',')
q.breakable()
q.text('command_bucket_id = ')
value_name_map = {0xffffff00: 'OFPG_BUCKET_MAX', 0xfffffffd: 'OFPG_BUCKET_FIRST', 0xfffffffe: 'OFPG_BUCKET_LAST', 0xffffffff: 'OFPG_BUCKET_ALL'}
if (self.command_bucket_id in value_name_map):
q.text(('%s(%d)' % (value_name_map[self.command_bucket_id], self.command_bucket_id)))
else:
q.text(('%#x' % self.command_bucket_id))
q.text(',')
q.breakable()
q.text('buckets = ')
q.pp(self.buckets)
q.text(',')
q.breakable()
q.text('properties = ')
q.pp(self.properties)
q.breakable()
q.text('}') |
@common.requires_os(*metadata.platforms)
def main():
vssadmin = 'C:\\Windows\\System32\\vssadmin.exe'
powershell = 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe'
png = 'C:\\Windows\\System32\\SecurityAndMaintenance.png'
tmppng = 'C:\\Users\\Public\\SecurityAndMaintenance.png'
renamed = 'C:\\Users\\Public\\renamed.encrypted'
common.copy_file(png, tmppng)
common.log('Deleting Shadow Copies using Vssadmin spawned by cmd')
common.execute([powershell, '/c', vssadmin, 'delete', 'shadows', '/For=C:'], timeout=10)
common.log('Renaming image to unknown extension')
common.execute([powershell, '/c', f'Rename-Item {tmppng} {renamed}'], timeout=10)
common.remove_file(renamed) |
class TestMakeDirs():
def test_makedirs(self, tmpdir):
path = str(tmpdir.join('test'))
makedirs(path)
assert os.path.exists(path)
def test_makedirs_with_exists_path(self, tmpdir):
path = str(tmpdir.join('test'))
makedirs(path)
makedirs(path, exist_ok=True)
with pytest.raises(Exception):
makedirs(path, exist_ok=False)
def test_makedirs_with_ignore_error(self, tmpdir):
path = str(tmpdir.join('test'))
makedirs(path)
makedirs(path, ignore_errors=True)
def test_makedirs_without_ignore_error(self, tmpdir):
path = str(tmpdir.join('test'))
makedirs(path)
with pytest.raises(Exception):
makedirs(path, ignore_errors=False, exist_ok=False)
def test_makedirs_with_mutl_dirs(self, tmpdir):
path = str(tmpdir.join('test/test'))
makedirs(path)
assert os.path.exists(path) |
class OptionSeriesDependencywheelOnpoint(Options):
def connectorOptions(self) -> 'OptionSeriesDependencywheelOnpointConnectoroptions':
return self._config_sub_data('connectorOptions', OptionSeriesDependencywheelOnpointConnectoroptions)
def id(self):
return self._config_get(None)
def id(self, text: str):
self._config(text, js_type=False)
def position(self) -> 'OptionSeriesDependencywheelOnpointPosition':
return self._config_sub_data('position', OptionSeriesDependencywheelOnpointPosition) |
class OptionPlotoptionsErrorbarSonificationTracksMappingLowpassResonance(Options):
def mapFunction(self):
return self._config_get(None)
def mapFunction(self, value: Any):
self._config(value, js_type=False)
def mapTo(self):
return self._config_get(None)
def mapTo(self, text: str):
self._config(text, js_type=False)
def max(self):
return self._config_get(None)
def max(self, num: float):
self._config(num, js_type=False)
def min(self):
return self._config_get(None)
def min(self, num: float):
self._config(num, js_type=False)
def within(self):
return self._config_get(None)
def within(self, value: Any):
self._config(value, js_type=False) |
def test_request_middleware_exception(django_elasticapm_client, client):
with override_settings(**middleware_setting(django.VERSION, ['tests.contrib.django.testapp.middleware.BrokenRequestMiddleware'])):
with pytest.raises(ImportError):
client.get(reverse('elasticapm-raise-exc'))
assert (len(django_elasticapm_client.events[ERROR]) == 1)
event = django_elasticapm_client.events[ERROR][0]
assert ('exception' in event)
exc = event['exception']
assert (exc['type'] == 'ImportError')
assert (exc['message'] == 'ImportError: request')
assert (exc['handled'] is False)
assert (event['culprit'] == 'tests.contrib.django.testapp.middleware.process_request') |
def run():
iosites = {}
for (site_name, site_type) in gen_iobs():
iosites[site_name] = site_type
ports = {}
DIN_N = 0
DOUT_N = 0
def remain_sites():
return (set(iosites.keys()) - set(ports.keys()))
def rand_site():
return random.choice(list(remain_sites()))
def get_site():
return next(iter(remain_sites()))
def assign_i(site, name):
nonlocal DIN_N
assert (site not in ports)
cell = ('di_bufs[%u].ibuf' % DIN_N)
DIN_N += 1
ports[site] = (name, 'input', cell)
def assign_o(site, name):
nonlocal DOUT_N
assert (site not in ports)
cell = ('do_bufs[%u].obuf' % DOUT_N)
DOUT_N += 1
ports[site] = (name, 'output', cell)
assign_i(get_site(), 'di[0]')
assign_o(get_site(), 'do[0]')
print(("\n`define N_DI %u\n`define N_DO %u\n\nmodule top(input wire [`N_DI-1:0] di, output wire [`N_DO-1:0] do);\n genvar i;\n\n //Instantiate BUFs so we can LOC them\n\n wire [`N_DI-1:0] di_buf;\n generate\n for (i = 0; i < `N_DI; i = i+1) begin:di_bufs\n IBUF #(\n ) ibuf(.I(di[i]), .O(di_buf[i]));\n end\n endgenerate\n\n wire [`N_DO-1:0] do_unbuf;\n generate\n for (i = 0; i < `N_DO; i = i+1) begin:do_bufs\n OBUF #(\n ) obuf(.I(do_unbuf[i]), .O(do[i]));\n end\n endgenerate\n\n roi roi(.di(di_buf), .do(do_unbuf));\nendmodule\n\n//Arbitrary terminate into LUTs\nmodule roi(input wire [`N_DI-1:0] di, output wire [`N_DO-1:0] do);\n genvar i;\n\n generate\n for (i = 0; i < `N_DI; i = i+1) begin:dis\n (* KEEP, DONT_TOUCH *)\n LUT6 #(\n .INIT(64'h8000_0000_0000_0001)\n ) lut (\n .I0(di[i]),\n .I1(di[i]),\n .I2(di[i]),\n .I3(di[i]),\n .I4(di[i]),\n .I5(di[i]),\n .O());\n end\n endgenerate\n\n generate\n for (i = 0; i < `N_DO; i = i+1) begin:dos\n (* KEEP, DONT_TOUCH *)\n LUT6 #(\n .INIT(64'h8000_0000_0000_0001)\n ) lut (\n .I0(),\n .I1(),\n .I2(),\n .I3(),\n .I4(),\n .I5(),\n .O(do[i]));\n end\n endgenerate\nendmodule\n " % (DIN_N, DOUT_N))) |
def compute_reaching_conditions(graph_slice: TransitionCFG, src: TransitionBlock, cfg: TransitionCFG) -> Dict[(TransitionBlock, LogicCondition)]:
reaching_conditions: Dict[(TransitionBlock, LogicCondition)] = {src: graph_slice.condition_handler.get_true_value()}
sink_nodes = _sink_nodes(graph_slice)
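# A unique sink that postdominates the source is reached on every path,
# so its reaching condition is trivially True.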
if ((len(sink_nodes) == 1) and _sink_postdominates_source_in_cfg(graph_slice, sink_nodes[0], src, cfg)):
reaching_conditions[sink_nodes[0]] = graph_slice.condition_handler.get_true_value()
_compute_reaching_condition_of_unique_predecessors(sink_nodes[0], graph_slice, reaching_conditions)
for node in graph_slice.iter_topological():
if (node in reaching_conditions):
continue
reaching_conditions[node] = _compute_reaching_condition_of(node, graph_slice, reaching_conditions)
return reaching_conditions |
class OptionPlotoptionsBulletSonificationDefaultinstrumentoptionsMappingHighpass(Options):
def frequency(self) -> 'OptionPlotoptionsBulletSonificationDefaultinstrumentoptionsMappingHighpassFrequency':
return self._config_sub_data('frequency', OptionPlotoptionsBulletSonificationDefaultinstrumentoptionsMappingHighpassFrequency)
def resonance(self) -> 'OptionPlotoptionsBulletSonificationDefaultinstrumentoptionsMappingHighpassResonance':
return self._config_sub_data('resonance', OptionPlotoptionsBulletSonificationDefaultinstrumentoptionsMappingHighpassResonance) |
def fetch_checkpoints_till_final(checkpoint_dir):
MIN_SLEEP_INTERVAL = 1.0
MAX_SLEEP_INTERVAL = 60.0
sleep_interval = MIN_SLEEP_INTERVAL
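# Poll with exponential backoff: the sleep doubles on every idle round,
# capped at MAX_SLEEP_INTERVAL, and resets once a new checkpoint shows up.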
finished_checkpoints = set()
def _add_and_log(path):
finished_checkpoints.add(path)
logger.info('Found checkpoint: {}'.format(path))
return path
def _log_and_sleep(sleep_interval):
logger.info('Sleep {} seconds while waiting for model_final.pth'.format(sleep_interval))
time.sleep(sleep_interval)
return min((sleep_interval * 2), MAX_SLEEP_INTERVAL)
def _get_lightning_checkpoints(path: str):
return [os.path.join(path, x) for x in PathManager.ls(path) if (x.endswith(ModelCheckpoint.FILE_EXTENSION) and (not x.startswith(ModelCheckpoint.CHECKPOINT_NAME_LAST)))]
while True:
if (not PathManager.exists(checkpoint_dir)):
sleep_interval = _log_and_sleep(sleep_interval)
continue
checkpoint_paths = DetectionCheckpointer(None, save_dir=checkpoint_dir).get_all_checkpoint_files()
checkpoint_paths = [cpt_path for cpt_path in checkpoint_paths if os.path.basename(cpt_path).startswith('model')]
checkpoint_paths.extend(_get_lightning_checkpoints(checkpoint_dir))
final_model_path = None
periodic_checkpoints = []
for path in sorted(checkpoint_paths):
if (path.endswith('model_final.pth') or path.endswith('model_final.ckpt')):
final_model_path = path
continue
if path.endswith(ModelCheckpoint.FILE_EXTENSION):
model_iter = int(re.findall('(?<=step=)\\d+(?={})'.format(ModelCheckpoint.FILE_EXTENSION), path)[0])
else:
model_iter = int(re.findall('(?<=model_)\\d+(?=\\.pth)', path)[0])
periodic_checkpoints.append((path, model_iter))
periodic_checkpoints = [pc for pc in periodic_checkpoints if (pc[0] not in finished_checkpoints)]
periodic_checkpoints = sorted(periodic_checkpoints, key=(lambda x: x[1]))
for pc in periodic_checkpoints:
(yield _add_and_log(pc[0]))
sleep_interval = MIN_SLEEP_INTERVAL
if (final_model_path is None):
sleep_interval = _log_and_sleep(sleep_interval)
else:
(yield _add_and_log(final_model_path))
break |
def _parse_error_body(response):
error_dict = {}
try:
parsed_body = response.json()
if isinstance(parsed_body, dict):
error_dict = parsed_body.get('error', {})
except ValueError:
pass
code = (error_dict.get('message') if isinstance(error_dict, dict) else None)
custom_message = None
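# Error messages of the form 'CODE: detail' carry a custom message after
# the first colon; split it off and keep the bare code.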
if code:
separator = code.find(':')
if (separator != (- 1)):
custom_message = code[(separator + 1):].strip()
code = code[:separator]
return (code, custom_message) |
def test_truncate_wordpieces(wordpieces, max_length, mask_from_end):
truncated = _truncate_tokens(wordpieces, max_length, mask_from_end)
for (i, seq) in enumerate(truncated.strings):
assert (len(seq) <= max_length)
assert (seq == wordpieces.strings[i][:max_length])
assert (truncated.input_ids[i].shape[0] <= max_length)
assert (truncated.token_type_ids[i].shape[0] <= max_length)
assert (truncated.attention_mask[i].shape[0] <= max_length) |
class OptionPlotoptionsAreasplinerangeDragdropDraghandle(Options):
def className(self):
return self._config_get('highcharts-drag-handle')
def className(self, text: str):
self._config(text, js_type=False)
def color(self):
return self._config_get('#fff')
def color(self, text: str):
self._config(text, js_type=False)
def lineColor(self):
return self._config_get('rgba(0, 0, 0, 0.6)')
def lineColor(self, text: str):
self._config(text, js_type=False)
def lineWidth(self):
return self._config_get(1)
def lineWidth(self, num: float):
self._config(num, js_type=False)
def zIndex(self):
return self._config_get(901)
def zIndex(self, num: float):
self._config(num, js_type=False) |
def _validate_protocol_buffer_schema_code_snippets(protocol_specification: ProtocolSpecification, custom_types_set: Set[str]) -> Tuple[(bool, str)]:
if ((protocol_specification.protobuf_snippets is not None) and (protocol_specification.protobuf_snippets != '')):
for custom_type in protocol_specification.protobuf_snippets.keys():
if (custom_type not in custom_types_set):
return (False, "Extra protobuf code snippet provided. Type '{}' is not used anywhere in your protocol definition.".format(custom_type))
custom_types_set.remove(custom_type)
if (len(custom_types_set) != 0):
return (False, 'No protobuf code snippet is provided for the following custom types: {}'.format(custom_types_set))
return (True, 'Protobuf code snippet section is valid.') |
def test_load_audio_with_basic_config():
sg_cfg = AudioConfig.BasicSpectrogram(n_fft=2000, hop_length=155)
a2sg = AudioToSpec.from_cfg(sg_cfg)
audio = test_audio_tensor()
sg = a2sg(audio)
assert (sg.n_fft == sg_cfg.n_fft)
assert (sg.width == (int((audio.nsamples / sg_cfg.hop_length)) + 1)) |
class SqlTree(Sql):
_is_select = False
_needs_select = False
def _compile(self, qb):
raise NotImplementedError()
def compile_wrap(self, qb):
return self.compile(qb).wrap(qb)
def compile(self, qb):
sql_code = self._compile(qb.replace(is_root=False))
assert isinstance(sql_code, list), self
assert all((isinstance(c, (str, Parameter)) for c in sql_code)), self
return CompiledSQL(self.type, sql_code, self, self._is_select, self._needs_select)
def finalize_with_subqueries(self, qb, subqueries):
if subqueries:
subqs = [q.compile_wrap(qb).finalize(qb) for (name, q) in subqueries.items()]
sql_code = (['WITH RECURSIVE '] if (qb.target in (postgres, mysql, redshift)) else ['WITH '])
sql_code += join_comma(([q, '\n '] for q in subqs))
else:
sql_code = []
sql_code += self.compile_wrap(qb).finalize(qb)
return ''.join(sql_code) |
def fischer_guess(geom):
cdm = pdist(geom.coords3d)
pair_cov_radii = get_pair_covalent_radii(geom.atoms)
central_atoms = [inds[1:3] for inds in geom.internal.dihedral_atom_indices]
bond_factor = geom.internal.bond_factor
bond_mat = squareform((cdm <= (pair_cov_radii * bond_factor)))
tors_atom_bonds = dict()
for (a, b) in central_atoms:
bond_sum = ((bond_mat[a].sum() + bond_mat[b].sum()) - 2)
tors_atom_bonds[(a, b)] = bond_sum
dist_mat = squareform(cdm)
pair_cov_radii_mat = squareform(pair_cov_radii)
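# Empirical diagonal force constants in the spirit of the Fischer-Almlöf
# model Hessian; r_*_cov are sums of covalent radii for the atom pairs.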
def h_bond(indices):
(a, b) = indices[:2]
r_ab = dist_mat[(a, b)]
r_ab_cov = pair_cov_radii_mat[(a, b)]
return (0.3601 * exp(((- 1.944) * (r_ab - r_ab_cov))))
def h_bend(indices):
(b, a, c) = indices
r_ab = dist_mat[(a, b)]
r_ac = dist_mat[(a, c)]
r_ab_cov = pair_cov_radii_mat[(a, b)]
r_ac_cov = pair_cov_radii_mat[(a, c)]
return (0.089 + ((0.11 / ((r_ab_cov * r_ac_cov) ** (- 0.42))) * exp(((- 0.44) * (((r_ab + r_ac) - r_ab_cov) - r_ac_cov)))))
def h_dihedral(indices):
(c, a, b, d) = indices
r_ab = dist_mat[(a, b)]
r_ab_cov = pair_cov_radii_mat[(a, b)]
bond_sum = max(tors_atom_bonds[(a, b)], 0)
return (0.0015 + (((14.0 * (bond_sum ** 0.57)) / ((r_ab * r_ab_cov) ** 4.0)) * exp(((- 2.85) * (r_ab - r_ab_cov)))))
H = improved_guess(geom, bond_func=h_bond, bend_func=h_bend, dihedral_func=h_dihedral)
return H |
class DockerImageController():
def __init__(self, docker_image: DockerImage) -> None:
self._running_procs = []
self._image = docker_image
def __enter__(self):
running_images = self._image.list_processes()
log.debug(f'Currently Running images: {running_images}')
self._running_procs = running_images
return self
def __exit__(self, type_param, value, traceback) -> None:
running_images = self._image.list_processes()
images_to_stop = filter((lambda img: (img not in self._running_procs)), running_images)
for image_name in images_to_stop:
log.debug(f'Stopping image: {image_name}')
self._image.stop_image(image_name)
def run(self, image_name: str, run_parameters: DockerRunParameters) -> Union[(bytearray, None)]:
client = self._image.get_docker_client()
try:
return client.containers.run(image_name, stdout=True, stderr=True, detach=False, **run_parameters._asdict())
except docker.errors.ContainerError as e:
error_logs = e.container.logs()
log.error(f'Failed to run benchmarking test in image: {image_name}, error message: {e}, container logs: {error_logs}')
exit() |
class flow_mod_failed_error_msg(error_msg):
version = 6
type = 1
err_type = 5
def __init__(self, xid=None, code=None, data=None):
if (xid != None):
self.xid = xid
else:
self.xid = None
if (code != None):
self.code = code
else:
self.code = 0
if (data != None):
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack('!B', self.version))
packed.append(struct.pack('!B', self.type))
packed.append(struct.pack('!H', 0))
packed.append(struct.pack('!L', self.xid))
packed.append(struct.pack('!H', self.err_type))
packed.append(struct.pack('!H', self.code))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[2] = struct.pack('!H', length)
return ''.join(packed)
def unpack(reader):
obj = flow_mod_failed_error_msg()
_version = reader.read('!B')[0]
assert (_version == 6)
_type = reader.read('!B')[0]
assert (_type == 1)
_length = reader.read('!H')[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.xid = reader.read('!L')[0]
_err_type = reader.read('!H')[0]
assert (_err_type == 5)
obj.code = reader.read('!H')[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if (type(self) != type(other)):
return False
if (self.xid != other.xid):
return False
if (self.code != other.code):
return False
if (self.data != other.data):
return False
return True
def pretty_print(self, q):
q.text('flow_mod_failed_error_msg {')
with q.group():
with q.indent(2):
q.breakable()
q.text('xid = ')
if (self.xid != None):
q.text(('%#x' % self.xid))
else:
q.text('None')
q.text(',')
q.breakable()
q.text('code = ')
value_name_map = {0: 'OFPFMFC_UNKNOWN', 1: 'OFPFMFC_TABLE_FULL', 2: 'OFPFMFC_BAD_TABLE_ID', 3: 'OFPFMFC_OVERLAP', 4: 'OFPFMFC_EPERM', 5: 'OFPFMFC_BAD_TIMEOUT', 6: 'OFPFMFC_BAD_COMMAND', 7: 'OFPFMFC_BAD_FLAGS', 8: 'OFPFMFC_CANT_SYNC', 9: 'OFPFMFC_BAD_PRIORITY', 10: 'OFPFMFC_IS_SYNC'}
if (self.code in value_name_map):
q.text(('%s(%d)' % (value_name_map[self.code], self.code)))
else:
q.text(('%#x' % self.code))
q.text(',')
q.breakable()
q.text('data = ')
q.pp(self.data)
q.breakable()
q.text('}') |
class EquipmentHandler():
save_attribute = 'inventory_slots'
def __init__(self, obj):
self.obj = obj
self._load()
def _load(self):
self.slots = self.obj.attributes.get(self.save_attribute, category='inventory', default={WieldLocation.WEAPON_HAND: None, WieldLocation.SHIELD_HAND: None, WieldLocation.TWO_HANDS: None, WieldLocation.BODY: None, WieldLocation.HEAD: None, WieldLocation.BACKPACK: []})
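# Prune references to objects that were deleted since the last save
# (a deleted object has no .id).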
self.slots[WieldLocation.BACKPACK] = [obj for obj in self.slots[WieldLocation.BACKPACK] if (obj and obj.id)]
def _save(self):
self.obj.attributes.add(self.save_attribute, self.slots, category='inventory')
def count_slots(self):
slots = self.slots
wield_usage = sum(((getattr(slotobj, 'size', 0) or 0) for (slot, slotobj) in slots.items() if (slot is not WieldLocation.BACKPACK)))
backpack_usage = sum(((getattr(slotobj, 'size', 0) or 0) for slotobj in slots[WieldLocation.BACKPACK]))
return (wield_usage + backpack_usage)
def max_slots(self):
return (getattr(self.obj, Ability.CON.value, 1) + 10)
def validate_slot_usage(self, obj):
if (not inherits_from(obj, EvAdventureObject)):
raise EquipmentError(f'{obj.key} is not something that can be equipped.')
size = obj.size
max_slots = self.max_slots
current_slot_usage = self.count_slots()
if ((current_slot_usage + size) > max_slots):
slots_left = (max_slots - current_slot_usage)
raise EquipmentError(f'Equipment full ($int2str({slots_left}) slots remaining, {obj.key} needs $int2str({size}) $pluralize(slot, {size})).')
return True
def get_current_slot(self, obj):
for (equipment_item, slot) in self.all():
if (obj == equipment_item):
return slot
def armor(self):
slots = self.slots
return sum((getattr(slots[WieldLocation.BODY], 'armor', 1), getattr(slots[WieldLocation.SHIELD_HAND], 'armor', 0), getattr(slots[WieldLocation.HEAD], 'armor', 0)))
def weapon(self):
slots = self.slots
weapon = slots[WieldLocation.TWO_HANDS]
if (not weapon):
weapon = slots[WieldLocation.WEAPON_HAND]
if (not weapon):
weapon = get_bare_hands()
return weapon
def display_loadout(self):
slots = self.slots
weapon_str = 'You are fighting with your bare fists'
shield_str = ' and have no shield.'
armor_str = 'You wear no armor'
helmet_str = ' and no helmet.'
two_hands = slots[WieldLocation.TWO_HANDS]
if two_hands:
weapon_str = f'You wield {two_hands} with both hands'
shield_str = " (you can't hold a shield at the same time)."
else:
one_hands = slots[WieldLocation.WEAPON_HAND]
if one_hands:
weapon_str = f'You are wielding {one_hands} in one hand.'
shield = slots[WieldLocation.SHIELD_HAND]
if shield:
shield_str = f'You have {shield} in your off hand.'
armor = slots[WieldLocation.BODY]
if armor:
armor_str = f'You are wearing {armor}'
helmet = slots[WieldLocation.HEAD]
if helmet:
helmet_str = f' and {helmet} on your head.'
return f'''{weapon_str}{shield_str}
{armor_str}{helmet_str}'''
def display_backpack(self):
backpack = self.slots[WieldLocation.BACKPACK]
if (not backpack):
return 'Backpack is empty.'
out = []
for item in backpack:
out.append(f'{item.key} [|b{item.size}|n] slot(s)')
return '\n'.join(out)
def display_slot_usage(self):
return f'|b{self.count_slots()}/{self.max_slots}|n'
def move(self, obj):
self.remove(obj)
self.validate_slot_usage(obj)
slots = self.slots
use_slot = getattr(obj, 'inventory_use_slot', WieldLocation.BACKPACK)
to_backpack = []
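# Equipping to a slot displaces whatever conflicts with it (e.g. a
# two-handed weapon frees both single-hand slots) into the backpack.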
if (use_slot is WieldLocation.TWO_HANDS):
to_backpack = [slots[WieldLocation.WEAPON_HAND], slots[WieldLocation.SHIELD_HAND]]
slots[WieldLocation.WEAPON_HAND] = slots[WieldLocation.SHIELD_HAND] = None
slots[use_slot] = obj
elif (use_slot in (WieldLocation.WEAPON_HAND, WieldLocation.SHIELD_HAND)):
to_backpack = [slots[WieldLocation.TWO_HANDS]]
slots[WieldLocation.TWO_HANDS] = None
slots[use_slot] = obj
elif (use_slot is WieldLocation.BACKPACK):
to_backpack = [obj]
else:
to_backpack = [slots[use_slot]]
slots[use_slot] = obj
for to_backpack_obj in to_backpack:
if to_backpack_obj:
slots[WieldLocation.BACKPACK].append(to_backpack_obj)
self._save()
def add(self, obj):
self.validate_slot_usage(obj)
self.slots[WieldLocation.BACKPACK].append(obj)
self._save()
def remove(self, obj_or_slot):
slots = self.slots
ret = []
if isinstance(obj_or_slot, WieldLocation):
if (obj_or_slot is WieldLocation.BACKPACK):
ret.extend(slots[obj_or_slot])
slots[obj_or_slot] = []
else:
ret.append(slots[obj_or_slot])
slots[obj_or_slot] = None
elif (obj_or_slot in self.slots.values()):
for (slot, objslot) in slots.items():
if (objslot is obj_or_slot):
slots[slot] = None
ret.append(objslot)
elif (obj_or_slot in slots[WieldLocation.BACKPACK]):
try:
slots[WieldLocation.BACKPACK].remove(obj_or_slot)
ret.append(obj_or_slot)
except ValueError:
pass
if ret:
self._save()
return ret
def get_wieldable_objects_from_backpack(self):
return [obj for obj in self.slots[WieldLocation.BACKPACK] if (obj and obj.id and (obj.inventory_use_slot in (WieldLocation.WEAPON_HAND, WieldLocation.TWO_HANDS, WieldLocation.SHIELD_HAND)))]
def get_wearable_objects_from_backpack(self):
return [obj for obj in self.slots[WieldLocation.BACKPACK] if (obj and obj.id and (obj.inventory_use_slot in (WieldLocation.BODY, WieldLocation.HEAD)))]
def get_usable_objects_from_backpack(self):
character = self.obj
return [obj for obj in self.slots[WieldLocation.BACKPACK] if (obj and obj.at_pre_use(character))]
def all(self, only_objs=False):
slots = self.slots
lst = ([(slots[WieldLocation.WEAPON_HAND], WieldLocation.WEAPON_HAND), (slots[WieldLocation.SHIELD_HAND], WieldLocation.SHIELD_HAND), (slots[WieldLocation.TWO_HANDS], WieldLocation.TWO_HANDS), (slots[WieldLocation.BODY], WieldLocation.BODY), (slots[WieldLocation.HEAD], WieldLocation.HEAD)] + [(item, WieldLocation.BACKPACK) for item in slots[WieldLocation.BACKPACK]])
if only_objs:
return [tup[0] for tup in lst if tup[0]]
return [tup for tup in lst] |
def edsc_log(apiCall, params, jsonData=None, error=None):
try:
with open('tmp/edsc.log', 'a', encoding='utf-8') as fh:
print(('-' * 70), file=fh)
print('API:', apiCall, file=fh)
print('REQ:', str(params), file=fh)
if jsonData:
print('REP:', json.dumps(jsonData, indent=1), file=fh)
if error:
print('ERR:', error, file=fh)
print(file=fh)
except FileNotFoundError:
pass |
class OptionSeriesVariablepieStatesHoverHalo(Options):
def attributes(self):
return self._config_get(None)
def attributes(self, value: Any):
self._config(value, js_type=False)
def opacity(self):
return self._config_get(0.25)
def opacity(self, num: float):
self._config(num, js_type=False)
def size(self):
return self._config_get(10)
def size(self, num: float):
self._config(num, js_type=False) |
class N64SegVtx(CommonSegCodeSubsegment):
def __init__(self, rom_start: Optional[int], rom_end: Optional[int], type: str, name: str, vram_start: Optional[int], args: list, yaml):
super().__init__(rom_start, rom_end, type, name, vram_start, args=args, yaml=yaml)
self.file_text: Optional[str] = None
self.data_only = (isinstance(yaml, dict) and yaml.get('data_only', False))
def format_sym_name(self, sym) -> str:
return sym.name
def get_linker_section(self) -> str:
return '.data'
def out_path(self) -> Path:
return ((options.opts.asset_path / self.dir) / f'{self.name}.vtx.inc.c')
def scan(self, rom_bytes: bytes):
self.file_text = self.disassemble_data(rom_bytes)
def disassemble_data(self, rom_bytes) -> str:
assert isinstance(self.rom_start, int)
assert isinstance(self.rom_end, int)
assert isinstance(self.vram_start, int)
vertex_data = rom_bytes[self.rom_start:self.rom_end]
segment_length = len(vertex_data)
if ((segment_length % 16) != 0):
log.error(f'Error: Vtx segment {self.name} length ({segment_length}) is not a multiple of 16!')
lines = []
if (not self.data_only):
lines.append(options.opts.generated_c_preamble)
lines.append('')
vertex_count = (segment_length // 16)
sym = self.create_symbol(addr=self.vram_start, in_segment=True, type='data', define=True)
if (not self.data_only):
lines.append(f'Vtx {self.format_sym_name(sym)}[{vertex_count}] = {{')
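# Each N64 Vtx is 16 big-endian bytes: position (3 x s16), a flag (u16),
# texture coords (2 x s16) and an RGBA colour (4 x u8).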
for vtx in struct.iter_unpack('>hhhHhhBBBB', vertex_data):
(x, y, z, flg, t, c, r, g, b, a) = vtx
vtx_string = f' {{{{{{ {x:5}, {y:5}, {z:5} }}, {flg}, {{ {t:5}, {c:5} }}, {{ {r:3}, {g:3}, {b:3}, {a:3} }}}}}},'
if (flg != 0):
self.warn(f'Non-zero flag found in vertex data {self.name}!')
lines.append(vtx_string)
if (not self.data_only):
lines.append('};')
lines.append('')
return '\n'.join(lines)
def split(self, rom_bytes: bytes):
if (self.file_text and self.out_path()):
self.out_path().parent.mkdir(parents=True, exist_ok=True)
with open(self.out_path(), 'w', newline='\n') as f:
f.write(self.file_text)
def should_scan(self) -> bool:
return options.opts.is_mode_active('vtx')
def should_split(self) -> bool:
return (self.extract and options.opts.is_mode_active('vtx')) |
class Solution():
def numFactoredBinaryTrees(self, arr: List[int]) -> int:
arr.sort()
tr = {}
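# tr[n]: number of binary trees rooted at value n; since arr is sorted,
# every factor pair (k, n // k) is already in tr when n is reached.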
s = 0
for n in arr:
t = 1
for k in tr:
if ((n % k) != 0):
continue
d = (n // k)
if (d not in tr):
continue
t += (tr[d] * tr[k])
tr[n] = t
s = ((s + t) % ((10 ** 9) + 7))
return s |
class BaseTopicFormView(BasePostFormView):
poll_option_formset_class = TopicPollOptionFormset
poll_option_formset_general_error_message = _('There are some errors in the poll options you submitted.')
post_form_class = TopicForm
def get(self, request, *args, **kwargs):
self.init_attachment_cache()
post_form_class = self.get_post_form_class()
post_form = self.get_post_form(post_form_class)
attachment_formset_class = self.get_attachment_formset_class()
attachment_formset = self.get_attachment_formset(attachment_formset_class)
poll_option_formset_class = self.get_poll_option_formset_class()
poll_option_formset = self.get_poll_option_formset(poll_option_formset_class)
return self.render_to_response(self.get_context_data(post_form=post_form, attachment_formset=attachment_formset, poll_option_formset=poll_option_formset))
def post(self, request, *args, **kwargs):
self.init_attachment_cache()
self.preview = ('preview' in self.request.POST)
post_form_class = self.get_post_form_class()
post_form = self.get_post_form(post_form_class)
attachment_formset_class = self.get_attachment_formset_class()
attachment_formset = self.get_attachment_formset(attachment_formset_class)
poll_option_formset_class = self.get_poll_option_formset_class()
poll_option_formset = self.get_poll_option_formset(poll_option_formset_class)
post_form_valid = post_form.is_valid()
attachment_formset_valid = (attachment_formset.is_valid() if attachment_formset else None)
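# Poll options are only validated when the user actually submitted a
# poll question; otherwise they are skipped entirely (None).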
poll_option_formset_valid = (poll_option_formset.is_valid() if (poll_option_formset and len(post_form.cleaned_data['poll_question'])) else None)
self.attachment_preview = (self.preview if attachment_formset_valid else None)
self.poll_preview = (self.preview if poll_option_formset_valid else None)
poll_options_validated = (poll_option_formset_valid is not None)
if (post_form_valid and (attachment_formset_valid is not False) and (poll_option_formset_valid is not False)):
return self.form_valid(post_form, attachment_formset, poll_option_formset, poll_options_validated=poll_options_validated)
else:
return self.form_invalid(post_form, attachment_formset, poll_option_formset, poll_options_validated=poll_options_validated)
def get_poll_option_formset(self, formset_class):
if self.request.forum_permission_handler.can_create_polls(self.get_forum(), self.request.user):
return formset_class(**self.get_poll_option_formset_kwargs())
def get_poll_option_formset_class(self):
return self.poll_option_formset_class
def get_poll_option_formset_kwargs(self):
kwargs = {'prefix': 'poll'}
if (self.request.method in ('POST', 'PUT')):
kwargs.update({'data': self.request.POST, 'files': self.request.FILES})
else:
topic = self.get_topic()
poll_option_queryset = TopicPollOption.objects.filter(poll__topic=topic)
kwargs.update({'queryset': poll_option_queryset})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if context['poll_option_formset']:
if (hasattr(self, 'poll_preview') and self.poll_preview):
context['poll_preview'] = self.poll_preview
context['poll_options_previews'] = filter((lambda f: (f['text'].value() and (not f['DELETE'].value()))), context['poll_option_formset'].forms)
return context
def form_valid(self, post_form, attachment_formset, poll_option_formset, **kwargs):
save_poll_option_formset = ((poll_option_formset is not None) and (not self.preview) and kwargs['poll_options_validated'])
valid = super().form_valid(post_form, attachment_formset, poll_option_formset=poll_option_formset, **kwargs)
if save_poll_option_formset:
poll_option_formset.topic = self.forum_post.topic
poll_option_formset.save(poll_question=post_form.cleaned_data.pop('poll_question', None), poll_max_options=post_form.cleaned_data.pop('poll_max_options', None), poll_duration=post_form.cleaned_data.pop('poll_duration', None), poll_user_changes=post_form.cleaned_data.pop('poll_user_changes', None), poll_hide_results=post_form.cleaned_data.pop('poll_hide_results', None))
return valid
def form_invalid(self, post_form, attachment_formset, poll_option_formset, **kwargs):
poll_errors = [k for k in post_form.errors.keys() if k.startswith('poll_')]
if (poll_errors or (poll_option_formset and (not poll_option_formset.is_valid()) and len(post_form.cleaned_data['poll_question']))):
messages.error(self.request, self.poll_option_formset_general_error_message)
return super().form_invalid(post_form, attachment_formset, poll_option_formset=poll_option_formset, **kwargs) |
class TestJsonSerializable(unittest.TestCase):
def test_base_function(self):
@register_deserializable
class TestClass(JSONSerializable):
def __init__(self):
self.rng = random.random()
original_class = TestClass()
serial = original_class.serialize()
negative_test_class = TestClass()
self.assertNotEqual(original_class.rng, negative_test_class.rng)
positive_test_class: TestClass = TestClass().deserialize(serial)
self.assertEqual(original_class.rng, positive_test_class.rng)
self.assertTrue(isinstance(positive_test_class, TestClass))
positive_test_class: TestClass = TestClass.deserialize(serial)
self.assertEqual(original_class.rng, positive_test_class.rng)
def test_registration_required(self):
class SecondTestClass(JSONSerializable):
def __init__(self):
self.default = True
app = SecondTestClass()
app.default = False
serial = app.serialize()
app: SecondTestClass = SecondTestClass().deserialize(serial)
self.assertTrue(app.default)
SecondTestClass._register_class_as_deserializable(SecondTestClass)
app: SecondTestClass = SecondTestClass().deserialize(serial)
self.assertFalse(app.default)
def test_recursive(self):
random_id = str(random.random())
config = AppConfig(id=random_id, collect_metrics=False)
app = App(config=config)
s = app.serialize()
new_app: App = App.deserialize(s)
self.assertEqual(random_id, new_app.config.id)
def test_special_subclasses(self):
config = BaseLlmConfig(template=Template('My custom template with $query, $context and $history.'))
s = config.serialize()
new_config: BaseLlmConfig = BaseLlmConfig.deserialize(s)
self.assertEqual(config.prompt.template, new_config.prompt.template) |
def resource_permissions_for_users(models_proxy, perm_names, resource_ids=None, user_ids=None, group_ids=None, resource_types=None, limit_group_permissions=False, skip_user_perms=False, skip_group_perms=False, db_session=None):
db_session = get_db_session(db_session)
query = db_session.query(models_proxy.GroupResourcePermission.perm_name, models_proxy.User, models_proxy.Group, sa.literal('group').label('type'), models_proxy.Resource)
query = query.join(models_proxy.Group, (models_proxy.Group.id == models_proxy.GroupResourcePermission.group_id))
query = query.join(models_proxy.Resource, (models_proxy.Resource.resource_id == models_proxy.GroupResourcePermission.resource_id))
if limit_group_permissions:
query = query.outerjoin(models_proxy.User, (models_proxy.User.id == None))
else:
query = query.join(models_proxy.UserGroup, (models_proxy.UserGroup.group_id == models_proxy.GroupResourcePermission.group_id))
query = query.outerjoin(models_proxy.User, (models_proxy.User.id == models_proxy.UserGroup.user_id))
if resource_ids:
query = query.filter(models_proxy.GroupResourcePermission.resource_id.in_(resource_ids))
if resource_types:
query = query.filter(models_proxy.Resource.resource_type.in_(resource_types))
if ((perm_names not in ([ANY_PERMISSION], ANY_PERMISSION)) and perm_names):
query = query.filter(models_proxy.GroupResourcePermission.perm_name.in_(perm_names))
if group_ids:
query = query.filter(models_proxy.GroupResourcePermission.group_id.in_(group_ids))
if (user_ids and (not limit_group_permissions)):
query = query.filter(models_proxy.UserGroup.user_id.in_(user_ids))
query2 = db_session.query(models_proxy.UserResourcePermission.perm_name, models_proxy.User, models_proxy.Group, sa.literal('user').label('type'), models_proxy.Resource)
query2 = query2.join(models_proxy.User, (models_proxy.User.id == models_proxy.UserResourcePermission.user_id))
query2 = query2.join(models_proxy.Resource, (models_proxy.Resource.resource_id == models_proxy.UserResourcePermission.resource_id))
query2 = query2.outerjoin(models_proxy.Group, (models_proxy.Group.id == None))
if ((perm_names not in ([ANY_PERMISSION], ANY_PERMISSION)) and perm_names):
query2 = query2.filter(models_proxy.UserResourcePermission.perm_name.in_(perm_names))
if resource_ids:
query2 = query2.filter(models_proxy.UserResourcePermission.resource_id.in_(resource_ids))
if resource_types:
query2 = query2.filter(models_proxy.Resource.resource_type.in_(resource_types))
if user_ids:
query2 = query2.filter(models_proxy.UserResourcePermission.user_id.in_(user_ids))
if ((not skip_group_perms) and (not skip_user_perms)):
query = query.union(query2)
elif skip_group_perms:
query = query2
users = [PermissionTuple(row.User, row.perm_name, row.type, (row.Group or None), row.Resource, False, True) for row in query]
return users |
def get_excluded_deps_which_are_not_installed(excluded_deps: List[str], venvs: List[str]) -> List[str]:
dependency_names: List[str] = []
if (not excluded_deps):
return dependency_names
excluded_deps_canonicalized = [canonicalize_module_name(arg) for arg in excluded_deps]
for excluded_dep_name in excluded_deps_canonicalized:
for venv in venvs:
if (excluded_dep_name not in get_installed_dependency_names(venv)):
dependency_names.append(excluded_dep_name)
dependency_names = list(set(dependency_names))
if dependency_names:
logger.warning(f"Excluded dependencies not found in virtual environment: {', '.join(dependency_names)}")
return dependency_names |
class OptionPlotoptionsLineStates(Options):
def hover(self) -> 'OptionPlotoptionsLineStatesHover':
return self._config_sub_data('hover', OptionPlotoptionsLineStatesHover)
def inactive(self) -> 'OptionPlotoptionsLineStatesInactive':
return self._config_sub_data('inactive', OptionPlotoptionsLineStatesInactive)
def normal(self) -> 'OptionPlotoptionsLineStatesNormal':
return self._config_sub_data('normal', OptionPlotoptionsLineStatesNormal)
def select(self) -> 'OptionPlotoptionsLineStatesSelect':
return self._config_sub_data('select', OptionPlotoptionsLineStatesSelect) |
class PodFunctionTask(PythonFunctionTask[Pod]):
def __init__(self, task_config: Pod, task_function: Callable, **kwargs):
super(PodFunctionTask, self).__init__(task_config=task_config, task_type='sidecar', task_function=task_function, task_type_version=2, **kwargs)
def _serialize_pod_spec(self, settings: SerializationSettings) -> Dict[(str, Any)]:
containers = self.task_config.pod_spec.containers
primary_exists = False
for container in containers:
if (container.name == self.task_config.primary_container_name):
primary_exists = True
break
if (not primary_exists):
containers.append(k8s_models.V1Container(name=self.task_config.primary_container_name))
final_containers = []
for container in containers:
if (container.name == self.task_config.primary_container_name):
sdk_default_container = super().get_container(settings)
container.image = sdk_default_container.image
container.command = sdk_default_container.command
container.args = sdk_default_container.args
(limits, requests) = ({}, {})
for resource in sdk_default_container.resources.limits:
limits[_sanitize_resource_name(resource)] = resource.value
for resource in sdk_default_container.resources.requests:
requests[_sanitize_resource_name(resource)] = resource.value
resource_requirements = k8s_models.V1ResourceRequirements(limits=limits, requests=requests)
if ((len(limits) > 0) or (len(requests) > 0)):
container.resources = resource_requirements
container.env = ([k8s_models.V1EnvVar(name=key, value=val) for (key, val) in sdk_default_container.env.items()] + (container.env or []))
final_containers.append(container)
self.task_config.pod_spec.containers = final_containers
return k8s_client.ApiClient().sanitize_for_serialization(self.task_config.pod_spec)
def get_k8s_pod(self, settings: SerializationSettings) -> _task_models.K8sPod:
return _task_models.K8sPod(pod_spec=self._serialize_pod_spec(settings), metadata=_task_models.K8sObjectMetadata(labels=self.task_config.labels, annotations=self.task_config.annotations))
def get_container(self, settings: SerializationSettings) -> _task_models.Container:
return None
def get_config(self, settings: SerializationSettings) -> Dict[(str, str)]:
return {_PRIMARY_CONTAINER_NAME_FIELD: self.task_config.primary_container_name}
def local_execute(self, ctx: FlyteContext, **kwargs) -> Union[(Tuple[Promise], Promise, None)]:
logger.warning('Running pod task locally. Local environment may not match pod environment which may cause issues.')
return super().local_execute(ctx=ctx, **kwargs) |
def test_create_speaker_without_email(db, client, user, jwt):
event = get_event(db)
db.session.commit()
data = json.dumps({'data': {'type': 'speaker', 'attributes': {'name': 'Areeb Jamal'}, 'relationships': {'event': {'data': {'id': str(event.id), 'type': 'event'}}}}})
response = client.post('/v1/speakers', content_type='application/vnd.api+json', headers=jwt, data=data)
assert (response.status_code == 201)
assert (json.loads(response.data)['data']['attributes']['email'] == user._email)
data = json.dumps({'data': {'type': 'speaker', 'attributes': {'name': 'Areeb Jamal', 'is-email-overridden': True}, 'relationships': {'event': {'data': {'id': str(event.id), 'type': 'event'}}}}})
response = client.post('/v1/speakers', content_type='application/vnd.api+json', headers=jwt, data=data)
assert (response.status_code == 403)
assert (json.loads(response.data) == {'errors': [{'detail': 'Organizer access required to override email', 'source': {'pointer': '/data/attributes/is_email_overridden'}, 'status': 403, 'title': 'Access Forbidden'}], 'jsonapi': {'version': '1.0'}}) |
class TestRewriteParameters():
    @property
    def calls(self):
if (not hasattr(self, '_calls')):
self._calls = []
return self._calls
def options(self, *args, **kwargs):
self.calls.append((args, kwargs))
return self
    @_rewrite_parameters()
def wrapped_func_default(self, *args, **kwargs):
self.calls.append((args, kwargs))
    @_rewrite_parameters(body_name='document')
def wrapped_func_body_name(self, *args, **kwargs):
self.calls.append((args, kwargs))
    @_rewrite_parameters(body_fields=('query', 'source'))
def wrapped_func_body_fields(self, *args, **kwargs):
self.calls.append((args, kwargs))
    @_rewrite_parameters(body_fields=('query',), ignore_deprecated_options={'api_key', 'body', 'params'})
def wrapped_func_ignore(self, *args, **kwargs):
self.calls.append((args, kwargs))
    @_rewrite_parameters(body_fields=('source',), parameter_aliases={'_source': 'source'})
def wrapped_func_aliases(self, *args, **kwargs):
self.calls.append((args, kwargs))
def test_default(self):
with warnings.catch_warnings(record=True) as w:
self.wrapped_func_default(api_key=('id', 'api_key'), query={'match_all': {}}, params={'key': 'value', 'ignore': 404})
assert (len(w) == 2)
assert (w[0].category == DeprecationWarning)
assert (str(w[0].message) == "The 'params' parameter is deprecated and will be removed in a future version. Instead use individual parameters.")
assert (w[1].category == DeprecationWarning)
assert (str(w[1].message) == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead.")
assert (self.calls == [((), {'api_key': ('id', 'api_key'), 'ignore_status': 404}), ((), {'query': {'match_all': {}}, 'key': 'value'})])
def test_default_params_conflict(self):
with pytest.raises(ValueError) as e:
self.wrapped_func_default(query={'match_all': {}}, params={'query': {'match_all': {}}})
assert (str(e.value) == "Received multiple values for 'query', specify parameters directly instead of using 'params'")
def test_body_name_using_body(self):
with warnings.catch_warnings(record=True) as w:
self.wrapped_func_body_name(api_key=('id', 'api_key'), body={'query': {'match_all': {}}})
assert (len(w) == 1)
assert (w[0].category == DeprecationWarning)
assert (str(w[0].message) == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead.")
assert (self.calls == [((), {'api_key': ('id', 'api_key')}), ((), {'document': {'query': {'match_all': {}}}})])
def test_body_name(self):
with warnings.catch_warnings(record=True) as w:
self.wrapped_func_body_name(api_key=('id', 'api_key'), document={'query': {'match_all': {}}})
assert (len(w) == 1)
assert (w[0].category == DeprecationWarning)
assert (str(w[0].message) == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead.")
assert (self.calls == [((), {'api_key': ('id', 'api_key')}), ((), {'document': {'query': {'match_all': {}}}})])
def test_body_name_duplicate(self):
with pytest.raises(TypeError) as e:
self.wrapped_func_body_name(body={}, document={})
assert (str(e.value) == "Can't use 'document' and 'body' parameters together because 'document' is an alias for 'body'. Instead you should only use the 'document' parameter. See for more information")
def test_body_fields(self):
with warnings.catch_warnings(record=True) as w:
self.wrapped_func_body_fields(api_key=('id', 'api_key'), body={'query': {'match_all': {}}})
assert (len(w) == 1)
assert (w[0].category == DeprecationWarning)
assert (str(w[0].message) == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead.")
assert (self.calls == [((), {'api_key': ('id', 'api_key')}), ((), {'body': {'query': {'match_all': {}}}})])
    @pytest.mark.parametrize('body, kwargs', [('{"query": {"match_all": {}}}', {'query': {'match_all': {}}}), (b'{"query": {"match_all": {}}}', {'query': {'match_all': {}}})])
def test_error_on_body_merge(self, body, kwargs):
with pytest.raises(ValueError) as e:
self.wrapped_func_body_fields(body=body, **kwargs)
assert (str(e.value) == "Couldn't merge 'body' with other parameters as it wasn't a mapping.")
    @pytest.mark.parametrize('params', ['{"query": {"match_all": {}}}', b'{"query": {"match_all": {}}}'])
def test_error_on_params_merge(self, params):
with pytest.raises(ValueError) as e:
self.wrapped_func_body_fields(params=params)
assert (str(e.value) == "Couldn't merge 'params' with other parameters as it wasn't a mapping. Instead of using 'params' use individual API parameters")
def test_body_fields_merge(self):
with warnings.catch_warnings(record=True) as w:
self.wrapped_func_body_fields(source=False, body={'query': {}})
assert (len(w) == 1)
assert (w[0].category == DeprecationWarning)
assert (str(w[0].message) == "Received 'source' via a specific parameter in the presence of a 'body' parameter, which is deprecated and will be removed in a future version. Instead, use only 'body' or only specific paremeters.")
def test_body_fields_conflict(self):
with pytest.raises(ValueError) as e:
self.wrapped_func_body_fields(query={'match_all': {}}, body={'query': {}})
assert (str(e.value) == "Received multiple values for 'query', specify parameters using either body or parameters, not both.")
def test_ignore_deprecated_options(self):
with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_ignore(api_key=('id', 'api_key'), body={'query': {'match_all': {}}}, params={'key': 'value'}, param=1, http_auth=('key', 'value'))
assert (len(w) == 1)
assert (w[0].category == DeprecationWarning)
assert (str(w[0].message) == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead.")
        assert (self.calls == [((), {'http_auth': ('key', 'value')}), ((), {'api_key': ('id', 'api_key'), 'body': {'query': {'match_all': {}}}, 'params': {'key': 'value'}, 'param': 1})])
def test_parameter_aliases(self):
self.wrapped_func_aliases(_source=['key1', 'key2'])
assert (self.calls == [((), {'source': ['key1', 'key2']})])
self.wrapped_func_aliases(source=['key3'])
assert (self.calls[(- 1)] == ((), {'source': ['key3']}))
    @pytest.mark.parametrize('client_cls', [Elasticsearch, AsyncElasticsearch])
def test_positional_argument_error(self, client_cls):
        client = client_cls('https://localhost:9200')
with pytest.raises(TypeError) as e:
client.search('index')
assert (str(e.value) == "Positional arguments can't be used with Elasticsearch API methods. Instead only use keyword arguments.")
with pytest.raises(TypeError) as e:
client.indices.exists('index')
assert (str(e.value) == "Positional arguments can't be used with Elasticsearch API methods. Instead only use keyword arguments.") |
def sort(tree_id, node_id, key_text, reverse):
t = load_tree(tree_id)
try:
code = compile(key_text, '<string>', 'eval')
except SyntaxError as e:
abort(400, f'compiling expression: {e}')
def key(node):
return safer_eval(code, {'node': node, 'name': node.name, 'is_leaf': node.is_leaf, 'length': node.dist, 'dist': node.dist, 'd': node.dist, 'size': node.size, 'dx': node.size[0], 'dy': node.size[1], 'children': node.children, 'ch': node.children, 'len': len, 'sum': sum, 'abs': abs})
ops.sort(t[node_id], key, reverse) |
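# A minimal, self-contained sketch of the compile-once / eval-per-item pattern used
# in sort() above. The names below (key_text, make_key) are illustrative only, and
# safer_eval in the real code applies stricter sandboxing than this bare eval.
key_text = 'length * 2'
code = compile(key_text, '<string>', 'eval')

def make_key(code):
    # Expose only an explicit namespace; an empty __builtins__ blocks most escapes.
    def key(item):
        return eval(code, {'__builtins__': {}}, {'length': item})
    return key

print(sorted([3, 1, 2], key=make_key(code)))  # -> [1, 2, 3] |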
def _option_spawn(*suboptions):
grid = get_xyzgrid()
def _log(msg):
print(msg)
grid.log = _log
if suboptions:
opts = ''.join(suboptions).strip('()')
try:
(x, y, z) = (part.strip() for part in opts.split(','))
except ValueError:
print("spawn coordinate must be given as (X, Y, Z) tuple, where '*' act wild cards and Z is the mapname/z-coord of the map to load.")
return
else:
(x, y, z) = ('*', '*', '*')
if (x == y == z == '*'):
        inp = input('This will (re)spawn the entire grid. If it was built before, it may spawn \nnew rooms or delete rooms that no longer match the grid.\nDo you want to continue? [Y]/N? ')
else:
inp = input(f'''This will spawn/delete objects in the database matching grid coordinates
({x},{y},{z}) (where '*' is a wildcard).
Do you want to continue? [Y]/N? ''')
if (inp.lower() in ('no', 'n')):
print('Aborted.')
return
print('Starting spawn ...')
grid.spawn(xyz=(x, y, z))
print("... spawn complete!\nIt's recommended to reload the server to refresh caches if this modified an existing grid.") |
@pytest.mark.django_db
def test_budget_function_count_ignore_duplicates(client, agency_account_data):
resp = client.get(url.format(code='009', filter='?fiscal_year=2019'))
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.data['budget_function_count'] == 1)
assert (resp.data['budget_sub_function_count'] == 1) |
def checkSensors():
procarr2 = []
for x in range(0, len(Settings.Tasks)):
if (Settings.Tasks[x] and (type(Settings.Tasks[x]) is not bool)):
try:
if Settings.Tasks[x].enabled:
if Settings.Tasks[x].is_read_timely():
t2 = threading.Thread(target=Settings.Tasks[x].plugin_read)
t2.daemon = True
procarr2.append(t2)
t2.start()
            except Exception:
                pass  # a failing plugin read must not break the polling loop
if (len(procarr2) > 0):
for process2 in procarr2:
process2.join()
return True |
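# The fan-out/join pattern in checkSensors(), shown in isolation: one daemon
# thread per due task, then a join over all of them. Sketch only; plugin_read
# here stands in for the real Settings.Tasks[x].plugin_read.
import threading
import time

def plugin_read(n):
    time.sleep(0.01)
    print(f'sensor {n} read')

threads = []
for n in range(3):
    t = threading.Thread(target=plugin_read, args=(n,))
    t.daemon = True  # do not block interpreter shutdown
    threads.append(t)
    t.start()
for t in threads:
    t.join()  # wait for every read before returning, as checkSensors() does |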
class OptionPlotoptionsNetworkgraphSonificationContexttracksMappingTime(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class TestSolidBarcodeStatistics(unittest.TestCase):
def setUp(self):
self.tmp_stats_file = TestUtils().make_barcode_statistics_file()
self.stats = SolidBarcodeStatistics(self.tmp_stats_file)
def tearDown(self):
os.remove(self.tmp_stats_file)
def test_solid_barcode_statistics(self):
self.assertTrue(isinstance(self.stats, SolidBarcodeStatistics))
self.assertTrue(self.stats)
def test_nRows(self):
self.assertEqual(31, self.stats.nRows())
def test_header(self):
self.assertEqual(['Library', 'Barcode', '0 Mismatches', '1 Mismatch', 'Total'], self.stats.header)
def test_get_data_by_name(self):
self.assertEqual(['All Beads', 'Totals', '', '', ''], self.stats.getDataByName('All Beads'))
self.assertEqual(None, self.stats.getDataByName('All beads'))
def test_total_reads(self):
self.assertEqual(, self.stats.totalReads())
def test_nonexistent_barcode_stats_file(self):
stats = SolidBarcodeStatistics('i_dont_exist')
self.assertFalse(stats) |
def validate_mkey(params):
selector = params['selector']
selector_params = params.get('params', {})
if (selector not in MODULE_MKEY_DEFINITONS):
return (False, {'message': ('unknown selector: ' + selector)})
definition = MODULE_MKEY_DEFINITONS.get(selector, {})
if ((not selector_params) or (len(selector_params) == 0) or (len(definition) == 0)):
return (True, {})
mkey = definition['mkey']
mkey_type = definition['mkey_type']
if (mkey_type is None):
return (False, {'message': ('params are not allowed for ' + selector)})
mkey_value = selector_params.get(mkey)
if (not mkey_value):
return (False, {'message': (("param '" + mkey) + "' is required")})
if (not isinstance(mkey_value, mkey_type)):
return (False, {'message': (((("param '" + mkey) + "' does not match, ") + str(mkey_type)) + ' required')})
return (True, {}) |
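# Example of the (ok, result) contract of validate_mkey(), using a made-up
# definitions table; 'firewall_address' and its 'name' mkey are illustrative,
# not entries from the real MODULE_MKEY_DEFINITONS.
MODULE_MKEY_DEFINITONS = {'firewall_address': {'mkey': 'name', 'mkey_type': str}}

ok, result = validate_mkey({'selector': 'firewall_address', 'params': {'name': 'intranet'}})
assert ok and (result == {})

ok, result = validate_mkey({'selector': 'firewall_address', 'params': {'name': 42}})
assert (not ok)  # result['message'] reports the type mismatch |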
def python2_message_handler(addr, tags, data, source):
global monitor, patch, prefix, output_scale, output_offset
monitor.debug(('addr = %s, tags = %s, data = %s, source %s' % (addr, tags, data, OSC.getUrlStr(source))))
if (addr[0] != '/'):
addr = ('/' + addr)
if ((tags == 'f') or (tags == 'i')):
key = (prefix + addr.replace('/', '.'))
val = EEGsynth.rescale(data[0], slope=output_scale, offset=output_offset)
patch.setvalue(key, val)
else:
for i in range(len(data)):
key = ((prefix + addr.replace('/', '.')) + ('.%i' % (i + 1)))
val = EEGsynth.rescale(data[i], slope=output_scale, offset=output_offset)
patch.setvalue(key, val)
monitor.update(key, val) |
class TestPowerlawPsdGaussian(unittest.TestCase):
def test_powerlaw_psd_gaussian_scalar_output_shape(self):
n = cn.powerlaw_psd_gaussian(1, 16)
self.assertEqual(n.shape, (16,))
def test_powerlaw_psd_gaussian_vector_output_shape(self):
n = cn.powerlaw_psd_gaussian(1, (100, 5))
self.assertEqual(n.shape, (100, 5))
def test_powerlaw_psd_gaussian_output_finite(self):
n = cn.powerlaw_psd_gaussian(1, 16, fmin=0.1)
self.assertTrue(np.isfinite(n).all())
def test_var_distribution(self):
size = (100, (2 ** 16))
fmin = 0
rng = np.random.default_rng(1)
for exponent in [0.5, 1, 2]:
y = cn.powerlaw_psd_gaussian(exponent, size, fmin=fmin, random_state=rng)
ystd = y.std(axis=(- 1))
var_in = (abs((1 - ystd)) < (3 * ystd.std())).mean()
self.assertTrue((var_in > 0.95))
def test_small_sample_var(self):
rng = np.random.default_rng(1)
for nsamples in [10, 11]:
ystd = cn.powerlaw_psd_gaussian(0, (500, 500, nsamples), random_state=rng).std(axis=(- 1))
assert ((abs((1 - ystd)) < (3 * ystd.std())).mean() > 0.95)
def test_slope_distribution(self):
size = (100, (2 ** 16))
fmin = 0
rng = np.random.default_rng(1)
for exponent in [0.5, 1, 2]:
y = cn.powerlaw_psd_gaussian(exponent, size, fmin=fmin, random_state=rng)
yfft = np.fft.fft(y)
f = np.fft.fftfreq(y.shape[(- 1)])
m = (f > 0)
(fit, fcov) = np.polyfit(np.log10(f[m]), np.log10(abs((yfft[(..., m)].T ** 2))), 1, cov=True)
slope_in = ((exponent + fit[0]) < (3 * np.sqrt(fcov[(0, 0)]))).mean()
self.assertTrue((slope_in > 0.95))
def test_cumulative_scaling(self):
n_repeats = 1000
n_steps = 100
rng = np.random.default_rng(1)
y = cn.powerlaw_psd_gaussian(0, (n_repeats, n_steps), random_state=rng)
mean_squared_displacement = (y.sum(axis=(- 1)) ** 2).mean(axis=0)
standard_error = ((y.sum(axis=(- 1)) ** 2).std(axis=0) / np.sqrt(n_repeats))
assert (np.abs((n_steps - mean_squared_displacement)) < (3 * standard_error))
def test_random_state_type(self):
exp = 1
n = 5
seed = 1
good_random_states = [np.random.default_rng(seed), np.random.RandomState(seed), int(seed), np.int32(1), True, None]
for random_state in good_random_states:
cn.powerlaw_psd_gaussian(exp, n, random_state=random_state)
bad_random_states = ['1', 0.15, [1]]
for random_state in bad_random_states:
self.assertRaises(ValueError, cn.powerlaw_psd_gaussian, exp, n, random_state=random_state)
def test_random_state_reproducibility(self):
exp = 1
n = 5
seed = 1
rs1 = np.random.default_rng(seed)
rs2 = np.random.default_rng(seed)
y1 = cn.powerlaw_psd_gaussian(exp, n, random_state=rs1)
np.random.seed(123)
y2 = cn.powerlaw_psd_gaussian(exp, n, random_state=rs2)
np.testing.assert_array_equal(y1, y2) |
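# Typical call into the API exercised above (a sketch): exponent 1 is pink
# (1/f) noise, and passing a seeded Generator makes the draw reproducible.
# Per the variance tests above, samples come out with roughly unit variance.
import numpy as np
import colorednoise as cn

rng = np.random.default_rng(42)
pink = cn.powerlaw_psd_gaussian(1, 2 ** 10, random_state=rng)
print(pink.shape, pink.std())  # (1024,) and a std close to 1 |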
@PETSc.Log.EventDecorator()
@annotate_assemble
def assemble(expr, *args, **kwargs):
if isinstance(expr, (ufl.form.BaseForm, slate.TensorBase)):
return assemble_base_form(expr, *args, **kwargs)
elif isinstance(expr, ufl.core.expr.Expr):
return _assemble_expr(expr)
else:
raise TypeError(f'Unable to assemble: {expr}') |
class CustomFormTranslates(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
custom_form_id = db.Column(db.Integer, db.ForeignKey('custom_forms.id', ondelete='CASCADE'))
custom_form = db.relationship('CustomForms', backref='custom_form_translate', foreign_keys=[custom_form_id])
language_code = db.Column(db.String, nullable=False)
form_id = db.Column(db.String, nullable=False)
def __repr__(self):
return f'<CustomFormTranslate {self.id}>'
def convert_to_dict(self):
return {'id': self.id, 'name': self.name, 'language_code': self.language_code, 'form_id': self.form_id}
def check_custom_form_translate(custom_form_id, translate_id):
try:
customFormTranslate = CustomFormTranslates.query.filter_by(custom_form_id=custom_form_id).filter_by(id=translate_id).first()
return customFormTranslate
except ModuleNotFoundError:
return None |
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPFeaturesRequest(dp)
msgs = []
ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG)
descs = []
for msg in msgs:
stats = msg.ports
for stat in stats.values():
d = {'port_no': UTIL.ofp_port_to_user(stat.port_no), 'hw_addr': stat.hw_addr, 'name': stat.name.decode('utf-8'), 'config': stat.config, 'state': stat.state, 'curr': stat.curr, 'advertised': stat.advertised, 'supported': stat.supported, 'peer': stat.peer}
descs.append(d)
return {str(dp.id): descs} |
class MyEnv(gym.Env):
def __init__(self):
super().__init__()
self.action_space = Discrete(2)
def seed(self, seed=None):
print(('Seed = %d' % seed))
(self.np_random, seed) = seeding.np_random(seed)
def reset(self, env_info={}):
self.x = ((self.np_random.rand() * 2.0) - 1.0)
self.identifier = self.np_random.rand()
return {'x': self.x, 'identifier': self.identifier}
def step(self, action):
if (action == 0):
self.x -= 0.3
else:
self.x += 0.3
return ({'x': self.x, 'identifier': self.identifier}, self.x, ((self.x < (- 1)) or (self.x > 1)), {}) |
class AsfFormat(BaseFormat):
MutagenType = asf.ASF
tag_mapping = {'artist': 'Author', 'album': 'WM/AlbumTitle', 'title': 'Title', 'genre': 'WM/Genre', 'tracknumber': 'WM/TrackNumber', 'date': 'WM/Year', 'albumartist': 'WM/AlbumArtist', 'grouping': 'WM/ContentGroupDescription'}
others = False
writable = True
def _get_tag(self, raw, tag_name):
tag = super(AsfFormat, self)._get_tag(raw, tag_name)
if isinstance(tag, list):
attrs = [asf.ASFUnicodeAttribute, asf.ASFDWordAttribute, asf.ASFQWordAttribute, asf.ASFWordAttribute]
def __process_tag(any_tag):
for attrtype in attrs:
if isinstance(any_tag, attrtype):
return str(any_tag)
return any_tag
            return [__process_tag(t) for t in tag]
        # Non-list tags (including None) pass back through unchanged.
        return tag |
def extractWhateverTranslationsMTL(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol or frag)) or ('preview' in item['title'].lower())):
return None
if item['title'].lower().startswith('level maker ch'):
return buildReleaseMessageWithType(item, 'Level Maker', vol, chp, frag=frag, postfix=postfix)
if item['title'].lower().startswith('levelmaker ch'):
return buildReleaseMessageWithType(item, 'Level Maker', vol, chp, frag=frag, postfix=postfix)
return False |
class IRFFT(Computation):
def __init__(self, arr_t):
output_size = ((arr_t.shape[(- 1)] - 1) * 2)
out_arr = Type(dtypes.real_for(arr_t.dtype), (arr_t.shape[:(- 1)] + (output_size,)))
Computation.__init__(self, [Parameter('output', Annotation(out_arr, 'o')), Parameter('input', Annotation(arr_t, 'i'))])
def _build_plan(self, plan_factory, device_params, output, input_):
plan = plan_factory()
N = ((input_.shape[(- 1)] - 1) * 2)
WNmk = numpy.exp(((((- 2j) * numpy.pi) * numpy.arange((N // 2))) / N))
A = (0.5 * (1 - (1j * WNmk)))
B = (0.5 * (1 + (1j * WNmk)))
A_arr = plan.persistent_array(A.conj())
B_arr = plan.persistent_array(B.conj())
cfft_arr = Type(input_.dtype, (input_.shape[:(- 1)] + ((N // 2),)))
cfft = FFT(cfft_arr, axes=((len(input_.shape) - 1),))
prepare_output = prepare_irfft_output(cfft.parameter.output)
cfft.parameter.output.connect(prepare_output, prepare_output.input, real_output=prepare_output.output)
temp = plan.temp_array_like(cfft.parameter.input)
batch_size = helpers.product(output.shape[:(- 1)])
plan.kernel_call(TEMPLATE.get_def('prepare_irfft_input'), [temp, input_, A_arr, B_arr], global_size=(batch_size, (N // 2)), render_kwds=dict(slices=((len(input_.shape) - 1), 1), N=N, mul=functions.mul(input_.dtype, input_.dtype), conj=functions.conj(input_.dtype)))
plan.computation_call(cfft, output, temp, inverse=True)
return plan |
class TestAddConnectionFailsWhenConnectionWithSameAuthorAndNameButDifferentVersion():
def setup_class(cls):
cls.runner = CliRunner()
cls.agent_name = 'myagent'
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
        cls.connection_name = 'http_client'
cls.connection_author = 'fetchai'
cls.connection_version = '0.3.0'
cls.connection_id = str(HTTP_CLIENT_PUBLIC_ID)
shutil.copytree(Path(CUR_PATH, '..', 'packages'), Path(cls.t, 'packages'))
os.chdir(cls.t)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'init', '--author', AUTHOR], standalone_mode=False)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'create', '--local', cls.agent_name], standalone_mode=False)
assert (result.exit_code == 0)
os.chdir(cls.agent_name)
result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'connection', cls.connection_id], standalone_mode=False)
assert (result.exit_code == 0)
different_version = '0.1.1'
different_id = ((((cls.connection_author + '/') + cls.connection_name) + ':') + different_version)
config_path = Path(cls.t, 'packages', cls.connection_author, 'connections', cls.connection_name, DEFAULT_CONNECTION_CONFIG_FILE)
config = yaml.safe_load(config_path.open())
config['version'] = different_version
yaml.safe_dump(config, config_path.open(mode='w'))
cls.result = cls.runner.invoke(cli, [*CLI_LOG_OPTION, 'add', '--local', 'connection', different_id], standalone_mode=False)
def test_exit_code_equal_to_1(self):
assert (self.result.exit_code == 1)
def test_error_message_connection_already_existing(self):
s = f"A connection with id '{self.connection_id}' already exists. Aborting..."
assert (self.result.exception.message == s)
def teardown_class(cls):
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass |
def test_datetime():
s = load_source('file', climetlab_file('docs/examples/test.nc'))
assert (s.to_datetime() == datetime.datetime(2020, 5, 13, 12)), s.to_datetime()
assert (s.to_datetime_list() == [datetime.datetime(2020, 5, 13, 12)]), s.to_datetime_list()
s = load_source('climetlab-testing', kind='netcdf', dims=['lat', 'lon', 'time'], variables=['a', 'b'], coord_values=dict(time=[datetime.datetime(1990, 1, 1, 12, 0), datetime.datetime(1990, 1, 2, 12, 0)]))
print(s.to_xarray())
print(s.to_xarray().time)
assert (s.to_datetime_list() == [datetime.datetime(1990, 1, 1, 12, 0), datetime.datetime(1990, 1, 2, 12, 0)]), s.to_datetime_list() |
def cached_property(function):
name = (TraitsCache + function.__name__[5:])
def decorator(self):
result = self.__dict__.get(name, Undefined)
if (result is Undefined):
self.__dict__[name] = result = function(self)
return result
decorator.cached_property = True
return decorator |
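# How the memoization above behaves, sketched with the module globals stubbed:
# TraitsCache and Undefined are real names in the source module; the values
# below are stand-ins, and the decorated name must start with '_get_' because
# of the __name__[5:] slice.
TraitsCache = '_traits_cache_'
Undefined = object()

class Point:
    @cached_property
    def _get_magnitude(self):
        print('computing...')
        return 5.0

p = Point()
p._get_magnitude()  # prints 'computing...' and stores the result
p._get_magnitude()  # served from p.__dict__['_traits_cache_magnitude'], no print |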
def get_finance_summary_items(security_item, start_date=None, report_period=None):
path = get_finance_path(security_item)
if (not os.path.exists(path)):
return pd.DataFrame()
df = pd.read_csv(path)
    if start_date:
        df = df[(df['reportDate'] >= start_date)]
    if report_period:
        df = df[(df['reportDate'] == report_period)]
    return df |
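# Quick illustration of the two filters above on a toy frame (the reportDate
# values are made up; the real CSV comes from get_finance_path()). ISO date
# strings compare lexicographically, which is why string comparison works.
import pandas as pd

df = pd.DataFrame({'reportDate': ['2017-12-31', '2018-12-31', '2019-12-31'], 'netProfit': [1.0, 2.0, 3.0]})
print(df[(df['reportDate'] >= '2018-01-01')])  # rows from 2018 on
print(df[(df['reportDate'] == '2018-12-31')])  # the single 2018 annual report |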
class T1Font(object):
def __init__(self, path, encoding='ascii', kind=None):
if (kind is None):
(self.data, _) = read(path)
elif (kind == 'LWFN'):
self.data = readLWFN(path)
elif (kind == 'PFB'):
self.data = readPFB(path)
elif (kind == 'OTHER'):
self.data = readOther(path)
else:
raise ValueError(kind)
self.encoding = encoding
def saveAs(self, path, type, dohex=False):
write(path, self.getData(), type, dohex)
def getData(self):
if (not hasattr(self, 'data')):
self.data = self.createData()
return self.data
def getGlyphSet(self):
return self['CharStrings']
def __getitem__(self, key):
if (not hasattr(self, 'font')):
self.parse()
return self.font[key]
def parse(self):
from fontTools.misc import psLib
from fontTools.misc import psCharStrings
self.font = psLib.suckfont(self.data, self.encoding)
charStrings = self.font['CharStrings']
lenIV = self.font['Private'].get('lenIV', 4)
assert (lenIV >= 0)
subrs = self.font['Private']['Subrs']
for (glyphName, charString) in charStrings.items():
(charString, R) = eexec.decrypt(charString, 4330)
charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
for i in range(len(subrs)):
(charString, R) = eexec.decrypt(subrs[i], 4330)
subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
del self.data
def createData(self):
sf = self.font
eexec_began = False
eexec_dict = {}
lines = []
lines.extend([self._tobytes(f"%!FontType1-1.1: {sf['FontName']}"), self._tobytes(f'%t1Font: ({fontTools.version})'), self._tobytes(f"%%BeginResource: font {sf['FontName']}")])
size = 3
size += 1
size += (1 + 1)
for key in font_dictionary_keys:
size += int((key in sf))
lines.append(self._tobytes(f'{size} dict dup begin'))
for (key, value) in sf.items():
if eexec_began:
eexec_dict[key] = value
continue
if (key == 'FontInfo'):
fi = sf['FontInfo']
size = 3
for subkey in FontInfo_dictionary_keys:
size += int((subkey in fi))
lines.append(self._tobytes(f'/FontInfo {size} dict dup begin'))
for (subkey, subvalue) in fi.items():
lines.extend(self._make_lines(subkey, subvalue))
lines.append(b'end def')
elif (key in _type1_post_eexec_order):
eexec_dict[key] = value
eexec_began = True
else:
lines.extend(self._make_lines(key, value))
lines.append(b'end')
eexec_portion = self.encode_eexec(eexec_dict)
lines.append(bytesjoin([b'currentfile eexec ', eexec_portion]))
for _ in range(8):
lines.append(self._tobytes(('0' * 64)))
lines.extend([b'cleartomark', b'%%EndResource', b'%%EOF'])
data = bytesjoin(lines, '\n')
return data
def encode_eexec(self, eexec_dict):
lines = []
(RD_key, ND_key, NP_key) = (None, None, None)
lenIV = 4
subrs = std_subrs
sortedItems = sorted(eexec_dict.items(), key=(lambda item: (item[0] != 'Private')))
for (key, value) in sortedItems:
if (key == 'Private'):
pr = eexec_dict['Private']
size = 3
for subkey in Private_dictionary_keys:
size += int((subkey in pr))
lines.append(b'dup /Private')
lines.append(self._tobytes(f'{size} dict dup begin'))
for (subkey, subvalue) in pr.items():
if ((not RD_key) and (subvalue == RD_value)):
RD_key = subkey
elif ((not ND_key) and (subvalue in ND_values)):
ND_key = subkey
elif ((not NP_key) and (subvalue in PD_values)):
NP_key = subkey
if (subkey == 'lenIV'):
lenIV = subvalue
if (subkey == 'OtherSubrs'):
lines.append(self._tobytes(hintothers))
elif (subkey == 'Subrs'):
for subr_bin in subvalue:
subr_bin.compile()
subrs = [subr_bin.bytecode for subr_bin in subvalue]
lines.append(f'/Subrs {len(subrs)} array'.encode('ascii'))
for (i, subr_bin) in enumerate(subrs):
(encrypted_subr, R) = eexec.encrypt(bytesjoin([char_IV[:lenIV], subr_bin]), 4330)
lines.append(bytesjoin([self._tobytes(f'dup {i} {len(encrypted_subr)} {RD_key} '), encrypted_subr, self._tobytes(f' {NP_key}')]))
lines.append(b'def')
lines.append(b'put')
else:
lines.extend(self._make_lines(subkey, subvalue))
elif (key == 'CharStrings'):
lines.append(b'dup /CharStrings')
lines.append(self._tobytes(f"{len(eexec_dict['CharStrings'])} dict dup begin"))
for (glyph_name, char_bin) in eexec_dict['CharStrings'].items():
char_bin.compile()
(encrypted_char, R) = eexec.encrypt(bytesjoin([char_IV[:lenIV], char_bin.bytecode]), 4330)
lines.append(bytesjoin([self._tobytes(f'/{glyph_name} {len(encrypted_char)} {RD_key} '), encrypted_char, self._tobytes(f' {ND_key}')]))
lines.append(b'end put')
else:
lines.extend(self._make_lines(key, value))
lines.extend([b'end', b'dup /FontName get exch definefont pop', b'mark', b'currentfile closefile\n'])
eexec_portion = bytesjoin(lines, '\n')
(encrypted_eexec, R) = eexec.encrypt(bytesjoin([eexec_IV, eexec_portion]), 55665)
return encrypted_eexec
def _make_lines(self, key, value):
if (key == 'FontName'):
return [self._tobytes(f'/{key} /{value} def')]
if (key in ['isFixedPitch', 'ForceBold', 'RndStemUp']):
return [self._tobytes(f"/{key} {('true' if value else 'false')} def")]
elif (key == 'Encoding'):
if (value == StandardEncoding):
return [self._tobytes(f'/{key} StandardEncoding def')]
else:
lines = []
lines.append(b'/Encoding 256 array')
lines.append(b'0 1 255 {1 index exch /.notdef put} for')
for i in range(256):
name = value[i]
if (name != '.notdef'):
lines.append(self._tobytes(f'dup {i} /{name} put'))
lines.append(b'def')
return lines
if isinstance(value, str):
return [self._tobytes(f'/{key} ({value}) def')]
elif isinstance(value, bool):
return [self._tobytes(f"/{key} {('true' if value else 'false')} def")]
elif isinstance(value, list):
return [self._tobytes(f"/{key} [{' '.join((str(v) for v in value))}] def")]
elif isinstance(value, tuple):
return [self._tobytes(f"/{key} {{{' '.join((str(v) for v in value))}}} def")]
else:
return [self._tobytes(f'/{key} {value} def')]
def _tobytes(self, s, errors='strict'):
return tobytes(s, self.encoding, errors) |
def upgrade():
op.create_table('notification_actions', sa.Column('id', sa.Integer(), nullable=False), sa.Column('action_type', sa.String(), nullable=True), sa.Column('subject', sa.String(), nullable=True), sa.Column('notification_topic', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'))
op.add_column('notifications', sa.Column('notification_topic', sa.Integer(), nullable=True))
op.add_column('notifications', sa.Column('subject_id', sa.Integer(), nullable=True))
op.drop_column('notifications', 'action') |