| id (stringlengths 3-8) | content (stringlengths 100-981k) |
|---|---|
137691
|
import pandas as pd
from io import StringIO  # pd.compat.StringIO was removed from modern pandas
def pandas_excel_write(save_dir_path: str):
data1 = """
class precision recall
<18 0.0125 12
18-24 0.0250 16
25-34 0.00350 4
"""
data2 = """
sample values
<18 0
18-24 0.25
25-34 0.35
"""
# create two sample DataFrames
df1 = pd.read_csv(StringIO(data1), sep=r'\s+')
df1.name = "Dataframe1"
df2 = pd.read_csv(StringIO(data2), sep=r'\s+')
df2.name = "Dataframe2"
print(df1)
print(df2)
write_file_path = f"{save_dir_path}/test_same_sheet.xlsx"
writer = pd.ExcelWriter(write_file_path, engine='xlsxwriter')
workbook = writer.book
worksheet = workbook.add_worksheet('Result')
writer.sheets['Result'] = worksheet
worksheet.write_string(0, 0, df1.name)
df1.to_excel(writer, sheet_name='Result', startrow=1, startcol=0)
worksheet.write_string(df1.shape[0] + 4, 0, df2.name)
df2.to_excel(writer, sheet_name='Result', startrow=df1.shape[0] + 5, startcol=0)
writer.close()  # finalize the first workbook so test_same_sheet.xlsx is actually written
## Different sheets
write_file_path = f"{save_dir_path}/test_diff_sheet.xlsx"
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(write_file_path, engine='xlsxwriter')
s = pd.Series([1, 2, 3])
df_describe = s.describe()
# Write each dataframe to a different worksheet; a title string could be written above each, as in the single-sheet case.
df1.to_excel(writer, sheet_name='Sheet1')
df2.to_excel(writer, sheet_name='Sheet2')
df_describe.to_excel(writer, sheet_name='Sheet3')
# Close the Pandas Excel writer and output the Excel file.
writer.close()  # ExcelWriter.save() was removed in pandas 2.0; close() writes the file
if __name__ == "__main__":
save_dir_path = "../data/data_analysis/"
pandas_excel_write(save_dir_path)
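# A minimal read-back check, assuming the workbooks above were written and an
# .xlsx reader engine (e.g. openpyxl) is available; the path and sheet name
# simply mirror the values used in pandas_excel_write().
if __name__ == "__main__":
    read_back = pd.read_excel("../data/data_analysis/test_diff_sheet.xlsx",
                              sheet_name="Sheet1", index_col=0)
    print(read_back)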
|
137706
|
from capstone import *
from capstone.x86 import *
from elftools.elf.elffile import ELFFile
import sys
NUMBER_OF_CANDIDATES = 20
MAX_RSP_OFFSET = 0x200
ONE_GADGET_LIB_DEBUG = False
# add
# call
# jmp
# lea
# mov
# nop
# push
# sub
# xor
# movq
# movaps
# movhps
# Most practical gadgets have simple constraints.
# So I support only these instructions.
supported_instructions = [
X86_INS_CALL,
X86_INS_LEA,
X86_INS_MOV,
]
def _get_environ_ptr(elffile):
rela_dyn = elffile.get_section_by_name('.rela.dyn')
if not rela_dyn:
raise Exception('.rela.dyn section is not found')
dynsym = elffile.get_section_by_name('.dynsym')
if not dynsym:
raise Exception('.dynsym section is not found')
environ_addr = \
[i['st_value'] for i in dynsym.get_symbol_by_name('environ')]
environ_ptr = []
for rel_entry in rela_dyn.iter_relocations():
addr = dynsym.get_symbol(rel_entry.entry.r_info_sym)['st_value']
if addr in environ_addr:
environ_ptr.append(rel_entry.entry['r_offset'])
return environ_ptr
def _get_execve_offset(elffile):
dynsym_sec = elffile.get_section_by_name('.dynsym')
if not dynsym_sec:
raise Exception('.dynsym section is not found')
sym_list = dynsym_sec.get_symbol_by_name('execve')
for sym in sym_list: # sym_list contains only one item
return sym['st_value']
def _load_code(elffile):
'''
This function extracts code from an ELF file.
'''
text_section = elffile.get_section_by_name('.text')
if not text_section:
raise Exception('.text section is not found')
return text_section.data()
def _get_code_offset(elffile):
'''
This function returns an offset to .text section
'''
text_section = elffile.get_section_by_name('.text')
if not text_section:
raise Exception('.text section is not found')
return text_section['sh_addr']
def _binsh_offset(fobj):
'''
This function finds "/bin/sh" in the file and returns its offset.
'''
data = fobj.read()
return data.find(b'/bin/sh\x00')
def _is_binsh_assignment(instruction, binsh):
'''
If a given instruction assigns "/bin/sh" to rdi,
this function returns True.
Note: I assume that all "/bin/sh" assignments
look like "lea rdi, [rip+<offset>]".
'''
# check whether this instruction is lea or not
if instruction.id != X86_INS_LEA:
return False
# check op_str looks like 'rdi, [rip+<offset>]'
if instruction.op_str != 'rdi, [rip + 0x%x]' % \
(binsh - instruction.address - instruction.size):
return False
return True
def _has_execve_before_rdi_changes(instruction_list, begin, execve_addr):
'''
Determine that the potential gadget calls execve before rdi changes.
'''
for instruction in instruction_list[begin+1:]: # skip rdi = "/bin/sh";
if instruction.id == X86_INS_CALL:
# 'call execve' is the end of the block
if hex(execve_addr) == instruction.op_str:
return True
# other call instructions may change rdi
else:
return False
# When rdi is in the destination operand, it will be changed
elif instruction.op_str.startswith('rdi'):
return False
# FIXME: jmp should be avoided
def _linear_search_execve(instruction_list, begin, execve_addr):
'''
Returns the address of the instruction
below the first 'call execve'.
'''
prev_instruction = None
for instruction in instruction_list[begin:]:
if prev_instruction is not None and \
prev_instruction.id == X86_INS_CALL and \
prev_instruction.op_str == hex(execve_addr):
return instruction.address
prev_instruction = instruction
class ValueX64:
base = None
offset = None
def __init__(self, _base, _offset):
self.base = _base
self.offset = _offset
class ReferenceX64:
base = None
offset = None
def __init__(self, _base, _offset):
self.base = _base
self.offset = _offset
regname = {
None: 'None',
X86_REG_RAX: 'RAX',
X86_REG_RCX: 'RCX',
X86_REG_RDX: 'RDX',
X86_REG_RBX: 'RBX',
X86_REG_RSP: 'RSP',
X86_REG_RBP: 'RBP',
X86_REG_RSI: 'RSI',
X86_REG_RDI: 'RDI',
X86_REG_R8: 'R8',
X86_REG_R9: 'R9',
X86_REG_R10: 'R10',
X86_REG_R11: 'R11',
X86_REG_R12: 'R12',
X86_REG_R13: 'R13',
X86_REG_R14: 'R14',
X86_REG_R15: 'R15',
}
class CpuStateX64:
def __init__(self):
self.reg = [None for i in range(X86_REG_ENDING)]
self.stack = [None for i in range(MAX_RSP_OFFSET)]
self.reg[X86_REG_RSP] = ValueX64(None, None)
def info(self):
for i, r in enumerate(self.reg):
if not r:
continue
print('{} {} {} {}'.format(
regname[i], type(r), regname[r.base], r.offset))
def register_is_filled(self, reg):
if reg is None:
return False
if reg.base is None:
return True
return self.register_is_filled(self.reg[reg.base])
def is_filled(self):
return self.register_is_filled(self.reg[X86_REG_RDI]) and \
self.register_is_filled(self.reg[X86_REG_RSI]) and \
self.register_is_filled(self.reg[X86_REG_RDX])
def constraints(self):
assert(self.reg[X86_REG_RSI].base == X86_REG_RSP)
return '[rsp + {}] == 0'.format(hex(self.reg[X86_REG_RSI].offset))
def _is_call_execve(instruction, execve_addr):
return instruction.id == X86_INS_CALL and \
hex(execve_addr) == instruction.op_str
def _is_one_gadget(cpu, binsh, environ_ptr):
# RDI == "/bin/sh"
if not isinstance(cpu.reg[X86_REG_RDI], ValueX64) or \
cpu.reg[X86_REG_RDI].base is not None or \
cpu.reg[X86_REG_RDI].offset != binsh:
return False
# RSI == [RSP+0xXX]
if isinstance(cpu.reg[X86_REG_RSI], ValueX64) and \
cpu.reg[X86_REG_RSI].base != X86_REG_RSP:
return False
# RDX == [RAX] and RAX == [environ_ptr]
if not isinstance(cpu.reg[X86_REG_RDX], ReferenceX64) or \
cpu.reg[X86_REG_RDX].base != X86_REG_RAX or \
cpu.reg[X86_REG_RDX].offset != 0:
return False
if not isinstance(cpu.reg[X86_REG_RAX], ReferenceX64) or \
cpu.reg[X86_REG_RAX].base is not None or \
cpu.reg[X86_REG_RAX].offset not in environ_ptr:
return False
return True
def _is_complex_instruction(instruction):
if instruction.id == X86_INS_LEA:
if instruction.operands[0].type != X86_OP_REG:
return True
return False
elif instruction.id == X86_INS_MOV:
return False
else: # unsupported instructions
return True
def _has_complex_instructions(instruction_list, begin, execve_addr):
index = begin
instruction = instruction_list[index]
while not _is_call_execve(instruction, execve_addr):
if _is_complex_instruction(instruction):
return True
index = index + 1
instruction = instruction_list[index]
return False
def _execute_instructions_before_binsh(cpu, instruction_list, begin):
'''
Repeatedly check a previous instruction until
all argument registers(rdi, rsi, rdx) are filled.
Index of one-gadget is returned.
'''
index = begin - 1
instruction = instruction_list[index]
while not cpu.is_filled():
if instruction.id == X86_INS_LEA:
assert(len(instruction.operands) == 2)
assert(instruction.operands[0].type == X86_OP_REG)
assert(instruction.operands[1].type == X86_OP_MEM)
dst = instruction.operands[0].reg
base = instruction.operands[1].mem.base
offset = instruction.operands[1].mem.disp
if base == X86_REG_RIP:
base = None
offset = offset + instruction.address + instruction.size
cpu.reg[dst] = ValueX64(base, offset)
elif instruction.id == X86_INS_MOV:
assert(len(instruction.operands) == 2)
if instruction.operands[0].type == X86_OP_REG:
dst = instruction.operands[0].reg
elif instruction.operands[0].type == X86_OP_MEM:
pass
else:
if ONE_GADGET_LIB_DEBUG:
raise Exception('Unsupported mov instruction')
else:
break
if instruction.operands[1].type == X86_OP_REG:
src = instruction.operands[1].reg
cpu.reg[dst] = ValueX64(src, 0)
elif instruction.operands[1].type == X86_OP_MEM:
base = instruction.operands[1].mem.base
if cpu.register_is_filled(cpu.reg[base]):
if ONE_GADGET_LIB_DEBUG:
raise Exception('Unsupported mov instruction')
else:
break
offset = instruction.operands[1].mem.disp
if base == X86_REG_RIP:
base = None
offset = offset + instruction.address + instruction.size
cpu.reg[dst] = ReferenceX64(base, offset)
else:
if ONE_GADGET_LIB_DEBUG:
_print_instruction(instruction)
cpu.info()
raise Exception('Unsupported instruction found')
else:
break
index = index - 1
instruction = instruction_list[index]
return index + 1 # index of one-gadget
def _execute_instructions_between_binsh_and_execve(
cpu, instruction_list, begin, execve_addr):
'''
According to my analysis, most one-gadgets use only lea and mov.
So this code ignores candidates that have other instructions.
'''
index = begin
instruction = instruction_list[index]
# repeat until execve is called
while not _is_call_execve(instruction, execve_addr):
if instruction.id == X86_INS_LEA:
assert(len(instruction.operands) == 2)
assert(instruction.operands[0].type == X86_OP_REG)
assert(instruction.operands[1].type == X86_OP_MEM)
dst = instruction.operands[0].reg
base = instruction.operands[1].mem.base
offset = instruction.operands[1].mem.disp
if base == X86_REG_RIP:
base = None
offset = offset + instruction.address + instruction.size
cpu.reg[dst] = ValueX64(base, offset)
elif instruction.id == X86_INS_MOV:
assert(len(instruction.operands) == 2)
# first operand
if instruction.operands[0].type == X86_OP_REG:
dst = instruction.operands[0].reg
elif instruction.operands[0].type == X86_OP_MEM:
pass # ignore mov to memory
else:
if ONE_GADGET_LIB_DEBUG:
raise Exception('Unsupported mov instruction')
else:
break
# second operand and assignment
if instruction.operands[1].type == X86_OP_REG:
src = instruction.operands[1].reg
cpu.reg[dst] = ValueX64(src, 0)
elif instruction.operands[1].type == X86_OP_MEM:
base = instruction.operands[1].mem.base
offset = instruction.operands[1].mem.disp
if base == X86_REG_RIP:
offset = offset + instruction.address + instruction.size
base = None
cpu.reg[dst] = ReferenceX64(base, offset)
else:
if ONE_GADGET_LIB_DEBUG:
_print_instruction(instruction)
raise Exception('Unknown instruction found')
break
index = index + 1
instruction = instruction_list[index]
def _generate_one_gadget(code, offset, binsh, execve_addr, environ_ptr):
md = Cs(CS_ARCH_X86, CS_MODE_64)
md.syntax = CS_OPT_SYNTAX_INTEL
md.detail = False
instruction_list = list(md.disasm(code, offset))
for i, instruction in enumerate(instruction_list):
if not _is_binsh_assignment(instruction, binsh):
continue
if not _has_execve_before_rdi_changes(
instruction_list, i, execve_addr):
continue
# more detailed disassembly
first_addr = instruction_list[i-NUMBER_OF_CANDIDATES].address
last_addr = \
_linear_search_execve(instruction_list, i, execve_addr)
md.detail = True
detailed_instruction_list = list(
md.disasm(code[first_addr-offset:last_addr-offset], first_addr))
if _has_complex_instructions(
detailed_instruction_list, NUMBER_OF_CANDIDATES, execve_addr):
continue
cpu = CpuStateX64() # TODO: replace with unicorn emulator
_execute_instructions_between_binsh_and_execve(
cpu, detailed_instruction_list, NUMBER_OF_CANDIDATES, execve_addr)
one_gadget_index = _execute_instructions_before_binsh(
cpu, detailed_instruction_list, NUMBER_OF_CANDIDATES)
if not _is_one_gadget(cpu, binsh, environ_ptr):
continue
yield (detailed_instruction_list[one_gadget_index], cpu.constraints())
def _print_instruction_list(ilist):
for i in ilist:
_print_instruction(i)
def _print_instruction(i):
print('0x%x:\t%s\t%s' % (i.address, i.mnemonic, i.op_str))
def generate_one_gadget_full(filename):
'''
This is the main function of this library, which
computes offset to one-gadget and constraints we have to satisfy.
A tuple of instruction and constraints is returned as an iterator.
'''
known_constraints = []
with open(filename, 'rb') as f:
binsh = _binsh_offset(f)
elffile = ELFFile(f)
code = _load_code(elffile)
offset = _get_code_offset(elffile)
execve_addr = _get_execve_offset(elffile)
environ_ptr = _get_environ_ptr(elffile)
for instruction, constraints in \
_generate_one_gadget(
code, offset, binsh, execve_addr, environ_ptr):
if constraints in known_constraints:
continue
known_constraints.append(constraints)
yield instruction, constraints
def generate_one_gadget(filename):
'''
This function yields offset to one-gadget.
'''
for instruction, constraint in generate_one_gadget_full(filename):
yield instruction.address
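# Minimal usage sketch, assuming a libc path is passed on the command line;
# it only uses the generators defined above.
if __name__ == '__main__':
    for instruction, constraints in generate_one_gadget_full(sys.argv[1]):
        print('0x%x with constraint: %s' % (instruction.address, constraints))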
|
137724
|
from puzzle.utils import (get_gene_info, get_cytoband_coord)
class BaseVariantMixin(object):
"""Base class for variant mixins"""
def variants(self, case_id, skip=0, count=30, filters=None):
"""Return a results tuple with variants and nr_of_variants.
"""
raise NotImplementedError
def variant(self, variant_id):
"""Return a specific variant."""
raise NotImplementedError
def _get_genes(self, variant):
"""Add the genes for a variant
Get the hgnc symbols from all transcripts and add them
to the variant
Args:
variant (dict): A variant dictionary
Returns:
genes (list): A list of Genes
"""
ensembl_ids = []
hgnc_symbols = []
for transcript in variant.transcripts:
if transcript.ensembl_id:
ensembl_ids.append(transcript.ensembl_id)
if transcript.hgnc_symbol:
hgnc_symbols.append(transcript.hgnc_symbol)
genes = get_gene_info(
ensembl_ids=ensembl_ids,
hgnc_symbols=hgnc_symbols
)
return genes
def _add_sv_coordinates(self, variant):
"""Add the neccesary sv coordinates for a variant
Args:
variant (puzzle.models.variant)
"""
variant.stop_chrom = variant.CHROM
variant.start = int(variant.POS)
# If we have a translocation:
if ':' in variant.ALT:
other_coordinates = variant.ALT.strip('ACGTN[]').split(':')
variant.stop_chrom = other_coordinates[0].lstrip('chrCHR')
other_position = other_coordinates[1]
# variant.stop = other_position
#Set 'infinity' to length if translocation
variant.sv_len = float('inf')
variant.sv_type = 'BND'
else:
variant.sv_len = variant.stop - variant.start
variant['cytoband_start'] = get_cytoband_coord(
chrom=variant.CHROM,
pos=variant.start
)
variant['cytoband_stop'] = get_cytoband_coord(
chrom=variant.stop_chrom,
pos=variant.stop
)
|
137733
|
from django.urls import reverse
from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
from guardian.mixins import LoginRequiredMixin
from service_catalog.filters.operation_filter import OperationFilter
from service_catalog.models import Operation, Service
from service_catalog.tables.operation_tables import OperationTable
class OperationListView(LoginRequiredMixin, SingleTableMixin, FilterView):
table_pagination = {'per_page': 10}
table_class = OperationTable
model = Operation
template_name = 'generics/list.html'
filterset_class = OperationFilter
def get_table_data(self, **kwargs):
filtered = super().get_table_data().distinct()
return Operation.objects.filter(service__id=self.kwargs.get('service_id')).distinct() & filtered
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
service_id = self.kwargs.get('service_id')
context['service_id'] = service_id
context['html_button_path'] = "generics/buttons/add_operation.html"
context['breadcrumbs'] = [
{'text': 'Service catalog', 'url': reverse('service_catalog:service_list')},
{'text': 'Manage services', 'url': reverse('service_catalog:manage_services')},
{'text': Service.objects.get(id=service_id).name, 'url': ""},
{'text': 'Operations', 'url': ""},
]
return context
|
137769
|
from pytest import mark
@mark.django_db
def test_get_menu_not_found(graphql_client, conference_factory):
conference = conference_factory()
resp = graphql_client.query(
"""
query($code: String!, $identifier: String!) {
conference(code: $code) {
menu(identifier: $identifier) {
links {
title
href
}
}
}
}
""",
variables={"code": conference.code, "identifier": "main-nav"},
)
assert "errors" not in resp
assert resp["data"]["conference"]["menu"] is None
@mark.django_db
def test_get_menu(graphql_client, conference_factory, menu_factory, menu_link_factory):
conference = conference_factory()
menu = menu_factory(identifier="main-nav", conference=conference)
menu_link_factory.create_batch(3, menu=menu)
resp = graphql_client.query(
"""
query($code: String!, $identifier: String!) {
conference(code: $code) {
menu(identifier: $identifier) {
links {
title
href
}
}
}
}
""",
variables={"code": conference.code, "identifier": "main-nav"},
)
assert "errors" not in resp
assert len(resp["data"]["conference"]["menu"]["links"]) == 3
|
137770
|
from .conv_head import ConvHead
from .latent_head import LatentHead
__all__ = [
'ConvHead',
'LatentHead',
]
|
137863
|
import unittest
import os
from os.path import exists, join
import numpy as np
from test_helper import TESTDIR, TESTDATA, TMPDATA
import datetime
from copy import copy
import warnings
from karta.vector import shp, read_shapefile
from karta.vector.geometry import (Point, Line, Polygon,
Multipoint, Multiline, Multipolygon)
from karta.crs import LonLatWGS84
class TestShapefile(unittest.TestCase):
def setUp(self):
self.points = [Point((1, 1), properties={"species": "T. officianale"},
crs=LonLatWGS84),
Point((3, 1), properties={"species": "C. tectorum"},
crs=LonLatWGS84),
Point((4, 3), properties={"species": "M. alba"},
crs=LonLatWGS84),
Point((2, 2), properties={"species": "V. cracca"},
crs=LonLatWGS84)]
self.multipoint = Multipoint([(1,1), (3,1), (4,3), (2,2)],
data={"species": ["T. officianale", "C. tectorum",
"M. alba", "V. cracca"]},
crs=LonLatWGS84)
self.line = Line([(1.0,5.0),(5.0,5.0),(5.0,1.0),(3.0,3.0),(1.0,1.0)],
properties={"geom_id": 27, "name": "test line"},
crs=LonLatWGS84)
self.polygon = Polygon([(1.0,5.0),(5.0,5.0),(5.0,1.0),(3.0,3.0),(1.0,1.0)],
crs=LonLatWGS84)
self.points3 = [Point((1, 1, 0), crs=LonLatWGS84),
Point((3, 1, 3), crs=LonLatWGS84),
Point((4, 3, 2), crs=LonLatWGS84),
Point((2, 2, -1), crs=LonLatWGS84)]
self.line3 = Line([(1,5,2),(5,5,-1),(5,1,3),(3,3,1),(1,1,0)], crs=LonLatWGS84)
self.polygon3 = Polygon([(1,5,2),(5,5,-1),(5,1,3),(3,3,1),(1,1,0)], crs=LonLatWGS84)
testfiles = ["points.shp", "line.shp", "polygon.shp"]
if any(not exists(join(TMPDATA, "shapefiles/", fnm)) for fnm in testfiles):
self.saveTestData()
def saveTestData(self):
testfiles = [(self.multipoint, "points"),
(self.line, "line"),
(self.polygon, "polygon")]
if not os.path.isdir(os.path.join(TMPDATA, "shapefiles")):
os.makedirs(os.path.join(TMPDATA, "shapefiles"))
for (geom, fnm) in testfiles:
geom.to_shapefile(os.path.join(TMPDATA, "shapefiles", fnm))
def test_write_point(self):
point = self.points[0]
point.to_shapefile(os.path.join(TESTDIR, "data/point"))
for fnm in ("point.shx", "point.shx", "point.dbf", "point.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_points(self):
points = self.points
shp.write_shapefile(os.path.join(TESTDIR, "data/points.shp"), *points)
for fnm in ("points.shx", "points.shx", "points.dbf", "points.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_line(self):
self.line.to_shapefile(os.path.join(TESTDIR, "data/line"))
for fnm in ("line.shx", "line.shx", "line.dbf", "line.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_poly(self):
self.polygon.to_shapefile(os.path.join(TESTDIR, "data/polygon"))
for fnm in ("polygon.shx", "polygon.shx", "polygon.dbf", "polygon.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_points3(self):
mp = Multipoint(self.points3)
mp.to_shapefile(os.path.join(TESTDIR, "data/multipointz"))
for fnm in ("multipointz.shx", "multipointz.shx", "multipointz.dbf", "multipointz.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_line3(self):
self.line3.to_shapefile(os.path.join(TESTDIR, "data/linez"))
for fnm in ("linez.shx", "linez.shx", "linez.dbf", "linez.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_poly3(self):
self.polygon3.to_shapefile(os.path.join(TESTDIR, "data/polygonz"))
for fnm in ("polygonz.shx", "polygonz.shx", "polygonz.dbf", "polygonz.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_multipoint(self):
mp = Multipoint(self.points)
mp.to_shapefile(os.path.join(TESTDIR, "data/multipoint"))
for fnm in ("multipoint.shx", "multipoint.shx", "multipoint.dbf", "multipoint.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_multiline(self):
g = Multiline([[(0,0), (1,1), (2,2)], [(1,0), (2,1), (3,2)],
[(2,0), (1,1), (0,2)]], crs=LonLatWGS84)
g.to_shapefile(os.path.join(TESTDIR, "data/multiline"))
for fnm in ("multiline.shx", "multiline.shx", "multiline.dbf", "multiline.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_multipolygon(self):
g = Multipolygon([[[(0,0), (2,2), (1,3)]],
[[(2,0), (4,2), (3,3)]],
[[(2,-2), (1,0), (-1,-1)]]], crs=LonLatWGS84)
g.to_shapefile(os.path.join(TESTDIR, "data/multipoly"))
for fnm in ("multipoly.shx", "multipoly.shx", "multipoly.dbf", "multipoly.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_collection_multipoint(self):
mp = Multipoint(self.points)
mp0 = copy(mp)
mp1 = copy(mp.shift((4, 2)))
mp2 = copy(mp.shift((-2, 3)))
shp.write_shapefile(os.path.join(TESTDIR, "data/mp_collection.shp"),
mp0, mp1, mp2)
for fnm in ("mp_collection.shx", "mp_collection.shx", "mp_collection.dbf", "mp_collection.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_write_collection_lines(self):
line0 = copy(self.line)
line1 = copy(self.line.shift((4, 2)))
line2 = copy(self.line.shift((-2, 3)))
shp.write_shapefile(os.path.join(TESTDIR, "data/line_collection.shp"),
line0, line1, line2)
for fnm in ("line_collection.shx", "line_collection.shx", "line_collection.dbf", "line_collection.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
def test_read_points(self):
points = read_shapefile(os.path.join(TESTDATA, "shapefile", "points"))
self.assertEqual(len(points), 4)
pt = points[0]
self.assertTrue("+proj=lonlat" in pt.crs.get_proj4())
self.assertTrue("+a=6378137.0" in pt.crs.get_proj4())
self.assertTrue("+f=0.00335281" in pt.crs.get_proj4())
mp = Multipoint(points)
self.assertEqual(mp.d["species"], ['T. officianale', 'C. tectorum', 'M. alba', 'V. cracca'])
self.assertEqual(mp.d["ID"], ['0', '1', '2', '3'])
x, y = mp.coords()
self.assertTrue(np.all(x == np.array((1.0, 3.0, 4.0, 2.0))))
self.assertTrue(np.all(y == np.array((1.0, 1.0, 3.0, 2.0))))
def test_read_line(self):
line = read_shapefile(os.path.join(TESTDATA, "shapefile", "line"))[0]
self.assertTrue("+proj=lonlat" in line.crs.get_proj4())
self.assertTrue("+a=6378137.0" in line.crs.get_proj4())
self.assertTrue("+f=0.00335281" in line.crs.get_proj4())
x, y = line.coords()
self.assertTrue(np.all(x == np.array([1.0, 5.0, 5.0, 3.0, 1.0])))
self.assertTrue(np.all(y == np.array([5.0, 5.0, 1.0, 3.0, 1.0])))
def test_read_polygon(self):
polygon = read_shapefile(os.path.join(TESTDATA, "shapefile", "polygon"))[0]
self.assertTrue("+proj=lonlat" in polygon.crs.get_proj4())
self.assertTrue("+a=6378137.0" in polygon.crs.get_proj4())
self.assertTrue("+f=0.00335281" in polygon.crs.get_proj4())
x, y = polygon.coords()
self.assertTrue(np.all(x == np.array([1.0, 5.0, 5.0, 3.0, 1.0])))
self.assertTrue(np.all(y == np.array([5.0, 5.0, 1.0, 3.0, 1.0])))
def test_read_points_newp(self):
# Read a multipoint with a projected coordinate system
newp = read_shapefile(os.path.join(TESTDATA, "shapefile", "newp_nsidc_north"))
proj4 = ('+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 '
'+y_0=0 +a=6378273 +b=6356889.449 +units=m +no_defs')
for part in proj4.split():
self.assertTrue(part[:8] in newp[0].crs.get_proj4())
coords = list(zip(*[pt.vertex()[:2] for pt in newp]))
self.assertEqual(coords, [(521236.8297444395, 521236.8297444395,
521236.8297444395, 547490.4452879033,
547490.4452879033, 547490.4452879033,
587584.1578033275, 587584.1578033275,
587584.1578033275, 571828.4918982167,
571828.4918982167),
(-888853.1384770898, -888853.1384770898,
-888853.1384770898, -902049.3617542256,
-902049.3617542256, -902049.3617542256,
-871214.0673764511, -871214.0673764511,
-871214.0673764511, -850080.914674058,
-850080.914674058)])
meterno = [pt.properties["meterno"] for pt in newp]
self.assertEqual(meterno, ['IMS1/1', 'IMS2/1', '5952/2', 'IMS4/1',
'5953/2', '1963/13', 'IMS5/1', '5213/A',
'2121/13', 'IMS3/1', '3613/2'])
depth = [pt.properties["depth_m"] for pt in newp]
self.assertEqual(depth, ['73', '143', '247', '86', '147', '250', '74',
'142', '235', '150', '248'])
class ShapefileAttributeTests(unittest.TestCase):
def test_infer_ogr_fieldtype(self):
self.assertEqual(shp.ogr_get_fieldtype(1), (0, 32))
self.assertEqual(shp.ogr_get_fieldtype([1, 2]), (1, 1000))
self.assertEqual(shp.ogr_get_fieldtype(1.0), (2, 32))
self.assertEqual(shp.ogr_get_fieldtype([1.0, 1.5]), (3, 1000))
# everything should be interpreted as WideString
self.assertEqual(shp.ogr_get_fieldtype("hello"), (4, 180))
self.assertEqual(shp.ogr_get_fieldtype(["list","of","strings"]),(5, 1000))
# doesn't work on Python 2
#self.assertEqual(shp.ogr_get_fieldtype(b'0b110001'), 8)
# dates
self.assertEqual(shp.ogr_get_fieldtype(datetime.date(2013, 11, 17)), (9, 32))
self.assertEqual(shp.ogr_get_fieldtype(datetime.time(8, 30, 0)), (10, 32))
self.assertEqual(shp.ogr_get_fieldtype(datetime.datetime(2013, 11, 17, 8, 30, 0)), (11, 64))
def test_long_attribute_names(self):
line = Line([(1.0,5.0),(5.0,5.0),(5.0,1.0),(3.0,3.0),(1.0,1.0)],
properties={
"geom_id": 27,
"name": "test line",
"description": "line for testing",
"description_en": "Line for testing."
},
crs=LonLatWGS84)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
line.to_shapefile(os.path.join(TESTDIR, "data/line_truncated_attr"))
for fnm in ("line_truncated_attr.shx",
"line_truncated_attr.shx",
"line_truncated_attr.dbf",
"line_truncated_attr.prj"):
self.assertTrue(os.path.isfile(os.path.join(TESTDIR, "data", fnm)))
line2 = read_shapefile(os.path.join(TESTDIR, "data", "line_truncated_attr"))[0]
self.assertTrue("DESCRIPTIO" in line2.properties)
self.assertTrue("DESCRIPTI2" in line2.properties)
self.assertTrue("GEOM_ID" in line2.properties)
self.assertTrue("NAME" in line2.properties)
class ShapelibTestSuite(unittest.TestCase):
""" Open and verify the shapefiles provided with the shapelib testsuite. """
def setUp(self):
self.dirname = os.path.join(TESTDATA, "shapefile", "shapelib")
def test_(self):
res = read_shapefile(os.path.join(self.dirname, "test.shp"))
def test_0(self):
res = read_shapefile(os.path.join(self.dirname, "test0.shp"))
def test_1(self):
res = read_shapefile(os.path.join(self.dirname, "test1.shp"))
self.assertEqual(type(res[0]), Point)
self.assertEqual(len(res), 2)
def test_2(self):
res = read_shapefile(os.path.join(self.dirname, "test2.shp"))
self.assertEqual(type(res[0]), Point)
self.assertEqual(len(res), 2)
def test_3(self):
res = read_shapefile(os.path.join(self.dirname, "test3.shp"))
self.assertEqual(type(res[0]), Point)
self.assertEqual(len(res), 2)
def test_4(self):
res = read_shapefile(os.path.join(self.dirname, "test4.shp"))
self.assertEqual(type(res[0]), Multipoint)
self.assertEqual(len(res), 3)
def test_5(self):
res = read_shapefile(os.path.join(self.dirname, "test5.shp"))
self.assertEqual(type(res[0]), Multipoint)
self.assertEqual(len(res), 3)
def test_6(self):
res = read_shapefile(os.path.join(self.dirname, "test6.shp"))
self.assertEqual(type(res[0]), Multipoint)
self.assertEqual(len(res), 3)
def test_7(self):
res = read_shapefile(os.path.join(self.dirname, "test7.shp"))
self.assertEqual(type(res[0]), Line)
self.assertEqual(type(res[3]), Multiline)
self.assertEqual(len(res), 4)
def test_8(self):
res = read_shapefile(os.path.join(self.dirname, "test8.shp"))
self.assertEqual(type(res[0]), Line)
self.assertEqual(len(res), 4)
def test_9(self):
res = read_shapefile(os.path.join(self.dirname, "test9.shp"))
self.assertEqual(type(res[0]), Line)
self.assertEqual(len(res), 4)
def test_10(self):
res = read_shapefile(os.path.join(self.dirname, "test10.shp"))
self.assertEqual(type(res[0]), Polygon)
self.assertEqual(len(res), 4)
def test_11(self):
res = read_shapefile(os.path.join(self.dirname, "test11.shp"))
self.assertEqual(type(res[0]), Polygon)
self.assertEqual(len(res), 4)
def test_12(self):
res = read_shapefile(os.path.join(self.dirname, "test12.shp"))
self.assertEqual(type(res[0]), Polygon)
self.assertEqual(len(res), 4)
def test_13(self):
res = read_shapefile(os.path.join(self.dirname, "test13.shp"))
self.assertEqual(type(res[0]), Multipolygon)
self.assertEqual(len(res), 4)
if __name__ == "__main__":
unittest.main()
|
137880
|
import os
import re
import sys
import shutil
import filecmp
from functools import reduce
fcm_types = ['accounts', 'storage', 'topics']
avail_nodes = []
dumps_root_dir = ''
investigation_name = 'iss'
rounds_avail = {}
account_fcm_pattern = re.compile(r'accounts-round(\d+)[.]fcm')
first_round_post_iss = 0
def prepare_env():
global avail_nodes, dumps_root_dir, investigation_name
if len(sys.argv) < 2:
print('USAGE: python3 {} '.format(sys.argv[0]) +
'<dumps-root-dir> [<investigation-name>]')
sys.exit(1)
dumps_root_dir = sys.argv[1]
investigation_name = sys.argv[2] if len(sys.argv) > 2 else investigation_name
avail_nodes = [n for n in next(os.walk(dumps_root_dir))[1]]
if not os.path.exists(os.path.join('.', investigation_name)):
os.mkdir(investigation_name)
for node in avail_nodes:
if not os.path.exists(
os.path.join('.', investigation_name, node)):
os.mkdir(os.path.join(investigation_name, node))
def load_rounds_avail():
for node in avail_nodes:
rounds_dir = os.path.join(dumps_root_dir, node)
rounds = set([num_from(fcm) for fcm in next(os.walk(rounds_dir))[2]
if re.match(account_fcm_pattern, fcm)])
rounds_avail[node] = rounds
def pick_first_round():
global first_round_post_iss, rounds_avail
reducer = lambda x, y: x.intersection(y)
first_round_post_iss = min(reduce(reducer, rounds_avail.values()))
def num_from(accounts_fcm):
m = re.match(account_fcm_pattern, accounts_fcm)
return int(m.group(1))
def copy_round_fcms():
for node in avail_nodes:
for fcm_type in fcm_types:
f = fcm_file(fcm_type)
shutil.copyfile(
os.path.join(dumps_root_dir, node, f),
os.path.join('.', investigation_name, node, f))
def diff_matrix(fcm_type, f):
field_width = max(map(len, avail_nodes)) + 1
write_and_print('\n' + ''.join('-' for _ in range(len(fcm_type))), f)
write_and_print(fcm_type.upper(), f)
write_and_print(''.join('-' for _ in range(len(fcm_type))), f)
write_and_print('{:{w}}'.format('', w=field_width) +
''.join(['{:{w}}'.format(node, w=field_width)
for node in avail_nodes]), f)
blank = '{:{w}}'.format('', w=field_width)
for i, node in enumerate(avail_nodes):
l = ['{:<{w}}'.format(node, w=field_width)]
for j, other in enumerate(avail_nodes):
if j < i:
l.append(blank)
else:
answer = 'X' if differ(node, other, fcm_type) else '.'
l.append('{:{w}}'.format(answer, w=field_width))
line = ''.join(l)
write_and_print(line, f)
def write_and_print(s, f):
print(s)
f.write(s + '\n')
def differ(node1, node2, fcm_type):
fcm1, fcm2 = fcm_path(node1, fcm_type), fcm_path(node2, fcm_type)
return not filecmp.cmp(fcm1, fcm2)
def fcm_file(fcm_type):
return '{}-round{}.fcm'.format(fcm_type, first_round_post_iss)
def fcm_path(node, fcm_type):
return os.path.join('.', investigation_name, node, fcm_file(fcm_type))
def write_list_literals():
p = os.path.join('.', investigation_name, 'fcm-paths.excerpt')
with open(p, 'w') as f:
for fcm_type in fcm_types:
f.write(' final List<String> {}Locs = List.of(\n'.format(
fcm_type))
for i, node in enumerate(avail_nodes):
fq = os.path.join(
os.path.abspath('.'),
investigation_name, node, fcm_file(fcm_type))
opt_comma = '' if (i == len(avail_nodes) - 1) else ','
f.write(' "{}"{}\n'.format(fq, opt_comma))
f.write(' );\n')
if __name__ == '__main__':
prepare_env()
load_rounds_avail()
pick_first_round()
print('\nRound {} is first available for all nodes.'.format(
first_round_post_iss) + ' The dumped FCMs differ as below.')
copy_round_fcms()
p = os.path.join('.', investigation_name, 'fcm-diffs.txt')
with open(p, 'w') as f:
for fcm_type in fcm_types:
diff_matrix(fcm_type, f)
write_list_literals()
|
137882
|
import sys
import asyncio
import aiohttp
from asyncqt import QEventLoop, asyncSlot, asyncClose
# from PyQt5.QtWidgets import (
from PySide2.QtWidgets import (
QApplication, QWidget, QLabel, QLineEdit, QTextEdit, QPushButton,
QVBoxLayout)
class MainWindow(QWidget):
"""Main window."""
_DEF_URL = 'https://jsonplaceholder.typicode.com/todos/1'
"""str: Default URL."""
_SESSION_TIMEOUT = 1.
"""float: Session timeout."""
def __init__(self):
super().__init__()
self.setLayout(QVBoxLayout())
self.lblStatus = QLabel('Idle', self)
self.layout().addWidget(self.lblStatus)
self.editUrl = QLineEdit(self._DEF_URL, self)
self.layout().addWidget(self.editUrl)
self.editResponse = QTextEdit('', self)
self.layout().addWidget(self.editResponse)
self.btnFetch = QPushButton('Fetch', self)
self.btnFetch.clicked.connect(self.on_btnFetch_clicked)
self.layout().addWidget(self.btnFetch)
self.session = aiohttp.ClientSession(
loop=asyncio.get_event_loop(),
timeout=aiohttp.ClientTimeout(total=self._SESSION_TIMEOUT))
@asyncClose
async def closeEvent(self, event):
await self.session.close()
@asyncSlot()
async def on_btnFetch_clicked(self):
self.btnFetch.setEnabled(False)
self.lblStatus.setText('Fetching...')
try:
async with self.session.get(self.editUrl.text()) as r:
self.editResponse.setText(await r.text())
except Exception as exc:
self.lblStatus.setText('Error: {}'.format(exc))
else:
self.lblStatus.setText('Finished!')
finally:
self.btnFetch.setEnabled(True)
if __name__ == '__main__':
app = QApplication(sys.argv)
loop = QEventLoop(app)
asyncio.set_event_loop(loop)
mainWindow = MainWindow()
mainWindow.show()
with loop:
sys.exit(loop.run_forever())
|
137895
|
from __future__ import absolute_import
import time
from .ProtectFlags import ProtectFlags
from .TimeStamp import TimeStamp
from .MetaInfo import MetaInfo
from .FSString import FSString
TS_FORMAT = "%Y-%m-%d %H:%M:%S"
class MetaInfoFSUAE:
@staticmethod
def is_meta_file(path):
return path.lower().endswith(".uaem")
@staticmethod
def get_suffix():
return ".uaem"
def load_meta(self, path):
with open(path, "rb") as fh:
data = fh.read().decode("utf-8")
return self.parse_data(data)
def parse_data(self, data):
if data.endswith("\n"):
data = data[:-1]
# first protect flags
if len(data) > 8:
protect = data[0:8]
flags = ProtectFlags()
flags.parse_full(protect)
data = data[9:]
else:
raise ValueError("no protect flags in .uaem file!")
# time stamp (unix) with ticks
# 2019-02-22 22:36:14.24
# 0123456789012345678901
if len(data) >= 22:
time_stamp = data[0:19]
ticks = int(data[20:22])
data = data[23:]
ts = time.strptime(time_stamp, TS_FORMAT)
secs = int(time.mktime(ts))
mod_ts = TimeStamp()
mod_ts.from_secs(secs, ticks)
else:
raise ValueError("no timestamp in .uaem file!")
# comment
if len(data) > 0:
comment = FSString(data)
else:
comment = None
# create meta info
return MetaInfo(flags.get_mask(), mod_ts, comment)
def generate_data(self, meta_info):
protect = meta_info.get_protect_str()
time_stamp = meta_info.get_mod_ts()
ts = time_stamp.format(TS_FORMAT)
ts += ".%02d" % time_stamp.get_sub_secs()
comment = meta_info.get_comment_unicode_str()
return "%s %s %s\n" % (protect, ts, comment)
def save_meta(self, path, meta_info):
with open(path, "wb") as fh:
txt = self.generate_data(meta_info)
fh.write(txt.encode("utf-8"))
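# Minimal parse sketch, assuming this module is imported as part of its package
# (it uses relative imports, so it cannot be run directly). The sample line below
# follows the layout parse_data() expects: 8 protect-flag characters, a timestamp
# with a two-digit tick suffix, then an optional comment; the "----rwed" flag
# string is an assumed example of what ProtectFlags.parse_full() accepts.
#
#     info = MetaInfoFSUAE().parse_data("----rwed 2019-02-22 22:36:14.24 a comment")
#     print(info.get_mod_ts(), info.get_comment_unicode_str())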
|
137929
|
def colored(string, color):
"""
Returns the given string wrapped with an ANSI escape code that gives it color when printed to a terminal.
Args:
string: String to be colored.
color: Chosen color for the string. Can be 'r' for red, 'g' for green, 'y' for yellow, 'b' for blue, 'p' for
pink, 't' for teal, or 'gray' for gray.
Returns:
The input string wrapped in the corresponding ANSI color escape sequence.
"""
colors = {'r': 31, 'g': 32, 'y': 33, 'b': 34, 'p': 35, 't': 36, 'gray': 37}
return f'\x1b[{colors[color]}m{string}\x1b[0m'
# Example usage
if __name__ == '__main__':
print(f"{colored('This', 't')} {colored('is', 'y')} {colored('RED', 'r')}.")
|
137941
|
import os,time,sys
import ROOT as rt
from larcv import larcv
from ROOT import dbscan,std
import numpy as np
colors = [ rt.kRed, rt.kBlue, rt.kMagenta, rt.kGreen, rt.kCyan ]
npoints = 1000
ndims = 2
gauscenters = [(10,10),(2,2)]
gauscovs = [
[[1,0],[0,1]],
[[1,0.7],[0.7,1]]
]
# gen pts and put them into a vector. Also, graph them
g = rt.TGraph( npoints )
algo = dbscan.DBSCANAlgo()
data = dbscan.dbPoints()
for ipt in xrange(0,npoints):
ig = np.random.randint(0,len(gauscenters))
mean = gauscenters[ig]
cov = gauscovs[ig]
points = np.random.multivariate_normal(mean, cov, 1)
g.SetPoint(ipt,points[0][0],points[0][1])
v = std.vector("double")(ndims,0.0)
v[0] = points[0][0]
v[1] = points[0][1]
data.push_back(v)
c = rt.TCanvas("c","c",1400,600)
c.Divide(2,1)
c.cd(1)
g.SetMarkerStyle(20)
g.Draw("AP")
c.Update()
xmin = g.GetXaxis().GetXmin()
xmax = g.GetXaxis().GetXmax()
ymin = g.GetYaxis().GetXmin()
ymax = g.GetYaxis().GetXmax()
c.cd(2)
s = time.time()
output = algo.scan( data, 3, 1.0 )
print "cluster time: ",time.time()-s," sec"
print "Number of clusters: ",output.clusters.size()
haxis = rt.TH2D("hout","",100,xmin,xmax,100,ymin,ymax)
haxis.Draw()
gclusters = []
for icluster in xrange(0,output.clusters.size()):
npts = output.clusters.at(icluster).size()
gc = rt.TGraph( npts )
for ipt in range(0,npts):
idx = output.clusters.at(icluster).at(ipt)
gc.SetPoint(ipt, data.at(idx).at(0), data.at(idx).at(1) )
gclusters.append(gc)
gc.Draw("P")
if npts==0:
gc.SetMarkerStyle(24)
gc.SetMarkerColor( rt.kBlack )
else:
gc.SetMarkerStyle( 20 + icluster%4 )
gc.SetMarkerColor( colors[icluster%len(colors)] )
c.Update()
# testing point
testpoint = std.vector("double")(2,0.0)
testpoint[0] = gauscenters[0][0]
testpoint[1] = gauscenters[0][1]
match = output.findMatchingCluster( testpoint, data, 10.0 )
print "matching cluster index=",match
print "How'd we do?"
raw_input()
#algo.initialize()
# PRINT
#algo.printdata()
# DUMP
#algo.dump( "anntest.dmp" )
|
137991
|
from buildtest.tools.stylecheck import run_style_checks
def test_run_style_check():
run_style_checks(
no_black=False, no_isort=False, no_pyflakes=False, apply_stylechecks=False
)
|
138000
|
import unittest
from monty.multiprocessing import imap_tqdm
from math import sqrt
class FuncCase(unittest.TestCase):
def test_imap_tqdm(self):
results = imap_tqdm(4, sqrt, range(10000))
self.assertEqual(len(results), 10000)
self.assertEqual(results[0], 0)
self.assertEqual(results[400], 20)
self.assertEqual(results[9999], 99.99499987499375)
results = imap_tqdm(4, sqrt, (i ** 2 for i in range(10000)))
self.assertEqual(len(results), 10000)
self.assertEqual(results[0], 0)
self.assertEqual(results[400], 400)
if __name__ == "__main__":
unittest.main()
|
138022
|
import logging
import random
from datetime import datetime, timedelta
from sys import exit
from time import sleep
from colorama import Fore, Style
from GramAddict.core.config import Config
from GramAddict.core.device_facade import create_device, get_device_info
from GramAddict.core.filter import load_config as load_filter
from GramAddict.core.interaction import load_config as load_interaction
from GramAddict.core.log import (
configure_logger,
is_log_file_updated,
update_log_file_name,
)
from GramAddict.core.navigation import check_if_english
from GramAddict.core.persistent_list import PersistentList
from GramAddict.core.report import print_full_report
from GramAddict.core.session_state import SessionState, SessionStateEncoder
from GramAddict.core.storage import Storage
from GramAddict.core.utils import (
ask_for_a_donation,
can_repeat,
check_adb_connection,
check_if_updated,
close_instagram,
config_examples,
get_instagram_version,
get_value,
kill_atx_agent,
)
from GramAddict.core.utils import load_config as load_utils
from GramAddict.core.utils import (
move_usernames_to_accounts,
open_instagram,
pre_post_script,
print_telegram_reports,
save_crash,
set_time_delta,
stop_bot,
wait_for_next_session,
)
from GramAddict.core.views import AccountView, ProfileView, SearchView, TabBarView
from GramAddict.core.views import load_config as load_views
TESTED_IG_VERSION = "214.0.0.27.120"
def start_bot(**kwargs):
# Logging initialization
logger = logging.getLogger(__name__)
# Pre-Load Config
configs = Config(first_run=True, **kwargs)
configure_logger(configs.debug, configs.username)
if not kwargs:
if "--config" not in configs.args:
logger.info(
"We strongly recommend to use a config.yml file. Follow these links for more details: https://docs.gramaddict.org/#/configuration and https://github.com/GramAddict/bot/tree/master/config-examples",
extra={"color": f"{Fore.GREEN}{Style.BRIGHT}"},
)
sleep(3)
# Config-example hint
config_examples()
# Check for updates
check_if_updated()
# Move username folders to a main directory -> accounts
if "--move-folders-in-accounts" in configs.args:
move_usernames_to_accounts()
# Global Variables
sessions = PersistentList("sessions", SessionStateEncoder)
# Load Config
configs.load_plugins()
configs.parse_args()
# Some plugins need config values without being passed
# through. Because we do a weird config/argparse hybrid,
# we need to load the configs in a weird way
load_filter(configs)
load_interaction(configs)
load_utils(configs)
load_views(configs)
if not configs.args or not check_adb_connection():
return
if len(configs.enabled) < 1:
logger.error(
"You have to specify one of the actions: " + ", ".join(configs.actions)
)
return
device = create_device(configs.device_id)
session_state = None
if str(configs.args.total_sessions) != "-1":
total_sessions = get_value(configs.args.total_sessions, None, -1)
else:
total_sessions = -1
# init
analytics_at_end = False
telegram_reports_at_end = False
followers_now = None
following_now = None
while True:
set_time_delta(configs.args)
inside_working_hours, time_left = SessionState.inside_working_hours(
configs.args.working_hours, configs.args.time_delta_session
)
if not inside_working_hours:
wait_for_next_session(
time_left, session_state, sessions, device, configs.args.screen_record
)
pre_post_script(path=configs.args.pre_script)
get_device_info(device)
session_state = SessionState(configs)
session_state.set_limits_session(configs.args)
sessions.append(session_state)
device.wake_up()
logger.info(
"-------- START: "
+ str(session_state.startTime.strftime("%H:%M:%S - %Y/%m/%d"))
+ " --------",
extra={"color": f"{Style.BRIGHT}{Fore.YELLOW}"},
)
if not device.get_info()["screenOn"]:
device.press_power()
if device.is_screen_locked():
device.unlock()
if device.is_screen_locked():
logger.error(
"Can't unlock your screen. There may be a passcode on it. If you would like your screen to be turned on and unlocked automatically, please remove the passcode."
)
exit(0)
logger.info("Device screen ON and unlocked.")
if open_instagram(device, configs.args.screen_record, configs.args.close_apps):
try:
running_ig_version = get_instagram_version()
logger.info(f"Instagram version: {running_ig_version}")
if tuple(running_ig_version.split(".")) > tuple(
TESTED_IG_VERSION.split(".")
):
logger.info(
f"You have a newer version of IG then the one tested! (Tested version: {TESTED_IG_VERSION})",
extra={"color": f"{Style.BRIGHT}"},
)
except Exception as e:
logger.error(f"Error retrieving the IG version. Exception: {e}")
SearchView(device)._close_keyboard()
else:
break
try:
profileView = check_if_english(device)
if configs.args.username is not None:
success = AccountView(device).changeToUsername(configs.args.username)
if not success:
logger.error(
f"Not able to change to {configs.args.username}, abort!"
)
save_crash(device)
device.back()
break
AccountView(device).refresh_account()
(
session_state.my_username,
session_state.my_posts_count,
session_state.my_followers_count,
session_state.my_following_count,
) = profileView.getProfileInfo()
except Exception as e:
logger.error(f"Exception: {e}")
save_crash(device)
break
if (
session_state.my_username is None
or session_state.my_posts_count is None
or session_state.my_followers_count is None
or session_state.my_following_count is None
):
logger.critical(
"Could not get one of the following from your profile: username, # of posts, # of followers, # of followings. This is typically due to a soft ban. Review the crash screenshot to see if this is the case."
)
logger.critical(
f"Username: {session_state.my_username}, Posts: {session_state.my_posts_count}, Followers: {session_state.my_followers_count}, Following: {session_state.my_following_count}"
)
save_crash(device)
exit(1)
if not is_log_file_updated():
try:
update_log_file_name(session_state.my_username)
except Exception as e:
logger.error(
f"Failed to update log file name. Will continue anyway. {e}"
)
report_string = f"Hello, @{session_state.my_username}! You have {session_state.my_followers_count} followers and {session_state.my_following_count} followings so far."
logger.info(report_string, extra={"color": f"{Style.BRIGHT}"})
if configs.args.repeat:
logger.info(
f"You have {total_sessions + 1 - len(sessions) if total_sessions > 0 else 'infinite'} session(s) left. You can stop the bot by pressing CTRL+C in console.",
extra={"color": f"{Style.BRIGHT}{Fore.YELLOW}"},
)
sleep(3)
storage = Storage(session_state.my_username)
if configs.args.shuffle_jobs:
jobs_list = random.sample(configs.enabled, len(configs.enabled))
else:
jobs_list = configs.enabled
if "analytics" in jobs_list:
jobs_list.remove("analytics")
if configs.args.analytics:
analytics_at_end = True
if "telegram-reports" in jobs_list:
jobs_list.remove("telegram-reports")
if configs.args.telegram_reports:
telegram_reports_at_end = True
for plugin in jobs_list:
inside_working_hours, time_left = SessionState.inside_working_hours(
configs.args.working_hours, configs.args.time_delta_session
)
if not inside_working_hours:
logger.info(
"Outside of working hours. Ending session.",
extra={"color": f"{Fore.CYAN}"},
)
break
if not session_state.check_limit(
configs.args, limit_type=session_state.Limit.ALL, output=True
):
logger.info(
f"Current job: {plugin}",
extra={"color": f"{Style.BRIGHT}{Fore.BLUE}"},
)
if configs.args.scrape_to_file is not None:
logger.warning("You're in scraping mode!")
if ProfileView(device).getUsername() != session_state.my_username:
logger.debug("Not in your main profile.")
TabBarView(device).navigateToProfile()
configs.actions[plugin].run(device, configs, storage, sessions, plugin)
else:
logger.info(
"At last one of these limits has been reached: interactions/successful/follower/likes or scraped. Ending session.",
extra={"color": f"{Fore.CYAN}"},
)
break
# save the session in sessions.json
session_state.finishTime = datetime.now()
sessions.persist(directory=session_state.my_username)
# print reports
if telegram_reports_at_end:
logger.info("Going back to your profile..")
ProfileView(device)._click_on_avatar()
if ProfileView(device).getFollowingCount() is None:
ProfileView(device)._click_on_avatar()
AccountView(device).refresh_account()
(
_,
_,
followers_now,
following_now,
) = ProfileView(device).getProfileInfo()
if analytics_at_end:
configs.actions["analytics"].run(
device, configs, storage, sessions, "analytics"
)
# turn off bot
close_instagram(device, configs.args.screen_record)
if configs.args.screen_sleep:
device.screen_off()
logger.info("Screen turned off for sleeping time.")
kill_atx_agent(device)
logger.info(
"-------- FINISH: "
+ str(session_state.finishTime.strftime("%H:%M:%S - %Y/%m/%d"))
+ " --------",
extra={"color": f"{Style.BRIGHT}{Fore.YELLOW}"},
)
pre_post_script(pre=False, path=configs.args.post_script)
if configs.args.repeat and can_repeat(len(sessions), total_sessions):
print_full_report(sessions, configs.args.scrape_to_file)
inside_working_hours, time_left = SessionState.inside_working_hours(
configs.args.working_hours, configs.args.time_delta_session
)
if inside_working_hours:
time_left = (
get_value(configs.args.repeat, "Sleep for {} minutes.", 180) * 60
)
print_telegram_reports(
configs,
telegram_reports_at_end,
followers_now,
following_now,
time_left,
)
logger.info(
f'Next session will start at: {(datetime.now() + timedelta(seconds=time_left)).strftime("%H:%M:%S (%Y/%m/%d)")}.'
)
try:
sleep(time_left)
except KeyboardInterrupt:
stop_bot(
device,
sessions,
session_state,
configs.args.screen_record,
was_sleeping=True,
)
else:
print_telegram_reports(
configs,
telegram_reports_at_end,
followers_now,
following_now,
time_left.total_seconds(),
)
wait_for_next_session(
time_left,
session_state,
sessions,
device,
configs.args.screen_record,
)
else:
break
print_telegram_reports(
configs,
telegram_reports_at_end,
followers_now,
following_now,
)
print_full_report(sessions, configs.args.scrape_to_file)
ask_for_a_donation()
|
138025
|
from ...utils import Command
from ..utils import SiteManager
class StatusCommand(Command):
"""
Retrieve the work order status
"""
def setup(self, subparsers):
parser = super(StatusCommand, self).setup(subparsers)
parser.add_argument('-u', "--api_url", required=True, type=str, help="url of your Customer api")
parser.add_argument('-i', '--work_order_id', required=True, type=str, help="Work order id")
return parser
def run(self, api_url, work_order_id, **kwargs):
return SiteManager().status_work_order(api_url, work_order_id)
|
138041
|
import unittest
import requests
from src.config import PYTHON_MODULE_PORT
class APIStatusTest(unittest.TestCase):
def setUp(self):
self.url = f'http://localhost:{PYTHON_MODULE_PORT}/docs'
self.status_code = 200
self.response_message = True
def test_status_code(self):
response = requests.get(self.url)
self.assertEqual(response.status_code, self.status_code)
|
138049
|
import json
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from helium.auth.models import UserProfile
from helium.auth.models import UserSettings
from helium.auth.tests.helpers import userhelper
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Helium Edu"
__version__ = "1.4.46"
class TestCaseAuthenticationViews(TestCase):
def test_password_reset(self):
# GIVEN
user = userhelper.given_a_user_exists()
# WHEN
response = self.client.put(reverse('auth_user_resource_forgot'),
json.dumps({'email': user.email}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
temp_pass = response.context['password']
# THEN
response = self.client.post(reverse('auth_token_resource_obtain'),
json.dumps({'username': user.get_username(), 'password': temp_pass}),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_password_reset_real_fake_user_same_response(self):
# GIVEN
userhelper.given_a_user_exists()
# WHEN
response = self.client.put(reverse('auth_user_resource_forgot'),
json.dumps({'email': '<EMAIL>'}),
content_type='application/json')
# WHEN
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_registration_success(self):
# WHEN
data = {
'email': '<EMAIL>',
'username': 'my_test_user',
'password': '<PASSWORD>!',
'time_zone': 'America/Chicago'}
response1 = self.client.post(reverse('auth_user_resource_register'),
json.dumps(data),
content_type='application/json')
# THEN
self.assertEqual(response1.status_code, status.HTTP_201_CREATED)
self.assertEqual(response1.data['settings']['time_zone'], 'America/Chicago')
response2 = self.client.post(reverse('auth_token_resource_obtain'),
json.dumps({'username': 'my_test_user', 'password': '<PASSWORD>!'}),
content_type='application/json')
self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('account is not active', response2.data['non_field_errors'][0])
user = get_user_model().objects.get(email='<EMAIL>')
self.assertFalse(user.is_active)
self.assertEqual(user.username, 'my_test_user')
self.assertEqual(user.settings.time_zone, 'America/Chicago')
self.assertTrue(UserProfile.objects.filter(user__email='<EMAIL>').exists())
self.assertTrue(UserSettings.objects.filter(user__email='<EMAIL>').exists())
def test_registration_bad_data(self):
# WHEN
response = self.client.post(reverse('auth_user_resource_register'),
json.dumps({'email': 'test@not-a-valid-email', 'username': 'my_test_user',
'password1': '<PASSWORD>!', 'password2': '<PASSWORD>!',
'time_zone': 'America/Chicago'}),
content_type='application/json')
# THEN
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(get_user_model().objects.filter(username='my_test_user').exists())
self.assertIn('email', response.data)
def test_verification_success(self):
# GIVEN
user = userhelper.given_an_inactive_user_exists()
# WHEN
response = self.client.get(
reverse('auth_user_resource_verify') + f'?username={user.username}&code={user.verification_code}')
# THEN
self.assertEqual(response.status_code, status.HTTP_200_OK)
user = get_user_model().objects.get(email=user.email)
self.assertEqual(get_user_model().objects.count(), 1)
self.assertEqual(user.get_username(), 'test_user')
self.assertTrue(user.is_active)
def test_verification_bad_request(self):
# GIVEN
user = userhelper.given_an_inactive_user_exists()
# WHEN
response = self.client.get(
reverse('auth_user_resource_verify') + f'?code={user.verification_code}')
# THEN
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user = get_user_model().objects.get(email=user.email)
self.assertIn("'username' and 'password' must be given", response.data[0])
self.assertFalse(user.is_active)
def test_verification_not_found(self):
# WHEN
response = self.client.get(
reverse('auth_user_resource_verify') + "?username=not-a-user&code=not-a-real-code")
# THEN
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
138055
|
import os, sys
import torch
from .models.embedding import FullyConnectedEmbed, SkipLSTM
from .models.contact import ContactCNN
from .models.interaction import ModelInteraction
def build_lm_1(state_dict_path):
"""
:meta private:
"""
model = SkipLSTM(21, 100, 1024, 3)
state_dict = torch.load(state_dict_path)
model.load_state_dict(state_dict)
model.eval()
return model
def build_human_1(state_dict_path):
"""
:meta private:
"""
embModel = FullyConnectedEmbed(6165, 100, 0.5)
conModel = ContactCNN(100, 50, 7)
model = ModelInteraction(embModel, conModel, use_W=True, pool_size=9)
state_dict = torch.load(state_dict_path)
model.load_state_dict(state_dict)
model.eval()
return model
VALID_MODELS = {
"lm_v1": build_lm_1,
"human_v1": build_human_1
}
def get_state_dict(version="human_v1", verbose=True):
"""
Download a pre-trained model if not already exists on local device.
:param version: Version of trained model to download [default: human_1]
:type version: str
:param verbose: Print model download status on stdout [default: True]
:type verbose: bool
:return: Path to state dictionary for pre-trained language model
:rtype: str
"""
state_dict_basename = f"dscript_{version}.pt"
state_dict_basedir = os.path.dirname(os.path.realpath(__file__))
state_dict_fullname = f"{state_dict_basedir}/{state_dict_basename}"
state_dict_url = f"http://cb.csail.mit.edu/cb/dscript/data/models/{state_dict_basename}"
if not os.path.exists(state_dict_fullname):
try:
import urllib.request
import shutil
if verbose: print(f"Downloading model {version} from {state_dict_url}...")
with urllib.request.urlopen(state_dict_url) as response, open(state_dict_fullname, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
except Exception as e:
print("Unable to download model - {}".format(e))
sys.exit(1)
return state_dict_fullname
def get_pretrained(version="human_v1"):
"""
Get pre-trained model object.
Currently Available Models
==========================
See the `documentation <https://d-script.readthedocs.io/en/main/data.html#trained-models>`_ for most up-to-date list.
- ``lm_v1`` - Language model from `Bepler & Berger <https://github.com/tbepler/protein-sequence-embedding-iclr2019>`_.
- ``human_v1`` - Human trained model from D-SCRIPT manuscript.
Default: ``human_v1``
:param version: Version of pre-trained model to get
:type version: str
:return: Pre-trained model
:rtype: dscript.models.*
"""
    if version not in VALID_MODELS:
raise ValueError("Model {} does not exist".format(version))
state_dict_path = get_state_dict(version)
return VALID_MODELS[version](state_dict_path)
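# Illustrative usage sketch (added; not part of the original module). It simply
# combines the helpers above: "human_v1" is one of the keys in VALID_MODELS, and
# fetching the weights needs network access, so treat this as a sketch only.
if __name__ == "__main__":
    # Downloads dscript_human_v1.pt next to this file on first use, then loads it.
    model = get_pretrained("human_v1")
    print(type(model).__name__)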
|
138062
|
import rpyc
import copy
import unittest
from rpyc.utils.server import ThreadedServer
class MyClass(object):
def __add__(self, other):
return self.foo() + str(other)
def foo(self):
return "foo"
def bar(self):
return "bar"
def spam(self):
return "spam"
def _privy(self):
return "privy"
def exposed_foobar(self):
return "Fee Fie Foe Foo"
class YourClass(object):
def lala(self):
return MyClass()
def baba(self):
return "baba"
def gaga(self):
return "gaga"
try:
long
except NameError:
long = int
unicode = str
try:
bytes
except NameError:
bytes = str
class Protector(object):
def __init__(self, safetypes=(int, list, bool, tuple, str, float, long, unicode, bytes)):
self._safetypes = set(safetypes)
self._typereg = {}
def register(self, typ, attrs):
self._typereg[typ] = frozenset(attrs)
def wrap(self, obj):
class Restrictor(object):
def __call__(_, *args, **kwargs):
return self.wrap(obj(*args, **kwargs))
def _rpyc_getattr(_, name):
if type(obj) not in self._safetypes:
attrs = self._typereg.get(type(obj), ())
if name not in attrs:
raise AttributeError(name)
obj2 = getattr(obj, name)
return self.wrap(obj2)
__getattr__ = _rpyc_getattr
return Restrictor()
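# Note (added, illustrative): Protector.wrap returns a Restrictor whose attribute
# lookups are limited to the names registered for the wrapped object's type, e.g.
#   p = Protector()
#   p.register(MyClass, ["foo"])
#   p.wrap(MyClass()).foo()   # permitted; the result is wrapped again
#   p.wrap(MyClass()).bar     # raises AttributeError (not registered)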
SVC_RESTRICTED = ["exposed_foobar", "__add__", "_privy", "foo", "bar"]
class MyService(rpyc.Service):
exposed_MyClass = MyClass
def exposed_get_one(self):
return rpyc.restricted(MyClass(), SVC_RESTRICTED)
def exposed_get_two(self):
protector = Protector()
protector.register(MyClass, SVC_RESTRICTED)
protector.register(YourClass, ["lala", "baba"])
return protector.wrap(YourClass())
class TestRestricted(unittest.TestCase):
def setUp(self):
self.server = ThreadedServer(MyService)
self.thd = self.server._start_in_thread()
self.conn = rpyc.connect("localhost", self.server.port)
def tearDown(self):
self.conn.close()
while self.server.clients:
pass
self.server.close()
self.thd.join()
def test_restricted(self):
obj = self.conn.root.get_one()
self.assertEqual(obj.foo(), "foo")
self.assertEqual(obj.bar(), "bar")
self.assertEqual(obj.__add__("bar"), "foobar")
self.assertEqual(obj._privy(), "privy")
self.assertEqual(obj.exposed_foobar(), "Fee Fie Foe Foo")
self.assertRaises(AttributeError, lambda: obj.spam)
def test_restricted2(self):
self.server.protocol_config = {'allow_public_attrs': False}
obj = self.conn.root.get_one()
self.assertEqual(obj.foo(), "foo")
self.assertEqual(obj.bar(), "bar")
self.assertEqual(obj.__add__("bar"), "foobar")
self.assertEqual(obj._privy(), "privy")
self.assertRaises(AttributeError, lambda: obj.spam)
class TestConfigAllows(unittest.TestCase):
def setUp(self):
self.cfg = self._reset_cfg()
self.server = ThreadedServer(MyService, port=0)
self.thd = self.server._start_in_thread()
self.conn = rpyc.connect("localhost", self.server.port)
def tearDown(self):
self.conn.close()
while self.server.clients:
pass
self.server.close()
self.thd.join()
def _reset_cfg(self):
self.cfg = copy.copy(rpyc.core.protocol.DEFAULT_CONFIG)
return self.cfg
def _get_myclass(self, proto_config):
self.conn.close()
self.server.protocol_config.update(proto_config)
self.conn = rpyc.connect("localhost", self.server.port)
return self.conn.root.MyClass()
def test_default_config(self):
obj = self._get_myclass(self.cfg)
self.assertEqual(obj + 'bar', "foobar")
self.assertEqual(obj.foobar(), "Fee Fie Foe Foo")
self.assertEqual(obj.exposed_foobar(), "Fee Fie Foe Foo")
self.assertRaises(AttributeError, lambda: obj._privy)
self.assertRaises(AttributeError, lambda: obj.foo)
self.assertRaises(AttributeError, lambda: obj.bar)
self.assertRaises(AttributeError, lambda: obj.spam)
def test_allow_all(self):
self._reset_cfg()
self.cfg['allow_all_attrs'] = True
obj = self._get_myclass(self.cfg)
self.assertEqual(obj + 'bar', "foobar")
self.assertEqual(obj.__add__("bar"), "foobar")
self.assertEqual(obj._privy(), "privy")
self.assertEqual(obj.foobar(), "Fee Fie Foe Foo")
self.assertEqual(obj.exposed_foobar(), "Fee Fie Foe Foo")
def test_allow_exposed(self):
self._reset_cfg()
self.cfg['allow_exposed_attrs'] = False
try:
self._get_myclass(self.cfg) # returns obj, but ignored
passed = False
except Exception:
passed = True
self.assertEqual(passed, True)
def test_allow_safe_attrs(self):
self._reset_cfg()
self.cfg['allow_safe_attrs'] = False
obj = self._get_myclass(self.cfg)
self.assertEqual(obj.foobar(), "Fee Fie Foe Foo")
self.assertEqual(obj.exposed_foobar(), "Fee Fie Foe Foo")
self.assertRaises(AttributeError, lambda: obj._privy)
self.assertRaises(AttributeError, lambda: obj + 'bar')
self.assertRaises(AttributeError, lambda: obj.foo)
self.assertRaises(AttributeError, lambda: obj.bar)
self.assertRaises(AttributeError, lambda: obj.spam)
def test_allow_public_attrs(self):
self._reset_cfg()
self.cfg['allow_public_attrs'] = True
obj = self._get_myclass(self.cfg)
self.assertEqual(obj + 'bar', "foobar")
self.assertEqual(obj.foo(), "foo")
self.assertEqual(obj.bar(), "bar")
self.assertEqual(obj.foobar(), "Fee Fie Foe Foo")
self.assertEqual(obj.exposed_foobar(), "Fee Fie Foe Foo")
self.assertRaises(AttributeError, lambda: obj._privy)
# def test_type_protector(self):
# obj = self.conn.root.get_two()
# assert obj.baba() == "baba"
# try:
# obj.gaga()
# except AttributeError:
# pass
# else:
# assert False, "expected an attribute error!"
# obj2 = obj.lala()
# assert obj2.foo() == "foo"
# assert obj2.spam() == "spam"
# try:
# obj.bar()
# except AttributeError:
# pass
# else:
# assert False, "expected an attribute error!"
#
if __name__ == "__main__":
unittest.main()
|
138071
|
import re
import os
import glob
import subprocess
from subprocess import Popen, PIPE
import numpy as np
# generates the string with the selected integrator
def set_integrator(scene, integrator_str):
start = '##INTEGRATOR-DEF-START'
end = '##INTEGRATOR-DEF-END'
replacement = integrator_str
match = re.match(r'(.+%s\s*).+?(\s*%s.+)' % (start, end), scene, re.DOTALL)
return match.group(1) + replacement + match.group(2)
def set_sampler(scene, sampler_str):
start = '##SAMPLER-DEF-START'
end = '##SAMPLER-DEF-END'
replacement = sampler_str
match = re.match(r'(.+%s\s*).+?(\s*%s.+)' % (start, end), scene, re.DOTALL)
return match.group(1) + replacement + match.group(2)
def run_and_time(args, workingDir, repeats=1):
totalTime = 0.0
var = 0.0
mean = 0.0
n = 0
for k in range(repeats):
p = Popen(args, cwd=workingDir, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
# Format of our implementation
renderTime = re.findall(r'Total rendering time: (\d+\.\d+) seconds.', output.decode('utf-8'))
overheadTime = re.findall(r'Overhead: (\d+\.\d+) seconds.', output.decode('utf-8'))
trialTime = 0.0
if not renderTime:
# Format of the optimal MIS implementation
renderTime = re.findall(r'Rendering stats: samples \d+, time (\d+\.\d+) s', output.decode('utf-8'))
if not renderTime:
# Fallback: Hijack PBRT progress reporter
# Accuracy below 0.5 seconds!!
# Might also be printing the full time multiple times,
# the first output is right after Done() was called and should be most accurate
times = re.findall(r'\+\+\] \((\d+\.\d+)s\)', output.decode('utf-8'))
times = np.array(times, dtype=np.float32)
trialTime = times[0]
print("Warning: time measurement fallback option triggered, accuracy < 0.5s!")
else:
trialTime = float(renderTime[0])
else:
trialTime = float(renderTime[0])
n += 1
if n == 1:
mean = trialTime
else:
newMean = mean + (trialTime - mean) / n
var += (trialTime - mean) * (trialTime - newMean)
mean = newMean
if n > 1:
var /= n-1
twoStandardDevs = np.sqrt(var) * 2
import math
roundToN = lambda x, n: round(x, -int(math.floor(math.log10(x))) + (n-1))
return (roundToN(mean, 3), 0.0 if repeats == 1 else roundToN(twoStandardDevs, 3))
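# Usage sketch (added, illustrative): the loop above keeps a running mean/variance
# (Welford's algorithm) over `repeats` timed runs and returns (mean, 2 * stddev),
# both rounded to 3 significant digits. The binary and scene names are placeholders.
#   mean_s, err_s = run_and_time(['./pbrt', 'scene.pbrt'], workingDir='.', repeats=3)
#   print('{}s +/- {}s'.format(mean_s, err_s))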
def run_tests(ref_name, ref_integrator, ref_sampler, tester_fn, scenes):
filenames = []
for scene_name, scene_desc in scenes.items():
scene_path = scene_desc['path']
if not os.path.exists('./' + scene_name):
os.makedirs('./' + scene_name)
# load the scene template and render the reference (if it does not exist already)
with open(scene_path + scene_desc['template'], 'r') as f:
scene = f.read()
refpath = scene_name + '/' + ref_name
if not os.path.isfile(refpath):
sc = set_integrator(scene, ref_integrator)
sc = set_sampler(sc, ref_sampler)
with open(scene_path + 'scene.pbrtgen', 'w') as f:
f.write(sc)
subprocess.call(['./pbrt', scene_path + 'scene.pbrtgen', '--outfile', refpath])
filenames.append(refpath)
filenames.extend(tester_fn(scene_name, scene, scene_path))
return filenames
def show_results(filenames):
# separate out the stratification factors
factorImages = []
for name in filenames:
if 'stratfactor-d' in name:
factorImages.append(name)
for name in factorImages:
filenames.remove(name)
# open all images, assumes tev is in the path
try:
viewer = ['tev']
viewer += filenames
subprocess.call(viewer)
except Exception:
# tev was not found. Maybe we are on WSL and tev is a Windows .exe?
viewer = ['tev.exe']
viewer += filenames
try:
subprocess.call(viewer)
        except Exception:
print('"tev" not found in path, proceeding without showing images')
|
138103
|
from stacker.context import Context
from stacker.config import Config
from stacker.variables import Variable
from stacker_blueprints.network import Network
from stacker.blueprints.testutil import BlueprintTestCase
class TestNetwork(BlueprintTestCase):
def setUp(self):
self.ctx = Context(config=Config({'namespace': 'test'}))
self.common_variables = {
"VpcId": "vpc-abc1234",
"VpcDefaultSecurityGroup": "sg-01234abc",
"AvailabilityZone": "us-east-1a",
"CidrBlock": "10.0.0.0/24",
}
def create_blueprint(self, name):
return Network(name, self.ctx)
def generate_variables(self, variable_dict=None):
variable_dict = variable_dict or {}
self.common_variables.update(variable_dict)
return [Variable(k, v) for k, v in self.common_variables.items()]
def test_network_fail_internet_nat_gateway(self):
bp = self.create_blueprint("test_network_fail_internet_nat_gateway")
variables = {
"InternetGatewayId": "gw-abc1234z",
"NatGatewayId": "nat-abc1234z",
}
bp.resolve_variables(self.generate_variables(variables))
with self.assertRaises(ValueError):
bp.create_template()
def test_network_fail_nat_gateway_and_create_nat_gateway(self):
bp = self.create_blueprint(
"test_network_fail_nat_gateway_and_create_nat_gateway"
)
variables = {
"NatGatewayId": "nat-abc1234z",
"CreateNatGateway": True,
}
bp.resolve_variables(self.generate_variables(variables))
with self.assertRaises(ValueError):
bp.create_template()
def test_network_with_nat_gateway_id(self):
bp = self.create_blueprint("test_network_with_nat_gateway_id")
variables = {
"NatGatewayId": "nat-abc1234z",
}
bp.resolve_variables(self.generate_variables(variables))
bp.create_template()
self.assertRenderedBlueprint(bp)
self.assertNotIn("NatGateway", bp.template.resources)
self.assertEqual(
bp.template.resources["DefaultRoute"].NatGatewayId,
"nat-abc1234z"
)
self.assertEqual(bp.network_type, "private")
def test_network_with_internet_gateway_id_and_create_nat_gateway(self):
bp = self.create_blueprint(
"test_network_with_internet_gateway_id_and_create_nat_gateway"
)
variables = {
"InternetGatewayId": "igw-abc1234z",
"CreateNatGateway": True,
}
bp.resolve_variables(self.generate_variables(variables))
bp.create_template()
self.assertRenderedBlueprint(bp)
self.assertIn("NatGateway", bp.template.resources)
self.assertEqual(
bp.template.resources["DefaultRoute"].GatewayId,
"igw-abc1234z"
)
self.assertEqual(bp.network_type, "public")
def test_network_with_internet_gateway_id_and_no_create_nat_gateway(self):
bp = self.create_blueprint(
"test_network_with_internet_gateway_id_and_no_create_nat_gateway"
)
variables = {
"InternetGatewayId": "igw-abc1234z",
}
bp.resolve_variables(self.generate_variables(variables))
bp.create_template()
self.assertRenderedBlueprint(bp)
self.assertNotIn("NatGateway", bp.template.resources)
self.assertEqual(
bp.template.resources["DefaultRoute"].GatewayId,
"igw-abc1234z"
)
self.assertEqual(bp.network_type, "public")
def test_network_with_extra_tags(self):
bp = self.create_blueprint("test_network_with_extra_tags")
variables = {
"NatGatewayId": "nat-abc1234z",
"Tags": {"A": "apple"},
}
bp.resolve_variables(self.generate_variables(variables))
bp.create_template()
self.assertRenderedBlueprint(bp)
route_table = bp.template.resources["RouteTable"]
found_tag = False
for tag in route_table.Tags.tags:
if tag["Key"] == "A" and tag["Value"] == "apple":
found_tag = True
self.assertTrue(found_tag)
|
138137
|
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import CppExtension, CUDAExtension, CUDA_HOME
ext_modules = [
CppExtension('sym3eig_cpu', ['cpu/sym3eig.cpp']),
]
cmdclass = {'build_ext': torch.utils.cpp_extension.BuildExtension}
if CUDA_HOME is not None:
ext_modules += [
CUDAExtension('sym3eig_cuda',
['cuda/sym3eig.cpp', 'cuda/sym3eig_kernel.cu'])
]
__version__ = '1.0.0'
#url = 'https://github.com/mrjel/pytorch_sym3eig'
install_requires = ['torchvision']
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov', 'numpy']
setup(
name='torch_sym3eig',
version=__version__,
    description='Batch-wise eigenvector/eigenvalue computation for symmetric 3x3 matrices in PyTorch',
author='<NAME>',
author_email='<EMAIL>',
#url=url,
#download_url='{}/archive/{}.tar.gz'.format(url, __version__),
keywords=[
'pytorch', 'eigenvector', 'eigenvalue', 'batchwise-sym3eig', 'geometric-deep-learning', 'neural-networks'
],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=find_packages(),
)
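# Build note (added, illustrative): `pip install .` (or `python setup.py install`)
# compiles the CPU extension above and, when CUDA_HOME is detected, the CUDA
# extension as well, using torch's BuildExtension as the build_ext command.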
|
138139
|
import asyncio
import time
import json
import socket
import sys
from datetime import datetime
from gmqtt import Client as MQTTClient
from cover import Cover
from alarm_control_panel import Alarm
# Globals
# MQTT
from light import Light
from boiler import Boiler
from switch import Switch
tydom_topic = "+/tydom/#"
refresh_topic = "homeassistant/requests/tydom/refresh"
hostname = socket.gethostname()
# STOP = asyncio.Event()
class MQTT_Hassio():
def __init__(self, broker_host, port, user, password, mqtt_ssl,
home_zone=1, night_zone=2, tydom=None, tydom_alarm_pin=None):
self.broker_host = broker_host
self.port = port
self.user = user
self.password = password
self.ssl = mqtt_ssl
self.tydom = tydom
self.tydom_alarm_pin = tydom_alarm_pin
self.mqtt_client = None
self.home_zone = home_zone
self.night_zone = night_zone
async def connect(self):
try:
print('""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""')
print('Attempting MQTT connection...')
print('MQTT host : ', self.broker_host)
print('MQTT user : ', self.user)
            address = hostname + str(datetime.fromtimestamp(time.time()))
            # print(address)
            client = MQTTClient(address)
# print(client)
client.on_connect = self.on_connect
client.on_message = self.on_message
client.on_disconnect = self.on_disconnect
# client.on_subscribe = self.on_subscribe
client.set_auth_credentials(self.user, self.password)
await client.connect(self.broker_host, self.port, self.ssl)
self.mqtt_client = client
return self.mqtt_client
except Exception as e:
print("MQTT connection Error : ", e)
print('MQTT error, restarting in 8s...')
await asyncio.sleep(8)
await self.connect()
def on_connect(self, client, flags, rc, properties):
print("##################################")
try:
print("Subscribing to : ", tydom_topic)
# client.subscribe('homeassistant/#', qos=0)
client.subscribe('homeassistant/status', qos=0)
client.subscribe(tydom_topic, qos=0)
except Exception as e:
print("Error on connect : ", e)
async def on_message(self, client, topic, payload, qos, properties):
# print('Incoming MQTT message : ', topic, payload)
if ('update' in str(topic)):
# if "update" in topic:
print('Incoming MQTT update request : ', topic, payload)
await self.tydom.get_data()
elif ('kill' in str(topic)):
# if "update" in topic:
print('Incoming MQTT kill request : ', topic, payload)
print('Exiting...')
sys.exit()
elif (topic == "homeassistant/requests/tydom/refresh"):
print('Incoming MQTT refresh request : ', topic, payload)
await self.tydom.post_refresh()
elif (topic == "homeassistant/requests/tydom/scenarii"):
print('Incoming MQTT scenarii request : ', topic, payload)
await self.tydom.get_scenarii()
elif (topic == "homeassistant/status" and payload.decode() == 'online'):
await self.tydom.get_devices_data()
elif (topic == "/tydom/init"):
print('Incoming MQTT init request : ', topic, payload)
await self.tydom.connect()
# elif ('set_scenario' in str(topic)):
# print('Incoming MQTT set_scenario request : ', topic, payload)
# get_id = (topic.split("/"))[3] #extract id from mqtt
# # print(tydom, str(get_id), 'position', json.loads(payload))
# if not self.tydom.connection.open:
# print('Websocket not opened, reconnect...')
# await self.tydom.connect()
# await self.tydom.put_devices_data(str(get_id), 'position',
# str(json.loads(payload)))
# else:
# await self.tydom.put_devices_data(str(get_id), 'position',
# str(json.loads(payload)))
elif 'set_positionCmd' in str(topic):
print('Incoming MQTT set_positionCmd request : ', topic, payload)
value = str(payload).strip('b').strip("'")
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
print(str(get_id), 'positionCmd', value)
await Cover.put_positionCmd(tydom_client=self.tydom, device_id=device_id, cover_id=endpoint_id, positionCmd=str(value))
elif ('set_position' in str(topic)) and not ('set_positionCmd' in str(topic)):
print(
'Incoming MQTT set_position request : ',
topic,
json.loads(payload))
value = json.loads(payload)
# print(value)
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Cover.put_position(tydom_client=self.tydom, device_id=device_id, cover_id=endpoint_id, position=str(value))
elif 'set_levelCmd' in str(topic):
print('Incoming MQTT set_levelCmd request : ', topic, payload)
value = str(payload).strip('b').strip("'")
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
print(str(get_id), 'levelCmd', value)
await Light.put_levelCmd(tydom_client=self.tydom, device_id=device_id, light_id=endpoint_id,
levelCmd=str(value))
elif ('set_level' in str(topic)) and not ('set_levelCmd' in str(topic)):
print(
'Incoming MQTT set_level request : ',
topic,
json.loads(payload))
value = json.loads(payload)
# print(value)
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Light.put_level(tydom_client=self.tydom, device_id=device_id, light_id=endpoint_id,
level=str(value))
elif ('set_alarm_state' in str(topic)) and not ('homeassistant' in str(topic)):
# print(topic, payload, qos, properties)
command = str(payload).strip('b').strip("'")
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Alarm.put_alarm_state(tydom_client=self.tydom, device_id=device_id, alarm_id=endpoint_id, asked_state=command, home_zone=self.home_zone, night_zone=self.night_zone)
elif ('set_setpoint' in str(topic)):
value = str(payload).strip('b').strip("'")
print('Incoming MQTT setpoint request : ', topic, value)
value = json.loads(payload)
# print(value)
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Boiler.put_temperature(tydom_client=self.tydom, device_id=device_id, boiler_id=endpoint_id,
set_setpoint=str(value))
elif ('set_hvacMode' in str(topic)):
value = str(payload).strip('b').strip("'")
print('Incoming MQTT set_hvacMode request : ', topic, value)
# print(value)
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Boiler.put_hvacMode(tydom_client=self.tydom, device_id=device_id, boiler_id=endpoint_id,
set_hvacMode=str(value))
elif ('set_thermicLevel' in str(topic)):
value = str(payload).strip('b').strip("'")
print('Incoming MQTT set_thermicLevel request : ', topic, value)
# print(value)
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Boiler.put_thermicLevel(tydom_client=self.tydom, device_id=device_id, boiler_id=endpoint_id,
set_thermicLevel=str(value))
elif ('set_switch_state' in str(topic)) and not ('homeassistant' in str(topic)):
# print(topic, payload, qos, properties)
command = str(payload).strip('b').strip("'")
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Switch.put_switch_state(tydom_client=self.tydom, device_id=device_id, switch_id=endpoint_id, state=command)
elif 'set_levelCmdGate' in str(topic):
print('Incoming MQTT set_levelCmdGate request : ', topic, payload)
value = str(payload).strip('b').strip("'")
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
print(str(get_id), 'levelCmd', value)
await Light.put_levelCmdGate(tydom_client=self.tydom, device_id=device_id, switch_id=endpoint_id,
levelCmd=str(value))
elif ('set_levelGate' in str(topic)) and not ('set_levelCmd' in str(topic)):
print(
'Incoming MQTT set_levelGate request : ',
topic,
json.loads(payload))
value = json.loads(payload)
# print(value)
get_id = (topic.split("/"))[2] # extract ids from mqtt
device_id = (get_id.split("_"))[0] # extract id from mqtt
endpoint_id = (get_id.split("_"))[1] # extract id from mqtt
await Light.put_levelGate(tydom_client=self.tydom, device_id=device_id, switch_id=endpoint_id,
level=str(value))
else:
pass
# print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# print('MQTT incoming : ', topic, payload.decode())
def on_disconnect(self, client, packet, exc=None):
print('MQTT Disconnected !')
print("##################################")
# self.connect()
def on_subscribe(self, client, mid, qos):
print("MQTT is connected and suscribed ! =)", client)
try:
pyld = str(datetime.fromtimestamp(time.time()))
client.publish(
'homeassistant/sensor/tydom/last_clean_startup',
pyld,
qos=1,
retain=True)
except Exception as e:
print("on subscribe error : ", e)
|
138165
|
from joblib import Parallel, delayed
import queue
import os
import time
# Define number of GPUs available
GPU_available = [0, 1]
N_GPU = len(GPU_available)
models = ["EleutherAI/gpt-j-6B"] #"EleutherAI/gpt-neo-1.3B", "EleutherAI/gpt-neo-2.7B", ]
datasets = ["flowMWOZ", "top", "dialKG-parse"]
template = "python main_conversational_parsing.py --model_checkpoint {} --dataset {} --gpu "
experiments = []
for m in models:
for d in datasets:
experiments.append(template.format(m,d))
# Put indices in queue
q = queue.Queue(maxsize=N_GPU)
mapper = {}
invert_mapper = {}
for i in range(N_GPU):
mapper[i] = GPU_available[i]
invert_mapper[GPU_available[i]] = i
q.put(i)
def runner(cmd):
gpu = mapper[q.get()]
print("RUNNING: ",str(cmd)+str(gpu))
os.system(str(cmd)+str(gpu))
q.put(invert_mapper[gpu])
# Change loop
Parallel(n_jobs=N_GPU, backend="threading")( delayed(runner)(e) for e in experiments)
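# Note (added): the queue holds one index per GPU, so at most N_GPU commands run at
# once; each runner blocks on q.get() until a GPU is free, appends its id to the
# command's trailing "--gpu " flag, and puts the index back when the command exits.
# Illustrative dry run with a harmless command in place of the training script:
#   experiments = ['echo running on gpu ']
#   Parallel(n_jobs=N_GPU, backend="threading")(delayed(runner)(e) for e in experiments)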
|
138206
|
from __future__ import annotations
import pyqtgraph as pg
from pyqtgraph import colormap as cmap
from typing import Generic, Iterator, Sequence, TypeVar, overload, MutableSequence
import numpy as np
from ._utils import convert_color_code, to_rgba
from .components import Legend, Region, ScaleBar, TextItem
from .graph_items import BarPlot, Curve, FillBetween, InfLine, LayerItem, Scatter, Histogram, TextGroup
from .mouse_event import MouseClickEvent
from ._doc import write_docs
from ...widgets.utils import FreeWidget
BOTTOM = "bottom"
LEFT = "left"
class LayerList(MutableSequence[LayerItem]):
"""A napari-like layer list for plot item handling."""
def __init__(self, parent: HasDataItems):
self.parent = parent
def __getitem__(self, key: int | str) -> LayerItem:
if isinstance(key, int):
return self.parent._items[key]
elif isinstance(key, str):
for item in self.parent._items:
if item.name == key:
return item
else:
raise ValueError(f"Item '{key}' not found.")
else:
raise TypeError(f"Cannot use type {type(key)} as a key.")
def __setitem__(self, key, value):
raise NotImplementedError("Can't set item")
def __delitem__(self, key: int | str):
return self.parent._remove_item(key)
def append(self, item: LayerItem):
if not isinstance(item, LayerItem):
raise TypeError(f"Cannot append type {type(item)}.")
self.parent._add_item(item)
def insert(self, pos: int, item: LayerItem):
if not isinstance(item, LayerItem):
raise TypeError(f"Cannot insert type {type(item)}.")
self.parent._insert_item(pos, item)
def __len__(self):
return len(self.parent._items)
def clear(self):
for _ in range(len(self)):
self.parent._remove_item(-1)
def swap(self, pos0: int, pos1: int):
return self.parent._swap_items(pos0, pos1)
def move(self, source: int, destination: int):
return self.parent._move_item(source, destination)
class HasDataItems:
_items: list[LayerItem]
@property
def _graphics(self) -> pg.GraphicsWidget:
"""Target widget to add graphics items."""
raise NotImplementedError()
@property
def layers(self) -> LayerList:
return LayerList(self)
@overload
def add_curve(self, x: Sequence[float], **kwargs): ...
@overload
def add_curve(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_curve(self,
x=None,
y=None,
face_color = None,
edge_color = None,
color = None,
size: float = 7,
name: str | None = None,
lw: float = 1,
ls: str = "-",
symbol=None):
"""
Add a line plot like ``plt.plot(x, y)``.
Parameters
----------
{x}
{y}
{face_color}
{edge_color}
{color}
size: float, default is 7
Symbol size.
{name}
{lw}
{ls}
{symbol}
Returns
-------
Curve
A plot item of a curve.
"""
x, y = _check_xy(x, y)
name = self._find_unique_name((name or "Curve"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = Curve(x, y, face_color=face_color, edge_color=edge_color,
size=size, name=name, lw=lw, ls=ls, symbol=symbol)
self._add_item(item)
return item
@overload
def add_scatter(self, x: Sequence[float], **kwargs): ...
@overload
def add_scatter(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_scatter(self,
x=None,
y=None,
face_color = None,
edge_color = None,
color = None,
size: float = 7,
name: str | None = None,
lw: float = 1,
ls: str = "-",
symbol="o"):
"""
Add scatter plot like ``plt.scatter(x, y)``.
Parameters
----------
{x}
{y}
{face_color}
{edge_color}
{color}
size: float, default is 7
Symbol size.
{name}
{lw}
{ls}
{symbol}
Returns
-------
Scatter
A plot item of the scatter plot.
"""
x, y = _check_xy(x, y)
name = self._find_unique_name((name or "Scatter"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = Scatter(x, y, face_color=face_color, edge_color=edge_color,
size=size, name=name, lw=lw, ls=ls, symbol=symbol)
self._add_item(item)
return item
@write_docs
def add_hist(self, data: Sequence[float],
bins: int | Sequence | str = 10,
range=None,
density: bool = False,
face_color = None,
edge_color = None,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-",
):
"""
Add histogram like ``plt.hist(data)``.
Parameters
----------
data : array-like
            Data for histogram construction.
bins : int, sequence of float or str, default is 10
Bin numbers. See ``np.histogram`` for detail.
range : two floats, optional
Bin ranges. See ``np.histogram`` for detail.
density : bool, default is False
If true, plot the density instead of the counts. See ``np.histogram`` for
detail.
{face_color}
{edge_color}
{color}
{name}
{lw}
{ls}
Returns
-------
Histogram
A plot item of the histogram.
"""
name = self._find_unique_name((name or "Histogram"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = Histogram(data, bins=bins, range=range, density=density,
face_color=face_color, edge_color=edge_color,
name=name, lw=lw, ls=ls)
self._add_item(item)
return item
@overload
def add_bar(self, x: Sequence[float], **kwargs): ...
@overload
def add_bar(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_bar(self,
x=None,
y=None,
width: float = 0.6,
face_color = None,
edge_color = None,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-"):
"""
Add a bar plot like ``plt.bar(x, y)``.
Parameters
----------
{x}
{y}
width : float, default is 0.6
Width of each bar.
{face_color}
{edge_color}
{color}
{name}
{lw}
{ls}
Returns
-------
BarPlot
A plot item of the bar plot.
"""
x, y = _check_xy(x, y)
name = self._find_unique_name((name or "Bar"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = BarPlot(x, y, width=width, face_color=face_color,
edge_color=edge_color, name=name, lw=lw, ls=ls)
self._add_item(item)
return item
@overload
def add_fillbetween(self, x: Sequence[float], **kwargs): ...
@overload
def add_fillbetween(self, x: Sequence[float], y: Sequence[float], **kwargs): ...
@write_docs
def add_fillbetween(self,
x=None,
y1=None,
y2=None,
face_color = None,
edge_color = None,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-"):
x, y1 = _check_xy(x, y1)
name = self._find_unique_name((name or "FillBetween"))
face_color, edge_color = _check_colors(face_color, edge_color, color)
item = FillBetween(x, y1, y2, face_color=face_color, edge_color=edge_color,
name=name, lw=lw, ls=ls)
self._add_item(item)
@overload
def add_infline(self, slope: float, intercept: float, color = None,
name: str | None = None, lw: float = 1, ls: str = "-"):
...
@overload
def add_infline(self, pos: tuple[float, float], degree: float, color = None,
name: str | None = None, lw: float = 1, ls: str = "-"):
...
def add_infline(self,
*args,
color = None,
name: str | None = None,
lw: float = 1,
ls: str = "-",
**kwargs):
if kwargs:
if args:
raise TypeError("Cannot mix args and kwargs for infinite line parameters.")
keys = set(kwargs.keys())
if keys <= {"pos", "angle"}:
args = (kwargs.get("pos", (0, 0)), kwargs.get("angle", 0))
elif keys <= {"slope", "intercept"}:
args = (kwargs.get("slope", (0, 0)), kwargs.get("intercept", 0))
else:
raise ValueError(f"{kwargs} is invalid input.")
nargs = len(args)
if nargs == 1:
arg0 = args[0]
if np.isscalar(arg0):
angle = np.rad2deg(np.arctan(arg0))
pos = (0, 0)
else:
pos = arg0
angle = 90
elif nargs == 2:
arg0, arg1 = args
if np.isscalar(arg0):
angle = np.rad2deg(np.arctan(arg0))
pos = (0, arg1)
else:
pos = arg0
angle = arg1
else:
raise TypeError(
"Arguments of 'add_infline' should be either 'add_infline(slope, intercept)' "
"or 'add_infline(pos, degree)'."
)
item = InfLine(pos, angle, edge_color=color, name=name, lw=lw, ls=ls)
self._add_item(item)
@overload
def add_text(self, x: float, y: float, text: str, **kwargs):
...
@overload
def add_text(self, x: Sequence[float], y: Sequence[float], text: Sequence[str], **kwargs):
...
def add_text(self, x, y, text, color=None, name=None):
if np.isscalar(x) and np.isscalar(y):
x = [x]
y = [y]
text = [text]
item = TextGroup(x, y, text, color, name)
self._add_item(item)
def _add_item(self, item: LayerItem):
item.zorder = len(self._items)
self._graphics.addItem(item.native)
self._items.append(item)
def _insert_item(self, pos: int, item: LayerItem):
self._graphics.addItem(item.native)
self._items.insert(pos, item)
self._reorder()
def _swap_items(self, pos0: int, pos1: int):
item0 = self._items[pos0]
item1 = self._items[pos1]
self._items[pos0] = item1
self._items[pos1] = item0
self._reorder()
def _move_item(self, source: int, destination: int):
if source < destination:
destination -= 1
item = self._items.pop(source)
self._items.insert(destination, item)
self._reorder()
def _remove_item(self, item: LayerItem | int | str):
if isinstance(item, LayerItem):
i = self._items.index(item)
elif isinstance(item, int):
if item < 0:
item += len(self._items)
i = item
elif isinstance(item, str):
for i, each in enumerate(self._items):
if each.name == item:
break
else:
raise ValueError(f"No item named {item}")
if i < 0:
raise ValueError(f"Item {item} not found")
item = self._items.pop(i)
self._graphics.removeItem(item.native)
def _reorder(self):
for i, item in enumerate(self._items):
item.zorder = i
return None
def _find_unique_name(self, prefix: str):
existing_names = [item.name for item in self._items]
name = prefix
i = 0
while name in existing_names:
name = f"{prefix}-{i}"
i += 1
return name
class HasViewBox(HasDataItems):
def __init__(self, viewbox: pg.ViewBox):
self._viewbox = viewbox
self._items: list[LayerItem] = []
# prepare mouse event
self.mouse_click_callbacks = []
# This ROI is not editable. Mouse click event will use it to determine
# the origin of the coordinate system.
self._coordinate_fiducial = pg.ROI((0, 0))
self._coordinate_fiducial.setVisible(False)
self._viewbox.addItem(self._coordinate_fiducial, ignoreBounds=True)
self._enabled = True
def _mouse_clicked(self, e):
# NOTE: Mouse click event needs a reference item to map coordinates.
# Here plot items always have a linear region item as a default one,
        # we can use it as the reference.
ev = MouseClickEvent(e, self._coordinate_fiducial)
x, y = ev.pos()
[xmin, xmax], [ymin, ymax] = self._viewbox.viewRange()
if xmin <= x <= xmax and ymin <= y <= ymax:
for callback in self.mouse_click_callbacks:
callback(ev)
@property
def xlim(self):
"""Range limits of X-axis."""
(xmin, xmax), _ = self._viewbox.viewRange()
return xmin, xmax
@xlim.setter
def xlim(self, value: tuple[float, float]):
self._viewbox.setXRange(*value)
@property
def ylim(self):
"""Range limits of Y-axis."""
_, (ymin, ymax) = self._viewbox.viewRange()
return ymin, ymax
@ylim.setter
def ylim(self, value: tuple[float, float]):
self._viewbox.setYRange(*value)
@property
def enabled(self) -> bool:
"""Mouse interactivity"""
return self._enabled
@enabled.setter
def enabled(self, value: bool):
self._viewbox.setMouseEnabled(value, value)
self._enabled = value
interactive = enabled
@property
def background_color(self):
rgba = self._viewbox.background.brush().color().getRgb()
return np.array(rgba)/255
@background_color.setter
def background_color(self, value):
value = convert_color_code(value)
self._viewbox.setBackgroundColor(pg.mkBrush(value).color())
def _update_scene(self):
raise NotImplementedError()
@property
def border(self):
return to_rgba(self._viewbox.border)
@border.setter
def border(self, value):
value = convert_color_code(value)
self._viewbox.setBorder(value)
class SimpleViewBox(HasViewBox):
def __init__(self):
super().__init__(pg.ViewBox())
@property
def _graphics(self):
return self._viewbox
class PlotItem(HasViewBox):
"""
    A 1-D plot item with an API similar to the napari Viewer.
"""
def __init__(self, viewbox: pg.ViewBox | None = None):
if viewbox is None:
viewbox = pg.ViewBox()
self.pgitem = pg.PlotItem(viewBox=viewbox)
super().__init__(self.pgitem.vb)
# prepare region item
self._region = Region()
self._region.visible = False
self.pgitem.addItem(self._region.native, ignoreBounds=True)
# prepare legend item
self._legend = Legend()
self._legend.native.setParentItem(self._viewbox)
self.pgitem.legend = self._legend.native
# initialize private attributes
self._xlabel = ""
self._ylabel = ""
@property
def _graphics(self):
return self.pgitem
@property
def region(self) -> Region:
"""Linear region item."""
return self._region
@property
def legend(self) -> Legend:
"""Legend item."""
return self._legend
@property
def xlabel(self):
"""Label of X-axis."""
return self._xlabel
@xlabel.setter
def xlabel(self, label: str) -> str:
self.pgitem.setLabel(BOTTOM, label)
self._xlabel = label
@property
def ylabel(self) -> str:
"""Label of Y-axis."""
return self._ylabel
@ylabel.setter
def ylabel(self, label: str):
self.pgitem.setLabel(LEFT, label)
self._ylabel = label
@property
def title(self) -> str:
return self.pgitem.titleLabel.text
@title.setter
def title(self, value: str):
value = str(value)
self.pgitem.setTitle(value)
def _update_scene(self):
# Since plot item does not have graphics scene before being added to
# a graphical layout, mouse event should be connected afterward.
self.pgitem.scene().sigMouseClicked.connect(self._mouse_clicked)
class ViewBoxExt(pg.ViewBox):
def __init__(self, parent=None, border=None, lockAspect=False, enableMouse=True,
invertY=False, enableMenu=True, name=None, invertX=False,
defaultPadding=0.02):
pg.ViewBox.__init__(**locals())
from pyqtgraph import icons
self.button = pg.ButtonItem(icons.getGraphPixmap("ctrl"), 14, self)
self.button.hide()
def hoverEvent(self, ev):
try:
if ev.enter:
self.button.show()
if ev.exit:
self.button.hide()
except RuntimeError:
pass
class ImageItem(HasViewBox):
def __init__(self,
viewbox: pg.ViewBox | None = None,
lock_contrast_limits: bool = False
):
if viewbox is None:
viewbox = ViewBoxExt(lockAspect=True, invertY=True)
self._lock_contrast_limits = lock_contrast_limits
super().__init__(viewbox)
self._image_item = pg.ImageItem()
tr = self._image_item.transform().translate(-0.5, -0.5)
self._image_item.setTransform(tr)
self._viewbox.addItem(self._image_item)
# prepare text overlay
self._text_overlay = TextItem(text="", color="gray")
self._text_overlay.native.setParentItem(self._viewbox)
# prepare scale bar
self._scale_bar = ScaleBar()
self._scale_bar.visible = False
self._scale_bar.native.setParentItem(self._viewbox)
self._scale_bar.native.anchor((1, 1), (1, 1), offset=(-20, -20))
# prepare title and labels
self._title = TextItem(text="", color="white", anchor=(0.5, 1.1))
self._title.pos = [1, 0]
self._viewbox.addItem(self._title.native)
self._title.visible = False
self._xlabel = TextItem(text="", color="white", anchor=(0.5, -0.1))
self._xlabel.pos = [1, 1]
self._viewbox.addItem(self._xlabel.native)
self._xlabel.visible = False
self._ylabel = TextItem(text="", color="white", anchor=(0.5, 1.1), angle=90)
self._ylabel.pos = [0, 1]
self._viewbox.addItem(self._ylabel.native)
self._ylabel.visible = False
if isinstance(viewbox, ViewBoxExt):
# prepare LUT histogram
self._hist = pg.HistogramLUTItem(orientation="horizontal")
self._hist.vb.setBackgroundColor([0, 0, 0, 0.2])
self._hist.setParentItem(self._viewbox)
self._hist.setVisible(False)
@viewbox.button.clicked.connect
def _(e):
visible = not self._hist.isVisible()
self._hist.setVisible(visible)
if visible:
self._hist._updateView()
width = min(160, self._viewbox.width())
self._hist.setFixedWidth(width)
self._cmap = "gray"
def _update_scene(self):
# Since plot item does not have graphics scene before being added to
# a graphical layout, mouse event should be connected afterward.
self._image_item.scene().sigMouseClicked.connect(self._mouse_clicked)
@property
def text_overlay(self) -> TextItem:
"""Text overlay on the image."""
return self._text_overlay
@property
def title(self) -> str:
return self._title.text
@title.setter
def title(self, value: str):
self._title.text = value
@property
def xlabel(self) -> str:
return self._xlabel.text
@xlabel.setter
def xlabel(self, value: str):
self._xlabel.text = value
@property
def ylabel(self) -> str:
return self._ylabel.text
    @ylabel.setter
def ylabel(self, value: str):
self._ylabel.text = value
@property
def scale_bar(self) -> ScaleBar:
"""Scale bar on the image."""
return self._scale_bar
@property
def lock_contrast_limits(self):
return self._lock_contrast_limits
@lock_contrast_limits.setter
def lock_contrast_limits(self, value: bool):
self._lock_contrast_limits = bool(value)
@property
def _graphics(self):
return self._viewbox
@property
def image(self) -> np.ndarray | None:
"""Image data"""
if self._image_item.image is None:
return None
else:
return self._image_item.image.T
@image.setter
def image(self, image: np.ndarray):
no_image = self._image_item.image is None
if no_image:
auto_levels = True
else:
auto_levels = not self._lock_contrast_limits
clims = self.contrast_limits
img = np.asarray(image)
self._image_item.setImage(img.T, autoLevels=auto_levels)
self._hist.setImageItem(self._image_item)
self._hist._updateView()
if no_image:
self._viewbox.autoRange()
if not auto_levels:
self.contrast_limits = clims
sy, sx = img.shape[-2:]
self._title.pos = [sx/2, self._title.pos[1]]
self._title.visible = True
self._xlabel.pos = [sx/2, sy]
self._xlabel.visible = True
self._ylabel.pos = [0, sy/2]
self._ylabel.visible = True
@image.deleter
def image(self):
self._image_item.clear()
self._title.visible = False
self._xlabel.visible = False
self._ylabel.visible = False
@property
def contrast_limits(self) -> list[float, float]:
"""Contrast limits of image"""
return self._image_item.levels
@contrast_limits.setter
def contrast_limits(self, value: tuple[float, float]):
self._hist.setLevels(*value)
@property
def cmap(self):
"""Color map"""
return self._cmap
@cmap.setter
def cmap(self, value):
if isinstance(value, str):
_cmap = cmap.get(value, source="matplotlib")
else:
_cmap = value
self._hist.gradient.setColorMap(_cmap)
self._cmap = value
class QtPlotCanvas(FreeWidget, PlotItem):
"""
    A 1-D data viewer with an API similar to the napari Viewer.
"""
def __init__(self, **kwargs):
# prepare widget
PlotItem.__init__(self)
self.layoutwidget = pg.GraphicsLayoutWidget()
self.layoutwidget.addItem(self.pgitem)
self._update_scene()
super().__init__(**kwargs)
self.set_widget(self.layoutwidget)
class QtImageCanvas(FreeWidget, ImageItem):
def __init__(self, lock_contrast_limits: bool = False, **kwargs):
# prepare widget
ImageItem.__init__(self, lock_contrast_limits=lock_contrast_limits)
self.layoutwidget = pg.GraphicsLayoutWidget()
self.layoutwidget.addItem(self._viewbox)
self._update_scene()
super().__init__(**kwargs)
self.set_widget(self.layoutwidget)
class Qt2YPlotCanvas(FreeWidget):
def __init__(self, **kwargs):
self.layoutwidget = pg.GraphicsLayoutWidget()
item_l = PlotItem()
item_r = SimpleViewBox()
self.layoutwidget.addItem(item_l.pgitem)
item_l.pgitem.scene().addItem(item_r._viewbox)
item_l.pgitem.getAxis("right").linkToView(item_r._viewbox)
item_r._viewbox.setXLink(item_l.pgitem)
item_l._update_scene()
item_l.pgitem.showAxis("right")
self._plot_items: tuple[PlotItem, SimpleViewBox] = (item_l, item_r)
self.updateViews()
item_l.pgitem.vb.sigResized.connect(self.updateViews)
super().__init__(**kwargs)
self.set_widget(self.layoutwidget)
def __getitem__(self, k: int) -> PlotItem:
return self._plot_items[k]
def updateViews(self):
        item_l, item_r = self._plot_items
item_r._viewbox.setGeometry(item_l._viewbox.sceneBoundingRect())
item_r._viewbox.linkedViewChanged(item_l._viewbox, item_r._viewbox.XAxis)
_C = TypeVar("_C", bound=HasViewBox)
class _MultiPlot(FreeWidget, Generic[_C]):
_base_item_class: type[_C]
def __init__(self,
nrows: int = 0,
ncols: int = 0,
sharex: bool = False,
sharey: bool = False,
**kwargs):
"""
Multi-axes ``pyqtgraph`` canvas widget. Can contain multiple objects
of {cls}.
Parameters
----------
nrows : int, default is 0
Initial rows of axes.
ncols : int, default is 0
            Initial columns of axes.
sharex : bool, default is False
If true, all the x-axes will be linked.
sharey : bool, default is False
If true, all the y-axes will be linked.
"""
self.layoutwidget = pg.GraphicsLayoutWidget()
self._axes: list[_C] = []
self._sharex = sharex
self._sharey = sharey
super().__init__(**kwargs)
self.set_widget(self.layoutwidget)
if nrows * ncols > 0:
for r in range(nrows):
for c in range(ncols):
self.addaxis(r, c)
def __init_subclass__(cls) -> None:
"""Update doc."""
init = cls.__init__
init.__doc__ = init.__doc__.format(
cls=cls._base_item_class.__name__
)
def addaxis(self,
row: int | None = None,
col: int | None = None,
rowspan: int = 1,
colspan: int = 1) -> _C:
"""Add a new axis to widget."""
item = self._base_item_class()
self._axes.append(item)
self.layoutwidget.addItem(item._graphics, row, col, rowspan, colspan)
item._update_scene()
if self._sharex and len(self._axes) > 1:
item._viewbox.setXLink(self._axes[0]._viewbox)
if self._sharey and len(self._axes) > 1:
item._viewbox.setYLink(self._axes[0]._viewbox)
return item
def __getitem__(self, k: int) -> _C:
return self._axes[k]
def __delitem__(self, k: int):
item = self._axes[k]
self.layoutwidget.removeItem(item._graphics)
def __iter__(self) -> Iterator[_C]:
return iter(self._axes)
class QtMultiPlotCanvas(_MultiPlot[PlotItem]):
"""A pyqtgraph-based canvas with multiple plot."""
_base_item_class = PlotItem
class QtMultiImageCanvas(_MultiPlot[ImageItem]):
"""A pyqtgraph-based canvas with multiple images."""
_base_item_class = ImageItem
def _check_xy(x, y):
if y is None:
if x is None:
x = []
y = []
else:
y = x
x = np.arange(len(y))
return x, y
def _check_colors(face_color, edge_color, color):
if color is None:
return face_color, edge_color
else:
if face_color is None and edge_color is None:
return color, color
else:
raise ValueError("Cannot set 'color' and either 'face_color' or "
"'edge_color' at the same time.")
|
138221
|
import os, sys, argparse
from os import listdir
from os.path import isfile, join
from os import walk
from dd_client import DD
from annoy import AnnoyIndex
import shelve
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--index",help="repository of images to be indexed")
parser.add_argument("--index-batch-size",type=int,help="size of image batch when indexing",default=1)
parser.add_argument("--search",help="image input file for similarity search")
parser.add_argument("--search-size",help="number of nearest neighbors",type=int,default=10)
parser.add_argument("--confidence-threshold",help="confidence threshold on bounding boxes",type=float,default=0.01)
parser.add_argument("--nclasses",help="number of classes in the model",type=int,default=21)
parser.add_argument("--model-dir",help="model directory",default="model")
args = parser.parse_args()
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
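# Example (added, illustrative): batch([1, 2, 3, 4, 5], n=2) yields [1, 2], [3, 4], [5].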
def image_resize(imgfile,width):
imgquery = cv2.imread(imgfile)
r = width / imgquery.shape[1]
dim = (int(width), int(imgquery.shape[0] * r))
small = cv2.resize(imgquery,dim)
return small
host = 'localhost'
sname = 'imageserv'
description = 'image classification'
mllib = 'caffe'
mltype = 'supervised'
extract_layer = 'rois'
nclasses = args.nclasses
layer_size = 512 # auto anyways
width = height = 300
dd = DD(host)
dd.set_return_format(dd.RETURN_PYTHON)
ntrees = 1000
metric = 'angular' # or 'euclidean'
# creating ML service
model_repo = os.getcwd() + '/' + args.model_dir
model = {'repository':model_repo,'templates':'../templates/caffe/'}
parameters_input = {'connector':'image','width':width,'height':height}
parameters_mllib = {'nclasses':nclasses}
parameters_output = {}
try:
dd.put_service(sname,model,description,mllib,
parameters_input,parameters_mllib,parameters_output,mltype)
except Exception:
pass
# reset call params
parameters_input = {}
parameters_mllib = {'gpu':True}
parameters_output = {'rois':'rois','confidence_threshold':args.confidence_threshold,'best':1}
if args.index:
try:
os.remove('data.bin')
    except Exception:
pass
s = shelve.open('data.bin')
# list files in image repository
c = 0
d = 1
onlyfiles = []
for (dirpath, dirnames, filenames) in walk(args.index):
nfilenames = []
for f in filenames:
nfilenames.append(dirpath + '/' + f)
onlyfiles.extend(nfilenames)
for x in batch(onlyfiles,args.index_batch_size):
classif = dd.post_predict(sname,x,parameters_input,parameters_mllib,parameters_output)
for p in classif['body']['predictions']:
uri = p['uri']
rois = p['rois']
sys.stdout.write('\rIndexing image '+str(d)+'/'+str(len(onlyfiles)) + ' : ' + str(len(rois)) + ' rois total:' + str(c) + ' ')
sys.stdout.flush()
for roi in rois:
bbox = roi['bbox']
cat = roi['cat']
prob = roi['prob']
vals = roi['vals']
if c == 0:
layer_size = len(vals)
s['layer_size'] = layer_size
t = AnnoyIndex(layer_size,metric) # prepare index
t.add_item(c,vals)
s[str(c)] = {'uri':uri, 'bbox' : bbox, 'cat' : cat, 'prob' : prob}
c = c + 1
d = d + 1
#if c >= 10000:
# break
    print('building index...\n')
    print('layer_size=', layer_size)
t.build(ntrees)
t.save('index.ann')
s.close()
if args.search:
s = shelve.open('data.bin')
u = AnnoyIndex(s['layer_size'],metric)
u.load('index.ann')
data = [args.search]
classif = dd.post_predict(sname,data,parameters_input,parameters_mllib,parameters_output)
# search for every roi
res = classif['body']['predictions'][0]['rois']
print('number of ROI in query: ' + str(len(res)))
for roi in res:
near = u.get_nns_by_vector(roi['vals'],args.search_size,include_distances=True)
near_data = []
near_distance = []
for n in near[1]:
near_distance.append(n)
print('distances: ')
print(near_distance)
for n in near[0]:
near_data.append(s[str(n)])
# print query bbox
img = cv2.imread(args.search)
bbox = roi['bbox']
cat = roi['cat']
cv2.rectangle(img, (int(bbox['xmin']),int(bbox['ymax'])),(int(bbox['xmax']),int(bbox['ymin'])),(255,0,0),2)
cv2.putText(img,cat,(int(bbox['xmin']),int(bbox['ymax'])),cv2.FONT_HERSHEY_PLAIN,1,255)
cv2.imshow('query',img)
cv2.waitKey(0)
for n in near_data:
resimg = cv2.imread(n['uri'])
bbox = n['bbox']
cat = n['cat']
cv2.rectangle(resimg, (int(bbox['xmin']),int(bbox['ymax'])),(int(bbox['xmax']),int(bbox['ymin'])),(255,0,0),2)
cv2.putText(resimg,cat,(int(bbox['xmin']),int(bbox['ymax'])),cv2.FONT_HERSHEY_PLAIN,1,255)
cv2.imshow('res',resimg)
cv2.waitKey(0)
dd.delete_service(sname,clear='')
|
138294
|
from insights.parsers import SkipException
from insights.parsers import ip_netns_exec_namespace_lsof
from insights.parsers.ip_netns_exec_namespace_lsof import IpNetnsExecNamespaceLsofI
from insights.tests import context_wrap
import doctest
import pytest
IP_NETNS_EXEC_NAMESPACE_LSOF_I = """
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
neutron-n 975 root 5u IPv4 6482691 0t0 TCP *:http (LISTEN)
""".strip()
EXCEPTION1 = """
""".strip()
EXCEPTION2 = """
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
""".strip()
def test_ip_netns_exec_namespace_lsof():
data = IpNetnsExecNamespaceLsofI(context_wrap(IP_NETNS_EXEC_NAMESPACE_LSOF_I))
assert len(data.search(node="TCP")) == 1
assert len(data.search(command="neutron-n")) == 1
assert len(data.search(user="nobody")) == 0
assert data.data[0]["command"] == "neutron-n"
assert data.data[0].get("node") == "TCP"
assert [ps[2] for ps in data] == ["root"]
def test_ip_netns_exec_namespace_lsof_documentation():
env = {
"ns_lsof": IpNetnsExecNamespaceLsofI(context_wrap(IP_NETNS_EXEC_NAMESPACE_LSOF_I)),
}
failed, total = doctest.testmod(ip_netns_exec_namespace_lsof, globs=env)
assert failed == 0
def test_ip_netns_exec_namespace_lsof_exception1():
with pytest.raises(SkipException) as e:
IpNetnsExecNamespaceLsofI(context_wrap(EXCEPTION1))
assert "Empty file" in str(e)
def test_ip_netns_exec_namespace_lsof_exception2():
with pytest.raises(SkipException) as e:
IpNetnsExecNamespaceLsofI(context_wrap(EXCEPTION2))
assert "Useless data" in str(e)
|
138332
|
from __future__ import with_statement
from attest import Tests, Assert
suite = Tests()
@suite.test
def raises():
"""Assert.raises"""
try:
with Assert.raises(RuntimeError):
pass
    except AssertionError as e:
Assert(e).__str__() == "didn't raise RuntimeError"
else:
raise AssertionError("didn't fail for missing exception")
# Groups of allowed exceptions
try:
with Assert.raises(RuntimeError, ValueError):
pass
    except AssertionError as e:
Assert(e).__str__() == "didn't raise (RuntimeError, ValueError)"
else:
raise AssertionError("didn't fail for missing exception")
with Assert.raises(RuntimeError, ValueError) as error:
raise RuntimeError
error.__class__.is_(RuntimeError)
with Assert.raises(RuntimeError, ValueError) as error:
raise ValueError('invaluable')
error.__class__.is_(ValueError)
error.__str__() == 'invaluable'
with Assert.raises(AssertionError):
error.args == ('valuable',)
@suite.test
def not_raising():
"""Assert.not_raising"""
with Assert.raises(AssertionError):
with Assert.not_raising(RuntimeError):
raise RuntimeError
try:
with Assert.not_raising(RuntimeError):
pass
except Exception:
raise AssertionError('failed despite not raising RuntimeError')
@suite.test
def equality():
"""Assert() == and !="""
Assert(1) == 1
Assert(1) != 0
with Assert.raises(AssertionError):
Assert(1) == 0
with Assert.raises(AssertionError):
Assert(1) != 1
@suite.test
def compare():
"""Assert() comparisons"""
Assert(1) > 0
Assert(0) < 1
Assert(1) >= 0
Assert(1) >= 1
Assert(0) <= 0
Assert(0) <= 1
with Assert.raises(AssertionError):
Assert(0) > 1
with Assert.raises(AssertionError):
Assert(1) < 0
with Assert.raises(AssertionError):
Assert(0) >= 1
with Assert.raises(AssertionError):
Assert(0) >= 1
with Assert.raises(AssertionError):
Assert(1) <= 0
with Assert.raises(AssertionError):
Assert(1) <= 0
@suite.test
def contains():
"""Assert() membership"""
1 in Assert([0,1,2])
Assert(1).in_([0,1,2])
Assert(3).not_in([0,1,2])
with Assert.raises(AssertionError):
3 in Assert([0,1,2])
with Assert.raises(AssertionError):
Assert(3).in_([0,1,2])
with Assert.raises(AssertionError):
Assert(1).not_in([0,1,2])
@suite.test
def identity():
"""Assert() object identity"""
Assert(True).is_(True)
Assert(False).is_not(True)
Assert(True).is_(Assert(True))
Assert(False).is_not(Assert(True))
Assert([]).is_not([])
with Assert.raises(AssertionError):
Assert(False).is_(True)
with Assert.raises(AssertionError):
Assert(True).is_not(True)
with Assert.raises(AssertionError):
Assert(False).is_(Assert(True))
with Assert.raises(AssertionError):
Assert(True).is_not(Assert(True))
with Assert.raises(AssertionError):
Assert([]).is_([])
@suite.test
def proxy():
"""Assert().remote_attribute"""
hello = Assert('hello')
hello == 'hello'
hello.upper() == 'HELLO'
hello.attr('upper').attr('__name__') == 'upper'
with Assert.raises(AssertionError):
hello.upper() == 'hello'
with Assert.raises(AssertionError):
Assert(3).__str__() == '4'
with Assert.raises(AssertionError):
hello.attr('upper').attr('__name__') == 'lower'
@suite.test
def boolean():
"""Assert() in boolean context"""
bool(Assert(1))
with Assert.raises(AssertionError):
bool(Assert(0))
@suite.test
def nested_assert():
"""Assert(Assert(var)) is Assert(var)"""
Assert(Assert('hello')).__class__.is_(str)
@suite.test
def isinstance():
"""Assert.isinstance"""
with Assert.raises(AssertionError) as error:
Assert.isinstance('hello', (int, float))
error.__str__() == "not isinstance('hello', (int, float))"
with Assert.raises(AssertionError) as error:
Assert.isinstance('hello', int)
error.__str__() == "not isinstance('hello', int)"
Assert.isinstance('hello', basestring)
@suite.test
def not_isinstance():
"""Assert.not_isinstance"""
with Assert.raises(AssertionError) as error:
Assert.not_isinstance(1, (int, float))
error.__str__() == "isinstance(1, (int, float))"
with Assert.raises(AssertionError) as error:
Assert.not_isinstance(1, int)
error.__str__() == "isinstance(1, int)"
Assert.not_isinstance('hello', int)
@suite.test
def issubclass():
"""Assert.issubclass"""
with Assert.raises(AssertionError) as error:
Assert.issubclass(str, (int, float))
error.__str__() == "not issubclass(str, (int, float))"
with Assert.raises(AssertionError) as error:
Assert.issubclass(str, int)
error.__str__() == "not issubclass(str, int)"
Assert.issubclass(str, str)
@suite.test
def not_issubclass():
"""Assert.not_issubclass"""
with Assert.raises(AssertionError) as error:
Assert.not_issubclass(int, (int, float))
error.__str__() == "issubclass(int, (int, float))"
with Assert.raises(AssertionError) as error:
Assert.not_issubclass(int, int)
error.__str__() == "issubclass(int, int)"
Assert.not_issubclass(int, str)
@suite.test
def json():
"""Assert.json"""
Assert('{"works": true}').json == dict(works=True)
Assert('{"works": true}').json != dict(works=False)
with Assert.raises(AssertionError):
Assert('{"works": true}').json != dict(works=True)
with Assert.raises(AssertionError):
Assert('{"works": true}').json == dict(works=False)
try:
import lxml
except ImportError:
lxml = None
@suite.test_if(lxml)
def css():
"""Assert.css"""
html = Assert("""
<div id="maincontent">
<div class="container">
<p>Hello World</p>
</div>
</div>
""")
html.css('#maincontent .container p')[0].text == 'Hello World'
with Assert.raises(AssertionError):
html.css('#maincontent .container p')[0].text != 'Hello World'
@suite.test_if(lxml)
def xpath():
"""Assert.xpath"""
xml = Assert("""
<div id="maincontent">
<div class="container">
<p>Hello World</p>
</div>
</div>
""")
path = '/div[@id="maincontent"]/div[@class="container"]/p'
xml.xpath(path)[0].text == 'Hello World'
with Assert.raises(AssertionError):
xml.xpath(path)[0].text != 'Hello World'
@suite.test
def passed_to():
"""Assert.passed_to"""
Assert([1, 2, 3]).passed_to(len) == 3
Assert(1).passed_to(str) == '1'
Assert('a').passed_to(int, 16) == 10
Assert('a').passed_to(int, base=16) == 10
with Assert.raises(AssertionError):
Assert([1, 2, 3]).passed_to(len) != 3
with Assert.raises(AssertionError):
Assert(1).passed_to(str) != '1'
with Assert.raises(AssertionError):
Assert('a').passed_to(int, 16) != 10
with Assert.raises(AssertionError):
Assert('a').passed_to(int, base=16) != 10
@suite.test
def predicate():
with Assert.raises(AssertionError):
Assert(bool, 0)
Assert(bool, 1)
|
138404
|
import re
import json
import time
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.db import transaction, IntegrityError
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_safe, require_POST
from ide.models.build import BuildResult
from ide.models.project import Project, TemplateProject
from ide.models.files import SourceFile, ResourceFile
from ide.tasks.archive import create_archive, do_import_archive
from ide.tasks.build import run_compile
from ide.tasks.gist import import_gist
from ide.tasks.git import do_import_github
from utils.td_helper import send_td_event
from utils.jsonview import json_view, BadRequest
__author__ = 'katharine'
@require_safe
@login_required
@json_view
def project_info(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
source_files = SourceFile.objects.filter(project=project).order_by('file_name')
resources = ResourceFile.objects.filter(project=project).order_by('file_name')
return {
'type': project.project_type,
'name': project.name,
'last_modified': str(project.last_modified),
'app_uuid': project.app_uuid or '',
'app_company_name': project.app_company_name,
'app_short_name': project.app_short_name,
'app_long_name': project.app_long_name,
'app_version_label': project.app_version_label,
'app_is_watchface': project.app_is_watchface,
'app_is_hidden': project.app_is_hidden,
'app_keys': json.loads(project.app_keys),
'parsed_app_keys': project.get_parsed_appkeys(),
'app_is_shown_on_communication': project.app_is_shown_on_communication,
'app_capabilities': project.app_capabilities,
'app_jshint': project.app_jshint,
'app_dependencies': project.get_dependencies(include_interdependencies=False),
'interdependencies': [p.id for p in project.project_dependencies.all()],
'sdk_version': project.sdk_version,
'app_platforms': project.app_platforms,
'app_modern_multi_js': project.app_modern_multi_js,
'menu_icon': project.menu_icon.id if project.menu_icon else None,
'source_files': [{
'name': f.file_name,
'id': f.id,
'target': f.target,
'file_path': f.project_path,
'lastModified': time.mktime(f.last_modified.utctimetuple())
} for f in source_files],
'resources': [{
'id': x.id,
'file_name': x.file_name,
'kind': x.kind,
'identifiers': [y.resource_id for y in x.identifiers.all()],
'extra': {y.resource_id: y.get_options_dict(with_id=False) for y in x.identifiers.all()},
'variants': [y.get_tags() for y in x.variants.all()],
} for x in resources],
'github': {
'repo': "github.com/%s" % project.github_repo if project.github_repo is not None else None,
'branch': project.github_branch if project.github_branch is not None else None,
'last_sync': str(project.github_last_sync) if project.github_last_sync is not None else None,
'last_commit': project.github_last_commit,
'auto_build': project.github_hook_build,
'auto_pull': project.github_hook_uuid is not None
},
'supported_platforms': project.supported_platforms
}
@require_POST
@login_required
@json_view
def compile_project(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
build = BuildResult.objects.create(project=project)
task = run_compile.delay(build.id)
return {"build_id": build.id, "task_id": task.task_id}
@require_safe
@login_required
@json_view
def last_build(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
build = project.builds.order_by('-started')[0]
except (IndexError, BuildResult.DoesNotExist):
return {"build": None}
else:
b = {
'uuid': build.uuid,
'state': build.state,
'started': str(build.started),
'finished': str(build.finished) if build.finished else None,
'id': build.id,
'download': build.package_url if project.project_type == 'package' else build.pbw_url,
'log': build.build_log_url,
'build_dir': build.get_url(),
'sizes': build.get_sizes(),
}
return {"build": b}
@require_safe
@login_required
@json_view
def build_history(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
builds = project.builds.order_by('-started')[:10]
except (IndexError, BuildResult.DoesNotExist):
return {"build": None}
out = []
for build in builds:
out.append({
'uuid': build.uuid,
'state': build.state,
'started': str(build.started),
'finished': str(build.finished) if build.finished else None,
'id': build.id,
'download': build.package_url if project.project_type == 'package' else build.pbw_url,
'log': build.build_log_url,
'build_dir': build.get_url(),
'sizes': build.get_sizes()
})
return {"builds": out}
@require_safe
@login_required
@json_view
def build_log(request, project_id, build_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
build = get_object_or_404(BuildResult, project=project, pk=build_id)
log = build.read_build_log()
send_td_event('cloudpebble_view_build_log', data={
'data': {
'build_state': build.state
}
}, request=request, project=project)
return {"log": log}
@require_POST
@login_required
@json_view
def create_project(request):
name = request.POST['name']
template_id = request.POST.get('template', None)
if template_id is not None:
template_id = int(template_id)
project_type = request.POST.get('type', 'native')
template_name = None
sdk_version = str(request.POST.get('sdk', '2'))
try:
with transaction.atomic():
app_keys = '{}' if sdk_version == '2' else '[]'
project = Project.objects.create(
name=name,
owner=request.user,
app_company_name=request.user.username,
app_short_name=name,
app_long_name=name,
app_version_label='1.0',
app_is_watchface=False,
app_capabilities='',
project_type=project_type,
sdk_version=sdk_version,
app_keys=app_keys
)
if template_id is not None and template_id != 0:
template = TemplateProject.objects.get(pk=template_id)
template_name = template.name
template.copy_into_project(project)
elif project_type == 'simplyjs':
f = SourceFile.objects.create(project=project, file_name="app.js")
f.save_text(open('{}/src/html/demo.js'.format(settings.SIMPLYJS_ROOT)).read())
elif project_type == 'pebblejs':
f = SourceFile.objects.create(project=project, file_name="app.js")
f.save_text(open('{}/src/js/app.js'.format(settings.PEBBLEJS_ROOT)).read())
# TODO: Default file for Rocky?
project.full_clean()
project.save()
except IntegrityError as e:
raise BadRequest(str(e))
else:
send_td_event('cloudpebble_create_project', {'data': {'template': {'id': template_id, 'name': template_name}}},
request=request, project=project)
return {"id": project.id}
@require_POST
@login_required
@json_view
def save_project_settings(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
with transaction.atomic():
project.name = request.POST['name']
project.app_uuid = request.POST['app_uuid']
project.app_company_name = request.POST['app_company_name']
project.app_short_name = request.POST['app_short_name']
project.app_long_name = request.POST['app_long_name']
project.app_version_label = request.POST['app_version_label']
project.app_is_watchface = bool(int(request.POST['app_is_watchface']))
project.app_is_hidden = bool(int(request.POST['app_is_hidden']))
project.app_is_shown_on_communication = bool(int(request.POST['app_is_shown_on_communication']))
project.app_capabilities = request.POST['app_capabilities']
project.app_keys = request.POST['app_keys']
project.app_jshint = bool(int(request.POST['app_jshint']))
project.sdk_version = request.POST['sdk_version']
project.app_platforms = request.POST['app_platforms']
project.app_modern_multi_js = bool(int(request.POST['app_modern_multi_js']))
menu_icon = request.POST['menu_icon']
old_icon = project.menu_icon
if menu_icon != '':
menu_icon = int(menu_icon)
if old_icon is not None:
old_icon.is_menu_icon = False
old_icon.save()
icon_resource = project.resources.filter(id=menu_icon)[0]
icon_resource.is_menu_icon = True
icon_resource.save()
elif old_icon is not None:
old_icon.is_menu_icon = False
old_icon.save()
project.save()
except IntegrityError as e:
        raise BadRequest(str(e))
else:
send_td_event('cloudpebble_save_project_settings', request=request, project=project)
@require_POST
@login_required
@json_view
def save_project_dependencies(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
project.set_dependencies(json.loads(request.POST['dependencies']))
project.set_interdependencies([int(x) for x in json.loads(request.POST['interdependencies'])])
return {'dependencies': project.get_dependencies()}
except (IntegrityError, ValueError) as e:
raise BadRequest(str(e))
else:
send_td_event('cloudpebble_save_project_settings', request=request, project=project)
@require_POST
@login_required
@json_view
def delete_project(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
if not bool(request.POST.get('confirm', False)):
raise BadRequest(_("Not confirmed"))
project.delete()
send_td_event('cloudpebble_delete_project', request=request, project=project)
@login_required
@require_POST
@json_view
def begin_export(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
result = create_archive.delay(project.id)
return {'task_id': result.task_id}
@login_required
@require_safe
@json_view
def get_projects(request):
""" Gets a list of all projects owned by the user.
Accepts one possible filter: '?libraries=[id]'. If given, the list of projects
is limited to packages, and each returned package includes a 'depended_on' attribute
which is true if it is depended on by the project where pk=[id].
"""
filters = {
'owner': request.user
}
exclusions = {}
parent_project = None
libraries_for_project = int(request.GET['libraries']) if 'libraries' in request.GET else None
if libraries_for_project:
filters['project_type'] = 'package'
parent_project = get_object_or_404(Project, pk=libraries_for_project, owner=request.user)
parent_project_dependencies = parent_project.project_dependencies.all()
exclusions['pk'] = libraries_for_project
projects = Project.objects.filter(**filters).exclude(**exclusions)
def process_project(project):
data = {
'name': project.name,
'package_name': project.npm_name,
'id': project.id,
'app_version_label': project.app_version_label,
'latest_successful_build': None
}
try:
data['latest_successful_build'] = str(BuildResult.objects.filter(project=project, state=BuildResult.STATE_SUCCEEDED).latest('id').finished)
except BuildResult.DoesNotExist:
pass
if parent_project:
data['depended_on'] = project in parent_project_dependencies
return data
return {
'projects': [process_project(project) for project in projects]
}
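# Illustrative sketch only (not part of the original views): exercising the
# '?libraries=[id]' filter documented above via Django's test client. The URL
# path below is an assumption for illustration; use the route configured in
# this project's urls.py.
#
#   from django.test import Client
#   c = Client()
#   c.force_login(user)
#   resp = c.get('/api/projects?libraries=42')
#   packages = resp.json()['projects']   # each entry carries a 'depended_on' flag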
@login_required
@require_POST
@json_view
def import_zip(request):
zip_file = request.FILES['archive']
name = request.POST['name']
try:
project = Project.objects.create(owner=request.user, name=name)
except IntegrityError as e:
raise BadRequest(str(e))
task = do_import_archive.delay(project.id, zip_file.read(), delete_project=True)
return {'task_id': task.task_id, 'project_id': project.id}
@login_required
@require_POST
@json_view
def import_github(request):
name = request.POST['name']
repo = request.POST['repo']
branch = request.POST['branch']
add_remote = (request.POST['add_remote'] == 'true')
match = re.match(r'^(?:https?://|git@|git://)?(?:www\.)?github\.com[/:]([\w.-]+)/([\w.-]+?)(?:\.git|/|$)', repo)
if match is None:
raise BadRequest(_("Invalid Github URL."))
github_user = match.group(1)
github_project = match.group(2)
try:
project = Project.objects.create(owner=request.user, name=name)
except IntegrityError as e:
raise BadRequest(str(e))
if add_remote:
project.github_repo = "%s/%s" % (github_user, github_project)
project.github_branch = branch
project.save()
task = do_import_github.delay(project.id, github_user, github_project, branch, delete_project=True)
return {'task_id': task.task_id, 'project_id': project.id}
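# For reference, the GitHub URL regex above accepts forms such as (groups 1 and 2
# capture the user and repository name respectively):
#   https://github.com/user/repo
#   https://github.com/user/repo.git
#   git@github.com:user/repo.git
#   git://github.com/user/repo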
@login_required
@require_POST
@json_view
def do_import_gist(request):
task = import_gist.delay(request.user.id, request.POST['gist_id'])
return {'task_id': task.task_id}
|
138407
|
from spaceone.core.locator import Locator
from spaceone.core.transaction import Transaction
class CoreObject(object):
def __init__(self, transaction: Transaction = None):
if transaction:
self.transaction = transaction
else:
self.transaction = Transaction()
self.locator = Locator(self.transaction)
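# Minimal usage sketch (illustrative, not part of the original module): any
# subclass gets a transaction and a service locator for free.
#
#   class MyManager(CoreObject):
#       pass
#
#   MyManager()             # creates a fresh Transaction internally
#   MyManager(transaction)  # or reuses an existing Transaction instance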
|
138452
|
import toml
import os
def setup_config(usr, privKey):
print('Setup Config file for Node 1 ...')
conf = os.environ['OLDATA'] + "/devnet/1-Node/config.toml"
Configuration = toml.load(conf)
node_conf = Configuration['node']
auth_conf = node_conf['Auth']
if usr:
auth_conf['owner_credentials'] = ['username:password']
else:
auth_conf['owner_credentials'] = None
if privKey:
        auth_conf['rpc_private_key'] = \
            '<KEY>'
else:
auth_conf['rpc_private_key'] = ""
f = open(conf, 'w')
toml.dump(Configuration, f)
if __name__ == "__main__":
print('rpcAuth Test Script running')
print('************ Test Get Token API ************')
setup_config(True, True)
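# For reference, a rough sketch of how the rewritten [node.Auth] section ends up
# when both flags are set (assumption based only on the keys set in setup_config;
# all other fields in config.toml are left untouched):
#
#   [node.Auth]
#   owner_credentials = [ "username:password",]
#   rpc_private_key = "<KEY>"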
|
138465
|
import numpy as np
import cv2
# This file is a set of commonly used functions by the viz scripts. It
# is not meant to be run on its own
def unblockshaped(arr, h, w, rgb=False):
if rgb:
n, nrows, ncols, nchannels = arr.shape
return (arr.reshape(h//nrows, -1, nrows, ncols, nchannels)
.swapaxes(1,2)
.reshape(h, w, 3))
n, nrows, ncols = arr.shape
return (arr.reshape(h//nrows, -1, nrows, ncols)
.swapaxes(1,2)
.reshape(h, w))
def reshape_to_row(arr, side=28, rgb=False):
if rgb:
grid = np.array([np.reshape(img, (side, side, 3)) for img in arr])
else:
grid = np.array([np.reshape(img, (side, side)) for img in arr])
return unblockshaped(grid, int(side), int(side * grid.shape[0]), rgb=rgb)
def reshape_to_grid(arr, side=28, rgb=False):
if rgb:
grid = np.array([np.reshape(img, (side, side, 3)) for img in arr])
else:
grid = np.array([np.reshape(img, (side, side)) for img in arr])
size = int(side * np.sqrt(grid.shape[0]))
return unblockshaped(grid, size, size, rgb=rgb)
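# Illustrative usage (not part of the original helpers): stitching a batch of
# flattened grayscale images back into a single image.
#
#   imgs = np.random.rand(4, 28 * 28)    # 4 flattened 28x28 images
#   row = reshape_to_row(imgs)            # -> (28, 112) horizontal strip
#   grid = reshape_to_grid(imgs)          # -> (56, 56) square grid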
|
138490
|
from douyin_spider.utils.common import parse_datetime, get_array_first
from douyin_spider.models.video import Video
from douyin_spider.models.music import Music
from douyin_spider.models.user import User, Star
from douyin_spider.models.address import Address
def get_video_url(video_list):
"""
parse video url from video_list
:param video_list:
:return:
"""
if video_list and isinstance(video_list, list):
return video_list[0]
return None
def get_music_url(music_list):
"""
parse music url from music_list
:param music_list:
:return:
"""
if music_list and isinstance(music_list, list):
return music_list[-1]
return None
def get_cover_url(video_json):
"""
parse cover url from video_json
:param video_json:
:return:
"""
return video_json.get('origin_cover') or video_json.get('cover')
def parse_gender(gender_codeName_str):
"""
parse user gender
:param gender_codeName_str:
:return:
"""
dict_gender_mapping = {'0': 'male', '1': 'female', '2': "unknown"}
if isinstance(gender_codeName_str, str):
return dict_gender_mapping[gender_codeName_str]
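# e.g. parse_gender('0') -> 'male', parse_gender('1') -> 'female',
#      parse_gender('2') -> 'unknown'; non-string input falls through to None.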
def download_video_test(url):
"""
download test with headers
:param url:
:return:
"""
import requests
headers = {'User-Agent': 'Aweme 5.5.0 rv:55011 (iPhone; iOS 11.3.1; zh_CN) Cronet'}
res = requests.get(url, headers=headers)
with open('test.mp4', 'wb+') as f:
f.write(res.content)
def parse_to_video(data):
"""
parse json to Video
:param data:
:return: Video
"""
id = data.get('aweme_id')
statistics = data.get('statistics', {})
like_count = statistics.get('digg_count')
comment_count = statistics.get('comment_count')
share_count = statistics.get('share_count')
share_url = data.get('share_url')
desc = data.get('desc')
group_id = data.get('group_id')
author_user_id = data.get('author_user_id')
create_time = parse_datetime(data.get('create_time'))
is_ads = data.get('is_ads')
region = data.get('region')
video = data.get('video', {})
ratio = video.get('ratio')
cover_url = get_array_first(get_cover_url(video).get('url_list', []))
play_url = get_video_url(video.get('play_addr', {}).get('url_list', []))
duration = data.get('duration')
music = parse_to_music(data.get('music', {}))
author = parse_to_user(data.get('author', {}))
address = parse_to_address(data.get('poi_info', {}))
if id:
return Video(
id=id,
like_count=like_count,
comment_count=comment_count,
share_count=share_count,
share_url=share_url,
desc=desc,
group_id=group_id,
author_user_id=author_user_id,
create_time=create_time,
is_ads=is_ads,
region=region,
ratio=ratio,
cover_url=cover_url,
play_url=play_url,
duration=duration,
music=music,
author=author,
address=address
)
else:
return None
def parse_to_user(author_json):
"""
parse json to User
:param author_json:
:return: User
"""
id = author_json.get('mid')
avatar_url = get_array_first(author_json.get('avatar_larger', {}).get('url_list'))
is_verified = author_json.get('is_verified')
verify_info = author_json.get('custom_verify')
is_hide_search = author_json.get('hide_search')
nickname = author_json.get('nickname')
region = author_json.get('CN')
signature = author_json.get('signature')
gender = parse_gender(author_json.get('gender'))
birthday = parse_datetime(author_json.get('birthday'))
alias = author_json.get('unique_id') or author_json.get('short_id')
if id:
return User(
id=id,
avatar_url=avatar_url,
is_verified=is_verified,
verify_info=verify_info,
is_hide_search=is_hide_search,
nickname=nickname,
region=region,
signature=signature,
gender=gender,
birthday=birthday,
alias=alias
)
else:
return None
def parse_to_star(star_json):
"""
parse json to Star
:param star_json:
:return:Star
"""
user_info = star_json.get('user_info', {})
id = user_info.get('uid')
nickname = user_info.get('nickname')
signature = user_info.get('signature')
avatar_url = get_array_first(user_info.get('avatar_larger', {}).get('url_list', []))
factor_hot_value = star_json.get('factor_hot_value')
hot_value = star_json.get('hot_value')
if id:
return Star(
id=id,
nickname=nickname,
signature=signature,
avatar_url=avatar_url,
factor_hot_value=factor_hot_value,
hot_value=hot_value
)
else:
return None
def parse_to_music(music_json):
"""
parse json to Music
:param music_json:
:return: Music
"""
id = music_json.get('mid')
title = music_json.get('title')
play_url = get_music_url(music_json.get('play_url', {}).get('url_list', []))
owner_name = music_json.get('owner_nickname')
album = music_json.get('album')
owner_id = music_json.get('owner_id')
duration = music_json.get('duration')
cover_url = get_array_first(music_json.get('cover_large', {}).get('url_list'))
if id:
return Music(
id=id,
title=title,
play_url=play_url,
owner_name=owner_name,
album=album,
owner_id=owner_id,
duration=duration,
cover_url=cover_url
)
else:
return None
def parse_to_address(poi_info_json):
"""
parse json to Address
:param poi_info_json:
:return: Address
"""
if poi_info_json:
id = poi_info_json.get('poi_id')
longitude = poi_info_json.get('poi_longitude')
latitude = poi_info_json.get('poi_latitude')
name = poi_info_json.get('poi_name')
address_info = poi_info_json.get('address_info', {})
province = address_info.get('province') or None
city = address_info.get('city') or None
simple_addr = address_info.get('simple_addr') or None
district = address_info.get('district') or None
city_code = address_info.get('city_code') or None
sub_address = address_info.get('address') or None
if id:
return Address(
id=id,
longitude=longitude,
latitude=latitude,
name=name,
province=province,
city=city,
simple_addr=simple_addr,
district=district,
city_code=city_code,
sub_address=sub_address
)
else:
return None
if __name__ == '__main__':
url = 'https://api.amemv.com/aweme/v1/play/?video_id=v0200f940000bis3ort1mik7192tss6g&line=1&ratio=540p&media_type=4&vr_type=0&improve_bitrate=0'
download_video_test(url)
|
138514
|
from functools import partial, reduce
import copy
from collections import namedtuple
from functools import partial
import sys
sys.setrecursionlimit(10**3)
#helpers
def curry(f):
arg_num = f.func_code.co_argcount
def wrap(*args):
_f = partial(f, *args)
if hasattr(f, 't'):
_f.t = f.t
if len(_f.args) == arg_num:
return _f()
return _f
return wrap
class Infix:
#taken from http://code.activestate.com/recipes/384122-infix-operators/
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
class Maybe(object):
pass
class Just(Maybe):
def __init__(self, value):
self.value = value
class Nothing(Maybe):
pass
def isJust(n):
assert isinstance(n, Maybe)
if isinstance(n, Nothing):
return False
else:
return True
isNothing = lambda n: not isJust(n)
def fromMaybe(d, x):
assert isinstance(x, Maybe)
if isJust(x):
return x.value
else:
return d
def fromJust(j):
assert isinstance(j, Maybe)
if isJust(j):
return j.value
else:
raise TypeError('Maybe.fromJust: Nothing')
def maybe(n, f, m):
assert isinstance(m, Maybe)
if isNothing(m):
return n
else:
return f(m.value)
def t(_type):
def inner(f):
f.t = _type
return f
return inner
@t(object)
def id_(i): return i
def special_id(_type):
@t(_type)
def sid(i): return i
return sid
foldl = lambda f, z, x: reduce(f, x, z)
# end of helpers
orElse = lambda x, y: x or y
@curry
def everywhere(f, x):
'''
Generic traversal combinator for every node in the tree bottom up.
'''
return f(gmapT(everywhere(f), x))
everywhereBU = everywhere
@curry
def everywhereTD(f, x):
'''
Generic traversal combinator for every node in the tree top down.
'''
return gmapT(everywhereTD(f), f(x))
def everywhereBut(q, f, x):
if q(x):
return x
return f(gmapT(everywhereBut(q, f), x))
everywhereButBU = everywhereBut
@curry
def everywhereButTD(q, f, x):
'''
everywhereButTD :: (object -> bool) -> (object -> object) -> object
'''
if q(x):
return x
return gmapT(everywhereButTD(q, f), f(x))
@curry
def everywhereButAny(q, f, x):
'''
everywhereButAny :: [(object -> bool)] -> (object -> object) -> object
'''
    if any([q(xi) for xi in x]):
return x
return f(gmapT(everywhereBut(q, f), x))
everywhereButAnyBU = everywhereButAny
@curry
def everywhereButAnyTD(q, f, x):
'''
everywhereButAny :: [(object -> bool)] -> (object -> object) -> object
'''
if any(map(q, x)):
return x
return gmapT(everywhereBut(q, f), f(x))
@curry
def everything(k, f, x):
return foldl(k, f(x), gmapQ(everything(k, f), x))
@curry
def mkT(f, value):
'''
apply transformation f to value.
'''
m = cast(value, f)
if isJust(m):
return f(m.value)
else:
return value
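# Illustrative sketch (not part of the original module): combining mkT with
# everywhere to bump every int inside a nested value. The t(int) decorator tags
# the transform with the type that cast() checks against.
#
#   @t(int)
#   def inc(n):
#       return n + 1
#
#   everywhere(mkT(inc), {'a': 1, 'b': [2, 3]})   # -> {'a': 2, 'b': [3, 4]}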
@Infix
@curry
def choice(f, q, a):
try:
return f(a)
    except Exception:
return q(a)
@Infix
@curry
def mkQ(r, q, a):
m = cast(a, q)
if isJust(m):
return q(m.value)
else:
return r
@Infix
@curry
def extQ(f, g, a):
return maybe(f(a), g, cast(a, g))
def cast(val, t):
'''
Return Just(val) if the type of value equals the type of the function parameter.
'''
if isinstance(val, t.t):
return Just(val)
else:
return Nothing()
def gmapT(f, val):
if type(val) in [int, str, float, bool, unicode]:
return val
elif type(val) in [list, set]:
if len(val) > 0:
return [f(val[0])] + f(val[1:])
else:
return []
elif type(val) == dict:
return { key:f(value) for key, value in val.items() }
elif callable(val):
return val
elif type(val) == tuple:
        return tuple(map(f, val))
else:
val = copy.deepcopy(val)
keys = dir(val)
keys = list(set(keys) - set(dir(object)))
keys = [key for key in keys if not key.startswith('_')]
items = [getattr(val, key) for key in keys]
items = map(f, items)
for key, value in zip(keys, items):
setattr(val, key, value)
return val
def gmapQ(f, val):
if type(val) in [int, str, float]:
return []
elif type(val) in [list, set, tuple]:
if len(val) > 0:
return [f(val[0]), f(val[1:])]
else:
return []
else:
val = copy.deepcopy(val)
keys = dir(val)
keys = [key for key in list(set(keys) - set(dir(object))) if not key.startswith('_')]
items = [getattr(val, key) for key in keys]
return map(f, items)
@t(object)
def gsize(t):
return 1 + sum(gmapQ(gsize, t))
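# Illustrative sketch (not part of the original module): `everything` folds a
# query over every node, e.g. counting the ints inside a nested value via mkQ.
#
#   count_int = (0 |mkQ| t(int)(lambda n: 1))
#   everything(lambda a, b: a + b, count_int, [1, 'x', [2, 3]])   # -> 3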
|
138520
|
import os
import sys
import time
import json
import requests
from functools import partial
from threading import Thread
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtCore import pyqtSignal
from core import Utils, Command
import main_gui
'''
[Main window class]
The main window class inherits from QMainWindow.
Almost all of the GUI program's interaction happens here.
'''
class MainWin(QMainWindow):
def __init__(self, parent=None):
        # Required PyQt boilerplate
self.app = QApplication(sys.argv)
QMainWindow.__init__(self, parent)
self.ui = main_gui.Ui_MainWindow()
self.ui.setupUi(self)
        # Initialize the program's core functionality module (core.py)
self.afk = Command()
        # Give the core module (core.py) access to the UI as well
self.afk.utils.ui = self.ui
        # Initialize GUI values
self.init_interface()
        # Initialize signals
self.init_signal()
        # Show the GUI
self.show()
self.write_log("程序核心初始化完成!")
        # Load image resources
self.afk.utils.load_res()
self.write_log("图片资源加载完成!")
self.write_log("afk-arena-tools是开源软件,如果有任何问题建议想法,欢迎提issue或PR~")
        # Check for updates
Thread(target=self.check_update).start()
sys.exit(self.app.exec_())
    # Initialization of GUI values
def init_interface(self):
        # The function currently being executed
self.curr_func = None
        # Make the log box read-only
self.ui.textBrowser.setReadOnly(True)
        # Load the configuration
self.load_conf()
    # Initialize signals and slots (event bindings)
def init_signal(self):
        # Bind the "Generate" button on the "Generator" page to the generate function
self.ui.pushButton.clicked.connect(partial(self.do_func, self.afk.story_mode_retry_only))
self.ui.pushButton_2.clicked.connect(partial(self.do_func, self.afk.story_mode))
self.ui.pushButton_5.clicked.connect(partial(self.do_func, self.afk.tower_mode_retry_only))
self.ui.pushButton_6.clicked.connect(partial(self.do_func, self.afk.tower_mode))
self.ui.pushButton_10.clicked.connect(self.get_thread_status)
self.ui.pushButton_20.clicked.connect(self.stop_thread)
self.ui.pushButton_21.clicked.connect(self.wifi_adb_connect)
self.ui.pushButton_22.clicked.connect(partial(self.afk.utils.get_img, pop_up_window=True))
self.ui.pushButton_23.clicked.connect(self.afk.utils.adb_devices)
self.ui.pushButton_24.clicked.connect(self.afk.utils.adb_version)
self.ui.pushButton_25.clicked.connect(partial(self.afk.utils.get_img, save_img=True))
self.ui.pushButton_26.clicked.connect(self.gui_swipe)
self.ui.pushButton_27.clicked.connect(self.gui_tap)
self.ui.pushButton_28.clicked.connect(self.gui_long_press)
self.ui.pushButton_9.clicked.connect(partial(self.do_func, self.afk.daily_mode))
self.ui.radioButton.clicked.connect(partial(self.change_resolution, 100))
self.ui.radioButton_2.clicked.connect(partial(self.change_resolution, 75))
self.ui.radioButton_3.clicked.connect(partial(self.change_resolution, 50))
self.afk.utils.logger.update_signal.connect(self.write_log)
self.afk.utils.logger.error_stop_signal.connect(self.stop_thread)
self.afk.utils.logger.finish_exec_signal.connect(self.thread_finish_exec)
self.ui.doubleSpinBox.valueChanged.connect(self.change_exec_time_delay)
self.ui.doubleSpinBox_2.valueChanged.connect(self.change_threshold)
    # Load the default configuration
def load_default_conf(self):
        # Default wifi_adb address
self.ui.lineEdit.setText(self.afk.utils.wifi_adb_addr)
        # Default resolution is 1440P
self.ui.radioButton.setChecked(True)
        # Daily tasks are checked by default
self.ui.checkBox_2.setChecked(True)
self.ui.checkBox_3.setChecked(True)
self.ui.checkBox_4.setChecked(True)
self.ui.checkBox_5.setChecked(True)
self.ui.checkBox_6.setChecked(True)
self.ui.checkBox_7.setChecked(True)
self.ui.checkBox_8.setChecked(True)
self.ui.checkBox_9.setChecked(True)
self.ui.checkBox_10.setChecked(True)
self.ui.checkBox_11.setCheckable(False)
self.ui.checkBox_12.setCheckable(False)
self.ui.checkBox_13.setCheckable(False)
self.ui.checkBox_14.setChecked(True)
        # Script execution settings
self.ui.doubleSpinBox.setValue(1.00)
self.ui.doubleSpinBox_2.setValue(0.90)
    # Save the configuration
def save_conf(self):
conf_data = {}
conf_data["wifi_adb_addr"] = self.afk.utils.wifi_adb_addr
conf_data["radioButton"] = self.ui.radioButton.isChecked()
conf_data["radioButton_2"] = self.ui.radioButton_2.isChecked()
conf_data["radioButton_3"] = self.ui.radioButton_3.isChecked()
conf_data["checkBox_2"] = self.ui.checkBox_2.isChecked()
conf_data["checkBox_3"] = self.ui.checkBox_3.isChecked()
conf_data["checkBox_4"] = self.ui.checkBox_4.isChecked()
conf_data["checkBox_5"] = self.ui.checkBox_5.isChecked()
conf_data["checkBox_6"] = self.ui.checkBox_6.isChecked()
conf_data["checkBox_7"] = self.ui.checkBox_7.isChecked()
conf_data["checkBox_8"] = self.ui.checkBox_8.isChecked()
conf_data["checkBox_9"] = self.ui.checkBox_9.isChecked()
conf_data["checkBox_10"] = self.ui.checkBox_10.isChecked()
conf_data["checkBox_11"] = self.ui.checkBox_11.isCheckable()
conf_data["checkBox_12"] = self.ui.checkBox_12.isCheckable()
conf_data["checkBox_13"] = self.ui.checkBox_13.isCheckable()
conf_data["checkBox_14"] = self.ui.checkBox_14.isChecked()
conf_data["doubleSpinBox"] = self.afk.exec_func_delay
conf_data["doubleSpinBox_2"] = self.afk.utils.threshold
with open(os.path.join(os.getcwd(), "conf.json"), "w") as f:
json.dump(conf_data, f)
    # Load the configuration
def load_conf(self):
full_path = os.path.join(os.getcwd(), "conf.json")
if os.path.isfile(full_path):
with open(full_path) as f:
conf_data = json.load(f)
try:
self.afk.utils.wifi_adb_addr = conf_data["wifi_adb_addr"]
self.ui.lineEdit.setText(self.afk.utils.wifi_adb_addr)
self.ui.radioButton.setChecked(conf_data["radioButton"])
if conf_data["radioButton"]:
self.change_resolution(100, show_log=False)
self.ui.radioButton_2.setChecked(conf_data["radioButton_2"])
if conf_data["radioButton_2"]:
self.change_resolution(75, show_log=False)
self.ui.radioButton_3.setChecked(conf_data["radioButton_3"])
if conf_data["radioButton_3"]:
self.change_resolution(50, show_log=False)
                # Daily tasks are checked by default
self.ui.checkBox_2.setChecked(conf_data["checkBox_2"])
self.ui.checkBox_3.setChecked(conf_data["checkBox_3"])
self.ui.checkBox_4.setChecked(conf_data["checkBox_4"])
self.ui.checkBox_5.setChecked(conf_data["checkBox_5"])
self.ui.checkBox_6.setChecked(conf_data["checkBox_6"])
self.ui.checkBox_7.setChecked(conf_data["checkBox_7"])
self.ui.checkBox_8.setChecked(conf_data["checkBox_8"])
self.ui.checkBox_9.setChecked(conf_data["checkBox_9"])
self.ui.checkBox_10.setChecked(conf_data["checkBox_10"])
self.ui.checkBox_11.setCheckable(conf_data["checkBox_11"])
self.ui.checkBox_12.setCheckable(conf_data["checkBox_12"])
self.ui.checkBox_13.setCheckable(conf_data["checkBox_13"])
self.ui.checkBox_14.setChecked(conf_data["checkBox_14"])
self.afk.exec_func_delay = float(conf_data["doubleSpinBox"])
self.ui.doubleSpinBox.setValue(self.afk.exec_func_delay)
self.afk.utils.threshold = float(conf_data["doubleSpinBox_2"])
self.ui.doubleSpinBox_2.setValue(self.afk.utils.threshold)
except:
self.write_log("配置读取错误,加载默认配置并生成配置文件conf.json")
self.load_default_conf()
self.save_conf()
else:
self.write_log("检测到是首次启动,加载默认配置并生成配置文件conf.json")
self.load_default_conf()
self.save_conf()
    # Check for updates
def check_update(self):
def compare_ver(web_ver, local_ver):
if web_ver == local_ver:
return 0
web_ver = web_ver.split(".")
local_ver = local_ver.split(".")
for i in range(len(web_ver)):
if int(web_ver[i]) > int(local_ver[i]):
return 1
if int(web_ver[i]) < int(local_ver[i]):
return 0
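        # e.g. compare_ver('1.2.0', '1.1.9') -> 1 (newer version available),
        #      compare_ver('1.2.0', '1.2.0') -> 0 (already up to date)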
try:
            # Fetch the latest version info
r = requests.get("https://raw.githubusercontent.com/oscarcx123/afk-arena-tools/master/version.json")
web_ver_info = r.json()
            # Read the local version info
with open(os.path.join(os.getcwd(), "version.json")) as f:
local_ver_info = json.load(f)
            # Compare version numbers
if compare_ver(web_ver_info["version"], local_ver_info["version"]):
self.afk.utils.write_log(f"检测到新版本!")
self.afk.utils.write_log(f"版本号:{web_ver_info['version']}")
self.afk.utils.write_log(f"更新日期:{web_ver_info['time']}")
self.afk.utils.write_log(f"下载地址:{web_ver_info['url']}")
else:
self.afk.utils.write_log(f"当前已经是最新版本!")
except:
self.afk.utils.write_log(f"获取版本信息失败!")
    # Execute a feature function
def do_func(self, func):
if not self.curr_func:
self.afk.stop = False
self.save_conf()
self.curr_func = Thread(target=func)
self.curr_func.start()
    # Kill the thread
def stop_thread(self):
self.afk.utils.stop_callback = True
if self.curr_func:
self.afk.stop = True
self.curr_func = None
self.write_log("成功停止当前执行的功能!如果还在继续,说明还有残余指令在运行,可以等待执行完毕或者直接重启软件")
else:
self.write_log("当前没有正在执行的功能!")
    # The thread finished executing normally
def thread_finish_exec(self):
self.curr_func = None
self.write_log("功能执行完毕!")
    # Write log output to the log box in the GUI window
def write_log(self, text=None):
#print(self.afk.utils.text)
curr_time = time.strftime("%H:%M:%S", time.localtime())
if not text:
while len(self.afk.utils.text) > 0:
text = self.afk.utils.text.pop(0)
self.ui.textBrowser.insertPlainText(f"[{curr_time}]{text}\n")
self.ui.textBrowser.ensureCursorVisible()
else:
self.ui.textBrowser.insertPlainText(f"[{curr_time}]{text}\n")
self.ui.textBrowser.ensureCursorVisible()
    # Change the resolution
def change_resolution(self, percentage, show_log=True):
self.afk.utils.scale_percentage = percentage
self.afk.utils.load_res()
if show_log:
self.write_log(f"成功将分辨率更改为{int(1440 * percentage / 100)}P")
    # Save the new wifi_adb address and connect
def wifi_adb_connect(self):
self.afk.utils.wifi_adb_addr = self.ui.lineEdit.text()
self.write_log("保存wifi_adb地址成功!")
self.afk.utils.adb_connect()
    # Tap via the GUI
def gui_tap(self):
self.afk.utils.tap(x_coord=int(self.ui.lineEdit_2.text()), y_coord=int(self.ui.lineEdit_3.text()), randomize=False)
    # Long-press via the GUI
def gui_long_press(self):
self.afk.utils.swipe(fromX=int(self.ui.lineEdit_4.text()), fromY=int(self.ui.lineEdit_5.text()))
    # Swipe via the GUI
def gui_swipe(self):
self.afk.utils.swipe(fromX=int(self.ui.lineEdit_4.text()), fromY=int(self.ui.lineEdit_5.text()), toX=int(self.ui.lineEdit_6.text()), toY=int(self.ui.lineEdit_7.text()))
    # Get the current execution status
def get_thread_status(self):
if self.curr_func:
self.write_log(f"【运行状态】正在执行,线程名称:{self.curr_func.name}")
else:
self.write_log(f"【运行状态】没在执行")
    # exec_time delay setting
def change_exec_time_delay(self):
self.afk.exec_func_delay = self.ui.doubleSpinBox.value()
    # Image-matching threshold setting
def change_threshold(self):
self.afk.utils.threshold = self.ui.doubleSpinBox_2.value()
if __name__ == '__main__':
window = MainWin()
|
138585
|
import aiopg
# project
from ddtrace import Pin
from ddtrace.contrib.aiopg.patch import patch
from ddtrace.contrib.aiopg.patch import unpatch
from tests.contrib.asyncio.utils import AsyncioTestCase
from tests.contrib.asyncio.utils import mark_asyncio
from tests.contrib.config import POSTGRES_CONFIG
TEST_PORT = str(POSTGRES_CONFIG["port"])
class AiopgTestCase(AsyncioTestCase):
# default service
TEST_SERVICE = "postgres"
def setUp(self):
super().setUp()
self._conn = None
patch()
def tearDown(self):
super().tearDown()
if self._conn and not self._conn.closed:
self._conn.close()
unpatch()
async def _get_conn_and_tracer(self):
conn = self._conn = await aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(tracer=self.tracer).onto(conn)
return conn, self.tracer
@mark_asyncio
async def test_async_generator(self):
conn, tracer = await self._get_conn_and_tracer()
cursor = await conn.cursor()
q = "select 'foobarblah'"
await cursor.execute(q)
rows = []
async for row in cursor:
rows.append(row)
assert rows == [("foobarblah",)]
spans = self.pop_spans()
assert len(spans) == 1
span = spans[0]
assert span.name == "postgres.query"
|
138590
|
import requests
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
SERVER = "localhost"
URL = "http://%s:8080" % SERVER
def before_all(context):
__open_browser(context)
def __open_browser(context):
chrm = context.config.userdata['chromedriver_path']
try:
# if there is a proxy, we'll use it. Otherwise, we won't.
requests.get("http://localhost:8888", timeout=0.01)
# if there was no exception, we continue here.
PROXY = "localhost:8888"
proxy = Proxy()
proxy.proxy_type = ProxyType.MANUAL
proxy.http_proxy = PROXY
capabilities = webdriver.DesiredCapabilities.CHROME
proxy.add_to_capabilities(capabilities)
if (chrm):
context.driver = webdriver.Chrome(desired_capabilities=capabilities, executable_path=chrm)
else:
context.driver = webdriver.Chrome(desired_capabilities=capabilities)
return context.driver
except:
if (chrm):
context.driver = webdriver.Chrome(executable_path=chrm)
else:
# adding the service args as described below will cause Chromedriver
# to create a log of the communication between it and the Chrome
# browser. It's eye-opening.
#
# for instance:
# [1568045962.076][INFO]: [e18882b1f2abbda89f232f777f98f686] COMMAND TypeElement {
# "id": "0.47079920350295135-1",
# "sessionId": "e18882b1f2abbda89f232f777f98f686",
# "text": "Byron",
# "value": [ "B", "y", "r", "o", "n" ]
# }
#context.driver = webdriver.Chrome(service_args=["--verbose","--logepath=C:\\temp\\qc1.log"])
context.driver = webdriver.Chrome()
return context.driver
def before_scenario(context, scenario):
__reset_database()
def after_all(context):
__close_browser(context)
def __close_browser(context):
context.driver.close()
def __reset_database():
requests.get("%s/demo/flyway" % URL)
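# Usage sketch (assumption, not part of the original file): the optional
# chromedriver path read above comes from behave's userdata, e.g.
#   behave -D chromedriver_path=/path/to/chromedriver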
|
138599
|
import importlib
import json
import platform
import subprocess
import sys
from pathlib import Path
from loguru import logger
from sc2.game_data import AbilityData, GameData, UnitTypeData, UpgradeData
try:
from sc2.ids.id_version import ID_VERSION_STRING
except ImportError:
ID_VERSION_STRING = "4.11.4.78285"
class IdGenerator:
def __init__(self, game_data: GameData = None, game_version: str = None, verbose: bool = False):
self.game_data: GameData = game_data
self.game_version = game_version
self.verbose = verbose
self.HEADER = f'# DO NOT EDIT!\n# This file was automatically generated by "{Path(__file__).name}"\n'
self.PF = platform.system()
self.HOME_DIR = str(Path.home())
self.DATA_JSON = {
"Darwin": self.HOME_DIR + "/Library/Application Support/Blizzard/StarCraft II/stableid.json",
"Windows": self.HOME_DIR + "/Documents/StarCraft II/stableid.json",
"Linux": self.HOME_DIR + "/Documents/StarCraft II/stableid.json",
}
self.ENUM_TRANSLATE = {
"Units": "UnitTypeId",
"Abilities": "AbilityId",
"Upgrades": "UpgradeId",
"Buffs": "BuffId",
"Effects": "EffectId",
}
self.FILE_TRANSLATE = {
"Units": "unit_typeid",
"Abilities": "ability_id",
"Upgrades": "upgrade_id",
"Buffs": "buff_id",
"Effects": "effect_id",
}
def make_key(self, key):
if key[0].isdigit():
key = "_" + key
# In patch 5.0, the key has "@" character in it which is not possible with python enums
return key.upper().replace(" ", "_").replace("@", "")
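    # e.g. make_key("Zealot") -> "ZEALOT",
    #      make_key("250mm Strike Cannons") -> "_250MM_STRIKE_CANNONS",
    #      make_key("Hold@Fire") -> "HOLDFIRE"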
def parse_data(self, data):
# for d in data: # Units, Abilities, Upgrades, Buffs, Effects
units = self.parse_simple("Units", data)
upgrades = self.parse_simple("Upgrades", data)
effects = self.parse_simple("Effects", data)
buffs = self.parse_simple("Buffs", data)
abilities = {}
for v in data["Abilities"]:
key = v["buttonname"]
remapid = v.get("remapid")
if (not key) and (remapid is None):
assert v["buttonname"] == ""
continue
if not key:
if v["friendlyname"] != "":
key = v["friendlyname"]
else:
exit(f"Not mapped: {v !r}")
key = key.upper().replace(" ", "_").replace("@", "")
if "name" in v:
key = f'{v["name"].upper().replace(" ", "_")}_{key}'
if "friendlyname" in v:
key = v["friendlyname"].upper().replace(" ", "_")
if key[0].isdigit():
key = "_" + key
if key in abilities and v["index"] == 0:
print(f"{key} has value 0 and id {v['id']}, overwriting {key}: {abilities[key]}")
# Commented out to try to fix: 3670 is not a valid AbilityId
abilities[key] = v["id"]
elif key in abilities:
print(f"{key} has appeared a second time with id={v['id']}")
else:
abilities[key] = v["id"]
abilities["SMART"] = 1
enums = {}
enums["Units"] = units
enums["Abilities"] = abilities
enums["Upgrades"] = upgrades
enums["Buffs"] = buffs
enums["Effects"] = effects
return enums
def parse_simple(self, d, data):
units = {}
for v in data[d]:
key = v["name"]
if not key:
continue
key_to_insert = self.make_key(key)
if key_to_insert in units:
index = 2
tmp = f"{key_to_insert}_{index}"
while tmp in units:
index += 1
tmp = f"{key_to_insert}_{index}"
key_to_insert = tmp
units[key_to_insert] = v["id"]
return units
def generate_python_code(self, enums):
assert {"Units", "Abilities", "Upgrades", "Buffs", "Effects"} <= enums.keys()
sc2dir = Path(__file__).parent
idsdir = sc2dir / "ids"
idsdir.mkdir(exist_ok=True)
with (idsdir / "__init__.py").open("w") as f:
initstring = f"__all__ = {[n.lower() for n in self.FILE_TRANSLATE.values()] !r}\n".replace("'", '"')
f.write("\n".join([self.HEADER, initstring]))
for name, body in enums.items():
class_name = self.ENUM_TRANSLATE[name]
code = [self.HEADER, "import enum", "\n", f"class {class_name}(enum.Enum):"]
for key, value in sorted(body.items(), key=lambda p: p[1]):
code.append(f" {key} = {value}")
# Add repr function to more easily dump enums to dict
code += ["\n", " def __repr__(self):", ' return f"' + class_name + '.{self.name}"']
code += [
"\n",
f"for item in {class_name}:",
# f" assert not item.name in globals()",
f" globals()[item.name] = item",
"",
]
ids_file_path = (idsdir / self.FILE_TRANSLATE[name]).with_suffix(".py")
with ids_file_path.open("w") as f:
f.write("\n".join(code))
            # Apply formatting
try:
subprocess.run(["black", "--line-length", "120", ids_file_path])
except FileNotFoundError:
print(
f"Black is not installed. Please use 'pip install black' to install black formatter.\nCould not autoformat file {ids_file_path}"
)
if self.game_version is not None:
version_path = Path(__file__).parent / "ids" / "id_version.py"
with open(version_path, "w") as f:
f.write(f'ID_VERSION_STRING = "{self.game_version}"\n')
def update_ids_from_stableid_json(self):
if self.game_version is None or ID_VERSION_STRING is None or ID_VERSION_STRING != self.game_version:
if self.verbose and self.game_version is not None and ID_VERSION_STRING is not None:
logger.info(
f"Game version is different (Old: {self.game_version}, new: {ID_VERSION_STRING}. Updating ids to match game version"
)
with open(self.DATA_JSON[self.PF], encoding="utf-8") as data_file:
data = json.loads(data_file.read())
self.generate_python_code(self.parse_data(data))
# Update game_data if this is a live game
if self.game_data is not None:
self.reimport_ids()
self.update_game_data()
def reimport_ids(self):
# Reload the newly written "id" files
# TODO This only re-imports modules, but if they haven't been imported, it will yield an error
from sc2.ids.ability_id import AbilityId
importlib.reload(sys.modules["sc2.ids.ability_id"])
importlib.reload(sys.modules["sc2.ids.unit_typeid"])
importlib.reload(sys.modules["sc2.ids.upgrade_id"])
importlib.reload(sys.modules["sc2.ids.effect_id"])
importlib.reload(sys.modules["sc2.ids.buff_id"])
# importlib.reload(sys.modules["sc2.ids.id_version"])
importlib.reload(sys.modules["sc2.constants"])
def update_game_data(self):
"""Re-generate the dicts from self.game_data.
This should be done after the ids have been reimported."""
from sc2.ids.ability_id import AbilityId
ids = set(a.value for a in AbilityId if a.value != 0)
self.game_data.abilities = {
a.ability_id: AbilityData(self.game_data, a)
for a in self.game_data._proto.abilities if a.ability_id in ids
}
# self.game_data.abilities = {
# a.ability_id: AbilityData(self.game_data, a) for a in self.game_data._proto.abilities
# }
self.game_data.units = {
u.unit_id: UnitTypeData(self.game_data, u)
for u in self.game_data._proto.units if u.available
}
self.game_data.upgrades = {u.upgrade_id: UpgradeData(self.game_data, u) for u in self.game_data._proto.upgrades}
self.game_data.unit_types = {}
if __name__ == "__main__":
updater = IdGenerator()
updater.update_ids_from_stableid_json()
|
138651
|
import argparse
import csv
import glob
import os
import sys
import time
from datetime import datetime
from pathlib import Path
try:
import streamlit as st
except ModuleNotFoundError:
pass
import torch
import torchvision
import yaml
from omegaconf import OmegaConf
from specvqgan.util import get_ckpt_path
sys.path.insert(0, '.') # nopep8
import matplotlib.pyplot as plt
import soundfile
from torch.utils.data.dataloader import default_collate
from feature_extraction.extract_mel_spectrogram import inv_transforms
from train import instantiate_from_config
from vocoder.modules import Generator
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--resume",
type=str,
nargs="?",
help="load from logdir or checkpoint in logdir",
)
parser.add_argument(
"-b",
"--base",
nargs="*",
metavar="base_config.yaml",
help="paths to base configs. Loaded from left-to-right. "
"Parameters can be overwritten or added with command-line options of the form `--key value`.",
default=list(),
)
parser.add_argument(
"-c",
"--config",
nargs="?",
metavar="single_config.yaml",
help="path to single config. If specified, base configs will be ignored "
"(except for the last one if left unspecified).",
const=True,
default="",
)
parser.add_argument(
"--ignore_base_data",
action="store_true",
help="Ignore data specification from base configs. Useful if you want "
"to specify a custom datasets on the command line.",
)
parser.add_argument(
'--vocoder_path',
default='./vocoder/logs/vggsound/',
help='The path to the folder with pre-trained Vocoder (a folder from ./vocoder/logs)'
)
parser.add_argument(
'--logdir',
default='./logs/',
help='Path to the log dir with pre-trained GPT'
)
return parser
def rename_models(x):
x = x[x.index('T')+1:]
name2type = {
'00-43-28_vggsound_transformer': 'VGGSound – Class – VGGSound Codebook',
'14-41-19_vas_transformer': 'VAS – Class – VGGSound Codebook',
'09-42-07_vas_transformer': 'VAS – Class – VAS Codebook',
'16-35-20_vggsound_transformer': 'VGGSound – No Feats – VGGSound Codebook',
'11-18-51_vggsound_transformer': 'VGGSound – 1 Feat BN – VGGSound Codebook',
'09-34-10_vggsound_transformer': 'VGGSound – 5 Feats BN – VGGSound Codebook',
'07-27-58_vggsound_transformer': 'VGGSound – 212 Feats BN – VGGSound Codebook',
'16-34-36_vas_transformer': 'VAS – No Feats – VGGSound Codebook',
'06-32-51_vas_transformer': 'VAS – 1 Feat BN – VGGSound Codebook',
'05-51-34_vas_transformer': 'VAS – 5 Feats BN – VGGSound Codebook',
'05-38-40_vas_transformer': 'VAS – 212 Feats BN – VGGSound Codebook',
'16-24-38_vas_transformer': 'VAS – No Feats – VAS Codebook',
'13-31-37_vas_transformer': 'VAS – 1 Feats BN – VAS Codebook',
'14-14-24_vas_transformer': 'VAS – 5 Feats BN – VAS Codebook',
'15-17-18_vas_transformer': 'VAS – 212 Feats BN – VAS Codebook',
'11-47-40_vas_transformer': 'VAS – 1 Feat RN50 – VGGSound Codebook',
'11-36-00_vas_transformer': 'VAS – 5 Feats RN50 – VGGSound Codebook',
'11-52-28_vas_transformer': 'VAS – 212 Feats RN50 – VGGSound Codebook',
'14-59-49_vas_transformer': 'VAS – 1 Feat RN50 – VAS Codebook',
'14-51-25_vas_transformer': 'VAS – 5 Feats RN50 – VAS Codebook',
'13-34-39_vas_transformer': 'VAS – 212 Feats RN50 – VAS Codebook',
'21-03-22_vggsound_transformer': 'VGGSound – 1 Feat RN50 – VGGSound Codebook',
'21-34-25_vggsound_transformer': 'VGGSound – 5 Feats RN50 – VGGSound Codebook',
'21-34-41_vggsound_transformer': 'VGGSound – 212 Feats RN50 – VGGSound Codebook',
}
if x in name2type:
x = f'{name2type[x]} ({x})'
return x
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
if "ckpt_path" in config.params:
st.warning("Deleting the restore-ckpt path from the config...")
config.params.ckpt_path = None
if "downsample_cond_size" in config.params:
st.warning("Deleting downsample-cond-size from the config and setting factor=0.5 instead...")
config.params.downsample_cond_size = -1
config.params["downsample_cond_factor"] = 0.5
try:
if "ckpt_path" in config.params.first_stage_config.params:
config.params.first_stage_config.params.ckpt_path = None
st.warning("Deleting the first-stage restore-ckpt path from the config...")
if "ckpt_path" in config.params.cond_stage_config.params:
config.params.cond_stage_config.params.ckpt_path = None
st.warning("Deleting the cond-stage restore-ckpt path from the config...")
except:
pass
model = instantiate_from_config(config)
if sd is not None:
missing, unexpected = model.load_state_dict(sd, strict=False)
try:
st.warning(f"Missing Keys in State Dict: {missing}")
st.warning(f"Unexpected Keys in State Dict: {unexpected}")
except NameError:
pass
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def load_vocoder(ckpt_vocoder: str, eval_mode: bool):
ckpt_vocoder = Path(ckpt_vocoder)
vocoder_sd = torch.load(ckpt_vocoder / 'best_netG.pt', map_location='cpu')
with open(ckpt_vocoder / 'args.yml', 'r') as f:
args = yaml.load(f, Loader=yaml.UnsafeLoader)
vocoder = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers)
vocoder.load_state_dict(vocoder_sd)
if eval_mode:
vocoder.eval()
return {'model': vocoder}
def load_feature_extractor(gpu, eval_mode=True):
s = '''
feature_extractor:
target: evaluation.feature_extractors.melception.Melception
params:
num_classes: 309
features_list: ['logits']
feature_extractor_weights_path: ./evaluation/logs/21-05-10T09-28-40/melception-21-05-10T09-28-40.pt
transform_dset_out_to_inception_in:
- target: evaluation.datasets.transforms.FromMinusOneOneToZeroOne
- target: specvqgan.modules.losses.vggishish.transforms.StandardNormalizeAudio
params:
specs_dir: ./data/vggsound/melspec_10s_22050hz
cache_path: ./specvqgan/modules/losses/vggishish/data/
- target: evaluation.datasets.transforms.GetInputFromBatchByKey
params:
input_key: image
- target: evaluation.datasets.transforms.ToFloat32'''
feat_extractor_cfg = OmegaConf.create(s)
# downloading the checkpoint for melception
get_ckpt_path('melception', 'evaluation/logs/21-05-10T09-28-40')
pl_sd = torch.load(feat_extractor_cfg.feature_extractor.params.feature_extractor_weights_path,
map_location="cpu")
# use gpu=False to compute it on CPU
feat_extractor = load_model_from_config(
feat_extractor_cfg.feature_extractor, pl_sd['model'], gpu=gpu, eval_mode=eval_mode)['model']
if feat_extractor_cfg.transform_dset_out_to_inception_in is not None:
transforms = [instantiate_from_config(c) for c in feat_extractor_cfg.transform_dset_out_to_inception_in]
else:
transforms = [lambda x: x]
transforms = torchvision.transforms.Compose(transforms)
vggsound_meta = list(csv.reader(open('./data/vggsound.csv'), quotechar='"'))
unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
label2target = {label: target for target, label in enumerate(unique_classes)}
target2label = {target: label for label, target in label2target.items()}
return {'model': feat_extractor, 'transforms': transforms, 'target2label': target2label}
def load_model_and_dataset(config, ckpt, ckpt_vocoder, gpu=True, eval_mode=True):
# get data
dsets = instantiate_from_config(config.data)
dsets.prepare_data()
dsets.setup()
# now load the specified checkpoint
if ckpt:
pl_sd = torch.load(ckpt, map_location="cpu")
global_step = pl_sd["global_step"]
else:
pl_sd = {"state_dict": None}
global_step = None
# loading the vocoder
if ckpt_vocoder:
vocoder = load_vocoder(ckpt_vocoder, eval_mode)['model']
vocoder = vocoder.to('cuda') if gpu else vocoder
model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
# patch config for the adjusted input length which could be longer than during training (infinite samples)
# local_permuter = model.first_stage_permuter
# if config.model.params.first_stage_permuter_config.params.W is not None:
# config.model.params.first_stage_permuter_config.params.W *= W_scale
# model.first_stage_permuter = instantiate_from_config(config.model.params.first_stage_permuter_config).cuda().eval()
# print(config.model.params.first_stage_permuter_config)
feat_extractor = load_feature_extractor(gpu, eval_mode)
return dsets, model, vocoder, global_step, feat_extractor
# the same as the decorator `@st.cache(allow_output_mutation=True, suppress_st_warning=True)`
try:
load_model_and_dataset = st.cache(load_model_and_dataset, allow_output_mutation=True,
suppress_st_warning=True)
except NameError:
pass
def bchw_to_st(x, to_scale=True, flip_dims=None):
if flip_dims is not None:
# dims is a tuple. To flip only 2nd dim use: `flip_dims=(2,)`
x = x.flip(dims=flip_dims)
if to_scale:
# (-1, 1) -> (0, 1)
return (x.detach().cpu().numpy().transpose(0, 2, 3, 1) + 1.) / 2.
else:
return x.detach().cpu().numpy().transpose(0, 2, 3, 1)
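# e.g. for a batch of spectrograms shaped (B, 1, F, T) with values in [-1, 1],
# bchw_to_st returns a channel-last (B, F, T, 1) numpy array rescaled to [0, 1]
# (or left unscaled when to_scale=False).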
def tensor_to_plt(x, vmin=None, vmax=None, flip_dims=None):
if flip_dims is not None:
# dims is a tuple. To flip only 2nd dim use: `flip_dims=(2,)`
x = x.flip(dims=flip_dims)
# remove batch dim and make channel-last
if len(x.shape) > 3:
x = x.squeeze(0)
# if the figure is taller than it is wider rotate (transpose). Also clipping it as feats can be large
if x.shape[-1] < x.shape[-2]:
x = x.clip(-2, 2).transpose(-1, -2)
x = x.cpu()
if len(x.shape) == 3:
x = x.permute(1, 2, 0)
# fig, arr = plt.subplots(nrows=1, ncols=1)
# # arr[i].set_title(f'{vid_name}_{name}')
# arr.imshow(x)
# arr.set_frame_on(False)
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
# for facehq
# TODO: if x.shape[0] == 3:
# x = x.flip(dims=(1,)).permute(1, 2, 0)
# x = (x + 1) / 2
# x = x.clip(0, 1)
# newer version of the matplotlib started to fails when an image has 3 dim with `1` as the last one
if x.ndim == 3 and x.shape[-1] == 1:
x = x[:, :, 0]
ax.imshow(x, cmap=plt.get_cmap('gray'), vmin=vmin, vmax=vmax)
# ax.set_title('Some', fontsize=8)
return fig
def save_results(spec_plt, waves_dict, topk_preds, logdir, batch, mode, sample_rate, specs_key_in_batch):
# implemented only for B=1, otherwise mind the batch[key][0]
    label = ''.join(filter(lambda ch: ch.isalnum() or ch == ' ', batch['label'][0])).replace(' ', '_')
target = int(batch['target'][0])
vid_id = Path(batch[specs_key_in_batch][0]).name.replace('_mel.npy', '')
time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
save_dir = Path(logdir) / 'streamlit' / f'{target:03d}_{label}' / vid_id
os.makedirs(save_dir, exist_ok=True)
dpi = 300
for wave_type, wave in waves_dict.items():
soundfile.write(save_dir / f'{mode}_{time_stamp}_{wave_type}.wav', wave, sample_rate, 'PCM_24')
if len(wave) > sample_rate * 10:
dpi *= 10
spec_plt.savefig(save_dir / f'{mode}_{time_stamp}.png', bbox_inches='tight', pad_inches=0, dpi=dpi)
with open(save_dir / f'{mode}_{time_stamp}_topkpreds.txt', 'w') as out_f:
out_f.write(topk_preds)
def show_wave_in_streamlit(wave_npy, sample_rate, caption):
# showing in streamlit. We cannot just show the npy wave and we need to save it first
temp_wav_file_path = 'todel.wav'
soundfile.write(temp_wav_file_path, wave_npy, sample_rate, 'PCM_24')
st.text(caption)
st.audio(temp_wav_file_path, format='audio/wav')
os.remove(temp_wav_file_path)
def spec_to_audio_to_st(x, spec_dir_path, sample_rate, show_griffin_lim, vocoder=None, show_in_st=True):
# audios are in [-1, 1], making them in [0, 1]
spec = (x.data.squeeze(0) + 1) / 2
out = {}
if vocoder:
# (L,) <- wave: (1, 1, L).squeeze() <- spec: (1, F, T)
wave_from_vocoder = vocoder(spec).squeeze().cpu().numpy()
out['vocoder'] = wave_from_vocoder
if show_in_st:
show_wave_in_streamlit(wave_from_vocoder, sample_rate, 'Reconstructed Wave via MelGAN')
if show_griffin_lim:
spec = spec.squeeze(0).cpu().numpy()
wave_from_griffinlim = inv_transforms(spec, Path(spec_dir_path).stem)
out['inv_transforms'] = wave_from_griffinlim
if show_in_st:
show_wave_in_streamlit(wave_from_griffinlim, sample_rate, 'Reconstructed Wave via Griffin Lim')
return out
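# Illustrative call (variable names are assumptions): for a generated spectrogram
# `x` of shape (1, 1, F, T) in [-1, 1],
#   waves = spec_to_audio_to_st(x, spec_dir_path, 22050, show_griffin_lim=False,
#                               vocoder=vocoder, show_in_st=False)
# returns {'vocoder': <1-D numpy waveform>} ready for soundfile.write.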
def all_attention_to_st(attention, placeholders=None, scale_by_prior=None):
if scale_by_prior:
B, H, T, T = attention.shape
# attention weight is 1/T: if we have a seq with length 3 the weights are 1/3, 1/3, and 1/3
# making T by T matrix with zeros in the upper triangular part
attention_uniform_prior = 1 / torch.arange(1, T+1).view(1, T, 1).repeat(B, 1, T)
attention_uniform_prior = attention_uniform_prior.tril().view(B, 1, T, T).to(attention.device)
attention = attention - attention_uniform_prior
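        # e.g. for T=3 the subtracted prior rows are [1, 0, 0], [1/2, 1/2, 0]
        # and [1/3, 1/3, 1/3], i.e. the weight a uniform attention would assign.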
attention_agg = attention.sum(dim=1, keepdims=True)
att_st = tensor_to_plt(attention_agg)
# z_att_st = tensor_to_plt(z_att, flip_z_dims)
if placeholders is None:
return att_st
else:
placeholders['title_z_att'].text(f'Attention to All. {list(attention_agg.squeeze().shape)}')
placeholders['z_att'].write(att_st)
placeholders['title_c_att'].empty()
placeholders['c_att'].empty()
def last_attention_to_st(attention, z_curr_step, c_length, z_permuter, c_permuter, quant_c_shape,
quant_z_shape, placeholders=None, flip_c_dims=None, flip_z_dims=None):
B, H, T, T = attention.shape
# Since the attention ignores the last (target) element, we will visualize it as 0 – padding last 2 dims
# (B, H, T+1, T+1)
attention = torch.nn.functional.pad(attention, pad=(0, 1, 0, 1), value=0)
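    # e.g. with T=4 the padded map becomes (B, H, 5, 5); row `current_step - 1`
    # holds the attention used when predicting the token at `current_step`.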
current_step = c_length + z_curr_step
attention_at_curr_step = attention[:, :, current_step-1, :]
# (B, H, c_length), (B, H, z_length) <-
c_att, z_att = attention_at_curr_step[:, :, :c_length], attention_at_curr_step[:, :, c_length:]
# aggregate through all heads H -> (B, c_length), (B, z_length)
c_att = c_att.sum(dim=1) # * 10
z_att = z_att.sum(dim=1) # * 10
    # (B, length) -> (B, 1, *2d_or_1d_code_book_shape). *shape[2:] will take 2 elems if 2d and 1 if 1d
c_att = c_permuter(c_att, reverse=True).reshape(B, 1, *quant_c_shape[2:])
z_att = z_permuter(z_att, reverse=True).reshape(B, 1, *quant_z_shape[2:])
# we don't need to flip 1d cond but we do need it for 2d input because of the spectrograms (upside-down)
# making value in two plots in the same range
# vmin = min(c_att.min(), z_att.min())
# vmax = max(c_att.max(), z_att.max())
vmin = None
vmax = None
c_att_st = tensor_to_plt(c_att, vmin, vmax, flip_c_dims)
z_att_st = tensor_to_plt(z_att, vmin, vmax, flip_z_dims)
c_att_weight = c_att.sum() / H
z_att_weight = z_att.sum() / H
if placeholders is None:
return c_att_st, z_att_st
else:
if len(c_att.squeeze().shape) > 0:
placeholders['title_c_att'].text(f'Attention to C. {list(c_att.squeeze().shape)}. Sum {c_att_weight:.2f}')
placeholders['c_att'].pyplot(c_att_st)
else:
placeholders['c_att'].empty()
placeholders['title_c_att'].text(f'Attention to C. Sum {c_att_weight:.2f}')
placeholders['title_z_att'].text(f'Attention to Z. {list(z_att.squeeze().shape)}. Sum {z_att_weight:.2f}')
placeholders['z_att'].write(z_att_st)
def get_class_preditions(x, feat_extractor, k=10):
# use device=torch.device('cpu') to compute on cpu and save some memory
device = x.device
x = {'image': x.squeeze(0).cpu()}
x = feat_extractor['transforms'](x).to(device)
features = feat_extractor['model'](x)
featuresdict = feat_extractor['model'].convert_features_tuple_to_dict(features)
probs = featuresdict['logits'].softmax(dim=1)
topk_probs, topk_targets = probs.topk(k)
to_print = f'Spectrogram Classifier (K={k}):\n'
for p, y in zip(topk_probs.squeeze(0).cpu().tolist(), topk_targets.squeeze(0).cpu().tolist()):
to_print += f'\t{feat_extractor["target2label"][y]}: {p:.5f}\n'
return to_print
def sample_conditionally(z_indices, sampling_shape, c_indices, quant_c, full_att_mat, scale_att_by_prior,
temperature, top_x, update_every, placeholders,
cond_stage_model_name, flip_z_dims, flip_c_dims, to_save_results, logdir, batch,
specs_key_in_batch, vocoder, feat_sampler_cfg, show_griffin_lim, feat_extractor,
mode):
start_t = time.time()
# for facehq
# patch_size_j = 16
# patch_size_i = 16
patch_size_i = 5
patch_size_j = 53
B, D, hr_h, hr_w = sampling_shape
# assert hr_w % patch_size_j == 0 and hr_w // patch_size_j == int(hr_w // patch_size_j)
if mode == 'full':
start_step = 0
else:
start_step = (patch_size_j // 2) * patch_size_i
z_pred_indices = torch.zeros((B, hr_h*hr_w)).long().to(z_indices.device)
z_pred_indices[:, :start_step] = z_indices[:, :start_step]
for step in range(start_step, hr_w * hr_h):
i = step % hr_h
j = step // hr_h
i_start = min(max(0, i - (patch_size_i // 2)), hr_h - patch_size_i)
j_start = min(max(0, j - (patch_size_j // 2)), hr_w - patch_size_j)
i_end = i_start + patch_size_i
j_end = j_start + patch_size_j
local_i = i - i_start
local_j = j - j_start
patch_2d_shape = (B, D, patch_size_i, patch_size_j)
placeholders['time'].text(f"Time: {time.time() - start_t:3.2f} seconds")
placeholders['info'].text(
f"Step: ({i},{j}) | Local: ({local_i},{local_j}) | Crop: ({i_start}:{i_end},{j_start}:{j_end})"
)
# TODO: faceshq – we don't need to permute the reshaped indices (1st and 2nd time)
# slicing the possibly permuted flat sequence:
# 1D z_pred_indices is permuted: A_flat = [1, 2, 3, 4, 5, 6, 7, 8, 9].
# the 2D input should be: A = [[1, 4, 7], [2, 5, 8], [3, 6, 9]].
# Therefore, after the first reshape it will be A.T = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        # the last reshape flattens it back
patch = z_pred_indices \
.reshape(B, hr_w, hr_h) \
.permute(0, 2, 1)[:, i_start:i_end, j_start:j_end].permute(0, 2, 1) \
.reshape(B, patch_size_i * patch_size_j)
# if cond_stage_model_name == 'CoordStage':
# cpatch = c_indices \
# .reshape(B, hr_w, hr_h) \
# .permute(0, 2, 1)[:, i_start:i_end, j_start:j_end].permute(0, 2, 1) \
# .reshape(B, patch_size_i * patch_size_j)
# elif cond_stage_model_name == 'VQModel1d':
# cpatch = c_indices[:, j_start:j_end]
# elif cond_stage_model_name == 'FeatsClassStage':
# features = quant_c['feature']
# if feat_sampler_cfg is None:
# time_step_coeff = features.shape[-1] / sampling_shape[-1]
# assert time_step_coeff == int(time_step_coeff), f'{features.shape}, {sampling_shape}'
# j_start_feats = int(j_start * time_step_coeff)
# j_end_feats = int(j_end * time_step_coeff)
# else:
# feat_sample_size = feat_sampler_cfg.params.feat_sample_size
# times_to_repeat_after_resample = feat_sampler_cfg.params.times_to_repeat_after_resample
# if times_to_repeat_after_resample is not None:
# feat_sample_size *= times_to_repeat_after_resample
# patches_in_z = sampling_shape[-1] // patch_size_j
# patches_in_c = features.shape[-1] // feat_sample_size
# # assert patches_in_c == patches_in_z, f'{features.shape}, {sampling_shape}'
# j_start_feats = j_start // patch_size_j
# j_end_feats = j_start + feat_sample_size
# cpatch = {
# 'target': quant_c['target'],
# 'feature': c_indices['feature'][:, :, j_start_feats:j_end_feats]
# }
# elif cond_stage_model_name in ['RawFeatsStage', 'FeatClusterStage']:
# if feat_sampler_cfg is None:
# time_step_coeff = quant_c.shape[-1] / sampling_shape[-1]
# assert time_step_coeff == int(time_step_coeff), f'{quant_c.shape}, {sampling_shape}'
# j_start_feats = int(j_start * time_step_coeff)
# j_end_feats = int(j_end * time_step_coeff)
# else:
# feat_sample_size = feat_sampler_cfg.params.feat_sample_size
# times_to_repeat_after_resample = feat_sampler_cfg.params.times_to_repeat_after_resample
# if times_to_repeat_after_resample is not None:
# feat_sample_size *= times_to_repeat_after_resample
# patches_in_z = sampling_shape[-1] // patch_size_j
# patches_in_c = quant_c.shape[-1] // feat_sample_size
# print(patches_in_c, patches_in_z)
# # assert patches_in_c == patches_in_z, f'{quant_c.shape}, {sampling_shape}'
# j_start_feats = j_start // patch_size_j
# j_end_feats = j_start + feat_sample_size
# if cond_stage_model_name == 'FeatClusterStage':
# cpatch = c_indices[:, j_start_feats:j_end_feats]
# else:
# cpatch = c_indices[:, :, j_start_feats:j_end_feats]
# elif cond_stage_model_name == 'ClassOnlyStage':
# cpatch = c_indices
# else:
# raise NotImplementedError
# assuming we don't crop the conditioning and just use the whole c, if not desired uncomment the above
cpatch = c_indices
if cond_stage_model_name in ['RawFeatsStage', 'ClassOnlyStage', 'FeatsClassStage']:
logits, _, attention = model.transformer(patch[:, :-1], cpatch)
else:
patch = torch.cat((cpatch, patch), dim=1)
logits, _, attention = model.transformer(patch[:, :-1])
# remove conditioning
logits = logits[:, -patch_size_j*patch_size_i:, :]
local_pos_in_flat = local_j * patch_size_i + local_i
logits = logits[:, local_pos_in_flat, :]
logits = logits / temperature
if top_x is not None:
logits = model.top_k_logits(logits, top_x)
# apply softmax to convert to probabilities
probs = torch.nn.functional.softmax(logits, dim=-1)
# sample from the distribution
ix = torch.multinomial(probs, num_samples=1)
z_pred_indices[:, j * hr_h + i] = ix
# print(
# z_pred_indices \
# .reshape(B, hr_w, hr_h).permute(0, 2, 1)[:, i_start:i_end, j_start:j_end].permute(0, 2, 1)
# )
# print(z_pred_indices.reshape(B, hr_w, hr_h).permute(0, 2, 1).permute(0, 2, 1))
if step % update_every == 0:
z_pred_img = model.decode_to_img(z_pred_indices, sampling_shape)
placeholders['title_gen_spec'].text(f'Sampling {mode}. {list(z_pred_img.squeeze().shape)}')
            # flipping the spectrogram just for illustration purposes (low freqs at the bottom, high at the top)
z_pred_img_st = tensor_to_plt(z_pred_img, flip_dims=flip_z_dims)
placeholders['gen_spec'].write(z_pred_img_st)
if full_att_mat:
all_attention_to_st(attention, placeholders, scale_att_by_prior)
else:
if cond_stage_model_name == 'FeatsClassStage':
# 212 + 1
c_length = cpatch['feature'].shape[-1] + cpatch['target'].shape[-1]
quant_c_shape = [None, None, c_length]
else:
c_length = cpatch.shape[-1]
quant_c_shape = quant_c.shape
# quant_z_shape = sampling_shape
last_attention_to_st(attention, local_pos_in_flat, c_length, model.first_stage_permuter,
model.cond_stage_permuter, quant_c_shape, patch_2d_shape, placeholders,
flip_c_dims, flip_z_dims)
# quant_z_shape = sampling_shape
z_pred_img = model.decode_to_img(z_pred_indices, sampling_shape)
print(f'Time: {time.time() - start_t:3.2f} seconds')
# showing the final image
placeholders['title_gen_spec'].text(f'Sampling {mode}. {list(z_pred_img.squeeze().shape)}')
z_pred_img_st = tensor_to_plt(z_pred_img, flip_dims=flip_z_dims)
placeholders['gen_spec'].write(z_pred_img_st)
if full_att_mat:
all_attention_to_st(attention, placeholders, scale_att_by_prior)
else:
if cond_stage_model_name == 'FeatsClassStage':
# 212 + 1
c_length = cpatch['feature'].shape[-1] + cpatch['target'].shape[-1]
quant_c_shape = [None, None, c_length]
else:
c_length = cpatch.shape[-1]
quant_c_shape = quant_c.shape
last_attention_to_st(attention, local_pos_in_flat, c_length, model.first_stage_permuter,
model.cond_stage_permuter, quant_c_shape, patch_2d_shape, placeholders,
flip_c_dims, flip_z_dims)
topk_preds = get_class_preditions(z_pred_img, feat_extractor)
st.text(topk_preds)
waves = spec_to_audio_to_st(z_pred_img, config.data.params.spec_dir_path,
config.data.params.sample_rate, show_griffin_lim, vocoder)
if to_save_results:
save_results(z_pred_img_st, waves, topk_preds, logdir, batch, mode, config.data.params.sample_rate,
specs_key_in_batch)
st.info('Done')
if __name__ == "__main__":
st.sidebar.info('''
Hi there 👋
This is a demo for **Visually Guided Sound Generation** project 🖼️ 👉 🔉.
[Project Page](https://v-iashin.github.io/specvqgan)
• [Paper](https://arxiv.org/abs/2110.08791)
• [Code](https://github.com/v-iashin/SpecVQGAN)
• [Colab](https://colab.research.google.com/drive/1pxTIMweAKApJZ3ZFqyBee3HtMqFpnwQ0?usp=sharing)
''')
sys.path.append(os.getcwd())
parser = get_parser()
opt, unknown = parser.parse_known_args()
avail_models = Path(opt.logdir).rglob('*/checkpoints')
# 'T' is an empty model which prevents loading the first model by default
avail_models = ['T'] + sorted([str(p.parent) for p in avail_models])
# filtering out codebook models as we need only samplers
avail_models = [m for m in avail_models if 'codebook' not in m]
assert len(avail_models) > 0, f'There is no model in {opt.logdir}'
st.sidebar.header('Select a Model')
model_ckpt = st.sidebar.selectbox('', avail_models, 0, format_func=rename_models)
if model_ckpt == 'T':
st.stop()
opt.resume = model_ckpt
ckpt_vocoder = opt.vocoder_path
ckpt = None
if opt.resume:
if not os.path.exists(opt.resume):
raise ValueError("Cannot find {}".format(opt.resume))
if os.path.isfile(opt.resume):
paths = opt.resume.split("/")
try:
                idx = len(paths) - paths[::-1].index("logs") + 1
except ValueError:
idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt
logdir = "/".join(paths[:idx])
ckpt = opt.resume
else:
assert os.path.isdir(opt.resume), opt.resume
logdir = opt.resume.rstrip("/")
ckpt_dir = os.path.join(logdir, "checkpoints")
ckpt_file = sorted(os.listdir(ckpt_dir))
if len(ckpt_file) > 1:
print(f'Warning: Found more than one checkpoint in {ckpt_dir}: {ckpt_file}')
ckpt_file = ckpt_file[0]
print(f'Using {ckpt_file}')
ckpt = os.path.join(logdir, 'checkpoints', ckpt_file)
print(f"logdir:{logdir}")
base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*-project.yaml")))
opt.base = base_configs+opt.base
if opt.config:
if type(opt.config) == str:
opt.base = [opt.config]
else:
opt.base = [opt.base[-1]]
configs = [OmegaConf.load(cfg) for cfg in opt.base]
cli = OmegaConf.from_dotlist(unknown)
if opt.ignore_base_data:
for config in configs:
if hasattr(config, "data"):
del config["data"]
config = OmegaConf.merge(*configs, cli)
# determine the data folder
if 'vggsound.VGGSound' in config.data.params.train.target:
datapath = './data/vggsound/'
raw_vids_dir = os.path.join(datapath, 'video')
elif 'vas.VAS' in config.data.params.train.target:
datapath = './data/vas/'
raw_vids_dir = os.path.join(datapath, 'videos', '*')
else:
raise NotImplementedError
# patch config. E.g. if the model is trained on another machine with different paths
for a in ['spec_dir_path', 'rgb_feats_dir_path', 'flow_feats_dir_path']:
if config.data.params[a] is not None:
if 'vggsound.VGGSound' in config.data.params.train.target:
config.data.params[a] = os.path.join(datapath, Path(config.data.params[a]).name)
elif 'vas.VAS' in config.data.params.train.target:
config.data.params[a] = os.path.join(datapath, 'features', '*', Path(config.data.params[a]).name)
with st.beta_expander('Streamlit Logs'):
dsets, model, vocoder, global_step, feat_extractor = load_model_and_dataset(
config, ckpt, ckpt_vocoder, gpu=True, eval_mode=True
)
with st.beta_expander('Sampler Model Config'):
st.text(f'Global step: {global_step}')
st.text(f'Checkpoint: {ckpt}')
st.json(OmegaConf.to_container(config))
with torch.no_grad():
if len(dsets.datasets) > 1:
splits = sorted(dsets.datasets.keys())
if 'vas.VAS' in config.data.params.train.target:
                # put validation first to prevent loading train by default on the demo, which results in an error in streamlit
splits = ['validation', 'train']
st.sidebar.header('Select Data')
split = st.sidebar.radio('Split', splits)
dset = dsets.datasets[split]
else:
dset = next(iter(dsets.datasets.values()))
# filter dataset for available items using set intersection
if 'vggsound.VGGSound' in config.data.params.train.target:
avail_dataset = glob.glob(config.data.params['spec_dir_path'] + '/*_mel.npy')
avail_dataset = sorted(list(set(avail_dataset).intersection(dset.specs_dataset.dataset)))
avail_targets = list({dset.specs_dataset.video2target[Path(c).stem[:11]] for c in avail_dataset})
avail_label2target = {dset.specs_dataset.target2label[t]: t for t in avail_targets}
dset.specs_dataset.label2target = avail_label2target
dset.specs_dataset.dataset = avail_dataset
if hasattr(dset, 'feats_dataset'):
avail_dataset = glob.glob(config.data.params['rgb_feats_dir_path'] + '/*.pkl')
avail_dataset = [Path(p).stem for p in avail_dataset]
avail_dataset = sorted(list(set(avail_dataset).intersection(dset.feats_dataset.dataset)))
dset.feats_dataset.dataset = avail_dataset
elif 'vas.VAS' in config.data.params.train.target:
avail_dataset = glob.glob(config.data.params['spec_dir_path'] + '/*_mel.npy')
avail_dataset = [os.path.join(Path(p).parent.parent.stem, Path(p).stem.replace('_mel', '')) for p in avail_dataset]
avail_dataset = sorted(list(set(avail_dataset).intersection(dset.specs_dataset.dataset)))
dset.specs_dataset.dataset = avail_dataset
if hasattr(dset, 'feats_dataset'):
avail_dataset = glob.glob(config.data.params['rgb_feats_dir_path'] + '/*.pkl')
avail_dataset = [os.path.join(Path(p).parent.parent.stem, Path(p).stem) for p in avail_dataset]
avail_dataset = sorted(list(set(avail_dataset).intersection(dset.feats_dataset.dataset)))
dset.feats_dataset.dataset = avail_dataset
if len(dset) == 0:
st.sidebar.info('There are no samples for this split. Please select another split.')
st.stop()
select_specific_class = st.sidebar.checkbox('Select Specific Class...', value=False)
# add available classes
if select_specific_class:
labels = dset.specs_dataset.label2target.keys()
label_choice = st.sidebar.selectbox('Select a Class', sorted(labels))
# filter dataset for observations belonging to a specific class
label2target = dset.specs_dataset.label2target
if 'vggsound.VGGSound' in config.data.params.train.target:
video2target = dset.specs_dataset.video2target
paths = dset.specs_dataset.dataset
filter_paths = [c for c in paths if video2target[Path(c).stem[:11]] == label2target[label_choice]]
dset.specs_dataset.dataset = filter_paths
# if we have another first stage we need to do something extra
if hasattr(dset, 'feats_dataset'):
paths_feats = dset.feats_dataset.dataset
filter_paths_feats = [c for c in paths_feats if video2target[Path(c).stem[:11]] == label2target[label_choice]]
dset.feats_dataset.dataset = filter_paths_feats
elif 'vas.VAS' in config.data.params.train.target:
paths = dset.specs_dataset.dataset
filter_paths = [c for c in paths if c.startswith(label_choice)]
dset.specs_dataset.dataset = filter_paths
# if we have another first stage we need to do something extra
if hasattr(dset, 'feats_dataset'):
paths_feats = dset.feats_dataset.dataset
filter_paths_feats = [c for c in paths_feats if c.startswith(label_choice)]
dset.feats_dataset.dataset = filter_paths_feats
batch_size = 1
start_index = st.sidebar.number_input(f'Example Index in the Dataset [0, {len(dset)-1}]',
value=0, min_value=0, max_value=len(dset)-batch_size)
indices = list(range(start_index, start_index+batch_size))
batch = default_collate([dset[i] for i in indices])
if select_specific_class:
# restoring original dataset because we cached the dataset class and filtered for one class.
# Next time, the filtered dataset will be filtered again which empties the dataset.
dset.specs_dataset.dataset = paths
# if we have another first stage we need to do something extra
if hasattr(dset, 'feats_dataset'):
dset.feats_dataset.dataset = paths_feats
feat_sampler_cfg = dset.condition_dataset_cfg.feat_sampler_cfg
cond_stage_model_name = model.cond_stage_model.__class__.__name__
transformer_model_name = model.transformer.__class__.__name__
if (cond_stage_model_name in ['VQModel1d', 'FeatClusterStage']
or transformer_model_name in ['GPTFeats', 'GPTFeatsClass']):
specs_key_in_batch = 'file_path_specs_'
flip_c_dims = None
elif transformer_model_name == 'GPTClass':
specs_key_in_batch = 'file_path_'
flip_c_dims = None
else:
specs_key_in_batch = 'file_path_'
flip_c_dims = (2,)
flip_z_dims = (2,)
st.text('')
with st.beta_expander(f'Original Video. Class: {batch["label"]}.'):
vid_fname = Path(batch[specs_key_in_batch][0]).name.replace('_mel.npy', '.mp4')
st.text(f'Video file name: {vid_fname}')
if 'vggsound.VGGSound' in config.data.params.train.target:
video_file = open(os.path.join(raw_vids_dir, vid_fname), 'rb').read()
elif 'vas.VAS' in config.data.params.train.target:
cls = batch['label'][0]
video_file = open(os.path.join(raw_vids_dir.replace('*', cls), vid_fname), 'rb').read()
st.video(video_file, format='video/mp4')
x = model.get_input(model.first_stage_key, batch).to(model.device)
c = model.get_input(model.cond_stage_key, batch)
if isinstance(c, dict):
c = {k: v.to(model.device) for k, v in c.items()}
else:
c = c.to(model.device)
quant_z, z_indices = model.encode_to_z(x)
quant_c, c_indices = model.encode_to_c(c)
xrec = model.first_stage_model.decode(quant_z)
crec = model.cond_stage_model.decode(quant_c)
if transformer_model_name == 'GPTFeatsClass':
orig_cond_shape = c['feature'].squeeze().shape
rec_cond_shape = crec["feature"].squeeze().shape
else:
orig_cond_shape = c.squeeze().shape
rec_cond_shape = crec.squeeze().shape
st.text('')
with st.beta_expander(f'Conditioning {list(orig_cond_shape)}'):
if transformer_model_name == 'GPTClass':
st.write(batch['label'])
elif transformer_model_name == 'GPTFeatsClass':
st.write(batch['label'])
st.write(tensor_to_plt(c['feature'], flip_dims=flip_c_dims))
else:
st.write(tensor_to_plt(c, flip_dims=flip_c_dims))
# with st.beta_expander(f'Conditioning Reconstruction {list(rec_cond_shape)}'):
# if transformer_model_name == 'GPTClass':
# st.write(batch['label'])
# elif transformer_model_name == 'GPTFeatsClass':
# st.write(batch['label'])
# st.write(tensor_to_plt(crec['feature'], flip_dims=flip_c_dims))
# else:
# st.write(tensor_to_plt(crec, flip_dims=flip_c_dims))
st.sidebar.header('Results Handling')
update_every = st.sidebar.number_input('Display Result Every ... Step', value=3)
show_griffin_lim = st.sidebar.checkbox(
'Also Show Griffin-Lim', value=False,
        help='Show spectrogram reconstruction from the Griffin-Lim algorithm alongside the pre-trained vocoder')
to_save_results = st.sidebar.checkbox('Save Results', value=True)
st.text('')
with st.beta_expander(f'Input {list(x.squeeze().shape)}'):
st.write(tensor_to_plt(x, flip_dims=flip_z_dims))
topk_results = get_class_preditions(x, feat_extractor)
st.text(topk_results)
if st.button('Get Audio (Input)'):
spec_to_audio_to_st(x, config.data.params.spec_dir_path,
config.data.params.sample_rate, show_griffin_lim, vocoder)
with st.beta_expander(f'Input Reconstruction from SpecVQGAN {list(xrec.squeeze().shape)}', expanded=True):
st.write(tensor_to_plt(xrec, flip_dims=flip_z_dims))
topk_results = get_class_preditions(xrec, feat_extractor)
st.text(topk_results)
if st.button('Get Audio (Input Reconstruction)'):
spec_to_audio_to_st(xrec, config.data.params.spec_dir_path,
config.data.params.sample_rate, show_griffin_lim, vocoder)
st.sidebar.header('Sampling Parameters')
temperature = st.sidebar.number_input(
'Softmax Temperature', value=1.0,
        help=r'$T$ in $\exp(x_i/T) / \Sigma_j \exp(x_j/T)$'
)
top_x = st.sidebar.number_input(
'Top X', value=config.model.params.first_stage_config.params.n_embed // 2,
help='Cuts sampling space of the next token to Top $X$ highest probability tokens. '
+ 'It increases diversity of samples but at the cost of relevance. '
+ 'As a rule of thumb, use `X = |codebook| // 2`.'
)
W_scale = st.sidebar.number_input(
'Temporal Scale', value=1, min_value=1,
help='The output length is `temporal_scale * 9.8 seconds`.')
sample_half = st.sidebar.checkbox(
'Prime with GT Tokens', value=False,
help='If checked, the first half of the tokens will be taken from the ground truth audio'
+ ' codebook representation and sampling will continue this sequence.')
full_att_mat = st.sidebar.checkbox(
'Show Full Attention Matrix', value=False,
help='The attention will be shown for each time stamp instead of only the current one.')
if full_att_mat:
scale_att_by_prior = st.sidebar.checkbox(
'Subtract Prior from Attention', value=True,
help='If checked, subtracts $1/S$ from each attention weight, where $S$ is number of'
        + ' previous tokens. For example, $[2/3, 1/6, 1/6] - [1/3, 1/3, 1/3] = [1/3, -1/6, -1/6]$')
else:
scale_att_by_prior = False
st.header('Sampling Results:')
    # dummy outputs just to reserve some space
placeholders = {
'info': st.text('Step: (?,?) | Local: (?,?) | Crop: (?:?,?:?)'),
'time': st.text('Time: ?'),
'mode': st.text('Mode: ?'),
'title_c_att': st.text('Attention to C.'),
'c_att': st.pyplot(tensor_to_plt(torch.zeros_like(x))),
'title_z_att': st.text('Attention to Z.'),
'z_att': st.pyplot(tensor_to_plt(torch.zeros_like(x))),
'title_gen_spec': st.text('Generated sample'),
'gen_spec': st.pyplot(tensor_to_plt(torch.zeros_like(x))),
'title_rec_audio': st.text('Reconstructed Audio of the Generated Sample'),
}
sampling_shape = list(quant_z.shape)
# hr_w * w_scale
sampling_shape[3] *= W_scale
if st.sidebar.button('Start Sampling'):
mode = 'half' if sample_half else 'full'
sample_conditionally(
z_indices,
sampling_shape,
c_indices,
quant_c,
full_att_mat,
scale_att_by_prior,
temperature,
top_x,
update_every,
placeholders,
cond_stage_model_name,
flip_z_dims,
flip_c_dims,
to_save_results,
logdir,
batch,
specs_key_in_batch,
vocoder,
feat_sampler_cfg,
show_griffin_lim,
feat_extractor,
mode
)
|
138653
|
import argparse
import os
import time
import socket
import random
import tensorflow as tf
from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
# NOTE: this script is based on https://github.com/cbfinn/maml/blob/master/utils.py
FLAGS = flags.FLAGS
## Image helper
def get_images(paths, labels, nb_samples=None, shuffle=True):
if nb_samples is not None:
sampler = lambda x: random.sample(x, nb_samples)
else:
sampler = lambda x: x
images = [(i, os.path.join(path, image)) \
for i, path in zip(labels, paths) \
for image in sampler(os.listdir(path))]
if shuffle:
random.shuffle(images)
return images
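# Hedged usage sketch (the class directories and sampling sizes below are purely
# illustrative, not part of the original training pipeline): pair each label with
# sampled image paths, e.g. for an N-way K-shot episode.
def _demo_get_images():
    class_dirs = ['./data/omniglot/class_a', './data/omniglot/class_b']  # hypothetical paths
    labels = [0, 1]
    # returns a shuffled list of (label, image_path) tuples, 5 images per class
    return get_images(class_dirs, labels, nb_samples=5, shuffle=True)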
def clip_if_not_none(grad, min_value, max_value):
if grad is None:
return grad
return tf.clip_by_value(grad, min_value, max_value)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
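# Hedged usage sketch (the '--use_bn' flag is hypothetical): str2bool is typically
# passed as an argparse `type` so 'yes'/'no'-style strings parse to real booleans.
def _demo_str2bool_argparse():
    parser = argparse.ArgumentParser()
    parser.add_argument('--use_bn', type=str2bool, default=False)
    args = parser.parse_args(['--use_bn', 'yes'])
    assert args.use_bn is True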
def make_logdir(configs, fname_args=[]):
this_run_str = time.strftime("%H%M%S_") + str(socket.gethostname())
if is_git_dir():
this_run_str += '_git' + git_hash_str() # random hash + git hash
for str_arg in fname_args:
if str_arg in configs.keys():
this_run_str += '_' + str_arg.title().replace('_','') + '_' + str(configs[str_arg])
else:
raise ValueError('%s in fname_args does not exist in configs' % str_arg)
this_run_str = this_run_str.replace('/','_')
    log_dir = os.path.join(configs['log_root_dir'], configs['log_sub_dir'], this_run_str)
return log_dir
def experiment_prefix_str(separator=',', hostname=False, git=True):
this_run_str = time.strftime("%y%m%d_%H%M%S")
if hostname:
this_run_str += str(socket.gethostname())
# NOTE: Unless you can attach your git folder when running borgy, this would fail!
# Comment out the `is_git_dir` condition and the `str(git_hash_str())` to get this to work
if git and is_git_dir():
this_run_str += separator + str(git_hash_str()) # random hash + git hash
this_run_str = this_run_str.replace('-','')
return this_run_str
def experiment_string2(configs, fname_args=[], separator=','):
this_run_str = ''
for (org_arg_str, short_arg_str) in fname_args:
short_arg_str = org_arg_str.title().replace('_','') if short_arg_str is None else short_arg_str
if org_arg_str in configs.keys():
this_run_str += separator + short_arg_str + str(configs[org_arg_str]).title().replace('_','')
else:
            raise ValueError('%s in fname_args does not exist in configs' % org_arg_str)
this_run_str = this_run_str.replace('/', '_')
return this_run_str
def experiment_string(configs, fname_args=[], separator=','):
    this_run_str = experiment_prefix_str()
for str_arg in fname_args:
if str_arg in configs.keys():
this_run_str += separator + str_arg.title().replace('_','') + '=' + str(configs[str_arg])
else:
raise ValueError('%s in fname_args does not exist in configs' % str_arg)
this_run_str = this_run_str.replace('/','_')
return this_run_str
def is_git_dir():
from subprocess import call, STDOUT
if call(["git", "branch"], stderr=STDOUT, stdout=open(os.devnull, 'w')) != 0:
return False
else:
return True
def git_hash_str(hash_len=7):
import subprocess
    hash_str = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
    return hash_str[:hash_len]
|
138714
|
import hydra
from hydra.core.config_store import ConfigStore
from omegaconf import OmegaConf
from configs import TrainConfig
from jerex import model, util
cs = ConfigStore.instance()
cs.store(name="train", node=TrainConfig)
@hydra.main(config_name='train', config_path='configs/docred_joint')
def train(cfg: TrainConfig) -> None:
print(OmegaConf.to_yaml(cfg))
util.config_to_abs_paths(cfg.datasets, 'train_path', 'valid_path', 'test_path', 'types_path')
util.config_to_abs_paths(cfg.model, 'tokenizer_path', 'encoder_path')
util.config_to_abs_paths(cfg.misc, 'cache_path')
model.train(cfg)
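# Hedged usage note (the override values are illustrative): since this entry point is a
# Hydra app, any field of TrainConfig can be overridden from the command line, e.g.
#   python train.py datasets.train_path=./data/my_train.json model.encoder_path=bert-base-cased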
if __name__ == '__main__':
train()
|
138734
|
import argparse
from pathlib import Path
from loguru import logger
from jupyter_ascending._environment import SYNC_EXTENSION
from jupyter_ascending.json_requests import SyncRequest
from jupyter_ascending.logger import setup_logger
from jupyter_ascending.requests.client_lib import request_notebook_command
@logger.catch
def send(file_name: str):
if f".{SYNC_EXTENSION}.py" not in file_name:
return
logger.info(f"Syncing File: {file_name}...")
file_name = str(Path(file_name).absolute())
with open(file_name, "r") as reader:
raw_result = reader.read()
request_obj = SyncRequest(file_name=file_name, contents=raw_result)
request_notebook_command(request_obj)
logger.info("... Complete")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
setup_logger()
parser.add_argument("--filename", help="Filename to send")
arguments = parser.parse_args()
send(arguments.filename)
|
138740
|
from __future__ import absolute_import, division, print_function, with_statement
import sys
from turbo.log import helper_log
from turbo.util import import_object, camel_to_underscore
class _HelperObjectDict(dict):
def __setitem__(self, name, value):
return super(_HelperObjectDict, self).setdefault(name, value)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise ValueError(name)
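# A minimal illustration (not part of the original module) of the write-once behaviour
# of _HelperObjectDict: __setitem__ delegates to setdefault, so the first assignment for
# a key wins, and attribute access falls back to the dict lookup.
def _demo_helper_object_dict():
    d = _HelperObjectDict()
    d['db'] = 'first'
    d['db'] = 'second'      # ignored: setdefault keeps the existing value
    assert d.db == 'first'  # resolved via __getattr__ -> self['db']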
def install_helper(installing_helper_list, package_space):
for item in installing_helper_list:
# db model package
package = import_object('.'.join(['helpers', item]), package_space)
package_space[item] = _HelperObjectDict()
# all py files included by package
all_modules = getattr(package, '__all__', [])
for m in all_modules:
try:
module = import_object(
'.'.join(['helpers', item, m]), package_space)
except:
helper_log.error('module helpers.%s.%s Import Error' %
(item, m), exc_info=True)
sys.exit(0)
for model_name in getattr(module, 'MODEL_SLOTS', []):
model = getattr(module, model_name, None)
if model:
camel_name = model.__name__
underscore_name = camel_to_underscore(camel_name)
package_space[item][underscore_name] = model()
package_space[item][camel_name] = model
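# Hedged sketch of the layout install_helper expects (module and class names below are
# hypothetical): a helpers/<item>/<module>.py file lists its model classes in MODEL_SLOTS,
#
#   # helpers/db/user.py
#   MODEL_SLOTS = ['UserModel']
#   class UserModel(object): ...
#
# after install_helper(['db'], package_space), both spellings are available:
#   package_space['db'].user_model  -> UserModel() instance
#   package_space['db'].UserModel   -> UserModel class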
|
138745
|
ATTR_CODE = "auth_code"
CONF_MQTT_IN = "mqtt_in"
CONF_MQTT_OUT = "mqtt_out"
DATA_KEY = "media_player.hisense_tv"
DEFAULT_CLIENT_ID = "HomeAssistant"
DEFAULT_MQTT_PREFIX = "hisense"
DEFAULT_NAME = "Hisense TV"
DOMAIN = "hisense_tv"
|
138763
|
from abc import abstractmethod
from bxutils import logging
from bxcommon.services.transaction_service import TransactionService
from bxcommon.utils.blockchain_utils.btc.btc_object_hash import Sha256Hash
from bxgateway.services.abstract_block_cleanup_service import AbstractBlockCleanupService
from bxgateway.services.btc.btc_block_queuing_service import BtcBlockQueuingService
from bxgateway.messages.btc.block_btc_message import BlockBtcMessage
from bxgateway.messages.btc.inventory_btc_message import GetDataBtcMessage, InventoryType
logger = logging.get_logger(__name__)
class AbstractBtcBlockCleanupService(AbstractBlockCleanupService):
"""
Service for managing block cleanup.
"""
# pyre-fixme[11]: Annotation `BtcGatewayNode` is not defined as a type.
def __init__(self, node: "BtcGatewayNode", network_num: int):
"""
Constructor
:param node: reference to node object
:param network_num: network number
"""
super(AbstractBtcBlockCleanupService, self).__init__(node=node, network_num=network_num)
def clean_block_transactions_from_block_queue(
self,
block_hash: Sha256Hash,
block_queuing_service: BtcBlockQueuingService
) -> None:
if block_hash in block_queuing_service:
block_msg = self.node.block_queuing_service_manager.get_block_data(block_hash)
self.node.block_cleanup_service.clean_block_transactions(
transaction_service=self.node.get_tx_service(),
block_msg=block_msg
)
else:
logger.debug("block cleanup from queuing service failed, block is no longer tracked {}", block_hash)
@abstractmethod
def clean_block_transactions(
self,
block_msg: BlockBtcMessage,
transaction_service: TransactionService
) -> None:
pass
def _request_block(self, block_hash: Sha256Hash):
block_request_message = GetDataBtcMessage(
magic=self.node.opts.blockchain_net_magic,
inv_vects=[(InventoryType.MSG_BLOCK, block_hash)],
request_witness_data=False
)
logger.trace("Received block cleanup request: {}", block_hash)
node_conn = self.node.get_any_active_blockchain_connection()
if node_conn:
node_conn.enqueue_msg(block_request_message)
else:
logger.debug("Request for block '{}' failed. No connection to node.", repr(block_hash))
|
138767
|
from multiprocessing.pool import Pool
from itertools import repeat
import pandas as pd
import numpy as np
def get_composers(res):
"""
Get the composers for the given track.
**Parameters**
- `res`: string composer names for each track within charts
**Returns**
A dictionary of composers `{composer_num: composer_name}`.
"""
if res != None:
res = res.replace(", Jr", " Jr")
composers = [
name.strip()
for first_split in res.split(", ")
for name in first_split.split(" % ")
]
return {f"composer_{i + 1}": name for i, name in enumerate(composers)}
return {"composer_1": None}
def get_labels(res):
"""
Get the record labels for the given track.
**Parameters**
- `res`: list of string of album record labels, e.g.
['Universal/Warner Bros.', 'None']
**Returns**
A dictionary of record label, `{'album_label_{num}': label_name}`.
"""
if res != None:
filter_none = [
label_string
for label_string in res
if label_string != None
if label_string != "None"
]
joined_str = "/".join(filter_none)
labels = [
label.strip() for label in joined_str.split("/") if label != None
]
return (
{f"album_label_{i + 1}": label for i, label in enumerate(labels)}
        if labels
else {"album_label_1": None}
)
return {"album_label_1": None}
def extract_rank_stats(stats):
"""
Extract the rank and number of plays of past days.
**Parameters**
- `stats`: list of rankStats for each track within charts,
each element being a dictionary containing the
ranks (and plays if available) of previous days
**Returns**
A dictionary of previous day ranks (and plays if available), `{'ranks_{day}s_ago': ranks, 'plays_{day}s_ago': plays}`.
"""
last = stats[-1]
rank_stats = {"rank_today": last["rank"]}
if "plays" in last.keys():
rank_stats["plays_today"] = last["plays"]
for i, stats_per_day in enumerate(stats[-2::-1]):
rank_stats[f"rank_{i+1}d_ago"] = stats_per_day["rank"]
if "plays" in stats_per_day.keys():
rank_stats[f"plays_{i+1}d_ago"] = stats_per_day["plays"]
return rank_stats
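# Hedged example (made-up rankStats): the last entry is treated as "today", earlier
# entries become rank_{i}d_ago / plays_{i}d_ago counting backwards.
def _demo_extract_rank_stats():
    stats = [
        {"rank": 12, "plays": 900},   # 2 days ago
        {"rank": 8, "plays": 1100},   # yesterday
        {"rank": 5, "plays": 1500},   # today (last element)
    ]
    assert extract_rank_stats(stats) == {
        "rank_today": 5, "plays_today": 1500,
        "rank_1d_ago": 8, "plays_1d_ago": 1100,
        "rank_2d_ago": 12, "plays_2d_ago": 900,
    }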
def parse_track(res, date):
"""
Parse the api query result of a single track within a chart
into a cleaned and structured one-row DataFrame, regardless of
what stream service it is from.
**Parameters**
- `res`: dictionary containing a track within a chart for
a given date
- `date`: string date in ISO format
**Returns**
A Pandas DataFrame with one row and multiple data fields.
"""
# define a key checker
kc = lambda k: res[k] if k in res.keys() else None
# define a list extender
expand = (
lambda k: {f"{k}_{i + 1}": name for i, name in enumerate(res[k])}
if res[k] != None
else {f"{k}_1": None}
)
# define a list extender with key check
kc_expand = lambda k: expand(k) if k in res.keys() else {f"{k}_1": None}
# common data fields
parsed = {
"date": date,
"added_at": kc("added_at"),
"cm_track": kc("cm_track"),
"image_url": kc("image_url"),
"isrc": kc("isrc"),
"name": kc("name"),
"peak_date": kc("peak_date"),
"peak_rank": kc("peak_rank"),
"pre_rank": kc("pre_rank"),
"time_on_chart": kc("time_on_chart"),
}
None_tag = True
if "id" in res.keys():
parsed["id"] = res["id"]
None_tag = False
elif kc("raw_data") != None:
if "id" in res["raw_data"].keys():
parsed["id"] = res["raw_data"]["id"]
None_tag = False
if None_tag:
parsed["id"] = None
common_fields = [
"artist_covers",
"artist_images",
"artist_names",
"cm_artist",
"code2s",
]
for data_field in common_fields:
expanded_dict = kc_expand(data_field)
parsed.update(expanded_dict)
if "rankStats" in res.keys() and len(res["rankStats"]) > 0:
rank_stats = extract_rank_stats(res["rankStats"])
parsed.update(rank_stats)
# special treatment for YouTube
if "youtube_track_id" in res.keys():
parsed_youtube = {
"artist_name": kc("artist_name"),
"position": kc("position"),
"view_count": kc("view_count"),
"youtube_artist": kc("youtube_artist"),
"youtube_track_id": kc("youtube_track_id"),
}
None_tag = True
if kc("raw_data") != None:
if "percent_views_change" in res["raw_data"].keys():
parsed_youtube["percent_views_change"] = res["raw_data"][
"percent_views_change"
]
None_tag = False
if None_tag:
parsed_youtube["percent_views_change"] = None
youtube_fields = [
"youtube_artist_ids",
"youtube_artist_names",
"youtube_track_ids",
]
for data_field in youtube_fields:
expanded_dict = kc_expand(data_field)
parsed_youtube.update(expanded_dict)
parsed.update(parsed_youtube)
else:
# common items except for YouTube
parsed_not_u2b = {
"code2": res["code2"].strip() if "code2" in res.keys() else None,
"rank": kc("rank"),
}
not_u2b_fields = [
"album_ids",
"album_label",
"album_names",
"album_upc",
"release_dates",
]
for data_field in not_u2b_fields:
expanded_dict = kc_expand(data_field)
parsed_not_u2b.update(expanded_dict)
None_tag = True
if kc("track_genre") != None:
genres_list = list(
set(
res["track_genre"]
.replace(",Music", "")
.replace(",", "/")
.split("/")
)
)
if len(genres_list):
track_genres = {
f"track_genre_{i + 1}": genre
for i, genre in enumerate(genres_list)
}
None_tag = False
if None_tag:
track_genres = {"track_genre_1": None}
parsed_not_u2b.update(track_genres)
parsed.update(parsed_not_u2b)
# common items for AppleMusic, iTunes and Shazam
if "itunes_album_id" in res.keys():
composers = get_composers(kc("composer_name"))
parsed.update(composers)
apple_fields = [
"itunes_album_id",
"itunes_album_ids",
"itunes_artist_ids",
"itunes_artist_names",
"itunes_track_ids",
"storefronts",
]
for data_field in apple_fields:
expanded_dict = kc_expand(data_field)
parsed.update(expanded_dict)
if "itunes" in res.keys():
# common for AppleMusic and iTunes
parsed["itunes"] = kc("itunes")
if "country" in res.keys():
parsed["country"] = res["country"].strip()
if "genre" in res.keys():
parsed["genre"] = res["genre"]
else:
# special items for Shazam
parsed_shazam = {
"city": kc("city"),
"itunes_id": kc("itunes_id"),
"num_of_shazams": kc("num_of_shazams"),
"shazam_track_id": kc("shazam_track_id"),
}
parsed.update(parsed_shazam)
else:
# special treatment for Spotify
parsed_spotify = {
"chart_name": kc("chart_name"),
"chart_type": kc("chart_type"),
"current_plays": kc("current_plays"),
"duration": kc("duration"),
"spotify": kc("spotify"),
"spotify_album_id": kc("spotify_album_id"),
"spotify_duration_ms": kc("spotify_duration_ms"),
"spotify_popularity": kc("spotify_popularity"),
}
spotify_fields = [
"spotify_album_ids",
"spotify_artist_ids",
"spotify_artist_names",
"spotify_track_ids",
]
for data_field in spotify_fields:
expanded_dict = kc_expand(data_field)
parsed_spotify.update(expanded_dict)
parsed.update(parsed_spotify)
parsed_df = pd.DataFrame(parsed, index=[0])
return parsed_df
def parse_charts(res, date=None):
"""
Manipulate the result (res) of any track api query into a coherent
dataframe. This takes the actual query result of xxx.chart(date),
using a tuple of the query and the date. The res param should
include a list of dictionaries representing each track on the chart.
**Parameters**
- `res`: list of dictionaries of track api query
results for a single date
- `date`: string date in ISO format
**Returns**
Pandas DataFrame with the following columns:
- For YouTube chart input:
```python
['added_at', 'artist_covers_{i}', 'artist_images_{i}',
'artist_name', 'artist_names_{i}', 'cm_artist_{i}',
'cm_track', 'code2s_{i}', 'id', 'image_url', 'isrc',
'name', 'peak_date', 'peak_rank', 'position', 'pre_rank',
'rank_{i}d_ago', 'rank_today', 'percent_views_change',
'time_on_chart', 'view_count', 'youtube_artist',
'youtube_artist_ids_{i}', 'youtube_artist_names_{i}',
'youtube_track_id', 'youtube_track_ids_{i}']
```
- For Spotify chart input:
```python
['added_at', 'album_ids_{i}', 'album_label_{i}',
'album_names_{i}', 'album_upc_{i}', 'artist_covers_{i}',
'artist_images_{i}', 'artist_names_{i}', 'chart_name',
'chart_type', 'cm_artist_{i}', 'cm_track', 'code2',
'code2s_{i}', 'current_plays', 'duration', 'id',
'image_url', 'isrc', 'name', 'peak_date', 'peak_rank',
'pre_rank', 'rank', 'rank_{i}d_ago', 'rank_today',
'plays_{i}d_ago', 'plays_today', 'release_dates_{i}',
'spotify', 'spotify_album_id', 'time_on_chart',
'spotify_album_ids_{i}', 'spotify_artist_ids_{i}',
'track_genre_{i}', 'spotify_artist_names_{i}',
'spotify_duration_ms', 'spotify_popularity',
'spotify_track_ids_{i}']
```
- For AppleMusic chart input:
```python
['added_at', 'album_ids_{i}', 'album_label_{i}',
'album_names_{i}', 'album_upc_{i}', 'artist_covers_{i}',
'artist_images_{i}', 'cm_track', 'artist_names_{i}',
'cm_artist_{i}', 'code2', 'code2s_{i}','itunes',
'composer_{i}', 'country', 'id', 'image_url', 'isrc',
'name', 'itunes_album_id_{i}', 'itunes_album_ids_{i}',
'itunes_artist_ids_{i}', 'itunes_artist_names_{i}',
'itunes_track_ids_{i}', 'peak_date', 'peak_rank',
'pre_rank', 'rank', 'rank_{i}d_ago', 'rank_today',
'release_dates_{i}', 'storefronts_{i}', 'time_on_chart',
'track_genre_{i}']
```
- For iTunes chart input:
```python
['added_at', 'album_ids_{i}', 'album_label_{i}',
'album_names_{i}', 'album_upc_{i}', 'artist_covers_{i}',
'artist_images_{i}', 'cm_track', 'artist_names_{i}',
'cm_artist_{i}', 'code2', 'code2s_{i}', 'genre',
'composer_{i}', 'id', 'image_url', 'isrc', 'itunes',
'itunes_album_id_{i}', 'itunes_album_ids_{i}',
'itunes_artist_ids_{i}', 'itunes_artist_names_{i}',
'itunes_track_ids_{i}', 'name', 'peak_date', 'peak_rank',
'pre_rank', 'rank', 'rank_{i}d_ago', 'rank_today',
'release_dates_{i}', 'storefronts_{i}', 'time_on_chart',
'track_genre_{i}']
```
- For Shazam chart input:
```python
['added_at', 'album_ids_{i}', 'album_label_{i}',
'album_names_{i}', 'album_upc_{i}', 'artist_covers_{i}',
'artist_images_{i}', 'cm_track', 'artist_names_{i}',
'city', 'cm_artist_{i}', 'code2', 'code2s_{i}',
'composer_{i}', 'id', 'image_url', 'isrc',
'itunes_album_id_{i}', 'itunes_album_ids_{i}',
'itunes_artist_ids_{i}', 'itunes_artist_names_{i}',
'itunes_id', 'itunes_track_ids_{i}', 'name',
'num_of_shazams', 'peak_date', 'peak_rank', 'pre_rank',
'rank', 'rank_{i}d_ago', 'rank_today', 'release_dates_{i}',
'shazam_track_id', 'storefronts_{i}', 'time_on_chart',
'track_genre_{i}']
```
"""
# first ensure we have a list as input...
try:
        assert isinstance(res, list)
assert len(res) > 0
except AssertionError:
print(f"Not a list or empty list for {date}: ")
return None
data = []
with Pool() as p:
data = p.starmap(parse_track, zip(res, repeat(date)))
parsed_chart = pd.concat(data, ignore_index=True, sort=True)
parsed_chart.replace("None", np.nan, inplace=True) # fill string 'None'
parsed_chart.fillna(value=np.nan, inplace=True) # fill None
return parsed_chart
def type_cast(parsed):
"""
Change the data type of certain columns of the cleaned DataFrame.
**Parameters**
- `parsed`: DataFrame of parsed chart with all the tracks
**Returns**
A Pandas DataFrame that's parsed and type-casted.
"""
fix_nan_str = lambda col: parsed.loc[:, col].fillna(value="").astype(str)
str_to_date = (
lambda d: pd.to_datetime(d[:10], errors="coerce")
if isinstance(d, str)
else d
)
findall_columns = lambda col: parsed.filter(regex=col).columns
# common data fields
parsed.loc[:, "date"] = parsed["date"].apply(str_to_date)
parsed.loc[:, "added_at"] = parsed["added_at"].apply(str_to_date)
parsed.loc[:, "cm_track"] = fix_nan_str("cm_track")
parsed.loc[:, "id"] = fix_nan_str("id")
parsed.loc[:, "image_url"] = fix_nan_str("image_url")
parsed.loc[:, "isrc"] = fix_nan_str("isrc")
parsed.loc[:, "name"] = fix_nan_str("name")
parsed.loc[:, "peak_date"] = parsed["peak_date"].apply(str_to_date)
parsed.loc[:, "peak_rank"] = parsed["peak_rank"].astype(float)
parsed.loc[:, "pre_rank"] = parsed["pre_rank"].astype(float)
parsed.loc[:, "time_on_chart"] = parsed["time_on_chart"].astype(float)
artist_covers_cols = findall_columns("^artist_covers")
parsed.loc[:, artist_covers_cols] = fix_nan_str(artist_covers_cols)
artist_images_cols = findall_columns("^artist_images")
parsed.loc[:, artist_images_cols] = fix_nan_str(artist_images_cols)
artist_names_cols = findall_columns("^artist_names")
parsed.loc[:, artist_names_cols] = fix_nan_str(artist_names_cols)
cm_artist_cols = findall_columns("^cm_artist")
parsed.loc[:, cm_artist_cols] = parsed[cm_artist_cols].astype(
float
) # to get around NaNs
code2s_cols = findall_columns("^code2s")
parsed.loc[:, code2s_cols] = fix_nan_str(code2s_cols)
today_cols = findall_columns("today")
parsed.loc[:, today_cols] = parsed[today_cols].astype(float)
days_ago_cols = findall_columns("d_ago")
parsed.loc[:, days_ago_cols] = parsed[days_ago_cols].astype(float)
# special for YouTube
if "youtube_track_id" in parsed.columns:
parsed.loc[:, "artist_name"] = fix_nan_str("artist_name")
parsed.loc[:, "position"] = parsed["position"].astype(float)
parsed.loc[:, "view_count"] = parsed["view_count"].astype(float)
parsed.loc[:, "percent_views_change"] = parsed[
"percent_views_change"
].astype(float)
parsed.loc[:, "youtube_artist"] = fix_nan_str("youtube_artist")
parsed.loc[:, "youtube_track_id"] = fix_nan_str("youtube_track_id")
u2b_artist_ids_cols = findall_columns("^youtube_artist_ids")
parsed.loc[:, u2b_artist_ids_cols] = fix_nan_str(u2b_artist_ids_cols)
u2b_artist_names_cols = findall_columns("^youtube_artist_names")
parsed.loc[:, u2b_artist_names_cols] = fix_nan_str(
u2b_artist_names_cols
)
u2b_track_ids_cols = findall_columns("^youtube_track_ids")
parsed.loc[:, u2b_track_ids_cols] = fix_nan_str(u2b_track_ids_cols)
else:
# common items except for YouTube
parsed.loc[:, "code2"] = fix_nan_str("code2")
parsed.loc[:, "rank"] = parsed["rank"].astype(float)
release_dates_cols = findall_columns("^release_dates")
for column in release_dates_cols:
parsed.loc[:, column] = parsed.loc[:, column].apply(str_to_date)
album_ids_cols = findall_columns(
"^album_ids"
) # match only columns starting with album_ids
parsed.loc[:, album_ids_cols] = parsed[album_ids_cols].astype(float)
album_label_cols = findall_columns("^album_label")
parsed.loc[:, album_label_cols] = fix_nan_str(album_label_cols)
album_names_cols = findall_columns("^album_names")
parsed.loc[:, album_names_cols] = fix_nan_str(album_names_cols)
album_upc_cols = findall_columns("^album_upc")
parsed.loc[:, album_upc_cols] = fix_nan_str(album_upc_cols)
track_genre_cols = findall_columns("^track_genre")
parsed.loc[:, track_genre_cols] = fix_nan_str(track_genre_cols)
# common items for AppleMusic, iTunes and Shazam
if "itunes_album_id_1" in parsed.columns:
composer_cols = findall_columns("^composer")
parsed.loc[:, composer_cols] = fix_nan_str(composer_cols)
itunes_album_id_cols = findall_columns(
"^itunes_album_id"
) # cast `id` and `ids` together
parsed.loc[:, itunes_album_id_cols] = (
parsed[itunes_album_id_cols].replace("", np.nan).astype(float)
)
itunes_artist_id_cols = findall_columns("^itunes_artist_id")
parsed.loc[:, itunes_artist_id_cols] = parsed[
itunes_artist_id_cols
].astype(float)
itunes_track_ids_cols = findall_columns("^itunes_track_ids")
parsed.loc[:, itunes_track_ids_cols] = parsed[
itunes_track_ids_cols
].astype(float)
itunes_artist_names_cols = findall_columns("^itunes_artist_names")
parsed.loc[:, itunes_artist_names_cols] = fix_nan_str(
itunes_artist_names_cols
)
storefronts_cols = findall_columns("^storefronts")
parsed.loc[:, storefronts_cols] = fix_nan_str(storefronts_cols)
if "itunes" in parsed.columns:
# common for AppleMusic and iTunes
parsed.loc[:, "itunes"] = fix_nan_str(
"itunes"
) # cast to str to prevent breaking
if "country" in parsed.columns:
parsed.loc[:, "country"] = fix_nan_str("country")
if "genre" in parsed.columns:
parsed.loc[:, "genre"] = fix_nan_str("genre")
else:
# special items for Shazam
parsed.loc[:, "city"] = fix_nan_str("city")
parsed.loc[:, "itunes_id"] = fix_nan_str("itunes_id")
parsed.loc[:, "num_of_shazams"] = parsed[
"num_of_shazams"
].astype(float)
parsed.loc[:, "shazam_track_id"] = fix_nan_str(
"shazam_track_id"
)
else:
# special treatment for Spotify
parsed.loc[:, "chart_name"] = fix_nan_str("chart_name")
parsed.loc[:, "chart_type"] = fix_nan_str("chart_type")
parsed.loc[:, "current_plays"] = parsed["current_plays"].astype(
float
)
parsed.loc[:, "duration"] = fix_nan_str("duration")
parsed.loc[:, "spotify"] = fix_nan_str("spotify")
parsed.loc[:, "spotify_album_id"] = fix_nan_str("spotify_album_id")
parsed.loc[:, "spotify_duration_ms"] = parsed[
"spotify_duration_ms"
].astype(float)
parsed.loc[:, "spotify_popularity"] = parsed[
"spotify_popularity"
].astype(float)
spotify_album_ids_cols = findall_columns("^spotify_album_ids")
parsed.loc[:, spotify_album_ids_cols] = fix_nan_str(
spotify_album_ids_cols
)
spotify_artist_ids_cols = findall_columns("^spotify_artist_ids")
parsed.loc[:, spotify_artist_ids_cols] = fix_nan_str(
spotify_artist_ids_cols
)
spotify_artist_names_cols = findall_columns(
"^spotify_artist_names"
)
parsed.loc[:, spotify_artist_names_cols] = fix_nan_str(
spotify_artist_names_cols
)
spotify_track_ids_cols = findall_columns("^spotify_track_ids")
parsed.loc[:, spotify_track_ids_cols] = fix_nan_str(
spotify_track_ids_cols
)
return parsed
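# Hedged end-to-end sketch (the `res` argument is a hypothetical list of track dicts as
# returned by a Chartmetric chart endpoint): parse one day's chart, then normalise dtypes.
def _demo_parse_and_cast(res, date="2021-01-01"):
    parsed = parse_charts(res, date=date)
    return type_cast(parsed) if parsed is not None else None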
|
138792
|
import ptf
from ptf.base_tests import BaseTest
from ptf import testutils
class TestParamsGet(BaseTest):
def setUp(self):
BaseTest.setUp(self)
def runTest(self):
params = testutils.test_params_get(default=None)
if params is None:
print(">>>None")
else:
for k, v in params.items():
print(">>>{}={}".format(k, v))
class TestParamGet(BaseTest):
def setUp(self):
BaseTest.setUp(self)
def runTest(self):
v = testutils.test_param_get('k1', default=-1)
if v is None:
print(">>>None")
else:
print(">>>k1={}".format(v))
|
138815
|
import logging
import pyipmi
import pyipmi.interfaces
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.constants import VM_POWERED_OFF, VM_POWERED_ON
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.node import wait_for_nodes_status, get_worker_nodes, get_master_nodes
from ocs_ci.ocs.ocp import OCP, wait_for_cluster_connectivity
from ocs_ci.utility.utils import TimeoutSampler, load_auth_config, exec_cmd
logger = logging.getLogger(__name__)
class BAREMETAL(object):
"""
wrapper for Baremetal
"""
def __init__(self):
"""
Initialize the variables required
"""
self.mgmt_details = load_auth_config()["ipmi"]
def get_ipmi_ctx(self, host, user, password):
"""
Function to get ipmi handler
Args:
host (str): Host mgmt address
user (str): User Name for accessing mgmt console
password (str): Password for accessing mgmt console
Returns (object): ipmi handler
"""
interface = pyipmi.interfaces.create_interface(
"ipmitool", interface_type=defaults.IPMI_INTERFACE_TYPE
)
ipmi = pyipmi.create_connection(interface)
ipmi.session.set_session_type_rmcp(host, port=defaults.IPMI_RMCP_PORT)
ipmi.session.set_auth_type_user(user, password)
ipmi.session.establish()
ipmi.target = pyipmi.Target(ipmb_address=defaults.IPMI_IPMB_ADDRESS)
return ipmi
def get_power_status(self, ipmi_ctx):
"""
Get BM Power status
Args:
ipmi_ctx (object) : Ipmi host handler
Returns: (bool): bm power status
"""
chassis_status = ipmi_ctx.get_chassis_status()
return VM_POWERED_ON if chassis_status.power_on else VM_POWERED_OFF
def verify_machine_is_down(self, node):
"""
        Verify that the Baremetal machine is completely powered off
Args:
node (object): Node objects
Returns:
bool: True if machine is down, False otherwise
"""
result = exec_cmd(cmd=f"ping {node.name} -c 10", ignore_error=True)
if result.returncode == 0:
return False
else:
return True
def stop_baremetal_machines(self, baremetal_machine, force=True):
"""
Stop Baremetal Machines
Args:
baremetal_machine (list): BM objects
force (bool): True for BM ungraceful power off, False for
graceful BM shutdown
Raises:
UnexpectedBehaviour: If baremetal machine is still up
"""
for node in baremetal_machine:
if force:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
logger.info(f"Powering Off {node.name}")
ipmi_ctx.chassis_control_power_down()
else:
ocp = OCP(kind="node")
ocp.exec_oc_debug_cmd(
node=node.name, cmd_list=["shutdown now"], timeout=60
)
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
for status in TimeoutSampler(
600, 5, self.get_power_status, ipmi_ctx
):
logger.info(
f"Waiting for Baremetal Machine {node.name} to power off"
f"Current Baremetal status: {status}"
)
if status == VM_POWERED_OFF:
logger.info(
f"Baremetal Machine {node.name} reached poweredOff status"
)
break
logger.info("Verifing machine is down")
ret = TimeoutSampler(
timeout=300,
sleep=3,
func=self.verify_machine_is_down,
node=node,
)
logger.info(ret)
if not ret.wait_for_func_status(result=True):
raise UnexpectedBehaviour("Machine {node.name} is still Running")
def start_baremetal_machines_with_ipmi_ctx(self, ipmi_ctxs, wait=True):
"""
Start Baremetal Machines using Ipmi ctx
Args:
ipmi_ctxs (list): List of BM ipmi_ctx
wait (bool): Wait for BMs to start
"""
for ipmi_ctx in ipmi_ctxs:
ipmi_ctx.chassis_control_power_up()
if wait:
for ipmi_ctx in ipmi_ctxs:
for status in TimeoutSampler(600, 5, self.get_power_status, ipmi_ctx):
logger.info(
f"Waiting for Baremetal Machine to power on. "
f"Current Baremetal status: {status}"
)
if status == VM_POWERED_ON:
logger.info("Baremetal Machine reached poweredOn status")
break
wait_for_cluster_connectivity(tries=400)
wait_for_nodes_status(
node_names=get_master_nodes(), status=constants.NODE_READY, timeout=800
)
wait_for_nodes_status(
node_names=get_worker_nodes(), status=constants.NODE_READY, timeout=800
)
def start_baremetal_machines(self, baremetal_machine, wait=True):
"""
Start Baremetal Machines
Args:
baremetal_machine (list): BM objects
wait (bool): Wait for BMs to start
"""
for node in baremetal_machine:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
logger.info(f"Powering On {node.name}")
ipmi_ctx.chassis_control_power_up()
if wait:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
for status in TimeoutSampler(
600, 5, self.get_power_status, ipmi_ctx
):
logger.info(
f"Waiting for Baremetal Machine {node.name} to power on. "
f"Current Baremetal status: {status}"
)
if status == VM_POWERED_ON:
logger.info(
f"Baremetal Machine {node.name} reached poweredOn status"
)
ipmi_ctx.session.close()
break
wait_for_cluster_connectivity(tries=400)
wait_for_nodes_status(
node_names=get_master_nodes(), status=constants.NODE_READY, timeout=800
)
wait_for_nodes_status(
node_names=get_worker_nodes(), status=constants.NODE_READY, timeout=800
)
def restart_baremetal_machines(self, baremetal_machine, force=True):
"""
Restart Baremetal Machines
Args:
baremetal_machine (list): BM objects
force (bool): True for BM ungraceful power off, False for
graceful BM shutdown
"""
self.stop_baremetal_machines(baremetal_machine, force=force)
self.start_baremetal_machines(baremetal_machine)
def get_nodes_ipmi_ctx(self, baremetal_machine):
"""
Get Node Ipmi handler
Args:
baremetal_machine: BM objects
"""
node_ipmi_ctx = list()
for node in baremetal_machine:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
node_ipmi_ctx.append(ipmi_ctx)
return node_ipmi_ctx
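# Hedged usage sketch (node objects come from ocs_ci's node helpers; the variable name is
# illustrative): power-cycle a set of nodes through their IPMI management interfaces.
def _demo_restart_workers(worker_node_objs):
    bm = BAREMETAL()
    bm.restart_baremetal_machines(worker_node_objs, force=True)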
|
138844
|
import copy
import time
import json
import logging
log = logging.getLogger(__name__)
import torch
from optim import lbfgs_modified
import config as cfg
def store_checkpoint(checkpoint_file, state, optimizer, current_epoch, current_loss,\
verbosity=0):
r"""
:param checkpoint_file: target file
:param state: ipeps wavefunction
:param optimizer: Optimizer
:param current_epoch: current epoch
:param current_loss: current value of a loss function
:param verbosity: verbosity
:type checkpoint_file: str or Path
:type state: IPEPS
:type optimizer: torch.optim.Optimizer
:type current_epoch: int
:type current_loss: float
:type verbosity: int
Store the current state of the optimization in ``checkpoint_file``.
"""
torch.save({
'epoch': current_epoch,
'loss': current_loss,
'parameters': state.get_checkpoint(),
'optimizer_state_dict': optimizer.state_dict()}, checkpoint_file)
if verbosity>0:
print(checkpoint_file)
def optimize_state(state, ctm_env_init, loss_fn, grad_fn,
obs_fn=None, post_proc=None,
main_args=cfg.main_args, opt_args=cfg.opt_args,ctm_args=cfg.ctm_args,
global_args=cfg.global_args):
r"""
:param state: initial wavefunction
:param ctm_env_init: initial environment corresponding to ``state``
:param loss_fn: loss function
:param model: model with definition of observables
:param main_args: parsed command line arguments
:param opt_args: optimization configuration
:param ctm_args: CTM algorithm configuration
:param global_args: global configuration
:type state: IPEPS
:type ctm_env_init: ENV
:type loss_fn: function(IPEPS,ENV,CTMARGS,OPTARGS,GLOBALARGS)->torch.tensor
:type model: TODO Model base class
:type main_args: argparse.Namespace
:type opt_args: OPTARGS
:type ctm_args: CTMARGS
:type global_args: GLOBALARGS
Optimizes initial wavefunction ``state`` with respect to ``loss_fn`` using LBFGS optimizer.
The main parameters influencing the optimization process are given in :py:class:`config.OPTARGS`.
"""
verbosity = opt_args.verbosity_opt_epoch
checkpoint_file = main_args.out_prefix+"_checkpoint.p"
outputstatefile= main_args.out_prefix+"_state.json"
t_data = dict({"loss": [], "min_loss": 1.0e+16, "loss_ls": [], "min_loss_ls": 1.0e+16})
current_env=[ctm_env_init]
context= dict({"ctm_args":ctm_args, "opt_args":opt_args, "loss_history": t_data})
epoch= 0
parameters= state.get_parameters()
for A in parameters: A.requires_grad_(True)
optimizer = lbfgs_modified.LBFGS_MOD(parameters, max_iter=opt_args.max_iter_per_epoch, \
lr=opt_args.lr, tolerance_grad=opt_args.tolerance_grad, \
tolerance_change=opt_args.tolerance_change, history_size=opt_args.history_size, \
line_search_fn=opt_args.line_search, line_search_eps=opt_args.line_search_tol)
# load and/or modify optimizer state from checkpoint
if main_args.opt_resume is not None:
print(f"INFO: resuming from check point. resume = {main_args.opt_resume}")
checkpoint = torch.load(main_args.opt_resume)
epoch0 = checkpoint["epoch"]
loss0 = checkpoint["loss"]
cp_state_dict= checkpoint["optimizer_state_dict"]
cp_opt_params= cp_state_dict["param_groups"][0]
cp_opt_history= cp_state_dict["state"][cp_opt_params["params"][0]]
if main_args.opt_resume_override_params:
cp_opt_params["lr"] = opt_args.lr
cp_opt_params["max_iter"] = opt_args.max_iter_per_epoch
cp_opt_params["tolerance_grad"] = opt_args.tolerance_grad
cp_opt_params["tolerance_change"] = opt_args.tolerance_change
# resize stored old_dirs, old_stps, ro, al to new history size
cp_history_size= cp_opt_params["history_size"]
cp_opt_params["history_size"] = opt_args.history_size
if opt_args.history_size < cp_history_size:
if len(cp_opt_history["old_dirs"]) > opt_args.history_size:
cp_opt_history["old_dirs"]= cp_opt_history["old_dirs"][-opt_args.history_size:]
cp_opt_history["old_stps"]= cp_opt_history["old_stps"][-opt_args.history_size:]
cp_ro_filtered= list(filter(None,cp_opt_history["ro"]))
cp_al_filtered= list(filter(None,cp_opt_history["al"]))
if len(cp_ro_filtered) > opt_args.history_size:
cp_opt_history["ro"]= cp_ro_filtered[-opt_args.history_size:]
cp_opt_history["al"]= cp_al_filtered[-opt_args.history_size:]
else:
cp_opt_history["ro"]= cp_ro_filtered + [None for i in range(opt_args.history_size-len(cp_ro_filtered))]
cp_opt_history["al"]= cp_al_filtered + [None for i in range(opt_args.history_size-len(cp_ro_filtered))]
cp_state_dict["param_groups"][0]= cp_opt_params
cp_state_dict["state"][cp_opt_params["params"][0]]= cp_opt_history
optimizer.load_state_dict(cp_state_dict)
print(f"checkpoint.loss = {loss0}")
#@profile
def closure(linesearching=False):
context["line_search"]=linesearching
# 0) evaluate loss
optimizer.zero_grad()
with torch.no_grad():
loss, ctm_env, history, timings= loss_fn(state, current_env[0], context)
# 1) record loss and store current state if the loss improves
if linesearching:
t_data["loss_ls"].append(loss.item())
if t_data["min_loss_ls"] > t_data["loss_ls"][-1]:
t_data["min_loss_ls"]= t_data["loss_ls"][-1]
else:
t_data["loss"].append(loss.item())
if t_data["min_loss"] > t_data["loss"][-1]:
t_data["min_loss"]= t_data["loss"][-1]
state.write_to_file(outputstatefile, normalize=True)
# 2) log CTM metrics for debugging
if opt_args.opt_logging:
log.info({"history_length": len(history['log']), "history": history['log'],
"final_multiplets": history["final_multiplets"]})
log_entry=dict({"id": epoch, "loss": t_data["loss"][-1], "timings": timings})
if linesearching:
log_entry["LS"]=len(t_data["loss_ls"])
log_entry["loss"]=t_data["loss_ls"]
log.info(json.dumps(log_entry))
# 3) compute desired observables
if obs_fn is not None:
obs_fn(state, ctm_env, context)
# 4) evaluate gradient
t_grad0= time.perf_counter()
with torch.no_grad():
grad= grad_fn(state, ctm_env, context, loss)
for k in state.coeffs.keys():
state.coeffs[k].grad= grad[k]
t_grad1= time.perf_counter()
# 5) log grad metrics
if opt_args.opt_logging:
log_entry=dict({"id": epoch, "t_grad": t_grad1-t_grad0 })
if linesearching: log_entry["LS"]=len(t_data["loss_ls"])
log.info(json.dumps(log_entry))
# 6) detach current environment from autograd graph
current_env[0] = ctm_env.detach().clone()
return loss
# closure for derivative-free line search. This closure
# is to be called within torch.no_grad context
@torch.no_grad()
def closure_linesearch(linesearching):
context["line_search"]=linesearching
# 1) evaluate loss
loc_opt_args= copy.deepcopy(opt_args)
loc_opt_args.opt_ctm_reinit= opt_args.line_search_ctm_reinit
loc_ctm_args= copy.deepcopy(ctm_args)
# TODO check if we are optimizing C4v symmetric ansatz
if opt_args.line_search_svd_method != 'DEFAULT':
loc_ctm_args.projector_svd_method= opt_args.line_search_svd_method
loc_context= dict({"ctm_args":loc_ctm_args, "opt_args":loc_opt_args, \
"loss_history": t_data, "line_search": True})
loss, ctm_env, history, timings = loss_fn(state, current_env[0],\
loc_context)
# 2) store current state if the loss improves
t_data["loss_ls"].append(loss.item())
if t_data["min_loss_ls"] > t_data["loss_ls"][-1]:
t_data["min_loss_ls"]= t_data["loss_ls"][-1]
# 5) log CTM metrics for debugging
if opt_args.opt_logging:
log.info({"history_length": len(history['log']), "history": history['log'],
"final_multiplets": history["final_multiplets"]})
log_entry=dict({"id": epoch, "LS": len(t_data["loss_ls"]), \
"loss": t_data["loss_ls"], "timings": timings})
log.info(json.dumps(log_entry))
# 4) compute desired observables
if obs_fn is not None:
obs_fn(state, ctm_env, context)
current_env[0]= ctm_env
return loss
for epoch in range(main_args.opt_max_iter):
# checkpoint the optimizer
# checkpointing before step, guarantees the correspondence between the wavefunction
# and the last computed value of loss t_data["loss"][-1]
if epoch>0:
store_checkpoint(checkpoint_file, state, optimizer, epoch, t_data["loss"][-1])
        # After the closure has executed, ``current_env`` **does not** correspond to ``state``,
        # since the ``state`` on-site tensors have been modified by the gradient step.
optimizer.step_2c(closure, closure_linesearch)
# reset line search history
t_data["loss_ls"]=[]
t_data["min_loss_ls"]=1.0e+16
if post_proc is not None:
post_proc(state, current_env[0], context)
# optimization is over, store the last checkpoint
store_checkpoint(checkpoint_file, state, optimizer, \
main_args.opt_max_iter, t_data["loss"][-1])
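# Usage sketch (hedged): ``state``/``ctm_env`` construction and the loss/gradient
# callables depend on the surrounding project; the names below are placeholders.
#   optimize_state(state, ctm_env, loss_fn=my_loss_fn, grad_fn=my_grad_fn)
#   store_checkpoint("run_checkpoint.p", state, optimizer, epoch, t_data["loss"][-1])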
|
138903
|
class DB:
'''
    Convenience class for the decibel scale. Other non-linear scales, such as the Richter scale, could be handled similarly.
Usage:
dB = DB()
.
. (later)
.
gain = 15 * dB
'''
def __rmul__(self, val):
'''
        Only support multiplication with dB on the right-hand side (val * dB);
        dB * val raises a TypeError, which avoids ambiguous expressions like dB * 10
'''
return 10 ** (val / 10.)
def __test__():
dB = DB()
gain = 10 * dB
assert abs(gain - 10) < 1e-8
try:
gain2 = dB * 10
raise Exception('Should raise a type error!')
except TypeError:
pass
__test__()
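# Worked example (not part of the original test): a 3 dB gain is roughly a factor of 2,
# since 3 * dB == 10 ** (3 / 10.) ~= 1.995.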
|
138939
|
import copy
import itertools
from collections import deque
from typing import TextIO, Tuple
from aoc2019.intcode import Computer, read_program
def query_position(x: int, y: int, computer: Computer) -> bool:
computer = copy.deepcopy(computer)
computer.send_input(x)
computer.send_input(y)
computer.run()
return computer.get_output() == 1
def find_line(y: int, x_min: int, x_max: int, computer: Computer) -> Tuple[int, int]:
    # First find the left edge of the beam on this line:
    offset = 0
    while not query_position(x_min, y, computer):
        offset += 1
        x_min += 1
    # Shift the previous right edge by the same offset as a starting guess,
    # then extend it to the right until we leave the beam.
    x_max += offset
    while query_position(x_max, y, computer):
        x_max += 1
x_max -= 1
return x_min, x_max
def part1(data: TextIO) -> int:
computer = Computer(read_program(data))
x_min, x_max = (0, 0)
total = 0
for y in range(50):
x_min, x_max = find_line(y, x_min, x_max, computer)
total += min(x_max, 49) - min(x_min, 50) + 1
return total
def part2(data: TextIO) -> int:
computer = Computer(read_program(data))
x_min, x_max = (0, 0)
lines = deque()
for y in itertools.count():
x_min, x_max = find_line(y, x_min, x_max, computer)
lines.append((x_min, x_max))
if len(lines) == 100:
x_top_min, x_top_max = lines.popleft()
if x_top_max - x_min + 1 < 100:
continue
return x_min * 10000 + y - 99
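# Hypothetical entry point (the file name is a placeholder for the puzzle input):
# if __name__ == "__main__":
#     with open("input.txt") as fh:
#         print(part1(fh))
#     with open("input.txt") as fh:
#         print(part2(fh))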
|
138965
|
def proper_divisors_sum(n):
return sum(a for a in xrange(1, n) if not n % a)
def amicable_numbers(a, b):
return proper_divisors_sum(a) == b and proper_divisors_sum(b) == a
# def amicable_numbers(a, b):
# # this works after multiple submissions to get lucky on random inputs
# return sum(c for c in xrange(1, a) if not a % c) == b
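# Sanity check (classic amicable pair): proper_divisors_sum(220) == 284 and
# proper_divisors_sum(284) == 220, so amicable_numbers(220, 284) is True.
# assert amicable_numbers(220, 284)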
|
138989
|
from collections import OrderedDict
from ctypes import LittleEndianStructure, Structure, Union, c_uint8, c_uint16, c_uint32,\
string_at, byref, sizeof, c_bool, c_int16, Array, c_char
import json
import logging
import struct
from telemetry_unit_conversions import \
temp_sensor_adc_val_to_celsius, adc_to_bat_current_milli_amper, \
adc_to_solar_panel_current_milli_amper, adc_to_gps_current_milli_amper,\
adc_to_adcs_current_milli_amper, adc_to_obc_current_milli_amper,\
adc_to_payload_current_milli_amper, adc_to_solar_panel_voltage_milli_volt, \
adc_to_bat_voltage, adc_3v3_bus_voltage_milli_volt, adc_5v_bus_voltage_milli_volt,\
adc_12v_bus_voltage_milli_volt, adc_to_com_3v3_current_milli_amper,\
adc_to_com_5v_current_milli_amper
logging.basicConfig(level=logging.INFO,
format="%(asctime)s.%(msecs)03dZ - %(levelname)s: %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S")
def format_all_fields(self):
def format_struct(struct):
if issubclass(struct.__class__, Structure):
return "\n" + str(struct)
else:
return struct
# pylint: disable=protected-access
return "\n".join([field[0] + ": " +
str(format_struct(getattr(self, field[0]))) for field in self._fields_])
def _serialize_ctypes_array_element(element):
if issubclass(element.__class__, Array):
return [_serialize_ctypes_array_element(x) for x in element]
elif issubclass(element.__class__, Structure) or issubclass(element.__class__, Union):
return _ctypes_obj_to_dic(element)
else:
return element
def _ctypes_obj_to_dic(obj):
result = OrderedDict()
# pylint: disable=protected-access
for field in obj._fields_:
field_name = field[0]
field_val = getattr(obj, field_name)
value_class = field_val.__class__
if issubclass(value_class, Array):
result[field_name] = [_serialize_ctypes_array_element(val) for val in field_val]
elif issubclass(value_class, Structure) or issubclass(value_class, Union):
result[field_name] = _ctypes_obj_to_dic(field_val)
else:
if isinstance(field_val, (bytes, bytearray)):
result[field_name] = field_val.decode("utf-8")
else:
result[field_name] = field_val
return result
def ctypes_obj_to_dic(obj):
return obj.unit_conversions_to_ground(_ctypes_obj_to_dic(obj))
class MessageData(LittleEndianStructure):
_pack_ = 1
def __str__(self):
return format_all_fields(self)
@classmethod
def unit_conversions_to_ground(cls, dic):
return dic
class CanStatistics(MessageData):
_fields_ = [('rx_frame_count', c_uint32),
('tx_frame_count', c_uint32),
('error_count', c_uint32),
]
class ADCData(MessageData):
_fields_ = [
# ADC0
('spxp_curr', c_uint16),
('spxn_curr', c_uint16),
('spyp_curr', c_uint16),
('spyn_curr', c_uint16),
('sp_x_v', c_uint16),
('sp_y_v', c_uint16),
('bat_curr', c_uint16),
('bat_v', c_uint16),
# ADC1
('uhf_curr_3v3', c_uint16),
('uhf_curr_5v', c_uint16),
('payload_curr', c_uint16),
('adcs_curr', c_uint16),
('gps_curr', c_uint16),
('obc_curr', c_uint16),
('sns_3v3', c_uint16),
('sns_5v', c_uint16),
# ADC2
('sns_12v_1', c_uint16),
('sns_12v_2', c_uint16),
('temp_sns1', c_uint16),
('temp_sns2', c_uint16)
]
class MpptPanelTelemetry(MessageData):
_fields_ = [('current_mppt_value', c_uint16)]
class MpptStatistics(MessageData):
_fields_ = [('panels', MpptPanelTelemetry * 2)]
def __str__(self):
strs = [str(x) for x in self.panels]
return "\n".join(strs)
class EpsStatistics(MessageData):
_fields_ = [('boot_count', c_uint32),
('periodic_boot_count', c_uint16),
('boot_reasons', c_uint8 * 12),
('last_boot_reason', c_uint8),
('total_uptime_s', c_uint32),
('uptime_s', c_uint32),
('memory_violation_reset_has_occured', c_bool),
('internal_temp', c_int16)]
class PowerLevelsBits(MessageData):
_fields_ = [('payload', c_uint16, 1),
('gps', c_uint16, 1),
('obc', c_uint16, 1),
('adcs', c_uint16, 1),
("battery_heater1", c_uint16, 1),
("battery_heater2", c_uint16, 1),
("charging", c_uint16, 1),
("uhf_a", c_uint16, 1),
("uhf_b", c_uint16, 1),
("toggle_3v3", c_uint16, 1),
("toggle_5v", c_uint16, 1),
("antenna_deployment1", c_uint16, 1),
("antenna_deployment2", c_uint16, 1)
]
class PowerLevels(Union):
_fields_ = [('raw', c_uint16),
('structured', PowerLevelsBits)]
def __str__(self):
return format_all_fields(self)
class PowerStatistics(MessageData):
_fields_ = [
('target_power_levels', PowerLevels),
('actual_power_levels', PowerLevels),
('state', c_uint8, 1),
('reserved', c_uint8, 7),
]
class AntennaStatistics(MessageData):
_fields_ = [
('deployment_sensed', c_uint8, 4),
('deployment_rounds', c_uint8, 4),
]
class SubsystemHeartbeatStatistics(MessageData):
_fields_ = [
('uhf_failures', c_uint16),
]
# See system-state.h for reference
class EpsStatisticsMessage(MessageData):
_fields_ = [('timestamp', c_uint32),
('can_statistics', CanStatistics),
('eps_statistics', EpsStatistics),
('adc_statistics', ADCData),
('mppt_statistics', MpptStatistics),
('power_statistics', PowerStatistics),
('subsystem_hearbeat_statistics', SubsystemHeartbeatStatistics),
('antenna_statistics', AntennaStatistics),
]
@classmethod
def _update_solar_panel_current_adc_to_milli_amper(cls, dic, field):
dic['adc_statistics'][field] = int(adc_to_solar_panel_current_milli_amper(
dic['adc_statistics'][field]))
@classmethod
def unit_conversions_to_ground(cls, dic):
cls._update_solar_panel_current_adc_to_milli_amper(dic, 'spxp_curr')
cls._update_solar_panel_current_adc_to_milli_amper(dic, 'spxn_curr')
cls._update_solar_panel_current_adc_to_milli_amper(dic, 'spyp_curr')
cls._update_solar_panel_current_adc_to_milli_amper(dic, 'spyn_curr')
dic['adc_statistics']['sp_x_v'] = int(adc_to_solar_panel_voltage_milli_volt(
dic['adc_statistics']['sp_x_v']))
dic['adc_statistics']['sp_y_v'] = int(adc_to_solar_panel_voltage_milli_volt(
dic['adc_statistics']['sp_y_v']))
dic['adc_statistics']['bat_curr'] = int(adc_to_bat_current_milli_amper(
dic['adc_statistics']['bat_curr']))
dic['adc_statistics']['bat_v'] = int(adc_to_bat_voltage(
dic['adc_statistics']['bat_v']))
dic['adc_statistics']['uhf_curr_3v3'] = int(adc_to_com_3v3_current_milli_amper(
dic['adc_statistics']['uhf_curr_3v3']))
dic['adc_statistics']['uhf_curr_5v'] = int(adc_to_com_5v_current_milli_amper(
dic['adc_statistics']['uhf_curr_5v']))
dic['adc_statistics']['payload_curr'] = int(adc_to_payload_current_milli_amper(
dic['adc_statistics']['payload_curr']))
dic['adc_statistics']['adcs_curr'] = int(adc_to_adcs_current_milli_amper(
dic['adc_statistics']['adcs_curr']))
dic['adc_statistics']['gps_curr'] = int(adc_to_gps_current_milli_amper(
dic['adc_statistics']['gps_curr']))
dic['adc_statistics']['obc_curr'] = int(adc_to_obc_current_milli_amper(
dic['adc_statistics']['obc_curr']))
dic['adc_statistics']['sns_3v3'] = int(adc_3v3_bus_voltage_milli_volt(
dic['adc_statistics']['sns_3v3']))
dic['adc_statistics']['sns_5v'] = int(adc_5v_bus_voltage_milli_volt(
dic['adc_statistics']['sns_5v']))
dic['adc_statistics']['sns_12v_1'] = int(adc_12v_bus_voltage_milli_volt(
dic['adc_statistics']['sns_12v_1']))
dic['adc_statistics']['sns_12v_2'] = int(adc_12v_bus_voltage_milli_volt(
dic['adc_statistics']['sns_12v_2']))
dic['adc_statistics']['temp_sns1'] = int(temp_sensor_adc_val_to_celsius(
dic['adc_statistics']['temp_sns1']))
dic['adc_statistics']['temp_sns2'] = int(temp_sensor_adc_val_to_celsius(
dic['adc_statistics']['temp_sns2']))
return dic
class UhfStatistics(MessageData):
_fields_ = [('boot_count', c_uint32),
('last_boot_reason', c_uint16),
('memory_violation_reset_has_occured', c_bool),
('internal_temp', c_int16),
('current_csp_packet_number', c_uint32),
('allowed_relay_packet_count', c_uint16),
('rx_csp_frame_count', c_uint32),
('rx_relay_frame_count', c_uint32),
('tx_csp_frame_count', c_uint32),
('rx_fifo_error_count', c_uint32),
('tx_fifo_error_count', c_uint32),
]
class UhfStatisticsMessage(MessageData):
_fields_ = [('can_statistics', CanStatistics),
('uhf_statistics', UhfStatistics)]
class RadioPacketType:
CSP = 1
RELAY = 2
TYPES = {CSP, RELAY}
LENGTH_TYPE = ">B"
LENGTH_HEADER_SIZE = 1
# Sizes (in bytes) of the trailing serial (packet counter) and CMAC fields. These
# constants are used below but are not defined in this excerpt; the values are
# assumed from the 4-byte serial and 4-byte CMAC placeholder written in
# HWRadioPacket.__init__.
COUNTER_SIZE_BYTES = 4
CMAC_SIZE_BYTES = 4
class HWRadioPacket:
def __init__(self, packet_type, payload, with_signature=False,
target_id=None, serial=None, cmac=None):
if packet_type not in RadioPacketType.TYPES:
raise ValueError("Packet type must be valid")
self.payload = payload
self.packet_type = packet_type
if with_signature and packet_type == RadioPacketType.CSP:
if not serial and not cmac:
self.serial = bytearray(struct.pack("<I", 0)) # Here we would in reality get the next available packet number
self.payload += self.serial
# Calculate after serial added
self.cmac = bytearray([ 0x0, 0x0, 0x0, 0x0 ]) # Here we would in reality sign for real
self.payload += self.cmac
elif cmac and serial:
self.serial = serial
self.cmac = cmac
self.payload += serial + cmac
else:
raise AssertionError("Either give both CMAC and serial or "
"neither")
self.sat_packet = bytearray([packet_type]) + self.payload
self.length_header = bytearray(struct.pack(LENGTH_TYPE, len(self.sat_packet)))
self.bytes = self.length_header + self.sat_packet
def __str__(self):
return " ".join(map(lambda b: "%02X" % b, self.bytes))
def get_bytes(self):
return self.bytes
def get_bytes_without_len(self):
return self.sat_packet
def len_without_len_field(self):
return len(self.bytes) - len(self.length_header)
@classmethod
def from_bytes(cls, data, with_signature=False):
if len(data) == 0:
raise ValueError("Packet can't be empty")
if len(data) < 2:
raise ValueError("Packet should contain atleast length and type")
if data[1] not in RadioPacketType.TYPES:
raise ValueError("Second byte should define the packet type")
packet_len = struct.unpack(LENGTH_TYPE, data[0:LENGTH_HEADER_SIZE])[0]
if packet_len != (len(data) - LENGTH_HEADER_SIZE):
logging.debug("Too many bytes for HW_RADIO_PACKET ignoring leftovers")
logging.debug(data)
cmac = None
serial = None
if with_signature:
cmac = data[-COUNTER_SIZE_BYTES:]
serial = data[-CMAC_SIZE_BYTES-COUNTER_SIZE_BYTES:-CMAC_SIZE_BYTES]
# Skip trailing cmac and counter so we can compare recalculated
# value
payload = data[LENGTH_HEADER_SIZE + 1:-CMAC_SIZE_BYTES-COUNTER_SIZE_BYTES]
else:
payload = data[LENGTH_HEADER_SIZE + 1:LENGTH_HEADER_SIZE + packet_len]
packet = HWRadioPacket(data[LENGTH_HEADER_SIZE], payload,
with_signature, serial=serial, cmac=cmac)
if with_signature:
packet.cmac = cmac
packet.serial = serial
return packet
@classmethod
def packets_from_bytes(cls, data, with_signature=False):
packets = []
idx = 0
while idx < len(data):
packet = HWRadioPacket.from_bytes(data[idx:], with_signature)
packets.append(packet)
idx += len(packet.get_bytes())
return packets
HEADER_SIZE = 4
HEADER_PLUS_LENGTH_SIZE = HEADER_SIZE + 2
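# ``_print_bool`` (and the ``Rdp`` helper referenced in CspPacket.__str__) are not part
# of this excerpt. A minimal stand-in is sketched here; rendering flags as "1"/"0" is an
# assumption, not the original implementation.
def _print_bool(value):
    return "1" if value else "0"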
class CspIdBits(LittleEndianStructure):
_fields_ = [('crc', c_uint32, 1),
('rdp', c_uint32, 1),
('xtea', c_uint32, 1),
('hmac', c_uint32, 1),
("reserved", c_uint32, 4),
("src_port", c_uint32, 6),
("dst_port", c_uint32, 6),
("dst", c_uint32, 5),
("src", c_uint32, 5),
("priority", c_uint32, 2), ]
class CspHeader(Union):
_fields_ = [("structured", CspIdBits),
("raw", c_uint32)]
def get_bytes(self):
return struct.pack(">I", self.raw)
class CspPacket:
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, src, dst, dst_port, src_port, payload,
priority=0, hmac=False, xtea=False, rdp=False,
crc=False):
self.len = struct.pack(">H", len(payload))
header_bits = CspIdBits(src=src, dst=dst, dst_port=dst_port, src_port=src_port,
priority=priority, hmac=hmac, xtea=xtea, rdp=rdp, crc=crc)
self.header = CspHeader(structured=header_bits)
self.src = src
self.dst = dst
self.dst_port = dst_port
self.src_port = src_port
self.payload = payload
self.priority = priority
self.hmac = hmac
self.xtea = xtea
self.rdp = rdp
self.crc = crc
def __str__(self):
payload_str = " ".join(map(lambda b: "%02X" %b, self.payload)) if len(self.payload) < 10 else ""
rdp_str = " RDP(" + str(Rdp.from_bytes(self.payload)) + ")" \
if self.rdp else ""
return "CSP(src %d:%d dst %d:%d prio %s hmac %s xtea %s rdp %s " \
"crc %s data[%d] [%s]%s)" % (
self.src, self.src_port, self.dst, self.dst_port, self.priority,
_print_bool(self.hmac),
_print_bool(self.xtea),
_print_bool(self.rdp),
_print_bool(self.crc),
len(self.payload), payload_str, rdp_str)
@classmethod
def from_header(cls, header, payload):
return CspPacket(header.structured.src, header.structured.dst, header.structured.dst_port,
header.structured.src_port, payload, priority=header.structured.priority,
hmac=header.structured.hmac, xtea=header.structured.xtea,
rdp=header.structured.rdp, crc=header.structured.crc
)
@classmethod
def from_bytes(cls, data, with_length=True):
if not data:
raise ValueError("argument should be iterable")
if len(data) < HEADER_PLUS_LENGTH_SIZE:
raise ValueError("Csp packet has to have at least 32bit header and 16bit length field")
header = CspHeader(raw=struct.unpack(">I", data[0:4])[0])
if with_length:
length = struct.unpack(">H", data[4:HEADER_PLUS_LENGTH_SIZE])[0]
if len(data) < length + HEADER_PLUS_LENGTH_SIZE:
raise ValueError("No support for buffering,"
" argument should enough bytes to satifsfy length field length")
return cls.from_header(header,
data[HEADER_PLUS_LENGTH_SIZE:HEADER_PLUS_LENGTH_SIZE+length])
else:
return cls.from_header(header, data[HEADER_SIZE:])
@classmethod
def packets_from_bytes(cls, data):
packets = []
idx = 0
while idx < len(data):
try:
packet = cls.from_bytes(data[idx:])
packets.append(packet)
idx += len(packet.get_bytes())
except ValueError:
logging.debug("Tried to parse malformed or"
" incomplete csp packet from bytes: %s", data[idx:])
break
return packets
def get_bytes(self, with_length=True):
if with_length:
return self.header.get_bytes() + self.len + self.payload
else:
return self.header.get_bytes() + self.payload
def uhf_to_json(str):
data = bytes.fromhex(str)
packet = HWRadioPacket.from_bytes(data, with_signature=False)
csp_packet = CspPacket.from_bytes(packet.payload)
msg = UhfStatisticsMessage.from_buffer_copy(csp_packet.payload)
print(json.dumps(ctypes_obj_to_dic(msg), indent=1, sort_keys=False))
def eps_to_json(str):
data = bytes.fromhex(str)
packet = HWRadioPacket.from_bytes(data, with_signature=False)
csp_packet = CspPacket.from_bytes(packet.payload)
msg = EpsStatisticsMessage.from_buffer_copy(csp_packet.payload)
print(json.dumps(ctypes_obj_to_dic(msg), indent=1, sort_keys=False))
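# Hypothetical usage (the argument is the hex dump of a received radio frame; no real
# frame is included here):
# eps_to_json("<hex dump of an EPS statistics packet>")
# uhf_to_json("<hex dump of a UHF statistics packet>")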
|
139011
|
import pytest
from sqlalchemy import Column, Integer
from dbeditor.database import Database
from dbeditor.table_builder import BuilderGroup
@pytest.fixture
def group(database: Database) -> BuilderGroup:
g = BuilderGroup(database.engine)
g.start_building("example") # FIXME: fixture based on testing code
c = Column("id", Integer, primary_key=True, autoincrement=True)
g["example"].add_column(c)
return g
@pytest.mark.parametrize(
"table_name, expected", [("example", True), ("Lorem", False)]
)
def test_builder_group_contains(
group: BuilderGroup, table_name: str, expected: bool
) -> None:
assert (table_name in group) == expected
def test_builder_group_getitem(group: BuilderGroup) -> None:
assert group["example"] == group._builders["example"]
with pytest.raises(KeyError):
_ = group["item"]
# FIXME: depends on contains
def test_builder_group_delitem(group: BuilderGroup) -> None:
del group["example"]
assert "example" not in group
def test_builder_group_len(group: BuilderGroup) -> None:
assert len(group) == 1
# FIXME: depends on contains
def test_builder_group_start_building(group: BuilderGroup) -> None:
assert "new_table" not in group
group.start_building("new_table")
assert "new_table" in group
assert list(group["new_table"]) == []
def test_builder_group_start_building_on_existing_table(
group: BuilderGroup,
) -> None:
assert "new_table" not in group
group.start_building("new_table")
assert "new_table" in group
with pytest.raises(ValueError):
group.start_building("new_table")
# FIXME: depends on contains
def test_builder_group_create_table(
group: BuilderGroup, database: Database
) -> None:
group.create_table("example", database.metadata)
assert database.get_tables() == ["first", "second", "example"]
|
139018
|
from gi.repository import Gio, Gtk, GLib, Gdk
import pkg_resources
from typing import List
from ocrd_browser.util.gtk import ActionRegistry
from ocrd_browser.ui import MainWindow, AboutDialog, OpenDialog
from ocrd_browser.view import ViewRegistry
class OcrdBrowserApplication(Gtk.Application):
def __init__(self) -> None:
Gtk.Application.__init__(self, application_id='org.readmachine.ocrd-browser',
flags=Gio.ApplicationFlags.HANDLES_OPEN)
self.actions = ActionRegistry(for_widget=self)
self.view_registry = ViewRegistry.create_from_entry_points()
def do_startup(self) -> None:
Gtk.Application.do_startup(self)
self.actions.create('new')
self.actions.create('open')
self.actions.create('about')
self.actions.create('quit')
for entry_point in pkg_resources.iter_entry_points('ocrd_browser_ext'):
(entry_point.load())(self)
self.load_css()
def load_css(self) -> None:
css = Gtk.CssProvider()
css.load_from_resource('/org/readmachine/ocrd-browser/css/theme.css')
Gtk.StyleContext().add_provider_for_screen(Gdk.Screen.get_default(), css, Gtk.STYLE_PROVIDER_PRIORITY_USER)
# css = Gtk.CssProvider()
# css.load_from_path('/home/jk/PycharmProjects/ocrd-browser/gresources/css/test.css')
# Gtk.StyleContext().add_provider_for_screen(Gdk.Screen.get_default(), css, Gtk.STYLE_PROVIDER_PRIORITY_USER)
def do_activate(self) -> None:
win = self.get_active_window()
if not win:
win = MainWindow(application=self)
win.present()
def on_about(self, _action: Gio.SimpleAction, _param: str = None) -> None:
about_dialog = AboutDialog(application=self, transient_for=self.get_active_window(), modal=True)
about_dialog.present()
def on_quit(self, _action: Gio.SimpleAction, _param: str = None) -> None:
open_windows: int = 0
window: MainWindow
for window in self.get_windows():
if isinstance(window, MainWindow) and window.close_confirm(): # type: ignore[unreachable]
window.destroy()
else:
open_windows += 1
if open_windows == 0:
self.quit()
def on_open(self, _action: Gio.SimpleAction, _param: str = None) -> None:
open_dialog = OpenDialog(application=self, transient_for=self.get_active_window(), modal=True)
response = open_dialog.run()
if response == Gtk.ResponseType.OK:
self.open_in_window(open_dialog.get_uri(), window=open_dialog.get_transient_for())
open_dialog.destroy()
def on_new(self, _action: Gio.SimpleAction, _param: str = None) -> None:
win = MainWindow(application=self)
win.present()
def do_open(self, files: List[Gio.File], file_count: int, hint: str) -> int:
for file in files:
self.open_in_window(file.get_uri(), window=None)
return 0
def open_in_window(self, uri: str, window: MainWindow = None) -> None:
if not window or not window.document.empty:
window = MainWindow(application=self)
window.present()
GLib.timeout_add(10, window.open, uri)
|
139038
|
from requests import Response
from ...lib.gravity import Module
__all__ = [
"BaseServerForTrigger",
"BaseServerForHostingBuild"
]
class BaseServerForTrigger(Module):
"""
Abstract base class for API of triggering builds on automation (CI) server
"""
def trigger_build(self, revision: str) -> None: # pylint: disable=no-self-use
raise RuntimeError("Trigger build function is not defined for current driver.")
class BaseServerForHostingBuild(Module):
"""
Abstract base class for API of hosting CI builds on automation server
"""
def add_build_tag(self, tag: str) -> Response: # pylint: disable=no-self-use
raise RuntimeError("Tag adding function is not defined for current driver.")
def report_build_location(self) -> str:
raise NotImplementedError
def artifact_path(self, local_artifacts_dir: str, item: str) -> str:
raise NotImplementedError
|
139045
|
from photons_protocol.packets import dictobj
from photons_protocol.messages import T
from delfick_project.norms import sb
from bitarray import bitarray
import binascii
emptybt = bitarray("0000000000000000000000000000000000000000000000000000000000000000")
target_cache = {}
def look_at_target(pkt, value):
if value in (None, "0000000000000000", b"\x00" * 8, emptybt):
pkt.addressable = True
pkt.tagged = True
else:
pkt.tagged = False
return value
class FrameHeader(dictobj.PacketSpec):
fields = [
("size", T.Uint16.default(lambda pkt: int(pkt.size_bits(pkt) / 8))),
("protocol", T.Uint16.S(12).default(1024)),
("addressable", T.Bool.default(lambda pkt: True)),
(
"tagged",
T.Bool.default(lambda pkt: pkt.actual("target") in (None, b"\x00" * 8, emptybt)),
),
("reserved1", T.Reserved(2, left=True)),
("source", T.Uint32),
]
class FrameAddress(dictobj.PacketSpec):
fields = [
("target", T.Bytes(64).transform(look_at_target, look_at_target)),
("reserved2", T.Reserved(48)),
("res_required", T.Bool.default(True)),
("ack_required", T.Bool.default(True)),
("reserved3", T.Reserved(6)),
("sequence", T.Uint8),
]
class ProtocolHeader(dictobj.PacketSpec):
fields = [
("reserved4", T.Reserved(64)),
("pkt_type", T.Uint16.default(lambda pkt: pkt.Payload.message_type)),
("reserved5", T.Reserved(16)),
]
class LIFXPacket(dictobj.PacketSpec):
"""
The LIFXPacket represents protocol 1024.
It can be used to generate payload messages for this protocol.
This is the ``parent_packet`` for this protocol. This means
any message can be represented with this class using a payload as
``bytes``. Specific message classes will represent the payload as a
dictionary of data.
"""
parent_packet = True
fields = [
("frame_header", FrameHeader),
("frame_address", FrameAddress),
("protocol_header", ProtocolHeader),
("payload", "Payload"),
]
@property
def Key(self):
key = self.__dict__.get("Key", None)
if key is None:
key = (self.protocol, self.pkt_type, repr(self.payload))
self.__dict__["Key"] = key
return key
@Key.deleter
def Key(self):
if "Key" in self.__dict__:
del self.__dict__["Key"]
@property
def serial(self):
target = self.target
if target in (None, sb.NotSpecified):
return None
serial = target_cache.get(target)
if serial is None:
serial = target_cache[target] = binascii.hexlify(target[:6]).decode()
return serial
@property
def represents_ack(self):
return self.Payload.represents_ack
def __or__(self, kls):
"""
Determine if this object is of type ``kls``. It does this by looking at
the ``protocol`` and ``message_type`` values on the ``kls.Payload`` and this
instance and returning whether they are equal.
"""
this_protocol = dictobj.__getitem__(self, "protocol")
this_protocol = this_protocol if this_protocol is not sb.NotSpecified else self.protocol
if this_protocol != kls.Payload.Meta.protocol:
return False
this_pkt_type = dictobj.__getitem__(self, "pkt_type")
this_pkt_type = this_pkt_type if this_pkt_type is not sb.NotSpecified else self.pkt_type
return this_pkt_type == kls.Payload.message_type
@classmethod
def message(kls, message_type, *payload_fields, multi=None):
"""
This is to be used in conjunction with ``photons_protocol.messages.Messages``
.. code-block:: python
from photons_protocol.messages import Messages
class MyMessages(Messages):
MyMessage = LIFXPacket.message(13
, ("field_one", field_one_type)
, ("field_two", field_two_type)
)
This method returns a function that when called will return a new class
representing the message you are creating.
You may also use the fields from an existing packet by doing something
like:
.. code-block:: python
from photons_protocol.messages import Messages
class MyMessages(Messages):
MyMessage = LIFXPacket.message(13
, ("field_one", field_one_type)
, ("field_two", field_two_type)
)
MyOtherMessage = MyMessage.using(14)
Here, ``MyOtherMessage`` will use the same fields as ``MyMessage`` but
will have a ``pkt_type`` of ``14`` instead of ``13``.
And you can specify multiple replies options with the multi keyword:
.. code-block:: python
from photons_protocol.messages import Messages, MultiOptions
class MyMessages(Messages):
MyMessage = LIFXPacket.message(13
, ("field_one", field_one_type)
, ("field_two", field_two_type)
, multi = MultiOptions(
                    # expect MyMessageReply messages
lambda req: MyMessages.MyMessageReply
# Use total on the reply packet to determine how many packets to receive
, lambda res: res.total
)
)
            MyMessageReply = LIFXPacket.message(14
, ("total", total_type)
)
If you expect multiple replies but don't know how many to expect you can say:
.. code-block:: python
class MyMessages(Messages):
MyMessage = LIFXPacket.message(13
, ("field_one", field_one_type)
, ("field_two", field_two_type)
, multi = -1
)
"""
def maker(name):
Payload = type(
"{0}Payload".format(name),
(dictobj.PacketSpec,),
{
"fields": list(payload_fields),
"message_type": message_type,
"represents_ack": message_type == 45,
},
)
Payload.Meta.protocol = 1024
Payload.Meta.multi = multi
res = type(name, (LIFXPacket,), {"Payload": Payload, "parent_packet": False})
res.Meta.parent = LIFXPacket
res.Meta.multi = multi
return res
maker._lifx_packet_message = True
maker.using = lambda mt, **kwargs: kls.message(mt, *payload_fields, **kwargs)
return maker
class Payload(dictobj.PacketSpec):
message_type = 0
fields = []
LIFXPacket.Meta.protocol = 1024
LIFXPacket.Payload.Meta.protocol = 1024
# Helper for creating messages
msg = LIFXPacket.message
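# Example sketch (hedged): concrete messages are declared with the ``msg`` helper, as in
# the docstring above. The message number and field below are illustrative only, and
# ``Messages`` comes from photons_protocol.messages.
# class ExampleMessages(Messages):
#     ExampleMessage = msg(999, ("example_field", T.Uint8))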
|
139147
|
import torch
import models
import cfg
import numpy as np
from utils.utils import set_log_dir, save_checkpoint, create_logger, pruning_generate, see_remain_rate, rewind_weight, see_remain_rate_orig
args = cfg.parse_args()
gen_net = eval('models.sngan_cifar10.Generator')(args=args).cuda()
pruning_generate(gen_net, 1-0.8**10)
checkpoint = torch.load(args.resume)
print(checkpoint['gen_state_dict'].keys())
gen_net.load_state_dict(checkpoint['gen_state_dict'])
see_remain_rate(gen_net)
# Count, per pruning mask, the total number of input channels (num_kernel), those whose
# mask is entirely zero (zero_kernel), and those that are more than 90% zero (n_kernel).
num_kernel = 0
zero_kernel = 0
n_kernel = 0
state_dict = checkpoint['gen_state_dict']
for key in state_dict.keys():
if 'mask' in key:
mask = state_dict[key]
print(mask.shape)
num_kernel = num_kernel + mask.shape[1]
for i in range(mask.shape[1]):
if np.all(mask[:, i, :, :].cpu().numpy() == 0):
zero_kernel = zero_kernel + 1
if np.sum(mask[:, i, :, :].cpu().numpy() == 0) > mask[:,i,:,:].reshape(-1).shape[0] * 0.9:
n_kernel = n_kernel + 1
print(zero_kernel)
print(n_kernel)
print(num_kernel)
|
139151
|
import re
emails = '''
<EMAIL>
<EMAIL>
<EMAIL>
'''
pattern = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
matches = pattern.finditer(emails)
for match in matches:
print(match)
|
139152
|
import common
import paths
def test_simple_move(activate_package, make_workspace):
activate_package(package='basic', into='main')
workspace = make_workspace('main')
changes = workspace.move(
'basic/bar.py',
31,
'basic/foo.py')
workspace.perform(changes)
common.compare_workspaces(
paths.approved('simple_move'),
paths.active('main', 'basic'))
|
139232
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
import cPickle
from tensorflow.contrib import slim
#Load features and labels
features = cPickle.load(open('nn_features.p', 'rb'))
labels = cPickle.load(open('labels.p', 'rb'))
mask = np.random.choice(features.shape[0], features.shape[0], replace=False)
features = features[mask]
labels = labels[mask]
val_features = features[:10000]
train_features = features[10000:]
val_labels = labels[:10000]
train_labels = labels[10000:]
positive_mask = []
negative_mask = []
for i in range(train_labels.shape[0]):
if np.array_equal(train_labels[i], [0,1]):
positive_mask.append(i)
else:
negative_mask.append(i)
pos_features = train_features[positive_mask]
pos_labels = train_labels[positive_mask]
neg_features = train_features[negative_mask]
neg_labels = train_labels[negative_mask]
#change these values later
learning_rate = 0.001
training_epochs = 10
display_step = 1
in_dim = features.shape[1]
n_samples = train_features.shape[0]
batch_size = 512
num_features = features.shape[1]
num_classes = labels.shape[1]
num_iter = 1000
n_hidden1 = 256
n_hidden2 = 256
n_hidden3 = 256
reg_strength = 5e-4
dropout_rate = 0.5
#define placeholder for our input
X = tf.placeholder("float", [None, num_features])
Y = tf.placeholder("float", [None, num_classes])
#drop_p = tf.placeholder(tf.float32)
def model(x):
layer = slim.fully_connected(x,n_hidden1, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden1')
layer = slim.batch_norm(layer, scope='bn1')
layer = slim.dropout(layer, dropout_rate, scope='dropout1')
layer = slim.fully_connected(layer,n_hidden2, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden2')
layer = slim.batch_norm(layer, scope='bn2')
layer = slim.dropout(layer, dropout_rate, scope='dropout2')
layer = slim.fully_connected(layer,n_hidden3, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden3')
out_layer = slim.fully_connected(layer,num_classes, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='out_layer')
return out_layer
"""
# Hidden layer with RELU activation
layer = slim.fully_connected(x,n_hidden1, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden1')
layer = slim.fully_connected(layer,n_hidden2, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden2')
out_layer = slim.fully_connected(layer,num_classes, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=slim.l2_regularizer(reg_strength),scope='out_layer')
return out_layer
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.add(tf.matmul(layer_2, weights['out']), biases['out'])
return out_layer
"""
# Store layers weight & bias (used only by the commented-out manual model above)
weights = {
'h1': tf.Variable(tf.random_normal([num_features, n_hidden1])),
'h2': tf.Variable(tf.random_normal([n_hidden1, n_hidden2])),
'out': tf.Variable(tf.random_normal([n_hidden2, num_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden1])),
'b2': tf.Variable(tf.random_normal([n_hidden2])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
recommendor = model(X)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(recommendor, Y))
#regularizers = (tf.nn.l2_loss(weights['h1']) + tf.nn.l2_loss(biases['b1']) + tf.nn.l2_loss(weights['h2']) + tf.nn.l2_loss(biases['b2']))
#loss += reg_strength * regularizers
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Test model
correct_prediction = tf.equal(tf.argmax(recommendor, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
probabilities = tf.nn.softmax(recommendor)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_loss = 0.
total_batch = int(train_features.shape[0]/batch_size)
# Loop over all batches
start = 0
end = batch_size
for i in range(total_batch):
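            # Build a class-balanced minibatch: half positive and half negative examples,
            # sampled without replacement and then shuffled together.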
pos_mask = np.random.choice(pos_features.shape[0], batch_size/2, replace=False)
neg_mask = np.random.choice(neg_features.shape[0], batch_size/2, replace=False)
batch_x = np.vstack((pos_features[pos_mask], neg_features[neg_mask]))
batch_y = np.vstack((pos_labels[pos_mask], neg_labels[neg_mask]))
shuffle = np.random.choice(batch_x.shape[0], batch_x.shape[0], replace=False)
batch_x = batch_x[shuffle]
batch_y = batch_y[shuffle]
#batch_x, batch_y = train_features[start:end], train_labels[start:end]
# Run optimization op (backprop) and loss op (to get loss value)
_, c = sess.run([optimizer, loss], feed_dict={X: batch_x,
Y: batch_y})
# Compute average loss
avg_loss += c / total_batch
start = end
end += batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "loss=", \
"{:.9f}".format(avg_loss))
print("Optimization Finished!")
acc, p = sess.run([accuracy, probabilities], feed_dict={X: val_features, Y: val_labels})
print('Val Accuracy: ', acc)
print('probabilities: ', p[:,1])
|
139244
|
import io
import os
import time
import argparse
import random
import logging
import warnings
import multiprocessing
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import Block, nn
from mxnet.gluon.data.sampler import Sampler, SequentialSampler
import gluonnlp as nlp
from gluonnlp.model import get_model
from gluonnlp.data import BERTTokenizer
from gluonnlp.data.dataset import SimpleDataset, Dataset
import json
import collections
from tmnt.preprocess.vectorizer import TMNTVectorizer
from tmnt.data_loading import to_label_matrix, PairedDataLoader, RoundRobinDataLoader
from typing import Dict
from gluonnlp.data import BERTSentenceTransform
from itertools import accumulate
class JsonlDataset(SimpleDataset):
"""A dataset wrapping over a jsonlines (.jsonl) file, each line is a json object.
Parameters:
filename : Path to the .jsonl file.
        txt_key: Json attribute key to use for selecting text document strings
        label_key: Json attribute key to use to get string labels
        encoding : File encoding format. (default 'utf8')
        label_remap : Dictionary to map labels.
        random_drop_pct : Probability of randomly dropping each input line. (default 0.0)
"""
def __init__(self, filename: str, txt_key: str, label_key: str,
encoding: str = 'utf8', label_remap: Dict[str,str] = None, random_drop_pct: float = 0.0):
if not isinstance(filename, (tuple, list)):
filename = (filename, )
self._filenames = [os.path.expanduser(f) for f in filename]
self._encoding = encoding
self._txt_key = txt_key
self._label_key = label_key
self._label_remap = label_remap
self._random_drop_pct = random_drop_pct
self._random_drop = random_drop_pct > 0.0
super(JsonlDataset, self).__init__(self._read())
def _read(self):
all_samples = []
for filename in self._filenames:
samples = []
with open(filename, 'r', encoding=self._encoding) as fin:
for line in fin.readlines():
if not self._random_drop or (random.uniform(0,1) > self._random_drop_pct):
s = json.loads(line, object_pairs_hook=collections.OrderedDict)
label = s.get(self._label_key)
if self._label_remap is not None:
label = self._label_remap.get(label)
samples.append((s[self._txt_key], label))
all_samples += samples
return all_samples
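# Example sketch (hedged): "train.jsonl" and its "text"/"label" keys are placeholders
# for whatever the caller's jsonlines data actually uses.
# ds = JsonlDataset("train.jsonl", txt_key="text", label_key="label")
# print(len(ds), ds[0])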
class UnevenArrayDataset(Dataset):
"""A dataset that combines multiple dataset-like objects, e.g.
Datasets, lists, arrays, etc. but does NOT require lengths to be the same.
The i-th sample is defined as `(x1[i % len(x1)], x2[i % len(x2)], ...)`.
Parameters:
*args : one or more dataset-like objects. The data arrays.
"""
def __init__(self, *args):
assert len(args) > 0, "Needs at least 1 arrays"
self._sub_lengths = [len(a) for a in args]
self._length = max(self._sub_lengths) # length is equal to maximum subdataset length
self._data = []
for i, data in enumerate(args):
if isinstance(data, mx.nd.NDArray) and len(data.shape) == 1:
data = data.asnumpy()
self._data.append(data)
def __getitem__(self, idx):
if idx >= self._length:
raise StopIteration
if len(self._data) == 1:
return self._data[0][idx]
else:
return tuple(data[idx % data_len] for data,data_len in zip(self._data, self._sub_lengths))
def __len__(self):
return self._length
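# Example: combining arrays of unequal length; the shorter one wraps around modulo its length.
# ds = UnevenArrayDataset([1, 2, 3, 4, 5], ['a', 'b'])
# [ds[i] for i in range(len(ds))]  # -> [(1, 'a'), (2, 'b'), (3, 'a'), (4, 'b'), (5, 'a')]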
class BERTDatasetTransform(object):
"""Dataset transformation for BERT-style sentence classification or regression.
Parameters
----------
tokenizer : BERTTokenizer.
Tokenizer for the sentences.
max_seq_length : int.
Maximum sequence length of the sentences.
    class_labels : list of int, float or None. defaults None
        List of all label ids for the classification task.
        If class_labels is None, the default task is regression
pad : bool, default True
Whether to pad the sentences to maximum length.
pair : bool, default True
Whether to transform sentences or sentence pairs.
has_label: bool.
Whether labels are present for supervised learning
vectorizer: TMNTVectorizer
TMNTVectorizer to generate bag of words
bert_vocab_size: int
Use the raw BERT word-pieces as the bag-of-words vocabulary
num_classes: int
Must be provided if class_labels isn't provided
"""
def __init__(self,
tokenizer,
max_seq_length,
class_labels=None,
label_alias=None,
pad=True,
pair=True,
has_label=True,
vectorizer=None,
bert_vocab_size=0,
num_classes=None):
self.class_labels = class_labels
self.has_label = has_label
self.use_bert_bow = bert_vocab_size > 0
self.bert_vocab_size = bert_vocab_size
self._label_dtype = 'int32' if class_labels else 'float32'
self.num_classes = len(class_labels) if class_labels else num_classes
if has_label and class_labels:
self._label_map = {}
for (i, label) in enumerate(class_labels):
self._label_map[label] = i
if label_alias:
for key in label_alias:
if label_alias[key] in self._label_map:
self._label_map[key] = self._label_map[label_alias[key]]
self._bert_xform = BERTSentenceTransform(
tokenizer, max_seq_length, pad=pad, pair=pair)
self.vectorizer = vectorizer
def __call__(self, line):
"""Perform transformation for sequence pairs or single sequences.
The transformation is processed in the following steps:
- tokenize the input sequences
- insert [CLS], [SEP] as necessary
- generate type ids to indicate whether a token belongs to the first
sequence or the second sequence.
- generate valid length
For sequence pairs, the input is a tuple of 3 strings:
text_a, text_b and label.
Inputs:
text_a: 'is this jacksonville ?'
text_b: 'no it is not'
label: '0'
Tokenization:
text_a: 'is this jack ##son ##ville ?'
text_b: 'no it is not .'
Processed:
tokens: '[CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]'
type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
valid_length: 14
label: 0
For single sequences, the input is a tuple of 2 strings: text_a and label.
Inputs:
text_a: 'the dog is hairy .'
label: '1'
Tokenization:
text_a: 'the dog is hairy .'
Processed:
text_a: '[CLS] the dog is hairy . [SEP]'
type_ids: 0 0 0 0 0 0 0
valid_length: 7
label: 1
Parameters
----------
line: tuple of str
Input strings. For sequence pairs, the input is a tuple of 3 strings:
(text_a, text_b, label). For single sequences, the input is a tuple
of 2 strings: (text_a, label).
Returns
-------
np.array: input token ids in 'int32', shape (seq_length,)
np.array: valid length in 'int32', shape (1,)
np.array: input token type ids in 'int32', shape (seq_length,)
np.array: classification task: label id in 'int32', shape (num_classes,),
regression task: label in 'float32', shape (1,)
"""
if self.has_label:
input_ids, valid_length, segment_ids = self._bert_xform(line[:-1])
label_str = line[-1]
# map to int if class labels are available
if self.class_labels:
if label_str:
labels = [ self._label_map.get(label,0) for label in label_str.split(',') ]
if labels is None or len(labels) == 0:
labels = [0]
else:
labels = [0]
else:
try:
labels=[int(label_str)]
except:
labels=[0]
#label = np.array(labels, dtype=self._label_dtype)
if self.num_classes is not None and self.num_classes > 1:
label_mat, _ = to_label_matrix([labels], num_labels=self.num_classes)
else:
label_mat = np.array([[0.0]]) # just fill with zeros; assumption is that labels will be ignored
bow = None
if self.use_bert_bow:
bow = np.zeros(self.bert_vocab_size)
inds, cnts = np.unique(input_ids, return_counts=True)
bow[inds] = cnts
bow = mx.nd.array(np.expand_dims(bow, 0), dtype='float32')
elif self.vectorizer:
bow,_ = self.vectorizer.transform(line[:-1])
bow = mx.nd.array(bow, dtype='float32')
return input_ids, valid_length, segment_ids, bow, label_mat[0]
else:
return self._bert_xform(line)
class FixedSeedRandomSampler(Sampler):
"""Samples elements from [0, length) randomly without replacement but with a FIXED seed to reproduce.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, length, rng=1234):
self._length = length
self._rng = rng
self._calls = 0
def __iter__(self):
self._calls += 1
np.random.seed(self._rng + self._calls)
indices = np.arange(self._length)
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
def preprocess_seq_data(trans, class_labels, dataset, batch_size, max_len, train_mode=True, pad=False, aux_dataset=None):
pool = multiprocessing.Pool()
# transformation for data train and dev
label_dtype = 'float32' # if not task.class_labels else 'int32'
bow_count_dtype = 'float32'
# data train
data_ds = mx.gluon.data.SimpleDataset(pool.map(trans, dataset))
#data_ds_len = data_ds.transform(lambda input_id, length, segment_id, bow, label_id: length, lazy=False)
final_ds = data_ds.transform( lambda a,b,c,d,e: ((a,b,c,d,e),) ) # singleton tuple
final_ds_len = data_ds.transform(lambda input_id, length, segment_id, bow, label_id: length, lazy=False)
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)))
if train_mode:
# bucket sampler
num_buckets = min(6, len(data_ds) // batch_size)
batch_sampler = nlp.data.sampler.FixedBucketSampler(
final_ds_len,
batch_size=batch_size,
num_buckets=num_buckets,
            ratio=0.2, # may avoid batches with size = 1 (which may trigger a bug)
shuffle=True)
# data loader for training
loader = gluon.data.DataLoader(
dataset=final_ds,
num_workers=4,
batch_sampler=batch_sampler,
batchify_fn=batchify_fn)
else:
loader = gluon.data.DataLoader(
dataset=final_ds,
batch_size=batch_size,
num_workers=4,
shuffle=False,
batchify_fn=batchify_fn)
return loader, len(data_ds)
def get_aux_dataloader(trans, batch_size, aux_dataset):
pool = multiprocessing.Pool()
label_dtype = 'float32' # if not task.class_labels else 'int32'
bow_count_dtype = 'float32'
aux_ds = mx.gluon.data.SimpleDataset(pool.map(trans, aux_dataset))
a_batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))
loader_aux = gluon.data.DataLoader(
dataset=aux_ds,
num_workers=4,
last_batch = 'rollover', ## need to ensure all batches are the same size here
shuffle=True, # shuffle optional (for training)
batch_size = batch_size,
batchify_fn = a_batchify_fn)
return loader_aux
def get_bert_datasets(class_labels,
vectorizer,
train_ds,
dev_ds,
batch_size,
max_len,
aux_ds = None,
bert_model_name = 'bert_12_768_12',
bert_dataset = 'book_corpus_wiki_en_uncased',
pad=False,
use_bert_vocab=False,
label_alias=None,
num_classes = None,
ctx=mx.cpu()):
if class_labels is None and num_classes is None:
raise Exception("Must provide class_labels or num_classes")
bert, bert_vocabulary = get_model(
name=bert_model_name,
dataset_name=bert_dataset,
pretrained=True,
ctx=ctx,
use_pooler=True,
use_decoder=False,
use_classifier=False)
do_lower_case = 'uncased' in bert_dataset
bert_tokenizer = BERTTokenizer(bert_vocabulary, lower=do_lower_case)
trans = BERTDatasetTransform(bert_tokenizer, max_len,
class_labels=class_labels,
label_alias=label_alias,
pad=pad, pair=False,
has_label=True,
vectorizer=vectorizer,
bert_vocab_size = len(bert_vocabulary) if use_bert_vocab else 0,
num_classes = num_classes)
train_data, num_train_examples = preprocess_seq_data(trans, class_labels, train_ds, batch_size, max_len, train_mode=True, pad=pad)
if aux_ds is not None:
aux_data = get_aux_dataloader(trans, batch_size, aux_ds)
else:
aux_data = None
dev_data, _ = preprocess_seq_data(trans, class_labels, dev_ds, batch_size, max_len, train_mode=False, pad=pad)
return train_data, dev_data, aux_data, num_train_examples, bert, bert_vocabulary
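# Usage sketch (hedged): the dataset objects, vectorizer and label list below are placeholders.
# train_loader, dev_loader, aux_loader, n_train, bert, vocab = get_bert_datasets(
#     class_labels=["neg", "pos"], vectorizer=my_vectorizer,
#     train_ds=train_ds, dev_ds=dev_ds, batch_size=32, max_len=128)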
############
# Handle dataloading for Smoothed Deep Metric Loss with parallel batching
############
def preprocess_data_metriclearn(trans, class_labels, train_a_ds, train_b_ds, batch_size, max_len, pad=False, bucket_sample=False, aux_dataset=None):
"""Train/eval Data preparation function."""
pool = multiprocessing.Pool()
label_dtype = 'float32' # if not task.class_labels else 'int32'
bow_count_dtype = 'float32'
a_data_train = mx.gluon.data.SimpleDataset(pool.map(trans, train_a_ds))
b_data_train = mx.gluon.data.SimpleDataset(pool.map(trans, train_b_ds))
# magic that "zips" these two datasets and pairs batches
joined_data_train = UnevenArrayDataset(a_data_train, b_data_train)
    joined_len = joined_data_train.transform( lambda a, b: a[1] + b[1], lazy=False ) ## a[1] and b[1] are lengths, bucket by their sum
if aux_dataset is None:
final_ds = joined_data_train.transform( lambda a,b: ((a,b),) ) # singleton tuple
final_len = joined_len
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Tuple(
## tuple for a_data: (ids, lengths, segments, bow vector, label)
nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)),
## tuple for b_data
nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))))
else:
aux_ds = mx.gluon.data.SimpleDataset(pool.map(trans, aux_dataset))
final_ds = UnevenArrayDataset(joined_data_train, aux_ds)
logging.info("Uneven dataset created, size = {} (from data_ds = {}, aux_ds = {})".format(len(final_ds), len(joined_data_train), len(aux_ds)))
final_len = final_ds.transform( lambda a, b: a[0][1] + a[1][1] + b[1], lazy=False )
batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Tuple(
## tuple for a_data: (ids, lengths, segments, bow vector, label)
nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)),
## tuple for b_data
nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))),
            # tuple for auxiliary data
nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype)))
if bucket_sample:
batch_sampler = nlp.data.sampler.FixedBucketSampler(
final_len,
batch_size=batch_size,
num_buckets=4,
ratio=0.2,
shuffle=True)
loader = gluon.data.DataLoader(
dataset=final_ds,
num_workers=4,
batch_sampler=batch_sampler,
batchify_fn=batchify_fn)
else:
loader = gluon.data.DataLoader(
dataset=final_ds,
num_workers=4,
shuffle=False, batch_size = batch_size,
batchify_fn=batchify_fn)
return loader, len(final_ds)
def preprocess_data_metriclearn_separate(trans1, trans2, class_labels, train_a_ds, train_b_ds, batch_size, shuffle_both=False, shuffle_a_only=True):
"""Train/eval Data preparation function."""
pool = multiprocessing.Pool()
label_dtype = 'float32' # if not task.class_labels else 'int32'
bow_count_dtype = 'float32'
a_data_train = mx.gluon.data.SimpleDataset(pool.map(trans1, train_a_ds))
b_data_train = mx.gluon.data.SimpleDataset(pool.map(trans2, train_b_ds))
a_batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))
b_batchify_fn = nlp.data.batchify.Tuple(
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(),
nlp.data.batchify.Pad(axis=0), nlp.data.batchify.Stack(bow_count_dtype), nlp.data.batchify.Stack(label_dtype))
## set up 'parallel' samplers that always stay in sync
if shuffle_both:
a_sampler = FixedSeedRandomSampler(len(a_data_train), rng=1234)
b_sampler = FixedSeedRandomSampler(len(b_data_train), rng=1234)
elif shuffle_a_only:
a_sampler = FixedSeedRandomSampler(len(a_data_train), rng=1234)
b_sampler = SequentialSampler(len(b_data_train))
else:
a_sampler = SequentialSampler(len(a_data_train))
b_sampler = SequentialSampler(len(b_data_train))
a_loader_train = gluon.data.DataLoader(
dataset=a_data_train,
num_workers=4,
last_batch = 'discard', ## need to ensure all batches are the same size here AND stay synchronized
sampler = a_sampler,
batch_size = batch_size,
batchify_fn = a_batchify_fn)
b_loader_train = gluon.data.DataLoader(
dataset=b_data_train,
num_workers=4,
sampler = b_sampler,
batch_size = batch_size,
batchify_fn = b_batchify_fn)
paired_loader = PairedDataLoader(a_loader_train, b_loader_train)
return paired_loader, len(a_data_train)
def get_dual_bert_datasets(class_labels,
vectorizer,
train_ds1,
train_ds2,
model_name,
dataset,
max_len1,
max_len2,
pad,
use_bert_vocab=False,
shuffle_both=False,
shuffle_a_only=False,
aux_dataset = None,
forced_batch_size = 0,
aux_batch_size = 32,
ctx=mx.cpu()):
bert, bert_vocabulary = get_model(
name=model_name,
dataset_name=dataset,
pretrained=True,
ctx=ctx,
use_pooler=True,
use_decoder=False,
use_classifier=False)
do_lower_case = 'uncased' in dataset
bert_tokenizer = BERTTokenizer(bert_vocabulary, lower=do_lower_case)
def get_transform(_class_labels, _max_len):
trans = BERTDatasetTransform(bert_tokenizer,
_max_len,
class_labels = _class_labels,
label_alias=None,
pad=pad, pair=False,
has_label=True,
vectorizer=vectorizer,
bert_vocab_size=len(bert_vocabulary) if use_bert_vocab else 0)
return trans
if isinstance(train_ds1, list) and isinstance(train_ds2, list) and len(train_ds1) == len(train_ds2):
trans1s = [ get_transform(class_labels[i], max_len1) for i in range(len(train_ds1)) ]
trans2s = [ get_transform(class_labels[i], max_len2) for i in range(len(train_ds2)) ]
train_sets = [
preprocess_data_metriclearn_separate(trans1s[i], trans2s[i], class_labels[i], train_ds1[i], train_ds2[i],
(forced_batch_size or len(train_ds2[i])), shuffle_a_only=shuffle_a_only, shuffle_both=shuffle_both)
for i in range(len(train_ds1)) ]
num_train_examples = list(accumulate([ s for _,s in train_sets], lambda x,y: x + y)).pop()
loaders = [l for l,_ in train_sets]
train_data = RoundRobinDataLoader(loaders)
else:
batch_size = forced_batch_size or len(train_ds2)
trans1 = get_transform(class_labels, max_len1)
trans2 = get_transform(class_labels, max_len2)
train_data, num_train_examples = preprocess_data_metriclearn_separate(
trans1, trans2, class_labels, train_ds1, train_ds2, batch_size, shuffle_a_only=shuffle_a_only, shuffle_both=shuffle_both)
if aux_dataset is not None:
aux_trans = get_transform([], max_len1)
aux_dataloader = get_aux_dataloader(aux_trans, aux_batch_size, aux_dataset)
else:
aux_dataloader = None
return train_data, aux_dataloader, num_train_examples, bert, bert_vocabulary
|
139265
|
from __future__ import absolute_import
from selenium import webdriver
import multiprocessing
import requests
import time
import unittest
import percy
import sys
import os
from .utils import invincible, wait_for
class IntegrationTests(unittest.TestCase):
def percy_snapshot(cls, name):
if ('PERCY_PROJECT' in os.environ and
os.environ['PERCY_PROJECT'] == 'solvebio/contrib/dash'):
snapshot_name = '{} - Py{}'.format(
name, sys.version_info.major
).replace('/', '-')
try:
cls.percy_runner.snapshot(
name=snapshot_name
)
except Exception:
print('Saving "{}" failed'.format(snapshot_name))
@classmethod
def setUpClass(cls):
super(IntegrationTests, cls).setUpClass()
cls.driver = webdriver.Chrome()
if ('PERCY_PROJECT' in os.environ and
os.environ['PERCY_PROJECT'] == 'solvebio/contrib/dash'):
loader = percy.ResourceLoader(
webdriver=cls.driver
)
cls.percy_runner = percy.Runner(loader=loader)
cls.percy_runner.initialize_build()
@classmethod
def tearDownClass(cls):
super(IntegrationTests, cls).tearDownClass()
cls.driver.quit()
if ('PERCY_PROJECT' in os.environ and
os.environ['PERCY_PROJECT'] == 'solvebio/contrib/dash'):
cls.percy_runner.finalize_build()
def setUp(self):
super(IntegrationTests, self).setUp()
self.driver = webdriver.Chrome()
def wait_for_element_by_id(id):
wait_for(lambda: None is not invincible(
lambda: self.driver.find_element_by_id(id)
))
return self.driver.find_element_by_id(id)
self.wait_for_element_by_id = wait_for_element_by_id
def wait_for_element_by_css_selector(css_selector):
wait_for(lambda: None is not invincible(
lambda: self.driver.find_element_by_css_selector(css_selector)
))
return self.driver.find_element_by_css_selector(css_selector)
self.wait_for_element_by_css_selector = \
wait_for_element_by_css_selector
def tearDown(self):
super(IntegrationTests, self).tearDown()
time.sleep(5)
self.server_process.terminate()
time.sleep(5)
self.driver.quit()
def startServer(self, app):
def run():
app.scripts.config.serve_locally = True
app.run_server(
port=8050,
debug=False,
processes=2
)
# Run on a separate process so that it doesn't block
self.server_process = multiprocessing.Process(target=run)
self.server_process.start()
time.sleep(15)
# Visit the dash page
try:
self.driver.get('http://localhost:8050{}'.format(
app.config['routes_pathname_prefix'])
)
except Exception:
print('Failed attempt to load page, trying again')
print(self.server_process)
print(self.server_process.is_alive())
time.sleep(5)
print(requests.get('http://localhost:8050'))
self.driver.get('http://localhost:8050')
time.sleep(0.5)
# Inject an error and warning logger
logger = '''
window.tests = {};
window.tests.console = {error: [], warn: [], log: []};
var _log = console.log;
var _warn = console.warn;
var _error = console.error;
console.log = function() {
window.tests.console.log.push({
method: 'log',
arguments: arguments
});
return _log.apply(console, arguments);
};
console.warn = function() {
window.tests.console.warn.push({
method: 'warn',
arguments: arguments
});
return _warn.apply(console, arguments);
};
console.error = function() {
window.tests.console.error.push({
method: 'error',
arguments: arguments
});
return _error.apply(console, arguments);
};
'''
self.driver.execute_script(logger)
|
139272
|
import hashlib, json
import nacl.bindings
from nacl import encoding
from nacl.utils import random
from .utils import to_hex, from_hex, is_hex, str_to_bytes
def create_address(pubkey):
if is_hex(pubkey):
pubkey = from_hex(pubkey)
h = hashlib.new('ripemd160')
h.update(pubkey)
return h.digest()
class Key(object):
def __init__(self, public_key, secret_key):
self._pubkey = public_key
self._privkey = secret_key
self._address = create_address(self._pubkey)
@classmethod
def generate(cls, seed=None):
if seed:
if not isinstance(seed, bytes):
raise Exception("Seed must be bytes")
if len(seed) != nacl.bindings.crypto_sign_SEEDBYTES:
raise Exception(
"The seed must be exactly {} bytes long".format(nacl.bindings.crypto_sign_SEEDBYTES)
)
public_key, secret_key = nacl.bindings.crypto_sign_seed_keypair(seed)
else:
r = random(nacl.bindings.crypto_sign_SEEDBYTES)
public_key, secret_key = nacl.bindings.crypto_sign_seed_keypair(r)
return cls(public_key, secret_key)
@classmethod
def fromPrivateKey(cls, sk):
if len(sk) < 64:
raise Exception('Not a private key')
if is_hex(sk):
sk = from_hex(sk)
pubkey = sk[32:]
return cls(pubkey, sk)
@classmethod
def verify(cls, pubkey, message):
if is_hex(pubkey):
pubkey = from_hex(pubkey)
smessage = encoding.RawEncoder.decode(message)
pk = encoding.RawEncoder.decode(pubkey)
try:
return nacl.bindings.crypto_sign_open(smessage, pk)
except:
# Bad or forged signature
return False
def sign(self, msg):
message = str_to_bytes(msg)
raw_signed = nacl.bindings.crypto_sign(message, self._privkey)
return encoding.RawEncoder.encode(raw_signed)
def address(self, tohex=False):
if tohex:
return to_hex(self._address)
return self._address
def publickey(self, tohex=False):
if tohex:
return to_hex(self._pubkey)
return self._pubkey
def privatekey(self, tohex=False):
if tohex:
return to_hex(self._privkey)
return self._privkey
def to_json(self):
result = {
'address': self.address(tohex=True),
'publickey': self.publickey(tohex=True),
'privatekey': self.privatekey(tohex=True)
}
return json.dumps(result,indent=2)
|
139275
|
import graphene
from ...invoice import models
from ..core.types import Job, ModelObjectType
from ..meta.types import ObjectWithMetadata
class Invoice(ModelObjectType):
number = graphene.String()
external_url = graphene.String()
created_at = graphene.DateTime(required=True)
updated_at = graphene.DateTime(required=True)
message = graphene.String()
url = graphene.String(description="URL to download an invoice.")
class Meta:
description = "Represents an Invoice."
interfaces = [ObjectWithMetadata, Job, graphene.relay.Node]
model = models.Invoice
|
139295
|
class Solution:
def __init__(self):
self.map = {}
def cloneGraph(self, node):
if node is None:
return None
next = UndirectedGraphNode(node.label)
self.map[str(node.label)] = next
for tmp in node.neighbors:
if str(tmp.label) in self.map:
next.neighbors.append(self.map.get(str(tmp.label)))
else:
next.neighbors.append(self.cloneGraph(tmp))
return self.map.get(str(node.label))
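# Note (illustrative, not part of the original solution): this assumes the
# classic LeetCode definition of the node class, roughly:
#
#   class UndirectedGraphNode:
#       def __init__(self, x):
#           self.label = x
#           self.neighbors = []
#
# cloneGraph then returns a deep copy in which every node is visited once,
# with self.map caching label -> cloned node so that cycles do not recurse forever.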
|
139413
|
import operator
import rt
def parallel_sum(l, r):
"""Computes (l + (l+1) + ... + r)."""
# TODO(zhangwen): this function can either return an int or a future; this seems confusing...
if l == r:
return l
m = (l + r) // 2
sl = parallel_sum(l, m)
sr = parallel_sum(m + 1, r)
return rt.spawn(operator.add, (sl, sr))
def handler(event, context):
n = event["n"]
if n == 1:
return 1
else:
return parallel_sum(1, n).wait()
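# Illustrative sanity check (not part of the original module): once all spawned
# futures resolve, parallel_sum(l, r) should agree with the closed-form
# arithmetic series, e.g. parallel_sum(1, 10).wait() == 55 == 10 * 11 // 2.
def _reference_sum(l, r):
    """Sequential reference used only to illustrate what parallel_sum computes."""
    return (r - l + 1) * (l + r) // 2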
|
139449
|
import argparse
from blesuite.connection_manager import BLEConnectionManager
from blesuite_wrapper import ble_service_read, ble_service_read_async, ble_service_write, \
ble_handle_subscribe, ble_service_scan, ble_service_write_async, ble_run_smart_scan
from blesuite import utils
from blesuite.utils.print_helper import print_data_and_hex
from blesuite.utils import validators
import logging
__version__ = "2.0"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def parse_command():
"""
Creates parser and parses command line tool call.
:return: parsed arguments
"""
global __version__
#Dictionary of available commands. Place new commands here
cmd_choices = {'scan': "Scan for BTLE devices",
'smartscan': "Scan specified BTLE device for device information, services, characteristics "
"(including associated descriptors). Note: This scan takes longer than the service scan",
'servicescan': 'Scan specified address for all services, characteristics, and descriptors. ',
'read': "Read value from specified device and handle",
'write': "Write value to specific handle on a device. Specify the --data or --files options "
"to set the payload data. Only data or file data can be specified, not both "
"(data submitted using the data flag takes precedence over data in files).",
'subscribe': "Write specified value (0000,0100,0200,0300) to chosen handle and initiate listener.",
'spoof': 'Modify your Bluetooth adapter\'s BT_ADDR. Use --address to set the address. Some chipsets'
' may not be supported.'}
address_type_choices = ['public', 'random']
parser = argparse.ArgumentParser(prog="blesuite",
description='Bluetooth Low Energy (BTLE) tool set for communicating and '
'testing BTLE devices on the application layer.') # ,
# formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('command', metavar='command', type=str, nargs=1,
action='store', choices=cmd_choices.keys(),
help='BLESuite command you would like to execute. ' +
'The following are the currently supported commands:\n' +
'\n'.join(['\033[1m{}\033[0m: {}'.format(k, v) for k, v in cmd_choices.iteritems()]))
parser.add_argument('--async', action='store_true', help='\033[1m<read, write>\033[0m '
'Enable asynchronous writing/reading. Any output '
'will be displayed when received. This prevents '
'blocking.')
parser.add_argument('--skip-device-info-query', action='store_true', help='\033[1m<smartscan>\033[0m '
'When scanning a device, specify this flag '
'to force smartscan to skip querying the device '
'for common information such as device name. This '
'is helpful when devices do not implement these services.')
parser.add_argument('--smart-read', action='store_true', help='\033[1m<smartscan>\033[0m '
'When scanning a device, specify this flag '
'to force smartscan to attempt to read '
'from each discovered characteristic descriptor. '
'Note: This will increase scan time to handle '
'each read operation.')
parser.add_argument('-m', '--mode', metavar='mode', default=[1],
type=int, nargs=1, required=False,
action='store', help='\033[1m<subscribe>\033[0m '
'Selects which configuration to set '
'for a characteristic configuration descriptor. '
'0=off,1=notifications,2=indications,'
'3=notifications and indications')
parser.add_argument('--timeout', metavar='timeout', default=[5],
type=int, nargs=1,
required=False, action='store',
help='\033[1m<lescan, read, write>\033[0m '
'Timeout (in seconds) for attempting to retrieve data from a device '
'(ie reading from a descriptor handle). (Default: 5 seconds)')
parser.add_argument('--subscribe-timeout', metavar='subscribe-timeout', default=[None],
type=int, nargs=1,
required=False, action='store',
help='\033[1m<subscribe>\033[0m '
'Time (in seconds) for attempting to retrieve data from a device '
'when listening for notifications or indications. (Default: Indefinite)')
# Device for discovery service can be specified
parser.add_argument('-i', '--adapter', metavar='adapter', default=[0],
type=int, nargs=1,
required=False, action='store',
help='\033[1m<all commands>\033[0m '
'Specify which Bluetooth adapter should be used. '
'These can be found by running (hcitool dev).')
parser.add_argument('-d', '--address', metavar='address', type=validators.validate_bluetooth_address_cli, nargs=1,
required=False, action='store',
help='\033[1m<all commands>\033[0m '
'Bluetooth address (BD_ADDR) of the target Bluetooth device')
parser.add_argument('-a', '--handles', metavar='handles', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<read, write>\033[0m '
'Hexadecimal handle list of characteristics to access (ex: 005a 006b). If '
'you want to access the value of a characteristic, use the handle_value '
'value from the service scan.')
parser.add_argument('-u', '--uuids', metavar='uuids', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<read>\033[0m '
'UUID list of characteristics to access. If '
'you want to access the value of a characteristic, use the UUID '
'value from the service scan.')
parser.add_argument('--data', metavar='data', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<write>\033[0m '
'Strings that you want to write to a handle (separated by spaces).')
parser.add_argument('--files', metavar='files', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<write>\033[0m '
'Files that contain data to write to handle (separated by spaces)')
parser.add_argument('--payload-delimiter', metavar='payload-delimiter', type=str, nargs=1,
required=False, action='store', default=["EOF"],
help='\033[1m<write>\033[0m '
'Specify a delimiter (string) to use when specifying data for BLE payloads. '
'For instance, if I want to send packets with payloads in a file separated '
'by a comma, supply \'--payload-delimiter ,\'. Supply EOF if you want the entire contents '
'of a file sent. (Default: EOF)')
parser.add_argument("-t", '--address-type', metavar='address-type', type=str, nargs=1,
required=False, action='store', default=['public'], choices=address_type_choices,
help='\033[1m<all commands>\033[0m '
'Type of BLE address you want to connect to [public | random].')
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('--debug', action='store_true', help='\033[1m<all commands>\033[0m '
'Enable logging for debug statements.')
return parser.parse_args()
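# Illustrative invocations (assumed, based on the options defined above):
#   blesuite scan -i 0 --timeout 5
#   blesuite read -d AA:BB:CC:DD:EE:FF -a 005a 006b
#   blesuite write -d AA:BB:CC:DD:EE:FF -a 005a --data hello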
def process_args(args):
"""
Process command line tool arguments parsed by argparse
and call appropriate bleSuite functions.
:param args: parser.parse_args()
:return:
"""
command = args.command[0]
if args.debug:
logging.basicConfig(level=logging.DEBUG)
timeout = args.timeout[0] * 1000 # convert seconds to ms
if command == 'spoof':
import bdaddr
if args.address[0] == "":
print "Please specify an address to spoof."
else:
logger.debug("About to spoof to address %s for adapter %s" % (args.address[0], args.adapter[0]))
ret = bdaddr.bdaddr(("hci"+str(args.adapter[0])), args.address[0])
if ret == -1:
raise ValueError('Spoofing failed. Your device may not be supported.')
if command == 'scan':
print "BTLE Scan beginning"
with BLEConnectionManager(args.adapter[0], 'central') as connection_manager:
discovered = connection_manager.scan(timeout)
print "Discovered:"
for i in discovered.keys():
print "\t", i, "(public)" if discovered[i][0] == 0 else "(random)"
for h, j in enumerate(discovered[i][1]):
gap = connection_manager.decode_gap_data(str(discovered[i][1][h]))
info = connection_manager.generate_gap_data_dict(gap)
for k in info.keys():
print "\t\t", k + ":"
print "\t\t\t", info[k]
if command == 'smartscan':
print "BTLE Smart Scan beginning"
device = ble_run_smart_scan(args.address[0], args.adapter[0],
args.address_type[0], skip_device_info_query=args.skip_device_info_query,
attempt_read=args.smart_read,
timeout=timeout)
if command == 'servicescan':
print "BTLE Scanning Services"
ble_service_scan(args.address[0], args.adapter[0],
args.address_type[0])
if command == 'read':
if len(args.handles) <= 0 and len(args.uuids) <= 0:
print "ERROR: No handles or UUIDs supplied for read operation."
return
print "Reading value from handle or UUID"
if args.async:
uuidData, handleData = ble_service_read_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1], False)'''
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), True)'''
else:
uuidData, handleData = ble_service_read(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
if command == 'write':
if len(args.handles) <= 0:
print "ERROR: No handles supplied for write operation. Note: Write operation does not support use of UUIDs."
return
print "Writing value to handle"
if args.async:
logger.debug("Async Write")
if len(args.data) > 0:
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data,
timeout=timeout)
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
utils.print_helper.print_data_and_hex(dataTuple[2], False, prefix="\t")
print "Output:"
#if tuple[1][0] is a string, it means our cmdLineToolWrapper removed the GattResponse object
#due to a timeout, else we grab the GattResponse and its response data
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False, prefix="\t")
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), False, prefix="\t")
else:
logger.debug("Sync Write")
print args.data
if len(args.data) > 0:
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data, timeout=timeout)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
print_data_and_hex([dataTuple[2]], False, prefix="\t")
print "Output:"
print_data_and_hex(dataTuple[1], False, prefix="\t")
if command == 'subscribe':
print "Subscribing to device"
if args.subscribe_timeout[0] is not None:
timeout = args.subscribe_timeout[0] * 1000
else:
timeout = None
ble_handle_subscribe(args.address[0], args.handles, args.adapter[0],
args.address_type[0], args.mode[0], timeout)
return
def main():
"""
Main loop for BLESuite command line tool.
:return:
"""
args = parse_command()
process_args(args)
logger.debug("Args: %s" % args)
|
139508
|
import httpretty
import json
import unittest
def setUp_method_with_http_mocking(test_class):
original_setUp = test_class.setUp if hasattr(test_class, 'setUp') else None
def new_setUp(self):
httpretty.enable()
self.addCleanup(httpretty.disable)
if original_setUp:
original_setUp(self)
test_class.setUp = new_setUp
return test_class
def is_TestCase(x):
try:
return issubclass(x, unittest.TestCase)
except TypeError:
return False
def with_http_mocking(x):
if is_TestCase(x):
return setUp_method_with_http_mocking(x)
return httpretty.httprettified(x)
def last_request_body():
return httpretty.last_request().body
def last_request_json():
return json.loads(last_request_body().decode('UTF-8'))
def last_request_params():
return httpretty.last_request().querystring
def mock_http(header, response_body, content_type='text/json'):
method, uri = header.split(' ', 1)
httpretty.register_uri(
method=method,
uri=uri,
body=response_body,
content_type=content_type,
)
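# Illustrative usage sketch (not part of these helpers). Assuming the
# `requests` library is available, a test could look like:
#
#   @with_http_mocking
#   def test_example():
#       mock_http('GET http://example.com/api', '{"ok": true}')
#       requests.get('http://example.com/api', params={'q': '1'})
#       assert last_request_params()['q'] == ['1']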
|
139509
|
import cupy
from cupy._core import internal
def take(a, indices, axis=None, out=None):
"""Takes elements of an array at specified indices along an axis.
This is an implementation of "fancy indexing" at single axis.
This function does not support ``mode`` option.
Args:
a (cupy.ndarray): Array to extract elements.
indices (int or array-like): Indices of elements that this function
takes.
axis (int): The axis along which to select indices. The flattened input
is used by default.
out (cupy.ndarray): Output array. If provided, it should be of
appropriate shape and dtype.
Returns:
cupy.ndarray: The result of fancy indexing.
.. seealso:: :func:`numpy.take`
"""
# TODO(okuta): check type
return a.take(indices, axis, out)
def take_along_axis(a, indices, axis):
"""Take values from the input array by matching 1d index and data slices.
Args:
a (cupy.ndarray): Array to extract elements.
indices (cupy.ndarray): Indices to take along each 1d slice of ``a``.
axis (int): The axis to take 1d slices along.
Returns:
cupy.ndarray: The indexed result.
.. seealso:: :func:`numpy.take_along_axis`
"""
if indices.dtype.kind not in ('i', 'u'):
raise IndexError('`indices` must be an integer array')
if axis is None:
a = a.ravel()
axis = 0
ndim = a.ndim
axis = internal._normalize_axis_index(axis, ndim)
if ndim != indices.ndim:
raise ValueError(
'`indices` and `a` must have the same number of dimensions')
fancy_index = []
for i, n in enumerate(a.shape):
if i == axis:
fancy_index.append(indices)
else:
ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)
fancy_index.append(cupy.arange(n).reshape(ind_shape))
return a[tuple(fancy_index)]
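# Illustrative example (not part of CuPy itself): take_along_axis matches each
# 1-d slice of indices with the corresponding slice of data, e.g. sorting the
# columns of a small array:
#
#   a = cupy.asarray([[3, 1], [0, 2]])
#   idx = cupy.argsort(a, axis=0)
#   take_along_axis(a, idx, axis=0)   # -> [[0, 1], [3, 2]]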
def choose(a, choices, out=None, mode='raise'):
return a.choose(choices, out, mode)
def compress(condition, a, axis=None, out=None):
"""Returns selected slices of an array along given axis.
Args:
condition (1-D array of bools): Array that selects which entries to
return. If len(condition) is less than the size of a along the
given axis, then output is truncated to the length of the condition
array.
a (cupy.ndarray): Array from which to extract a part.
axis (int): Axis along which to take slices. If None (default), work
on the flattened array.
out (cupy.ndarray): Output array. If provided, it should be of
appropriate shape and dtype.
Returns:
cupy.ndarray: A copy of a without the slices along axis for which
condition is false.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.compress`
"""
return a.compress(condition, axis, out)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""Returns specified diagonals.
This function extracts the diagonals along two specified axes. The other
axes are not changed. This function returns a writable view of this array
as NumPy 1.10 will do.
Args:
a (cupy.ndarray): Array from which the diagonals are taken.
offset (int): Index of the diagonals. Zero indicates the main
diagonals, a positive value upper diagonals, and a negative value
lower diagonals.
axis1 (int): The first axis to take diagonals from.
axis2 (int): The second axis to take diagonals from.
Returns:
cupy.ndarray: A view of the diagonals of ``a``.
.. seealso:: :func:`numpy.diagonal`
"""
# TODO(okuta): check type
return a.diagonal(offset, axis1, axis2)
def extract(condition, a):
"""Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``.
If ``condition`` is boolean, ``np.extract`` is equivalent to
``arr[condition]``.
Args:
condition (int or array_like): An array whose nonzero or True entries
indicate the elements of array to extract.
a (cupy.ndarray): Input array of the same size as condition.
Returns:
cupy.ndarray: Rank 1 array of values from arr where condition is True.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.extract`
"""
if not isinstance(a, cupy.ndarray):
raise TypeError('extract requires input array to be cupy.ndarray')
if not isinstance(condition, cupy.ndarray):
condition = cupy.array(condition)
a = a.ravel()
condition = condition.ravel()
return a.take(condition.nonzero()[0])
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
Args:
condlist (list of bool arrays): The list of conditions which determine
from which array in `choicelist` the output elements are taken.
When multiple conditions are satisfied, the first one encountered
in `condlist` is used.
choicelist (list of cupy.ndarray): The list of arrays from which the
output elements are taken. It has to be of the same length
as `condlist`.
default (scalar): The element inserted in `output` when all conditions
evaluate to False. Default value is 0.
Returns:
cupy.ndarray: The output at position m is the m-th element of the
array in `choicelist` where the m-th element of the corresponding
array in `condlist` is True.
.. seealso:: :func:`numpy.select`
"""
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
if len(condlist) == 0:
raise ValueError("select with an empty condition list is not possible")
if not cupy.isscalar(default):
raise TypeError("default only accepts scalar values")
for i in range(len(choicelist)):
if not isinstance(choicelist[i], cupy.ndarray):
raise TypeError("choicelist only accepts lists of cupy ndarrays")
cond = condlist[i]
if cond.dtype.type is not cupy.bool_:
raise ValueError(
'invalid entry {} in condlist: should be boolean ndarray'
.format(i))
dtype = cupy.result_type(*choicelist)
condlist = cupy.broadcast_arrays(*condlist)
choicelist = cupy.broadcast_arrays(*choicelist, default)
if choicelist[0].ndim == 0:
result_shape = condlist[0].shape
else:
result_shape = cupy.broadcast_arrays(condlist[0],
choicelist[0])[0].shape
result = cupy.empty(result_shape, dtype)
cupy.copyto(result, default)
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
cupy.copyto(result, choice, where=cond)
return result
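# Illustrative example (not part of CuPy itself): select() picks, per element,
# the first choice whose condition is True, falling back to `default`:
#
#   x = cupy.arange(6)
#   select([x < 2, x > 3], [x, x ** 2], default=-1)
#   # -> array([ 0,  1, -1, -1, 16, 25])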
|
139518
|
import time
import unittest
from MiscUtils.Funcs import (
asclocaltime, commas, charWrap, wordWrap, excstr,
hostName, localIP, localTimeDelta, positiveId,
safeDescription, timestamp, uniqueId, valueForString)
class TestFuncs(unittest.TestCase):
"""Unit tests for the functions in MiscUtils.Funcs."""
def testCommas(self):
testSpec = '''
0 '0'
0.0 '0.0'
1 '1'
11 '11'
111 '111'
1111 '1,111'
11111 '11,111'
1.0 '1.0'
11.0 '11.0'
1.15 '1.15'
12345.127 '12,345.127'
-1 '-1'
-11 '-11'
-111 '-111'
-1111 '-1,111'
-11111 '-11,111'
'''
tests = testSpec.split()
count = len(tests)
i = 0
while i < count:
source = eval(tests[i])
result = eval(tests[i+1])
self.assertEqual(commas(source), result)
# also try the source as a string instead of a number
source = eval(f"'{tests[i]}'")
self.assertEqual(commas(source), result)
i += 2
def testCharWrap(self):
self.assertEqual(charWrap("""
Sparse is better than dense.
Readability counts.""", 34, 16), """
Sparse is better than
dense.
Readability counts.""")
def testWordWrap(self):
# an example with some spaces and newlines
msg = '''Arthur: "The Lady of the Lake, her arm clad in the purest \
shimmering samite, held aloft Excalibur from the bosom of the water, \
signifying by Divine Providence that I, Arthur, was to carry \
Excalibur. That is why I am your king!"
Dennis: "Listen. Strange women lying in ponds distributing swords is \
no basis for a system of government. Supreme executive power derives \
from a mandate from the masses, not from some farcical aquatic ceremony!"'''
for margin in range(20, 200, 29):
if margin == 78:
s = wordWrap(msg)
else:
s = wordWrap(msg, margin)
for line in s.splitlines():
self.assertLessEqual(
len(line), margin,
f'len={len(line)}, margin={margin}, line={line!r}')
self.assertEqual(msg.split(), s.split())
def testExcstr(self):
self.assertEqual(excstr(None), None)
self.assertEqual(
excstr(ValueError('Kawoom!')), 'ValueError: Kawoom!')
def testHostName(self):
# About all we can do is invoke hostName() to see that no exceptions
# are thrown, and do a little type checking on the return type.
host = hostName()
self.assertIsNotNone(host)
self.assertIsInstance(host, str)
self.assertEqual(host, host.lower())
def testLocalIP(self):
ip = localIP()
self.assertTrue(ip)
self.assertFalse(ip.startswith('127.'))
self.assertEqual(localIP(), ip) # second invocation
self.assertEqual(localIP(useCache=None), ip)
# ignore if the following tests fetch the WSL address
ips = (ip, '192.168.80.1', '172.25.112.1')
self.assertIn(
localIP(remote=None, useCache=None), ips,
'See if this works: localIP(remote=None).'
' If this fails, dont worry.')
self.assertIn(
localIP(remote=('www.hostname.and.domain.are.invalid', 80),
useCache=None), ips)
def testPositiveId(self):
# About all we can do is invoke positiveId()
# to see that no exceptions are thrown and the result is positive.
self.assertIsInstance(positiveId(self), int)
self.assertGreater(positiveId(self), 0)
def testSafeDescription(self):
desc = safeDescription
# basics:
s = desc(1).replace('type=', 'class=')
self.assertEqual(s, "what=1 class=<class 'int'>")
s = desc(1, 'x').replace('type=', 'class=')
self.assertEqual(s, "x=1 class=<class 'int'>")
s = desc('x').replace('type=', 'class=')
s = s.replace("<type 'string'>", "<class 'str'>")
self.assertEqual(s, "what='x' class=<class 'str'>")
class Dummy:
pass
s = desc(Dummy())
self.assertIn('Dummy object', s, repr(s))
# okay now test that safeDescription eats exceptions from repr():
class Bogus:
def __repr__(self):
raise KeyError('bogus')
b = Bogus()
try:
s = desc(b)
except Exception:
s = 'failure: should not throw exception'
self.assertIn("(exception from repr(obj): KeyError: 'bogus')", s)
def testAsclocaltime(self):
self.assertEqual(len(asclocaltime()), 24)
t = time.time()
self.assertEqual(asclocaltime(t), time.asctime(time.localtime(t)))
def testTimestamp(self):
d = timestamp()
self.assertIsInstance(d, dict)
self.assertEqual(','.join(sorted(d)), 'condensed,dashed,pretty,tuple')
self.assertEqual(len(d['tuple']), 6)
self.assertEqual(len(d['condensed']), 14)
self.assertEqual(len(d['pretty']), 19)
self.assertEqual(len(d['dashed']), 19)
t = time.time()
d = timestamp(t)
t = time.localtime(t)[:6]
self.assertEqual(d['tuple'], t)
self.assertEqual(
d['condensed'], '{:4d}{:02d}{:02d}{:02d}{:02d}{:02d}'.format(*t))
self.assertEqual(
d['condensed'],
d['pretty'].replace('-', '').replace(':', '').replace(' ', ''))
self.assertEqual(d['condensed'], d['dashed'].replace('-', ''))
def testLocalTimeDelta(self):
d = localTimeDelta()
self.assertEqual(d.microseconds, 0)
self.assertEqual(d.seconds % 3600, 0)
self.assertTrue(-1 <= d.days < 1)
d = localTimeDelta(time.time())
self.assertEqual(d.microseconds, 0)
self.assertEqual(d.seconds % 3600, 0)
self.assertTrue(-1 <= d.days < 1)
def testUniqueId(self):
past = set()
def checkId(i):
self.assertIsInstance(i, str, type(i))
self.assertEqual(len(i), 32)
for c in i:
self.assertTrue(c in '0123456789abcdef')
self.assertFalse(i in past)
past.add(i)
for n in range(10):
checkId(uniqueId())
checkId(uniqueId(None))
checkId(uniqueId(n))
checkId(uniqueId(forObject=checkId))
def testValueForString(self):
evalCases = '''
1
9223372036854775808
5.5
True
False
None
[1]
['a']
{'x':1}
(1, 2, 3)
'a'
"z"
"""1234"""
'''
stringCases = '''
kjasdfkasdf
2389234lkdsflkjsdf
*09809
'''
evalCases = [s.strip() for s in evalCases.strip().splitlines()]
for case in evalCases:
value = valueForString(case)
evalCase = eval(case)
self.assertEqual(
value, evalCase,
f'case={case!r}, valueForString()={value!r},'
f' eval()={evalCase!r}')
stringCases = [s.strip() for s in stringCases.strip().splitlines()]
for case in stringCases:
value = valueForString(case)
self.assertEqual(
value, case,
f'case={case!r}, valueForString()={value!r}')
|
139532
|
import os
import tqdm
import torch
import numpy as np
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.decode_helper import extract_dets_from_outputs
from lib.helpers.decode_helper import decode_detections
class Tester(object):
def __init__(self, cfg, model, data_loader, logger):
self.cfg = cfg
self.model = model
self.data_loader = data_loader
self.logger = logger
self.class_name = data_loader.dataset.class_name
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if self.cfg.get('resume_model', None):
load_checkpoint(model = self.model,
optimizer = None,
filename = cfg['resume_model'],
logger = self.logger,
map_location=self.device)
self.model.to(self.device)
def test(self):
torch.set_grad_enabled(False)
self.model.eval()
results = {}
progress_bar = tqdm.tqdm(total=len(self.data_loader), leave=True, desc='Evaluation Progress')
for batch_idx, (inputs, calibs, coord_ranges, _, info) in enumerate(self.data_loader):
# load evaluation data and move data to current device.
inputs = inputs.to(self.device)
calibs = calibs.to(self.device)
coord_ranges = coord_ranges.to(self.device)
# the outputs of centernet
outputs = self.model(inputs,coord_ranges,calibs,K=50,mode='test')
dets = extract_dets_from_outputs(outputs=outputs, K=50)
dets = dets.detach().cpu().numpy()
# get corresponding calibs & transform tensor to numpy
calibs = [self.data_loader.dataset.get_calib(index) for index in info['img_id']]
info = {key: val.detach().cpu().numpy() for key, val in info.items()}
cls_mean_size = self.data_loader.dataset.cls_mean_size
dets = decode_detections(dets = dets,
info = info,
calibs = calibs,
cls_mean_size=cls_mean_size,
threshold = self.cfg['threshold'])
results.update(dets)
progress_bar.update()
# save the result for evaluation.
self.save_results(results)
progress_bar.close()
def save_results(self, results, output_dir='./outputs'):
output_dir = os.path.join(output_dir, 'data')
os.makedirs(output_dir, exist_ok=True)
for img_id in results.keys():
out_path = os.path.join(output_dir, '{:06d}.txt'.format(img_id))
f = open(out_path, 'w')
for i in range(len(results[img_id])):
class_name = self.class_name[int(results[img_id][i][0])]
f.write('{} 0.0 0'.format(class_name))
for j in range(1, len(results[img_id][i])):
f.write(' {:.2f}'.format(results[img_id][i][j]))
f.write('\n')
f.close()
|
139547
|
from pyontutils import sheets
from pyontutils.sheets import Sheet
from pyontutils.namespaces import ilxtr
from neurondm.sheets import CutsV1, Row as RowBase
from neurondm.core import Config, NeuronEBM
from neurondm.phenotype_namespaces import Species
class NeuronNerves(NeuronEBM):
owlClass = ilxtr.NeuronNerves
shortname = 'nerves'
class Row(RowBase):
entail_predicates = tuple()
neuron_class = NeuronNerves
def include(self):
return True
def neuron_existing(self):
return None
def entailed_molecular_phenotypes(self):
return []
class FromNLP(Sheet):
name = 'nlp-pns'
class NervesEBM(CutsV1, FromNLP):
name = 'nlp-pns'
sheet_name = 'evidence-based modeling statements'
fetch_grid = False
_prefix_exclude = tuple()
def main():
sheets.Row = Row # monkey patch
ner = NervesEBM()
ros = [ner.row_object(i + 1) for i, r in enumerate(ner.values[1:])]
config = Config('nerves')
_final = [r.neuron_cleaned(context=NeuronNerves(Species.Human)) for r in ros if r.include()]
final = _final
[f._sigh() for f in final]
config.write()
config.write_python()
neurons = config.neurons()
n = neurons[0]
return config,
if __name__ == '__main__':
main()
|
139567
|
from pybindgen import *
from PBGutils import *
#-------------------------------------------------------------------------------
# The class to handle wrapping this module.
#-------------------------------------------------------------------------------
class Kernel:
#---------------------------------------------------------------------------
# Add the types to the given module.
#---------------------------------------------------------------------------
def __init__(self, mod, srcdir, topsrcdir, dims):
self.dims = dims
# Includes.
mod.add_include('"%s/KernelTypes.hh"' % srcdir)
# Namespace.
space = mod.add_cpp_namespace("Spheral")
# Expose types.
self.types = ("BSpline", "W4Spline", "Gaussian", "SuperGaussian", "PiGaussian",
"Hat", "Sinc", "NSincPolynomial", "NBSpline", "QuarticSpline",
"QuinticSpline", "Table", "WendlandC2", "WendlandC4", "WendlandC6", "ExpInv")
for type in self.types:
for ndim in self.dims:
dim = "%id" % ndim
name = type + "Kernel" + dim
exec('self.%(name)s = addObject(space, "%(name)s", allow_subclassing=True)' % {"name" : name})
return
#---------------------------------------------------------------------------
# Add the types to the given module.
#---------------------------------------------------------------------------
def generateBindings(self, mod):
for ndim in self.dims:
dim = "%id" % ndim
# Generic Kernel types.
for type in ("BSpline", "W4Spline", "SuperGaussian", "WendlandC2", "WendlandC4", "WendlandC6", "QuarticSpline", "QuinticSpline", "ExpInv"):
name = type + "Kernel" + dim
exec("self.generateDefaultKernelBindings(self.%s, %i)" % (name, ndim))
# Now some specialized bindings for kernels.
exec("""
self.generateGaussianKernelBindings(self.GaussianKernel%(dim)s, %(ndim)i)
self.generatePiGaussianKernelBindings(self.PiGaussianKernel%(dim)s, %(ndim)i)
self.generateHatKernelBindings(self.HatKernel%(dim)s, %(ndim)i)
self.generateSincKernelBindings(self.SincKernel%(dim)s, %(ndim)i)
self.generateNSincPolynomialKernelBindings(self.NSincPolynomialKernel%(dim)s, %(ndim)i)
self.generateNBSplineKernelBindings(self.NBSplineKernel%(dim)s, %(ndim)i)
self.generateTableKernelBindings(self.TableKernel%(dim)s, %(ndim)i)
""" % {"dim" : dim, "ndim" : ndim})
return
#---------------------------------------------------------------------------
# The new sub modules (namespaces) introduced.
#---------------------------------------------------------------------------
def newSubModules(self):
return ["KernelSpace"]
#---------------------------------------------------------------------------
# Default (kernels with just the default constructor).
#---------------------------------------------------------------------------
def generateDefaultKernelBindings(self, x, ndim):
x.add_constructor([])
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# Gaussian
#---------------------------------------------------------------------------
def generateGaussianKernelBindings(self, x, ndim):
x.add_constructor([param("double", "extent")])
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# PiGaussian
#---------------------------------------------------------------------------
def generatePiGaussianKernelBindings(self, x, ndim):
x.add_constructor([])
x.add_constructor([param("double", "K")])
x.add_instance_attribute("K", "double", getter="getK", setter="setK")
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# Hat
#---------------------------------------------------------------------------
def generateHatKernelBindings(self, x, ndim):
x.add_constructor([param("double", "eta0"), param("double", "W0")])
x.add_instance_attribute("eta0", "double", getter="eta0", is_const=True)
x.add_instance_attribute("W0", "double", getter="W0", is_const=True)
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# Sinc
#---------------------------------------------------------------------------
def generateSincKernelBindings(self, x, ndim):
x.add_constructor([param("double", "extent")])
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# NSincPolynomial
#---------------------------------------------------------------------------
def generateNSincPolynomialKernelBindings(self, x, ndim):
x.add_constructor([param("int", "order")])
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# NBSpline
#---------------------------------------------------------------------------
def generateNBSplineKernelBindings(self, x, ndim):
x.add_constructor([param("int", "order")])
x.add_method("factorial", "int", [param("int", "n")], is_const=True)
x.add_method("binomialCoefficient", "int", [param("int", "n"), param("int", "m")], is_const=True)
x.add_method("oneSidedPowerFunction", "double", [param("double", "s"), param("int", "exponent")], is_const=True)
x.add_instance_attribute("order", "int", getter="order", setter="setOrder")
self.generateGenericKernelBindings(x, ndim)
return
#---------------------------------------------------------------------------
# TableKernel
#---------------------------------------------------------------------------
def generateTableKernelBindings(self, x, ndim):
bsplinekernel = "Spheral::BSplineKernel%id" % ndim
w4splinekernel = "Spheral::W4SplineKernel%id" % ndim
gaussiankernel = "Spheral::GaussianKernel%id" % ndim
supergaussiankernel = "Spheral::SuperGaussianKernel%id" % ndim
pigaussiankernel = "Spheral::PiGaussianKernel%id" % ndim
hatkernel = "Spheral::HatKernel%id" % ndim
sinckernel = "Spheral::SincKernel%id" % ndim
nsincpolynomialkernel = "Spheral::NSincPolynomialKernel%id" % ndim
quarticsplinekernel = "Spheral::QuarticSplineKernel%id" % ndim
quinticsplinekernel = "Spheral::QuinticSplineKernel%id" % ndim
nbsplinekernel = "Spheral::NBSplineKernel%id" % ndim
wendlandc2kernel = "Spheral::WendlandC2Kernel%id" % ndim
wendlandc4kernel = "Spheral::WendlandC4Kernel%id" % ndim
wendlandc6kernel = "Spheral::WendlandC6Kernel%id" % ndim
# Constructors.
for W in (bsplinekernel, w4splinekernel, gaussiankernel, supergaussiankernel, pigaussiankernel,
hatkernel, sinckernel, nsincpolynomialkernel, quarticsplinekernel, quinticsplinekernel, nbsplinekernel,
wendlandc2kernel,wendlandc4kernel,wendlandc6kernel):
x.add_constructor([constrefparam(W, "kernel"),
param("int", "numPoints", default_value="1000"),
param("double", "hmult", default_value="1.0")])
#x.add_method("augment", None, [constrefparam(W, "W")])
# Methods.
x.add_method("kernelAndGradValue", "pair_double_double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("kernelAndGradValues", None, [constrefparam("vector_of_double", "etaMagnitudes"),
constrefparam("vector_of_double", "Hdets"),
refparam("vector_of_double", "kernelValues"),
refparam("vector_of_double", "gradValues"),], is_const=True)
x.add_method("equivalentNodesPerSmoothingScale", "double", [param("double", "Wsum")], is_const=True)
x.add_method("equivalentWsum", "double", [param("double", "nPerh")], is_const=True)
x.add_method("f1", "double", [param("double", "etaMagnitude")], is_const=True)
x.add_method("f2", "double", [param("double", "etaMagnitude")], is_const=True)
x.add_method("gradf1", "double", [param("double", "etaMagnitude")], is_const=True)
x.add_method("gradf2", "double", [param("double", "etaMagnitude")], is_const=True)
x.add_method("f1Andf2", None, [param("double", "etaMagnitude"),
refparam("double", "f1"),
refparam("double", "f2"),
refparam("double", "gradf1"),
refparam("double", "gradf2")], is_const=True)
x.add_method("lowerBound", "int", [param("double", "etaMagnitude")], is_const=True)
x.add_method("valid", "bool", [], is_const=True, is_virtual=True)
# Attributes.
x.add_instance_attribute("nperhValues", "vector_of_double", getter="nperhValues", is_const=True)
x.add_instance_attribute("WsumValues", "vector_of_double", getter="WsumValues", is_const=True)
x.add_instance_attribute("numPoints", "int", getter="numPoints", is_const=True)
x.add_instance_attribute("stepSize", "double", getter="stepSize", is_const=True)
x.add_instance_attribute("stepSizeInv", "double", getter="stepSizeInv", is_const=True)
# Generic methods.
self.generateGenericKernelBindings(x, ndim)
#---------------------------------------------------------------------------
# Add generic Kernel methods.
#---------------------------------------------------------------------------
def generateGenericKernelBindings(self, x, ndim):
# Objects.
vector = "Vector%id" % ndim
symtensor = "SymTensor%id" % ndim
# Methods.
x.add_method("operator()", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True, custom_name = "__call__")
x.add_method("operator()", "double", [constrefparam(vector, "eta"), param("double", "Hdet")], is_const=True, custom_name="__call__")
x.add_method("operator()", "double", [param("double", "etaMagnitude"), param(symtensor, "H")], is_const=True, custom_name="__call__")
x.add_method("operator()", "double", [constrefparam(vector, "eta"), param(symtensor, "H")], is_const=True, custom_name="__call__")
x.add_method("grad", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("grad", "double", [constrefparam(vector, "eta"), param("double", "Hdet")], is_const=True)
x.add_method("grad", "double", [param("double", "etaMagnitude"), param(symtensor, "H")], is_const=True)
x.add_method("grad", "double", [constrefparam(vector, "eta"), param(symtensor, "H")], is_const=True)
x.add_method("grad2", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("grad2", "double", [constrefparam(vector, "eta"), param("double", "Hdet")], is_const=True)
x.add_method("grad2", "double", [param("double", "etaMagnitude"), param(symtensor, "H")], is_const=True)
x.add_method("grad2", "double", [constrefparam(vector, "eta"), param(symtensor, "H")], is_const=True)
x.add_method("gradh", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("gradh", "double", [constrefparam(vector, "eta"), param("double", "Hdet")], is_const=True)
x.add_method("gradh", "double", [param("double", "etaMagnitude"), param(symtensor, "H")], is_const=True)
x.add_method("gradh", "double", [constrefparam(vector, "eta"), param(symtensor, "H")], is_const=True)
x.add_method("kernelValue", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("gradValue", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("grad2Value", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("gradhValue", "double", [param("double", "etaMagnitude"), param("double", "Hdet")], is_const=True)
x.add_method("valid", "bool", [], is_const=True, is_virtual=True)
x.add_method("setVolumeNormalization", None, [param("double", "value")], visibility="protected")
x.add_method("setKernelExtent", None, [param("double", "value")], visibility="protected")
x.add_method("setInflectionPoint", None, [param("double", "value")], visibility="protected")
# Attributes.
x.add_instance_attribute("volumeNormalization", "double", getter="volumeNormalization", is_const=True)
x.add_instance_attribute("kernelExtent", "double", getter="kernelExtent", is_const=True)
x.add_instance_attribute("inflectionPoint", "double", getter="inflectionPoint", is_const=True)
return
|
139570
|
import FWCore.ParameterSet.Config as cms
# Geometry and Magnetic field must be initialized separately
# Geant4-based CMS Detector simulation (OscarProducer)
# - returns label "g4SimHits"
#
from SimG4Core.Application.g4SimHits_cfi import *
|
139573
|
from flask_restplus import Api
authorizations = {
'apikey': {
'type': 'apiKey',
'in': 'header',
'name': 'X-Token'
}
}
api = Api(version='1.0', title='PMS API', description='PMS API', authorizations=authorizations)
api.namespaces.pop(0)
ns = api.namespace('v1', description='This is a custom namespace')
from .user import UserView
from .pms import ResourceApi, GetUsers, GetPerms, GroupView, PermissionApi
|
139581
|
from collections.abc import Sequence
import operator
import urllib.parse
import requests
import cachetools
BASE_URL = "http://archive.org/"
def _session(base_url, retries):
# TODO: backoff?
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=retries)
session.mount(base_url, adapter)
return session
class InternetArchiveClient:
pykka_traversable = True
def __init__(self, base_url=BASE_URL, retries=0, timeout=None):
self.__base_url = base_url
self.__session = _session(base_url, retries)
self.__timeout = timeout
self.cache = None # public
@property
def proxies(self):
return self.__session.proxies
@property
def useragent(self):
return self.__session.headers.get("User-Agent")
@useragent.setter
def useragent(self, value):
self.__session.headers["User-Agent"] = value
@cachetools.cachedmethod(operator.attrgetter("cache"))
def getitem(self, identifier):
obj = self.__get("/metadata/%s" % identifier).json()
if not obj:
raise LookupError(identifier)
elif "error" in obj:
raise LookupError(obj["error"])
elif "result" in obj:
return obj["result"]
else:
return obj
def geturl(self, identifier, filename=None):
if filename:
path = f"/download/{identifier}/{filename}"
else:
path = "/download/%s" % identifier
return urllib.parse.urljoin(self.__base_url, path)
def search(self, query, fields=None, sort=None, rows=None, start=None):
response = self.__get(
"/advancedsearch.php",
params={
"q": query,
"fl[]": fields,
"sort[]": sort,
"rows": rows,
"start": start,
"output": "json",
},
)
if response.content:
return self.SearchResult(response.json())
else:
raise self.SearchError(response.url)
def __get(self, path, params=None):
return self.__session.get(
urllib.parse.urljoin(self.__base_url, path),
params=params,
timeout=self.__timeout,
)
class SearchResult(Sequence):
def __init__(self, result):
response = result["response"]
self.docs = response.get("docs", [])
self.rowcount = response.get("numFound", None)
# query is optional, and responseHeader likely to change
try:
self.query = result["responseHeader"]["params"]["query"]
except LookupError:
self.query = None
def __getitem__(self, key):
return self.docs[key]
def __len__(self):
return len(self.docs)
def __iter__(self):
return iter(self.docs)
class SearchError(Exception):
pass
if __name__ == "__main__":
import argparse
import logging
import json
import sys
parser = argparse.ArgumentParser()
parser.add_argument("arg", metavar="PATH | USER | QUERY")
parser.add_argument("-B", "--base-url", default="http://archive.org")
parser.add_argument("-f", "--fields", nargs="+")
parser.add_argument("-i", "--indent", type=int, default=2)
parser.add_argument("-q", "--query", action="store_true")
parser.add_argument("-r", "--rows", type=int)
parser.add_argument("-R", "--retries", type=int, default=0)
parser.add_argument("-s", "--sort", nargs="+")
parser.add_argument("-t", "--timeout", type=float)
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
client = InternetArchiveClient(args.base_url, args.retries, args.timeout)
if args.query:
result = client.search(args.arg, args.fields, args.sort, args.rows)
else:
result = client.getitem(args.arg)
json.dump(result, sys.stdout, default=vars, indent=args.indent)
sys.stdout.write("\n")
|
139618
|
def parse_list_ranges(s,sep='-'):
r = []
x = s.split(',')
for y in x:
z = y.split(sep)
if len(z)==1:
r += [int(z[0])]
else:
r += range(int(z[0]),int(z[1])+1)
return list(r)
def parse_list_floats(s):
x = s.split(',')
return list(map(float, x))
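if __name__ == '__main__':
    # Illustrative checks (not in the original module): dash-separated spans
    # are expanded inclusively and single values are kept as-is.
    assert parse_list_ranges('1-3,7') == [1, 2, 3, 7]
    assert parse_list_floats('0.5,2') == [0.5, 2.0]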
|
139636
|
from logging import Logger
from os.path import basename
from typing import TYPE_CHECKING
from urllib.parse import urlparse
import boto3
from jsonschema import ValidationError, validate
from linz_logger import get_log
from ..aws_message_attributes import (
DATA_TYPE_STRING,
MESSAGE_ATTRIBUTE_TYPE_DATASET,
MESSAGE_ATTRIBUTE_TYPE_KEY,
)
from ..error_response_keys import ERROR_MESSAGE_KEY
from ..logging_keys import LOG_MESSAGE_LAMBDA_FAILURE, LOG_MESSAGE_LAMBDA_START
from ..parameter_store import ParameterName, get_param
from ..resources import Resource
from ..s3 import S3_URL_PREFIX
from ..step_function_keys import (
DATASET_ID_KEY,
DATASET_PREFIX_KEY,
METADATA_URL_KEY,
NEW_VERSION_S3_LOCATION,
VERSION_ID_KEY,
)
from ..types import JsonObject
if TYPE_CHECKING:
# When type checking we want to use the third party package's stub
from mypy_boto3_sqs import SQSServiceResource
from mypy_boto3_sqs.type_defs import MessageAttributeValueTypeDef
else:
# In production we want to avoid depending on a package which has no runtime impact
S3Client = SQSServiceResource = object # pragma: no mutate
MessageAttributeValueTypeDef = dict # pragma: no mutate
LOGGER: Logger = get_log()
SQS_RESOURCE: SQSServiceResource = boto3.resource("sqs")
def lambda_handler(event: JsonObject, _context: bytes) -> JsonObject:
"""Main Lambda entry point."""
LOGGER.debug(LOG_MESSAGE_LAMBDA_START, extra={"lambda_input": event})
# validate input
try:
validate(
event,
{
"type": "object",
"properties": {
DATASET_ID_KEY: {"type": "string"},
DATASET_PREFIX_KEY: {"type": "string"},
VERSION_ID_KEY: {"type": "string"},
METADATA_URL_KEY: {"type": "string"},
},
"required": [DATASET_ID_KEY, DATASET_PREFIX_KEY, METADATA_URL_KEY, VERSION_ID_KEY],
},
)
except ValidationError as error:
LOGGER.warning(LOG_MESSAGE_LAMBDA_FAILURE, extra={"error": error})
return {ERROR_MESSAGE_KEY: error.message}
new_version_metadata_key = (
f"{event[DATASET_PREFIX_KEY]}/{event[VERSION_ID_KEY]}/"
f"{basename(urlparse(event[METADATA_URL_KEY]).path[1:])}"
)
# add reference to root catalog
SQS_RESOURCE.get_queue_by_name(
QueueName=get_param(ParameterName.UPDATE_CATALOG_MESSAGE_QUEUE_NAME)
).send_message(
MessageBody=new_version_metadata_key,
MessageAttributes={
MESSAGE_ATTRIBUTE_TYPE_KEY: MessageAttributeValueTypeDef(
DataType=DATA_TYPE_STRING, StringValue=MESSAGE_ATTRIBUTE_TYPE_DATASET
)
},
)
return {
NEW_VERSION_S3_LOCATION: f"{S3_URL_PREFIX}"
f"{Resource.STORAGE_BUCKET_NAME.resource_name}/"
f"{new_version_metadata_key}"
}
|
139693
|
import numpy as np
from torch.autograd import Variable
import torch as torch
import copy
from torch.autograd.gradcheck import zero_gradients
def deepfool(image, net, num_classes, overshoot, max_iter):
"""
:param image: Image tensor of size 3xHxW
:param net: network (input: images, output: values of activation **BEFORE** softmax).
:param num_classes: limits the number of classes to test against (by default = 10)
:param overshoot: used as a termination criterion to prevent vanishing updates (default = 0.02).
:param max_iter: maximum number of iterations for deepfool (default = 50)
:return: minimal perturbation that fools the classifier, number of iterations that it required, new estimated_label and perturbed image
"""
is_cuda = torch.cuda.is_available()
if is_cuda:
image = image.cuda()
net = net.cuda()
f_image = net.forward(Variable(image[None, :, :, :], requires_grad=True)).data.cpu().numpy().flatten()
I = f_image.argsort()[::-1]
I = I[0:num_classes]
label = I[0]
input_shape = image.cpu().numpy().shape
pert_image = copy.deepcopy(image)
w = np.zeros(input_shape)
r_tot = np.zeros(input_shape)
loop_i = 0
x = Variable(pert_image[None, :], requires_grad=True)
fs = net.forward(x)
k_i = label
while k_i == label and loop_i < max_iter:
pert = np.inf
fs[0, I[0]].backward(retain_graph=True)
grad_orig = x.grad.data.cpu().numpy().copy()
for k in range(1, num_classes):
zero_gradients(x)
fs[0, I[k]].backward(retain_graph=True)
cur_grad = x.grad.data.cpu().numpy().copy()
# set new w_k and new f_k
w_k = cur_grad - grad_orig
f_k = (fs[0, I[k]] - fs[0, I[0]]).data.cpu().numpy()
pert_k = abs(f_k)/np.linalg.norm(w_k.flatten())
# determine which w_k to use
if pert_k < pert:
pert = pert_k
w = w_k
# compute r_i and r_tot
# Added 1e-4 for numerical stability
r_i = (pert+1e-4) * w / np.linalg.norm(w)
r_tot = np.float32(r_tot + r_i)
if is_cuda:
pert_image = image + (1+overshoot)*torch.from_numpy(r_tot).cuda()
else:
pert_image = image + (1+overshoot)*torch.from_numpy(r_tot)
x = Variable(pert_image, requires_grad=True)
fs = net.forward(x)
k_i = np.argmax(fs.data.cpu().numpy().flatten())
loop_i += 1
return (1+overshoot)*r_tot, loop_i, label, k_i, pert_image
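# Minimal usage sketch (not part of the original module). The classifier and the
# input below are placeholders (a torchvision ResNet-18 with random weights and a
# random image tensor), used only to show the expected call signature; it assumes
# the same torch/torchvision environment as the imports above.
if __name__ == "__main__":
    import torchvision.models as models
    net = models.resnet18().eval()
    image = torch.rand(3, 224, 224)
    r_tot, n_iter, orig_label, adv_label, pert_image = deepfool(
        image, net, num_classes=10, overshoot=0.02, max_iter=50)
    print("label:", orig_label, "-> adversarial label:", adv_label, "iterations:", n_iter)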
|
139719
|
import arcade
TILE_SCALING = 1.0
def test_csv_left_up():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/csv_left_up_embedded.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
def test_csv_right_down():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/csv_right_down_external.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
def test_base_64_zlib():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/base_64_zlib.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
def test_base_64_gzip():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/base_64_gzip.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
|
139730
|
import numpy as np
import autoarray as aa
import autogalaxy as ag
from autolens.lens.model.result import ResultDataset
class ResultInterferometer(ResultDataset):
@property
def max_log_likelihood_fit(self):
return self.analysis.fit_interferometer_for_instance(instance=self.instance)
@property
def real_space_mask(self):
return self.max_log_likelihood_fit.interferometer.real_space_mask
@property
def unmasked_model_visibilities(self):
return self.max_log_likelihood_fit.unmasked_blurred_image
@property
def unmasked_model_visibilities_of_planes(self):
return self.max_log_likelihood_fit.unmasked_blurred_image_of_planes
@property
def unmasked_model_visibilities_of_planes_and_galaxies(self):
fit = self.max_log_likelihood_fit
return fit.unmasked_blurred_image_of_planes_and_galaxies
def visibilities_for_galaxy(self, galaxy: ag.Galaxy) -> np.ndarray:
"""
Parameters
----------
galaxy
A galaxy used in this search
Returns
-------
ndarray or None
A numpy arrays giving the model visibilities of that galaxy
"""
return self.max_log_likelihood_fit.galaxy_model_visibilities_dict[galaxy]
@property
def visibilities_galaxy_dict(self) -> {str: ag.Galaxy}:
"""
A dictionary associating galaxy names with model visibilities of those galaxies
"""
return {
galaxy_path: self.visibilities_for_galaxy(galaxy)
for galaxy_path, galaxy in self.path_galaxy_tuples
}
@property
def hyper_galaxy_visibilities_path_dict(self):
"""
A dictionary associating 1D hyper_galaxies galaxy visibilities with their names.
"""
hyper_galaxy_visibilities_path_dict = {}
for path, galaxy in self.path_galaxy_tuples:
hyper_galaxy_visibilities_path_dict[path] = self.visibilities_galaxy_dict[
path
]
return hyper_galaxy_visibilities_path_dict
@property
def hyper_model_visibilities(self):
hyper_model_visibilities = aa.Visibilities.zeros(
shape_slim=(self.max_log_likelihood_fit.visibilities.shape_slim,)
)
for path, galaxy in self.path_galaxy_tuples:
hyper_model_visibilities += self.hyper_galaxy_visibilities_path_dict[path]
return hyper_model_visibilities
|
139757
|
from __future__ import division
import numpy as np
from scipy.optimize import fmin_bfgs
from itertools import combinations_with_replacement
import causalinference.utils.tools as tools
from .data import Dict
class Propensity(Dict):
"""
Dictionary-like class containing propensity score data.
Propensity score related data includes estimated logistic regression
coefficients, maximized log-likelihood, predicted propensity scores,
and lists of the linear and quadratic terms that are included in the
logistic regression.
"""
def __init__(self, data, lin, qua):
Z = form_matrix(data['X'], lin, qua)
Z_c, Z_t = Z[data['controls']], Z[data['treated']]
beta = calc_coef(Z_c, Z_t)
self._data = data
self._dict = dict()
self._dict['lin'], self._dict['qua'] = lin, qua
self._dict['coef'] = beta
self._dict['loglike'] = -neg_loglike(beta, Z_c, Z_t)
self._dict['fitted'] = sigmoid(Z.dot(beta))
self._dict['se'] = calc_se(Z, self._dict['fitted'])
def __str__(self):
table_width = 80
coefs = self._dict['coef']
ses = self._dict['se']
output = '\n'
output += 'Estimated Parameters of Propensity Score\n\n'
entries1 = ['', 'Coef.', 'S.e.', 'z', 'P>|z|',
'[95% Conf. int.]']
entry_types1 = ['string']*6
col_spans1 = [1]*5 + [2]
output += tools.add_row(entries1, entry_types1,
col_spans1, table_width)
output += tools.add_line(table_width)
entries2 = tools.gen_reg_entries('Intercept', coefs[0], ses[0])
entry_types2 = ['string'] + ['float']*6
col_spans2 = [1]*7
output += tools.add_row(entries2, entry_types2,
col_spans2, table_width)
lin = self._dict['lin']
for (lin_term, coef, se) in zip(lin, coefs[1:], ses[1:]):
entries3 = tools.gen_reg_entries('X'+str(lin_term),
coef, se)
output += tools.add_row(entries3, entry_types2,
col_spans2, table_width)
qua = self._dict['qua']
lin_num = len(lin)+1 # including intercept
for (qua_term, coef, se) in zip(qua, coefs[lin_num:],
ses[lin_num:]):
name = 'X'+str(qua_term[0])+'*X'+str(qua_term[1])
entries4 = tools.gen_reg_entries(name, coef, se)
output += tools.add_row(entries4, entry_types2,
col_spans2, table_width)
return output
class PropensitySelect(Propensity):
"""
Dictionary-like class containing propensity score data.
Propensity score related data includes estimated logistic regression
coefficients, maximized log-likelihood, predicted propensity scores,
and lists of the linear and quadratic terms that are included in the
logistic regression.
"""
def __init__(self, data, lin_B, C_lin, C_qua):
X_c, X_t = data['X_c'], data['X_t']
lin = select_lin_terms(X_c, X_t, lin_B, C_lin)
qua = select_qua_terms(X_c, X_t, lin, C_qua)
super(PropensitySelect, self).__init__(data, lin, qua)
def form_matrix(X, lin, qua):
N, K = X.shape
mat = np.empty((N, 1+len(lin)+len(qua)))
mat[:, 0] = 1 # constant term
current_col = 1
if lin:
mat[:, current_col:current_col+len(lin)] = X[:, lin]
current_col += len(lin)
for term in qua: # qua is a list of tuples of column numbers
mat[:, current_col] = X[:, term[0]] * X[:, term[1]]
current_col += 1
return mat
def sigmoid(x, top_threshold=100, bottom_threshold=-100):
high_x = (x >= top_threshold)
low_x = (x <= bottom_threshold)
mid_x = ~(high_x | low_x)
values = np.empty(x.shape[0])
values[high_x] = 1.0
values[low_x] = 0.0
values[mid_x] = 1/(1+np.exp(-x[mid_x]))
return values
def log1exp(x, top_threshold=100, bottom_threshold=-100):
high_x = (x >= top_threshold)
low_x = (x <= bottom_threshold)
mid_x = ~(high_x | low_x)
values = np.empty(x.shape[0])
values[high_x] = 0.0
values[low_x] = -x[low_x]
values[mid_x] = np.log(1 + np.exp(-x[mid_x]))
return values
def neg_loglike(beta, X_c, X_t):
return log1exp(X_t.dot(beta)).sum() + log1exp(-X_c.dot(beta)).sum()
def neg_gradient(beta, X_c, X_t):
return (sigmoid(X_c.dot(beta))*X_c.T).sum(1) - \
(sigmoid(-X_t.dot(beta))*X_t.T).sum(1)
def calc_coef(X_c, X_t):
K = X_c.shape[1]
neg_ll = lambda b: neg_loglike(b, X_c, X_t)
neg_grad = lambda b: neg_gradient(b, X_c, X_t)
logit = fmin_bfgs(neg_ll, np.zeros(K), neg_grad,
full_output=True, disp=False)
return logit[0]
def calc_se(X, phat):
H = np.dot(phat*(1-phat)*X.T, X)
return np.sqrt(np.diag(np.linalg.inv(H)))
def get_excluded_lin(K, included):
included_set = set(included)
return [x for x in range(K) if x not in included_set]
def get_excluded_qua(lin, included):
whole_set = list(combinations_with_replacement(lin, 2))
included_set = set(included)
return [x for x in whole_set if x not in included_set]
def calc_loglike(X_c, X_t, lin, qua):
Z_c = form_matrix(X_c, lin, qua)
Z_t = form_matrix(X_t, lin, qua)
beta = calc_coef(Z_c, Z_t)
return -neg_loglike(beta, Z_c, Z_t)
def select_lin(X_c, X_t, lin_B, C_lin):
# Selects, through a sequence of likelihood ratio tests, the
# variables that should be included linearly in propensity
# score estimation.
K = X_c.shape[1]
excluded = get_excluded_lin(K, lin_B)
if excluded == []:
return lin_B
ll_null = calc_loglike(X_c, X_t, lin_B, [])
def lr_stat_lin(lin_term):
ll_alt = calc_loglike(X_c, X_t, lin_B+[lin_term], [])
return 2 * (ll_alt - ll_null)
lr_stats = np.array([lr_stat_lin(term) for term in excluded])
argmax_lr = lr_stats.argmax()
if lr_stats[argmax_lr] < C_lin:
return lin_B
else:
new_term = [excluded[argmax_lr]]
return select_lin(X_c, X_t, lin_B+new_term, C_lin)
def select_lin_terms(X_c, X_t, lin_B, C_lin):
# Mostly a wrapper around function select_lin to handle cases that
# require little computation.
if C_lin <= 0:
K = X_c.shape[1]
return lin_B + get_excluded_lin(K, lin_B)
elif C_lin == np.inf:
return lin_B
else:
return select_lin(X_c, X_t, lin_B, C_lin)
def select_qua(X_c, X_t, lin, qua_B, C_qua):
# Selects, through a sequence of likelihood ratio tests, the
# variables that should be included quadratically in propensity
# score estimation.
excluded = get_excluded_qua(lin, qua_B)
if excluded == []:
return qua_B
ll_null = calc_loglike(X_c, X_t, lin, qua_B)
def lr_stat_qua(qua_term):
ll_alt = calc_loglike(X_c, X_t, lin, qua_B+[qua_term])
return 2 * (ll_alt - ll_null)
lr_stats = np.array([lr_stat_qua(term) for term in excluded])
argmax_lr = lr_stats.argmax()
if lr_stats[argmax_lr] < C_qua:
return qua_B
else:
new_term = [excluded[argmax_lr]]
return select_qua(X_c, X_t, lin, qua_B+new_term, C_qua)
def select_qua_terms(X_c, X_t, lin, C_qua):
# Mostly a wrapper around function select_qua to handle cases that
# require little computation.
if lin == []:
return []
if C_qua <= 0:
return get_excluded_qua(lin, [])
elif C_qua == np.inf:
return []
else:
return select_qua(X_c, X_t, lin, [], C_qua)
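# Minimal usage sketch with synthetic data (not part of the original module). The
# 'X'/'treated'/'controls' keys mirror how Propensity indexes `data` above; whether
# the real Dict container expects additional fields is an assumption.
if __name__ == "__main__":
    np.random.seed(0)
    N, K = 500, 3
    X = np.random.randn(N, K)
    treated = np.random.rand(N) < sigmoid(X[:, 0])
    data = {'X': X, 'treated': treated, 'controls': ~treated}
    pscore = Propensity(data, lin=[0, 1, 2], qua=[(0, 1)])
    print(pscore)  # prints the estimated coefficient table via __str__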
|
139845
|
class TableViewUIUtils(object):
""" This utility class contains members that involve the Revit UI and operate on schedule views or MEP electrical panel schedules. """
@staticmethod
def TestCellAndPromptToEditTypeParameter(tableView,sectionType,row,column):
"""
TestCellAndPromptToEditTypeParameter(tableView: TableView,sectionType: SectionType,row: int,column: int) -> bool
Prompts the end-user to control whether a type parameter contained in the
specified table cell should be allowed edited.
tableView: The table view.
sectionType: The section the row lies in.
row: The row index in the section.
column: The column index in the section.
Returns: Returns true if editing the cell is allowed; otherwise false.
"""
pass
__all__=[
'TestCellAndPromptToEditTypeParameter',
]
|
139865
|
import base64
import gzip
import json
from typing import Dict
class RserverExchange:
"""Data-oriented class to simplify dealing with RStudio messages decoded from the MITM log
Attributes are stored in a way that is ready for activity records. bytes get decoded, and images get
base64-encoded.
"""
def __init__(self, raw_message: Dict):
"""Note, parsing may throw a json.JSONDecodeError (though it shouldn't!)"""
self.path = raw_message['request']['path'].decode()
self.request_headers = {k.decode(): v.decode() for k, v in raw_message['request']['headers']}
request_text = raw_message['request']['content']
if request_text:
self.request = json.loads(request_text.decode())
else:
self.request = None
self.response_headers = {k.decode(): v.decode() for k, v in raw_message['response']['headers']}
self.response_type = self.response_headers.get('Content-Type')
# This could get fairly resource intensive if our records get large,
# but for now we keep things simple - all parsing happens in this class, and we can optimize later
response_bytes = raw_message['response']['content']
if self.response_headers.get('Content-Encoding') == 'gzip':
response_bytes = gzip.decompress(response_bytes)
# Default response is empty string
if self.response_type == 'application/json':
# strict=False allows control codes, as used in tidyverse output
self.response = json.loads(response_bytes.decode(), strict=False)
elif self.response_type == 'image/png':
self.response = base64.b64encode(response_bytes).decode('ascii')
# if we actually wanted to work with the image, could do so like this:
# img = Image.open(io.BytesIO(response_bytes))
elif response_bytes:
self.response = '**unsupported**'
else:
self.response = ''
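# Minimal usage sketch (not part of the original module). The raw_message layout
# below mirrors how this class indexes a decoded MITM log entry; the specific path
# and payloads are made up for illustration.
if __name__ == "__main__":
    raw_message = {
        'request': {
            'path': b'/rpc/console_input',
            'headers': [(b'Content-Type', b'application/json')],
            'content': b'{"params": ["1 + 1", ""]}',
        },
        'response': {
            'headers': [(b'Content-Type', b'application/json')],
            'content': b'{"result": null}',
        },
    }
    exchange = RserverExchange(raw_message)
    print(exchange.path, exchange.request, exchange.response)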
|
139889
|
import pytest
from ninja import NinjaAPI, Router
from ninja.testing import TestClient
api = NinjaAPI()
@api.get("/endpoint")
# view->api
def global_op(request):
return "global"
first_router = Router()
@first_router.get("/endpoint_1")
# view->router, router->api
def router_op1(request):
return "first 1"
second_router_one = Router()
@second_router_one.get("endpoint_1")
# view->router2, router2->router1, router1->api
def router_op2(request):
return "second 1"
second_router_two = Router()
@second_router_two.get("endpoint_2")
# view->router2, router2->router1, router1->api
def router2_op3(request):
return "second 2"
first_router.add_router("/second", second_router_one, tags=["one"])
first_router.add_router("/second", second_router_two, tags=["two"])
api.add_router("/first", first_router, tags=["global"])
@first_router.get("endpoint_2")
# router->api, view->router
def router1_op1(request):
return "first 2"
@second_router_one.get("endpoint_3")
# router2->router1, router1->api, view->router2
def router21_op3(request, path_param: int = None):
return "second 3" if path_param is None else f"second 3: {path_param}"
second_router_three = Router()
@second_router_three.get("endpoint_4")
# router1->api, view->router2, router2->router1
def router_op3(request, path_param: int = None):
return "second 4" if path_param is None else f"second 4: {path_param}"
first_router.add_router("/second", second_router_three, tags=["three"])
client = TestClient(api)
@pytest.mark.parametrize(
"path,expected_status,expected_response",
[
("/endpoint", 200, "global"),
("/first/endpoint_1", 200, "first 1"),
("/first/endpoint_2", 200, "first 2"),
("/first/second/endpoint_1", 200, "second 1"),
("/first/second/endpoint_2", 200, "second 2"),
("/first/second/endpoint_3", 200, "second 3"),
("/first/second/endpoint_4", 200, "second 4"),
],
)
def test_inheritance_responses(path, expected_status, expected_response):
response = client.get(path)
assert response.status_code == expected_status, response.content
assert response.json() == expected_response
def test_tags():
schema = api.get_openapi_schema()
# print(schema)
glob = schema["paths"]["/api/first/endpoint_1"]["get"]
assert glob["tags"] == ["global"]
e1 = schema["paths"]["/api/first/second/endpoint_1"]["get"]
assert e1["tags"] == ["one"]
e2 = schema["paths"]["/api/first/second/endpoint_2"]["get"]
assert e2["tags"] == ["two"]
|
139905
|
from django.db import models, migrations
from django.db.models import CASCADE
from tree.fields import PathField
from tree.operations import CreateTreeTrigger
from tree.sql.base import ALPHANUM_LEN
class Migration(migrations.Migration):
dependencies = [
('tree', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('parent', models.ForeignKey('self', blank=True, null=True, on_delete=CASCADE)),
('path', PathField(order_by=('name',), max_siblings=ALPHANUM_LEN*3)),
],
options={
'ordering': ('path', 'name'),
},
),
CreateTreeTrigger('tests.Place'),
]
|
139926
|
from calendar import timegm
from datetime import datetime
from rest_framework_jwt.compat import get_username, get_username_field
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.utils import jwt_decode_handler
from django_otp.models import Device
def jwt_otp_payload(user, device = None):
"""
Optionally include OTP device in JWT payload
"""
username_field = get_username_field()
username = get_username(user)
payload = {
'user_id': user.pk,
'username': username,
'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA
}
# Include original issued at time for a brand new token,
# to allow token refresh
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.utcnow().utctimetuple()
)
if api_settings.JWT_AUDIENCE is not None:
payload['aud'] = api_settings.JWT_AUDIENCE
if api_settings.JWT_ISSUER is not None:
payload['iss'] = api_settings.JWT_ISSUER
# UserAPI additions
if (user is not None) and (device is not None) and (device.user_id == user.id) and (device.confirmed is True):
payload['otp_device_id'] = device.persistent_id
else:
payload['otp_device_id'] = None
return payload
def get_custom_jwt(user, device):
"""
Helper to generate a JWT for a validated OTP device.
This resets the orig_iat timestamp, as we've re-validated the user.
"""
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_otp_payload(user, device)
return jwt_encode_handler(payload)
def otp_is_verified(self, request):
"""
Helper to determine if user has verified OTP.
"""
auth = JSONWebTokenAuthentication()
jwt_value = auth.get_jwt_value(request)
if jwt_value is None:
return False
payload = jwt_decode_handler(jwt_value)
persistent_id = payload.get('otp_device_id')
if persistent_id:
device = Device.from_persistent_id(persistent_id)
if (device is not None) and (device.user_id != request.user.id):
return False
else:
# Valid device in JWT
return True
else:
return False
|
139964
|
from django.urls import reverse
from django.test import RequestFactory, TestCase
from django.utils.http import urlencode
from feder.cases.models import Case
from feder.institutions.factories import InstitutionFactory
from feder.letters.factories import IncomingLetterFactory
from feder.letters.models import Letter
from feder.main.tests import PermissionStatusMixin
from feder.parcels.factories import IncomingParcelPostFactory
from feder.users.factories import UserFactory
from .factories import CaseFactory, AliasFactory
from .forms import CaseForm
from .views import CaseAutocomplete
from feder.teryt.factories import CommunityJSTFactory, CountyJSTFactory
class ObjectMixin:
def setUp(self):
self.user = UserFactory(username="john")
self.case = CaseFactory()
self.permission_object = self.case.monitoring
class CaseFormTestCase(ObjectMixin, TestCase):
def test_standard_save(self):
data = {"name": "example", "institution": InstitutionFactory().pk}
form = CaseForm(monitoring=self.case.monitoring, user=self.user, data=data)
self.assertTrue(form.is_valid(), msg=form.errors)
obj = form.save()
self.assertEqual(obj.name, "example")
self.assertEqual(obj.monitoring, self.case.monitoring)
self.assertEqual(obj.user, self.user)
class CaseListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = []
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse("cases:list")
def test_filter_out_quarantined(self):
Case.objects.filter(pk=self.case.pk).update(is_quarantined=True)
response = self.client.get(self.get_url())
self.assertNotContains(response, self.case.name)
def test_show_quaranited_for_authorized(self):
Case.objects.filter(pk=self.case.pk).update(is_quarantined=True)
self.grant_permission("monitorings.view_quarantined_case")
self.login_permitted_user()
response = self.client.get(self.get_url())
self.assertContains(response, self.case)
def test_for_filter_cases_by_community(self):
common_county = CountyJSTFactory()
valid = CaseFactory(institution__jst=CommunityJSTFactory(parent=common_county))
invalid = CaseFactory(
institution__jst=CommunityJSTFactory(parent=common_county)
)
response = self.client.get(
"{}?voideship={}&county={}&community={}".format(
self.get_url(),
common_county.parent.pk,
common_county.pk,
valid.institution.jst.pk,
)
)
self.assertContains(response, valid.name)
self.assertContains(response, valid.institution.name)
self.assertNotContains(response, invalid.name)
self.assertNotContains(response, invalid.institution.name)
class CaseDetailViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = []
status_anonymous = 200
status_no_permission = 200
def get_url(self):
return reverse("cases:details", kwargs={"slug": self.case.slug})
def test_show_note_on_letter(self):
letter = IncomingLetterFactory(record__case=self.case)
response = self.client.get(self.get_url())
self.assertContains(response, letter.note)
def test_not_contains_spam_letter(self):
letter = IncomingLetterFactory(record__case=self.case, is_spam=Letter.SPAM.spam)
response = self.client.get(self.get_url())
self.assertNotContains(response, letter.body)
def test_contains_letter(self):
letter = IncomingLetterFactory(record__case=self.case)
response = self.client.get(self.get_url())
self.assertContains(response, letter.body)
def test_show_parce_post(self):
parcel = IncomingParcelPostFactory(record__case=self.case)
response = self.client.get(self.get_url())
self.assertContains(response, parcel.title)
class CaseCreateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.add_case"]
def get_url(self):
return reverse(
"cases:create", kwargs={"monitoring": str(self.case.monitoring.pk)}
)
class CaseUpdateViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.change_case"]
def get_url(self):
return reverse("cases:update", kwargs={"slug": self.case.slug})
class CaseDeleteViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.delete_case"]
def get_url(self):
return reverse("cases:delete", kwargs={"slug": self.case.slug})
class CaseAutocompleteTestCase(TestCase):
    # TODO: Why is `self.Client` not in use?
def setUp(self):
self.factory = RequestFactory()
def test_filter_by_name(self):
CaseFactory(name="123")
CaseFactory(name="456")
request = self.factory.get("/customer/details", data={"q": "123"})
request.user = UserFactory()
response = CaseAutocomplete.as_view()(request)
self.assertContains(response, "123")
self.assertNotContains(response, "456")
class SitemapTestCase(ObjectMixin, TestCase):
def test_cases(self):
url = reverse("sitemaps", kwargs={"section": "cases"})
needle = reverse("cases:details", kwargs={"slug": self.case.slug})
response = self.client.get(url)
self.assertContains(response, needle)
class CaseQuerySetTestCase(TestCase):
def test_find_by_email(self):
case = CaseFactory(email="<EMAIL>")
self.assertEqual(
Case.objects.by_addresses(["<EMAIL>"]).get(), case
)
def test_find_by_alias(self):
case = CaseFactory(email="<EMAIL>")
AliasFactory(case=case, email="<EMAIL>")
self.assertEqual(
Case.objects.by_addresses(["<EMAIL>"]).get(), case
)
class CaseReportViewSetTestCase(TestCase):
# TODO: Tests for other available filters could be added here
def test_filter_by_name(self):
CaseFactory(institution=InstitutionFactory(name="123"))
CaseFactory(institution=InstitutionFactory(name="456"))
response = self.client.get(
"{}?{}".format(reverse("case-report-list"), urlencode({"name": "2"}))
)
self.assertContains(response, "123")
self.assertNotContains(response, "456")
def test_csv_renderer_used(self):
response = self.client.get(
"{}?{}".format(reverse("case-report-list"), urlencode({"format": "csv"}))
)
self.assertEqual(response.status_code, 200)
self.assertIn("text/csv", response["content-type"])
|
139994
|
from twilio.twiml.voice_response import VoiceResponse, Say
response = VoiceResponse()
response.say('Chapeau!', voice='alice', language='fr-FR')
print(response)
|
140025
|
config = {
'population_size' : 100,
'mutation_probability' : .1,
'crossover_rate' : .9,
# maximum simulation runs before finishing
'max_runs' : 100,
# maximum timesteps per simulation
'max_timesteps' : 150,
# smoothness value of the line in [0, 1]
'line_smoothness' : .4,
# Bound for our gain parameters (p, i, d)
'max_gain_value' : 3,
    # when set to True, we create a new map this run; when set to False, we load the previously saved map
'new_map' : True,
'runs_per_screenshot' : 10,
'data_directory' : '/home/monk/genetic_pid_data',
'map_filename' : 'map.csv'
}
|
140056
|
import os
import pickle
import numpy as np
import torch
from loguru import logger
from tqdm import tqdm
def make_adj_list(N, edge_index_transposed):
A = np.eye(N)
for edge in edge_index_transposed:
A[edge[0], edge[1]] = 1
adj_list = A != 0
return adj_list
def make_adj_list_wrapper(x):
return make_adj_list(x["num_nodes"], x["edge_index"].T)
def compute_adjacency_list(data):
out = []
for x in tqdm(data, "adjacency list", leave=False):
out.append(make_adj_list_wrapper(x))
return out
def combine_results(data, adj_list):
out_data = []
for x, l in tqdm(zip(data, adj_list), "assembling adj_list result", total=len(data), leave=False):
x["adj_list"] = l
out_data.append(x)
return out_data
def compute_adjacency_list_cached(data, key, root="/data/zhwu/tmp"):
cachefile = f"{root}/OGB_ADJLIST_{key}.pickle"
if os.path.exists(cachefile):
with open(cachefile, "rb") as cachehandle:
logger.debug("using cached result from '%s'" % cachefile)
result = pickle.load(cachehandle)
return combine_results(data, result)
result = compute_adjacency_list(data)
with open(cachefile, "wb") as cachehandle:
logger.debug("saving result to cache '%s'" % cachefile)
pickle.dump(result, cachehandle)
logger.info("Got adjacency list data for key %s" % key)
return combine_results(data, result)
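# Minimal usage sketch with a toy graph (not part of the original module). The dict
# layout mirrors what make_adj_list_wrapper expects: an OGB-style entry with
# 'num_nodes' and a 2 x num_edges 'edge_index'. The cache is written to a temporary
# directory instead of the hard-coded default root.
if __name__ == "__main__":
    import tempfile
    toy_data = [{"num_nodes": 3, "edge_index": np.array([[0, 1], [1, 2]])}]
    with tempfile.TemporaryDirectory() as tmpdir:
        out = compute_adjacency_list_cached(toy_data, key="toy", root=tmpdir)
    print(out[0]["adj_list"])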
|
140062
|
del_items(0x80139F2C)
SetType(0x80139F2C, "void PresOnlyTestRoutine__Fv()")
del_items(0x80139F54)
SetType(0x80139F54, "void FeInitBuffer__Fv()")
del_items(0x80139F7C)
SetType(0x80139F7C, "void FeAddEntry__Fii8TXT_JUSTUsP7FeTableP5CFont(int X, int Y, enum TXT_JUST Just, unsigned short Str, struct FeTable *MenuPtr, struct CFont *Font)")
del_items(0x80139FF0)
SetType(0x80139FF0, "void FeAddTable__FP11FeMenuTablei(struct FeMenuTable *Table, int Count)")
del_items(0x8013A06C)
SetType(0x8013A06C, "void FeAddNameTable__FPUci(unsigned char *Table, int Count)")
del_items(0x8013A19C)
SetType(0x8013A19C, "void FeDrawBuffer__Fv()")
del_items(0x8013A5B0)
SetType(0x8013A5B0, "void FeNewMenu__FP7FeTable(struct FeTable *Menu)")
del_items(0x8013A630)
SetType(0x8013A630, "void FePrevMenu__Fv()")
del_items(0x8013A6DC)
SetType(0x8013A6DC, "void FeSelUp__Fi(int No)")
del_items(0x8013A7C4)
SetType(0x8013A7C4, "void FeSelDown__Fi(int No)")
del_items(0x8013A8A8)
SetType(0x8013A8A8, "int FeGetCursor__Fv()")
del_items(0x8013A8BC)
SetType(0x8013A8BC, "void FeSelect__Fv()")
del_items(0x8013A900)
SetType(0x8013A900, "void FeMainKeyCtrl__FP7CScreen(struct CScreen *FeScreen)")
del_items(0x8013AA98)
SetType(0x8013AA98, "void InitDummyMenu__Fv()")
del_items(0x8013AAA0)
SetType(0x8013AAA0, "void InitFrontEnd__FP9FE_CREATE(struct FE_CREATE *CreateStruct)")
del_items(0x8013AB60)
SetType(0x8013AB60, "void FeInitMainMenu__Fv()")
del_items(0x8013ABBC)
SetType(0x8013ABBC, "void FeInitNewGameMenu__Fv()")
del_items(0x8013AC0C)
SetType(0x8013AC0C, "void FeNewGameMenuCtrl__Fv()")
del_items(0x8013AD40)
SetType(0x8013AD40, "void FeInitPlayer1ClassMenu__Fv()")
del_items(0x8013ADB4)
SetType(0x8013ADB4, "void FeInitPlayer2ClassMenu__Fv()")
del_items(0x8013AE28)
SetType(0x8013AE28, "void FePlayerClassMenuCtrl__Fv()")
del_items(0x8013AE70)
SetType(0x8013AE70, "void FeDrawChrClass__Fv()")
del_items(0x8013B30C)
SetType(0x8013B30C, "void FeInitNewP1NameMenu__Fv()")
del_items(0x8013B354)
SetType(0x8013B354, "void FeInitNewP2NameMenu__Fv()")
del_items(0x8013B39C)
SetType(0x8013B39C, "void FeNewNameMenuCtrl__Fv()")
del_items(0x8013B92C)
SetType(0x8013B92C, "void FeCopyPlayerInfoForReturn__Fv()")
del_items(0x8013B9FC)
SetType(0x8013B9FC, "void FeEnterGame__Fv()")
del_items(0x8013BA24)
SetType(0x8013BA24, "void FeInitLoadMemcardSelect__Fv()")
del_items(0x8013BA8C)
SetType(0x8013BA8C, "void FeInitLoadChar1Menu__Fv()")
del_items(0x8013BAF8)
SetType(0x8013BAF8, "void FeInitLoadChar2Menu__Fv()")
del_items(0x8013BB64)
SetType(0x8013BB64, "void FeInitDifficultyMenu__Fv()")
del_items(0x8013BBAC)
SetType(0x8013BBAC, "void FeDifficultyMenuCtrl__Fv()")
del_items(0x8013BC64)
SetType(0x8013BC64, "void FeInitBackgroundMenu__Fv()")
del_items(0x8013BCAC)
SetType(0x8013BCAC, "void FeInitBook1Menu__Fv()")
del_items(0x8013BCF8)
SetType(0x8013BCF8, "void FeInitBook2Menu__Fv()")
del_items(0x8013BD44)
SetType(0x8013BD44, "void FeBackBookMenuCtrl__Fv()")
del_items(0x8013BF40)
SetType(0x8013BF40, "void PlayDemo__Fv()")
del_items(0x8013BF54)
SetType(0x8013BF54, "void FadeFEOut__Fv()")
del_items(0x8013C018)
SetType(0x8013C018, "void DrawBackTSK__FP4TASK(struct TASK *T)")
del_items(0x8013C110)
SetType(0x8013C110, "void FrontEndTask__FP4TASK(struct TASK *T)")
del_items(0x8013C488)
SetType(0x8013C488, "void McMainCharKeyCtrl__Fv()")
del_items(0x8013C890)
SetType(0x8013C890, "void DrawFeTwinkle__Fii(int SelX, int SelY)")
del_items(0x8013C950)
SetType(0x8013C950, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8013C978)
SetType(0x8013C978, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x8013C9D4)
SetType(0x8013C9D4, "void ___7CScreen(struct CScreen *this, int __in_chrg)")
del_items(0x8013C9F4)
SetType(0x8013C9F4, "unsigned char CheckActive__4CPad(struct CPad *this)")
del_items(0x8013D5A4)
SetType(0x8013D5A4, "void InitCredits__Fv()")
del_items(0x8013D5E0)
SetType(0x8013D5E0, "int PrintCredits__FPciiiii(char *Str, int Y, int CharFade, int RFlag, int GFlag, int BFlag)")
del_items(0x8013DE04)
SetType(0x8013DE04, "void DrawCreditsTitle__Fiiiii(int TitleNo, int TitleFade, int TitleMode, int NextTitleNo, int Y)")
del_items(0x8013DED0)
SetType(0x8013DED0, "void DrawCreditsSubTitle__Fiiiii(int SubTitleNo, int SubTitleFade, int SubTitleMode, int NextSubTitleNo, int Y)")
del_items(0x8013DFAC)
SetType(0x8013DFAC, "void DoCredits__Fv()")
del_items(0x8013E230)
SetType(0x8013E230, "void PRIM_GetPrim__FPP8POLY_FT4(struct POLY_FT4 **Prim)")
del_items(0x8013E2AC)
SetType(0x8013E2AC, "int GetCharHeight__5CFontc(struct CFont *this, char ch)")
del_items(0x8013E2EC)
SetType(0x8013E2EC, "int GetCharWidth__5CFontc(struct CFont *this, char ch)")
del_items(0x8013E344)
SetType(0x8013E344, "void ___7CScreen_addr_8013E344(struct CScreen *this, int __in_chrg)")
del_items(0x8013E364)
SetType(0x8013E364, "struct FRAME_HDR *GetFr__7TextDati(struct TextDat *this, int FrNum)")
del_items(0x80142920)
SetType(0x80142920, "void endian_swap__FPUci(unsigned char *b, int byts)")
del_items(0x80142954)
SetType(0x80142954, "unsigned short to_sjis__Fc(char asc)")
del_items(0x801429D4)
SetType(0x801429D4, "char to_ascii__FUs(unsigned short sjis)")
del_items(0x80142A54)
SetType(0x80142A54, "void ascii_to_sjis__FPcPUs(char *asc, unsigned short *sjis)")
del_items(0x80142AD8)
SetType(0x80142AD8, "void sjis_to_ascii__FPUsPc(unsigned short *sjis, char *asc)")
del_items(0x80142B50)
SetType(0x80142B50, "void read_card_directory__Fi(int card_number)")
del_items(0x80142D5C)
SetType(0x80142D5C, "int test_card_format__Fi(int card_number)")
del_items(0x80142E4C)
SetType(0x80142E4C, "int checksum_data__FPci(char *buf, int size)")
del_items(0x80142E88)
SetType(0x80142E88, "int delete_card_file__Fii(int card_number, int file)")
del_items(0x80142F80)
SetType(0x80142F80, "int read_card_file__FiiiPc(int card_number, int file, int id, char *buf)")
del_items(0x80143144)
SetType(0x80143144, "int format_card__Fi(int card_number)")
del_items(0x80143208)
SetType(0x80143208, "int write_card_file__FiiPcT2PUcPUsiT4(int card_number, int id, char *name, char *title, unsigned char *icon, unsigned short *clut, int size, unsigned char *buf)")
del_items(0x80143560)
SetType(0x80143560, "void new_card__Fi(int card_number)")
del_items(0x801435F4)
SetType(0x801435F4, "void service_card__Fi(int card_number)")
del_items(0x8015D834)
SetType(0x8015D834, "int GetFileNumber__FiPc(int side, char *file_name)")
del_items(0x8015D8F4)
SetType(0x8015D8F4, "int DoSaveOptions__Fv()")
del_items(0x8015D948)
SetType(0x8015D948, "int DoSaveCharacter__FPc(char *savefilename)")
del_items(0x8015DA18)
SetType(0x8015DA18, "int DoSaveGame__Fv()")
del_items(0x8015DAD8)
SetType(0x8015DAD8, "void DoLoadGame__Fv()")
del_items(0x8015DB68)
SetType(0x8015DB68, "int DoFrontEndLoadCharacter__FPc(char *loadfilenameptr)")
del_items(0x8015DBC4)
SetType(0x8015DBC4, "void McInitLoadCard1Menu__Fv()")
del_items(0x8015DC10)
SetType(0x8015DC10, "void McInitLoadCard2Menu__Fv()")
del_items(0x8015DC5C)
SetType(0x8015DC5C, "void ChooseCardLoad__Fv()")
del_items(0x8015DD10)
SetType(0x8015DD10, "void McInitLoadCharMenu__Fv()")
del_items(0x8015DD38)
SetType(0x8015DD38, "void McInitLoadGameMenu__Fv()")
del_items(0x8015DD94)
SetType(0x8015DD94, "void McMainKeyCtrl__Fv()")
del_items(0x8015DED0)
SetType(0x8015DED0, "void ShowAlertBox__Fv()")
del_items(0x8015E0A4)
SetType(0x8015E0A4, "void ShowCardActionText__FPc(char *Text)")
del_items(0x8015E1E8)
SetType(0x8015E1E8, "bool GetLoadStatusMessage__FPc(char *file_name)")
del_items(0x8015E28C)
SetType(0x8015E28C, "bool GetSaveStatusMessage__FiPc(int fileblocks, char *file_name)")
del_items(0x8015E364)
SetType(0x8015E364, "void ShowGameFiles__FPciiG4RECT(char *filename, int saveflag, int Spacing, struct RECT ORect)")
del_items(0x8015E4CC)
SetType(0x8015E4CC, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8015E4EC)
SetType(0x8015E4EC, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8015E4F4)
SetType(0x8015E4F4, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8015E4FC)
SetType(0x8015E4FC, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)")
del_items(0x8015E508)
SetType(0x8015E508, "void ___6Dialog_addr_8015E508(struct Dialog *this, int __in_chrg)")
del_items(0x8015E530)
SetType(0x8015E530, "struct Dialog *__6Dialog_addr_8015E530(struct Dialog *this)")
del_items(0x8015E58C)
SetType(0x8015E58C, "int ILoad__Fv()")
del_items(0x8015E5E0)
SetType(0x8015E5E0, "void LoadQuest__Fi(int i)")
del_items(0x8015E6A8)
SetType(0x8015E6A8, "void ISave__Fi(int v)")
del_items(0x8015E708)
SetType(0x8015E708, "void SaveQuest__Fi(int i)")
del_items(0x8015E7D4)
SetType(0x8015E7D4, "int PSX_GM_SaveGame__FiPcT1(int card_number, char *name, char *title)")
del_items(0x8015EA74)
SetType(0x8015EA74, "int PSX_GM_LoadGame__FUcii(unsigned char firstflag, int card_number, int file)")
del_items(0x8015ED60)
SetType(0x8015ED60, "int PSX_CH_LoadGame__Fii(int card_number, int file)")
del_items(0x8015EEC4)
SetType(0x8015EEC4, "int PSX_CH_SaveGame__FiPcT1(int card_number, char *name, char *title)")
del_items(0x8015F044)
SetType(0x8015F044, "void RestorePads__Fv()")
del_items(0x8015F104)
SetType(0x8015F104, "void StorePads__Fv()")
del_items(0x8015F1C0)
SetType(0x8015F1C0, "void GetIcon__Fv()")
del_items(0x8015F1FC)
SetType(0x8015F1FC, "int PSX_OPT_LoadGame__Fiib(int card_number, int file, bool KillHandler)")
del_items(0x8015F260)
SetType(0x8015F260, "int PSX_OPT_SaveGame__FiPc(int card_number, char *filename)")
del_items(0x8015F2F8)
SetType(0x8015F2F8, "void LoadOptions__Fv()")
del_items(0x8015F368)
SetType(0x8015F368, "void SaveOptions__Fv()")
|
140068
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from . import util,dataloader
def default_eval(refer_loader,query_loader,model,class_acc=False):
fb_vector = None
if hasattr(model,'get_fb_vector'):
fb_vector = get_fb_vector(refer_loader,model)
centroid = get_class_centroid(refer_loader,model,fb_vector)
acc = get_prediction(query_loader,model,centroid,fb_vector,class_acc)
return acc
def get_class_centroid(loader,model,fb_vector=None):
way = len(loader.dataset.classes)
dim = model.dim
centroid = torch.zeros(way,dim).cuda()
for i, (inp,target) in enumerate(loader):
current_class_id = target[0]
if fb_vector is not None:
(image,_) = inp
image = image.cuda()
vectors = model.get_feature_vector(image,fb_vector)
elif isinstance(inp,list):
(image,mask) = inp
image = image.cuda()
mask = mask.cuda()
vectors = model.get_feature_vector(image,mask)
elif isinstance(inp,torch.Tensor):
inp = inp.cuda()
vectors = model.get_feature_vector(inp)
centroid[current_class_id] = vectors.mean(0).view(dim)
return centroid
def get_prediction(loader,model,centroid,fb_vector=None,class_acc=False):
data_source = loader.dataset
centroid = centroid.unsqueeze(0)
way = len(data_source.classes)
correct_count = torch.zeros(way).cuda()
counts = torch.zeros(way).cuda()
for class_id in data_source.targets:
counts[class_id] += 1
for i, (inp,target) in enumerate(loader):
current_class_id = target[0]
batch_size = target.size(0)
target = target.cuda()
if fb_vector is not None:
(image,mask) = inp
image = image.cuda()
out = model.get_feature_vector(image,fb_vector)
elif isinstance(inp,list):
(image,mask) = inp
image = image.cuda()
mask = mask.cuda()
out = model.get_feature_vector(image,mask)
elif isinstance(inp,torch.Tensor):
inp = inp.cuda()
out = model.get_feature_vector(inp)
out = out.unsqueeze(1)
neg_l2_distance = torch.sum((centroid-out)**2,2).neg().view(batch_size,way)
_, top1_pred = neg_l2_distance.topk(1)
correct_count[current_class_id] = torch.sum(torch.eq(top1_pred,target.view(batch_size,1)))
acc = (torch.sum(correct_count)/torch.sum(counts)).item()*100
if not class_acc:
return acc
else:
class_acc = torch.mean(correct_count/counts).item()*100
return acc,class_acc
def get_fb_vector(loader,model):
num_channel = model.num_channel
sum_fb_vector = torch.zeros(num_channel,2).cuda()
total_num = 0
for i,((inp,mask),class_id) in enumerate(loader):
total_num += inp.size(0)
inp=inp.cuda()
mask=mask.cuda()
fb_vector = model.get_fb_vector(inp,mask)
sum_fb_vector += fb_vector.sum(0)
fb_vector = sum_fb_vector/total_num
return fb_vector
def k_shot_eval(eval_loader,model,way,shot):
test_shot = 16
target = torch.LongTensor([i//test_shot for i in range(test_shot*way)]).cuda()
acc_list = []
for i, (inp,_) in enumerate(eval_loader):
if isinstance(inp,list):
(image_inp,mask) = inp
image_inp = image_inp.cuda()
mask = mask.cuda()
max_index = model.eval_k_shot(image_inp,mask,way,shot)
elif isinstance(inp,torch.Tensor):
inp = inp.cuda()
max_index = model.eval_k_shot(inp,way,shot)
acc = 100*torch.sum(torch.eq(max_index,target)).item()/test_shot/way
acc_list.append(acc)
mean,interval = util.eval(acc_list)
return mean,interval
def eval_test(model,pm,config,pm_na=None):
logger = config.logger
annot = config.eval_annot
logger.info('------------------------')
logger.info('evaluating:')
with torch.no_grad():
model.load_state_dict(torch.load(config.save_path))
model.eval()
refer_loader = dataloader.eval_dataloader(pm.test_refer,
annot=annot,annot_path=pm.annot_path)
query_loader = dataloader.eval_dataloader(pm.test_query,
annot=annot,annot_path=pm.annot_path)
test_acc = default_eval(refer_loader,query_loader,model=model)
logger.info(('the final test acc is %.3f') % (test_acc))
way = len(refer_loader.dataset.classes)
for shot in [1,5]:
eval_loader = dataloader.eval_k_shot_dataloader(pm.k_shot,
way=way,shot=shot,annot=annot,annot_path=pm.k_shot_annot_path)
mean,interval = k_shot_eval(eval_loader,model,way,shot)
logger.info('%d-way-%d-shot acc: %.2f\t%.2f'%(way,shot,mean,interval))
if pm_na is not None:
logger.info('------------------------')
logger.info('evaluating on NA:')
refer_loader = dataloader.eval_dataloader(pm_na.test_refer,
annot=annot,annot_path=pm_na.annot_path)
query_loader = dataloader.eval_dataloader(pm_na.test_query,
annot=annot,annot_path=pm_na.annot_path)
mean_acc,class_acc = default_eval(refer_loader,query_loader,
model=model,class_acc=True)
logger.info(('mean_acc is %.3f') % (mean_acc))
logger.info(('class_acc is %.3f') % (class_acc))
|
140101
|
import pytest
class Examples:
small_example = [
"""
describe "This":
before_each:
self.x = 5
describe "That":
before_each:
self.y = 6
describe "Meh":
after_each:
self.y = None
describe "Blah":pass
describe "async":
async before_each:
pass
async after_each:
pass
describe "Another":
before_each:
self.z = 8 """,
"""
class TestThis :
def setUp (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .x =5
class TestThis_That (TestThis ):
def setUp (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .y =6
class TestThis_That_Meh (TestThis_That ):
def tearDown (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each ();self .y =None
class TestThis_Blah (TestThis ):pass
class TestThis_Async (TestThis ):
async def setUp (self ):
await __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).async_before_each ();pass
async def tearDown (self ):
await __import__ ("noseOfYeti").tokeniser .TestSetup (super ()).async_after_each ();pass
class TestAnother :
def setUp (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .z =8
TestThis .is_noy_spec =True
TestThis_That .is_noy_spec =True
TestThis_That_Meh .is_noy_spec =True
TestThis_Blah .is_noy_spec =True
TestThis_Async .is_noy_spec =True
TestAnother .is_noy_spec =True
""",
]
big_example = [
"""
describe "This":
before_each:
self.x = 5
it 'should':
if x:
pass
else:
x += 9
async it 'supports async its':
pass
describe "That":
before_each:
self.y = 6
describe "Meh":
after_each:
self.y = None
it "should set __testname__ for non alpha names ' $^":
pass
it 'should':
if y:
pass
else:
pass
it 'should have args', arg1, arg2:
blah |should| be_good()
describe "Blah":pass
ignore "root level $pecial-method*+":
pass
describe "Another":
before_each:
self.z = 8
it 'should':
if z:
if u:
print "hello \
there"
else:
print "no"
else:
pass
async it 'supports level 0 async its':
pass
""",
"""
class TestThis :
def setUp (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .x =5
def test_should (self ):
if x :
pass
else :
x +=9
async def test_supports_async_its (self ):
pass
class TestThis_That (TestThis ):
def setUp (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .y =6
class TestThis_That_Meh (TestThis_That ):
def tearDown (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_after_each ();self .y =None
def test_should_set_testname_for_non_alpha_names (self ):
pass
def test_should (self ):
if y :
pass
else :
pass
def test_should_have_args (self ,arg1 ,arg2 ):
blah |should |be_good ()
class TestThis_Blah (TestThis ):pass
def ignore__root_level_pecial_method ():
pass
class TestAnother :
def setUp (self ):
__import__ ("noseOfYeti").tokeniser .TestSetup (super ()).sync_before_each ();self .z =8
def test_should (self ):
if z :
if u :
print "hello \
there"
else :
print "no"
else :
pass
async def test_supports_level_0_async_its ():
pass
TestThis .is_noy_spec =True
TestThis_That .is_noy_spec =True
TestThis_That_Meh .is_noy_spec =True
TestThis_Blah .is_noy_spec =True
TestAnother .is_noy_spec =True
ignore__root_level_pecial_method .__testname__ ="root level $pecial-method*+"
TestThis_That_Meh .test_should_set_testname_for_non_alpha_names .__testname__ ="should set __testname__ for non alpha names ' $^"
""",
]
comment_example = [
"""
assertTileHues(
self, tiles[0],
25.0, 25.0, 25.0, 25.0, 25.0, 25.0, # noqa
18.75, 18.75, 18.75, 18.75, 18.75, 18.75, # noqa
)
it "things":
assertTileHues(
self, tiles[1],
25.0, 25.0, 25.0, 25.0, 25.0, 25.0, # noqa
18.75, 18.75, 18.75, 18.75, 18.75, 18.75, # noqa
)
expected = {
# something
("D2", "<d"): lambda s: ("D2", "<d", s)
# something else
,
("B2", None): ("B2", None, None),
}
def t(n, f, c):
return expected[(n, f, c)]
""",
"""
assertTileHues (
self ,tiles [0 ],
25.0 ,25.0 ,25.0 ,25.0 ,25.0 ,25.0 ,# noqa
18.75 ,18.75 ,18.75 ,18.75 ,18.75 ,18.75 ,# noqa
)
def test_things ():
assertTileHues (
self ,tiles [1 ],
25.0 ,25.0 ,25.0 ,25.0 ,25.0 ,25.0 ,# noqa
18.75 ,18.75 ,18.75 ,18.75 ,18.75 ,18.75 ,# noqa
)
expected ={
# something
("D2","<d"):lambda s :("D2","<d",s )
# something else
,
("B2",None ):("B2",None ,None ),
}
def t (n ,f ,c ):
return expected [(n ,f ,c )]
""",
]
class Test_Tokeniser:
def test_gives_describes_noy_specific_attributes(self):
pytest.helpers.assert_example(
[
'describe "Something testable"',
"""
class TestSomethingTestable :pass
TestSomethingTestable .is_noy_spec =True
""",
]
)
class Test_Tokeniser_Complex:
def test_works_with_space(self):
pytest.helpers.assert_example(Examples.small_example)
def test_works_with_tabs(self):
pytest.helpers.assert_example(Examples.small_example, convert_to_tabs=True)
def test_keeps_good_indentation_in_body_with_spaces(self):
pytest.helpers.assert_example(Examples.big_example)
def test_keeps_good_indentation_in_body_with_tabs(self):
pytest.helpers.assert_example(Examples.big_example, convert_to_tabs=True)
def test_keeps_correct_indentation_with_comments(self):
pytest.helpers.assert_example(Examples.comment_example, convert_to_tabs=True)
|
140113
|
from pymoo.core.problem import Problem
from pymoo.problems.meta import MetaProblem
from pymoo.util.misc import at_least_2d_array
class ConstraintsAsPenalty(MetaProblem):
def __init__(self, problem, penalty=1e6):
super().__init__(problem)
self.penalty = penalty
# set the constraints to be zero, because they are now added to the objective
self.n_constr = 0
def do(self, x, out, *args, **kwargs):
self.problem.do(x, out, *args, **kwargs)
if self.problem.has_constraints():
F, G = at_least_2d_array(out["F"]), at_least_2d_array(out["G"])
CV = Problem.calc_constraint_violation(G)
out["__F__"] = F
out["__G__"] = G
out["__CV__"] = CV
out["F"] = F + self.penalty * CV
out["G"] = None
|
140120
|
import datetime
import json
import os
import discord
from discord.errors import HTTPException
from discord.ext import commands
class Logging(commands.Cog, description="Keep track of what members do in your server with this category."):
def __init__(self, bot):
self.bot = bot
with open("storage/modlogs_channels.json", "r") as modlogsFile:
self.modlogsFile = json.load(modlogsFile)
@commands.command(name="messagelogschannel",
aliases=["seteditedlogschannel", "setdeletedlogschannel",
"setlogschannel", "setlogchannel"],
description="Sets the channel in which edited/deleted message logs are sent.")
@commands.has_permissions(administrator=True)
async def set_modlogs_channel(self, ctx, *channel: discord.TextChannel):
if not channel:
            channel_id = self.modlogsFile.get(str(ctx.guild.id))
            if channel_id is None:
                return await ctx.send("Not set")
            embed = discord.Embed(title="Current Message Log channel", description=f"<#{channel_id}>", color=discord.Color.random())
            return await ctx.send(embed=embed)
        # the greedy *channel parameter packs the converted arguments into a tuple
        channel = channel[0]
        channel_id = channel.id
self.modlogsFile[str(ctx.guild.id)] = int(channel_id)
with open("storage/modlogs_channels.json", "w") as modlogsFile:
json.dump(self.modlogsFile, modlogsFile, indent=4)
        await ctx.send(embed=discord.Embed(description=f"Logs channel set as {channel.name} successfully. "
                                                       f"Edited/Deleted messages and profile changes will be shown in this channel.", color=discord.Color.green()))
# message edit event
@commands.Cog.listener()
async def on_message_edit(self, before, after):
message_channel_id = self.modlogsFile.get(str(before.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
message_link = f"https://discord.com/channels/{before.guild.id}/{before.channel.id}/{before.id}"
embed = discord.Embed(title=f"Message edited in {before.channel.name}",
color=before.author.color, timestamp=after.created_at)
embed.add_field(name="Before", value=before.content)
embed.add_field(name="After", value=after.content)
embed.add_field(
name="Link", value=f"__[Message]({message_link})__")
embed.set_footer(text=f"Author • {before.author} | Edited")
embed.set_thumbnail(url=before.author.avatar_url)
        # the edited timestamp appears on the right, so we don't need to specify it in the footer
try:
await message_channel.send(embed=embed)
        except:  # embeds don't have message.content, so this can raise an error
pass
# message delete event
@commands.Cog.listener()
async def on_message_delete(self, message):
message_channel_id = self.modlogsFile.get(str(message.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"Message deleted in {message.channel.name}",
color=message.author.color, timestamp=message.created_at)
embed.add_field(name="Content", value=message.content)
embed.set_footer(text=f"Author • {message.author} | Created")
embed.set_thumbnail(url=message.author.avatar_url)
if message_channel is None:
return
try:
await message_channel.send(embed=embed)
except HTTPException:
pass
@commands.Cog.listener()
async def on_bulk_message_delete(self, messages):
message_channel_id = (self.modlogsFile.get(str(messages[0].guild.id)))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
with open(f"storage/tempText/{messages[0].guild.id}.txt", "w") as temp_textfile:
for x in messages:
line1 = f"{x.channel.name} | From: {x.author} | Sent At: {x.created_at}\n"
temp_textfile.write(line1)
temp_textfile.write(f"{x.content}\n\n")
file = discord.File(f"./storage/tempText/{messages[0].guild.id}.txt")
await message_channel.send(file=file, content=f"{len(messages)} messages deleted. "
f"Sending information as text file.")
os.remove(f"./storage/tempText/{messages[0].guild.id}.txt")
# ban event
@commands.Cog.listener()
async def on_member_ban(self, guild, member):
message_channel_id = self.modlogsFile.get(str(guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"{member} has been banned from {guild.name}", description=f"ID: {member.id}",
timestamp=member.created_at)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
@commands.Cog.listener()
async def on_member_update(self, before, after):
message_channel_id = self.modlogsFile.get(str(before.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
# nickname change
if not before.nick == after.nick:
embed = discord.Embed(title=f"{before}'s nickname has been updated", description=f"ID: {before.id}",
color=after.color, timestamp=before.created_at)
embed.add_field(
name="Before", value=before.display_name)
embed.add_field(
name="After", value=after.display_name)
embed.set_thumbnail(url=after.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# role change
if not before.roles == after.roles:
embed = discord.Embed(title=f"{before}'s roles have been updated", description=f"ID: {before.id}",
color=after.color, timestamp=before.created_at)
before_roles_str, after_roles_str = "", ""
for x in before.roles[::-1]:
before_roles_str += f"{x.mention} "
for x in after.roles[::-1]:
after_roles_str += f"{x.mention} "
embed.add_field(
name="Before", value=before_roles_str)
embed.add_field(name="After", value=after_roles_str)
embed.set_thumbnail(url=after.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# unban event
@commands.Cog.listener()
async def on_member_unban(self, guild, member):
message_channel_id = self.modlogsFile.get(str(guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"{member} has been unbanned", description=f"ID: {member.id}",
color=discord.Color.green(),
timestamp=member.created_at)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text="Account created at")
await message_channel.send(embed=embed)
# join event
@commands.Cog.listener()
async def on_member_join(self, member):
message_channel_id = self.modlogsFile.get(str(member.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
embed = discord.Embed(title=f"{member} joined the the server.", color=discord.Color.green(),
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text="Join time")
await message_channel.send(embed=embed)
# leave event
@commands.Cog.listener()
async def on_member_remove(self, member):
message_channel_id = self.modlogsFile.get(str(member.guild.id))
if message_channel_id is None:
return
message_channel = self.bot.get_channel(int(message_channel_id))
if message_channel is None:
return
roles = [role for role in member.roles]
embed = discord.Embed(title=f"{member} has left the server.", color=discord.Color.dark_red(),
timestamp=datetime.datetime.utcnow(),
description=f"**Their account was created at:** {member.created_at}")
embed.add_field(name="Their roles", value=" ".join(
[role.mention for role in roles]))
embed.set_footer(text=f"Left at")
embed.set_thumbnail(url=member.avatar_url)
await message_channel.send(embed=embed)
def setup(bot):
bot.add_cog(Logging(bot))
|
140143
|
from sqlalchemy import Column, Integer, String
from .database import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
username = Column(String, unique=True)
|
140153
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScGaropabaSpider(FecamGazetteSpider):
name = "sc_garopaba"
FECAM_QUERY = "cod_entidade:98"
TERRITORY_ID = "4205704"
|