code stringlengths 281 23.7M |
|---|
def main():
(actor_id, n_actors, replay_ip, learner_ip) = get_environ()
args = argparser()
param_queue = Queue(maxsize=3)
procs = [Process(target=exploration, args=(args, actor_id, n_actors, replay_ip, param_queue)), Process(target=recv_param, args=(learner_ip, actor_id, param_queue))]
for p in proc... |
class Z3Model(Model):
def __init__(self, environment, z3_model):
Model.__init__(self, environment)
self.z3_model = z3_model
self.converter = Z3Converter(environment, z3_model.ctx)
def get_value(self, formula, model_completion=True):
titem = self.converter.convert(formula)
... |
class HashingDataset(Dataset):
def __init__(self, data_path, img_filename, label_filename, transform=transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])):
self.img_path = data_path
self.transform = transform
img_filepath = os.path.join(data_path, i... |
def main_worker(rank, world_size, cfg):
print('==> Start rank:', rank)
local_rank = (rank % 8)
cfg.local_rank = local_rank
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='nccl', init_method=f'tcp://localhost:{cfg.port}', world_size=world_size, rank=rank)
(logger, writer) = (No... |
class GLobalAdaptiveNormalization(nn.Module):
def __init__(self, channels):
super().__init__()
self.inorm = InstanceNorm()
self.msap = SelfAttentionPooling(channels)
self.ssap = SelfAttentionPooling(channels)
def forward(self, x, means, stds):
mean = self.msap(means).unsq... |
def filter_marks(marks: List[RelativeMark[_ItemType]], all_items: List[Item]) -> List[RelativeMark[_ItemType]]:
result = []
for mark in marks:
if ((mark.item in all_items) and (mark.item_to_move in all_items)):
result.append(mark)
else:
mark.item_to_move.dec_rel_marks()
... |
_module()
class WideRes38(nn.Module):
def __init__(self):
super(WideRes38, self).__init__()
self.conv1a = nn.Conv2d(3, 64, 3, padding=1, bias=False)
self.b2 = ResBlock(64, 128, 128, stride=2)
self.b2_1 = ResBlock(128, 128, 128)
self.b2_2 = ResBlock(128, 128, 128)
self... |
def run(settings):
settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'
settings.batch_size = 64
settings.num_workers = 8
settings.print_interval = 1
settings.normalize_mean = [0.485, 0.456, 0.406]
settings.normalize_std = [0.229, 0.224, 0.225]
... |
def train(epoch):
print(('Epoch: %d' % epoch))
net.train()
train_loss = 0
correct = 0
total = 0
for (batch_idx, (inputs, targets)) in enumerate(trainloader):
(inputs, targets) = (inputs.cuda(), targets.cuda())
adv_x = Linf_PGD(inputs, targets, net, opt.steps, opt.max_norm)
... |
def fidelityMatrixClustersRandomShuffled(qnnArch, numTrainingPairs):
kind = 'clustersRandomShuffled'
trainingDataInput = []
for i in range(numTrainingPairs):
t = randomQubitState(qnnArch[0])
trainingDataInput.append(t)
trainingDataOutput = []
width = 2
lineIndex = list(range(0, (... |
def googlenet(pretrained=False, progress=True, quantize=False, **kwargs):
if pretrained:
if ('transform_input' not in kwargs):
kwargs['transform_input'] = True
if ('aux_logits' not in kwargs):
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.... |
(scope='module')
def inline_query_result_venue():
return InlineQueryResultVenue(TestInlineQueryResultVenueBase.id_, TestInlineQueryResultVenueBase.latitude, TestInlineQueryResultVenueBase.longitude, TestInlineQueryResultVenueBase.title, TestInlineQueryResultVenueBase.address, foursquare_id=TestInlineQueryResultVenu... |
def test_service_browser_uses_non_strict_names():
def on_service_state_change(zeroconf, service_type, state_change, name):
pass
zc = r.Zeroconf(interfaces=['127.0.0.1'])
browser = ServiceBrowser(zc, ['_tivo-videostream._tcp.local.'], [on_service_state_change])
browser.cancel()
with pytest.ra... |
class CommandSequenceTest(ParserTest):
def setUp(self):
ParserTest.setUp(self)
warnings.simplefilter('error', category=KickstartParseWarning)
def tearDown(self):
warnings.resetwarnings()
ParserTest.tearDown(self)
def get_parser(self):
handler = makeVersion(self.versio... |
()
('--crs', type=str, default=None, help="The projection of the map.\n\n\x08\n- integer (4326,3857 ...epsg code)\x08\n- string (web, equi7_eu ...Maps.CRS name)\n\x08\nThe default is 'web' (e.g. Web Mercator Projection).\n\n\x08\n")
('--file', type=str, default='', help='Path to a file that should be plotted. \n\n\x08\... |
def bump_semver2_version(current_version: str, part=None) -> str:
if (not semver.VersionInfo.isvalid(current_version)):
click.echo(f'Current version {current_version} is not a valid semver2 version. Please amend it')
parsed_current_version = semver.VersionInfo.parse(current_version)
next_version = '... |
class UserPersistsInPartialPipeline(BaseActionTest):
def test_user_persists_in_partial_pipeline_kwargs(self):
user = User(username='foobar1')
user.email = ''
self.strategy.set_settings({'SOCIAL_AUTH_PIPELINE': ('social_core.pipeline.social_auth.social_details', 'social_core.pipeline.social_a... |
class NamedTupleTests(BaseTestCase):
class NestedEmployee(NamedTuple):
name: str
cool: int
def test_basics(self):
Emp = NamedTuple('Emp', [('name', str), ('id', int)])
self.assertIsSubclass(Emp, tuple)
joe = Emp('Joe', 42)
jim = Emp(name='Jim', id=1)
self.... |
.parametrize('prefer_grpc', [False, True])
def test_empty_vector(prefer_grpc):
client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
client.recreate_collection(collection_name=COLLECTION_NAME, vectors_config={}, timeout=TIMEOUT)
client.upsert(collection_name=COLLECTION_NAME, points=[PointStruct(id... |
def extract_status_change(chat_member_update: ChatMemberUpdated) -> Optional[Tuple[(bool, bool)]]:
status_change = chat_member_update.difference().get('status')
(old_is_member, new_is_member) = chat_member_update.difference().get('is_member', (None, None))
if (status_change is None):
return None
... |
def load_EEZ(countries_codes, geo_crs, EEZ_gpkg='./data/eez/eez_v11.gpkg'):
if (not os.path.exists(EEZ_gpkg)):
raise Exception(f'File EEZ {EEZ_gpkg} not found, please download it from and copy it in {os.path.dirname(EEZ_gpkg)}')
geodf_EEZ = gpd.read_file(EEZ_gpkg, engine='pyogrio').to_crs(geo_crs)
... |
def pvefficiency_adr(effective_irradiance, temp_cell, k_a, k_d, tc_d, k_rs, k_rsh):
k_a = np.array(k_a)
k_d = np.array(k_d)
tc_d = np.array(tc_d)
k_rs = np.array(k_rs)
k_rsh = np.array(k_rsh)
G_REF = np.array(1000.0)
s = (effective_irradiance / G_REF)
T_REF = np.array(25.0)
dt = (tem... |
class Subscriptions(models.Model):
class Meta():
table = 'subscriptions'
guild_id = fields.BigIntField(pk=True)
log_channel_id = fields.BigIntField()
slug = fields.CharField(max_length=20)
balance = fields.IntField(default=0)
upi_id = fields.CharField(max_length=25)
plans: fields.Man... |
class EarlyMean(nn.Module):
def __init__(self, features=64, feature_extractor=Features4Layer, activation=relu):
super(EarlyMean, self).__init__()
self.features = feature_extractor(features, activation=activation)
def forward(self, frame_1, frame_2, frame_3, frame_4, frame_5):
frame_1 = f... |
_subclass
class USBInstr(ResourceName):
    """VISA-style resource name for a USB INSTR resource.

    Presumably rendered/parsed in the form
    'USB<board>::<manufacturer_id>::<model_code>::<serial_number>::<usb_interface_number>::INSTR'
    -- TODO confirm against ResourceName's formatting logic (not visible here).
    """

    # Per-instance address components; all kept as strings with defaults.
    board: str = '0'
    manufacturer_id: str = ''
    model_code: str = ''
    serial_number: str = ''
    usb_interface_number: str = '0'
    # Class-level constants identifying this resource kind.
    interface_type: ClassVar[str] = 'USB'
    resource_class: ClassVar[str] = 'INSTR'
    # NOTE(review): flag suggests the '::INSTR' suffix may be omitted when
    # parsing this resource name -- verify against ResourceName's parser.
    is_rc_optional: ClassVar[bool] = True
class PNASNet(nn.Module):
def __init__(self, cell_type, num_cells, num_planes):
super(PNASNet, self).__init__()
self.in_planes = num_planes
self.cell_type = cell_type
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm... |
def tail_log_file(filename, offset, nlines, callback=None):
def seek_file(filehandle, offset, nlines, callback):
lines_found = []
buffer_size = 4098
block_count = (- 1)
while (len(lines_found) < (offset + nlines)):
try:
filehandle.seek((block_count * buffe... |
class StripeOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.stripe.StripeOAuth2'
account_data_url = '
access_token_body = json.dumps({'stripe_publishable_key': 'pk_test_foobar', 'access_token': 'foobar', 'livemode': False, 'token_type': 'bearer', 'scope': 'read_only', 'refresh_token': 'rt_fooba... |
def test_build_overviews_bilinear(data):
inputfile = str(data.join('RGB.byte.tif'))
with rasterio.open(inputfile, 'r+') as src:
overview_factors = [2, 4]
src.build_overviews(overview_factors, resampling=OverviewResampling.bilinear)
assert (src.overviews(1) == [2, 4])
assert (src.... |
class Views():
def __init__(self, client):
self.client = client
async def wildcard(self, req):
raise web.HTTPFound('/')
async def home(self, req):
if (len(chat_ids) == 1):
raise web.HTTPFound(f"{chat_ids[0]['alias_id']}")
chats = []
for chat in chat_ids:
... |
def test_azimuthal_equidistant_operation():
aeop = AzimuthalEquidistantConversion(latitude_natural_origin=1, longitude_natural_origin=2, false_easting=3, false_northing=4)
assert (aeop.name == 'unknown')
assert (aeop.method_name == 'Modified Azimuthal Equidistant')
assert (_to_dict(aeop) == {'Latitude o... |
def load_checkpoint(model, checkpoint_path, model_key='model|module|state_dict', strict=True):
state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
if (('positional_embedding' in state_dict) and (not hasattr(model, 'positional_embedding'))):
state_dict = convert_to_custom_... |
class PersonalAccessTokenManager(DeleteMixin, RetrieveMixin, RESTManager):
_path = '/personal_access_tokens'
_obj_cls = PersonalAccessToken
_list_filters = ('user_id',)
def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> PersonalAccessToken:
return cast(PersonalAccessToken, ... |
def test_together():
together_api = TogetherAPIBackend()
response = together_api.completions(prompt='What is the capital of France?', max_tokens=10, n=1, stop_token='\n', temperature=0.7, engine='togethercomputer/llama-2-70b')
pprint(response)
wrapper = OpenSourceAPIWrapper()
response = wrapper.call... |
class DatabasesEndpoint(Endpoint):
def list(self, **kwargs: Any) -> SyncAsync[Any]:
return self.parent.request(path='databases', method='GET', query=pick(kwargs, 'start_cursor', 'page_size'), auth=kwargs.get('auth'))
def query(self, database_id: str, **kwargs: Any) -> SyncAsync[Any]:
return self... |
class StereoDepthCameraConfig(CameraConfig):
def __init__(self, *args, min_depth: float=0.05, **kwargs):
super().__init__(*args, **kwargs)
self.min_depth = min_depth
def rgb_resolution(self):
return (self.width, self.height)
def rgb_intrinsic(self):
fy = ((self.height / 2) / ... |
class Interp1D(EditableModule):
def __init__(self, x: torch.Tensor, y: Optional[torch.Tensor]=None, method: Union[(str, Callable, None)]=None, assume_sorted: bool=False, **fwd_options):
if (method is None):
method = 'cspline'
methods = {'cspline': CubicSpline1D, 'linear': LinearInterp1D}... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert Open Images annotations into MS Coco format')
parser.add_argument('-p', '--path', dest='path', help='path to openimages data', type=str)
parser.add_argument('--version', default='challenge_2019', choices=['v4', 'v5', 'v6', 'challenge_20... |
class StubClass(StubBase):
def __init__(self, proxy_class):
self.proxy_class = proxy_class
def __call__(self, *args, **kwargs):
if (len(args) > 0):
spec = inspect.getargspec(self.proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
arg... |
class FasterRCNNTest(unittest.TestCase):
def setUp(self):
self.model = get_model_no_weights('COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml')
def test_flop(self):
inputs = [{'image': torch.rand(3, 800, 800)}]
res = flop_count_operators(self.model, inputs)
self.assertEqual(int(res['c... |
class MdStatCollector(diamond.collector.Collector):
MDSTAT_PATH = '/proc/mdstat'
def get_default_config_help(self):
config_help = super(MdStatCollector, self).get_default_config_help()
return config_help
def get_default_config(self):
config = super(MdStatCollector, self).get_default_... |
def energy_elec(ks, dm=None, h1e=None, vhf=None):
if (dm is None):
dm = ks.make_rdm1()
if (h1e is None):
h1e = ks.get_hcore()
if ((vhf is None) or (getattr(vhf, 'ecoul', None) is None)):
vhf = ks.get_veff(ks.mol, dm)
if (not (isinstance(dm, numpy.ndarray) and (dm.ndim == 2))):
... |
class GigapetaCom(SimpleDownloader):
__name__ = 'GigapetaCom'
__type__ = 'downloader'
__version__ = '0.09'
__status__ = 'testing'
__pattern__ = '
__config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallbac... |
def replace_relu6_with_relu(sess: tf.compat.v1.Session, relu6_op: tf.Operation):
with sess.graph.as_default():
assert (len(relu6_op.inputs) == 1)
new_tensor = tf.nn.relu(relu6_op.inputs[0])
relu_op = new_tensor.op
relu_outputs = list(relu_op.outputs)
relu6_outputs = list(relu... |
def main(args):
ps = torch.load(args.load_path, map_location='cpu')
obj_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67... |
class StatFileObj(StatsObj):
def __init__(self, start_time=None):
StatsObj.__init__(self)
for attr in self._stat_file_attrs:
self.set_stat(attr, 0)
if (start_time is None):
start_time = Time.getcurtime()
self.StartTime = start_time
self.Errors = 0
... |
def compute_tables(b, huffman_groups, symbols_in_use):
groups_lengths = []
for j in range(huffman_groups):
length = b.readbits(5)
lengths = []
for i in range(symbols_in_use):
if (not (0 <= length <= 20)):
raise 'Bzip2 Huffman length code outside range 0..20'
... |
def visualize_changes_in_model_after_and_before_cle():
model = models.resnet18(pretrained=True).to(torch.device('cpu'))
model = model.eval()
model_copy = copy.deepcopy(model)
results_dir = './visualization'
batch_norm_fold.fold_all_batch_norms(model_copy, (1, 3, 224, 224))
equalize_model(model, ... |
def do_kitti_detection_evaluation(dataset, predictions, output_folder, logger):
predict_folder = os.path.join(output_folder, 'data')
mkdir(predict_folder)
for (image_id, prediction) in predictions.items():
predict_txt = (image_id + '.txt')
predict_txt = os.path.join(predict_folder, predict_t... |
def create_object_vocab(args, image_ids, objects, aliases, vocab):
image_ids = set(image_ids)
print(('Making object vocab from %d training images' % len(image_ids)))
object_name_counter = Counter()
for image in objects:
if (image['image_id'] not in image_ids):
continue
for ob... |
def remove_small_images(args, image_id_to_image, splits):
new_splits = {}
for (split_name, image_ids) in splits.items():
new_image_ids = []
num_skipped = 0
for image_id in image_ids:
image = image_id_to_image[image_id]
(height, width) = (image['height'], image['wi... |
_model
def ecaresnext26tn_32x4d(pretrained=False, **kwargs):
model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered_narrow', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
return _create_resnet('ecaresnext26tn_32x4d', pretrained,... |
class GeoPolygon(BaseModel, extra='forbid'):
exterior: 'GeoLineString' = Field(..., description='Geo filter request Matches coordinates inside the polygon, defined by `exterior` and `interiors`')
interiors: Optional[List['GeoLineString']] = Field(default=None, description='Interior lines (if present) bound hol... |
class GradientSharedStep(ArrayStepShared):
def __init__(self, vars, model=None, blocked=True, dtype=None, logp_dlogp_func=None, **pytensor_kwargs):
model = modelcontext(model)
if (logp_dlogp_func is None):
func = model.logp_dlogp_function(vars, dtype=dtype, **pytensor_kwargs)
els... |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
norm_func = (ll.FrozenBatchNorm2d if config.MODEL.FIXNORM else ll.BatchNorm2d)
self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, 1, bias=False)... |
class TestCustomSerializer(TestCase):
def test_set_custom_functions(self):
jsons.set_serializer((lambda *_, **__: 'custom_serializer'), str)
jsons.set_deserializer((lambda *_, **__: 'custom_deserializer'), str)
dumped = jsons.dump('serialize me')
loaded = jsons.load(dumped)
s... |
def parse_args():
parser = argparse.ArgumentParser(description='Generate training, val, and test set of NAF ')
parser.add_argument('root_path', help='Root dir path of NAF')
parser.add_argument('--preserve-vertical', help='Preserve samples containing vertical texts', action='store_true')
parser.add_argum... |
class NCOLCILowResData(NCOLCIBase):
rows_name = 'tie_rows'
cols_name = 'tie_columns'
def __init__(self, filename, filename_info, filetype_info, engine=None, **kwargs):
super().__init__(filename, filename_info, filetype_info, engine)
self.l_step = self.nc.attrs['al_subsampling_factor']
... |
def training(config):
if (not os.path.exists(os.path.join(config.split_dir, 'splits.pkl'))):
create_splits(output_dir=config.split_dir, image_dir=config.data_dir)
if (config.saved_model_path is not None):
config.load_model = True
exp = SegExperiment(config=config, name=config.name, n_epochs=... |
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
self.b1 = nn.Sequential(nn.Conv2d(in_planes, n1x1, kernel_size=1), nn.BatchNorm2d(n1x1), nn.ReLU(True))
self.b2 = nn.Sequential(nn.Conv2d(in_planes, n... |
def report_acc(score_by_lib: Dict[(str, List[ScoreRecord])]):
num_problem = 0
for lib in score_by_lib:
num_problem += len(score_by_lib[lib])
print(f'Total Questions: {num_problem}')
avg_score_total = 0
for lib in score_by_lib:
avg_score_lib = 0
for problem_id in range(len(sco... |
class SetupReader():
DEFAULT: ClassVar[dict[(str, Any)]] = {'name': None, 'version': None, 'description': None, 'install_requires': [], 'extras_require': {}, 'python_requires': None}
FILES: ClassVar[list[str]] = ['setup.py', 'setup.cfg']
def read_from_directory(cls, directory: Path) -> dict[(str, Any)]:
... |
class RandomCrop(object):
def __init__(self, size, padding=0):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
def __call__(self, img, mask):
if (self.padding > 0):
img = Ima... |
class WeightedSoftmaxClassificationLossTest(tf.test.TestCase):
def testReturnsCorrectLoss(self):
prediction_tensor = tf.constant([[[(- 100), 100, (- 100)], [100, (- 100), (- 100)], [0, 0, (- 100)], [(- 100), (- 100), 100]], [[(- 100), 0, 0], [(- 100), 100, (- 100)], [(- 100), 100, (- 100)], [100, (- 100), (... |
.parametrize('sni_config', [{'icon_size': 35}], indirect=True)
.usefixtures('dbus')
def test_statusnotifier_icon_size(manager_nospawn, sni_config):
manager_nospawn.start(sni_config)
widget = manager_nospawn.c.widget['statusnotifier']
assert (widget.info()['width'] == 0)
win = manager_nospawn.test_window... |
class TestInvalidUsageNearestNeighborResampler():
.parametrize('input_data', [lazy_fixture('data_2d_float32_xarray_dask'), lazy_fixture('data_3d_float32_xarray_dask')])
def test_mismatch_geo_data_dims(self, area_def_stere_source, area_def_stere_target, input_data):
resampler = KDTreeNearestXarrayResampl... |
class InstructorTrainer(Seq2SeqTrainer):
def _get_train_sampler(self):
if ((self.train_dataset is None) or (not has_length(self.train_dataset))):
return None
generator = None
if (self.args.world_size <= 1):
generator = torch.Generator()
if (self.args.data_... |
def encode_complex(val):
(real, imag) = (val.real, val.imag)
real = (int(real) if real.is_integer() else real)
imag = (int(imag) if imag.is_integer() else imag)
tidy_real = misc.tidy_up_float(real)
tidy_imag = misc.tidy_up_float(imag)
return '{}{}{}i'.format(tidy_real, ('' if (imag < 0) else '+'... |
def test_scale_preservation():
ob = gfx.WorldObject()
s = (1, (- 2), 3)
ob.local.scale = s
npt.assert_array_almost_equal(ob.local.scale, s)
child = gfx.WorldObject()
ob.add(child)
npt.assert_array_almost_equal(child.local.scale, [1, 1, 1])
npt.assert_array_almost_equal(child.world.scale,... |
def clustering_prompt(items, prompt):
def rmreturn(s):
s = s.replace('\n\n', ' ')
s = s.replace('\n', ' ')
return s.strip()
cluster_prompts = []
for item in items:
query = item['question']
backinfo = rmreturn(item['output'][0])
item_prompt = prompt.replace('{q... |
class SuitColor(BitPackEnum, Enum):
long_name: str
PLAYER_1 = 'player1'
PLAYER_2 = 'player2'
PLAYER_3 = 'player3'
PLAYER_4 = 'player4'
RANDOM = 'random'
def ui_icons(self) -> dict[(str, Path)]:
base_path = RandovaniaGame.METROID_PRIME_ECHOES.data_path.joinpath('assets', 'suit_renders... |
(eq=False, kw_only=True)
class LiveCollection():
live_manager: LiveManager
_n_collected_tasks: int = 0
_n_errors: int = 0
(hookwrapper=True)
def pytask_collect(self) -> Generator[(None, None, None)]:
self.live_manager.start()
(yield)
def pytask_collect_file_log(self, reports: lis... |
class GBlock(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size=[3, 3], padding=1, stride=1, n_class=None, bn=True, activation=F.relu, upsample=True, downsample=False, z_dim=148):
super().__init__()
self.conv0 = SpectralNorm(nn.Conv2d(in_channel, out_channel, kernel_size, stride, pa... |
class ProtocolReceiver(Receiver):
protocol = PROTOCOL_PQv3
__slots__ = ('send', 'view')
def __init__(self, send):
super().__init__()
self.send = send
self.view = memoryview(b'')
def accept(self, data):
self.view = data
def transmit(self):
while self.view:
... |
def decode_nvfancontrol():
nvfan = {}
current_fan = ''
if os.path.isfile('/etc/nvfancontrol.conf'):
with open('/etc/nvfancontrol.conf', 'r') as fp:
for line in fp:
match_name = re.search(FAN_NVFAN_NAME_RE, line.strip())
match_values = re.search(FAN_NVFAN_O... |
def test_groupby_aggregate_with_start_state(stream):
example = pd.DataFrame({'name': [], 'amount': []})
sdf = DataFrame(stream, example=example).groupby(['name'])
output0 = sdf.amount.sum(start=None).stream.gather().sink_to_list()
output1 = sdf.amount.mean(with_state=True, start=None).stream.gather().si... |
def convert_date_to_nominalization(thought):
if ('Today is 04/19/1969.' in thought):
return "The date's recognition as 04/19/1969 and the addition of 24 hours, which equates to one day, results in the identification of 04/20/1969 as the next day."
if ('One day after 06/01/1943 is 06/02/1943,' in thought... |
def runClean(args):
(nbFileDeleted, nbFileToDelete) = (0, 0)
exts = (PASSWORD_EXTENSION_FILE, CHALLENGE_EXT_FILE)
pathOfOdat = os.path.dirname(os.path.abspath(__file__))
for (root, dirs, files) in os.walk(pathOfOdat):
for currentFile in files:
logging.debug('Processing file: {0}'.for... |
def kep2xyz(kep):
sinT = np.sin(kep['theta'])
cosT = np.cos(kep['theta'])
sinI = np.sin(kep['eqinc'])
cosI = np.cos(kep['eqinc'])
sinS = np.sin(kep['ascn'])
cosS = np.cos(kep['ascn'])
xmx = ((- sinS) * cosI)
xmy = (cosS * cosI)
ux = ((xmx * sinT) + (cosS * cosT))
uy = ((xmy * sin... |
class Ui_Camera(object):
def setupUi(self, Camera):
Camera.setObjectName('Camera')
Camera.resize(668, 422)
self.centralwidget = QtWidgets.QWidget(Camera)
self.centralwidget.setObjectName('centralwidget')
self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
se... |
class Migration(migrations.Migration):
    """Add a `voucher_email_sent_at` timestamp to the speaker voucher model."""

    dependencies = [
        ('schedule', '0035_voucher_code_generation_for_speakers'),
    ]

    operations = [
        migrations.AddField(
            model_name='speakervoucher',
            name='voucher_email_sent_at',
            field=models.DateTimeField(
                blank=True,
                help_text='When the email was last sent',
                null=True,
            ),
        ),
    ]
class BondChargeCorrectionHandler(ParameterHandler):
class BCCType(ParameterType):
_VALENCE_TYPE = 'Bond'
_ELEMENT_NAME = 'BCC'
charge_correction = ParameterAttribute(unit=unit.elementary_charge)
_TAGNAME = 'BondChargeCorrection'
_INFOTYPE = BCCType
_OPENMMTYPE = openmm.Nonbonded... |
class TimeEvent(Event, metaclass=ABCMeta):
def next_trigger_time(self, now: datetime) -> datetime:
pass
def notify(self, listener) -> None:
pass
def __eq__(self, other):
if (self is other):
return True
if (type(self) != type(other)):
return False
... |
def convLSTM(input, hidden, filters, kernel, scope):
with tf.variable_scope(scope, initializer=tf.truncated_normal_initializer(stddev=0.1)):
cell = BasicConvLSTMCell.BasicConvLSTMCell([input.get_shape()[1], input.get_shape()[2]], kernel, filters)
if (hidden is None):
hidden = cell.zero_s... |
class SingleDivideGWPreconditioner(Preconditioner):
def __init__(self, x, num_bits, left=True):
super(SingleDivideGWPreconditioner, self).__init__(x, num_bits, left)
def transform(self, x, debug=False):
torch.set_printoptions(linewidth=100)
with torch.no_grad():
mn = min((x.m... |
class Effect4282(BaseEffect):
type = 'passive'
def handler(fit, module, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Hybrid Turret')), 'damageMultiplier', module.getModifiedItemAttr('subsystemBonusGallenteOffensive2'), skill='Gallente Off... |
def inference(train_inputs_x, vocabulary_size, embedding_size):
with tf.name_scope('embeddings'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], (- 1.0), 1.0))
embed_x = tf.nn.embedding_lookup(embeddings, train_inputs_x)
nce_weights = tf.Variable(tf.truncated_norma... |
class TestCompareOutputs(TestCase):
def test_compare_outputs_surface_form(self):
options = [{'surface form': cap} for cap in ['false', 'differential', 'algebraic']]
model_combos = [[pybamm.lithium_ion.SPM(opt) for opt in options], [pybamm.lithium_ion.DFN(opt) for opt in options]]
for models ... |
def test_get_enabled_param_quantizers(cpu_session):
sim = QuantizationSimModel(cpu_session, ['conv2d_input'], ['keras_model/Softmax'], use_cuda=False)
sim.compute_encodings(forward_pass_callback, None)
enabled_quantizers = sim.get_enabled_parameter_quantizers()
try:
assert (len(enabled_quantizer... |
def game_generator_queue(path='./', random_map=False, question_type='location', max_q_size=30, wait_time=0.5, nb_worker=1):
q = mp.Queue()
nb_worker = min(nb_worker, (mp.cpu_count() - 1))
def data_generator_task(p_num):
counter = 0
while True:
np.random.seed(((p_num * 12345) + co... |
def test_basic():
assert (get_dataclass_shape(BasicDataclass) == Shape(input=InputShape(constructor=BasicDataclass, kwargs=None, fields=(InputField(type=int, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=InitVarInt, id='b', default=NoDefault(), is_requi... |
class EscapedKeyAction(KeyAction):
def _get_key_info(self):
vkey_scan = LoByte(win32functions.VkKeyScanW(self.key))
return (vkey_scan, win32functions.MapVirtualKeyW(vkey_scan, 0), 0)
def key_description(self):
return 'KEsc {}'.format(self.key)
def run(self):
for inp in self.G... |
def split_and_match_files_list(paths: Sequence[str]) -> list[str]:
expanded_paths = []
for path in paths:
path = expand_path(path.strip())
globbed_files = fileglob.glob(path, recursive=True)
if globbed_files:
expanded_paths.extend(globbed_files)
else:
expa... |
def drinkingwaste_to_detectwaste(label):
metals_and_plastics = ['PET', 'HDPEM', 'AluCan']
glass = ['Glass']
if (label in metals_and_plastics):
label = 'metals_and_plastics'
elif (label in glass):
label = 'glass'
else:
print(label, 'is non-drinkingwaste label')
label =... |
def text_fields(**kwargs):
n_feats = kwargs['n_feats']
include_lengths = kwargs['include_lengths']
base_name = kwargs['base_name']
pad = kwargs.get('pad', '<blank>')
bos = kwargs.get('bos', '<s>')
eos = kwargs.get('eos', '</s>')
truncate = kwargs.get('truncate', None)
fields_ = []
fe... |
def get_upgrade_response(connection):
    """Read an HTTP Upgrade (101 Switching Protocols) response from a socket.

    Receives from *connection* until the end of the HTTP header block
    (b'\\r\\n\\r\\n') has been seen, checks that the status code in the
    status line is 101, and returns any bytes that arrived after the
    header block (the first bytes of the upgraded-protocol stream).

    Parameters:
        connection: a socket-like object exposing ``recv(bufsize) -> bytes``.

    Returns:
        bytes received beyond the header terminator (may be empty).

    Raises:
        RuntimeError: if the peer closes the connection before the headers
            are complete, or if the response status is not 101.
    """
    data = b''
    while b'\r\n\r\n' not in data:
        chunk = connection.recv(8192)
        if not chunk:
            # recv() returning b'' means the peer closed the connection.
            # Without this check the loop would spin forever appending
            # empty chunks when the response is cut short.
            raise RuntimeError('Connection closed before upgrade response completed')
        data += chunk
    (headers, rest) = data.split(b'\r\n\r\n', 1)
    # Status line is e.g. b'HTTP/1.1 101 Switching Protocols'; the second
    # whitespace-separated token is the status code.
    split_headers = headers.split()
    if split_headers[1] != b'101':
        raise RuntimeError('Not upgrading!')
    return rest
def tune_mnist(data_dir, num_samples=10, num_epochs=10, num_workers=1, use_gpu=False, **trainer_kwargs):
config = {'layer_1': tune.choice([32, 64, 128]), 'layer_2': tune.choice([64, 128, 256]), 'lr': tune.loguniform(0.0001, 0.1), 'batch_size': tune.choice([32, 64, 128])}
metrics = {'loss': 'ptl/val_loss', 'acc'... |
def test_vector_grid():
import folium
from folium.plugins import VectorGridProtobuf
from streamlit_folium import _get_map_string
m = folium.Map()
url = '
VectorGridProtobuf(url, 'test').add_to(m)
leaflet = _get_map_string(m)
assert ('var vector_grid_protobuf_div_1 = L.vectorGrid.protobuf... |
class CallGraphWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallGraphWindow, self).__init__(parent)
self.model = LookupCreateModel('Context-Sensitive Call Graph', (lambda x=glb: CallGraphModel(x)))
self.view.setModel(self.model)
for (c, w) in ((0, 250), (1, 100... |
.parametrize('line', ['Standards Track', 'Informational', 'Process', 'accepted', 'active', 'april fool!', 'deferred', 'draft', 'final', 'provisional', 'rejected', 'superseded', 'withdrawn'])
def test_validate_status_invalid(line: str):
warnings = [warning for (_, warning) in check_peps._validate_status(1, line)]
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.