function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def write_to_tensorboard(self, tb_writer, tb_tag, step):
    """Log the top-i success rate for every tracked key to TensorBoard."""
    for key, successes in self._num_top_i_successes.items():
        denom = self._num_accesses[key] + 1e-8  # epsilon guards zero accesses
        for rank in range(self._k):
            rate = successes[rank] / denom
            tag = "{}/{}_top_{}".format(tb_tag, key, rank + 1)
            utils.log_scalar(tb_writer, tag, rate, step)
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def __init__(self):
    """Start with no recorded weighted-tau values or masks."""
    self._weighted_taus = list()
    self._masks = list()
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def write_to_tensorboard(self, tb_writer, tb_tag, step):
    """Write masked (eviction-only) and overall mean weighted-tau to TensorBoard."""
    taus = np.array(self._weighted_taus)
    masks = np.array(self._masks)
    # Mean tau restricted to eviction decisions; epsilon guards an all-zero mask.
    eviction_mean = np.sum(taus * masks) / (np.sum(masks) + 1e-8)
    utils.log_scalar(
        tb_writer, "{}/eviction_weighted_tau".format(tb_tag), eviction_mean, step)
    utils.log_scalar(
        tb_writer, "{}/total_weighted_tau".format(tb_tag), np.mean(taus), step)
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def __init__(self):
    """Start with no recorded optimal scores, evicted scores, or masks."""
    self._optimal_scores = list()
    self._evicted_scores = list()
    self._masks = list()
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def sample_list_annotations():
    """Example: page through all annotations under a dataset via Vertex AI."""
    client = aiplatform_v1.DatasetServiceClient()
    request = aiplatform_v1.ListAnnotationsRequest(
        parent="parent_value",
    )
    # list_annotations returns a pager that transparently fetches each page.
    page_result = client.list_annotations(request=request)
    for response in page_result:
        print(response)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def resource_lexical_syntactic_featurizer() -> Resource:
    """Fixture: the model-storage resource handle used by the featurizer tests."""
    return Resource("LexicalSyntacticFeaturizer")
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def create_lexical_syntactic_featurizer( default_model_storage: ModelStorage, default_execution_context: ExecutionContext, resource_lexical_syntactic_featurizer: Resource,
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def inner(config: Dict[Text, Any]):
    """Build a LexicalSyntacticFeaturizer from the defaults overlaid with `config`."""
    merged_config = {
        **LexicalSyntacticFeaturizer.get_default_config(),
        **config,
    }
    return LexicalSyntacticFeaturizer.create(
        config=merged_config,
        model_storage=default_model_storage,
        execution_context=default_execution_context,
        resource=resource_lexical_syntactic_featurizer,
    )
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_feature_computation( create_lexical_syntactic_featurizer: Callable[ [Dict[Text, Any]], LexicalSyntacticFeaturizer ], sentence: Text, part_of_speech: Optional[List[Text]], feature_config: List[List[Text]], expected_features: List[Union[int, List[int]]],
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_features_for_messages_with_missing_part_of_speech_tags( create_lexical_syntactic_featurizer: Callable[ [Dict[Text, Any]], LexicalSyntacticFeaturizer ],
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_only_featurizes_text_attribute( create_lexical_syntactic_featurizer: Callable[ [Dict[Text, Any]], LexicalSyntacticFeaturizer ],
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_process_multiple_messages( create_lexical_syntactic_featurizer: Callable[ [Dict[Text, Any]], LexicalSyntacticFeaturizer ],
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_create_train_load_and_process( create_lexical_syntactic_featurizer: Callable[ [Dict[Text, Any]], LexicalSyntacticFeaturizer ], default_model_storage: ModelStorage, default_execution_context: ExecutionContext, resource_lexical_syntactic_featurizer: Resource, feature_config: List[Text],
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_validate_config(config: Dict[Text, Any], raises: bool):
    """validate_config must raise InvalidConfigException exactly when expected."""
    if raises:
        with pytest.raises(InvalidConfigException):
            LexicalSyntacticFeaturizer.validate_config(config)
    else:
        # Should complete without raising.
        LexicalSyntacticFeaturizer.validate_config(config)
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def test_warn_if_part_of_speech_features_cannot_be_computed( create_lexical_syntactic_featurizer: Callable[ [Dict[Text, Any]], LexicalSyntacticFeaturizer ], sentence: Text, feature_config: Dict[Text, Any], expected_features: np.ndarray,
RasaHQ/rasa_nlu
[ 15758, 4259, 15758, 111, 1476448069 ]
def sample_suggest_trials():
    """Example: request trial suggestions from Vertex AI Vizier and wait for them."""
    client = aiplatform_v1beta1.VizierServiceClient()
    request = aiplatform_v1beta1.SuggestTrialsRequest(
        parent="parent_value",
        suggestion_count=1744,
        client_id="client_id_value",
    )
    # suggest_trials is a long-running operation; block until it completes.
    operation = client.suggest_trials(request=request)
    print("Waiting for operation to complete...")
    response = operation.result()
    print(response)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def mock_timeout(hass, monkeypatch):
    """Mock async_timeout.timeout so tests drive timeouts via time_changed events.

    Instead of waiting in real time, the mocked context manager schedules its
    own cancellation through async_call_later, which fires when test code
    advances the Home Assistant clock.
    """

    class MockTimeout:
        def __init__(self, timeout):
            self._timeout = timeout
            self._loop = asyncio.get_event_loop()
            self._task = None
            self._cancelled = False
            self._unsub = None

        async def __aenter__(self):
            if self._timeout is None:
                return self
            # FIX: asyncio.Task.current_task() was deprecated in 3.7 and
            # removed in 3.9; asyncio.current_task() is the replacement.
            self._task = asyncio.current_task()
            if self._timeout <= 0:
                # Non-positive timeout: cancel as soon as the loop runs.
                self._loop.call_soon(self._cancel_task)
                return self
            # Wait for a time_changed event instead of real time passing.
            self._unsub = async_call_later(hass, self._timeout, self._cancel_task)
            return self

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            if exc_type is asyncio.CancelledError and self._cancelled:
                self._unsub = None
                self._task = None
                # Translate our own cancellation into the TimeoutError callers expect.
                raise asyncio.TimeoutError
            if self._timeout is not None and self._unsub:
                self._unsub()
                self._unsub = None
            self._task = None
            return None

        @callback
        def _cancel_task(self, now=None):
            if self._task is not None:
                self._task.cancel()
                self._cancelled = True

    monkeypatch.setattr(script, "timeout", MockTimeout)
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def check_action():
    """Set the flag once the script reports `message` in its last action."""
    last = script_obj.last_action
    if last and message in last:
        flag.set()
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def service_done_cb(event):
    """Mark the simulated (fire/listen) service call as finished."""
    logger.debug("simulated service (%s:%s) done", fire, listen)
    service_done.set()
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def heard_event_cb(event):
    """Record that the expected event was heard on the bus."""
    logger.debug("heard: %s", event)
    heard_event.set()
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def record_call(service):
    """Service handler stub that always fails, simulating a broken service."""
    raise ValueError("BROKEN")
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def does_not_raise():
    """Context-manager body used where no exception is expected."""
    yield
pschmitt/home-assistant
[ 1, 1, 1, 6, 1485261624 ]
def _leaky_relu(x):
    """Leaky ReLU activation with negative slope 0.2."""
    return tf.nn.leaky_relu(x, alpha=0.2)
tensorflow/tpu
[ 5035, 1773, 5035, 290, 1499817279 ]
def _dense(x, channels, name):
    """Fully connected layer with truncated-normal (stddev=0.02) init."""
    initializer = tf.truncated_normal_initializer(stddev=0.02)
    return tf.layers.dense(x, channels, kernel_initializer=initializer, name=name)
tensorflow/tpu
[ 5035, 1773, 5035, 290, 1499817279 ]
def _deconv2d(x, filters, kernel_size, stride, name):
    """Transposed 2-D convolution: square kernel, 'same' padding, stddev-0.02 init."""
    initializer = tf.truncated_normal_initializer(stddev=0.02)
    return tf.layers.conv2d_transpose(
        x,
        filters,
        [kernel_size, kernel_size],
        strides=[stride, stride],
        padding='same',
        kernel_initializer=initializer,
        name=name)
tensorflow/tpu
[ 5035, 1773, 5035, 290, 1499817279 ]
def generator(x, is_training=True, scope='Generator'):
    """DCGAN-style generator.

    Architecture: fc1024-bn-relu + fc6272-bn-relu + deconv64-bn-relu + deconv1-tanh.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        net = _dense(x, 1024, name='g_fc1')
        net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn1'))
        net = _dense(net, 7 * 7 * 128, name='g_fc2')
        net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn2'))
        # Reshape the flat features into a 7x7 map with 128 channels.
        net = tf.reshape(net, [-1, 7, 7, 128])
        net = _deconv2d(net, 64, 4, 2, name='g_dconv3')
        net = tf.nn.relu(_batch_norm(net, is_training, name='g_bn3'))
        net = _deconv2d(net, 1, 4, 2, name='g_dconv4')
        return tf.tanh(net)
tensorflow/tpu
[ 5035, 1773, 5035, 290, 1499817279 ]
def __enter__(self):
    """Support use as a context manager; no setup required."""
    return self
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _subscriber_default(self):
    """Lazily build the IPC subscriber bound to this tester's socket."""
    return salt.transport.ipc.IPCMessageSubscriber(
        self.socket_path,
        io_loop=self.io_loop,
    )
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def _publisher_default(self):
    """Lazily build the IPC publisher (unbuffered writes) for this socket."""
    return salt.transport.ipc.IPCMessagePublisher(
        {"ipc_write_buffer": 0},
        self.socket_path,
        io_loop=self.io_loop,
    )
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def new_client(self):
    """Create another IPCTester sharing this instance's loop, socket and state."""
    return IPCTester(
        io_loop=self.io_loop,
        socket_path=self.socket_path,
        server=self.server,
        payloads=self.payloads,
        payload_ack=self.payload_ack,
    )
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def __enter__(self):
    """Start the publisher and schedule the subscriber connection on the loop."""
    self.publisher.start()
    self.io_loop.add_callback(self.subscriber.connect)
    return self
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def ipc_socket_path(tmp_path):
    """Yield a socket path for IPC tests, removing the file afterwards."""
    if salt.utils.platform.is_darwin():
        # macOS AF_UNIX paths have a short length limit; use /tmp instead of
        # pytest's potentially deep tmp_path.
        tmp_path = pathlib.Path("/tmp").resolve()
    socket_path = tmp_path / "ipc-test.ipc"
    try:
        yield socket_path
    finally:
        if socket_path.exists():
            socket_path.unlink()
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def channel(io_loop, ipc_socket_path):
    """Yield a running IPCTester bound to the given socket path."""
    tester = IPCTester(io_loop=io_loop, socket_path=str(ipc_socket_path))
    with tester:
        yield tester
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def test_100_update(self):
    """Update several attributes of network excx-net in one command."""
    command = [
        "update", "network",
        "--network", "excx-net",
        "--network_environment", "excx",
        "--building", "ut",
        "--type", "dmz-net",
        "--side", "b",
        "--comments", "New network comments",
    ]
    self.noouttest(command)
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def test_120_update_rename(self):
    """Rename network netsvcmap and set a new comment."""
    command = [
        "update", "network",
        "--network", "netsvcmap",
        "--rename_to", "rename-test",
        "--comments", "New comment",
    ]
    self.noouttest(command)
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def test_122_update_rename_existing(self):
    """Renaming to an in-use network name succeeds but emits a warning."""
    net = self.net["np06bals03_v103"]
    command = ["update", "network", "--network", "rename-test",
               "--rename_to", "np06bals03_v103"]
    out, err = self.successtest(command)
    expected = ("WARNING: Network name {} is already used for address {}/{}."
                .format("np06bals03_v103", net.ip, net.prefixlen))
    self.matchoutput(err, expected, command)
    # Rename the duplicate (looked up by IP, since the name is ambiguous) back.
    command = ["update", "network", "--ip", net.ip,
               "--rename_to", "netsvcmap"]
    self.noouttest(command)
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def test_201_verify_utdmz1(self):
    """Searching by the perimeter.ut compartment should produce no output."""
    command = ["search", "network",
               "--network_compartment", "perimeter.ut"]
    self.noouttest(command)
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def test_900_delinuse(self):
    """Deleting a network that still has registered addresses must fail."""
    net = self.net["unknown0"]
    command = ["del", "network", "--ip", net.ip]
    out = self.badrequesttest(command)
    self.matchoutput(
        out, "Network %s [%s] is still in use" % (net.name, net), command)
quattor/aquilon
[ 12, 16, 12, 38, 1361797498 ]
def setUp(self):
    """Point the comparison at the textbox15.xlsx reference workbook."""
    self.set_filename('textbox15.xlsx')
jmcnamara/XlsxWriter
[ 3172, 594, 3172, 18, 1357261626 ]
def __init__(self, *args: Any) -> None:
    """Initialize test state and apply the LOGLEVEL environment variable, if set."""
    super(ProgressiveTest, self).__init__(*args)
    self._output: bool = False
    self._scheduler: Optional[Scheduler] = None
    self._temp_dir_flag: bool = False
    level: Any = getenv("LOGLEVEL")
    # Symbolic names (e.g. "DEBUG") map through the class-level table;
    # numeric strings fall through unchanged.
    if level in ProgressiveTest.levels:
        level = ProgressiveTest.levels[level]
    # Falsy values (None, "", 0/NOTSET) leave logging untouched.
    if level:
        print(f"Logger level {level} for {self}", file=sys.stderr)
        self.log(int(level))
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def terse(x: Any) -> None:
    """Ignore `x` and print a progress dot to stderr."""
    _ = x
    print(".", end="", file=sys.stderr)
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def setUp(self) -> None:
    """Seed NumPy's global RNG so every test run is reproducible."""
    np.random.seed(42)
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def cleanup(self) -> None:
    """Remove the temporary working directory, if one exists."""
    cleanup_temp_dir()
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def setUpClass(cls: Type[ProgressiveTest]) -> None:
    """Start each test class from a fresh temporary directory."""
    cleanup_temp_dir()
    init_temp_dir_if()
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def tearDownClass(cls: Type[ProgressiveTest]) -> None:
    """Remove the class's temporary directory after all tests have run."""
    cleanup_temp_dir()
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def log(level: int = logging.NOTSET, package: str = "progressivis") -> None:
    """Set the logging level for `package` (defaults to the progressivis tree)."""
    log_level(level, package=package)
jdfekete/progressivis
[ 47, 4, 47, 6, 1440882684 ]
def extractDlscanlationsCom(item):
    """Parser for 'dlscanlations.com'."""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with no chapter/volume info, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)
    # Tagged, but none of the known groups matched.
    return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def _get_manager(cls, model_class):
    """Use the model's private_objects manager instead of the default manager."""
    return model_class.private_objects
DemocracyClub/EveryElection
[ 12, 13, 12, 126, 1475835740 ]
def __init__(self, node, parent, result):
    """Record the failing node, its parent, and the partial result so far."""
    Exception.__init__(self)
    self.node = node
    self.parent = parent
    self.result = result
aldebaran/qibuild
[ 67, 45, 67, 42, 1297185497 ]
def assert_dag(data):
    """ Check if data is a dag

    >>> assert_dag({
    ...     'a' : ( 'g', 'b', 'c', 'd' ),
    ...     'b' : ( 'e', 'c' ),
    ...     'e' : ( 'g', 'c' )})

    >>> assert_dag({
    ...     'a' : ( 'g', 'b', 'c', 'd' ),
    ...     'b' : ( 'e', 'c' ),
    ...     'e' : ( 'e', 'c' )})
    Traceback (most recent call last):
        ...
    DagError: Circular dependency error: Starting from 'e', node 'e' depends on 'e', complete path []
    """
    # FIX: iterate keys directly instead of unpacking .items() and
    # discarding the values (the sort reads the values itself).
    for node in data:
        _topological_sort(data, node, node, True)
aldebaran/qibuild
[ 67, 45, 67, 42, 1297185497 ]
def _topological_sort(data, head, top_node, raise_exception=False,
                      result=None, visited=None):
    """ Internal function """
    # NOTE: `if not result` also replaces an explicitly passed *empty* list;
    # recursion always uses the returned list, so accumulation still works.
    if not result:
        result = []
    if not visited:
        visited = []
    deps = data.get(head, list())
    if head in visited:
        # Reaching the starting node again means a cycle was found.
        if head == top_node and raise_exception:
            raise DagError(head, head, result)
        return result
    visited.append(head)
    for i in deps:
        try:
            result.index(i)
        except ValueError:
            # the item does not exist
            result = _topological_sort(data, i, top_node, raise_exception,
                                       result, visited)
    result.append(head)
    return result
aldebaran/qibuild
[ 67, 45, 67, 42, 1297185497 ]
def testPolicyWithoutItems(self):
    """A policy with no items yields only a title and a description string."""
    # Test an example policy without items.
    policy = {
        'name': '_policy_name',
        'caption': '_policy_caption',
        'desc': 'This is a long policy caption. More than one sentence '
                'in a single line because it is very important.\n'
                'Second line, also important'
    }
    writer = android_policy_writer.GetWriter({})
    writer.Init()
    writer.BeginTemplate()
    writer.WritePolicy(policy)
    # FIX: assertEquals is a deprecated alias removed in Python 3.12;
    # use assertEqual.
    self.assertEqual(
        writer._resources.toxml(),
        '<resources>'
        '<string name="_policy_nameTitle">_policy_caption</string>'
        '<string name="_policy_nameDesc">This is a long policy caption. More '
        'than one sentence in a single line because it is very '
        'important.\nSecond line, also important'
        '</string>'
        '</resources>')
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def test_quarterly_dont_normalize():
    """Adding a business-quarter offset must not alter the time-of-day."""
    date = datetime(2012, 3, 31, 5, 30)
    for klass in (BQuarterEnd, BQuarterBegin):
        shifted = date + klass()
        assert shifted.time() == date.time()
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_on_offset(offset): dates = [ datetime(2016, m, d) for m in [10, 11, 12] for d in [1, 2, 3, 28, 29, 30, 31] if not (m == 11 and d == 31) ] for date in dates: res = offset.is_on_offset(date) slow_version = date == (date + offset) - offset assert res == slow_version
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_repr(self): expected = "<BusinessQuarterBegin: startingMonth=3>" assert repr(BQuarterBegin()) == expected expected = "<BusinessQuarterBegin: startingMonth=3>" assert repr(BQuarterBegin(startingMonth=3)) == expected expected = "<BusinessQuarterBegin: startingMonth=1>" assert repr(BQuarterBegin(startingMonth=1)) == expected
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_offset_corner_case(self): # corner offset = BQuarterBegin(n=-1, startingMonth=1) assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_offset(self, case):
    """Each (base, expected) pair must hold for the parametrized offset."""
    offset, mapping = case
    for base, expected in mapping.items():
        assert_offset_equal(offset, base, expected)
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_repr(self): expected = "<BusinessQuarterEnd: startingMonth=3>" assert repr(BQuarterEnd()) == expected expected = "<BusinessQuarterEnd: startingMonth=3>" assert repr(BQuarterEnd(startingMonth=3)) == expected expected = "<BusinessQuarterEnd: startingMonth=1>" assert repr(BQuarterEnd(startingMonth=1)) == expected
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_offset_corner_case(self): # corner offset = BQuarterEnd(n=-1, startingMonth=1) assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def test_offset(self, case):
    """Verify every (base, expected) pair of the parametrized case."""
    offset, expectations = case
    for base, expected in expectations.items():
        assert_offset_equal(offset, base, expected)
pandas-dev/pandas
[ 37157, 15883, 37157, 3678, 1282613853 ]
def is_windows():
    """Return True when running on Windows (os.name == 'nt')."""
    return os.name == 'nt'
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def main_compile_targets(args):
    """Write the list of compile targets (everything) as JSON to args.output."""
    json.dump(['all'], args.output)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def test_attributes():
    """Basic accessors of Trajectories match the brownian reference data."""
    trajs = data.brownian_trajs_df()
    trajs = Trajectories(trajs)
    # Five frames, five labels in the reference set.
    assert_array_equal(trajs.t_stamps, np.array([0, 1, 2, 3, 4]))
    assert_array_equal(trajs.labels, np.array([0, 1, 2, 3, 4]))
    # segment_idxs maps each label to its (t_stamp, label) index pairs.
    segments = {0: [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0)],
                1: [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1)],
                2: [(0, 2), (1, 2), (2, 2), (3, 2), (4, 2)],
                3: [(0, 3), (1, 3), (2, 3), (3, 3), (4, 3)],
                4: [(0, 4), (1, 4), (2, 4), (3, 4), (4, 4)]}
    assert_dict_equal(trajs.segment_idxs, segments)
    # Reference values for the first segment; presumably columns are
    # x, y, z, true_label, t — TODO confirm against the data module.
    traj = np.array([[ -9.25386045, 11.34555088, 22.11820326, 3. , 0. ],
                     [ 11.05321776, 3.23738477, 2.62790435, 2. , 1. ],
                     [ 16.6824928 , 14.602054 , -12.1218683 , 4. , 2. ],
                     [ 17.22410516, 14.8068125 , -11.87642753, 4. , 3. ],
                     [ 2.80222495, -13.13783042, 8.56406878, 0. , 4. ]])
    t_stamp, traj_to_test = list(trajs.iter_segments)[0]
    assert_array_almost_equal(traj, traj_to_test)
    assert list(trajs.get_segments().keys()) == [0, 1, 2, 3, 4]
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_copy():
    """copy() must return a Trajectories instance, not a bare DataFrame."""
    trajs = Trajectories(data.brownian_trajs_df())
    duplicate = trajs.copy()
    assert isinstance(duplicate, Trajectories)
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_reverse():
    """reverse() keeps the (25, 5) shape, both out-of-place and in-place."""
    trajs = Trajectories(data.brownian_trajs_df())
    assert trajs.reverse().shape == (25, 5)

    trajs = Trajectories(data.brownian_trajs_df())
    trajs.reverse(inplace=True)
    assert trajs.shape == (25, 5)
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_interpolate():
    """time_interpolate must produce equally spaced time points."""
    trajs = Trajectories(data.with_gaps_df())
    # Re-key the index on the ground-truth labels.
    trajs.set_index('true_label', inplace=True, append=True)
    trajs.reset_index(level='label', drop=True, inplace=True)
    trajs.index.set_names(['t_stamp', 'label'], inplace=True)

    interpolated = trajs.time_interpolate(sampling=3, time_step=0.1, s=1)
    dts = interpolated.get_segments()[0].t.diff().dropna()
    # All consecutive time deltas should be identical.
    assert_almost_equal(dts.min(), dts.max())
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_get_speeds():
    """Speeds of the brownian test set match precomputed reference values."""
    trajs = Trajectories(data.brownian_trajs_df())
    speeds = trajs.get_speeds().tolist()
    # Reference speeds; presumably the five leading NaNs are each segment's
    # first frame (no predecessor) — TODO confirm against get_speeds.
    real_speeds = [np.nan, np.nan, np.nan, np.nan, np.nan,
                   857.99153458573994, 1596.9530747771976, 873.15267834726137,
                   1282.3088174598233, 408.98588960526808, 378.40023709328955,
                   1809.9895146014187, 917.93227668556324, 592.31881736181106,
                   0.48325048326444919, 0.39551116881922965, 798.29858694043128,
                   1085.3214310682606, 405.49164945495221, 550.37555144616226,
                   1406.707586739079, 1031.9444945962532, 1077.6619763794718,
                   1445.7789239945778, 739.66839622816326]
    assert_almost_equal(speeds, real_speeds)
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_project():
    """project() adds x_proj/y_proj columns, in place or as a copy."""
    trajs = Trajectories(data.directed_motion_trajs_df())
    trajs.rename(columns={'true_label': 'new_label'}, inplace=True)
    trajs.relabel()
    # Project onto the axis defined by labels 0 and 1, in place.
    trajs.project([0, 1],
                  coords=['x', 'y'],
                  keep_first_time=False,
                  reference=None,
                  inplace=True,
                  progress=False)
    excepted = np.array([[ 0.27027431, 0. ],
                         [-0.27027431, 0. ],
                         [-0.25306519, 0.69683713],
                         [ 0.04633664, 0.31722648]])
    assert_array_almost_equal(excepted, trajs.loc[:, ['x_proj', 'y_proj']].values[:4])

    # Same projection, returning a new Trajectories instead.
    trajs = trajs.project([0, 1],
                          coords=['x', 'y'],
                          keep_first_time=False,
                          reference=None,
                          inplace=False,
                          progress=False)
    assert_array_almost_equal(excepted, trajs.loc[:, ['x_proj', 'y_proj']].values[:4])

    # Four coordinates are rejected — presumably only 2 or 3 are supported;
    # TODO confirm against Trajectories.project.
    assert_raises(ValueError, trajs.project, [0, 1],
                  coords=['x', 'y', 'z', 't'])
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_get_longest_segments():
    """get_longest_segments(1) returns a single label for the longest segment."""
    trajs = Trajectories(data.brownian_trajs_df())
    longest = trajs.get_longest_segments(1)
    assert longest == [4]
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_remove_segments():
    """Removing segment 1 in place leaves only the remaining labels."""
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.remove_segments(1, inplace=True)
    assert np.all(trajs.labels == [0, 2, 3, 4])
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_relabel():
    """relabel() reassigns labels (presumably from the 'new_label' column),
    both in place and returning a copy — TODO confirm against relabel()."""
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.columns = ['x', 'y', 'z', 'new_label', 't']
    trajs.relabel(inplace=True)
    new_values = [[1.933058243735795, -14.581064591435775, 11.603556633147544, 0.0],
                  [-12.862215173899491, -2.8611502446443238, -2.2738941196781424, 0.0],
                  [9.100887851132633, 2.837252570763561, 2.875753940450461, 0.0],
                  [-9.253860446235523, 11.345550876585719, 22.118203258275745, 0.0]]
    assert trajs.iloc[:4].values.tolist() == new_values

    # Same expected values for the copy-returning (inplace=False) variant.
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.columns = ['x', 'y', 'z', 'new_label', 't']
    trajs = trajs.relabel(inplace=False)
    new_values = [[1.933058243735795, -14.581064591435775, 11.603556633147544, 0.0],
                  [-12.862215173899491, -2.8611502446443238, -2.2738941196781424, 0.0],
                  [9.100887851132633, 2.837252570763561, 2.875753940450461, 0.0],
                  [-9.253860446235523, 11.345550876585719, 22.118203258275745, 0.0]]
    assert trajs.iloc[:4].values.tolist() == new_values
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_remove_spots():
    """remove_spots drops the given (t_stamp, label) rows, out of place."""
    trajs = Trajectories(data.brownian_trajs_df())

    # Remove two spots given as a list of (t_stamp, label) tuples.
    new_trajs = trajs.remove_spots([(3, 2), (0, 0)], inplace=False)
    new_indexes = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 2),
                   (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
                   (3, 0), (3, 1), (3, 3), (3, 4), (4, 0), (4, 1), (4, 2),
                   (4, 3), (4, 4)]
    assert new_trajs.index.tolist() == new_indexes

    # A single spot may be passed as a bare tuple.
    new_trajs = trajs.remove_spots((0, 0), inplace=False)
    new_indexes = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 2),
                   (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
                   (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (4, 0), (4, 1),
                   (4, 2), (4, 3), (4, 4)]
    assert new_trajs.index.tolist() == new_indexes
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_cut_segments():
    """cut_segments works both in place and returning a modified copy."""
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.cut_segments((2, 3), inplace=True)
    # Presumably the cut moves spots after (t_stamp=2, label=3) to fresh
    # labels (note (3, 5) and (4, 5)) — TODO confirm against cut_segments.
    new_indexes = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1),
                   (1, 2), (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3),
                   (2, 4), (3, 0), (3, 1), (3, 2), (3, 4), (3, 5), (4, 0),
                   (4, 1), (4, 2), (4, 4), (4, 5)]
    assert trajs.index.tolist() == new_indexes

    # Same expectation for the copy-returning variant.
    trajs = Trajectories(data.brownian_trajs_df())
    trajs = trajs.cut_segments((2, 3), inplace=False)
    new_indexes = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1),
                   (1, 2), (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3),
                   (2, 4), (3, 0), (3, 1), (3, 2), (3, 4), (3, 5), (4, 0),
                   (4, 1), (4, 2), (4, 4), (4, 5)]
    assert trajs.index.tolist() == new_indexes
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def test_get_bounds():
    """Bounds come back per label, in t_stamp units or in the 't' column's units."""
    trajs = Trajectories(data.brownian_trajs_df())
    trajs['t'] *= 10
    expected_stamps = {label: (0, 4) for label in range(5)}
    expected_times = {label: (0.0, 40.0) for label in range(5)}
    assert trajs.get_bounds() == expected_stamps
    assert trajs.get_bounds(column='t') == expected_times
bnoi/scikit-tracker
[ 9, 3, 9, 6, 1392198505 ]
def create(kernel):
    # NOTE(review): `result` is built but never returned in the visible code —
    # the snippet is presumably truncated (a `return result` likely follows
    # in the full file); confirm before refactoring.
    result = Creature()
    result.template = "object/mobile/shared_dressed_lost_aqualish_soldier_female_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","aqualish_base_female")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # NOTE(review): `result` is built but never returned in the visible code —
    # snippet presumably truncated; confirm against the full file.
    result = Creature()
    result.template = "object/creature/npc/droid/shared_wed_treadwell_base.iff"
    result.attribute_template_id = 3
    result.stfName("droid_name","wed_treadwell_base")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # NOTE(review): `result` is built but never returned in the visible code —
    # snippet presumably truncated; confirm against the full file.
    result = Mission()
    result.template = "object/mission/base/shared_base_mission.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def create(kernel):
    # NOTE(review): `result` is built but never returned in the visible code —
    # snippet presumably truncated; confirm against the full file.
    result = Tangible()
    result.template = "object/tangible/ship/attachment/weapon/shared_blacksun_heavy_weapon2_s02.iff"
    result.attribute_template_id = 8
    result.stfName("item_n","ship_attachment")
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def setUp(self):
    """Instantiate a KytosEventBuffer on a fresh event loop."""
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(self.loop)
    self.kytos_event_buffer = KytosEventBuffer('name', loop=self.loop)
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def create_event_mock(name='any'):
    """Return a MagicMock event with its .name attribute set explicitly.

    The attribute is assigned after construction because MagicMock(name=...)
    configures the mock's own name rather than a .name attribute.
    """
    event = MagicMock()
    event.name = name
    return event
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def test_put__shutdown(self):
    """A shutdown event must flip the buffer into reject-new-events mode."""
    shutdown_event = self.create_event_mock('kytos/core.shutdown')
    self.kytos_event_buffer.put(shutdown_event)
    self.assertTrue(self.kytos_event_buffer._reject_new_events)
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def test_aget(self):
    """aget must return the event previously queued synchronously."""
    event = self.create_event_mock()
    self.kytos_event_buffer._queue.sync_q.put(event)
    result = self.loop.run_until_complete(self.kytos_event_buffer.aget())
    self.assertEqual(event, result)
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def test_task_done(self, mock_task_done):
    """task_done must delegate to the underlying queue."""
    self.kytos_event_buffer.task_done()
    mock_task_done.assert_called()
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def test_join(self, mock_join):
    """join must delegate to the underlying queue."""
    self.kytos_event_buffer.join()
    mock_join.assert_called()
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def test_empty(self):
    """empty() is True before, and False after, an event is queued."""
    before = self.kytos_event_buffer.empty()
    self.kytos_event_buffer._queue.sync_q.put(self.create_event_mock())
    after = self.kytos_event_buffer.empty()
    self.assertTrue(before)
    self.assertFalse(after)
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def test_full(self, mock_full):
    """full() reflects whatever the underlying queue reports."""
    mock_full.side_effect = [False, True]
    first = self.kytos_event_buffer.full()
    second = self.kytos_event_buffer.full()
    self.assertFalse(first)
    self.assertTrue(second)
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def setUp(self):
    """Instantiate KytosBuffers with a dedicated loop not set as current."""
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)
    self.kytos_buffers = KytosBuffers(loop=self.loop)
kytos/kytos
[ 43, 46, 43, 125, 1465587463 ]
def get_display(self):
    """Build the 'Server' section of the system-info page: OS details,
    CPU count and login name, rendered as a table.

    NOTE(review): no `return top` is visible — the snippet is probably
    truncated; confirm against the full file.
    """
    top = DivWdg()
    top.add_color("background", "background")
    top.add_color("color", "color")
    top.add_style("min-width: 600px")
    os_name = os.name  # NOTE(review): assigned but unused in the visible code
    top.set_unique_id()
    top.add_smart_style("spt_info_title", "background", self.top.get_color("background3"))
    top.add_smart_style("spt_info_title", "padding", "3px")
    top.add_smart_style("spt_info_title", "font-weight", "bold")

    # server
    title_div = DivWdg()
    top.add(title_div)
    title_div.add("Server")
    title_div.add_class("spt_info_title")

    os_div = DivWdg()
    top.add(os_div)

    # platform.uname() yields (system, node, release, version, machine, ...)
    os_info = platform.uname()
    try:
        os_login = os.getlogin()
    except Exception:
        # os.getlogin() can fail without a controlling terminal; use env var.
        os_login = os.environ.get("LOGNAME")

    table = Table()
    table.add_color("color", "color")
    table.add_style("margin: 10px")
    os_div.add(table)

    # One row per uname() field, in display order.
    for i, title in enumerate(['OS','Node Name','Release','Version','Machine']):
        table.add_row()
        td = table.add_cell("%s: " % title)
        td.add_style("width: 150px")
        table.add_cell( os_info[i] )

    table.add_row()
    table.add_cell("CPU Count: ")
    # multiprocessing may be missing or unsupported on exotic platforms.
    try :
        import multiprocessing
        table.add_cell( multiprocessing.cpu_count() )
    except (ImportError, NotImplementedError):
        table.add_cell( "n/a" )

    table.add_row()
    table.add_cell("Login: ")
    table.add_cell( os_login )
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def handle_directories(self, top):
    """Append the 'Asset Folders' section: the configured asset base dir
    (with a writability check) and the per-OS server handoff dir.

    NOTE(review): the snippet ends right after `handoff_dir` is read and
    never uses it — probably truncated; confirm against the full file.
    """
    # deal with asset directories
    top.add(DivWdg('Asset Folders', css='spt_info_title'))

    # NOTE(review): mailserver is read but never used in the visible code.
    mailserver = Config.get_value("services", "mailserver")

    table = Table()
    table.add_color("color", "color")
    table.add_style("margin: 10px")
    top.add(table)

    table.add_row()
    td = table.add_cell("asset_base_dir: ")
    td.add_style("width: 150px")
    asset_base_dir = Config.get_value("checkin", "asset_base_dir")
    if asset_base_dir:
        table.add_cell( asset_base_dir )
        tr = table.add_row()
        tr.add_style('border-bottom: 1px #bbb solid')
        # check if it is writable
        is_writable = os.access(asset_base_dir, os.W_OK)
        span = SpanWdg("writable:")
        span.add_style('padding-left: 20px')
        td = table.add_cell(span)
        td = table.add_cell(str(is_writable))
    else:
        table.add_cell( "None configured")

    client_os = Environment.get_env_object().get_client_os()
    # Map Python's os.name / client OS code to the config key prefix.
    if os.name == 'nt':
        os_name = 'win32'
    else:
        os_name = 'linux'
    if client_os == 'nt':
        client_os_name = 'win32'
    else:
        client_os_name = 'linux'

    env = Environment.get()
    client_handoff_dir = env.get_client_handoff_dir(include_ticket=False, no_exception=True)
    client_asset_dir = env.get_client_repo_dir()

    table.add_row()
    td = table.add_cell("%s_server_handoff_dir: " % os_name)
    td.add_style("width: 150px")
    handoff_dir = Config.get_value("checkin", "%s_server_handoff_dir" % os_name)
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def handle_python_script_test(self, top):
    """Append the 'Python Script Test' section: a script-path input and a
    Run button that executes the script server-side via PythonCmd."""
    top.add(DivWdg('Python Script Test', css='spt_info_title'))
    table = Table(css='script')
    table.add_color("color", "color")
    table.add_style("margin: 10px")
    table.add_style("width: 100%")
    top.add(table)

    table.add_row()
    td = table.add_cell("Script Path: ")
    td.add_style("width: 150px")
    text = TextWdg('script_path')
    td = table.add_cell(text)

    button = ActionButtonWdg(title='Run')
    table.add_cell(button)
    button.add_style("float: right")
    # Client-side: read the entered path and run it through PythonCmd;
    # surface any server exception as an alert.
    button.add_behavior( {
        'type': 'click_up',
        'cbjs_action': '''
        var s = TacticServerStub.get();
        try {
            var path = bvr.src_el.getParent('.script').getElement('.spt_input').value;
            if (! path)
                throw('Please enter a valid script path');
            s.execute_cmd('tactic.command.PythonCmd', {script_path: path});
        }
        catch(e) {
            spt.alert(spt.exception.handler(e));
        }
        '''
    })
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def handle_load_balancing(self, top):
    """Append the 'Load Balancing' section: a Test button that fires up to 50
    requests and reports how many distinct server ports answered."""
    # deal with asset directories
    top.add(DivWdg('Load Balancing', css='spt_info_title'))

    table = Table()
    table.add_class("spt_loadbalance")
    table.add_color("color", "color")
    table.add_style("margin: 10px")
    top.add(table)

    table.add_row()
    td = table.add_cell("Load Balancing: ")
    td.add_style("width: 150px")

    button = ActionButtonWdg(title='Test')
    td = table.add_cell(button)

    message_div = DivWdg()
    message_div.add_class("spt_loadbalance_message")
    table.add_cell(message_div)

    # Client-side: sample get_connection_info() repeatedly, tally the ports
    # seen, and report multi-port (balanced) vs single-port (not balanced).
    button.add_behavior( {
        'type': 'click_up',
        'cbjs_action': '''
        var server = TacticServerStub.get()
        var ports = {};
        var count = 0;
        for (var i = 0; i < 50; i++) {
            var info = server.get_connection_info();
            var port = info.port;
            var num = ports[port];
            if (!num) {
                ports[port] = 1;
                count += 1;
            }
            else {
                ports[port] += 1;
            }
            // if there are 10 requests and still only one, then break
            if (i == 10 && count == 1)
                break;
        }
        // build the ports string
        x = [];
        for (i in ports) {
            x.push(i);
        }
        x.sort();
        x = x.join(", ");

        var loadbalance_el = bvr.src_el.getParent(".spt_loadbalance");
        var message_el = loadbalance_el.getElement(".spt_loadbalance_message");
        if (count > 1) {
            var message = "Yes (found " + count + " ports: "+x+")";
        }
        else {
            var message = "<blink style='background: red; padding: 3px'>Not enabled (found only port " + x + ")</blink>";
        }
        message_el.innerHTML = message
        '''
    } )
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def execute(self):
    """Purge the cached side-bar widgets from the temp directory.

    Deletes ``<tmp>/cache/side_bar`` recursively so the sidebar is
    rebuilt from scratch on the next request; a no-op when the cache
    directory is absent.
    """
    import shutil

    cache_dir = "%s/cache/side_bar" % Environment.get_tmp_dir()
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def get_display(self):
    # NOTE(review): this body looks truncated in this extract — it only
    # binds a local search-type constant and never builds or returns a
    # widget; confirm against the full source file.
    config_search_type = "config/widget_config"
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def get_display(self):
    """Build the performance-test prompt with a "Test" action button."""
    top = self.top
    top.add("<br/>")
    top.add_style("margin-left: 10px")

    # Best-effort CPU detection: multiprocessing may be unavailable or
    # unsupported on some platforms, so fall back to a placeholder.
    # NOTE(review): cpu_count is not used in the code visible here —
    # presumably consumed further down; confirm against the full source.
    try:
        import multiprocessing
        cpu_count = multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        cpu_count = 'n/a'

    title = DivWdg()
    title.add("Click to start performance test: ")
    title.add_style("float: left")
    top.add(title)
    title.add_style("margin-top: 5px")

    button = ActionButtonWdg(title='Test')
    top.add(button)
Southpaw-TACTIC/TACTIC
[ 473, 170, 473, 29, 1378771601 ]
def load_dataset(incl_test=False, incl_foldc=False):
    """Load the expanded Titanic dataset as an H2O frame fixture.

    Args:
        incl_test: when True, split 80/20 into named train/test frames.
        incl_foldc: when True, add a 3-fold "foldc" fold column to train.

    Returns:
        A namespace with ``train``, ``test`` (None unless requested) and
        the ``target`` column name ("survived").
    """
    target = "survived"
    frame = h2o.import_file(pu.locate("smalldata/titanic/titanic_expanded.csv"), header=1)
    test = None
    if incl_test:
        splits = frame.split_frame(ratios=[.8],
                                   destination_frames=["titanic_train", "titanic_test"],
                                   seed=seed)
        train, test = splits[0], splits[1]
    else:
        train = frame
    if incl_foldc:
        train["foldc"] = train.kfold_column(3, seed)
    return pu.ns(train=train, test=test, target=target)
h2oai/h2o-3
[ 6169, 1943, 6169, 208, 1393862887 ]
def test_deprecated_f_param_is_alias_for_smoothing():
    """The deprecated ``f`` param must emit exactly one deprecation warning."""
    data = load_dataset(incl_test=True)

    # Sanity pass with no deprecated params: train and score once so we
    # know the estimator works on this dataset before probing the alias.
    baseline = H2OTargetEncoderEstimator(noise=0)
    baseline.train(y=data.target, training_frame=data.train)
    baseline.predict(data.test)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        H2OTargetEncoderEstimator(noise=0, f=25, blending=True)
        assert len(caught) == 1
        first = caught[0]
        assert issubclass(first.category, H2ODeprecationWarning)
        assert "``f`` param of ``{}`` is deprecated".format(te_init_name) in str(first.message)
h2oai/h2o-3
[ 6169, 1943, 6169, 208, 1393862887 ]
def test_deprecated_noise_level_param_is_alias_for_noise():
    """``noise_level`` must warn once and behave exactly like ``noise``."""
    data = load_dataset(incl_test=True)

    # Default encoder (noise enabled) as the contrasting reference.
    reference = H2OTargetEncoderEstimator()
    reference.train(y=data.target, training_frame=data.train)
    encoded_default = reference.predict(data.test)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        with_alias = H2OTargetEncoderEstimator(noise_level=0)
        assert len(caught) == 1
        assert issubclass(caught[0].category, H2ODeprecationWarning)
        assert "``noise_level`` param of ``{}`` is deprecated".format(te_init_name) in str(caught[0].message)
    with_alias.train(y=data.target, training_frame=data.train)
    encoded_alias = with_alias.predict(data.test)

    with_new = H2OTargetEncoderEstimator(noise=0)
    with_new.train(y=data.target, training_frame=data.train)
    encoded_new = with_new.predict(data.test)

    # noise_level=0 must NOT match the noisy default encoding...
    try:
        pu.compare_frames(encoded_alias, encoded_default, 0, tol_numeric=1e-5)
        assert False, "should have raised"
    except AssertionError as ae:
        assert "should have raised" not in str(ae)
    # ...but must match the replacement ``noise`` parameter exactly.
    assert pu.compare_frames(encoded_alias, encoded_new, 0, tol_numeric=1e-5)
h2oai/h2o-3
[ 6169, 1943, 6169, 208, 1393862887 ]
def test_transform_data_leakage_handling_param_raise_warning():
    """``transform(data_leakage_handling=...)`` is deprecated and warns."""
    data = load_dataset(incl_test=True)
    encoder = H2OTargetEncoderEstimator(data_leakage_handling="leave_one_out", seed=42)
    encoder.train(y=data.target, training_frame=data.train)
    encoder.predict(data.test)
    encoder.transform(data.test, as_training=True)
    encoder.transform(data.test)

    # Passing "none" explicitly: deprecated and ignored -> one warning.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        encoder.transform(data.test, data_leakage_handling="none")
        assert len(caught) == 1
        assert issubclass(caught[0].category, H2ODeprecationWarning)
        assert "`data_leakage_handling` is deprecated in `transform` method and will be ignored" in str(caught[0].message)

    # Any value other than "none" is reinterpreted as ``as_training=True``,
    # which adds a second warning on top of the deprecation one.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        encoder.transform(data.test, data_leakage_handling="leave_one_out")
        assert len(caught) == 2
        assert issubclass(caught[1].category, H2ODeprecationWarning)
        assert "as_training=True" in str(caught[1].message)
h2oai/h2o-3
[ 6169, 1943, 6169, 208, 1393862887 ]
def __init__(self):
    """Initialize the error with its fixed user-facing message."""
    super().__init__("Machine feature is disabled")
dmpetrov/dataversioncontrol
[ 11197, 1036, 11197, 597, 1488615393 ]
def __init__(self, args):
    """Gate machine commands on the feature flag and normalize the name.

    Raises:
        MachineDisabledError: when ``feature.machine`` is not enabled
            in the loaded config.
    """
    super().__init__(args)

    enabled = self.config["feature"].get("machine", False)
    if not enabled:
        raise MachineDisabledError

    # Machine names are case-insensitive; store them lowercased.
    name = getattr(self.args, "name", None)
    if name:
        self.args.name = name.lower()
dmpetrov/dataversioncontrol
[ 11197, 1036, 11197, 597, 1488615393 ]