function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def _makeOne(self):
    """Return a Queue rooted at a fresh, collision-free ZooKeeper path."""
    unique_path = "/" + uuid.uuid4().hex
    return self.client.Queue(unique_path)
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def test_empty_queue(self):
    """A fresh queue has length 0 and get() yields nothing."""
    q = self._makeOne()
    assert len(q) == 0
    assert q.get() is None
    # Still empty after the failed get.
    assert len(q) == 0
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def test_priority(self):
    """Entries come out in ascending priority order, FIFO within ties."""
    q = self._makeOne()
    q.put(b"four", priority=101)
    q.put(b"one", priority=0)
    q.put(b"two", priority=0)
    q.put(b"three", priority=10)
    for expected in (b"one", b"two", b"three", b"four"):
        assert q.get() == expected
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def setUp(self):
    """Skip queue tests unless ZooKeeper >= 3.4 is available.

    Uses the CI_ZK_VERSION override when present; otherwise asks the
    connected server for its version. The original had a redundant
    ``elif`` branch re-assigning ``skip = False`` (its default).
    """
    KazooTestCase.setUp(self)
    if CI_ZK_VERSION:
        skip = CI_ZK_VERSION < (3, 4)
    else:
        # NOTE(review): compares only the minor component — assumes the
        # major version is 3; confirm against server_version() format.
        ver = self.client.server_version()
        skip = ver[1] < 4
    if skip:
        pytest.skip("Must use Zookeeper 3.4 or above")
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def test_queue_validation(self):
    """put/put_all reject bad payload types and out-of-range priorities."""
    q = self._makeOne()
    for bad_put in (({},), (b"one", b"100"), (b"one", 10.0)):
        with pytest.raises(TypeError):
            q.put(*bad_put)
    for bad_priority in (-100, 100000):
        with pytest.raises(ValueError):
            q.put(b"one", bad_priority)
    for bad_put_all in (({},), ([{}],), ([b"one"], b"100"), ([b"one"], 10.0)):
        with pytest.raises(TypeError):
            q.put_all(*bad_put_all)
    for bad_priority in (-100, 100000):
        with pytest.raises(ValueError):
            q.put_all([b"one"], bad_priority)
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def test_queue(self):
    """Full get/consume/holds_lock cycle through three queued items."""
    q = self._makeOne()
    q.put(b"one")
    q.put_all([b"two", b"three"])
    assert len(q) == 3
    assert not q.consume()
    assert not q.holds_lock()
    assert q.get(1) == b"one"
    assert q.holds_lock()
    # Without consuming, the same element is returned again.
    assert q.get(1) == b"one"
    assert q.consume()
    assert not q.holds_lock()
    for expected in (b"two", b"three"):
        assert q.get(1) == expected
        assert q.holds_lock()
        assert q.consume()
        assert not q.holds_lock()
    assert not q.consume()
    assert len(q) == 0
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def test_release(self):
    """release() hands the item back without consuming it."""
    q = self._makeOne()
    q.put(b"one")
    assert q.get(1) == b"one"
    assert q.holds_lock()
    assert q.release()
    assert not q.holds_lock()
    # The released entry is served again.
    assert q.get(1) == b"one"
    assert q.consume()
    # Nothing is held any more, so release() reports failure.
    assert not q.release()
    assert len(q) == 0
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def test_priority(self):
    """LockingQueue honours priorities across get/consume cycles."""
    q = self._makeOne()
    q.put(b"four", priority=101)
    q.put(b"one", priority=0)
    q.put(b"two", priority=0)
    q.put(b"three", priority=10)
    for expected in (b"one", b"two", b"three", b"four"):
        assert q.get(1) == expected
        assert q.consume()
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def get_concurrently(value, event):
    """Fetch one item through a second LockingQueue client, then signal.

    NOTE(review): relies on `self` and `queue` from the enclosing test
    scope — confirm this stays nested in its original function.
    """
    other_queue = self.client.LockingQueue(queue.path)
    value.append(other_queue.get(0.1))
    event.set()
python-zk/kazoo
[ 1240, 379, 1240, 117, 1337816802 ]
def __init__(self, error):
    """Build the exception from an API error payload.

    `error` is a mapping with 'message', 'status' and 'code' keys;
    the message becomes the Exception text.
    """
    message = error['message']
    super().__init__(message)
    self.status = error['status']
    self.code = error['code']
QISKit/qiskit-sdk-py
[ 3515, 1875, 3515, 1061, 1488560562 ]
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
    """Rebuild a binary tree from inorder + postorder traversals.

    The last element of postorder is the root of the current subtree;
    its index in inorder splits the left and right subtrees. The right
    subtree is built first because postorder is consumed from the end.

    BUG FIX: the original defined ``helper`` but never built the
    value->index map nor invoked it, so it always returned None.
    """
    # O(1) lookups instead of repeated list.index() scans.
    inorderMap = {val: idx for idx, val in enumerate(inorder)}

    def helper(inorderL, inorderR):
        # Empty half-open range -> no subtree.
        if inorderL >= inorderR:
            return None
        curr = postorder.pop()
        root = TreeNode(curr)
        currPos = inorderMap[curr]
        # Right first: postorder's tail holds the right subtree.
        root.right = helper(currPos + 1, inorderR)
        root.left = helper(inorderL, currPos)
        return root

    return helper(0, len(inorder))
saisankargochhayat/algo_quest
[ 2, 1, 2, 1, 1473454289 ]
def __init__(self, max_age=None, success=None, job_id=None,
             sequential_mode=None, previous_job_id=None, component=None):
    """Params - a model defined in Swagger.

    ``swagger_types`` maps attribute name -> attribute type;
    ``attribute_map`` maps attribute name -> JSON key in the definition.
    """
    self.swagger_types = {
        'max_age': 'int',
        'success': 'bool',
        'job_id': 'str',
        'sequential_mode': 'bool',
        'previous_job_id': 'str',
        'component': 'str'
    }
    # JSON keys are identical to the attribute names for this model.
    self.attribute_map = {name: name for name in self.swagger_types}
    self._max_age = max_age
    self._success = success
    self._job_id = job_id
    self._sequential_mode = sequential_mode
    self._previous_job_id = previous_job_id
    self._component = component
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def max_age(self):
    """Gets the max_age of this Params.

    Maximum age (in hours) for a repo to be considered; any repo tested
    or being tested after "now - max_age" counts, and 0 means all repos.

    :return: The max_age of this Params.
    :rtype: int
    """
    return self._max_age
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def max_age(self, max_age):
    """Sets the max_age of this Params.

    Maximum age (in hours) for a repo to be considered; 0 means all.

    :param max_age: The max_age of this Params.
    :type: int
    :raises ValueError: if max_age is None or negative
    """
    if max_age is None:
        raise ValueError("Invalid value for `max_age`, must not be `None`")
    # The original re-tested `is not None` here; unreachable after the
    # guard above, so only the sign check remains.
    if max_age < 0:
        raise ValueError("Invalid value for `max_age`, must be a value"
                         " greater than or equal to `0`")
    self._max_age = max_age
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def success(self):
    """Gets the success of this Params.

    When set, only repos with the matching successful/unsuccessful vote
    are found; when unset, any tested repo is considered.

    :return: The success of this Params.
    :rtype: bool
    """
    return self._success
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def success(self, success):
    """Sets the success of this Params.

    When set, only repos with the matching successful/unsuccessful vote
    are found; when unset, any tested repo is considered.

    :param success: The success of this Params.
    :type: bool
    """
    self._success = success
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def job_id(self):
    """Gets the job_id of this Params.

    Name of the CI that sent the vote; unset means no CI filter.

    :return: The job_id of this Params.
    :rtype: str
    """
    return self._job_id
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def job_id(self, job_id):
    """Sets the job_id of this Params.

    Name of the CI that sent the vote; unset means no CI filter.

    :param job_id: The job_id of this Params.
    :type: str
    """
    self._job_id = job_id
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def sequential_mode(self):
    """Gets the sequential_mode of this Params.

    Whether to use the sequential-mode algorithm: return the last repo
    tested in the timeframe by the CI named in previous_job_id.
    Defaults to false.

    :return: The sequential_mode of this Params.
    :rtype: bool
    """
    return self._sequential_mode
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def sequential_mode(self, sequential_mode):
    """Sets the sequential_mode of this Params.

    Whether to use the sequential-mode algorithm: return the last repo
    tested in the timeframe by the CI named in previous_job_id.
    Defaults to false.

    :param sequential_mode: The sequential_mode of this Params.
    :type: bool
    """
    self._sequential_mode = sequential_mode
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def previous_job_id(self):
    """Gets the previous_job_id of this Params.

    In sequential mode, look for jobs tested by this CI.

    :return: The previous_job_id of this Params.
    :rtype: str
    """
    return self._previous_job_id
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def previous_job_id(self, previous_job_id):
    """Sets the previous_job_id of this Params.

    In sequential mode, look for jobs tested by this CI.

    :param previous_job_id: The previous_job_id of this Params.
    :type: str
    """
    self._previous_job_id = previous_job_id
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def component(self):
    """Gets the component of this Params.

    :return: The component of this Params.
    :rtype: str
    """
    return self._component
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def component(self, component):
    """Sets the component of this Params.

    :param component: The component of this Params.
    :type: str
    """
    self._component = component
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def to_str(self):
    """Return the pretty-printed string form of the model."""
    model_dict = self.to_dict()
    return pformat(model_dict)
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def __eq__(self, other):
    """Two Params are equal iff all their attributes match."""
    return isinstance(other, Params) and self.__dict__ == other.__dict__
javierpena/dlrnapi_client
[ 1, 3, 1, 3, 1488890089 ]
def get_user_status(self, user: Optional['User'] = None) -> 'TermsOfServiceUserStatus':
    """
    Get the terms of service user status.

    :param user:
        The user to get the terms-of-service status for; defaults to the
        current user.
    :returns:
        A :class:`TermsOfServiceUserStatus` object.
    """
    params = {'tos_id': self.object_id}
    if user is not None:
        params['user_id'] = user.object_id
    url = self._session.get_url('terms_of_service_user_statuses')
    box_response = self._session.get(url, params=params)
    # The API returns a collection; only the first entry is relevant.
    first_entry = box_response.json()['entries'][0]
    return self.translator.translate(
        session=self._session,
        response_object=first_entry,
    )
box/box-python-sdk
[ 375, 216, 375, 21, 1423182655 ]
def reject(self, user: Optional['User'] = None) -> 'TermsOfServiceUserStatus':
    """
    Reject a terms of service.

    :param user: The :class:`User` to assign the terms of service to.
    :returns: A newly created :class:`TermsOfServiceUserStatus` object.
    """
    return self.set_user_status(is_accepted=False, user=user)
box/box-python-sdk
[ 375, 216, 375, 21, 1423182655 ]
def usage():
    """Return the CLI usage/help text for runtests.py."""
    return """ Usage: python runtests.py [UnitTestClass].[method] You can pass the Class name of the `UnitTestClass` you want to test. Append a method name if you only want to test a specific method of that class. """
alanjds/drf-nested-routers
[ 1458, 147, 1458, 48, 1380573232 ]
# NOTE(review): auto-collapsed RL training entry point (env setup, replay
# buffer, algorithm selection by FLAGS.algo_name, train/eval loop). Left
# byte-identical: FLAGS.set_default side effects and the statement order of
# the collection/training loop are load-bearing. Assumes FLAGS, utils,
# evaluation and the algo modules are defined at module scope — confirm.
def main(_): if FLAGS.eager: tf.config.experimental_run_functions_eagerly(FLAGS.eager) tf.random.set_seed(FLAGS.seed) np.random.seed(FLAGS.seed) random.seed(FLAGS.seed) action_repeat = FLAGS.action_repeat _, _, domain_name, _ = FLAGS.env_name.split('-') if domain_name in ['cartpole']: FLAGS.set_default('action_repeat', 8) elif domain_name in ['reacher', 'cheetah', 'ball_in_cup', 'hopper']: FLAGS.set_default('action_repeat', 4) elif domain_name in ['finger', 'walker']: FLAGS.set_default('action_repeat', 2) FLAGS.set_default('max_timesteps', FLAGS.max_timesteps // FLAGS.action_repeat) env = utils.load_env( FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack) eval_env = utils.load_env( FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack) is_image_obs = (isinstance(env.observation_spec(), TensorSpec) and len(env.observation_spec().shape) == 3) spec = ( env.observation_spec(), env.action_spec(), env.reward_spec(), env.reward_spec(), # discount spec env.observation_spec() # next observation spec ) replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( spec, batch_size=1, max_length=FLAGS.max_length_replay_buffer) @tf.function def add_to_replay(state, action, reward, discount, next_states): replay_buffer.add_batch((state, action, reward, discount, next_states)) hparam_str = utils.make_hparam_string( FLAGS.xm_parameters, seed=FLAGS.seed, env_name=FLAGS.env_name, algo_name=FLAGS.algo_name) summary_writer = tf.summary.create_file_writer( os.path.join(FLAGS.save_dir, 'tb', hparam_str)) results_writer = tf.summary.create_file_writer( os.path.join(FLAGS.save_dir, 'results', hparam_str)) if 'ddpg' in FLAGS.algo_name: model = ddpg.DDPG( env.observation_spec(), env.action_spec(), cross_norm='crossnorm' in FLAGS.algo_name) elif 'crr' in FLAGS.algo_name: model = awr.AWR( env.observation_spec(), env.action_spec(), f='bin_max') elif 'awr' in FLAGS.algo_name: model = awr.AWR( env.observation_spec(), env.action_spec(), f='exp_mean') elif 'sac_v1' in FLAGS.algo_name: model = sac_v1.SAC( env.observation_spec(), env.action_spec(), target_entropy=-env.action_spec().shape[0]) elif 'asac' in FLAGS.algo_name: model = asac.ASAC( env.observation_spec(), env.action_spec(), target_entropy=-env.action_spec().shape[0]) elif 'sac' in FLAGS.algo_name: model = sac.SAC( env.observation_spec(), env.action_spec(), target_entropy=-env.action_spec().shape[0], cross_norm='crossnorm' in FLAGS.algo_name, pcl_actor_update='pc' in FLAGS.algo_name) elif 'pcl' in FLAGS.algo_name: model = pcl.PCL( env.observation_spec(), env.action_spec(), target_entropy=-env.action_spec().shape[0]) initial_collect_policy = random_tf_policy.RandomTFPolicy( env.time_step_spec(), env.action_spec()) dataset = replay_buffer.as_dataset( num_parallel_calls=tf.data.AUTOTUNE, sample_batch_size=FLAGS.sample_batch_size) if is_image_obs: # Augment images as in DRQ. dataset = dataset.map(image_aug, num_parallel_calls=tf.data.AUTOTUNE, deterministic=False).prefetch(3) else: dataset = dataset.prefetch(3) def repack(*data): return data[0] dataset = dataset.map(repack) replay_buffer_iter = iter(dataset) previous_time = time.time() timestep = env.reset() episode_return = 0 episode_timesteps = 0 step_mult = 1 if action_repeat < 1 else action_repeat for i in tqdm.tqdm(range(FLAGS.max_timesteps)): if i % FLAGS.deployment_batch_size == 0: for _ in range(FLAGS.deployment_batch_size): if timestep.is_last(): if episode_timesteps > 0: current_time = time.time() with summary_writer.as_default(): tf.summary.scalar( 'train/returns', episode_return, step=(i + 1) * step_mult) tf.summary.scalar( 'train/FPS', episode_timesteps / (current_time - previous_time), step=(i + 1) * step_mult) timestep = env.reset() episode_return = 0 episode_timesteps = 0 previous_time = time.time() if (replay_buffer.num_frames() < FLAGS.num_random_actions or replay_buffer.num_frames() < FLAGS.deployment_batch_size): # Use policy only after the first deployment. policy_step = initial_collect_policy.action(timestep) action = policy_step.action else: action = model.actor(timestep.observation, sample=True) next_timestep = env.step(action) add_to_replay(timestep.observation, action, next_timestep.reward, next_timestep.discount, next_timestep.observation) episode_return += next_timestep.reward[0] episode_timesteps += 1 timestep = next_timestep if i + 1 >= FLAGS.start_training_timesteps: with summary_writer.as_default(): info_dict = model.update_step(replay_buffer_iter) if (i + 1) % FLAGS.log_interval == 0: with summary_writer.as_default(): for k, v in info_dict.items(): tf.summary.scalar(f'training/{k}', v, step=(i + 1) * step_mult) if (i + 1) % FLAGS.eval_interval == 0: logging.info('Performing policy eval.') average_returns, evaluation_timesteps = evaluation.evaluate( eval_env, model) with results_writer.as_default(): tf.summary.scalar( 'evaluation/returns', average_returns, step=(i + 1) * step_mult) tf.summary.scalar( 'evaluation/length', evaluation_timesteps, step=(i+1) * step_mult) logging.info('Eval at %d: ave returns=%f, ave episode length=%f', (i + 1) * step_mult, average_returns, evaluation_timesteps) if (i + 1) % FLAGS.eval_interval == 0: model.save_weights( os.path.join(FLAGS.save_dir, 'results', FLAGS.env_name + '__' + str(i + 1)))
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
# Spark-distributed randomized hyperparameter search constructor.
# NOTE(review): fit_params is forwarded to the sklearn parent and then
# normalized to {} locally; `sc` is presumably the SparkContext used to
# parallelize fits — confirm upstream. Left byte-identical: the parent
# __init__ may set attributes, so assignment order matters.
def __init__(self, sc, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) self.fit_params = fit_params if fit_params is not None else {} self.sc = sc self.cv_results_ = None
databricks/spark-sklearn
[ 1075, 234, 1075, 15, 1441219491 ]
def __init__(self, value):
    """Wrap a raw operand for the forward-operator tests."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __add__(self, other):
    """Forward +: wrapped value on the left."""
    return self.value + other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __mul__(self, other):
    """Forward *: wrapped value on the left."""
    return self.value * other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __floordiv__(self, other):
    """Forward //: wrapped value on the left."""
    return self.value // other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __divmod__(self, other):
    """Forward divmod(): wrapped value as the dividend."""
    return divmod(self.value, other)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __lshift__(self, other):
    """Forward <<: wrapped value on the left."""
    return self.value << other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __and__(self, other):
    """Forward &: wrapped value on the left."""
    return self.value & other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __or__(self, other):
    """Forward |: wrapped value on the left."""
    return self.value | other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand for the reflected-operator tests."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __radd__(self, other):
    """Reflected +: wrapped value on the right."""
    return other + self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rmul__(self, other):
    """Reflected *: logs the dispatch, then multiplies."""
    print("\toldstyle_reflect.__rmul__")
    return other * self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rfloordiv__(self, other):
    """Reflected //: wrapped value as the divisor."""
    return other // self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rdivmod__(self, other):
    """Reflected divmod(): wrapped value as the divisor."""
    return divmod(other, self.value)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rlshift__(self, other):
    """Reflected <<: wrapped value as the shift amount."""
    return other << self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rand__(self, other):
    """Reflected &.

    NOTE(review): operand order matches the original (wrapped value on
    the left, not the reflected order) — confirm intent.
    """
    return self.value & other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ror__(self, other):
    """Reflected |.

    NOTE(review): operand order matches the original (wrapped value on
    the left, not the reflected order) — confirm intent.
    """
    return self.value | other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand for the in-place-operator tests."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __iadd__(self, other):
    """In-place +: returns the raw sum, not self."""
    return self.value + other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __imul__(self, other):
    """In-place *: returns the raw product, not self."""
    return self.value * other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ifloordiv__(self, other):
    """In-place //: returns the raw quotient, not self."""
    return self.value // other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __idivmod__(self, other):
    """divmod variant; NOTE(review): not a real augmented-assignment hook
    in CPython — presumably exercised directly by the test harness."""
    return divmod(self.value, other)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ilshift__(self, other):
    """In-place <<: returns the raw shifted value, not self."""
    return self.value << other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __iand__(self, other):
    """In-place &: returns the raw result, not self."""
    return self.value & other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ior__(self, other):
    """In-place |: returns the raw result, not self."""
    return self.value | other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand; this class defines no operator hooks."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand for the new-style forward-operator tests."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __add__(self, other):
    """Forward +: wrapped value on the left."""
    return self.value + other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __mul__(self, other):
    """Forward *: wrapped value on the left."""
    return self.value * other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __floordiv__(self, other):
    """Forward //: wrapped value on the left."""
    return self.value // other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __divmod__(self, other):
    """Forward divmod(): wrapped value as the dividend."""
    return divmod(self.value, other)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __lshift__(self, other):
    """Forward <<: wrapped value on the left."""
    return self.value << other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __and__(self, other):
    """Forward &: wrapped value on the left."""
    return self.value & other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __or__(self, other):
    """Forward |: wrapped value on the left."""
    return self.value | other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand for the new-style reflected-operator tests."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __radd__(self, other):
    """Reflected +: wrapped value on the right."""
    return other + self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rmul__(self, other):
    """Reflected *: logs the dispatch, then multiplies."""
    print("\tnewstyle_reflect.__rmul__")
    return other * self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rfloordiv__(self, other):
    """Reflected //: wrapped value as the divisor."""
    return other // self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rdivmod__(self, other):
    """Reflected divmod(): wrapped value as the divisor."""
    return divmod(other, self.value)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rlshift__(self, other):
    """Reflected <<: wrapped value as the shift amount."""
    return other << self.value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __rand__(self, other):
    """Reflected &.

    NOTE(review): operand order matches the original (wrapped value on
    the left, not the reflected order) — confirm intent.
    """
    return self.value & other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ror__(self, other):
    """Reflected |.

    NOTE(review): operand order matches the original (wrapped value on
    the left, not the reflected order) — confirm intent.
    """
    return self.value | other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand for the new-style in-place-operator tests."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __iadd__(self, other):
    """In-place +: returns the raw sum, not self."""
    return self.value + other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __imul__(self, other):
    """In-place *: returns the raw product, not self."""
    return self.value * other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ifloordiv__(self, other):
    """In-place //: returns the raw quotient, not self."""
    return self.value // other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __idivmod__(self, other):
    """divmod variant; NOTE(review): not a real augmented-assignment hook
    in CPython — presumably exercised directly by the test harness."""
    return divmod(self.value, other)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ilshift__(self, other):
    """In-place <<: returns the raw shifted value, not self."""
    return self.value << other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __iand__(self, other):
    """In-place &: returns the raw result, not self."""
    return self.value & other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __ior__(self, other):
    """In-place |: returns the raw result, not self."""
    return self.value | other
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self, value):
    """Wrap a raw operand; this new-style class defines no operator hooks."""
    self.value = value
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def normal(self, leftc, rightc):
    """Apply every binary operator to each (a, b) cross pair, logging results.

    Each attempt logs the case first, then either the typed result or the
    exception type, so the whole operator matrix is always exercised.
    """
    binary_ops = (
        ("+", lambda a, b: a + b),
        ("-", lambda a, b: a - b),
        ("*", lambda a, b: a * b),
        ("/", lambda a, b: a / b),
        ("//", lambda a, b: a // b),
        ("%", lambda a, b: a % b),
        ("**", lambda a, b: a ** b),
        ("<<", lambda a, b: a << b),
        (">>", lambda a, b: a >> b),
        ("&", lambda a, b: a & b),
        ("^", lambda a, b: a ^ b),
        ("|", lambda a, b: a | b),
    )
    for a in leftc:
        for b in rightc:
            for symbol, apply_op in binary_ops:
                try:
                    printwith("case", a, symbol, b, type(a), type(b))
                    printwithtype(apply_op(a, b))
                except:
                    # Bare except kept: the harness records any failure kind.
                    printwith("same", sys.exc_info()[0])
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def clone_list(self, l):
    """Return a copy of `l`, cloning in-place-op wrapper instances.

    Wrapper instances are cloned so the in-place operator tests cannot
    mutate the shared originals; every other element is shared as-is.
    """
    l2 = []
    for x in l:
        # BUG FIX: the original used `x is newstyle_inplace` (identity
        # against the class object), which can never match an instance,
        # so wrappers were silently never cloned. isinstance() is the
        # intended check.
        if isinstance(x, newstyle_inplace):
            l2.append(newstyle_inplace(x.value))
        elif isinstance(x, oldstyle_inplace):
            l2.append(oldstyle_inplace(x.value))
        else:
            l2.append(x)
    return l2
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def __init__(self):
    """Build the base operand collection plus one wrapped copy per class."""
    self.collection = testdata.merge_lists(
        [None],
        testdata.list_bool,
        testdata.list_int,
        testdata.list_float,
        testdata.list_long[:-1],  # the last number is very long
        testdata.list_complex,
        testdata.list_myint,
        testdata.list_myfloat,
        testdata.list_mylong,
        testdata.list_mycomplex,
        testdata.get_Int64_Byte(),
    )
    # One wrapped view of the collection per operator-test wrapper class.
    for attr, wrapper in (
        ("collection_oldstyle", oldstyle),
        ("collection_oldstyle_reflect", oldstyle_reflect),
        ("collection_oldstyle_notdefined", oldstyle_notdefined),
        ("collection_newstyle", newstyle),
        ("collection_newstyle_reflect", newstyle_reflect),
        ("collection_newstyle_notdefined", newstyle_notdefined),
        ("collection_oldstyle_inplace", oldstyle_inplace),
        ("collection_newstyle_inplace", newstyle_inplace),
    ):
        setattr(self, attr, [wrapper(x) for x in self.collection])
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_normal_oc_left(self):
    """Old-style wrappers on the left vs. the plain collection."""
    super(ops_simple, self).normal(self.collection_oldstyle, self.collection)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_normal_nc_left(self):
    """New-style wrappers on the left vs. the plain collection."""
    super(ops_simple, self).normal(self.collection_newstyle, self.collection)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_reflect_oc_right(self):
    """Plain collection on the left vs. old-style reflect wrappers."""
    super(ops_simple, self).normal(self.collection, self.collection_oldstyle_reflect)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_oc_notdefined(self):
    """Old-style wrappers without operator hooks on the left."""
    super(ops_simple, self).normal(self.collection_oldstyle_notdefined, self.collection)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_oc_notdefined_oc_reflect(self):
    """Hook-less old-style wrappers vs. old-style reflect wrappers."""
    super(ops_simple, self).normal(self.collection_oldstyle_notdefined, self.collection_oldstyle_reflect)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_inplace(self):
    """In-place operators over the plain collection on both sides."""
    super(ops_simple, self).inplace(self.collection, self.collection)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def test_inplace_nl(self):
    """In-place operators with new-style in-place wrappers on the left."""
    super(ops_simple, self).inplace(self.collection_newstyle_inplace, self.collection)
IronLanguages/ironpython3
[ 2070, 249, 2070, 270, 1393537849 ]
def forwards_func(apps, schema_editor):
    """Apply the per-language field values from idDistributionMap."""
    Language = apps.get_model("lexicon", "Language")
    target_ids = set(idDistributionMap.keys())
    for language in Language.objects.filter(id__in=target_ids).all():
        for field_name, field_value in idDistributionMap[language.id].items():
            setattr(language, field_name, field_value)
        language.save()
lingdb/CoBL-public
[ 3, 3, 3, 1, 1483699154 ]
def Solver(mats):
    """Return the appropriate solver for `mats`.

    Parameters
    ----------
    mats : SparseMatrix or list of SparseMatrices

    Returns
    -------
    Matrix solver (:class:`.SparseMatrixSolver`)

    Note
    ----
    Boundary matrices found in the list are extracted and handed to the
    solver, which incorporates them on the right hand side of the
    equation system.
    """
    assert isinstance(mats, (SparseMatrix, list))
    if isinstance(mats, list):
        bc_mats = extract_bc_matrices([mats])
        mat = sum(mats[1:], mats[0])
    else:
        bc_mats = []
        mat = mats
    return mat.get_solver()([mat] + bc_mats)
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
def __init__(self, mat):
    """Store the (possibly combined) square matrix and boundary matrices."""
    assert isinstance(mat, (SparseMatrix, list))
    if isinstance(mat, list):
        self.bc_mats = extract_bc_matrices([mat])
        mat = sum(mat[1:], mat[0])
    else:
        self.bc_mats = []
    self.mat = mat
    self._lu = None          # factorization cache, built lazily
    self._inner_arg = None   # argument to inner_solve
    assert self.mat.shape[0] == self.mat.shape[1]
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
# Pins matrix rows/rhs entries to enforce constraints (e.g. a mean value).
# NOTE(review): mutates self.mat in place only on the first call (guarded by
# self._lu is None); the scipy and shenfun SparseMatrix branches must stay
# in this exact order — left byte-identical.
def apply_constraints(self, b, constraints, axis=0): """Apply constraints to matrix `self.mat` and rhs vector `b` Parameters ---------- b : array constraints : tuple of 2-tuples The 2-tuples represent (row, val) The constraint indents the matrix row and sets b[row] = val axis : int The axis we are solving over """ # Only apply constraint to matrix first time around if len(constraints) > 0: if b.ndim > 1: T = b.function_space().bases[axis] A = self.mat if isinstance(A, spmatrix): for (row, val) in constraints: if self._lu is None: A = A.tolil() _, zerorow = A[row].nonzero() A[(row, zerorow)] = 0 A[row, row] = 1 self.mat = A.tocsc() if b.ndim > 1: b[T.si[row]] = val else: b[row] = val elif isinstance(A, SparseMatrix): for (row, val) in constraints: if self._lu is None: for key, vals in A.items(): if key >= 0: M = A.shape[0]-key v = np.broadcast_to(np.atleast_1d(vals), M).copy() if row < M: v[row] = int(key == 0)/A.scale elif key < 0: M = A.shape[0]+key v = np.broadcast_to(np.atleast_1d(vals), M).copy() if row+key < M and row+key > 0: v[row+key] = 0 A[key] = v if b.ndim > 1: b[T.si[row]] = val else: b[row] = val return b
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
# Solves Au=b along one axis of (possibly) multidimensional arrays.
# NOTE(review): the moveaxis calls at entry/exit must stay symmetric, and
# the real/imag split handles a real factorization applied to complex rhs —
# left byte-identical.
def solve(self, b, u, axis, lu): """Solve Au=b Solve along axis if b and u are multidimensional arrays. Parameters ---------- b, u : arrays of rhs and output Both can be multidimensional axis : int The axis we are solving over lu : LU-decomposition Can be either the output from splu, or a dia-matrix containing the L and U matrices. The latter is used in subclasses. """ if axis > 0: u = np.moveaxis(u, axis, 0) if u is not b: b = np.moveaxis(b, axis, 0) s = slice(0, self.mat.shape[0]) if b.ndim == 1: if b.dtype.char in 'fdg' or self.dtype in 'FDG': u[s] = lu.solve(b[s]) else: u.real[s] = lu.solve(b[s].real) u.imag[s] = lu.solve(b[s].imag) else: N = b[s].shape[0] P = np.prod(b[s].shape[1:]) br = b[s].reshape((N, P)) if b.dtype.char in 'fdg' or self.dtype in 'FDG': u[s] = lu.solve(br).reshape(u[s].shape) else: u.real[s] = lu.solve(br.real).reshape(u[s].shape) u.imag[s] = lu.solve(br.imag).reshape(u[s].shape) if axis > 0: u = np.moveaxis(u, 0, axis) if u is not b: b = np.moveaxis(b, 0, axis) return u
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
def inner_solve(u, lu):
    """Solve Au=b for one-dimensional u, in place.

    On entry `u` holds the rhs b; on exit it holds the solution.

    Parameters
    ----------
    u : array
        1D rhs on entry and solution on exit
    lu : 2-tuple
        (LU-decomposition, dtype); the decomposition is either the output
        of splu or a scipy dia-matrix with the L and U factors (used by
        subclasses).
    """
    decomposition, dtype = lu
    active = slice(0, decomposition.shape[0])
    if u.dtype.char in 'fdg' or dtype in 'FDG':
        u[active] = decomposition.solve(u[active])
    else:
        # Real factorization on a complex rhs: solve parts separately.
        u.real[active] = decomposition.solve(u.real[active])
        u.imag[active] = decomposition.solve(u.imag[active])
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
def __init__(self, mat):
    """Initialize the base solver, then pre-build dia-format LU storage."""
    SparseMatrixSolver.__init__(self, mat)
    self._lu = self.mat.diags('dia')
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
def LU(data):
    """LU-decompose in place using either Cython or Numba (subclass hook).

    Parameters
    ----------
    data : 2D-array
        Storage holding the dia-matrix on entry and the L and U matrices
        on exit.
    """
    raise NotImplementedError
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]
def Solve(u, data, axis=0):
    """Fast solve using either Cython or Numba (subclass hook).

    Parameters
    ----------
    u : array
        rhs on entry, solution on exit
    data : 2D-array
        Storage for the dia-matrix containing the L and U matrices
    axis : int, optional
        The axis we are solving over
    """
    raise NotImplementedError
spectralDNS/shenfun
[ 148, 38, 148, 24, 1485264542 ]