code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def main(global_config, **settings): <NEW_LINE> <INDENT> engine = engine_from_config(settings, 'sqlalchemy.') <NEW_LINE> DBSession.configure(bind=engine) <NEW_LINE> Base.metadata.bind = engine <NEW_LINE> config = Configurator(settings=settings) <NEW_LINE> config.include('pyramid_chameleon') <NEW_LINE> config.add_static_view('static', 'static', cache_max_age=3600) <NEW_LINE> config.add_route('home', '/') <NEW_LINE> config.include('pyramid_sacrud') <NEW_LINE> settings = config.registry.settings <NEW_LINE> settings['pyramid_sacrud.models'] = (('Group1', [CompletedProject]), ) <NEW_LINE> config.scan() <NEW_LINE> return config.make_wsgi_app()
This function returns a Pyramid WSGI application.
625941c138b623060ff0ad62
def range(self, var, calibrate=False): <NEW_LINE> <INDENT> q = self.q[(var, 'range')] <NEW_LINE> mi, MI = self._align(self.data.xs('min', 1, 'aggr'), q['min']) <NEW_LINE> ma, MA = self._align(self.data.xs('max', 1, 'aggr'), q['max']) <NEW_LINE> if calibrate == True: <NEW_LINE> <INDENT> self._qual(mi, 'range_min') <NEW_LINE> self._qual(ma, 'range_max') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._qual(mi < MI, 'range_min') <NEW_LINE> self._qual(ma > MA, 'range_max')
values that fall outside climatological range
625941c1d7e4931a7ee9de90
def video_pause(self): <NEW_LINE> <INDENT> return self.run_command('pause', 212)
pause current video
625941c1435de62698dfdbbf
def setUp(self): <NEW_LINE> <INDENT> self.client = APIClient() <NEW_LINE> self.user = get_user_model().objects.create_user( email='test1@test.com', password='testpass' ) <NEW_LINE> self.client.force_authenticate((self.user))
Basic setup
625941c1fb3f5b602dac3604
def timed_run(self, duration): <NEW_LINE> <INDENT> if isinstance(angela2.get_device(), angela2.devices.RuntimeDevice): <NEW_LINE> <INDENT> angela2.run(angela2.defaultclock.dt, level=1) <NEW_LINE> angela2.run(duration, level=1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> angela2.run(duration, level=1)
Do a timed run. This means that for RuntimeDevice it will run for defaultclock.dt before running for the rest of the duration. This means total run duration will be duration+defaultclock.dt. For standalone devices, this feature may or may not be implemented.
625941c150485f2cf553cd0c
def recv(self): <NEW_LINE> <INDENT> message = self.stdout.readline()[:-1] <NEW_LINE> return self._decode(message)
Receive and decode a message from a kernel process. Block until a newline-terminated message is available.
625941c13cc13d1c6d3c72ee
def flash_firmware(hex_file, part_id, confirm=False, check_only=True, timeout=90): <NEW_LINE> <INDENT> ret = {} <NEW_LINE> if not confirm: <NEW_LINE> <INDENT> raise salt.exceptions.CommandExecutionError( "This command will flash firmware release '{:s}' onto the ATtiny - add parameter 'confirm=true' to continue anyway".format(hex_file)) <NEW_LINE> <DEDENT> if not check_only: <NEW_LINE> <INDENT> res = client.send_sync(_msg_pack(hex_file, part_id, no_write=False, _handler="flash_firmware"), timeout=timeout) <NEW_LINE> ret["output"] = res.get("output", None) <NEW_LINE> <DEDENT> return ret
Flashes new SPM firmware to ATtiny.
625941c130c21e258bdfa40f
def patch(self, request, pk=None): <NEW_LINE> <INDENT> return Response({'method':'PATCH'})
Partial handle update
625941c15fcc89381b1e1630
def sanitize_fragment(html): <NEW_LINE> <INDENT> if not html: <NEW_LINE> <INDENT> return u'' <NEW_LINE> <DEDENT> import lxml.html <NEW_LINE> body = lxml.html.document_fromstring(html).find('body') <NEW_LINE> html = lxml.html.tostring(body, encoding='utf-8')[6:-7].decode('utf-8') <NEW_LINE> if html.startswith('<p>') and html.endswith('</p>'): <NEW_LINE> <INDENT> html = html[3:-4] <NEW_LINE> <DEDENT> return html
#html5lib reorders arguments, so not usable import html5lib return html5lib.parseFragment(html).toxml().decode('utf-8')
625941c126068e7796caec4f
def rpf_kernel(self, Z_expand, rpf_h, M=32): <NEW_LINE> <INDENT> Z_expand_row_dim = tf.expand_dims(Z_expand, axis=1) <NEW_LINE> Z_expand_column_dim = tf.expand_dims(Z_expand, axis=2) <NEW_LINE> delta = Z_expand_column_dim - Z_expand_row_dim <NEW_LINE> delta_square = tf.reduce_sum(tf.square(delta), axis=-1) <NEW_LINE> rpf_h_expand = tf.reshape(rpf_h, [-1, 1, 1]) <NEW_LINE> rpf_matrix = tf.exp(-delta_square / rpf_h_expand) <NEW_LINE> rpf_grads = tf.constant(0., dtype=tf.float32, shape=[Z_expand.get_shape()[0].value, 0, Z_expand.get_shape()[2].value]) <NEW_LINE> for i in range(M): <NEW_LINE> <INDENT> rpf_grad = tf.reduce_mean(tf.gradients(rpf_matrix[:, :, i], [Z_expand_column_dim])[0], axis=1) <NEW_LINE> rpf_grads = tf.concat([rpf_grads, rpf_grad], axis=1) <NEW_LINE> <DEDENT> return tf.stop_gradient(rpf_matrix), tf.stop_gradient(rpf_grads)
:param Z_expand: shape=[nbatch, M, cell] :param Z_expand_repr_train: shape=[nbatch, M, cell] :param rpf_h: :return: a [nbatch, M, M] kernel matrix and its gradients towards Z_expand_column_dim
625941c167a9b606de4a7e2f
@click.command() <NEW_LINE> @click.option('-p', '--prefix', default='', help='chromosome prefix') <NEW_LINE> @click.argument('bam_path', type=click.Path(exists=True)) <NEW_LINE> @click.pass_context <NEW_LINE> def sex(context, prefix, bam_path): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> result = sex_from_bam(bam_path, prefix=prefix) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> logging.exception('Something went really wrong :(') <NEW_LINE> context.abort() <NEW_LINE> <DEDENT> click.echo("#{prefix}X_coverage\t{prefix}Y_coverage\tsex" .format(prefix=prefix)) <NEW_LINE> click.echo('\t'.join(map(text_type, result)))
Guess the sex of a BAM alignment.
625941c1d486a94d0b98e0b9
def stop_node(self, i): <NEW_LINE> <INDENT> self.nodes[i].stop_node() <NEW_LINE> self.nodes[i].wait_until_stopped()
Stop a bltgd test node
625941c185dfad0860c3adcd
def __init__(self, array_number=None): <NEW_LINE> <INDENT> self.swagger_types = { 'array_number': 'list[float]' } <NEW_LINE> self.attribute_map = { 'array_number': 'ArrayNumber' } <NEW_LINE> self._array_number = None <NEW_LINE> if array_number is not None: <NEW_LINE> <INDENT> self.array_number = array_number
ArrayOfNumberOnly - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
625941c196565a6dacc8f640
def extra_tabs(self, user): <NEW_LINE> <INDENT> return []
Get extra tabs for the videos page Returns: list of (name, title) tuples. name is used for the tab id, title is a human friendly title. For each tab name you should create a video-<name>.html and video-<name>-tab.html templates. If you need to pass variables to those templates, create a setup_tab_<name> method that inputs the same args as the methods from VideoPageContext and returns a dict of variables for the template.
625941c17b180e01f3dc4775
def _singleton(): <NEW_LINE> <INDENT> if cls not in instances: <NEW_LINE> <INDENT> instances[cls] = cls(*args, **kw) <NEW_LINE> <DEDENT> return instances[cls]
Find existing instance.
625941c1435de62698dfdbc0
def get_method(self, key): <NEW_LINE> <INDENT> return self._get_method(self._get_node(key))
Return compression method of specified key.
625941c18a43f66fc4b53fdb
def build_model_without_suspension(self): <NEW_LINE> <INDENT> for index in self._wheels.keys(): <NEW_LINE> <INDENT> self._wheel_joints[index] = Joint6DoF(self._wheels[index], self.bge_object) <NEW_LINE> self._wheel_joints[index].free_rotation_dof('Z') <NEW_LINE> <DEDENT> scene = blenderapi.scene() <NEW_LINE> caster_wheel_name = self.bge_object.get('CasterWheelName', None) <NEW_LINE> if caster_wheel_name and caster_wheel_name != 'None': <NEW_LINE> <INDENT> wheel = scene.objects[caster_wheel_name] <NEW_LINE> joint = Joint6DoF(wheel, self.bge_object) <NEW_LINE> joint.free_rotation_dof('Z')
Add all the constraints to attach the wheels to the body
625941c1b57a9660fec337f6
def simple_get(url): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with closing(get(url, stream=True, timeout=3.05)) as resp: <NEW_LINE> <INDENT> if is_good_response(resp): <NEW_LINE> <INDENT> return resp.content <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Closing error or timeout') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except RequestException as e: <NEW_LINE> <INDENT> log_error('Error during requests to {0} : {1}'.format(url, str(e))) <NEW_LINE> print('Error during requestse')
Attempts to get the content at `url` by making an HTTP GET request. If the content-type of response is some kind of HTML/XML, return the text content, otherwise return None.
625941c1009cb60464c63327
def update(self, request, profile_pk=None, pk=None, *args, **kwargs): <NEW_LINE> <INDENT> if kwargs.get('partial') == True: <NEW_LINE> <INDENT> return super().update(request, profile_pk, pk, *args, **kwargs) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> if not request.user.is_superuser: <NEW_LINE> <INDENT> raise PermissionDenied({"message": "You don't have permission to associate existing users"}) <NEW_LINE> <DEDENT> profile = self.queryset.get(pk=profile_pk) <NEW_LINE> spouse = self.queryset.get(pk=pk) <NEW_LINE> profile.set_family(spouse=spouse) <NEW_LINE> return Response(status=status.HTTP_201_CREATED) <NEW_LINE> <DEDENT> except Profile.DoesNotExist: <NEW_LINE> <INDENT> raise Http404
Associates a spouse to specified user, creating a Family if none exists :param request: :param profile_pk: the profile that the spouse should be associated with :param pk: the spouse to associate with user_pk :return:
625941c1925a0f43d2549de9
def query_all(self, urn, step_sizes, n_nns, exclude_self=False, vectors=False): <NEW_LINE> <INDENT> result = {} <NEW_LINE> data = self.get_metadata(urn) <NEW_LINE> source = {} <NEW_LINE> source['urn'] = urn <NEW_LINE> source['year'] = data['year'] <NEW_LINE> source['image'] = data['image'] <NEW_LINE> if vectors: <NEW_LINE> <INDENT> source['vector'] = data['vector'].tolist() <NEW_LINE> <DEDENT> result['source'] = source <NEW_LINE> result['neighbors'] = {} <NEW_LINE> for i, step in enumerate(step_sizes): <NEW_LINE> <INDENT> if exclude_self: <NEW_LINE> <INDENT> neighbors = self.query_indices(data['vector'], n_nns=n_nns[i] + 1, step=step) <NEW_LINE> for start, neighbor_list in neighbors.items(): <NEW_LINE> <INDENT> neighbors[start] = [n for n in neighbors[start] if n['urn'] != urn][:n_nns[i]] <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> neighbors = self.query_indices(data['vector'], n_nns=n_nns[i], step=step) <NEW_LINE> <DEDENT> if vectors: <NEW_LINE> <INDENT> for start, neighbor_list in neighbors.items(): <NEW_LINE> <INDENT> for n in neighbor_list: <NEW_LINE> <INDENT> n['vector'] = self.load_vector(n['urn']).tolist() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> result['neighbors'][step] = neighbors <NEW_LINE> <DEDENT> return result
Query indices for multiple step sizes by urn.
625941c1cb5e8a47e48b7a21
def filter_by_cuisine(names_matching_price, cuisine_to_names, cuisines_list): <NEW_LINE> <INDENT> filtered_names = [] <NEW_LINE> for name in names_matching_price: <NEW_LINE> <INDENT> for cuisine in cuisine_to_names.keys(): <NEW_LINE> <INDENT> if name in cuisine_to_names[cuisine][0] and name not in filtered_names: <NEW_LINE> <INDENT> filtered_names.append(name) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return filtered_names
(list of str, dict of {str: list of str}, list of str) -> list of str >>> names = ['Queen St. Cafe', 'Dumplings R Us', 'Deep Fried Everything'] >>> cuis = 'Canadian': ['Georgie Porgie'], 'Pub Food': ['Georgie Porgie', 'Deep Fried Everything'], 'Malaysian': ['Queen St. Cafe'], 'Thai': ['Queen St. Cafe'], 'Chinese': ['Dumplings R Us'], 'Mexican': ['Mexican Grill']} >>> cuisines = ['Chinese', 'Thai'] >>> filter_by_cuisine(names, cuis, cuisines) ['Queen St. Cafe', 'Dumplings R Us']
625941c1004d5f362079a2a9
def test_direction(self): <NEW_LINE> <INDENT> mt = OCIO.MatrixTransform() <NEW_LINE> self.assertEqual(mt.getDirection(), OCIO.TRANSFORM_DIR_FORWARD) <NEW_LINE> for direction in OCIO.TransformDirection.__members__.values(): <NEW_LINE> <INDENT> mt.setDirection(direction) <NEW_LINE> self.assertEqual(mt.getDirection(), direction) <NEW_LINE> <DEDENT> for invalid in (None, 1, 'test'): <NEW_LINE> <INDENT> with self.assertRaises(TypeError): <NEW_LINE> <INDENT> mt.setDirection(invalid)
Test the setDirection() and getDirection() methods.
625941c1adb09d7d5db6c705
def testFromQPainter(self): <NEW_LINE> <INDENT> c = QgsRenderContext.fromQPainter(None) <NEW_LINE> self.assertFalse(c.painter()) <NEW_LINE> self.assertAlmostEqual(c.scaleFactor(), 88 / 25.4, 3) <NEW_LINE> p = QPainter() <NEW_LINE> c = QgsRenderContext.fromQPainter(p) <NEW_LINE> self.assertEqual(c.painter(), p) <NEW_LINE> self.assertAlmostEqual(c.scaleFactor(), 88 / 25.4, 3) <NEW_LINE> im = QImage(1000, 600, QImage.Format_RGB32) <NEW_LINE> dots_per_m = 300 / 25.4 * 1000 <NEW_LINE> im.setDotsPerMeterX(dots_per_m) <NEW_LINE> im.setDotsPerMeterY(dots_per_m) <NEW_LINE> p = QPainter(im) <NEW_LINE> c = QgsRenderContext.fromQPainter(p) <NEW_LINE> self.assertEqual(c.painter(), p) <NEW_LINE> self.assertAlmostEqual(c.scaleFactor(), dots_per_m / 1000, 3)
test QgsRenderContext.fromQPainter
625941c1566aa707497f44e0
def test_returns_error_if_repeats_last_turn(self): <NEW_LINE> <INDENT> self.assertEqual(is_violation("foo", "foo"), DUPLICATE_TERM)
Returns error if `previous_term` is the same as `current_term`.
625941c1099cdd3c635f0bd0
def valid_date_time(var_value, var_name=None): <NEW_LINE> <INDENT> error_message = "" <NEW_LINE> rc, out_buf = gc.shell_cmd("date -d '" + str(var_value) + "'", quiet=1, show_err=0, ignore_err=1) <NEW_LINE> if rc: <NEW_LINE> <INDENT> var_name = get_var_name(var_name) <NEW_LINE> error_message += "Invalid date/time value:\n" <NEW_LINE> error_message += gp.sprint_varx(var_name, var_value, gp.blank() | gp.show_type()) <NEW_LINE> return process_error_message(error_message) <NEW_LINE> <DEDENT> return process_error_message(error_message)
The variable value is valid if it can be interpreted as a date/time (e.g. "14:49:49.981", "tomorrow", etc.) by the linux date command. Description of argument(s): var_value The value being validated.
625941c191af0d3eaac9b98a
def delete_players(): <NEW_LINE> <INDENT> commit_query("TRUNCATE TABLE players CASCADE;")
Remove all the player records from the database.
625941c1e64d504609d747b4
def get_mask(bboxes, shape): <NEW_LINE> <INDENT> mask = np.zeros(shape) <NEW_LINE> for bx in bboxes: <NEW_LINE> <INDENT> mask[bx[1]:bx[3], bx[0]:bx[2], :] = 1 <NEW_LINE> <DEDENT> return mask
get mask of area covered by bboxes
625941c130c21e258bdfa410
def maximumGap(self, nums): <NEW_LINE> <INDENT> if len(nums) < 2 or max(nums) - min(nums) == 0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> max_n, min_n, len_n = max(nums), min(nums), len(nums) <NEW_LINE> bsize = (max_n - min_n + 1.0) / len_n <NEW_LINE> buckets = [[2 ** 31 - 1, -1] for _ in range(len_n + 1)] <NEW_LINE> for i in nums: <NEW_LINE> <INDENT> place = int((i - min_n) // bsize) <NEW_LINE> buckets[place][0] = min(i, buckets[place][0]) <NEW_LINE> buckets[place][1] = max(i, buckets[place][1]) <NEW_LINE> <DEDENT> res, prev = 0, buckets[0][0] <NEW_LINE> for i in buckets: <NEW_LINE> <INDENT> if i != [2 ** 31 - 1, -1]: <NEW_LINE> <INDENT> res = max(res, i[0] - prev) <NEW_LINE> prev = i[1] <NEW_LINE> <DEDENT> <DEDENT> return res
:param nums: :return: @param nums, a list of integer @return an integer
625941c1d58c6744b4257bd4
def do_M(self, miterlimit): <NEW_LINE> <INDENT> self.graphicstate.miterlimit = miterlimit <NEW_LINE> return
Set miter limit
625941c191f36d47f21ac464
def handle_authn_request(self, context): <NEW_LINE> <INDENT> internal_req = self._handle_authn_request(context) <NEW_LINE> if not isinstance(internal_req, InternalData): <NEW_LINE> <INDENT> return internal_req <NEW_LINE> <DEDENT> return self.auth_req_callback_func(context, internal_req)
Handle an authentication request and pass it on to the backend. :type context: satosa.context.Context :rtype: oic.utils.http_util.Response :param context: the current context :return: HTTP response to the client
625941c191af0d3eaac9b98b
def p_operacionunaria(p): <NEW_LINE> <INDENT> p[0] = add_tabs(False) + str(p[1]) + str(p[2])
opeunaria : OPUNA ID
625941c121bff66bcd6848c9
def parse_args(): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser(description= "Multi-criteria optimization algorithm") <NEW_LINE> parser.add_argument("-a", type=restricted_float, help="Average latency - failure free scenario. " "Expects a weight (priority) in interval (0, 1]. ") <NEW_LINE> parser.add_argument("-w", type=restricted_float, help="Worst case latency - failure free scenario. " "Expects a weight (priority) in interval (0, 1]. ") <NEW_LINE> parser.add_argument("-i", type=restricted_float, help="Inter controller latency. " "Expects a weight (priority) in interval (0, 1]. ") <NEW_LINE> parser.add_argument("--dynamic", help="Generate dynamic undirected graph", action="store_true") <NEW_LINE> parser.add_argument("--gml", help="Parse gml graph", action="store_true") <NEW_LINE> parser.add_argument("-n", type=int, help="Number of graph nodes") <NEW_LINE> parser.add_argument("-c", type=int, help="Number of controllers in graph. " "Allowed values are between N/3 and N/7") <NEW_LINE> args = parser.parse_args() <NEW_LINE> if not (args.a or args.w or args.i): <NEW_LINE> <INDENT> raise parser.error("No action requested, add -a or -w or -i option") <NEW_LINE> <DEDENT> nargs = 0 <NEW_LINE> if args.a: <NEW_LINE> <INDENT> nargs = nargs + 1 <NEW_LINE> <DEDENT> if args.w: <NEW_LINE> <INDENT> nargs = nargs + 1 <NEW_LINE> <DEDENT> if args.i: <NEW_LINE> <INDENT> nargs = nargs + 1 <NEW_LINE> <DEDENT> return args, nargs
Parse user arguments Returns ------- args : Namespace object with user arguments nargs : Number of user arguments
625941c173bcbd0ca4b2bfea
def test_parser_pdb(): <NEW_LINE> <INDENT> pdb_strctre_parser.build_parser()
Test building the parser when argsv is None
625941c176d4e153a657eaa4
def test_query_with_invalid_order_field(self): <NEW_LINE> <INDENT> n_records = 20 <NEW_LINE> channels = ["ch1", "ch2", "TRG"] <NEW_LINE> trg_index = -1 <NEW_LINE> channel_count = len(channels) <NEW_LINE> buf = Buffer(channels=channels) <NEW_LINE> for i, data in enumerate(mock_data(n_records, channel_count)): <NEW_LINE> <INDENT> data[trg_index] = 1.0 if i >= 10 else 0.0 <NEW_LINE> timestamp = float(i) <NEW_LINE> buf.append(Record(data, timestamp, None)) <NEW_LINE> <DEDENT> with pytest.raises(Exception): <NEW_LINE> <INDENT> buf.query_data(ordering=("ch3", "asc")) <NEW_LINE> <DEDENT> buf.cleanup()
Test query with invalid order field.
625941c1b830903b967e9881
def get_energy(): <NEW_LINE> <INDENT> blStr = blStrGet() <NEW_LINE> if blStr == -1: return -1 <NEW_LINE> if blStr == 'AMX': <NEW_LINE> <INDENT> energy = vdcm.e.user_readback.get() <NEW_LINE> <DEDENT> elif blStr == 'FMX': <NEW_LINE> <INDENT> energy = hdcm.e.user_readback.get() <NEW_LINE> <DEDENT> return energy
Returns the current photon energy in eV derived from the DCM Bragg angle
625941c13317a56b86939bd2
def make_model(floor_size=None, terrain=False, rangefinders=False, walls_and_ball=False, target=False): <NEW_LINE> <INDENT> xml_string = common.read_model('hexapod.xml') <NEW_LINE> parser = etree.XMLParser(remove_blank_text=True) <NEW_LINE> mjcf = etree.XML(xml_string, parser) <NEW_LINE> if floor_size is not None: <NEW_LINE> <INDENT> floor_geom = mjcf.find('.//geom[@name=\'floor\']') <NEW_LINE> floor_geom.attrib['size'] = f'{floor_size} {floor_size} .5' <NEW_LINE> <DEDENT> if not walls_and_ball: <NEW_LINE> <INDENT> for wall in _WALLS: <NEW_LINE> <INDENT> wall_geom = xml_tools.find_element(mjcf, 'geom', wall) <NEW_LINE> wall_geom.getparent().remove(wall_geom) <NEW_LINE> <DEDENT> ball_body = xml_tools.find_element(mjcf, 'body', 'ball') <NEW_LINE> ball_body.getparent().remove(ball_body) <NEW_LINE> target_site = xml_tools.find_element(mjcf, 'site', 'target') <NEW_LINE> target_site.getparent().remove(target_site) <NEW_LINE> <DEDENT> if target: <NEW_LINE> <INDENT> for wall in _WALLS: <NEW_LINE> <INDENT> wall_geom = xml_tools.find_element(mjcf, 'geom', wall) <NEW_LINE> wall_geom.getparent().remove(wall_geom) <NEW_LINE> <DEDENT> ball_body = xml_tools.find_element(mjcf, 'body', 'ball') <NEW_LINE> ball_body.getparent().remove(ball_body) <NEW_LINE> <DEDENT> if not terrain: <NEW_LINE> <INDENT> terrain_geom = xml_tools.find_element(mjcf, 'geom', 'terrain') <NEW_LINE> terrain_geom.getparent().remove(terrain_geom) <NEW_LINE> <DEDENT> if not rangefinders: <NEW_LINE> <INDENT> rangefinder_sensors = mjcf.findall('.//rangefinder') <NEW_LINE> for rf in rangefinder_sensors: <NEW_LINE> <INDENT> rf.getparent().remove(rf) <NEW_LINE> <DEDENT> <DEDENT> return etree.tostring(mjcf, pretty_print=True)
Returns the model XML string.
625941c1f8510a7c17cf966f
def loss_gradients(forward_info: Dict[str, np.ndarray], weights: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: <NEW_LINE> <INDENT> batch_size = forward_info['X'].shape[0] <NEW_LINE> dLdP = -2 * (forward_info['y'] - forward_info['P']) <NEW_LINE> dPdN = np.ones_like(forward_info['N']) <NEW_LINE> dPdB = np.ones_like(forward_info['B']) <NEW_LINE> dLdN = dLdP * dPdN <NEW_LINE> dNdW = np.transpose(forward_info['X'], (1,0)) <NEW_LINE> dLdW = np.dot(dNdW, dLdN) <NEW_LINE> dLdB = (dLdP * dPdB).sum(axis=0) <NEW_LINE> loss_gradients: Dict[str, np.ndarray] = {} <NEW_LINE> loss_gradients['W'] = dLdW <NEW_LINE> loss_gradients['B'] = dLdB <NEW_LINE> return loss_gradients
선형회귀 모형의 dLdW, dLdB 계산
625941c1507cdc57c6306c4a
def check_archive(archive_file, dest_dir): <NEW_LINE> <INDENT> with tarfile.open(archive_file, mode='r') as archive_fp: <NEW_LINE> <INDENT> for arc_path in archive_fp.getnames(): <NEW_LINE> <INDENT> assert os.path.normpath( os.path.join( dest_dir, arc_path )).startswith(dest_dir.rstrip(os.sep) + os.sep), "Archive member would extract outside target directory: %s" % arc_path <NEW_LINE> <DEDENT> <DEDENT> return True
Ensure that a tar archive has no absolute paths or relative paths outside the archive.
625941c16aa9bd52df036d17
def release(self): <NEW_LINE> <INDENT> self._v.release() <NEW_LINE> del self
deletes instance
625941c15fc7496912cc38f2
def test_push_to_cache(self): <NEW_LINE> <INDENT> n = SCons.Node.Node() <NEW_LINE> r = n.push_to_cache() <NEW_LINE> assert r is None, r
Test the base push_to_cache() method
625941c150812a4eaa59c298
def is_child_of(elem, selector, soup): <NEW_LINE> <INDENT> target = soup.select(selector) <NEW_LINE> return target and target[0] in elem.parents
whether element is a child of element mathcing selector
625941c1462c4b4f79d1d644
def getBallBottom(self): <NEW_LINE> <INDENT> return self._ball.getBottom()
Returns True if self._ball is at or below the bottom of the view Returns False otherwise
625941c129b78933be1e5624
def describe_instance_types(self, args): <NEW_LINE> <INDENT> return instance.describe_instance_types(self.url, self.verb, self.headers, self.version, args)
Gives a description of instance types present. param args: Arguments passed to the function The function expects either no input or a list of specific instance types to describe
625941c1d4950a0f3b08c2c5
def interpolate(model, test_dataset, result_name, steps=1, generation_length=64, interps=20): <NEW_LINE> <INDENT> sampling_dir = os.path.join(model._sampling_dir, result_name) <NEW_LINE> os.makedirs(sampling_dir) <NEW_LINE> test_dataset, _ = test_dataset <NEW_LINE> for step, entry in enumerate(test_dataset): <NEW_LINE> <INDENT> if step == steps: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if len(entry) == 2: <NEW_LINE> <INDENT> x_image, class_names = entry <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> y_sketch_gt, y_sketch_teacher, x_image, class_names = entry[0:4] <NEW_LINE> <DEDENT> z, _, _ = model.embed(x_image, training=False) <NEW_LINE> for idx in range(0, z.shape[0], 4): <NEW_LINE> <INDENT> embeddings = z[idx: idx+4].numpy() <NEW_LINE> classes = class_names[idx: idx+4].numpy() <NEW_LINE> interpolated_embeddings = bilinear_interpolate_4_vectors(embeddings, interps=interps) <NEW_LINE> flattened_embeddings = np.reshape(interpolated_embeddings, (-1, z.shape[-1])).astype(np.float32) <NEW_LINE> _, flattened_strokes = model.decode(flattened_embeddings, training=False, generation_length=generation_length).numpy() <NEW_LINE> flattened_images = [] <NEW_LINE> for strokes in flattened_strokes: <NEW_LINE> <INDENT> stroke_three = stroke_three_format(strokes) <NEW_LINE> flattened_images.append(scale_and_rasterize(stroke_three, (28, 28), 1).astype('uint8')) <NEW_LINE> <DEDENT> flattened_images = np.array(flattened_images, dtype=np.uint8) <NEW_LINE> interpolated_images = np.reshape(flattened_images, list(interpolated_embeddings.shape[:2]) + list(flattened_images.shape[1:])) <NEW_LINE> image_rows = [] <NEW_LINE> for row in interpolated_images: <NEW_LINE> <INDENT> concat_row = np.concatenate(row, axis=1) <NEW_LINE> image_rows.append(concat_row) <NEW_LINE> <DEDENT> np_image = np.concatenate(image_rows, axis=0) <NEW_LINE> Image.fromarray(np_image).save(os.path.join(sampling_dir, "{}-{}_{}_{}_{}.png".format(idx//4, *classes)))
Used to generate 2D interpolated embeddings. :param model: :param test_dataset: :param result_name: :param steps: :param generation_length: :param interps: :return:
625941c17b25080760e393ce
def to_api_repr(self): <NEW_LINE> <INDENT> resource = {} <NEW_LINE> if self.etag is not None: <NEW_LINE> <INDENT> resource['etag'] = self.etag <NEW_LINE> <DEDENT> if self.version is not None: <NEW_LINE> <INDENT> resource['version'] = self.version <NEW_LINE> <DEDENT> if len(self._bindings) > 0: <NEW_LINE> <INDENT> bindings = resource['bindings'] = [] <NEW_LINE> for role, members in sorted(self._bindings.items()): <NEW_LINE> <INDENT> if len(members) > 0: <NEW_LINE> <INDENT> bindings.append( {'role': role, 'members': sorted(set(members))}) <NEW_LINE> <DEDENT> <DEDENT> if len(bindings) == 0: <NEW_LINE> <INDENT> del resource['bindings'] <NEW_LINE> <DEDENT> <DEDENT> return resource
Construct a Policy resource. :rtype: dict :returns: a resource to be passed to the ``setIamPolicy`` API.
625941c1a17c0f6771cbdfc7
def get_tif(self, multifile=True): <NEW_LINE> <INDENT> tf = TiffFile(self.image_path, multifile=multifile) <NEW_LINE> return tf
Get TiffFile instance. Returns ------- tf : :class:`sktracker.io.TiffFile`
625941c18c0ade5d55d3e92d
def print_top_flourish(self): <NEW_LINE> <INDENT> image = load_image('TopFlourish') <NEW_LINE> send_image(image, self.__printer_out_ep)
Convert the certificates top flourish to a set of Pipsta graphics commands that will render the image and then send the commands to the printer.
625941c19f2886367277a803
def check_fix_range(dtc): <NEW_LINE> <INDENT> import numpy as np <NEW_LINE> import quantities as pq <NEW_LINE> sub=[] <NEW_LINE> supra=[] <NEW_LINE> steps=[] <NEW_LINE> dtc.rheobase = 0.0 <NEW_LINE> for k,v in dtc.lookup.items(): <NEW_LINE> <INDENT> if v == 1: <NEW_LINE> <INDENT> dtc.rheobase = float(k) <NEW_LINE> dtc.current_steps = 0.0 <NEW_LINE> dtc.boolean = True <NEW_LINE> return dtc <NEW_LINE> <DEDENT> elif v == 0: <NEW_LINE> <INDENT> sub.append(k) <NEW_LINE> <DEDENT> elif v > 0: <NEW_LINE> <INDENT> supra.append(k) <NEW_LINE> <DEDENT> <DEDENT> sub = np.array(sub) <NEW_LINE> supra = np.array(supra) <NEW_LINE> if 0. in supra and len(sub) == 0: <NEW_LINE> <INDENT> dtc.boolean = True <NEW_LINE> dtc.rheobase = -1 <NEW_LINE> return dtc <NEW_LINE> <DEDENT> if len(sub)!=0 and len(supra)!=0: <NEW_LINE> <INDENT> assert sub.max()<=supra.min() <NEW_LINE> <DEDENT> if len(sub) and len(supra): <NEW_LINE> <INDENT> center = list(np.linspace(sub.max(),supra.min(),9.0)) <NEW_LINE> center = [ i for i in center if not i == sub.max() ] <NEW_LINE> center = [ i for i in center if not i == supra.min() ] <NEW_LINE> center[int(len(center)/2)+1]=(sub.max()+supra.min())/2.0 <NEW_LINE> steps = [ i*pq.pA for i in center ] <NEW_LINE> <DEDENT> elif len(sub): <NEW_LINE> <INDENT> steps = list(np.linspace(sub.max(),2*sub.max(),9.0)) <NEW_LINE> steps = [ i for i in steps if not i == sub.max() ] <NEW_LINE> steps = [ i*pq.pA for i in steps ] <NEW_LINE> <DEDENT> elif len(supra): <NEW_LINE> <INDENT> step = list(np.linspace(-2*(supra.min()),supra.min(),9.0)) <NEW_LINE> steps = [ i for i in steps if not i == supra.min() ] <NEW_LINE> steps = [ i*pq.pA for i in steps ] <NEW_LINE> <DEDENT> dtc.current_steps = steps <NEW_LINE> dtc.rheobase = None <NEW_LINE> return copy.copy(dtc)
Inputs: lookup, A dictionary of previous current injection values used to search rheobase Outputs: A boolean to indicate if the correct rheobase current was found and a dictionary containing the range of values used. If rheobase was actually found then rather returning a boolean and a dictionary, instead logical True, and the rheobase current is returned. given a dictionary of rheobase search values, use that dictionary as input for a subsequent search.
625941c1ad47b63b2c509ef4
def push(self, number): <NEW_LINE> <INDENT> if self.count == self.capacity: <NEW_LINE> <INDENT> raise Exception("Heap is full!") <NEW_LINE> <DEDENT> if self.count == 0: <NEW_LINE> <INDENT> self.array[0] = number <NEW_LINE> self.count = 1 <NEW_LINE> return <NEW_LINE> <DEDENT> self.array[self.count] = number <NEW_LINE> index = self.count <NEW_LINE> parent_index = (index - 1) // 2 <NEW_LINE> while index > 0 and self.array[parent_index] > self.array[index]: <NEW_LINE> <INDENT> parent = self.array[parent_index] <NEW_LINE> self.array[parent_index] = self.array[index] <NEW_LINE> self.array[index] = parent <NEW_LINE> index = parent_index <NEW_LINE> parent_index = (index - 1) // 2 <NEW_LINE> <DEDENT> self.count += 1
Push a new value in the heap. The value is added at the end of the tree and then the tree is re-balanced. :param number: the value to push in the heap :type number: int
625941c1a8370b7717052815
@register.inclusion_tag('left_menu.html') <NEW_LINE> def left_menu(site): <NEW_LINE> <INDENT> blog = models.Blog.objects.filter(site_name=site).first() <NEW_LINE> user = blog.userinfo <NEW_LINE> print('user??', user) <NEW_LINE> tag_list = models.Tag.objects.filter(blog=blog).values("pk").annotate(c=Count("article")).values_list("title", "c") <NEW_LINE> cate_list = models.Category.objects.filter(blog=blog).values("pk"). annotate(c=Count("article__title")).values_list("title", "c") <NEW_LINE> date_list = models.Article.objects.filter(user=user). extra(select={"y_m_date": "strftime('%%Y/%%m', create_time)"}).values("y_m_date").annotate(c=Count("nid")). values_list("y_m_date", "c").order_by("-y_m_date") <NEW_LINE> return {"blog": blog, "cate_list": cate_list, "date_list": date_list, "tag_list": tag_list}
执行以下查询操作之后,把数据返回给inclusion_tag, 渲染left_menu.html这个模板文件 调用时:{% load my_tags %} {% left_menu site %}
625941c14428ac0f6e5ba766
def query_parameters(self, constraints, return_request=None, get_query_payload=False, verbose=False): <NEW_LINE> <INDENT> response = self.query_parameters_async(constraints, return_request=return_request, get_query_payload=get_query_payload) <NEW_LINE> if get_query_payload: <NEW_LINE> <INDENT> return response <NEW_LINE> <DEDENT> result = self._parse_result(response, verbose=verbose) <NEW_LINE> return result
Search based on eccentricity, orbital major axis, or some other parameter that is likely to be catalogued.
625941c1711fe17d825422e4
def set_window_state (self, state): <NEW_LINE> <INDENT> state = str(state).lower() <NEW_LINE> _method = self.STATE.get(state) <NEW_LINE> if callable(_method): <NEW_LINE> <INDENT> _method() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError( _("unsupported window state '{w_state}'.") .format(w_state=state) )
sets this main window state i.e. one of 'minimized', 'maximized', 'normal' or 'hidden' string of chars; sets also *REAL* window state along value; no return value (void);
625941c1ad47b63b2c509ef5
def perlin_ridged(x, y, z): <NEW_LINE> <INDENT> value = pnoise(x, y, z) <NEW_LINE> if value > 0: <NEW_LINE> <INDENT> value = (-value) + 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> value = value + 1 <NEW_LINE> <DEDENT> return 2*value - 1
Ridged means that instead of varying from -1..1, the value varies from -1..1..-1
625941c173bcbd0ca4b2bfeb
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Smart.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv)
Run administrative tasks.
625941c14e696a04525c93c1
def set_contact_name(self, contact_name): <NEW_LINE> <INDENT> self.contact_name = contact_name
Set contact name for the contact. Args: contact_name(str): Contact name of the contact.
625941c1a05bb46b383ec798
def _filter_handles(l): <NEW_LINE> <INDENT> for o in l: <NEW_LINE> <INDENT> if isinstance(o, _GIPCHandle): <NEW_LINE> <INDENT> yield o <NEW_LINE> <DEDENT> elif isinstance(o, _GIPCDuplexHandle): <NEW_LINE> <INDENT> yield o._writer <NEW_LINE> yield o._reader
Iterate through `l`, filter and yield `_GIPCHandle` instances.
625941c15e10d32532c5ee9c
def setUp(self): <NEW_LINE> <INDENT> super(EdxNotesHelpersTest, self).setUp() <NEW_LINE> ClientFactory(name="edx-notes") <NEW_LINE> self.course = CourseFactory.create() <NEW_LINE> self.chapter = ItemFactory.create(category="chapter", parent_location=self.course.location) <NEW_LINE> self.chapter_2 = ItemFactory.create(category="chapter", parent_location=self.course.location) <NEW_LINE> self.sequential = ItemFactory.create(category="sequential", parent_location=self.chapter.location) <NEW_LINE> self.vertical = ItemFactory.create(category="vertical", parent_location=self.sequential.location) <NEW_LINE> self.html_module_1 = ItemFactory.create(category="html", parent_location=self.vertical.location) <NEW_LINE> self.html_module_2 = ItemFactory.create(category="html", parent_location=self.vertical.location) <NEW_LINE> self.vertical_with_container = ItemFactory.create(category='vertical', parent_location=self.sequential.location) <NEW_LINE> self.child_container = ItemFactory.create( category='split_test', parent_location=self.vertical_with_container.location) <NEW_LINE> self.child_vertical = ItemFactory.create(category='vertical', parent_location=self.child_container.location) <NEW_LINE> self.child_html_module = ItemFactory.create(category="html", parent_location=self.child_vertical.location) <NEW_LINE> self.course = self.store.get_item(self.course.location) <NEW_LINE> self.chapter = self.store.get_item(self.chapter.location) <NEW_LINE> self.chapter_2 = self.store.get_item(self.chapter_2.location) <NEW_LINE> self.sequential = self.store.get_item(self.sequential.location) <NEW_LINE> self.vertical = self.store.get_item(self.vertical.location) <NEW_LINE> self.vertical_with_container = self.store.get_item(self.vertical_with_container.location) <NEW_LINE> self.child_container = self.store.get_item(self.child_container.location) <NEW_LINE> self.child_vertical = self.store.get_item(self.child_vertical.location) <NEW_LINE> self.child_html_module = 
self.store.get_item(self.child_html_module.location) <NEW_LINE> self.user = UserFactory.create(username="Joe", email="joe@example.com", password="edx") <NEW_LINE> self.client.login(username=self.user.username, password="edx")
Setup a dummy course content.
625941c107d97122c41787fb
def GetInsideValue(self): <NEW_LINE> <INDENT> return _itkDynamicThresholdImageFilterPython.itkDynamicThresholdImageFilterIF2IF2_GetInsideValue(self)
GetInsideValue(self) -> float
625941c1bd1bec0571d905a3
def root_quit(): <NEW_LINE> <INDENT> pool.terminate() <NEW_LINE> root_tk.destroy() <NEW_LINE> sys.stderr.close() <NEW_LINE> sys.stdout.close() <NEW_LINE> return 1
Destroy mainloop and terminate tread pool :return: none
625941c124f1403a92600add
def test_vector_flow(a, b, c, max_angle=pi/32): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> ab = array([b['x'] - a['x'], b['y'] - a['y']]) <NEW_LINE> bc = array([c['x'] - b['x'], c['y'] - b['y']]) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> ab = b-a <NEW_LINE> bc = c-b <NEW_LINE> <DEDENT> vec = array([cv2.cartToPolar(r_[ab[0]], r_[ab[1]])[1].flatten(), cv2.cartToPolar(r_[bc[0]], r_[bc[1]])[1].flatten()]) <NEW_LINE> mins = min(vec,0) <NEW_LINE> vec -= mins <NEW_LINE> vec = max(vec,0) <NEW_LINE> vec[vec>pi] = 2*pi - vec[vec>pi] <NEW_LINE> gdvecs = vec < max_angle <NEW_LINE> divisor = sqrt(sum(ab[:2]**2,0)) <NEW_LINE> nonzero = divisor != 0. <NEW_LINE> scalings = sqrt(sum(bc[:2]**2,0))/divisor <NEW_LINE> meanscale = mean(sqrt(sum(bc[:2,nonzero]**2,0))/sqrt(sum(ab[:2,nonzero]**2,0))) <NEW_LINE> stdscale = std(sqrt(sum(bc[:2,nonzero]**2,0))/sqrt(sum(ab[:2,nonzero]**2,0))) <NEW_LINE> gdscale = (scalings >= (meanscale-stdscale)) & (scalings <= (meanscale+stdscale)) <NEW_LINE> return gdvecs & gdscale
Tests a contiguous set of matches. The difference in angle between a-b and b-c must be less than 'max_angle'. Default is difference of 5 degrees.
625941c1377c676e9127211e
def dl_nicomimi(file, id, title='', artist='', album='', comment='', apic='none'): <NEW_LINE> <INDENT> cj = http.cookiejar.CookieJar() <NEW_LINE> opener = urllib.request.build_opener( urllib.request.HTTPCookieProcessor(cj)) <NEW_LINE> conn = opener.open('http://www.nicomimi.net/play/{}'.format(id)) <NEW_LINE> conn.close() <NEW_LINE> conn = opener.open('http://media2.nicomimi.net/get?vid={}'.format(id)) <NEW_LINE> data = conn.read() <NEW_LINE> with open(file, 'wb') as f: <NEW_LINE> <INDENT> f.write(data) <NEW_LINE> <DEDENT> conn.close() <NEW_LINE> tags.tag(file, id, title, artist, album, comment, apic)
Deprecated. Request an MP3 download from nicomimi.net, then tag using stagger. file should probably match the title and end in '.mp3' as the right extension. See getpic() and tag() for information about apic.
625941c1d8ef3951e32434b2
def increment(self, metric, value=1, dimensions=None, delegated_tenant=None, hostname=None, device_name=None): <NEW_LINE> <INDENT> self.aggregator.increment(metric, value, dimensions, delegated_tenant, hostname, device_name)
Increment a counter with optional dimensions, hostname and device name. :param metric: The name of the metric :param value: The value to increment by :param dimensions: (optional) A dictionary of dimensions for this metric :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID. :param hostname: (optional) A hostname for this metric. Defaults to the current hostname. :param device_name: (optional) The device name for this metric
625941c156ac1b37e6264148
def sort_only_composite_numbers(array): <NEW_LINE> <INDENT> composites = sorted([n for n in array if not is_prime_number(n)]) <NEW_LINE> i = 0 <NEW_LINE> for index, number in enumerate(array): <NEW_LINE> <INDENT> if is_prime_number(number): <NEW_LINE> <INDENT> array[index] = number <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> array[index] = composites[i] <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> <DEDENT> return array
Sort only composite numbers in the "array", prime numbers stay in the same place. :param array: array of integers :return: array on which only composite numbers are sorted
625941c163f4b57ef0001093
def onSaveFile(self,event): <NEW_LINE> <INDENT> wildcard = "Text files (*.txt)|*.txt|Data files (*.dat)|*.dat|All files (*.*)|*.*" <NEW_LINE> dialog = wx.FileDialog(None, "Choose a command file", wildcard=wildcard, style=wx.SAVE|wx.OVERWRITE_PROMPT) <NEW_LINE> if dialog.ShowModal() == wx.ID_OK: <NEW_LINE> <INDENT> f2 = open(dialog.GetPath(),'w') <NEW_LINE> f2.write(self.txtCommand.GetValue()) <NEW_LINE> f2.close() <NEW_LINE> dialog.Destroy() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
Browse for file
625941c18a349b6b435e80e8
def model_summary(logger): <NEW_LINE> <INDENT> logger.info('\nModel summary ...') <NEW_LINE> model_vars = tf.trainable_variables() <NEW_LINE> slim.model_analyzer.analyze_vars(model_vars, print_info=True)
Use tf.contrib.slim to get the equivalent of keras' model.summary() :param logger: a logging.Logger() instance
625941c107d97122c41787fc
def SetDefaultIcon(self,pszFile:'Any',iIcon:'Any') -> 'None': <NEW_LINE> <INDENT> pass
Description of SetDefaultIcon. Args: pszFile(Any):Description for pszFile iIcon(Any):Description for iIcon Returns: None
625941c1fbf16365ca6f6134
def _get_directory_mappings(): <NEW_LINE> <INDENT> import params <NEW_LINE> return { params.conf_dir : BACKUP_CONF_ARCHIVE }
Gets a dictionary of directory to archive name that represents the directories that need to be backed up and their output tarball archive targets :return: the dictionary of directory to tarball mappings
625941c176e4537e8c3515e5
def run(self): <NEW_LINE> <INDENT> doc_nodes = [] <NEW_LINE> n = 1 <NEW_LINE> method_path = self.arguments[0] <NEW_LINE> src, exc_txt, input_blocks, output_blocks, skipped, failed = get_test_src(method_path) <NEW_LINE> if skipped or failed: <NEW_LINE> <INDENT> body = nodes.literal_block(src, src) <NEW_LINE> body['language'] = 'python' <NEW_LINE> doc_nodes.append(body) <NEW_LINE> if skipped: <NEW_LINE> <INDENT> output = "Test skipped because " + exc_txt <NEW_LINE> output_node = skipped_or_failed_node(text=output, number=n, kind="skipped") <NEW_LINE> <DEDENT> elif failed: <NEW_LINE> <INDENT> output_node = skipped_or_failed_node(text=exc_txt, number=n, kind="failed") <NEW_LINE> <DEDENT> doc_nodes.append(output_node) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if 'no-split' in self.options: <NEW_LINE> <INDENT> input_block = '\n'.join(input_blocks) <NEW_LINE> output_block = '\n'.join(output_blocks) <NEW_LINE> input_node = nodes.literal_block(input_block, input_block) <NEW_LINE> input_node['language'] = 'python' <NEW_LINE> doc_nodes.append(input_node) <NEW_LINE> output_node = in_or_out_node(kind="Out", number=n, text=output_block) <NEW_LINE> doc_nodes.append(output_node) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for input_block, output_block in zip(input_blocks, output_blocks): <NEW_LINE> <INDENT> input_node = nodes.literal_block(input_block, input_block) <NEW_LINE> input_node['language'] = 'python' <NEW_LINE> doc_nodes.append(input_node) <NEW_LINE> output_node = in_or_out_node(kind="Out", number=n, text=output_block) <NEW_LINE> doc_nodes.append(output_node) <NEW_LINE> n += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return doc_nodes
Create a list of document nodes to return.
625941c1379a373c97cfaab9
def save_media(post, location): <NEW_LINE> <INDENT> url = post.url <NEW_LINE> stripped_url = url.split("?")[0] <NEW_LINE> if url.endswith(post.permalink): return None <NEW_LINE> extension = stripped_url.split(".")[-1].lower() <NEW_LINE> domain = ".".join(post.url.split("/")[2].split(".")[-2:]) <NEW_LINE> readable_name = list(filter(bool, post.permalink.split("/")))[-1] <NEW_LINE> if domain == "imgur.com" and "gallery" in url: return None <NEW_LINE> if extension in IMAGE_EXTENSIONS + VIDEO_EXTENSIONS: <NEW_LINE> <INDENT> filename = f"{readable_name}_{post.id}.{extension}" <NEW_LINE> response = requests.get(post.url) <NEW_LINE> media_type = response.headers.get("Content-Type", "") <NEW_LINE> if media_type.startswith("image") or media_type.startswith("video"): <NEW_LINE> <INDENT> with open(os.path.join(location, "media", filename), "wb") as f: <NEW_LINE> <INDENT> f.write(response.content) <NEW_LINE> return filename <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if domain == "redd.it": <NEW_LINE> <INDENT> downloader = Downloader(max_q=True, log=False) <NEW_LINE> downloader.url = url <NEW_LINE> current = os.getcwd() <NEW_LINE> try: <NEW_LINE> <INDENT> name = downloader.download() <NEW_LINE> extension = name.split(".")[-1] <NEW_LINE> filename = f"{readable_name}_{post.id}.{extension}" <NEW_LINE> os.rename(name, os.path.join(location, "media", filename)) <NEW_LINE> return filename <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> os.chdir(current) <NEW_LINE> return None <NEW_LINE> <DEDENT> <DEDENT> if domain == "gfycat.com": <NEW_LINE> <INDENT> html = requests.get(post.url).content <NEW_LINE> if len(html) < 50000: <NEW_LINE> <INDENT> match = re.search(r"http([\dA-Za-z\+\:\/\.]+)\.mp4", html.decode()) <NEW_LINE> if match: <NEW_LINE> <INDENT> url = match.group() <NEW_LINE> <DEDENT> else: return None <NEW_LINE> <DEDENT> <DEDENT> if domain =="imgur.com" and extension != "gifv": <NEW_LINE> <INDENT> for extension in IMAGE_EXTENSIONS: <NEW_LINE> <INDENT> direct_url = 
f'https://i.{url[url.find("//") + 2:]}.{extension}' <NEW_LINE> direct_url = direct_url.replace("i.imgur.com", "imgur.com") <NEW_LINE> direct_url = direct_url.replace("m.imgur.com", "imgur.com") <NEW_LINE> response = requests.get(direct_url) <NEW_LINE> if response.status_code == 200: <NEW_LINE> <INDENT> filename = f"{readable_name}_{post.id}.{extension}" <NEW_LINE> with open(os.path.join(location, "media", filename), "wb") as f: <NEW_LINE> <INDENT> f.write(response.content) <NEW_LINE> return filename <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if domain in PLATFORMS: <NEW_LINE> <INDENT> options = { "nocheckcertificate": True, "quiet": True, "no_warnings": True, "ignoreerrors": True, "outtmpl": os.path.join( location, "media", f"{readable_name}_{post.id}" + ".%(ext)s" ) } <NEW_LINE> with youtube_dl.YoutubeDL(options) as ydl: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> ydl.download([url]) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> for f in os.listdir(os.path.join(location, "media")): <NEW_LINE> <INDENT> if f.startswith(f"{readable_name}_{post.id}"): <NEW_LINE> <INDENT> return f
Takes a post object and tries to download any image/video it might be associated with. If it can, it will return the filename.
625941c163d6d428bbe44464
def test_plot_topomap_bads(): <NEW_LINE> <INDENT> import matplotlib.pyplot as plt <NEW_LINE> data = np.random.RandomState(0).randn(3, 1000) <NEW_LINE> raw = RawArray(data, create_info(3, 1000., 'eeg')) <NEW_LINE> ch_pos_dict = {name: pos for name, pos in zip(raw.ch_names, np.eye(3))} <NEW_LINE> raw.info.set_montage(make_dig_montage(ch_pos_dict, coord_frame='head')) <NEW_LINE> for count in range(3): <NEW_LINE> <INDENT> raw.info['bads'] = raw.ch_names[:count] <NEW_LINE> raw.info._check_consistency() <NEW_LINE> plot_topomap(data[:, 0], raw.info) <NEW_LINE> <DEDENT> plt.close('all')
Test plotting topomap with bad channels (gh-7213).
625941c121a7993f00bc7c61
def test_bv_boolops(): <NEW_LINE> <INDENT> for name in bv_solvers: <NEW_LINE> <INDENT> s = smt(name, strict=True) <NEW_LINE> bvand = s.BVAnd <NEW_LINE> bvor = s.BVOr <NEW_LINE> bvnot = s.BVNot <NEW_LINE> Equals = s.Equals <NEW_LINE> bvsort = s.BitVec(8) <NEW_LINE> s.SetLogic('QF_BV') <NEW_LINE> s.SetOption('produce-models', 'true') <NEW_LINE> bv1 = s.DeclareConst('bv1', bvsort) <NEW_LINE> bv2 = s.DeclareConst('bv2', bvsort) <NEW_LINE> bv3 = s.DeclareConst('bv3', bvsort) <NEW_LINE> bvresult = s.DeclareConst('bvresult', bvsort) <NEW_LINE> bvresult2 = s.DeclareConst('bvresult2', bvsort) <NEW_LINE> bvnotresult = s.DeclareConst('bvnotresult', bvsort) <NEW_LINE> bv1andbv2 = s.ApplyFun(bvand, bv1, bv2) <NEW_LINE> bv2orbv3 = s.ApplyFun(bvor, bv2, bv3) <NEW_LINE> notbv3 = s.ApplyFun(bvnot, bv3) <NEW_LINE> assert bv2orbv3.sort == s.BitVec(8) <NEW_LINE> if name != 'Boolector': <NEW_LINE> <INDENT> assert bv2orbv3.op == bvor <NEW_LINE> <DEDENT> bvresulteq = s.ApplyFun(Equals, bvresult, bv1andbv2) <NEW_LINE> bvresult2eq = s.ApplyFun(Equals, bvresult2, bv2orbv3) <NEW_LINE> bvnotresulteq = s.ApplyFun(Equals, bvnotresult, notbv3) <NEW_LINE> assert bvnotresulteq.sort == s.Bool() or bvnotresulteq.sort == s.BitVec(1) <NEW_LINE> fifteen = s.TheoryConst(bvsort, 15) <NEW_LINE> twoforty = s.TheoryConst(bvsort, 240) <NEW_LINE> eightyfive = s.TheoryConst(bvsort, 85) <NEW_LINE> bv1eq = s.ApplyFun(Equals, bv1, fifteen) <NEW_LINE> bv2eq = s.ApplyFun(Equals, bv2, twoforty) <NEW_LINE> bv3eq = s.ApplyFun(Equals, bv3, eightyfive) <NEW_LINE> s.Assert(bvresulteq) <NEW_LINE> s.Assert(bvresult2eq) <NEW_LINE> s.Assert(bvnotresulteq) <NEW_LINE> s.Assert(bv1eq) <NEW_LINE> s.Assert(bv2eq) <NEW_LINE> s.Assert(bv3eq) <NEW_LINE> s.CheckSat() <NEW_LINE> bvr1 = s.GetValue(bvresult) <NEW_LINE> bvr2 = s.GetValue(bvresult2) <NEW_LINE> bvnr = s.GetValue(bvnotresult) <NEW_LINE> assert bvr1.as_int() == 0 <NEW_LINE> assert bvr2.as_int() == 245 <NEW_LINE> assert bvnr.as_int() == 170
Sets bv1 = 00001111 bv2 = 11110000 bv3 = 01010101 Then computes: bv1 and bv2 bv2 or bv3 not bv3
625941c1009cb60464c63328
def predict(self, x_vector): <NEW_LINE> <INDENT> return _algorithms.svm_dense_predict(self, x_vector)
predict(self, x_vector) -> float
625941c1ab23a570cc2500f5
def fetch_multiple_labels(mbids, includes=None): <NEW_LINE> <INDENT> if includes is None: <NEW_LINE> <INDENT> includes = [] <NEW_LINE> <DEDENT> includes_data = defaultdict(dict) <NEW_LINE> check_includes('label', includes) <NEW_LINE> with mb_session() as db: <NEW_LINE> <INDENT> query = db.query(models.Label). options(joinedload("type")). options(joinedload("area")) <NEW_LINE> labels = get_entities_by_gids( query=query, entity_type='label', mbids=mbids, ) <NEW_LINE> label_ids = [label.id for label in labels.values()] <NEW_LINE> if 'artist-rels' in includes: <NEW_LINE> <INDENT> get_relationship_info( db=db, target_type='artist', source_type='label', source_entity_ids=label_ids, includes_data=includes_data, ) <NEW_LINE> <DEDENT> if 'url-rels' in includes: <NEW_LINE> <INDENT> get_relationship_info( db=db, target_type='url', source_type='label', source_entity_ids=label_ids, includes_data=includes_data, ) <NEW_LINE> <DEDENT> return {str(mbid): serialize_labels(label, includes_data[label.id]) for mbid, label in labels.items()}
Get info related to multiple labels using their MusicBrainz IDs. Args: mbids (list): List of MBIDs of labels. includes (list): List of information to be included. Returns: A dictionary containing info of multiple labels keyed by their MBID. If an MBID doesn't exist in the database, it isn't returned. If an MBID is a redirect, the dictionary key will be the MBID given as an argument, but the returned object will contain the new MBID in the 'mbid' key.
625941c1d99f1b3c44c67509
def generate_qx(data): <NEW_LINE> <INDENT> tmpl = Template(QX_TMPL) <NEW_LINE> abbrevs = get_abbrevs(data) <NEW_LINE> return tmpl.substitute(ABBREV=abbrevs)
Templatum behelyettesites.
625941c1aad79263cf3909b3
def set_statusbar(self): <NEW_LINE> <INDENT> displayname = parse.get_profile_name(self.stats) <NEW_LINE> lastplayed = parse.get_last_played(self.stats) <NEW_LINE> timeplayed = parse.get_session_seconds(self.stats) <NEW_LINE> status = 'Profile: {} // Last played: {} // Total time played: {}'.format( displayname, lastplayed, timeplayed) <NEW_LINE> self.setStatusTip(status)
Resets the application statusbar.
625941c12c8b7c6e89b35737
def get_job_metadata(self, page): <NEW_LINE> <INDENT> descr, pos = text.extract( page, '<meta name="description" content="', '"') <NEW_LINE> title, pos = text.extract( page, '<span class="j-title-breadcrumb">', '</span>', pos) <NEW_LINE> views, pos = text.extract( page, '<span class="notranslate pippin-data">', 'views<', pos) <NEW_LINE> published, pos = text.extract( page, '<time datetime="', '"', pos) <NEW_LINE> alt_descr, pos = text.extract( page, 'id="slideshow-description-paragraph" class="notranslate">', '</p>', pos) <NEW_LINE> if descr.endswith("…") and alt_descr: <NEW_LINE> <INDENT> descr = text.remove_html(alt_descr).strip() <NEW_LINE> <DEDENT> return { "user": self.user, "presentation": self.presentation, "title": text.unescape(title.strip()), "description": text.unescape(descr), "views": util.safe_int(views.replace(",", "")), "published": published, }
Collect metadata for extractor-job
625941c1097d151d1a222dd0
def longestConsecutive(self, root): <NEW_LINE> <INDENT> return self.longestConsecutive_helper(root, -10000, 1)
:type root: TreeNode :rtype: int
625941c1be7bc26dc91cd579
def clickTile(self, x, y): <NEW_LINE> <INDENT> xString = str(x) <NEW_LINE> yString = str(y) <NEW_LINE> tile = self._gameElements.find_element_by_id("{0}_{1}".format(yString, xString)) <NEW_LINE> tile.click()
INPUT: x, y (ints) Result: Clicks Tile
625941c18c0ade5d55d3e92e
def generateScatterPoints(currDataframe, currContinent, xAxis, yAxis, palette): <NEW_LINE> <INDENT> import plotly.graph_objs as go <NEW_LINE> continent_df = currDataframe[currDataframe['continent']==currContinent] <NEW_LINE> scatter = go.Scatter(x = continent_df[xAxis], y = continent_df[yAxis], name = currContinent, mode = 'markers', marker = dict( size = 10, color = palette[currContinent] ), text = continent_df['city'] + ", " + continent_df['country'], hoverinfo = 'text' ) <NEW_LINE> return scatter;
Return a Plotly scatter based on the given inputs. Keyword arguments: currDataframe -- Pandas dataframe being processed currContinent -- name (string) of the continent that we are filtering by xAxis -- name (string) of the column we want on the x axis yAxis -- name (string) of the column we want on the y axis palette -- takes in a dict of continent:hex code pairs
625941c199fddb7c1c9de307
def details_for_given_date_in_gradebook_history_for_this_course(request_ctx, course_id, date, per_page=None, **request_kwargs): <NEW_LINE> <INDENT> if per_page is None: <NEW_LINE> <INDENT> per_page = request_ctx.per_page <NEW_LINE> <DEDENT> path = '/v1/courses/{course_id}/gradebook_history/{date}' <NEW_LINE> payload = { 'per_page' : per_page, } <NEW_LINE> url = request_ctx.base_api_url + path.format(course_id=course_id, date=date) <NEW_LINE> response = client.get(request_ctx, url, payload=payload, **request_kwargs) <NEW_LINE> return response
Returns the graders who worked on this day, along with the assignments they worked on. More details can be obtained by selecting a grader and assignment and calling the 'submissions' api endpoint for a given date. :param request_ctx: The request context :type request_ctx: :class:RequestContext :param course_id: (required) The id of the contextual course for this API call :type course_id: integer :param date: (required) The date for which you would like to see detailed information :type date: string :param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE :type per_page: integer or None :return: Details for a given date in gradebook history for this course :rtype: requests.Response (with array data)
625941c1b5575c28eb68df74
def block_individual(self, individual, next_node): <NEW_LINE> <INDENT> individual.is_blocked = True <NEW_LINE> self.change_state_block() <NEW_LINE> next_node.blocked_queue.append((self.id_number, individual.id_number)) <NEW_LINE> for svr in next_node.servers: <NEW_LINE> <INDENT> self.simulation.digraph.add_edge(str(individual.server), str(svr))
Blocks the individual from entering the next node >>> seed(4) >>> Q = Simulation('datafortesting/logs_test_for_simulation/') >>> inds = [Individual(i+1) for i in range(7)] >>> N1 = Q.transitive_nodes[2] >>> N1.individuals = inds[:6] >>> N2 = Q.transitive_nodes[3] >>> N2.accept(inds[6], 2) >>> inds[6].is_blocked False >>> N1.blocked_queue [] >>> Q.digraph.edges() [] >>> N2.block_individual(inds[6], N1) >>> inds[6].is_blocked True >>> N1.blocked_queue [(4, 7)] >>> Q.digraph.edges() [('Server 1 at Node 4', 'Server 8 at Node 3'), ('Server 1 at Node 4', 'Server 7 at Node 3'), ('Server 1 at Node 4', 'Server 5 at Node 3'), ('Server 1 at Node 4', 'Server 1 at Node 3'), ('Server 1 at Node 4', 'Server 2 at Node 3'), ('Server 1 at Node 4', 'Server 4 at Node 3'), ('Server 1 at Node 4', 'Server 3 at Node 3'), ('Server 1 at Node 4', 'Server 6 at Node 3')]
625941c1b7558d58953c4e8d
def add_coefficients(self,L,overwrite=False): <NEW_LINE> <INDENT> if not isinstance(L,dict): <NEW_LINE> <INDENT> raise ValueError("Call with dictionary as argument!") <NEW_LINE> <DEDENT> for p in L.keys(): <NEW_LINE> <INDENT> c=mpmath.mpmathify(L[p]) <NEW_LINE> cd=ceil(mpmath.log10(abs(c))) <NEW_LINE> if(cd>self.maxdigs): <NEW_LINE> <INDENT> self.maxdigs=cd <NEW_LINE> <DEDENT> if(is_int(p)): <NEW_LINE> <INDENT> (r,n)=rn_from_D(self._space.WR,p) <NEW_LINE> <DEDENT> elif(isinstance(p,tuple)): <NEW_LINE> <INDENT> (r,n)=p <NEW_LINE> <DEDENT> if r in self._coeffs: <NEW_LINE> <INDENT> if n in self._coeffs[r]: <NEW_LINE> <INDENT> c_old=self._coeffs[r][n] <NEW_LINE> d1=dist_from_int(c)[0] <NEW_LINE> d2=dist_from_int(c_old)[0] <NEW_LINE> if(overwrite): <NEW_LINE> <INDENT> self._coeffs[r][n]=c <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self._coeffs[r][n]=c <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if not r < 0 or r > self._space.multiplier().ambient_rank(): <NEW_LINE> <INDENT> raise ValueError("Key {0} corr to (r,n)=({1},{2}) is invalid for the current space!".format(p,r,n)) <NEW_LINE> <DEDENT> elif r not in self._space.multiplier().D(): <NEW_LINE> <INDENT> if self._space._sym_type==-1 and (r==0 or r==self._space.multiplier().N): <NEW_LINE> <INDENT> if abs(c) > 10**(1-self.prec): <NEW_LINE> <INDENT> raise ValueError("Coefficient should be zero by symmetry. Got c({0},{1})={2}!".format(r,n,c)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._coeffs[r][n]=0 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> mr=2*self.multiplier().N-r <NEW_LINE> if mr in self._coeffs: <NEW_LINE> <INDENT> if n in self._coeffs[mr]: <NEW_LINE> <INDENT> c_old=self._coeffs[mr][n] <NEW_LINE> if abs(c-self._space.multiplier()._sym_type*c_old) > 10**(1-self.prec): <NEW_LINE> <INDENT> st="Might add an erroneous coefficient! Got c({0},{1})={2}. 
".format(r,n,c) <NEW_LINE> st+="From previous coefficients should have {0}".format(self._space._sym_type*c_old) <NEW_LINE> raise ValueError(st) <NEW_LINE> <DEDENT> if overwrite: <NEW_LINE> <INDENT> self._coeffs[mr][n]=c <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Coefficient should be zero by symmetry. Got c({0},{1})={2}!" .format(r,n,c))
Add one or more coefficients to self. INPUT: -''L'' -- dictionary of pairs of indices and coefficients -''overwrite'' -- logical, set to True if we want to overwrite present coefficients
625941c123e79379d52ee4db
def __call__(self, value, **flags): <NEW_LINE> <INDENT> if flags.get('tupling__generator', self.generator): <NEW_LINE> <INDENT> return ( transformer(value, **flags) for transformer in self.transformers ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return [ transformer(value, **flags) for transformer in self.transformers ]
TODO: Document
625941c1d18da76e23532449
def execute_benchmark(self) -> None: <NEW_LINE> <INDENT> self._validate() <NEW_LINE> self._prepare_nighthawk() <NEW_LINE> self._prepare_envoy() <NEW_LINE> cmd = ("bazel test " "--test_summary=detailed " "--test_output=all " "--test_arg=--log-cli-level=info " "--test_env=ENVOY_IP_TEST_VERSIONS=v4only " "--test_env=HEAPPROFILE= " "--test_env=HEAPCHECK= " "--cache_test_results=no " "--compilation_mode=opt " "--cxxopt=-g " "--cxxopt=-ggdb3 " "--define tcmalloc=gperftools " "//benchmarks:* ") <NEW_LINE> cmd_params = cmd_exec.CommandParameters(cwd=self._benchmark_dir) <NEW_LINE> env = self._control.environment <NEW_LINE> binary_benchmark_vars = { 'TMPDIR': env.output_dir } <NEW_LINE> if self._envoy_binary_path: <NEW_LINE> <INDENT> binary_benchmark_vars['ENVOY_PATH'] = self._envoy_binary_path <NEW_LINE> <DEDENT> log.debug(f"Using environment: {binary_benchmark_vars}") <NEW_LINE> for (key, value) in binary_benchmark_vars.items(): <NEW_LINE> <INDENT> if key not in env.variables: <NEW_LINE> <INDENT> log.debug(f"Building control environment variables: {key}={value}") <NEW_LINE> env.variables[key] = value <NEW_LINE> <DEDENT> <DEDENT> environment_controller = base_benchmark.BenchmarkEnvController(env) <NEW_LINE> with environment_controller: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cmd_exec.run_command(cmd, cmd_params) <NEW_LINE> <DEDENT> except subprocess.CalledProcessError as cpe: <NEW_LINE> <INDENT> log.error(f"Unable to execute the benchmark: {cpe}")
Execute the binary benchmark Uses either the Envoy specified in ENVOY_PATH, or one built from a specified source.
625941c1c4546d3d9de729a7
def point_antenna_to_transmit(self): <NEW_LINE> <INDENT> self.mai_set_latlong(75, 50, 0, 999999999) <NEW_LINE> self.mai_set_mode(4)
Turn the spacecraft. Point the X-band antennas toward the ground station.
625941c12eb69b55b151c822
def swap_k(L, k): <NEW_LINE> <INDENT> if k != 0: <NEW_LINE> <INDENT> firstkitems = L[:k] <NEW_LINE> lastkitems = L[-k:] <NEW_LINE> L[:k] = lastkitems <NEW_LINE> L[-k:] = firstkitems
(list, int) -> NoneType Precondtion: 0 <= k <= len(L) // 2 Swap the first k items of L with the last k items of L. >>> nums = [1, 2, 3, 4, 5, 6] >>> swap_k(nums, 2) >>> nums [5, 6, 3, 4, 1, 2]
625941c197e22403b379cf0e
def chkdbname(dbname): <NEW_LINE> <INDENT> ret = re.match('[a-zA-Z][a-zA-Z0-9]{0,43}', dbname) is not None and dbname != 'postgres' <NEW_LINE> if not ret: <NEW_LINE> <INDENT> warn("Invalid dbname: {0}".format(safestr(dbname))) <NEW_LINE> <DEDENT> return ret
verify that a database name is valid
625941c1507cdc57c6306c4b
def count_time(): <NEW_LINE> <INDENT> self.time = self.time.addMSecs(interval) <NEW_LINE> self.display(self.time.toString(display_format))
Count time within an interval and show time on display. Note: function is initialized before called.
625941c1d7e4931a7ee9de92
def copy(self): <NEW_LINE> <INDENT> return self.copyto(self.context)
Make a copy of the ndarray on the same context Returns ------- NDArray The copied array Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = x.copy() >>> y.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32)
625941c1925a0f43d2549dea
def predecessor_from_dir(workspace, step): <NEW_LINE> <INDENT> searchstr = '{:02}*'.format(step - 1) <NEW_LINE> results = [] <NEW_LINE> for folder in glob.glob(os.path.join(workspace.root_dir, searchstr)): <NEW_LINE> <INDENT> if os.path.isdir(folder): <NEW_LINE> <INDENT> results.append(folder) <NEW_LINE> <DEDENT> <DEDENT> if len(results) > 1: <NEW_LINE> <INDENT> e = 'WARNING! More than one predecessor workspace found. ' 'Check filenames for conflicts.' <NEW_LINE> raise PipelineError(e) <NEW_LINE> <DEDENT> elif len(results) == 0: <NEW_LINE> <INDENT> e = 'WARNING! No predecessor workspaces found. ' 'Check filenames for conflicts.' <NEW_LINE> raise PipelineError(e) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return results[0]
Get the workspace that precedes the current step.
625941c156b00c62f0f145cd
def init(): <NEW_LINE> <INDENT> args = get_args() <NEW_LINE> global conf <NEW_LINE> conf = { 'lastfm_user': args.lastfm_user, 'lastfm_key': args.key, 'path': args.path, 'format': args.format.lower(), } <NEW_LINE> global log <NEW_LINE> log = get_logger(args.log, args.verbose)
Script initialization
625941c1e76e3b2f99f3a785
def __init__(self, attr, style=None, pattern=None, visible=None, editable=False): <NEW_LINE> <INDENT> super(TextElement, self).__init__() <NEW_LINE> self._bounds = Rectangle(0, 0, width=15, height=10) <NEW_LINE> self._style = Style() <NEW_LINE> self._style.add('text-padding', (2, 2, 2, 2)) <NEW_LINE> self._style.add('text-align', (ALIGN_CENTER, ALIGN_TOP)) <NEW_LINE> self._style.add('text-outside', False) <NEW_LINE> self._style.add('text-rotated', False) <NEW_LINE> self._style.add('text-align-str', None) <NEW_LINE> self._style.add('font', DEFAULT_TEXT_FONT) <NEW_LINE> if style: <NEW_LINE> <INDENT> self._style.update(style) <NEW_LINE> <DEDENT> self.attr = attr <NEW_LINE> self._text = '' <NEW_LINE> if visible: <NEW_LINE> <INDENT> self.is_visible = visible <NEW_LINE> <DEDENT> if pattern: <NEW_LINE> <INDENT> self._pattern = pattern <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._pattern = '%s' <NEW_LINE> <DEDENT> self.editable = editable
Create new text element with bounds (0, 0, 10, 10) and empty text. Parameters: - visible: function, which evaluates to True/False if text should be visible
625941c1ff9c53063f47c16a
def _createPopupMenus(self): <NEW_LINE> <INDENT> ProjectBaseBrowser._createPopupMenus(self) <NEW_LINE> self.editPixmapAct = self.menu.addAction( self.tr('Open in Icon Editor'), self._editPixmap) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.mimeTypeAct = self.menu.addAction( self.tr('Show Mime-Type'), self.__showMimeType) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.renameFileAct = self.menu.addAction( self.tr('Rename file'), self._renameFile) <NEW_LINE> self.menuActions.append(self.renameFileAct) <NEW_LINE> act = self.menu.addAction( self.tr('Remove from project'), self.__removeItem) <NEW_LINE> self.menuActions.append(act) <NEW_LINE> act = self.menu.addAction(self.tr('Delete'), self.__deleteItem) <NEW_LINE> self.menuActions.append(act) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.menu.addAction( self.tr('Add files...'), self.project.addOthersFiles) <NEW_LINE> self.menu.addAction( self.tr('Add directory...'), self.project.addOthersDir) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.menu.addAction(self.tr('Refresh'), self.__refreshItem) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.menu.addAction( self.tr('Copy Path to Clipboard'), self._copyToClipboard) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.menu.addAction( self.tr('Expand all directories'), self._expandAllDirs) <NEW_LINE> self.menu.addAction( self.tr('Collapse all directories'), self._collapseAllDirs) <NEW_LINE> self.menu.addSeparator() <NEW_LINE> self.menu.addAction(self.tr('Configure...'), self._configure) <NEW_LINE> self.backMenu = QMenu(self) <NEW_LINE> self.backMenu.addAction( self.tr('Add files...'), self.project.addOthersFiles) <NEW_LINE> self.backMenu.addAction( self.tr('Add directory...'), self.project.addOthersDir) <NEW_LINE> self.backMenu.addSeparator() <NEW_LINE> self.backMenu.addAction( self.tr('Expand all directories'), self._expandAllDirs) <NEW_LINE> self.backMenu.addAction( self.tr('Collapse all directories'), self._collapseAllDirs) <NEW_LINE> 
self.backMenu.addSeparator() <NEW_LINE> self.backMenu.addAction(self.tr('Configure...'), self._configure) <NEW_LINE> self.backMenu.setEnabled(False) <NEW_LINE> self.multiMenu.addSeparator() <NEW_LINE> act = self.multiMenu.addAction( self.tr('Remove from project'), self.__removeItem) <NEW_LINE> self.multiMenuActions.append(act) <NEW_LINE> act = self.multiMenu.addAction( self.tr('Delete'), self.__deleteItem) <NEW_LINE> self.multiMenuActions.append(act) <NEW_LINE> self.multiMenu.addSeparator() <NEW_LINE> self.multiMenu.addAction( self.tr('Expand all directories'), self._expandAllDirs) <NEW_LINE> self.multiMenu.addAction( self.tr('Collapse all directories'), self._collapseAllDirs) <NEW_LINE> self.multiMenu.addSeparator() <NEW_LINE> self.multiMenu.addAction(self.tr('Configure...'), self._configure) <NEW_LINE> self.menu.aboutToShow.connect(self.__showContextMenu) <NEW_LINE> self.multiMenu.aboutToShow.connect(self.__showContextMenuMulti) <NEW_LINE> self.backMenu.aboutToShow.connect(self.__showContextMenuBack) <NEW_LINE> self.mainMenu = self.menu
Protected overloaded method to generate the popup menu.
625941c1460517430c394100
def sign_out_task_no_sign_in(self): <NEW_LINE> <INDENT> pass
When there is no corresponding sign-in record, the sign-out task should append a record in the same way as a sign-in.
625941c1d53ae8145f87a1e9
def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> super().__init__(*args, **kwargs) <NEW_LINE> self.updates_broker = ProjectUpdateBroker()
Initialize resource instance.
625941c15fdd1c0f98dc01a8
@app.route('/movies/<movie_id>') <NEW_LINE> def show_movie_profile(movie_id): <NEW_LINE> <INDENT> movie = Movie.query.filter_by(movie_id=movie_id).first() <NEW_LINE> user_id = session.get('user_id', '') <NEW_LINE> if user_id: <NEW_LINE> <INDENT> user_rating = Rating.query.filter_by( movie_id=movie_id,user_id=user_id).first() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> user_rating = None <NEW_LINE> <DEDENT> score_tups = db.session.query(Rating.score,func.count(Rating.rating_id)).filter_by(movie_id=movie_id).group_by("score").all() <NEW_LINE> return render_template('movie_profile.html', movie=movie, user_rating=user_rating, score_tups=score_tups)
Gets the movie profile page for the given movie_id.
625941c1167d2b6e31218b0b
def get_download_url(self, inner_response): <NEW_LINE> <INDENT> app_id_pat = 'opendown\((.*?)\);" title="下载到电脑"' <NEW_LINE> app_id = re.findall(app_id_pat, inner_response) <NEW_LINE> if app_id: <NEW_LINE> <INDENT> app_id = app_id[0] <NEW_LINE> <DEDENT> download_url = "http://www.anzhi.com/dl_app.php?s=" + app_id + "&n=5" <NEW_LINE> return download_url
获取下载地址
625941c13617ad0b5ed67e6e
def tfrecord_loader(data_path: str, index_path: typing.Union[str, None], description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None, shard: typing.Optional[typing.Tuple[int, int]] = None, ) -> typing.Iterable[typing.Dict[str, np.ndarray]]: <NEW_LINE> <INDENT> typename_mapping = { "byte": "bytes_list", "float": "float_list", "int": "int64_list" } <NEW_LINE> record_iterator = tfrecord_iterator(data_path, index_path, shard) <NEW_LINE> for record in record_iterator: <NEW_LINE> <INDENT> example = example_pb2.Example() <NEW_LINE> example.ParseFromString(record) <NEW_LINE> all_keys = list(example.features.feature.keys()) <NEW_LINE> if description is None: <NEW_LINE> <INDENT> description = dict.fromkeys(all_keys, None) <NEW_LINE> <DEDENT> elif isinstance(description, list): <NEW_LINE> <INDENT> description = dict.fromkeys(description, None) <NEW_LINE> <DEDENT> features = {} <NEW_LINE> for key, typename in description.items(): <NEW_LINE> <INDENT> if key not in all_keys: <NEW_LINE> <INDENT> raise KeyError(f"Key {key} doesn't exist (select from {all_keys})!") <NEW_LINE> <DEDENT> field = example.features.feature[key].ListFields()[0] <NEW_LINE> inferred_typename, value = field[0].name, field[1].value <NEW_LINE> if typename is not None: <NEW_LINE> <INDENT> tf_typename = typename_mapping[typename] <NEW_LINE> if tf_typename != inferred_typename: <NEW_LINE> <INDENT> reversed_mapping = {v: k for k, v in typename_mapping.items()} <NEW_LINE> raise TypeError(f"Incompatible type '{typename}' for `{key}` " f"(should be '{reversed_mapping[inferred_typename]}').") <NEW_LINE> <DEDENT> <DEDENT> if inferred_typename == "bytes_list": <NEW_LINE> <INDENT> value = np.frombuffer(value[0], dtype=np.uint8) <NEW_LINE> <DEDENT> elif inferred_typename == "float_list": <NEW_LINE> <INDENT> value = np.array(value, dtype=np.float32) <NEW_LINE> <DEDENT> elif inferred_typename == "int64_list": <NEW_LINE> <INDENT> value = np.array(value, dtype=np.int32) <NEW_LINE> <DEDENT> features[key] = 
value <NEW_LINE> <DEDENT> yield features
Create an iterator over the (decoded) examples contained within the dataset. Decodes raw bytes of the features (contained within the dataset) into its respective format. Params: ------- data_path: str TFRecord file path. index_path: str or None Index file path. Can be set to None if no file is available. description: list or dict of str, optional, default=None List of keys or dict of (key, value) pairs to extract from each record. The keys represent the name of the features and the values ("byte", "float", or "int") correspond to the data type. If dtypes are provided, then they are verified against the inferred type for compatibility purposes. If None (default), then all features contained in the file are extracted. shard: tuple of ints, optional, default=None A tuple (index, count) representing worker_id and num_workers count. Necessary to evenly split/shard the dataset among many workers (i.e. >1). Yields: ------- features: dict of {str, np.ndarray} Decoded bytes of the features into its respective data type (for an individual record).
625941c1090684286d50ec59
def PostRequestForElementTypePlacement(self,elementType): <NEW_LINE> <INDENT> pass
PostRequestForElementTypePlacement(self: UIDocument, elementType: ElementType) Places a request on Revit's command queue for the user to place instances of the specified ElementType. This does not execute immediately, but instead runs when control returns to Revit from the current API context. elementType: The ElementType of which instances are to be placed.
625941c15fcc89381b1e1632
def __join(self): <NEW_LINE> <INDENT> for greenlet in self.greenlets: <NEW_LINE> <INDENT> greenlet.join()
调用后,阻塞之后的代码 :return:
625941c192d797404e3040ff