Columns: content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
import glob

from scipy.io import wavfile  # wavfile.read matches the (rate, data) usage below

def wav16khz2mfcc(dir_name):
    """
    Loads all *.wav files from directory dir_name (must be 16kHz),
    converts them into MFCC features (13 coefficients) and stores
    them into a dictionary. Keys are the file names and values are
    2D numpy arrays of MFCC features.
    """
    features = {}
    for f in glob.glob(dir_name + '/*.wav'):
        print('Processing file: ', f)
        rate, s = wavfile.read(f)
        assert rate == 16000
        features[f] = mfcc(s, 400, 240, 512, 16000, 23, 13)  # mfcc helper assumed defined elsewhere
    return features
6eae15a7ac999cd42c1e3161221356cf720d54c0
3,636,927
def add_metadata(infile, outfile, sample_metadata):
    """Add sample-level metadata to a biom file.

    Sample-level metadata should be in a format akin to
    http://qiime.org/tutorials/tutorial.html#mapping-file-tab-delimited-txt

    :param infile: String; name of the biom file to which metadata shall be added
    :param outfile: String; name of the resulting metadata-enriched biom file
    :param sample_metadata: String; name of the sample-level metadata
        tab-delimited text file. Sample attributes are taken from this file.

    Note: the sample names in the `sample_metadata` file must match the
    sample names in the biom file.

    External dependencies
      - biom-format: http://biom-format.org/
    """
    return {
        "name": "biom_add_metadata: " + infile,
        "actions": [("biom add-metadata"
                     " -i " + infile +
                     " -o " + outfile +
                     " -m " + sample_metadata)],
        "file_dep": [infile],
        "targets": [outfile]
    }
e779f876159741de60e99002a90906b151dc7530
3,636,928
def multinomial(n):
    """Finds the multinomial coefficient for a given array of numbers.

    Args:
        n (list): the integers to be used.
    """
    binomials = [[np.sum(n), n[0]]]
    for i in range(1, len(n)):
        new_sum = binomials[i-1][0] - binomials[i-1][1]
        binomials.append([new_sum, n[i]])
    bins = []
    for b in binomials:
        bins.append(binomial_coefficient(b[0], b[1]))
    return np.prod(bins)
6f38656d295a4d5ecf32a01a238cdad701e6e530
3,636,929
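The snippet above factors the multinomial coefficient into a chain of binomial coefficients. A minimal self-contained check of that identity, using math.comb from the standard library (multinomial_ref and the test values are illustrative, not from the original source):

import math

def multinomial_ref(n):
    # multinomial(n1,...,nk) = product over i of C(remaining_sum, n_i)
    total, result = sum(n), 1
    for k in n:
        result *= math.comb(total, k)
        total -= k
    return result

assert multinomial_ref([2, 1, 1]) == 12  # 4!/(2!*1!*1!) = 12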
def get_qc_data(sample_prj, p_con, s_con, fc_id=None):
    """Get qc data for a project, possibly subset by flowcell.

    :param sample_prj: project identifier
    :param p_con: object of type <ProjectSummaryConnection>
    :param s_con: object of type <SampleRunMetricsConnection>
    :param fc_id: optional flowcell identifier to subset by

    :returns: dictionary of qc results
    """
    project = p_con.get_entry(sample_prj)
    application = project.get("application", None) if project else None
    samples = s_con.get_samples(fc_id=fc_id, sample_prj=sample_prj)
    qcdata = {}
    for s in samples:
        qcdata[s["name"]] = {
            "sample": s.get("barcode_name", None),
            "project": s.get("sample_prj", None),
            "lane": s.get("lane", None),
            "flowcell": s.get("flowcell", None),
            "date": s.get("date", None),
            "application": application,
            "TOTAL_READS": int(s.get("picard_metrics", {}).get("AL_PAIR", {}).get("TOTAL_READS", -1)),
            "PERCENT_DUPLICATION": s.get("picard_metrics", {}).get("DUP_metrics", {}).get("PERCENT_DUPLICATION", "-1.0"),
            "MEAN_INSERT_SIZE": float(s.get("picard_metrics", {}).get("INS_metrics", {}).get("MEAN_INSERT_SIZE", "-1.0").replace(",", ".")),
            "GENOME_SIZE": int(s.get("picard_metrics", {}).get("HS_metrics", {}).get("GENOME_SIZE", -1)),
            "FOLD_ENRICHMENT": float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("FOLD_ENRICHMENT", "-1.0").replace(",", ".")),
            "PCT_USABLE_BASES_ON_TARGET": s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_USABLE_BASES_ON_TARGET", "-1.0"),
            "PCT_TARGET_BASES_10X": s.get("picard_metrics", {}).get("HS_metrics", {}).get("PCT_TARGET_BASES_10X", "-1.0"),
            "PCT_PF_READS_ALIGNED": s.get("picard_metrics", {}).get("AL_PAIR", {}).get("PCT_PF_READS_ALIGNED", "-1.0"),
        }
        target_territory = float(s.get("picard_metrics", {}).get("HS_metrics", {}).get("TARGET_TERRITORY", -1))
        pct_labels = ["PERCENT_DUPLICATION", "PCT_USABLE_BASES_ON_TARGET",
                      "PCT_TARGET_BASES_10X", "PCT_PF_READS_ALIGNED"]
        for l in pct_labels:
            if qcdata[s["name"]][l]:
                qcdata[s["name"]][l] = float(qcdata[s["name"]][l].replace(",", ".")) * 100
        if qcdata[s["name"]]["FOLD_ENRICHMENT"] and qcdata[s["name"]]["GENOME_SIZE"] and target_territory:
            qcdata[s["name"]]["PERCENT_ON_TARGET"] = float(
                qcdata[s["name"]]["FOLD_ENRICHMENT"] /
                (float(qcdata[s["name"]]["GENOME_SIZE"]) / float(target_territory))) * 100
    return qcdata
f267148f48f86151852e12fa3be8d5f8aefc6b11
3,636,930
def sql_sanitize(sql_name):
    """
    Return a SQL name (table or column) cleaned of problematic characters,
    e.g. punctuation )(][; and whitespace, retaining only alphanumeric
    characters. Don't use with values, which can be properly escaped with
    parameterization.

    Credits: Donald Miner, Source: StackOverflow, DateAccessed: 2020-02-20
    """
    sanitized_name = "".join(char for char in sql_name if char.isalnum())
    return sanitized_name
9ce9e0e8bed2348079fb23f2d27c53880fa1c795
3,636,931
def exists(name):
    """
    `True` if a category named `name` exists; `False` otherwise.
    """
    return db.cursor().execute('SELECT COUNT(*) FROM categories WHERE name = ?',
                               (name,)).fetchone()[0] != 0
e15f5d961a4420ef6bd00fa393ab9af440e5f983
3,636,933
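The snippet above relies on a parameterized query rather than string interpolation. A self-contained sketch of the same pattern against an in-memory SQLite database (the table and values are illustrative):

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE categories (name TEXT)')
db.execute('INSERT INTO categories VALUES (?)', ('books',))
# the ? placeholder lets the driver escape the value safely
count = db.cursor().execute(
    'SELECT COUNT(*) FROM categories WHERE name = ?', ('books',)).fetchone()[0]
assert count != 0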
def ESMP_MeshGetOwnedElementCount(mesh):
    """
    Preconditions: An ESMP_Mesh has been created.
    Postconditions: The owned elementCount for 'mesh' has been returned.
    Arguments:
        :RETURN: integer :: elementCount
        ESMP_Mesh :: mesh
    """
    lec = ct.c_int(0)
    rc = _ESMF.ESMC_MeshGetOwnedElementCount(mesh.struct.ptr, ct.byref(lec))
    if rc != constants._ESMP_SUCCESS:
        raise ValueError('ESMC_MeshGetOwnedElementCount() failed with rc = ' +
                         str(rc) + '. ' + constants._errmsg)
    elementCount = lec.value
    return elementCount
067411ba3b2fbc4f862375e2a3699d617999b6ed
3,636,934
def remove_control_chars_author(input):
    """
    Strip control characters from an author string.

    :param input: the author string to clean
    :return: the string with control characters removed
    """
    return CONTROL_CHAR_RE.sub('', input)
632bb20de05f3461156fa7ed311b9a04459de60f
3,636,935
def run():
    """Default Run Method"""
    return problem51(8)
3357bb4e6461f8142f93fc394f3b5aba0fba7ceb
3,636,936
def calc_c(e, a, b, u=1):  # Check units
    """
    Calculate the z components of the 4 partial waves in medium e.

    e: dielectric tensor
    a, b: components of the wavevector in the x and y directions
    return a list containing 4 roots for the z components of the partial waves
    """
    # assign names
    x = e * u
    x11, x12, x13 = x[0]
    x21, x22, x23 = x[1]
    x31, x32, x33 = x[2]
    # calculate the coefficients based on the symbolic expression
    coef4 = x33
    coef3 = a*x13 + a*x31 + b*x23 + b*x32
    coef2 = a**2*x11 + a**2*x33 + a*b*x12 + a*b*x21 + b**2*x22 + b**2*x33 - \
        x11*x33 + x13*x31 - x22*x33 + x23*x32
    coef1 = a**3*x13 + a**3*x31 + a**2*b*x23 + a**2*b*x32 + a*b**2*x13 + \
        a*b**2*x31 + a*x12*x23 - a*x13*x22 + a*x21*x32 - a*x22*x31 + b**3*x23 \
        + b**3*x32 - b*x11*x23 - b*x11*x32 + b*x12*x31 + b*x13*x21
    coef0 = a**4*x11 + a**3*b*x12 + a**3*b*x21 + a**2*b**2*x11 + a**2*b**2*x22 \
        - a**2*x11*x22 - a**2*x11*x33 + a**2*x12*x21 + a**2*x13*x31 + a*b**3*x12 + \
        a*b**3*x21 - a*b*x12*x33 + a*b*x13*x32 - a*b*x21*x33 + a*b*x23*x31 + \
        b**4*x22 - b**2*x11*x22 + b**2*x12*x21 - b**2*x22*x33 + b**2*x23*x32 + \
        x11*x22*x33 - x11*x23*x32 - x12*x21*x33 + x12*x23*x31 + x13*x21*x32 - \
        x13*x22*x31
    # calculate the roots of the quartic equation
    c = np.roots([coef4, coef3, coef2, coef1, coef0])
    if len(c) == 2:
        return np.append(c, c)
    return c
46a1ae481c9525ecc7ae1e5e3b119b8d3983ca16
3,636,937
from typing import Sequence
from typing import Tuple

def _jax_decode(
    compressed_message: ndarray,
    tail_limit: int,
    message_len: int,
    message_shape: Sequence[int],
    codec: CrayCodec,
    cdf_state: Sequence[ndarray],
) -> Tuple[Tuple[ndarray, int], ndarray, Sequence[ndarray]]:
    """JAX rANS decoding function.

    At a high level, this function takes a stack of information
    (``compressed_message``) and peeks at the top of the stack to see what the
    current symbol is. After identifying the symbol, this function pops a
    number of bits from the top of the stack approximately equal to the
    information content of the symbol (i.e. ``-log(symbol probability)``).
    This is done ``message_len`` times until the full message is retrieved.

    Args:
        compressed_message: The input stack containing the compressed message.
        tail_limit: A pointer to the current end of the tail.
        message_len: The size of the message to be decoded.
        message_shape: The message shape containing the interleaved dimension
            size.
        codec: A named tuple object containing functions for push and pop
            operations, as well as an initial state for the CDF functions (for
            context-adaptive coding) and a data type specification for the
            message.
        cdf_state: The initialization state of the inverse CDF function
            (contains the CDF array, or can be used for conditional
            probabilities).

    Returns:
        A 3-tuple containing:
            The compressed data (and tail pointer) after removing the target
            message.
            The decoded messages of size ``(message_len, *message_shape)``.
            The final CDF state.
    """
    message = jnp.zeros((message_len, *message_shape), dtype=codec.message_dtype)

    def pop_one_symbol(msg_index, vals):
        return codec.pop(msg_index, *vals)

    result = lax.fori_loop(
        0,
        message_len,
        pop_one_symbol,
        (
            array_to_craymessage(compressed_message, message_shape, tail_limit),
            message,
            cdf_state,
        ),
    )
    return craymessage_to_array(result[0]), result[1], result[2]
648cb4af4ddaaec01e5d5997e8698aad6acd4c01
3,636,938
def subtask1_eval(_answers, _ref):
    """
    Scoring function for subtask 1.

    :param _answers: submitted answers.
    :param _ref: reference answers.
    :return: statistics object.
    """
    _map = {
        '11': 'TP',
        '00': 'TN',
        '10': 'FN',
        '01': 'FP',
    }
    _st = {
        'TP': 0,
        'TN': 0,
        'FN': 0,
        'FP': 0,
    }
    for _k, _v in _ref.items():
        _ga = int(_v)
        _aa = int(_answers[_k]) if _k in _answers else 0
        _st[_map[f"{_ga}{_aa}"]] += 1
    _st['Accuracy'] = (_st['TP'] + _st['TN']) / (_st['TP'] + _st['FP'] + _st['FN'] + _st['TN'])
    return _st
7249992f70b67928a99e96c7877e5ef4be261429
3,636,939
def render_horizontal_fields(*fields_to_render, **kwargs):
    """Render given fields with optional labels"""
    labels = kwargs.get('labels', True)
    media = kwargs.get('media')
    hidden_fields = []
    visible_fields = []
    for bound_field in fields_to_render:
        if bound_field.field.widget.is_hidden:
            hidden_fields.append(bound_field)
        else:
            visible_fields.append(bound_field)
    return {
        'fields_to_render': fields_to_render,
        'hidden_fields': hidden_fields,
        'visible_fields': visible_fields,
        'labels': labels,
        'media': media,
    }
22ac9c05b602c0f65ab2fc348ab9399855780bc3
3,636,940
def get_darwin_memory():
    """Use system-call to extract total memory on macOS"""
    system_output = sabnzbd.newsunpack.run_simple(["sysctl", "hw.memsize"])
    return float(system_output.split()[1])
1458881c61cdb5b765c4c56fa494ff7c6f06c49b
3,636,941
from datetime import datetime

def parseTextModeTimeStr(timeStr):
    """ Parses the specified SMS text mode time string

    The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
    (yy = year, MM = month, dd = day, hh = hour, mm = minute, ss = second,
    zz = time zone [Note: the unit of time zone is a quarter of an hour])

    @param timeStr: The time string to parse
    @type timeStr: str

    @return: datetime object representing the specified time string
    @rtype: datetime.datetime
    """
    msgTime = timeStr[:-3]
    tzOffsetHours = int(int(timeStr[-3:]) * 0.25)
    return datetime.strptime(msgTime, '%y/%m/%d,%H:%M:%S').replace(tzinfo=SimpleOffsetTzInfo(tzOffsetHours))
52a45116a2b0153595161f94fda38129ddd59b3a
3,636,942
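A self-contained sketch of the same parsing using only the standard library (SimpleOffsetTzInfo above is project-specific). Note this version keeps the offset in minutes, so half-hour zones survive, whereas the snippet's int() truncates to whole hours; the example value is illustrative:

from datetime import datetime, timedelta, timezone

def parse_sms_time(time_str):
    # "yy/MM/dd,hh:mm:ss±zz", zz in quarter-hours; int() handles the leading +/-
    quarter_hours = int(time_str[-3:])
    tz = timezone(timedelta(minutes=15 * quarter_hours))
    return datetime.strptime(time_str[:-3], '%y/%m/%d,%H:%M:%S').replace(tzinfo=tz)

print(parse_sms_time('21/03/05,14:30:00+08'))  # 8 quarter-hours -> UTC+2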
import numpy as np
import torch
from torch import Tensor

def angle_to_rotation_matrix(angle) -> Tensor:
    """
    Creates a rotation matrix out of angles in degrees

    Args:
        angle: (Tensor): tensor of angles in degrees, any shape.

    Returns:
        Tensor: tensor of *x2x2 rotation matrices.

    Shape:
        - Input: :math:`(*)`
        - Output: :math:`(*, 2, 2)`

    Examples:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = angle_to_rotation_matrix(input)  # Nx3x2x2
    """
    ang_rad = angle * np.pi / 180
    cos_a = torch.cos(ang_rad)
    sin_a = torch.sin(ang_rad)
    return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)
9b88eaa0277d0c3ad672e94e4d41ec45ebe0b272
3,636,943
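A quick runnable check of the stack-and-view construction used above: every 2x2 rotation matrix it builds should be orthogonal (R @ R.T = I). The angles below are illustrative:

import torch

angle = torch.tensor([0.0, 90.0])
rad = torch.deg2rad(angle)
cos_a, sin_a = torch.cos(rad), torch.sin(rad)
rot = torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)
# batched matmul against the batched transpose should give identity matrices
eye = torch.eye(2).expand(2, 2, 2)
assert torch.allclose(rot @ rot.transpose(-1, -2), eye, atol=1e-6)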
def extract_text():
    """Extracts text from an HTML document."""
    html = request.form['html']
    article = Article(html)
    try:
        return article.text
    except AttributeError as e:
        # NOTE: When a parsing error occurs, an AttributeError is raised.
        # We'll deal with this exception later.
        log.warning(e)
        return ''
8efc10539462ab51715b54b17a018e5f296496eb
3,636,944
import json
import time

def get_new_account_id(event):
    """Return account id for new account events."""
    create_account_status_id = (
        event["detail"]
        .get("responseElements", {})
        .get("createAccountStatus", {})["id"]  # fmt: no
    )
    log.info("createAccountStatus = %s", create_account_status_id)
    org = boto3.client("organizations")
    while True:
        account_status = org.describe_create_account_status(
            CreateAccountRequestId=create_account_status_id
        )
        state = account_status["CreateAccountStatus"]["State"].upper()
        if state == "SUCCEEDED":
            return account_status["CreateAccountStatus"]["AccountId"]
        elif state == "FAILED":
            log.error("Account creation failed:\n%s", json.dumps(account_status))
            raise AccountCreationFailedException
        else:
            log.info(
                "Account state: %s. Sleeping 5 seconds and will try again...", state
            )
            time.sleep(5)
4433b080b24d1a7ad276541103e55acf7bbfa137
3,636,945
from typing import List

def lag_indexes(tf_stat) -> List[pd.Series]:
    """
    Calculates indexes for backward month lags over the given date range.

    :param tf_stat: dict with a 'days' entry whose first and last elements
        bound the date range
    :return: List of Series, one for each lag (here 1 and 2 months). For each
        Series, the index is a date in the range, the value is the index of the
        target (lagged) date in the same Series. If the target date is out of
        range, the value is -1.
    """
    date_range = pd.date_range(tf_stat['days'][0], tf_stat['days'][-1])
    # key is date, value is day index
    base_index = pd.Series(np.arange(0, len(date_range)), index=date_range)

    def lag(offset):
        dates = date_range - offset
        return pd.Series(data=base_index[dates].fillna(-1).astype(np.int16).values,
                         index=date_range)

    return [lag(pd.DateOffset(months=m)) for m in (1, 2)]
de8d355d213146013eb4720860dd844d22ccab45
3,636,946
def weather_outfit(req):
    """Returns a string containing text with a response to the user with an
    indication of whether the outfit provided is appropriate for the current
    weather, or a prompt for more information

    Takes a city, outfit and (optional) dates
    uses the template responses found in weather_responses.py as templates
    and the outfits listed in weather_entities.py
    """
    # validate request parameters, return an error if there are issues
    error, forecast_params = validate_params(req['queryResult']['parameters'])
    if error:
        return error
    # Validate that there are the required parameters to retrieve a forecast
    if not forecast_params['outfit']:
        return 'What are you planning on wearing?'
    # create a forecast object which retrieves the forecast from an external API
    try:
        forecast = Forecast(forecast_params)
    # return an error if there is an error getting the forecast
    except (ValueError, IOError) as error:
        return error
    return forecast.get_outfit_response()
ee5b3cd3ed10062155bbce532343ef51f9a83177
3,636,947
from sentence_splitter import SentenceSplitter

def parse_paragraphs(record):
    """ parse paragraphs into sentences, returns list """
    splitter = SentenceSplitter(language='en')
    sentences = splitter.split(record['value'])
    article_id = remove_prefix(record['key'], 'paragraphs:')
    pre = 'sentence:' + article_id
    l = [{'key': f'{pre}', 'idx': f'{idx}', 'value': sentence}
         for idx, sentence in enumerate(sentences)]
    return l
9a8cce4692af5e61b9f01becd8dafa9234c08f17
3,636,948
def get_stage_environment() -> str:
    """
    Indicates whether the source is running as PRD or DEV.
    Accounts for the user preference via TEST_WORKING_STAGE.

    :return: One of the STAGE_* constants.
    """
    return TEST_WORKING_STAGE
1c2e14132af1760a13aae268b5179e70c79f5df5
3,636,949
def get_all_table_acls(conn, schema=None):
    """Get privileges for all tables, views, materialized views, and foreign tables.

    Specify `schema` to limit the results to that schema.

    Returns:
        List of :class:`~.types.SchemaRelationInfo` objects.
    """
    stmt = _table_stmt(schema=schema)
    return [SchemaRelationInfo(**row) for row in conn.execute(stmt)]
9067a614197d19c3256828b2a8dbb491bede0fe6
3,636,950
def add_atom_map(molecule, **kwargs):
    """
    Add canonical ordered atom map to molecule

    Parameters
    ----------
    molecule : `oechem.OEMol` or `rdkit.Chem.Mol`

    Returns
    -------
    molecule with map indices
    """
    toolkit = _set_toolkit(molecule)
    return toolkit.add_atom_map(molecule, **kwargs)
584324aae018f211fc31c9f727687e9a6971822d
3,636,951
from typing import Any

def build_put_dictionary_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
    """Put External Resource as a Dictionary.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this
    request builder into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the
     template in our example to find the input shape. External Resource as a
     Dictionary to put.
    :paramtype json: any
    :keyword content: Pass in binary content you want in the body of the request
     (typically bytes, a byte iterator, or stream input). External Resource as a
     Dictionary to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the
     client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart
     for how to incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            json = {
                "str": {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "p.name": "str",  # Optional.
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str",  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted",
                          "Creating", "Created", "Updating", "Updated", "Deleting",
                          "Deleted", "OK".
                        "type": "str"  # Optional.
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
            }
    """
    content_type = kwargs.pop("content_type", None)  # type: Optional[str]
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", "/model-flatten/dictionary")

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, content=content, **kwargs)
045c00835d592d777a155696bda76a5ecb12aa6f
3,636,952
def midpoint(rooms):
    """
    Helper function to find the midpoint between the two rooms.

    Args:
        rooms: list of room corner coordinates [x1, y1, x2, y2]

    Returns:
        tuple: Midpoint (x, y)
    """
    # assumes rooms holds the two corner coordinates [x1, y1, x2, y2];
    # the original had a spurious leading rooms[0] + / rooms[1] + term
    return (rooms[0] + rooms[2]) // 2, (rooms[1] + rooms[3]) // 2
60b3ba53fb15154ff97ab9c6fa3cf1b726bc2df1
3,636,953
def secondSolution(fixed, c1, c2, c3):
    """
    If given four tangent circles, calculate the other one that is tangent
    to the last three.

    @param fixed: The fixed circle touches the other three, but not the one
                  to be calculated.
    @param c1, c2, c3: Three circles to which the other tangent circle is to
                       be calculated.

    @type fixed: L{Circle}
    @type c1: L{Circle}
    @type c2: L{Circle}
    @type c3: L{Circle}

    @return: The circle.
    @rtype: L{Circle}
    """
    curf = fixed.curvature()
    cur1 = c1.curvature()
    cur2 = c2.curvature()
    cur3 = c3.curvature()

    curn = 2 * (cur1 + cur2 + cur3) - curf
    mn = (2 * (cur1*c1.m + cur2*c2.m + cur3*c3.m) - curf*fixed.m) / curn
    return Circle(mn.real, mn.imag, 1/curn)
a5f7545a3c4600e29bfdb9c516ede6ba244894c3
3,636,954
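The curvature step in the snippet above is Descartes' circle theorem: the two solutions k4, k4' of k4 = k1 + k2 + k3 ± 2*sqrt(k1*k2 + k2*k3 + k3*k1) satisfy k4 + k4' = 2*(k1 + k2 + k3), which is exactly the curn = 2*(cur1 + cur2 + cur3) - curf relation. A small numeric check with an illustrative configuration (outer circle of curvature -1 plus two inscribed circles of curvature 2):

import math

k1, k2, k3 = -1.0, 2.0, 2.0
s = k1 + k2 + k3
root = 2 * math.sqrt(k1*k2 + k2*k3 + k3*k1)
k4a, k4b = s + root, s - root
assert math.isclose(k4a + k4b, 2 * s)  # matches the snippet's curvature relation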
import random

def generate_concept_chain(concept_desc, sequential):
    """
    Given a list of available concepts, generate a chain of concept IDs
    giving the order in which concepts appear, along with the total number
    of samples.

    Parameters
    ----------
    sequential: bool
        If true, concept transitions are determined by ID without randomness.
    """
    concept_chain = []
    num_samples = 0
    more_appearences = True
    appearence = 0
    while more_appearences:
        concepts_still_to_appear = []
        for cID in concept_desc:
            concept = concept_desc[cID]
            if concept.appearences > appearence:
                concepts_still_to_appear.append(concept)
        more_appearences = len(concepts_still_to_appear) > 0
        for concept in concepts_still_to_appear:
            concept_chain.append(concept.id)
            num_samples += concept.examples_per_appearence
        appearence += 1
    if not sequential:
        random.shuffle(concept_chain)
    return concept_chain, num_samples
fcfeb345d92d627684d04da4c1d445120554bf15
3,636,955
def get_inputs(input_queue,
               num_classes,
               merge_multiple_label_boxes=False,
               use_multiclass_scores=False):
    """Dequeues batch and constructs inputs to object detection model.

    Args:
      input_queue: BatchQueue object holding enqueued tensor_dicts.
      num_classes: Number of classes.
      merge_multiple_label_boxes: Whether to merge boxes with multiple labels
        or not. Defaults to false. Merged boxes are represented with a single
        box and a k-hot encoding of the multiple labels associated with the
        boxes.
      use_multiclass_scores: Whether to use multiclass scores instead of
        groundtruth_classes.

    Returns:
      images: a list of 3-D float tensor of images.
      image_keys: a list of string keys for the images.
      locations_list: a list of tensors of shape [num_boxes, 4] containing
        the corners of the groundtruth boxes.
      classes_list: a list of padded one-hot (or K-hot) float32 tensors
        containing target classes.
      masks_list: a list of 3-D float tensors of shape
        [num_boxes, image_height, image_width] containing instance masks for
        objects if present in the input_queue. Else returns None.
      keypoints_list: a list of 3-D float tensors of shape
        [num_boxes, num_keypoints, 2] containing keypoints for objects if
        present in the input queue. Else returns None.
      weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
        containing groundtruth weight for each box.
    """
    read_data_list = input_queue.dequeue()
    label_id_offset = 1

    def extract_images_and_targets(read_data):
        """Extract images and targets from the input dict."""
        image = read_data[fields.InputDataFields.image]
        key = ''
        if fields.InputDataFields.source_id in read_data:
            key = read_data[fields.InputDataFields.source_id]
        location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
        classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
                             tf.int32)
        classes_gt -= label_id_offset

        if merge_multiple_label_boxes and use_multiclass_scores:
            raise ValueError(
                'Using both merge_multiple_label_boxes and use_multiclass_scores is '
                'not supported')

        if merge_multiple_label_boxes:
            location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
                location_gt, classes_gt, num_classes)
            classes_gt = tf.cast(classes_gt, tf.float32)
        elif use_multiclass_scores:
            classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
                                 tf.float32)
        else:
            classes_gt = util_ops.padded_one_hot_encoding(
                indices=classes_gt, depth=num_classes, left_pad=0)
        masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
        keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
        if (merge_multiple_label_boxes and
                (masks_gt is not None or keypoints_gt is not None)):
            raise NotImplementedError('Multi-label support is only for boxes.')
        weights_gt = read_data.get(fields.InputDataFields.groundtruth_weights)
        return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
                weights_gt)

    return zip(*map(extract_images_and_targets, read_data_list))
96185efe5e1b6ee3064136e052387da0bfb1ddaa
3,636,956
def doFilter(pTable, proxyService):
    """
    filter candidates by column header

    candidates
    - column headers are kept, if they support at least (minSupport * #rows) many cells
    - only filter for columns that are part of the targets (if activated)

    subsequently remove:
    - CTA candidates with less support
    - CEA candidates that do not support any of the remaining CTA candidates of their column
    """
    # keep track, if this changed anything
    changed = False

    # table cols
    cols = pTable.getCols(unsolved=False)

    # process each column separately
    for col in cols:
        if not col['sel_cand']:
            continue

        # check, if we have to process this column at all
        if not pTable.isTarget(col_id=col['col_id']):
            continue

        # grab all cells in this column
        cells = pTable.getCells(col_id=col['col_id'])
        beforeCount = len(cells)

        # get the hierarchy over our candidates
        hierarchy = proxyService.get_hierarchy_for_lst.send([col['sel_cand']['uri']])
        typesSupported = [col['sel_cand']['uri']]
        for parentList in hierarchy.values():
            typesSupported.extend([item['parent'] for item in parentList])
        typesSupported = list(set(typesSupported))

        # purge the candidate lists
        # for cell in cells:
        #     candSupport = {}
        #     for cand in cell['cand']:
        #         candSupport[cand['uri']] = 0
        #         try:
        #             foundTypes = [t for t in cand['types'] if t in typesSupported]
        #             candSupport[cand['uri']] += len(foundTypes)
        #         except KeyError as e:
        #             candSupport[cand['uri']] += 0
        #     # keep cands with highest support only
        #     maxFreq = max([candSupport[uri] for uri in candSupport.keys()])
        #     for cand in cell['cand']:
        #         if candSupport[cand['uri']] < maxFreq:
        #             cell['cand'].remove(cand)

        # purged = []
        # remove all CEA candidates from the cells that are not associated with any remaining type
        for cell in cells:
            # add_purged = []
            # check if the sel_cand is semantically correct;
            # iterate over a copy, since removing from the list while iterating
            # over it directly would silently skip candidates
            for cand in list(cell['cand']):
                try:
                    foundTypes = [t for t in cand['types'] if t in typesSupported]
                    if not foundTypes:
                        # add to purged cells
                        # add_purged.append(cand)
                        cell['cand'].remove(cand)
                except KeyError as e:
                    # print(e)
                    # add_purged.append(cand)
                    cell['cand'].remove(cand)
            # if add_purged:
            #     # update the cell
            #     cell['purged_cand'].extend(add_purged)
            #     # collect purged candidates
            #     purged.extend(add_purged)

        # purge the cell-pair list
        # pTable.purgeCellPairs(purged)

    # done
    return changed
8b28f945e94e37302b2086e23f695c40c08b8d7c
3,636,957
def int_or_float(x):
    """Convert `x` to either `int` or `float`, preferring `int`.

    Raises:
        ValueError : If `x` is not convertible to either `int` or `float`
    """
    try:
        return int(x)
    except ValueError:
        return float(x)
d0a4def320f88655e494f89b7239e47e1ee70d0d
3,636,958
def request_factory():
    """Pytest setup for factory."""
    return RequestFactory()
d6b5710dd42da06f6bb10e23fe3826a6a754228a
3,636,959
def is_onehotencoded(x):
    """If input is a one-hot encoded representation of some set of values.

    Parameters
    ----------
    x : array-like

    Returns
    -------
    bool
        Whether `x` is a one-hot encoded / categorical representation.
    """
    if x.ndim != 2:
        return False
    fractional, integral = np.modf(x)
    if fractional.sum() != 0:
        return False
    if not np.array_equal(integral, integral.astype(bool)):
        return False
    return np.all(integral.sum(axis=1) == 1)
21a023afeec886512ef806c76ade5523817ef350
3,636,960
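A runnable illustration of the three checks the snippet above performs (no fractional parts, only 0/1 entries, exactly one 1 per row); the example matrix is illustrative:

import numpy as np

x = np.eye(4)[[0, 2, 1, 3]]  # a valid one-hot matrix
fractional, integral = np.modf(x)
assert fractional.sum() == 0                            # no fractional parts
assert np.array_equal(integral, integral.astype(bool))  # only 0s and 1s
assert np.all(integral.sum(axis=1) == 1)                # exactly one 1 per row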
def sequence_of_words(fname_doc, dictionary):
    """ Compute Sequence-of-Words from word list and dictionary """
    txtdata = loadtxt(fname_doc)
    words = extract_keyword(txtdata, "all")
    SOW = []
    for i, word in enumerate(words):
        print(word)
        if word in dictionary.keys():
            SOW.append(dictionary[word]["id"])
    SOW = sp.array(SOW)
    return SOW
92aeb61ce91b7149143bfb67905793caee83d3be
3,636,961
def shd(B_est, B_true):
    """Compute various accuracy metrics for B_est.

    true positive = predicted association exists in condition in correct direction
    reverse = predicted association exists in condition in opposite direction
    false positive = predicted association does not exist in condition

    Args:
        B_est (np.ndarray): [d, d] estimate, {0, 1, -1}, -1 is undirected edge in CPDAG
        B_true (np.ndarray): [d, d] ground truth graph, {0, 1}

    Returns:
        fdr: (reverse + false positive) / prediction positive
        tpr: (true positive) / condition positive
        fpr: (reverse + false positive) / condition negative
        shd: undirected extra + undirected missing + reverse
        nnz: prediction positive
    """
    if (B_est == -1).any():  # cpdag
        if not ((B_est == 0) | (B_est == 1) | (B_est == -1)).all():
            raise ValueError('B_est should take value in {0,1,-1}')
        if ((B_est == -1) & (B_est.T == -1)).any():
            raise ValueError('undirected edge should only appear once')
    else:  # dag
        if not ((B_est == 0) | (B_est == 1)).all():
            raise ValueError('B_est should take value in {0,1}')
        # if not is_dag(B_est):
        #     raise ValueError('B_est should be a DAG')
    d = B_true.shape[0]

    # linear index of nonzeros
    pred_und = np.flatnonzero(B_est == -1)
    pred = np.flatnonzero(B_est == 1)
    cond = np.flatnonzero(B_true)
    cond_reversed = np.flatnonzero(B_true.T)
    cond_skeleton = np.concatenate([cond, cond_reversed])

    # true pos
    true_pos = np.intersect1d(pred, cond, assume_unique=True)
    # treat undirected edge favorably
    true_pos_und = np.intersect1d(pred_und, cond_skeleton, assume_unique=True)
    true_pos = np.concatenate([true_pos, true_pos_und])

    # false pos
    false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)
    false_pos_und = np.setdiff1d(pred_und, cond_skeleton, assume_unique=True)
    false_pos = np.concatenate([false_pos, false_pos_und])

    # reverse
    extra = np.setdiff1d(pred, cond, assume_unique=True)
    reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)

    # compute ratio
    pred_size = len(pred) + len(pred_und)
    cond_neg_size = 0.5 * d * (d - 1) - len(cond)
    fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)
    tpr = float(len(true_pos)) / max(len(cond), 1)
    fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)

    # structural hamming distance
    pred_lower = np.flatnonzero(np.tril(B_est + B_est.T))
    cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))
    extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)
    missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)
    shd = len(extra_lower) + len(missing_lower) + len(reverse)
    shd_wc = shd + len(pred_und)
    prc = float(len(true_pos)) / max(float(len(true_pos) + len(reverse) + len(false_pos)), 1.)
    rec = tpr

    return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'prc': prc, 'rec': rec,
            'shd': shd, 'shd_wc': shd_wc, 'nnz': pred_size}
04c1fb44025ae1a3cfd86bc877c68e93027b75fe
3,636,962
def nohighlight(nick):
    """add a ZWNJ to nick to prevent highlight"""
    return nick[0] + "\u200c" + nick[1:]
1b8d0cafc5df4a442daafdece59af1675ab1de33
3,636,964
def _get_r_val(z, omega_m, omega_l):
    """Returns the comoving distance for one z value.

    Parameters
    ----------
    z : float
        Redshift.
    omega_m : float
        Present matter density.
    omega_l : float
        Present dark energy density.
    """
    r, err = integrate.quad(_get_r_integrand, 0., z, args=(omega_m, omega_l))
    r *= 3000.
    return r
4f33eccdf4485c640f5c71808485fbf96a5f7614
3,636,965
def user_response_controller(bank_request, user_response):
    """
    processes the user's response to the bank's request

    :param bank_request: what the bank is currently requesting
    :param user_response: what the user actually wants to do among the
        options in the above bank_request
    """
    user_response = validate_user_input_to_int(user_response)
    if user_response == "error":
        return ['resend_same_bank_request', 'No valid option chosen']
    if user_response >= 1 and user_response <= bank_request.get('available_options'):
        return user_response
    else:
        return ['resend_same_bank_request', 'Selected option not found']
3d959fac84a8460e7ab228127d9b6f0b9cc1a21c
3,636,966
import datetime  # the body uses datetime.datetime.strptime, so import the module

def create_features(datestrs):
    """
    Find the features associated with a set of dates.
    These will include:
        weekday / weekend
        day of week
        season
        month of year

    Parameters
    ----------
    datestrs: list of strings
        Date strings of the format YYYY-MM-DD.

    Returns
    -------
    features: DataFrame
        Each row corresponds to one date. The datestring is the index.
    """
    feature_data = []
    for datestr in datestrs:
        current_date = datetime.datetime.strptime(datestr, '%Y-%m-%d').date()
        current_weekday = current_date.weekday()
        day_of_week = np.zeros(7)
        day_of_week[current_weekday] = 1
        current_month = current_date.month
        month_of_year = np.zeros(12)
        # Adjust months to January = 0
        month_of_year[current_month - 1] = 1
        # Season 0 = winter, 1 = spring, 2 = summer, 3 = autumn
        season = np.zeros(4)
        if current_month <= 2:
            season[0] = 1
        elif current_month <= 5:
            season[1] = 1
        elif current_month <= 8:
            season[2] = 1
        elif current_month <= 11:
            season[3] = 1
        else:
            season[0] = 1
        feature_set = {
            'Saturday': day_of_week[5],
            'Sunday': day_of_week[6],
            'winter': season[0],
            'spring': season[1],
            'summer': season[2],
            'autumn': season[3],
        }
        feature_data.append(feature_set)

    features_df = pd.DataFrame(data=feature_data, index=datestrs)
    return features_df
75a72a54396150ed73ea43b3390994b1a41d2cf5
3,636,967
import inspect

def obj_src(py_obj, escape_docstring=True):
    """Get the source for the python object that gets passed in

    Parameters
    ----------
    py_obj : callable
        Any python object

    escape_docstring : bool
        If true, prepend the escape character to the docstring triple quotes

    Returns
    -------
    str
        Source code

    Raises
    ------
    IOError
        Raised if the source code cannot be retrieved
    """
    src = inspect.getsource(py_obj)
    if escape_docstring:
        # str.replace returns a new string; the original discarded the result
        src = src.replace("'''", "\\'''")
        src = src.replace('"""', '\\"""')
    return src
    # return src.split('\n')
8ce0c7cc7672de5005b5a1c60e6b6cf5fa9ee050
3,636,968
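A minimal demonstration of the pattern above, including the str.replace pitfall that the fix addresses (the greet function is illustrative; inspect.getsource needs the object to come from a source file, so run this as a script):

import inspect

def greet():
    """say hi"""
    return "hi"

src = inspect.getsource(greet)
src = src.replace('"""', '\\"""')  # must reassign: strings are immutable
print(src)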
def get_back_button_handler(current_panel: "GenericPanel") -> CallbackQueryHandler:
    """
    returns a Handler for BACK_PATTERN that returns the user to current_panel

    :param GenericPanel current_panel: the destination panel
    :return: a CallbackQueryHandler for BACK_PATTERN that returns the user to current_panel
    """
    return CallbackQueryHandler(current_panel.prompt, pattern=Globals.BACK_PATTERN)
365e37b3d362afa31d231613180070be69ac7972
3,636,969
from typing import Optional

def openocd_prog_path(request: FixtureRequest) -> Optional[str]:
    """Enable parametrization for the same cli option"""
    return _request_param_or_config_option_or_default(request, 'openocd_prog_path', None)
f3628427bde73d7e26e5ed30e103d4ba36df7c1b
3,636,970
def reindexMatrix(iss, jss, A):
    """iss and jss are lists of indices of equal size, representing a
    permutation: iss[i] is replaced with jss[i]. All other indices which are
    not in the lists are left unchanged.
    """
    n = len(A)
    B = np.zeros_like(A)
    tss = [i for i in range(n)]
    for i in range(len(iss)):
        tss[iss[i]] = jss[i]
    print(tss)
    for i in range(n):
        for j in range(n):
            B[i, j] = A[tss[i], tss[j]]
    return B
9c36802d7e5f35ca6789d49e47d8124bc4f74c57
3,636,971
def createInfoMatix(character_id):
    """Initialize the battle formation for a newly created character."""
    petlist = getCharacterPetList(character_id)
    sql = "INSERT INTO `tb_character_matrix`(`characterId`,`eyes_4`,`eyes_5`,`eyes_6`) \
        VALUES(%d,%d,%d,%d);" % (character_id, petlist[0], petlist[1], petlist[2])
    conn = dbpool.connection()
    cursor = conn.cursor()
    count = cursor.execute(sql)
    conn.commit()
    cursor.close()
    conn.close()
    if count >= 1:
        return True
    return False
7392f899ed8b46fd35ed360601edd8621aace7ac
3,636,972
def help():
    """<b>Print available functions as json.<br>"""
    func_list = {}
    for rule in app.url_map.iter_rules():
        if rule.endpoint != 'static':
            func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__
    return jsonify(func_list)
2ef2193aaa9d882b238a7681cb3e868690a58398
3,636,973
def version():
    """
    Returns the name, version and api_version of the application
    when a HTTP GET request is made.
    """
    return jsonify(
        name='openshift-python-flask-sample',
        version=VERSION
    )
70686195978cf9d26e2d4cd954c81fc216d7bd4d
3,636,975
import json

def search_quotes(request, currency):
    """ Query the API for quotes whose name contains the 'currency' field """
    # check whether the search bar was filled in or left empty
    if currency:
        conn.request("GET", "/auto-complete?q=" + currency + "&region=BR", headers=headers)
        res = conn.getresponse()
        data = res.read()
        api_quotes = json.loads(data.decode("utf-8"))['quotes']
        # check whether the user wants to search only on B3 or globally,
        # using the selector on the page
        if 'onlyB3' in request.GET:
            if request.GET['onlyB3'] == '1':
                SA_quotes = [
                    {key: quote[key] for key in quote}
                    for quote in api_quotes
                    if quote['exchange'] == 'SAO' or quote['symbol'].endswith('.SA')
                ]
                return {'quotes': SA_quotes}
        return {'quotes': api_quotes}
    else:
        # if the search is empty, return an empty dictionary
        return {'quotes': {}}
926f29d802a7bb6a9681b3b90fd46966894a0604
3,636,976
def destroy(N, dtype=tf.complex64):
    """Returns a destruction (lowering) operator in the Fock basis.

    Args:
        N (int): Dimension of Hilbert space
        dtype (tf.dtypes.DType, optional): Returned dtype. Defaults to tf.complex64.

    Returns:
        Tensor([N, N], dtype): NxN annihilation (lowering) operator
    """
    a = diag(tf.sqrt(tf.range(1, N, dtype=tf.float64)), k=1)
    return tf.cast(a, dtype=dtype)
a92ef2cc5aa9b7bbe2c0cf109282c5fde56d4603
3,636,977
from typing import Optional
from typing import Any

def get_nearest_operation(
    db: Redis[bytes], address: hash_t, subdag: Optional[str] = None
) -> Optional[Operation]:
    """Return the operation at address or the operation generating address."""
    root = "root"
    art = None
    try:
        node = Operation.grab(db, address)
        return node
    except RuntimeError:
        # one possibility is that address is an artefact...
        try:
            art = Artefact[Any].grab(db, address)
        except RuntimeError:
            raise RuntimeError(
                f"address {address} neither a valid operation nor a valid artefact."
            )

    if art.parent == root:
        # We have basically just a single artefact as the network...
        return None

    node = Operation.grab(db, art.parent)
    return node
a402ed795d60f321cd362517e9350994be836cdd
3,636,978
def load_CIFAR_batch(file_path):
    """ load single batch of cifar """
    data_dict = load_pickle(file_path)
    data = data_dict['data']
    labels = data_dict['labels']
    data = data.reshape(10000, 3, 32, 32).astype("float")
    labels = np.array(labels)
    return data, labels
0164293fb2f31e7361da5a817c64899db96c6156
3,636,979
def _disposable_and_async_gen_from_obs(obs: Observable):
    """
    Compatibility layer for legacy Observable to async generator

    This should be removed and subscription resolvers changed to
    return async generators after removal of flask & gevent based dagit.
    """
    queue: Queue = Queue()
    disposable = obs.subscribe(on_next=queue.put_nowait)

    async def async_gen():
        while True:
            i = await queue.get()
            yield i

    return disposable, async_gen()
ed0620b3615a36e82c20789f6a3b40aa6ae61410
3,636,980
def interesting_pattern(x: float, y: float) -> float:
    """This function is interesting for x and y in range -10..10,
    returning a float value in range 0..1
    """
    z = 0.5 + (np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)) / 2
    return z
432e13324b1834cbdd62259f0ac0b59751008f90
3,636,981
def interp_road(d, croad, roads, intersections, normD=False):
    """ Get the position of a point along a road """
    start_int = roads[croad]['start_int']
    start_pos = intersections[start_int]['position']
    end_int = roads[croad]['end_int']
    end_pos = intersections[end_int]['position']
    if not normD:
        length = road_length(croad, roads, intersections)
    if 'type' not in roads[croad] or roads[croad]['type'] == 'line':
        if normD:
            alpha = d
        else:
            alpha = d/length
        return (1.0-alpha)*start_pos + alpha*end_pos
    else:
        C = roads[croad]['center'].reshape(2)
        startR = np.sqrt(np.sum(np.power(C - start_pos, 2.0)))
        endR = np.sqrt(np.sum(np.power(C - end_pos, 2.0)))
        r = 0.5*(startR + endR)
        startTheta = np.arctan2(start_pos[1] - C[1], start_pos[0] - C[0])
        if startTheta < 0:
            startTheta += 2.0*np.pi
        endTheta = np.arctan2(end_pos[1] - C[1], end_pos[0] - C[0])
        if endTheta < 0:
            endTheta += 2.0*np.pi
        if roads[croad]['turn_direction'] < 0 and endTheta > startTheta:
            startTheta += 2.0*np.pi
        elif roads[croad]['turn_direction'] > 0 and endTheta < startTheta:
            endTheta += 2.0*np.pi
        # startTheta = roads[croad]['start_theta']
        # endTheta = roads[croad]['end_theta']
        if normD:
            curr_theta = (1.0-d)*startTheta + d*endTheta
        else:
            curr_theta = startTheta + (endTheta - startTheta)*(d/length)
        return C + r*np.array([np.cos(curr_theta), np.sin(curr_theta)]).reshape(2)
f6406dbb586ba2870d95f627f18085ec12c3b64b
3,636,983
def zero_pad2d(inputs, padding=0, output_dtype="float32", requires_grad=False):
    """
    Zero padding for 2d tensor

    Args:
    -----------------------------
    inputs : Tensor
        shape [batch, channel, height, width]
    padding: (optional:0) int or tuple
        expected: (h_pad_up, h_pad_down, w_pad_up, w_pad_down)
    output_dtype : str
    requires_grad : bool
    -----------------------------

    Returns:
    -----------------------------
    Tensor
        shape [batch, channel, padded_height, padded_width]
    -----------------------------
    """
    padding = (padding, padding, padding, padding) if isinstance(
        padding, (int, tvm.tir.IntImm)) else padding
    assert isinstance(padding, tuple), "type(padding)={}".format(type(padding))
    if len(padding) == 2:
        padding = (padding[0], padding[0], padding[1], padding[1])
    assert len(padding) == 4

    if all([padding[i] == 0 for i in range(len(padding))]):
        return inputs

    batch_size, in_channel, height, width = inputs.shape
    padded_shape = (batch_size, in_channel,
                    height + padding[0] + padding[1],
                    width + padding[2] + padding[3])
    padding_value = tvm.tir.expr.const(0, output_dtype)

    def _inner_zero_pad2d(inputs):
        def _for_spatial(b, c, h, w):
            def _for_reduce():
                return tvm.te.if_then_else(
                    tvm.te.all(h >= padding[0], h < height + padding[0],
                               w >= padding[2], w < width + padding[2]),
                    inputs[b, c, h - padding[0], w - padding[2]],
                    padding_value
                )
            return _for_reduce, [], "none"
        return _for_spatial

    return Compute(padded_shape, output_dtype, inputs,
                   fhint=_inner_zero_pad2d, name="zero_pad2d",
                   requires_grad=requires_grad)
77ae8065f6e1c3b181a6bb49bd84ae4951848d7b
3,636,984
def gtfs_admin(request):
    """admin page for adding new review categories
    (and potentially other features down the road)"""
    return render(request, 'admin/gtfs_admin.html')
14fccf4c1a8758fa223133f6e191860b6aee01a9
3,636,985
def get_file_path():
    """
    Get current file's directory.
    Return `None` if there is no file path available.
    """
    try:
        file_path = sublime.active_window().extract_variables()['file_path']
    except KeyError:
        return None
    else:
        return file_path
0f991da4edf82435260aad443a4b506d1e2a5453
3,636,986
import random

def mutate_word(word):
    """Introduce a random change into the word: delete, swap, repeat,
    and add stray character. This may raise a ValueError.
    """
    word = list(word)
    choice = random.randrange(4)
    if choice == 0:
        # Delete a character
        word.pop(random.randrange(len(word)))
    elif choice == 1:
        # Swap two characters
        index = random.randrange(0, len(word) - 1)
        word[index], word[index + 1] = word[index + 1], word[index]
    elif choice == 2:
        # Repeat a character
        index = random.randrange(0, len(word))
        word.insert(index, word[index])
    elif choice == 3:
        # Insert a stray character
        char = chr(random.randint(ord('a'), ord('z')))
        word.insert(random.randint(0, len(word)), char)
    return ''.join(word)
f3b45f36893a7541131710ada5f1343387f06797
3,636,987
def data_layer_property_from_dict(data_layer_property_dictionary: dict,
                                  client: cl.Client = None):
    """
    The method converts a dictionary of DataLayerProperty to a DataLayerProperty object.

    :param data_layer_property_dictionary: A dictionary that contains the keys of a DataLayerProperty.
    :type data_layer_property_dictionary: dict
    :param client: An IBM PAIRS client.
    :type client: ibmpairs.client.Client
    :rtype: ibmpairs.catalog.DataLayerProperty
    :raises Exception: If not a dict.
    """
    data_layer_property = DataLayerProperty.from_dict(data_layer_property_dictionary)
    cli = common.set_client(input_client=client,
                            global_client=cl.GLOBAL_PAIRS_CLIENT)
    data_layer_property.client = cli
    return data_layer_property
ab579c1d6527abb176cd05c81d89fb1a74af50b0
3,636,988
def pcc_vector(v1, v2):
    """Pearson Correlation Coefficient for 2 vectors"""
    len1 = len(v1)
    len2 = len(v2)
    if len1 != len2:
        return None
    else:
        length = len1
        avg1 = 1.0 * sum(v1) / len(v1)
        avg2 = 1.0 * sum(v2) / len(v2)
        dxy = [(v1[i] - avg1) * (v2[i] - avg2) for i in range(length)]
        dx2 = [(v1[i] - avg1) ** 2 for i in range(length)]
        dy2 = [(v2[i] - avg2) ** 2 for i in range(length)]
        return sum(dxy) / (sum(dx2) * sum(dy2)) ** 0.5
98e5f3cc304a5d844be479d65ab7eeb760a34ba3
3,636,989
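A self-contained check that the formula in the snippet above agrees with numpy's reference implementation (the test vectors are illustrative):

import numpy as np

v1 = np.array([1.0, 2.0, 3.0, 4.0])
v2 = np.array([2.0, 4.0, 5.0, 9.0])
d1, d2 = v1 - v1.mean(), v2 - v2.mean()
pcc = np.sum(d1 * d2) / np.sqrt(np.sum(d1 ** 2) * np.sum(d2 ** 2))
assert np.isclose(pcc, np.corrcoef(v1, v2)[0, 1])  # matches np.corrcoef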
from io import StringIO

def cypher_repr(obj):
    """ Generate the Cypher representation of an object.
    """
    string = StringIO()
    writer = CypherWriter(string)
    writer.write(obj)
    return string.getvalue()
eae9e848076a4626a001e70b9cd925734864b3ae
3,636,990
def firstlastmile_pipeline(**kwargs):
    """The first and last mile pipeline attaches any unattached elements
    to ensure a fully-connected graph"""

    tags = ['flmile']

    firstmile_nodes = [
        node(
            firstmile_edge,
            ['sjoin_oilfields_data', 'sjoin_edges_pipelines_oilfields', 'sjoin_ports_data', 'sjoin_cities_data', 'sjoin_pipelines_data'],
            'flmile_edges_oilfields',
            tags=tags + ['firstmile', 'firstmile_oilfields']
        ),  # assets, existing_edges, closest port, city, [pipeline/railway]
        node(
            firstmile_edge,
            ['sjoin_oilwells_data', 'sjoin_edges_pipelines_oilwells', 'sjoin_ports_data', 'sjoin_cities_data', 'sjoin_pipelines_data'],
            'flmile_edges_oilwells',
            tags=tags + ['firstmile', 'firstmile_oilwells']
        ),  # assets, existing_edges, closest port, city, [pipeline/railway]
        node(
            firstmile_edge,
            ['sjoin_coalmines_data', 'sjoin_edges_railways_coalmines', 'sjoin_ports_data', 'sjoin_cities_data', 'sjoin_railways_data'],
            'flmile_edges_coalmines',
            tags=tags + ['firstmile', 'firstmile_coalmines']
        ),  # assets, existing_edges, closest port, city, [pipeline/railway]
    ]

    lastmile_nodes = [
        node(
            powerstations_lastmile,
            ['sjoin_powerstations_data', 'sjoin_edges_pipelines_powerstations', 'sjoin_edges_railways_powerstations', 'sjoin_railways_data', 'sjoin_pipelines_data', 'sjoin_ports_data', 'sjoin_cities_data'],
            # powerstations, ps_edges_pipelines, ps_edges_railways, railways, pipelines, ports, cities
            'flmile_edges_powerstations',
            tags=tags + ['lastmile', 'lastmile_powerstations']
        )
    ]

    lastmile_nodes += [
        node(
            cities_delauney,
            ['sjoin_cities_data', 'ne'],
            'flmile_edges_cities',
            tags=tags + ['lastmile', 'lastmile_cities']
        ),
        node(
            shippingroutes_lastmile,
            ['sjoin_edges_shippingroutes_ports', 'sjoin_shippingroutes_data', 'sjoin_ports_data'],
            'flmile_edges_shippingroutes_ports',
            tags=tags + ['lastmile', 'lastmile_shippingroutes', 'lastmile_shippingroutes_ports']
        ),
        node(
            shippingroutes_lastmile,
            ['sjoin_edges_shippingroutes_lngterminals', 'sjoin_shippingroutes_data', 'sjoin_lngterminals_data'],
            'flmile_edges_shippingroutes_lngterminals',
            tags=tags + ['lastmile', 'lastmile_shippingroutes', 'lastmile_shippingroutes_lng']
        ),
    ]

    IDL_nodes = [
        node(
            connect_IDL,
            'sjoin_shippingroutes_data',
            'flmile_idl_edges',
            tags=tags + ['flmile_idl']
        )
    ]

    null_nodes = [node(null_forward, f'sjoin_{sector}_data', f'flmile_{sector}_data',
                       tags=tags + ['flm_null', f'flm_null_{sector}'])
                  for sector in ALL_SECTORS]
    null_nodes += [node(null_forward, f'sjoin_edges_{sector1}_{sector2}', f'flmile_edges_{sector1}_{sector2}',
                        tags=tags + ['flm_null', f'flm_null_{sector1}_{sector2}'])
                   for sector1, sector2 in SJOIN_PAIRS if sector1 != 'shippingroutes']

    return Pipeline(firstmile_nodes + lastmile_nodes + IDL_nodes + null_nodes)
545fda88458fb0266b0f4f98791de83759ba96f5
3,636,991
def photo_new(request, cast: Cast):
    """ Add a new Photo to a cast """
    if request.method == 'POST':
        form = CastPhotoForm(request.POST, request.FILES)
        if form.is_valid():
            photo = form.save(commit=False)
            photo.cast = cast
            photo.save()
            messages.success(request, 'Photo has been added')
            return redirect('cast_photo_detail', slug=cast.slug, pk=photo.pk)
    else:
        form = CastPhotoForm()
    return render(request, 'castadmin/photo_edit.html', {
        'cast': cast,
        'form': form,
    })
cf9aac5f0ea49e48e571d89227c69f8ff382162a
3,636,992
def decode(argument: str) -> tuple[list[int], ...]:
    """Decode argument string from command line

    :param argument: argument string
    :return: pair of lists of digits
    """
    char_lists = map(list, argument.split('-'))
    range_ = tuple(list(map(int, clist)) for clist in char_lists)
    return range_
d3805396cab52fc09896ca9553f1ac3450f27e99
3,636,993
def get_search_apps():
    """Gets all registered search apps."""
    return tuple(_load_search_apps().values())
5287abce0a31e9eb2165aafb8a6cfbaabda85e48
3,636,995
def volume_tetrahedron(
    point_a: array_like, point_b: array_like, point_c: array_like, point_d: array_like
) -> np.float64:
    """
    Return the volume of a tetrahedron defined by four points.

    The points are the vertices of the tetrahedron. They must be 3D or less.

    Parameters
    ----------
    point_a, point_b, point_c, point_d : array_like
        The four vertices of the tetrahedron.

    Returns
    -------
    np.float64
        The volume of the tetrahedron.

    References
    ----------
    http://mathworld.wolfram.com/Tetrahedron.html

    Examples
    --------
    >>> from skspatial.measurement import volume_tetrahedron

    >>> volume_tetrahedron([0, 0], [3, 2], [-3, 5], [1, 8])
    0.0

    >>> volume_tetrahedron([0, 0, 0], [2, 0, 0], [1, 1, 0], [0, 0, 1]).round(3)
    0.333

    >>> volume_tetrahedron([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]).round(3)
    0.167

    """
    vector_ab = Vector.from_points(point_a, point_b)
    vector_ac = Vector.from_points(point_a, point_c)
    vector_ad = Vector.from_points(point_a, point_d)

    vector_cross = vector_ac.cross(vector_ad)

    # Set the dimension to 3 so it matches the cross product.
    vector_ab = vector_ab.set_dimension(3)

    return 1 / 6 * abs(vector_ab.dot(vector_cross))
3369044cfe53762c9bbbf8363da5d385b14b51ba
3,636,996
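The snippet above is the scalar triple product formula V = |(b-a) . ((c-a) x (d-a))| / 6. A plain-numpy sketch of the same computation, checked against the docstring's unit-tetrahedron example:

import numpy as np

def volume_tetrahedron_np(a, b, c, d):
    # V = |(b - a) . ((c - a) x (d - a))| / 6
    a, b, c, d = (np.asarray(p, dtype=float) for p in (a, b, c, d))
    return abs(np.dot(b - a, np.cross(c - a, d - a))) / 6

assert np.isclose(volume_tetrahedron_np([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]), 1 / 6)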
def lemmatizer(word):
    """Returns: lemmatized word if word has length >= 4"""
    if len(word) < 4:
        return word
    return wnl.lemmatize(wnl.lemmatize(word, "n"), "v")
f8e5020b85638464b261e1ec066a141ba4a202a0
3,636,997
def kolmogn(n, x, cdf=True):
    """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.

    The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
    for a sample of size n drawn from a distribution with CDF F(t), where
    D_n = sup_t |F_n(t) - F(t)|, and F_n(t) is the Empirical Cumulative
    Distribution Function of the sample.

    Parameters
    ----------
    n : integer, array_like
        the number of samples
    x : float, array_like
        The K-S statistic, float between 0 and 1
    cdf : bool, optional
        whether to compute the CDF (default=True) or the SF.

    Returns
    -------
    cdf : ndarray
        CDF (or SF if cdf is False) at the specified locations.

    The return value has shape the result of numpy broadcasting n and x.
    """
    # np.float/np.bool were removed from modern numpy; use the builtins
    it = np.nditer([n, x, cdf, None],
                   op_dtypes=[None, float, bool, float])
    for _n, _x, _cdf, z in it:
        if np.isnan(_n):
            z[...] = _n
            continue
        if int(_n) != _n:
            raise ValueError(f'n is not integral: {_n}')
        z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
    result = it.operands[-1]
    return result
132672a1bf45bb0b675c3ce503d47ed4f740184b
3,636,998
from typing import Tuple

def watermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
    """
    Adds watermark to a pdf file.
    """
    result, wm_buffer = create_watermark(wm_text)
    if result:
        wm_reader = PdfFileReader(wm_buffer)
        pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
        pdf_writer = PdfFileWriter()
        try:
            for page in range(pdf_reader.getNumPages()):
                # If required to watermark specific pages not all the document pages
                if pages:
                    if str(page) not in pages:
                        continue
                page = pdf_reader.getPage(page)
                page.mergePage(wm_reader.getPage(0))
                pdf_writer.addPage(page)
        except Exception as e:
            print("Exception = ", e)
            return False, None, None
        return True, pdf_reader, pdf_writer
3fb4d51a88db9c509842ee76b7fee22af30a358d
3,637,000
def wrapper_configuration_get():  # noqa: E501
    """gets configuration details on the current wrapper configuration  # noqa: E501

    :rtype: object
    """
    return 'do some magic!'
85ac6abbf09f93a08295584d7051aad2e8cad8d6
3,637,001
def update_qgs():
    """Generate QGIS project files."""
    try:
        # create ConfigGenerator
        generator = config_generator()
        qgs_writer_log = generator.write_qgs()
        return {
            'message': "Finished writing QGIS project files",
            'log': qgs_writer_log
        }
    except Exception as e:
        return {
            'error': str(e)
        }
1cb7f6f844fc40b611dc49b8f2b5a8de795e04e0
3,637,002
from typing import Tuple
from typing import List
import warnings

def time_evolution_derivatives(
    hamiltonian: pyquil.paulis.PauliSum,
    time: float,
    method: str = "Trotter",
    trotter_order: int = 1,
) -> Tuple[List[circuits.Circuit], List[float]]:
    """Generates derivative circuits for the time evolution operator defined
    in function time_evolution

    Args:
        hamiltonian: The Hamiltonian to be evolved under. It should contain
            numeric coefficients, symbolic expressions aren't supported.
        time: time duration of the evolution.
        method: time evolution method. Currently the only option is 'Trotter'.
        trotter_order: order of Trotter evolution

    Returns:
        A list of derivative circuits and a list of the corresponding factors.
    """
    if method != "Trotter":
        raise ValueError(f"The method {method} is currently not supported.")

    single_trotter_derivatives = []
    factors = [1.0, -1.0]
    output_factors = []

    if isinstance(hamiltonian, QubitOperator):
        terms = list(hamiltonian.get_operators())
    elif isinstance(hamiltonian, pyquil.paulis.PauliSum):
        warnings.warn(
            "PauliSum as an input to time_evolution_derivatives will be deprecated, "
            "please change to QubitOperator instead.",
            DeprecationWarning,
        )
        terms = hamiltonian.terms

    for i, term_1 in enumerate(terms):
        for factor in factors:
            output = circuits.Circuit()

            try:
                if isinstance(term_1, QubitOperator):
                    r = list(term_1.terms.values())[0] / trotter_order
                else:
                    r = complex(term_1.coefficient).real / trotter_order
            except TypeError:
                raise ValueError(
                    "Term coefficients need to be numerical. "
                    f"Offending term: {term_1}"
                )
            output_factors.append(r * factor)
            shift = factor * (np.pi / (4.0 * r))

            for j, term_2 in enumerate(terms):
                output += time_evolution_for_term(
                    term_2,
                    (time + shift) / trotter_order if i == j else time / trotter_order,
                )

            single_trotter_derivatives.append(output)

    if trotter_order > 1:
        output_circuits = []
        final_factors = []

        repeated_circuit = time_evolution(
            hamiltonian, time, method="Trotter", trotter_order=1
        )

        for position in range(trotter_order):
            for factor, different_circuit in zip(
                output_factors, single_trotter_derivatives
            ):
                output_circuits.append(
                    _generate_circuit_sequence(
                        repeated_circuit, different_circuit, trotter_order, position
                    )
                )
                final_factors.append(factor)
        return output_circuits, final_factors
    else:
        return single_trotter_derivatives, output_factors
fe793657d9fa199df174a288f59a390c7787598c
3,637,003
def had_cells_strength(strmfunc, min_plev=None, max_plev=None,
                       lat_str=LAT_STR, lev_str=LEV_STR):
    """Location and signed magnitude of both Hadley cell centers."""
    lat = strmfunc[lat_str]
    # Sometimes the winter Ferrel cell is stronger than the summer Hadley cell.
    # So find the global extremal negative and positive values as well as the
    # opposite-signed cell on either side.  The Hadley cells will be the two of
    # these whose centers are nearest the equator.
    cell_pos_max_strength = had_cell_strength(
        strmfunc, min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    lat_pos_max = cell_pos_max_strength.coords[lat_str]
    cell_south_of_pos_strength = -1*had_cell_strength(
        -1*strmfunc.where(lat < lat_pos_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    cell_north_of_pos_strength = -1*had_cell_strength(
        -1*strmfunc.where(lat > lat_pos_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )

    cell_neg_max_strength = had_cell_strength(
        -1*strmfunc, min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    lat_neg_max = cell_neg_max_strength.coords[lat_str]
    cell_south_of_neg_strength = had_cell_strength(
        strmfunc.where(lat < lat_neg_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    cell_north_of_neg_strength = had_cell_strength(
        strmfunc.where(lat > lat_neg_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )

    # The above procedure generates 6 cells, of which 2 are duplicates.  Now,
    # get rid of the duplicates.
    strengths = [
        cell_pos_max_strength,
        cell_south_of_pos_strength,
        cell_north_of_pos_strength,
        cell_neg_max_strength,
        cell_south_of_neg_strength,
        cell_north_of_neg_strength,
    ]
    cell_strengths = xr.concat(strengths, dim=lat_str, coords=[lev_str])
    dupes = cell_strengths.get_index(lat_str).duplicated()
    cell_strengths = cell_strengths[~dupes]

    # Pick the two cells closest to the equator.
    center_lats = cell_strengths[lat_str]
    hc_strengths = cell_strengths.sortby(np.abs(center_lats))[:2]
    # Order the cells from south to north.
    hc_strengths = hc_strengths.sortby(hc_strengths[lat_str])

    # Create DataArray with one label for each cell, the cell strengths
    # as the values, and the cell center latitudes and levels as coords.
    coords_out = {"cell": ["had_cell_sh", "had_cell_nh"]}
    ds_strengths = xr.Dataset(coords=coords_out)
    arr_lat_center = xr.DataArray(hc_strengths[lat_str].values,
                                  dims=["cell"], coords=coords_out)
    arr_lev_center = xr.DataArray(hc_strengths[lev_str].values,
                                  dims=["cell"], coords=coords_out)
    arr_strength = xr.DataArray(hc_strengths.values,
                                dims=["cell"], coords=coords_out)
    ds_strengths.coords[lat_str] = arr_lat_center
    ds_strengths.coords[lev_str] = arr_lev_center
    ds_strengths["cell_strength"] = arr_strength
    return ds_strengths["cell_strength"]
ba8b4840a3e7e851a7156cd6aed1e3969e362692
3,637,004
def d_enter_waste_cooler(W_mass, rho_waste, w_drift):
    """
    Calculates the tube's diameter of enter waste to waste cooler.

    Parameters
    ----------
    W_mass : float
        The mass flow rate of waste, [kg/s]
    rho_waste : float
        The density of liquid at boiling temperature, [kg/m**3]
    w_drift : float
        The speed of steam at the tube, [m/s]

    Returns
    -------
    d_enter_waste_cooler : float
        The tube's diameter of enter waste to waste cooler, [m]

    References
    ----------
    &&&
    """
    # the original had a decimal-comma typo (0,785), which silently built a tuple
    return W_mass / (0.785 * rho_waste * w_drift)
651c1adc0b90a286c2c8685c389268bc8834ad73
3,637,005
async def finalize_round(request, persistence):
    """Finalize an owned round."""
    game_id = request.match_info['game_id']
    round_name = request.match_info['round_name']
    user_session = await get_session(request)
    if not client_owns_game(game_id, user_session, persistence):
        return json_response({'error': 'The user is not the moderator of this game.'}, status=403)
    try:
        persistence.finalize_round(game_id, round_name)
    except NoSuchRound:
        return json_response({'error': 'Round does not exist.'}, status=404)
    except NoActivePoll:
        return json_response({'error': 'There is no active poll in this round.'}, status=404)
    except RoundFinalized:
        return json_response({'error': 'This round has already been finalized.'}, status=409)
    return json_response({'game': persistence.serialize_game(game_id)})
21c07b35eb366d1ca78a90940bfb85772469683f
3,637,006
def _arrs_to_ds(arrs, names=None): """Combine DataArrays into a single Dataset.""" if names is None: names = [str(n) for n in range(len(arrs))] return xr.Dataset(data_vars=dict(zip(names, arrs)))
5672ba30c43d646a637d1db5735df23f916f012b
3,637,007
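For example, combining two aligned arrays into named dataset variables:

import xarray as xr

a = xr.DataArray([1, 2, 3], dims=["x"])
b = xr.DataArray([4.0, 5.0, 6.0], dims=["x"])
ds = _arrs_to_ds([a, b], names=["counts", "values"])
print(list(ds.data_vars))  # ['counts', 'values']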
from datetime import datetime
import time

# Graphene's expected time format, per the docstring below.
timeFormat = '%Y-%m-%dT%H:%M:%S'


def formatTimeFromNow(secs=0):
    """ Properly format a time that is `x` seconds in the future

        :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`)
        :return: Properly formatted time for Graphene (`%Y-%m-%dT%H:%M:%S`)
        :rtype: str
    """
    return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeFormat)
b36e68466c05eb33f178d2568b3c2ff21bc9c707
3,637,008
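For instance, an expiration timestamp 30 seconds ahead:

expiration = formatTimeFromNow(30)
print(expiration)  # e.g. '2024-01-01T12:00:30'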
def exitFlow(x, n_classes): """ Create the exit flow section x : input to the exit flow section n_classes : number of output classes """ def classifier(x, n_classes): """ The output classifier x : input to the classifier n_classes : number of output classes """ # Global Average Pooling will flatten the 10x10 feature maps into 1D # feature maps x = GlobalAveragePooling2D()(x) # Fully connected output layer (classification) x = Dense(n_classes, activation='softmax')(x) return x # Remember the input shortcut = x # Strided convolution to double number of filters in identity link to # match output of residual block for the add operation (projection shortcut) shortcut = Conv2D(1024, (1, 1), strides=(2, 2), padding='same')(shortcut) shortcut = BatchNormalization()(shortcut) # First Depthwise Separable Convolution # Dimensionality reduction - reduce number of filters x = SeparableConv2D(728, (3, 3), padding='same')(x) x = BatchNormalization()(x) x = ReLU()(x) # Second Depthwise Separable Convolution # Dimensionality restoration x = SeparableConv2D(1024, (3, 3), padding='same')(x) x = BatchNormalization()(x) x = ReLU()(x) # Create pooled feature maps, reduce size by 75% x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) # Add the projection shortcut to the output of the pooling layer x = Add()([x, shortcut]) # Third Depthwise Separable Convolution x = SeparableConv2D(1556, (3, 3), padding='same')(x) x = BatchNormalization()(x) x = ReLU()(x) # Fourth Depthwise Separable Convolution x = SeparableConv2D(2048, (3, 3), padding='same')(x) x = BatchNormalization()(x) x = ReLU()(x) # Create classifier section x = classifier(x, n_classes) return x
95ac0696e03cb6e3320cebd20790e2f07c69d4ee
3,637,009
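A wiring sketch for the exitFlow section above, assuming the layers it uses (Conv2D, SeparableConv2D, BatchNormalization, ReLU, MaxPooling2D, Add, GlobalAveragePooling2D, Dense) are imported from tensorflow.keras.layers; the 10x10x728 input shape matches the feature-map comment in the code:

from tensorflow.keras import Input, Model

inputs = Input(shape=(10, 10, 728))   # typical middle-flow output shape
outputs = exitFlow(inputs, n_classes=1000)
model = Model(inputs, outputs)
model.summary()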
from typing import Optional


def SingleChannelDDR4_2400(size: Optional[str] = "1024MB") -> SingleChannel:
    """
    A single channel DDR4_2400.

    :param size: The size of the memory system. Default value of 1024MB.
    """
    return SingleChannel("DDR4_4Gb_x8_2400", size)
10a83cd74b55f5ec93812fd1d52c8d753d9024d4
3,637,011
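Usage is a single call; this sketch assumes the "1024MB"-style size strings that the default suggests are what gem5's parser accepts:

memory = SingleChannelDDR4_2400()               # default 1024MB
bigger = SingleChannelDDR4_2400(size="2048MB")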
def convert_Pa_to_dBSPL(pa): """ Converts units of Pa to dB re 20e-6 Pa (dB SPL) """ return 20. * np.log10(pa / 20e-6)
a14991c7923b7ceb46f279a95b3ef64ff648ae57
3,637,012
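A worked check: 1 Pa is 20*log10(1 / 20e-6) ≈ 93.98 dB SPL, and the 20e-6 Pa reference pressure itself maps to 0 dB:

import numpy as np

print(convert_Pa_to_dBSPL(1.0))    # ~93.98
print(convert_Pa_to_dBSPL(20e-6))  # 0.0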
def isPalindromic(seq):
    """
    is a sequence palindromic?

    returns True or False
    """
    return rc_expanded(seq.lower()) == seq.lower()
bbe011e0b599f8df417ffc10eef0ace0d8f08d37
3,637,013
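Assuming rc_expanded() returns the reverse complement of a nucleotide sequence, a restriction site such as EcoRI's is its own reverse complement:

print(isPalindromic("GAATTC"))  # True  (EcoRI site)
print(isPalindromic("GAATTA"))  # False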
import numpy import random def randomPairsMatch(n_records_A: int, n_records_B: int, sample_size: int) -> IndicesIterator: """ Return random combinations of indices for record list A and B """ n: int = n_records_A * n_records_B if not sample_size: return iter([]) elif sample_size >= n: random_pairs = numpy.arange(n) else: random_pairs = numpy.array(random.sample(range(n), sample_size)) i, j = numpy.unravel_index(random_pairs, (n_records_A, n_records_B)) return zip(i, j)
2cd6f905933149b4f23f656e9db44f57830e1eb9
3,637,015
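For example, five random pairs from a 3x4 grid of record indices (output varies with the seed):

import random

random.seed(0)
pairs = list(randomPairsMatch(3, 4, sample_size=5))
print(pairs)  # five (i, j) pairs with 0 <= i < 3 and 0 <= j < 4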
import logging def GetScaffoldLengths(genome_fna_fp): """ This function gets the lengths of the scaffolds, returns a dict Args: genome_fna_fp: (str) Path to genome fna file (FASTA) Returns: Scaffold_To_Length: (dict) scaffold_name: (str) -> length (int) """ Scaffold_To_Length = {} FNA_FH = open(genome_fna_fp) c_line = FNA_FH.readline().strip() c_scaffold_name = "" while c_line != "": if c_line[0] == ">": if c_scaffold_name != "": Scaffold_To_Length[c_scaffold_name] = cs_len if " " in c_line: logging.warning(f"A space found in scaffold name: '{c_line}'." " This might cause an error.") c_scaffold_name = (c_line.split(' ')[0])[1:] logging.warning(f"Instead using scaffold name {c_scaffold_name}") else: c_scaffold_name = c_line[1:] # Current scaffold length is reset cs_len = 0 else: cs_len += len(c_line) c_line = FNA_FH.readline().strip() FNA_FH.close() if c_scaffold_name != "": Scaffold_To_Length[c_scaffold_name] = cs_len if len(Scaffold_To_Length.keys()) == 0: logging.warning("No Scaffolds found in " + genome_fna_fp) return Scaffold_To_Length
cee4c6a3d9171dc86563e5f74dae6fbdfcb0556a
3,637,016
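A round trip through a temporary FASTA file; multi-line records are summed, and anything after a space in the header is dropped:

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".fna", delete=False) as fh:
    fh.write(">chr1 circular\nACGT\nACG\n>chr2\nAA\n")
print(GetScaffoldLengths(fh.name))  # {'chr1': 7, 'chr2': 2}
os.remove(fh.name)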
import numpy as np


def flip_mesh(mesh):
    """
    It flips the mesh of a shape.
    ----------------------------
    Args:
        mesh (obj: 'base.Trimesh'): The mesh of a shape
    Returns:
        mesh (obj: 'base.Trimesh'): The flipped mesh of the shape
    """
    # One centroid per face; iterate over all faces.  (The original iterated
    # over mesh.faces[1:], skipping the first face and leaving the last
    # column of `triangles` zeroed.)
    triangles = np.zeros((3, len(mesh.faces)))
    for i, index in enumerate(mesh.faces):
        x, y, z = [], [], []
        for num in index:
            vertices = mesh.vertices[num]
            x.append(vertices[0])
            y.append(vertices[1])
            z.append(vertices[2])
        # Face centroid = mean of its three vertex coordinates.
        triangles[0][i] = np.sum(x) / 3
        triangles[1][i] = np.sum(y) / 3
        triangles[2][i] = np.sum(z) / 3
    f_x = calculate_f(triangles[0])
    f_y = calculate_f(triangles[1])
    f_z = calculate_f(triangles[2])
    # Flip each axis according to the sign of its moment.
    R = np.array([[np.sign(f_x), 0, 0],
                  [0, np.sign(f_y), 0],
                  [0, 0, np.sign(f_z)]])
    mesh.vertices = np.matmul(mesh.vertices, R)
    return mesh
a527b47a4f1c184a97d4ee7005d05be7926e0258
3,637,018
def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
    """:func:`leaky_twice_relu6` can be used through its shortcut: :func:`tl.act.ltrelu6`.

    This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__

    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    This function pushes the logic further by adding `leaky` behaviour both below zero and above six.

    The function returns the following results:
      - When x < 0: ``f(x) = alpha_low * x``.
      - When x in [0, 6]: ``f(x) = x``.
      - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.

    Parameters
    ----------
    x : Tensor
        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
    alpha_low : float
        Slope for x < 0: ``f(x) = alpha_low * x``.
    alpha_high : float
        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
    name : str
        The function name (optional).

    Examples
    --------
    >>> import tensorlayer as tl
    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.

    References
    ----------
    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__

    """
    if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
        raise ValueError("`alpha_high` value must be in (0, 1]")

    if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
        raise ValueError("`alpha_low` value must be in (0, 1]")

    with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
        x = tf.convert_to_tensor(x, name="features")

        x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
        x_is_below_0 = tf.minimum(alpha_low * x, 0)

        return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)
17c4fce9bd8803cda254fb28cde72e5401760c3d
3,637,019
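A spot check of all three regimes; the two-argument tf.name_scope form and tf.Session assume TensorFlow 1.x, which this tensorlayer-era helper targets. With both slopes at 0.2, -2 maps to -0.4, 3 is unchanged, and 8 maps to 6.4:

import tensorflow as tf

x = tf.constant([-2.0, 3.0, 8.0])
y = leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2)
with tf.Session() as sess:
    print(sess.run(y))  # [-0.4  3.   6.4]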
def fully_connected(inputs, num_outputs, scope, use_xavier=True, stddev=1e-3, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ Fully connected layer with non-linear operation. Args: inputs: 2-D tensor BxN num_outputs: int Returns: Variable tensor of size B x num_outputs. """ with tf.variable_scope(scope) as sc: num_input_units = inputs.get_shape()[-1].value weights = _variable_with_weight_decay('weights', shape=[num_input_units, num_outputs], use_xavier=use_xavier, stddev=stddev, wd=weight_decay) outputs = tf.matmul(inputs, weights) biases = tf.get_variable('biases', [num_outputs], initializer = tf.constant_initializer(0.0)) outputs = tf.nn.bias_add(outputs, biases) if bn: outputs = tf.contrib.layers.batch_norm(outputs, decay = bn_decay, updates_collections = None, epsilon = 1e-5, scale = True, is_training = is_training, scope = 'bn') if activation_fn is not None: outputs = activation_fn(outputs) return outputs
01646dd4d18a210b298c313b13a03274c69fd127
3,637,020
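A TF1-style usage sketch; _variable_with_weight_decay is assumed to be the companion helper from the same (PointNet-style) utils module:

inputs = tf.placeholder(tf.float32, shape=(32, 128))
is_training = tf.placeholder(tf.bool, shape=())
logits = fully_connected(inputs, 10, scope='fc1',
                         bn=True, bn_decay=0.9, is_training=is_training)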
def _x_orientation_rep_dict(x_orientation):
    """Helper function to create a replacement dict based on x_orientation."""
    if x_orientation.lower() == 'east' or x_orientation.lower() == 'e':
        return {'x': 'e', 'y': 'n'}
    elif x_orientation.lower() == 'north' or x_orientation.lower() == 'n':
        return {'x': 'n', 'y': 'e'}
    else:
        raise ValueError('x_orientation not recognized.')
83434a8aef7003146a19c470b831e8e9cfa85f19
3,637,021
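For example:

print(_x_orientation_rep_dict("E"))      # {'x': 'e', 'y': 'n'}
print(_x_orientation_rep_dict("north"))  # {'x': 'n', 'y': 'e'}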
def move_at_objc_to_access_note(access_notes_file, arg, offset, access_note_name): """Write an @objc attribute into an access notes file, then return the string that will replace the attribute and trailing comment.""" access_notes_file.write(u""" - Name: '{}' ObjC: true""".format(access_note_name)) if arg: access_notes_file.write(u""" ObjCName: '{}'""".format(arg)) # Default to shifting expected diagnostics down 1 line. if offset is None: offset = 1 return u"// access-note-adjust" + offsetify(offset) + u" [attr moved] " + \ u"expected-remark{{access note for fancy tests adds attribute 'objc' to " + \ u"this }} expected-note{{add attribute explicitly to silence this warning}}"
6037b6db15188ce43771d47f01518994f562d409
3,637,022
def test_idempotent_lambda_with_validator_util(
    config_without_jmespath: IdempotencyConfig,
    persistence_store: DynamoDBPersistenceLayer,
    lambda_apigw_event,
    timestamp_future,
    serialized_lambda_response,
    deserialized_lambda_response,
    hashed_idempotency_key_with_envelope,
    mock_function,
    lambda_context,
):
    """
    Test idempotent decorator where event with matching event key has already been successfully processed, using the
    validator utility to unwrap the event
    """

    stubber = stub.Stubber(persistence_store.table.meta.client)
    ddb_response = {
        "Item": {
            "id": {"S": hashed_idempotency_key_with_envelope},
            "expiration": {"N": timestamp_future},
            "data": {"S": serialized_lambda_response},
            "status": {"S": "COMPLETED"},
        }
    }

    expected_params = {
        "TableName": TABLE_NAME,
        "Key": {"id": hashed_idempotency_key_with_envelope},
        "ConsistentRead": True,
    }
    stubber.add_client_error("put_item", "ConditionalCheckFailedException")
    stubber.add_response("get_item", ddb_response, expected_params)
    stubber.activate()

    @validator(envelope=envelopes.API_GATEWAY_HTTP)
    @idempotent(config=config_without_jmespath, persistence_store=persistence_store)
    def lambda_handler(event, context):
        mock_function()
        return "shouldn't get here!"

    mock_function.assert_not_called()
    lambda_resp = lambda_handler(lambda_apigw_event, lambda_context)
    assert lambda_resp == deserialized_lambda_response

    stubber.assert_no_pending_responses()
    stubber.deactivate()
75e0d3a8aabb3e3520c06a0268a7e2d1e534d249
3,637,023
def get_version_if_modified(gh_type, repo_name, typ, force=False):
    """
    Return the latest version if the latest version is different from the
    previously indexed version. Return None if no change.
    If force is True, always return the latest version.
    """
    latest_version = get_latest_version(gh_type, repo_name, typ)
    if force:
        return latest_version
    indexed_version = get_indexed_version(gh_type, repo_name, typ)
    if indexed_version == latest_version:
        print('%s (%s): skipping %s' % (repo_name, gh_type, typ))
        return None
    else:
        return latest_version
51dcd251dece6e6e401261f79007be8fcd653844
3,637,025
import requests
import json
from urllib import parse as urlparse


def do_rest_request(**kwargs):
    """This function expects "full_url" or, in its absence, a combination of
    "url" and "query_params"."""
    if 'full_url' in kwargs:
        query_url = kwargs['full_url']
    elif 'url' in kwargs and 'query_params' in kwargs:
        # (The original checked for 'rest_url' but then read kwargs['url'],
        # which raised KeyError; the check now matches the access and the
        # error message below.)
        query_url = kwargs['url'] + '?' + urlparse.urlencode(kwargs['query_params'])
    else:
        raise Exception('Provide either "full_url" or a combination of "url" and "query_params"')
    print("Querying {}".format(query_url))
    rest_response = requests.get(query_url, headers={'content-type': 'application/json'})
    if rest_response.status_code != 200:
        raise Exception('Cannot fetch info: {}'.format(rest_response.status_code))
    return json.loads(rest_response.text)
0ee2d7e20ca98e2c73b8d4b3e89d709a1b14a911
3,637,026
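Both calling conventions, against a hypothetical endpoint used purely for illustration:

resp = do_rest_request(full_url="https://api.example.com/items?page=1")
resp = do_rest_request(url="https://api.example.com/items",
                       query_params={"page": 1, "limit": 10})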
def variable(init_val, lb=None, ub=None): """ Initialize a scalar design variable. :param init_val: Initial guess :param lb: Optional lower bound :param ub: Optional upper bound :return: The created variable """ var = opti.variable() opti.set_initial(var, init_val) if lb is not None: opti.subject_to(var >= lb) if ub is not None: opti.subject_to(var <= ub) return var
6cd346effba937a43c555e3e0e1e7b3fecf231e3
3,637,027
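A sketch assuming, as the body of variable() implies, a module-level CasADi Opti instance named opti:

import casadi

opti = casadi.Opti()
x = variable(init_val=1.0, lb=0.0, ub=10.0)  # bounded design variable
y = variable(init_val=-0.5)                  # unbounded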
def current_user() -> str:
    """
    Return the current user.
    """
    session_id = request.get_cookie(cookie_session_name())
    c = get_cursor()
    c.execute(
        """
        select username
        from sessions
        where session_id = :session_id
        """,
        {"session_id": session_id},
    )
    return c.fetchone()["username"]
ec2b16f671a9fd11762160bcb73f770d9bc5eb7a
3,637,028
async def async_setup(hass, hassconfig): """Setup Component.""" hass.data.setdefault(DOMAIN, {}) config = hassconfig.get(DOMAIN) or {} hass.data[DOMAIN]['config'] = config hass.data[DOMAIN].setdefault('entities', {}) hass.data[DOMAIN].setdefault('configs', {}) hass.data[DOMAIN].setdefault('miot_main_entity', {}) hass.data[DOMAIN].setdefault('micloud_devices', []) hass.data[DOMAIN].setdefault('cloud_instance_list', []) hass.data[DOMAIN].setdefault('event_fetcher_list', []) hass.data[DOMAIN].setdefault('add_handler', {}) component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL) hass.data[DOMAIN]['component'] = component await component.async_setup(config) return True
5708286ac76bc01ff8b979632d8d030192600e3f
3,637,029
def get_node_centroids(mesh): """ Calculate the node centroids of the given elements. Parameters ---------- mesh : list of dicts or single dict each dict containing at least the following keywords nodes : ndarray Array with all node postions. elements : dict of ndarrays Contains array of nodes for elements sorted by element types. Returns ------- result : list of dictionaries or single dict of ndarrays (like 'mesh') Centroids of elements sorted by element types. """ single = False if not isinstance(mesh, (list, tuple)): tmp_mesh = [mesh] single = True else: tmp_mesh = mesh result = [] for mesh_i in tmp_mesh: out = {} for elem in ELEM_NAMES: if elem not in mesh_i["elements"]: continue points = mesh_i["nodes"][mesh_i["elements"][elem]] out[elem] = np.mean(points, axis=1) result.append(out) if single: result = result[0] return result
eb7244184921a9728ce12e0f7eaf46bd52cf2399
3,637,031
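A toy 2-D mesh for get_node_centroids; this assumes "tri" is one of the element types in the module's ELEM_NAMES list:

import numpy as np

mesh = {
    "nodes": np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]),
    "elements": {"tri": np.array([[0, 1, 2]])},
}
print(get_node_centroids(mesh))  # {'tri': array([[0.333..., 0.333...]])}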
def find_saas_replication_price(package, tier=None, iops=None): """Find the price in the given package for the desired replicant volume :param package: The product package of the endurance storage type :param tier: The tier of the primary storage volume :param iops: The IOPS of the primary storage volume :return: Returns the replication price, or an error if not found """ if tier is not None: target_value = ENDURANCE_TIERS.get(tier) target_item_keyname = 'REPLICATION_FOR_TIERBASED_PERFORMANCE' target_restriction_type = 'STORAGE_TIER_LEVEL' else: target_value = iops target_item_keyname = 'REPLICATION_FOR_IOPSBASED_PERFORMANCE' target_restriction_type = 'IOPS' for item in package['items']: if item['keyName'] != target_item_keyname: continue price_id = _find_price_id( item['prices'], 'performance_storage_replication', target_restriction_type, target_value ) if price_id: return price_id raise ValueError("Could not find price for replicant volume")
5f3abdd4a2edd24abd8c19752316b06e76212532
3,637,032
def _get_option_of_highest_precedence(config, option_name): """looks in the config and returns the option of the highest precedence This assumes that there are options and flags that are equivalent Args: config (_pytest.config.Config): The pytest config object option_name (str): The name of the option Returns: str: The value of the option that is of highest precedence None: no value is present """ # Try to get configs from CLI and ini try: cli_option = config.getoption("--{}".format(option_name)) except ValueError: cli_option = None try: ini_option = config.getini(option_name) except ValueError: ini_option = None highest_precedence = cli_option or ini_option return highest_precedence
4f3bca4ff5b0a1eb04fbdc7a5d22bc09dbc95df6
3,637,033
def get_industry_categories():
    """Return a {code: name} mapping for each compiling department."""
    expr = STOCK_DB.industries.drop_field('last_updated')
    df = odo(expr, pd.DataFrame)
    res = {}
    for name, group in df.groupby('department'):
        res[name] = group.set_index('industry_id').to_dict()['name']
    return res
5b50dc2845a903e56071b57b0ee8d307f5e52f27
3,637,034
def rate(t, y, dt, elph_tau, pol_tau, delay, start): """Rate equation function for two state model. y[0] is charge transfer state, y[1] is polaron state, elph_tau is electron-phonon scattering constant, pol_tau is polaron formation constant.""" dydt = [(pulse(t, dt, delay, start) - (y[0] - y[1])/elph_tau - y[0]*y[1]/pol_tau), ((y[0] - y[1])/elph_tau - y[0]*y[1]/pol_tau), (y[0]*y[1]/pol_tau)] return dydt
dd98606f0ab4dd5c3334acfbd080959ac4921030
3,637,035
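An integration sketch with scipy; pulse() must come from the same module as rate(), and the parameter values here are illustrative only. y0 holds the charge-transfer, polaron, and formed-polaron populations:

from scipy.integrate import solve_ivp

y0 = [0.0, 0.0, 0.0]
sol = solve_ivp(rate, (0.0, 10.0), y0,
                args=(0.1, 0.5, 2.0, 1.0, 0.0))  # dt, elph_tau, pol_tau, delay, start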
import copy def trace_module(no_print=True): """ Trace my_module_original exceptions """ with putil.exdoc.ExDocCxt() as exdoc_obj: try: docs.support.my_module.func('John') obj = docs.support.my_module.MyClass() obj.value = 5 obj.value except: raise RuntimeError( 'Tracing did not complete successfully' ) if not no_print: module_prefix = 'docs.support.my_module.' callable_names = ['func', 'MyClass.value'] for callable_name in callable_names: callable_name = module_prefix+callable_name print('\nCallable: {0}'.format(callable_name)) print(exdoc_obj.get_sphinx_doc(callable_name, width=70)) print('\n') return copy.copy(exdoc_obj)
f407cba3f2ae8582bdaa685ae5bcba1ca908e9a9
3,637,036
import numpy as np


def data_to_seq(X, Y, t_lag=8, t_future_shift=1, t_future_steps=1, t_sw_step=1, X_pad_with=None):
    """Slice X and Y into sequences using a sliding window.

    Arguments:
    ----------
        X : np.ndarray with ndim == 2
        Y : np.ndarray with ndim == 2
        t_sw_step : uint (default: 1)
            Time step of the sliding window.
        t_lag : uint (default: 8)
            (t_lag - 1) past time steps used to construct a sequence of inputs.
        t_future_shift : uint (default: 1)
            How far in the future predictions are supposed to be made.
        t_future_steps : uint (default: 1)
            How many steps to be predicted from t + t_future_shift.

    The sequences are constructed in a way that the model can be trained
    to predict Y[t_future:t_future+t_future_steps] from X[t-t_lag:t] where
    t_future = t + t_future_shift.
    """
    # Assume that provided X and Y are matrices and are aligned in time
    assert X.ndim == 2 and Y.ndim == 2
    assert len(X) == len(Y)

    # Pad X sequence from the beginning
    X_padding_left = np.zeros((t_lag - 1, X.shape[1]))
    X = np.vstack([X_padding_left, X])

    # The future steps of X should be skipped, hence padded with zeros
    # X_padding_right = np.zeros((t_future_shift+t_future_steps-1, X.shape[1]))

    nb_t_steps = 1 + len(X) - (t_future_shift + (t_future_steps - 1))

    X_seq, Y_seq = [], []
    for t in range(t_lag, nb_t_steps, t_sw_step):
        t_past = t - t_lag
        t_future = t_past + t_future_shift
        # X_seq.append(np.vstack([X[t_past:t], X_padding_right]))
        X_seq.append(X[t_past:t])
        Y_seq.append(Y[t_future:t_future+t_future_steps])

    X_seq = np.asarray(X_seq)
    Y_seq = np.asarray(Y_seq)

    return [X_seq, Y_seq]
477366408309483eb1c9dcc2d90c70f7bd3ab143
3,637,037
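A shape check with toy data: t_lag=4 past steps in, 2 future steps out, yielding 8 windows from 10 aligned time steps:

import numpy as np

X = np.arange(20).reshape(10, 2)
Y = np.arange(10).reshape(10, 1)
X_seq, Y_seq = data_to_seq(X, Y, t_lag=4, t_future_shift=1, t_future_steps=2)
print(X_seq.shape, Y_seq.shape)  # (8, 4, 2) (8, 2, 1)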