language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/**
 * Reconfigure logback from the given configuration stream.
 *
 * <p>If the current logging context is not a logback context, a warning is
 * logged and no reconfiguration takes place.
 *
 * @param config the logback configuration to apply
 */
public static void reconfigure(InputStream config) {
    if (CONTEXT != null) {
        reconfigure(config, CONTEXT);
    } else {
        log.warn("当前日志上下文不是logback,不能使用该配置器重新配置");
    }
}
java
static String pullFontPathFromStyle(Context context, AttributeSet attrs, int[] attributeId) { if (attributeId == null || attrs == null) return null; final TypedArray typedArray = context.obtainStyledAttributes(attrs, attributeId); if (typedArray != null) { try { // First defined attribute String fontFromAttribute = typedArray.getString(0); if (!TextUtils.isEmpty(fontFromAttribute)) { return fontFromAttribute; } } catch (Exception ignore) { // Failed for some reason. } finally { typedArray.recycle(); } } return null; }
python
def load(self, email, master_token, android_id):
    """Authenticate to Google with the provided master token.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        android_id (str): An identifier for this client.

    Returns:
        bool: True on success.

    Raises:
        LoginException: If there was a problem logging in.
    """
    # Store the credentials, then validate them by refreshing the
    # access token; refresh() is expected to raise on failure.
    self._email = email
    self._android_id = android_id
    self._master_token = master_token

    self.refresh()

    return True
python
def count_alleles(self, max_allele=None, subpop=None): """Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. subpop : sequence of ints, optional Indices of samples to include in count. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.count_alleles() <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 2 1 0 0 2 >>> g.count_alleles(max_allele=1) <AlleleCountsArray shape=(3, 2) dtype=int32> 3 1 1 2 0 0 """ # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.values) mask = memoryview_safe(self.mask).view(dtype='u1') if self.mask is not None else None if subpop is None and mask is None: ac = genotype_array_count_alleles(values, max_allele) elif subpop is None: ac = genotype_array_count_alleles_masked(values, mask, max_allele) elif mask is None: ac = genotype_array_count_alleles_subpop(values, max_allele, subpop) else: ac = genotype_array_count_alleles_subpop_masked(values, mask, max_allele, subpop) return AlleleCountsArray(ac, copy=False)
java
/**
 * Build the OData 'context URL' for the given request URI.
 *
 * @param oDataUri        the parsed request URI
 * @param entityDataModel the entity data model used for operation calls
 * @param isPrimitive     whether the result is of a primitive type
 * @return the context URL
 * @throws ODataRenderException if no context URL could be constructed
 */
public static String getContextURL(ODataUri oDataUri, EntityDataModel entityDataModel,
        boolean isPrimitive) throws ODataRenderException {
    final boolean operationCall = ODataUriUtil.isActionCallUri(oDataUri)
            || ODataUriUtil.isFunctionCallUri(oDataUri);
    if (operationCall) {
        return buildContextUrlFromOperationCall(oDataUri, entityDataModel, isPrimitive);
    }

    final Option<String> contextOption = getContextUrl(oDataUri);
    if (contextOption.isEmpty()) {
        throw new ODataRenderException("Could not construct context");
    }
    return contextOption.get();
}
python
def add_user(
    self,
    username,
    first_name,
    last_name,
    email,
    role,
    password="",
    hashed_password="",
):
    """
    Generic function to create user.

    Returns the created user on success, or False when persisting
    the user failed (the session is rolled back in that case).
    """
    try:
        user = self.user_model()
        user.first_name = first_name
        user.last_name = last_name
        user.username = username
        user.email = email
        user.active = True
        user.roles.append(role)
        # Prefer an already-hashed password when one is supplied.
        user.password = hashed_password if hashed_password else generate_password_hash(password)
        self.get_session.add(user)
        self.get_session.commit()
        log.info(c.LOGMSG_INF_SEC_ADD_USER.format(username))
        return user
    except Exception as e:
        log.error(c.LOGMSG_ERR_SEC_ADD_USER.format(str(e)))
        self.get_session.rollback()
        return False
python
def list_rows(
    self,
    table,
    selected_fields=None,
    max_results=None,
    page_token=None,
    start_index=None,
    page_size=None,
    retry=DEFAULT_RETRY,
):
    """List the rows of the table.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list

    .. note::

        This method assumes that the provided schema is up-to-date with the
        schema as defined on the back-end: if the two schemas are not
        identical, the values returned may be incomplete. To ensure that the
        local copy of the schema is up-to-date, call ``client.get_table``.

    Args:
        table (Union[ \
            :class:`~google.cloud.bigquery.table.Table`, \
            :class:`~google.cloud.bigquery.table.TableListItem`, \
            :class:`~google.cloud.bigquery.table.TableReference`, \
            str, \
        ]):
            The table to list, or a reference to it. When the table
            object does not contain a schema and ``selected_fields`` is
            not supplied, this method calls ``get_table`` to fetch the
            table schema.
        selected_fields (Sequence[ \
            :class:`~google.cloud.bigquery.schema.SchemaField` \
        ]):
            The fields to return. If not supplied, data for all columns
            are downloaded.
        max_results (int):
            (Optional) maximum number of rows to return.
        page_token (str):
            (Optional) Token representing a cursor into the table's rows.
            If not passed, the API will return the first page of the
            rows. The token marks the beginning of the iterator to be
            returned and the value of the ``page_token`` can be accessed
            at ``next_page_token`` of the
            :class:`~google.cloud.bigquery.table.RowIterator`.
        start_index (int):
            (Optional) The zero-based index of the starting row to read.
        page_size (int):
            Optional. The maximum number of rows in each page of results
            from this request. Non-positive values are ignored. Defaults
            to a sensible value set by the API.
        retry (:class:`google.api_core.retry.Retry`):
            (Optional) How to retry the RPC.

    Returns:
        google.cloud.bigquery.table.RowIterator:
            Iterator of row data
            :class:`~google.cloud.bigquery.table.Row`-s. During each
            page, the iterator will have the ``total_rows`` attribute
            set, which counts the total number of rows **in the table**
            (this is distinct from the total number of rows in the
            current page: ``iterator.page.num_items``).
    """
    # Normalize the table argument (accepts Table/TableListItem/
    # TableReference/str) into a Table before validating.
    table = _table_arg_to_table(table, default_project=self.project)

    if not isinstance(table, Table):
        raise TypeError(_NEED_TABLE_ARGUMENT)

    schema = table.schema

    # selected_fields can override the table schema.
    if selected_fields is not None:
        schema = selected_fields

    # No schema, but no selected_fields. Assume the developer wants all
    # columns, so get the table resource for them rather than failing.
    elif len(schema) == 0:
        table = self.get_table(table.reference, retry=retry)
        schema = table.schema

    params = {}
    if selected_fields is not None:
        params["selectedFields"] = ",".join(field.name for field in selected_fields)
    if start_index is not None:
        params["startIndex"] = start_index

    row_iterator = RowIterator(
        client=self,
        api_request=functools.partial(self._call_api, retry),
        path="%s/data" % (table.path,),
        schema=schema,
        page_token=page_token,
        max_results=max_results,
        page_size=page_size,
        extra_params=params,
        table=table,
        # Pass in selected_fields separately from schema so that full
        # tables can be fetched without a column filter.
        selected_fields=selected_fields,
    )
    return row_iterator
python
def project_views(
        self, projects, access='all-access', agent='all-agents',
        granularity='daily', start=None, end=None):
    """
    Get pageview counts for one or more wikimedia projects

    See `<https://wikimedia.org/api/rest_v1/metrics/pageviews/?doc\\
    #!/Pageviews_data/get_metrics_pageviews_aggregate_project\\
    _access_agent_granularity_start_end>`_

    :Parameters:
        projects : list(str)
            a list of wikimedia projects such as en.wikipedia or
            commons.wikimedia
        access : str
            access method (desktop, mobile-web, mobile-app, or by default,
            all-access)
        agent : str
            user agent type (spider, user, bot, or by default, all-agents)
        granularity : str
            the granularity of the timeseries to return (hourly, daily, or
            monthly)
        end : str|date
            can be a datetime.date object or string in YYYYMMDDHH format
            default: today
        start : str|date
            can be a datetime.date object or string in YYYYMMDDHH format
            default: 30 days before end date

    :Returns:
        a nested dictionary keyed by date, then by project, mapping to the
        view count.  The view_count will be None where no data is
        available, to distinguish from 0
    """
    # Resolve the date range; defaults: end = today, start = end - 30 days.
    endDate = end or date.today()
    if type(endDate) is not date:
        endDate = parse_date(end)

    startDate = start or endDate - timedelta(30)
    if type(startDate) is not date:
        startDate = parse_date(start)

    # One API request per project over the whole range.
    urls = [
        '/'.join([
            endpoints['project'], p, access, agent, granularity,
            format_date(startDate), format_date(endDate),
        ])
        for p in projects
    ]

    if granularity == 'hourly':
        increment = timedelta(hours=1)
    elif granularity == 'daily':
        increment = timedelta(days=1)
    elif granularity == 'monthly':
        # FIXME(review): datetime.timedelta has no 'months' keyword, so this
        # raises TypeError whenever granularity == 'monthly'.  A
        # calendar-aware increment (e.g. dateutil.relativedelta) is needed.
        increment = timedelta(months=1)
    # Pre-fill every timestamp/project cell with None so missing data is
    # distinguishable from a genuine zero count.
    outputDays = timestamps_between(startDate, endDate, increment)
    output = defaultdict(dict, {
        day: {p: None for p in projects} for day in outputDays
    })

    try:
        results = self.get_concurrent(urls)
        some_data_returned = False
        for result in results:
            if 'items' in result:
                some_data_returned = True
            else:
                continue
            for item in result['items']:
                output[parse_date(item['timestamp'])][item['project']] = item['views']

        if not some_data_returned:
            raise Exception(
                'The pageview API returned nothing useful at: {}'.format(urls)
            )

        return output
    except Exception:
        # Log context for debugging, then propagate to the caller.
        print('ERROR while fetching and parsing ' + str(urls))
        traceback.print_exc()
        raise
python
def url(self):
    """Return the whole URL from the base of the tree to this node."""
    result = None
    ancestors = self.parents()
    # Fold each ancestor's path segment onto the accumulated URL, from
    # the base down to this node (urljoin treats a falsy base as empty).
    while not ancestors.empty():
        result = urljoin(result, ancestors.get().path())
    return result
java
/**
 * Annotate each ring set with the number of hetero atoms and the number of
 * rings containing at least one hetero atom.
 *
 * @param rsets the ring sets to annotate
 */
static void countHetero(List<IRingSet> rsets) {
    for (IRingSet rset : rsets) {
        int heteroAtoms = 0;
        int heteroRings = 0;
        for (IAtomContainer ring : rset.atomContainers()) {
            final int before = heteroAtoms;
            for (IAtom atom : ring.atoms()) {
                final Integer elem = atom.getAtomicNumber();
                // anything other than carbon (6) or hydrogen (1) counts
                if (elem != null && elem != 6 && elem != 1) {
                    heteroAtoms++;
                }
            }
            // this ring contributed at least one hetero atom
            if (heteroAtoms > before) {
                heteroRings++;
            }
        }
        rset.setProperty(NUM_HETERO_ATOMS, heteroAtoms);
        rset.setProperty(NUM_HETERO_RINGS, heteroRings);
    }
}
java
/**
 * Compute the angle-based outlier factor (ABOF) of the object referenced
 * by {@code pA}, using kernel-space similarities only.
 *
 * @param kernelMatrix pairwise similarity (kernel) matrix
 * @param pA object to compute the factor for
 * @param pB reusable iterator over candidate objects B
 * @param pC reusable iterator over candidate objects C (starts after B)
 * @param s scratch mean/variance accumulator; reset here and reused
 * @return the naive variance of the weighted angle spectrum
 */
protected double computeABOF(KernelMatrix kernelMatrix, DBIDRef pA, DBIDArrayIter pB, DBIDArrayIter pC, MeanVariance s) {
    s.reset(); // Reused
    double simAA = kernelMatrix.getSimilarity(pA, pA);
    for(pB.seek(0); pB.valid(); pB.advance()) {
        if(DBIDUtil.equal(pB, pA)) {
            continue;
        }
        double simBB = kernelMatrix.getSimilarity(pB, pB);
        double simAB = kernelMatrix.getSimilarity(pA, pB);
        // squared kernel-space distance: ||B-A||^2 = <B,B> + <A,A> - 2<A,B>
        double sqdAB = simAA + simBB - simAB - simAB;
        if(!(sqdAB > 0.)) {
            // skip coincident points; the negated form also filters NaN
            continue;
        }
        // pC starts after pB, so each unordered {B, C} pair is seen once
        for(pC.seek(pB.getOffset() + 1); pC.valid(); pC.advance()) {
            if(DBIDUtil.equal(pC, pA)) {
                continue;
            }
            double simCC = kernelMatrix.getSimilarity(pC, pC);
            double simAC = kernelMatrix.getSimilarity(pA, pC);
            double sqdAC = simAA + simCC - simAC - simAC;
            if(!(sqdAC > 0.)) {
                continue;
            }
            // Exploit bilinearity of scalar product:
            // <B-A, C-A> = <B,C-A> - <A,C-A>
            // = <B,C> - <B,A> - <A,C> + <A,A>
            double simBC = kernelMatrix.getSimilarity(pB, pC);
            double numerator = simBC - simAB - simAC + simAA;
            double div = 1. / (sqdAB * sqdAC);
            s.put(numerator * div, FastMath.sqrt(div));
        }
    }
    // Sample variance probably would be better here, but the ABOD publication
    // uses the naive variance.
    return s.getNaiveVariance();
}
java
/**
 * Transform the input document through the given filters, writing to
 * {@code output}; when input and output are the same resource, transform
 * in place.
 *
 * @param input   source document URI
 * @param output  destination document URI
 * @param filters XML filters to apply
 * @throws DITAOTException if the transformation fails
 */
public void transform(final URI input, final URI output, final List<XMLFilter> filters)
        throws DITAOTException {
    if (!input.equals(output)) {
        transformURI(input, output, filters);
    } else {
        transform(input, filters);
    }
}
java
@Restricted(NoExternalUse.class) public synchronized void persistInstallStatus() { List<UpdateCenterJob> jobs = getJobs(); boolean activeInstalls = false; for (UpdateCenterJob job : jobs) { if (job instanceof InstallationJob) { InstallationJob installationJob = (InstallationJob) job; if(!installationJob.status.isSuccess()) { activeInstalls = true; } } } if(activeInstalls) { InstallUtil.persistInstallStatus(jobs); // save this info } else { InstallUtil.clearInstallStatus(); // clear this info } }
python
def zoom(ax, xy='x', factor=1):
    """Zoom into an axis about the midpoint of its current limits.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axis whose limits are rescaled.
    xy : {'x', 'y'}, optional
        Which axis limits to change; any value other than ``'x'`` selects
        the y axis. Defaults to ``'x'``.
    factor : float, optional
        Zoom factor; values greater than 1 zoom in (shrink the visible
        range), values between 0 and 1 zoom out. Defaults to 1 (no change).
    """
    limits = ax.get_xlim() if xy == 'x' else ax.get_ylim()
    # Keep the midpoint fixed and scale the half-range by 1/factor.
    new_limits = (0.5*(limits[0] + limits[1])
                  + 1./factor * np.array((-0.5, 0.5)) * (limits[1] - limits[0]))
    if xy == 'x':
        ax.set_xlim(new_limits)
    else:
        ax.set_ylim(new_limits)
java
/** {@inheritDoc} */
@Override
public GetKeyPairResult getKeyPair(GetKeyPairRequest request) {
    // Run the pre-execution hook, then dispatch the call.
    return executeGetKeyPair(beforeClientExecution(request));
}
python
def cf_tokenize(s):
    """
    Parses UserData for Cloudformation helper functions.

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference.html
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-cloudformation.html#scenario-userdata-base64

    It breaks apart the given string at each recognized function (see
    HELPERS) and instantiates the helper function objects in place of
    those.

    Returns a list of parts as a result. Useful when used with Join() and
    Base64() CloudFormation functions to produce user data.

    ie: Base64(Join('', cf_tokenize(userdata_string)))
    """
    tokens = []
    for chunk in split_re.split(s):
        match = replace_re.search(chunk)
        if match is None:
            # plain text between helper calls passes through unchanged
            tokens.append(chunk)
            continue
        # strip quotes/whitespace from each argument of the helper call
        helper_args = [a.strip("'\" ") for a in match.group("args").split(",")]
        tokens.append(HELPERS[match.group("helper")](*helper_args).data)
    return tokens
python
def setup(self):
    """Initialize scanner options from the plugin configuration."""
    # Options copied straight through from the config.
    for option in (
        'group_comments', 'prefix', 'generic_mode', 'strings',
        'trigraphs', 'decode_escapes', 'charset_size', 'wide_charset_size',
    ):
        setattr(self, option, self.config[option])
    self.blocks = self.config['block_comments']
    self.lines = self.config['line_comments']
    # Charsets are normalized through the encoding-name resolver.
    self.exec_charset = self.get_encoding_name(self.config['exec_charset'])
    self.wide_exec_charset = self.get_encoding_name(self.config['wide_exec_charset'])
    self.string_types, self.wild_string_types = self.eval_string_type(self.config['string_types'])
    if not self.generic_mode:
        self.pattern = RE_CPP
python
def query(method='droplets', droplet_id=None, command=None, args=None, http_method='get'):
    '''
    Make a web call to DigitalOcean
    '''
    base_path = six.text_type(config.get_cloud_config_value(
        'api_root',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default='https://api.digitalocean.com/v2'
    ))

    # Assemble the request path: <root>/<method>/[<droplet_id>/][<command>]
    path = '{0}/{1}/'.format(base_path, method)
    if droplet_id:
        path += '{0}/'.format(droplet_id)
    if command:
        path += command

    if not isinstance(args, dict):
        args = {}

    personal_access_token = config.get_cloud_config_value(
        'personal_access_token', get_configured_provider(), __opts__, search_global=False
    )

    data = salt.utils.json.dumps(args)

    # Dispatch via the requests function matching the HTTP verb.
    requester = getattr(requests, http_method)
    request = requester(
        path,
        data=data,
        headers={
            'Authorization': 'Bearer ' + personal_access_token,
            'Content-Type': 'application/json',
        },
    )
    if request.status_code > 299:
        raise SaltCloudSystemExit(
            'An error occurred while querying DigitalOcean. HTTP Code: {0} '
            'Error: \'{1}\''.format(
                request.status_code,
                # request.read()
                request.text
            )
        )

    log.debug(request.url)

    # success without data
    if request.status_code == 204:
        return True

    result = salt.utils.json.loads(request.text)
    if result.get('status', '').lower() == 'error':
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('error_message', {}))
        )

    return result
python
def _resolve_viewname(self, viewname):
    """ Check for special view names and return existing rTorrent one. """
    # Anything other than the special "-" name is returned verbatim.
    if viewname != "-":
        return viewname
    try:
        # Only works with rTorrent-PS at this time!
        return self.open().ui.current_view()
    except xmlrpc.ERRORS as exc:
        raise error.EngineError("Can't get name of current view: %s" % (exc))
java
/**
 * @return true when this shape is a polygon with at most four edges whose
 *         opposite corners line up on shared x and y coordinates, i.e. an
 *         axis-aligned rectangle.
 */
public boolean isRectangular () {
    if (!_isPolygonal || _rulesSize > 5 || _coordsSize > 8) {
        return false;
    }
    // opposite corners must agree pairwise on their coordinates
    return _coords[1] == _coords[3]
        && _coords[7] == _coords[5]
        && _coords[0] == _coords[6]
        && _coords[2] == _coords[4];
}
python
def as_dict(self):
    """
    Serializes the object necessary data in a dictionary.

    :returns: Serialized data in a dictionary.
    :rtype: dict
    """
    result_dict = super(Group, self).as_dict()

    version = None
    statuses = []
    titles = []
    descriptions = []
    platforms = []
    groups = []
    rules = []
    # Bucket every child element by its concrete type.
    for child in self.children:
        if isinstance(child, Version):
            version = child.as_dict()
        elif isinstance(child, Status):
            statuses.append(child.as_dict())
        elif isinstance(child, Title):
            titles.append(child.as_dict())
        elif isinstance(child, Description):
            descriptions.append(child.as_dict())
        elif isinstance(child, Platform):
            platforms.append(child.as_dict())
        elif isinstance(child, Group):
            groups.append(child.as_dict())
        elif isinstance(child, Rule):
            rules.append(child.as_dict())

    # Only non-empty collections are serialized.
    if version is not None:
        result_dict['version'] = version
    if statuses:
        result_dict['statuses'] = statuses
    if titles:
        result_dict['titles'] = titles
    if descriptions:
        result_dict['descriptions'] = descriptions
    if platforms:
        result_dict['platforms'] = platforms
    if groups:
        result_dict['groups'] = groups
    if rules:
        result_dict['rules'] = rules

    return result_dict
python
def geometry_checker(geometry):
    """Perform a cleaning if the geometry is not valid.

    :param geometry: The geometry to check and clean.
    :type geometry: QgsGeometry

    :return: Tuple of bool and cleaned geometry. True if the geometry is
        already valid, False if the geometry was not valid.
        A cleaned geometry, or None if the geometry could not be repaired.
    :rtype: (bool, QgsGeometry)
    """
    # The geometry can be None.
    if geometry is None:
        return False, None

    # Already valid: hand it back untouched.
    if geometry.isGeosValid():
        return True, geometry

    repaired = geometry.makeValid()
    if repaired.isGeosValid():
        return False, repaired

    # Make valid was not enough, the feature will be deleted.
    return False, None
python
def _api_type(self, value):
    """
    Returns the API type of the given value based on its python type.

    Strings map to ``'string'``; integers (including ``bool``, which is a
    subclass of ``int``) map to ``'integer'``; exact ``datetime.datetime``
    instances map to ``'date'``. Any other type falls through, implicitly
    returning ``None``.
    """
    if isinstance(value, six.string_types):
        return 'string'
    if isinstance(value, six.integer_types):
        return 'integer'
    # deliberate exact-type check: datetime subclasses are not matched
    if type(value) is datetime.datetime:
        return 'date'
python
def hexists(self, key, field):
    """Determine if hash field exists."""
    # HEXISTS returns 0/1; convert the resolved reply to bool.
    return wait_convert(self.execute(b'HEXISTS', key, field), bool)
python
def net_transform(transform_func, block=None, **kwargs):
    """
    Maps nets to new sets of nets according to a custom function

    :param transform_func:
        Function signature: func(orig_net (logicnet)) -> keep_orig_net (bool)
    :return: None
    """
    block = working_block(block)
    with set_working_block(block, True):
        # iterate over a snapshot: the transform may mutate block.logic
        for original_net in block.logic.copy():
            if not transform_func(original_net, **kwargs):
                block.logic.remove(original_net)
python
def keyvalue(
        name,
        key=None,
        value=None,
        key_values=None,
        separator="=",
        append_if_not_found=False,
        prepend_if_not_found=False,
        search_only=False,
        show_changes=True,
        ignore_if_missing=False,
        count=1,
        uncomment=None,
        key_ignore_case=False,
        value_ignore_case=False):
    '''
    Key/Value based editing of a file.

    .. versionadded:: Neon

    This function differs from ``file.replace`` in that it is able to
    search for keys, followed by a customizable separator, and replace the
    value with the given value. Should the value be the same as the one
    already in the file, no changes will be made.

    Either supply both ``key`` and ``value`` parameters, or supply a
    dictionary with key / value pairs. It is an error to supply both.

    name
        Name of the file to search/replace in.

    key
        Key to search for when ensuring a value. Use in combination with a
        ``value`` parameter.

    value
        Value to set for a given key. Use in combination with a ``key``
        parameter.

    key_values
        Dictionary of key / value pairs to search for and ensure values
        for. Used to specify multiple key / values at once.

    separator : "="
        Separator which separates key from value.

    append_if_not_found : False
        Append the key/value to the end of the file if not found. Note
        that this takes precedence over ``prepend_if_not_found``.

    prepend_if_not_found : False
        Prepend the key/value to the beginning of the file if not found.
        Note that ``append_if_not_found`` takes precedence.

    search_only : False
        NOTE(review): currently accepted but unused by the implementation.

    show_changes : True
        Show a diff of the resulting removals and inserts.

    ignore_if_missing : False
        Return with success even if the file is not found (or not
        readable).

    count : 1
        Number of occurrences to allow (and correct), default is 1. Set to
        -1 to replace all, or set to 0 to remove all lines with this key
        regardless of its value.

        .. note::
            Any additional occurrences after ``count`` are removed. A
            count of -1 will only replace all occurrences that are
            currently uncommented already. Lines commented out will be
            left alone.

    uncomment : None
        Disregard and remove supplied leading characters when finding
        keys. When set to None, lines that are commented out are left for
        what they are.

        .. note::
            The argument to ``uncomment`` is not a prefix string. Rather;
            it is a set of characters, each of which are stripped.

    key_ignore_case : False
        Keys are matched case insensitively. When a value is changed the
        matched key is kept as-is.

    value_ignore_case : False
        Values are checked case insensitively, trying to set e.g. 'Yes'
        while the current value is 'yes', will not result in changes when
        ``value_ignore_case`` is set to True.

    An example of using ``file.keyvalue`` to ensure sshd does not allow
    for root to login with a password and at the same time setting the
    login-gracetime to 1 minute and disabling all forwarding:

    .. code-block:: yaml

        sshd_config_harden:
          file.keyvalue:
            - name: /etc/ssh/sshd_config
            - key_values:
                permitrootlogin: 'without-password'
                LoginGraceTime: '1m'
                DisableForwarding: 'yes'
            - separator: ' '
            - uncomment: '# '
            - key_ignore_case: True
            - append_if_not_found: True

    The same example, except for only ensuring PermitRootLogin is set
    correctly. Thus being able to use the shorthand ``key`` and ``value``
    parameters instead of ``key_values``.

    .. code-block:: yaml

        sshd_config_harden:
          file.keyvalue:
            - name: /etc/ssh/sshd_config
            - key: PermitRootLogin
            - value: without-password
            - separator: ' '
            - uncomment: '# '
            - key_ignore_case: True
            - append_if_not_found: True

    .. note::
        Notice how the key is not matched case-sensitively, this way it
        will correctly identify both 'PermitRootLogin' as well as
        'permitrootlogin'.
    '''
    name = os.path.expanduser(name)

    # default return values
    ret = {
        'name': name,
        'changes': {},
        'pchanges': {},
        'result': None,
        'comment': '',
    }

    if not name:
        return _error(ret, 'Must provide name to file.keyvalue')
    if key is not None and value is not None:
        if isinstance(key_values, dict):
            return _error(
                ret, 'file.keyvalue can not combine key_values with key and value')
        key_values = {str(key): value}
    elif not isinstance(key_values, dict):
        return _error(
            ret, 'file.keyvalue key and value not supplied and key_values empty')

    # try to open the file and only return a comment if ignore_if_missing is
    # enabled, also mark as an error if not
    file_contents = []
    try:
        with salt.utils.files.fopen(name, 'r') as fd:
            file_contents = fd.readlines()
    except (OSError, IOError):
        ret['comment'] = 'unable to open {n}'.format(n=name)
        ret['result'] = True if ignore_if_missing else False
        return ret

    # used to store diff combinations and check if anything has changed
    diff = []
    # store the final content of the file in case it needs to be rewritten
    content = []
    # target format is templated like this
    tmpl = '{key}{sep}{value}' + os.linesep
    # number of lines changed
    changes = 0
    # keep track of number of times a key was updated
    diff_count = {k: count for k in key_values.keys()}

    # read all the lines from the file
    for line in file_contents:
        test_line = line.lstrip(uncomment)
        did_uncomment = True if len(line) > len(test_line) else False

        if key_ignore_case:
            test_line = test_line.lower()

        for key, value in key_values.items():
            test_key = key.lower() if key_ignore_case else key

            # if the line starts with the key
            if test_line.startswith(test_key):
                # if the testline got uncommented then the real line needs to
                # be uncommented too, otherwise there might be separation on
                # a character which is part of the comment set
                working_line = line.lstrip(uncomment) if did_uncomment else line

                # try to separate the line into its components
                line_key, line_sep, line_value = working_line.partition(separator)

                # if separation was unsuccessful then line_sep is empty so
                # no need to keep trying. continue instead
                if line_sep != separator:
                    continue

                # start on the premises the key does not match the actual line
                keys_match = False
                if key_ignore_case:
                    if line_key.lower() == test_key:
                        keys_match = True
                else:
                    if line_key == test_key:
                        keys_match = True

                # if the key was found in the line and separation was
                # successful
                if keys_match:
                    # trial and error have shown it's safest to strip
                    # whitespace from values for the sake of matching
                    line_value = line_value.strip()

                    # make sure the value is an actual string at this point
                    test_value = str(value).strip()

                    # convert test_value and line_value to lowercase if need be
                    if value_ignore_case:
                        line_value = line_value.lower()
                        test_value = test_value.lower()

                    # values match if they are equal at this point
                    values_match = True if line_value == test_value else False

                    # in case a line had its comment removed there are some
                    # edge cases that need consideration where changes are
                    # needed regardless of values already matching.
                    needs_changing = False
                    if did_uncomment:
                        # irrespective of a value, if it was commented out and
                        # changes are still to be made, then it needs to be
                        # commented in
                        if diff_count[key] > 0:
                            needs_changing = True
                        # but if values did not match but there are really no
                        # changes expected anymore either then leave this line
                        elif not values_match:
                            values_match = True
                    else:
                        # a line needs to be removed if it has been seen
                        # enough times and was not commented out, regardless
                        # of value
                        if diff_count[key] == 0:
                            needs_changing = True

                    # then start checking to see if the value needs replacing
                    if not values_match or needs_changing:
                        # the old line always needs to go, so that will be
                        # reflected in the diff (this is the original line
                        # from the file being read)
                        diff.append('- {0}'.format(line))
                        line = line[:0]

                        # any non-zero value means something needs to go back
                        # in its place. negative values are replacing all
                        # lines not commented out, positive values are having
                        # their count reduced by one every replacement
                        if diff_count[key] != 0:
                            # rebuild the line using the key and separator
                            # found and insert the correct value.
                            line = str(tmpl.format(key=line_key,
                                                   sep=line_sep,
                                                   value=value))

                            # display a comment in case a value got converted
                            # into a string
                            if not isinstance(value, str):
                                diff.append('+ {0} (from {1} type){2}'.format(
                                    line.rstrip(), type(value).__name__,
                                    os.linesep))
                            else:
                                diff.append('+ {0}'.format(line))
                        changes += 1
                    # subtract one from the count if it was larger than 0, so
                    # next lines are removed. if it is less than 0 then count
                    # is ignored and all lines will be updated.
                    if diff_count[key] > 0:
                        diff_count[key] -= 1

                # NOTE(review): this ``continue`` advances to the next key;
                # as the last statement of the loop body it is effectively a
                # no-op, so remaining keys are still checked against this
                # line. A ``break`` would be required to actually skip them.
                continue

        # with the line having been checked for all keys (or matched before
        # all keys needed searching), the line can be added to the content to
        # be written once the last checks have been performed
        content.append(line)

    # if append_if_not_found was requested, then append any key/value pairs
    # still having a count left on them
    if append_if_not_found:
        tmpdiff = []
        for key, value in key_values.items():
            if diff_count[key] > 0:
                line = tmpl.format(key=key, sep=separator, value=value)
                tmpdiff.append('+ {0}'.format(line))
                content.append(line)
                changes += 1
        if tmpdiff:
            tmpdiff.insert(0, '- <EOF>' + os.linesep)
            tmpdiff.append('+ <EOF>' + os.linesep)
            diff.extend(tmpdiff)
    # only if append_if_not_found was not set should prepend_if_not_found be
    # considered, benefit of this is that the number of counts left does not
    # mean there might be both a prepend and append happening
    elif prepend_if_not_found:
        did_diff = False
        for key, value in key_values.items():
            if diff_count[key] > 0:
                line = tmpl.format(key=key, sep=separator, value=value)
                if not did_diff:
                    diff.insert(0, ' <SOF>' + os.linesep)
                    did_diff = True
                diff.insert(1, '+ {0}'.format(line))
                content.insert(0, line)
                changes += 1

    # if a diff was made
    if changes > 0:
        # return comment of changes if test
        if __opts__['test']:
            ret['comment'] = 'File {n} is set to be changed ({c} lines)'.format(
                n=name, c=changes)
            if show_changes:
                # For some reason, giving an actual diff even in test=True
                # mode will be seen as both a 'changed' and 'unchanged'. this
                # seems to match the other modules behaviour though
                ret['pchanges']['diff'] = ''.join(diff)

                # add changes to comments for now as well because of how
                # stateoutputter seems to handle pchanges etc.
                # See: https://github.com/saltstack/salt/issues/40208
                ret['comment'] += '\nPredicted diff:\n\r\t\t'
                ret['comment'] += '\r\t\t'.join(diff)
            ret['result'] = None
        # otherwise return the actual diff lines
        else:
            ret['comment'] = 'Changed {c} lines'.format(c=changes)
            if show_changes:
                ret['changes']['diff'] = ''.join(diff)
    else:
        ret['result'] = True
        return ret

    # if not test=true, try and write the file
    if not __opts__['test']:
        try:
            # the with-block closes the file; no explicit close() is needed
            with salt.utils.files.fopen(name, 'w') as fd:
                # write all lines to the file which was just truncated
                fd.writelines(content)
        except (OSError, IOError):
            # return an error if the file was not writable
            ret['comment'] = '{n} not writable'.format(n=name)
            ret['result'] = False
            return ret
        # if all went well, then set result to true
        ret['result'] = True

    return ret
java
@Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { boolean xml = false; logger.debug("Got request: {}?{}", request.getRequestURL(), request.getQueryString()); // Check for xml parameter. for (Enumeration<?> e = request.getParameterNames(); e.hasMoreElements();) { String name = URLDecoder.decode((String) e.nextElement(), "UTF-8"); if (name.equalsIgnoreCase("xml")) { xml = Boolean.parseBoolean(request.getParameter(name)); } } Context context = ReadOnlyContext.getContext(Constants.HTTP_REQUEST.REST.uri, request); try { describeRepository(context, xml, response); } catch (AuthzException ae) { throw RootException.getServletException(ae, request, ACTION_LABEL, EMPTY_STRING_ARRAY); } catch (Throwable th) { throw new InternalError500Exception("", th, request, ACTION_LABEL, "", EMPTY_STRING_ARRAY); } }
java
/**
 * Advance to the next chunk of the chunked-encoded stream: consume the
 * CRLF that terminates the previous chunk's data (except before the very
 * first chunk), then parse the next chunk-size line. A size of zero marks
 * the end of the stream.
 *
 * @throws IOException if reading from the underlying stream fails
 */
private void nextChunk() throws IOException {
    // every chunk except the first is preceded by the previous chunk's CRLF
    if (!this.bof) {
        this.readCrlf();
    }
    this.size = ChunkedInputStream.chunkSize(this.origin);
    this.bof = false;
    this.pos = 0;
    // a zero-length chunk signals the end of the chunked body
    if (this.size == 0) {
        this.eof = true;
    }
}
java
/**
 * Terminate the world: stop every stage and close the provider keepers.
 * Does nothing when already terminated.
 */
public void terminate() {
    if (isTerminated()) {
        return;
    }
    for (final Stage stage : stages.values()) {
        stage.stop();
    }
    loggerProviderKeeper.close();
    mailboxProviderKeeper.close();
    completesProviderKeeper.close();
}
python
def install_libs(self):
    """Install required libraries using pip.

    Builds one "lib" directory per configured Python version. The default
    is a single entry for the current interpreter; the ``lib_versions``
    section of tcex.json, when present, overrides it. ``$env.NAME``
    placeholders in lib_dir/python_executable values are resolved from the
    environment. Exits the process when a pip invocation fails.
    """
    # default or current python version
    lib_data = [{'python_executable': sys.executable, 'lib_dir': self.lib_directory}]

    # check for requirements.txt
    # NOTE(review): handle_error presumably aborts; confirm it does not
    # return, otherwise execution would continue without a requirements file.
    if not os.path.isfile(self.requirements_file):
        self.handle_error('A requirements.txt file is required to install modules.')

    # if branch arg is provide use git branch instead of pypi
    if self.args.branch is not None:
        self._create_temp_requirements()

    # overwrite default with config data
    if self.tcex_json.get('lib_versions'):
        lib_data = self.tcex_json.get('lib_versions')
        print('{}Using "lib" directories defined in tcex.json file.'.format(c.Style.BRIGHT))

    # configure proxy settings
    proxy_enabled = self._configure_proxy()

    # install all requested lib directories
    for data in lib_data:
        # pattern to match env vars in data (e.g. "$env.HOME")
        env_var = re.compile(r'\$env\.([a-zA-Z0-9]+)')

        lib_dir = data.get('lib_dir')
        # replace env vars with env val in the lib dir
        matches = re.findall(env_var, lib_dir)
        if matches:
            env_val = os.environ.get(matches[0])
            if env_val is None:
                self.handle_error(
                    '"{}" env variable set in tcex.json, but could not be resolved.'.format(
                        matches[0]
                    )
                )
            lib_dir = re.sub(env_var, env_val, lib_dir)
        lib_dir_fq = os.path.join(self.app_path, lib_dir)

        if os.access(lib_dir_fq, os.W_OK):
            # remove lib directory from previous runs
            shutil.rmtree(lib_dir_fq)

        # replace env vars with env val in the python executable
        python_executable = data.get('python_executable')
        matches = re.findall(env_var, python_executable)
        if matches:
            env_val = os.environ.get(matches[0])
            python_executable = re.sub(env_var, env_val, python_executable)

        print('Building Lib Dir: {}{}{}'.format(c.Style.BRIGHT, c.Fore.CYAN, lib_dir_fq))
        exe_command = self._build_command(python_executable, lib_dir_fq, proxy_enabled)

        print('Running: {}{}{}'.format(c.Style.BRIGHT, c.Fore.GREEN, ' '.join(exe_command)))
        p = subprocess.Popen(
            exe_command,
            shell=False,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, err = p.communicate()  # pylint: disable=W0612

        if p.returncode != 0:
            # pip failed; surface stderr and abort the whole install.
            print('{}{}FAIL'.format(c.Style.BRIGHT, c.Fore.RED))
            print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err.decode('utf-8')))
            sys.exit('ERROR: {}'.format(err.decode('utf-8')))

        # version comparison: lib dir names are expected to look like
        # "lib_<python-version>" — the part after the first underscore.
        try:
            python_version = lib_dir.split('_', 1)[1]
        except IndexError:
            self.handle_error('Could not determine version from lib string.')

        # track the latest Python version seen across all lib dirs
        if self.latest_version is None:
            self.latest_version = python_version
        elif StrictVersion(python_version) > StrictVersion(self.latest_version):
            self.latest_version = python_version

    # cleanup temp file if required
    if self.use_temp_requirements_file:
        os.remove(self.requirements_file)

    # create lib_latest (symlink/copy for the newest version)
    self._create_lib_latest()
python
def epubcheck(epubname, config=None):
    """
    Run the locally installed epubcheck .jar on the named EPUB file.

    The location of the .jar file comes from ``config.epubcheck_jarfile``
    (loaded from the default config module when *config* is None). A
    missing or wrong file extension is corrected to ".epub" with a
    warning before the check is launched.
    """
    if config is None:
        config = load_config_module()
    stem, extension = os.path.splitext(epubname)
    if not extension:
        log.warning('Missing file extension, appending ".epub"')
        epubname = stem + '.epub'
    elif extension != '.epub':
        log.warning('File does not have ".epub" extension, appending it')
        epubname += '.epub'
    subprocess.call(['java', '-jar', config.epubcheck_jarfile, epubname])
java
/**
 * Configures a Hadoop job to read a snapshot of a Fluo table.
 *
 * <p>Allocates a snapshot timestamp from the Fluo environment and stores it
 * plus the serialized Fluo configuration in the job configuration, then
 * points {@code AccumuloInputFormat} at the backing Accumulo instance,
 * table and authorizations.</p>
 *
 * @param conf   the map-reduce job to configure
 * @param config the Fluo connection configuration
 * @throws RuntimeException wrapping any failure while talking to Fluo/Accumulo
 */
public static void configure(Job conf, SimpleConfiguration config) {
    try {
        FluoConfiguration fconfig = new FluoConfiguration(config);
        try (Environment env = new Environment(fconfig)) {
            // Allocate the snapshot timestamp all mappers will read at.
            long ts = env.getSharedResources().getTimestampTracker().allocateTimestamp().getTxTimestamp();
            conf.getConfiguration().setLong(TIMESTAMP_CONF_KEY, ts);

            // Ship the Fluo configuration to the tasks as a string property.
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            config.save(baos);
            conf.getConfiguration().set(PROPS_CONF_KEY,
                new String(baos.toByteArray(), StandardCharsets.UTF_8));

            AccumuloInputFormat.setZooKeeperInstance(conf, fconfig.getAccumuloInstance(),
                fconfig.getAccumuloZookeepers());
            AccumuloInputFormat.setConnectorInfo(conf, fconfig.getAccumuloUser(),
                new PasswordToken(fconfig.getAccumuloPassword()));
            AccumuloInputFormat.setInputTableName(conf, env.getTable());
            AccumuloInputFormat.setScanAuthorizations(conf, env.getAuthorizations());
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
java
/**
 * Removes from this view every relationship that carries the given tag.
 *
 * <p>NOTE(review): the stream removes elements while traversing the backing
 * relationship collection — presumably {@code getRelationships()} returns a
 * copy or {@code remove} targets a different collection; confirm this does
 * not risk a ConcurrentModificationException.</p>
 *
 * @param tag the tag to match; must not be null
 */
public final void removeRelationshipsWithTag(@Nonnull String tag) {
    getRelationships().stream()
        .map(RelationshipView::getRelationship)
        .filter(r -> r.hasTag(tag))
        .forEach(this::remove);
}
java
/**
 * Moves and deletes the given relative path from every volume.
 *
 * <p>Bug fix: the previous implementation computed
 * {@code result = result && moveAndDeleteRelativePath(...)}, so once one
 * volume failed the {@code &&} short-circuited and the remaining volumes
 * were never processed. The call is now evaluated unconditionally for
 * every volume.</p>
 *
 * @param pathName the path, relative to each volume root
 * @return true only if the operation succeeded on every volume
 * @throws IOException if a volume operation fails with an I/O error
 */
public boolean moveAndDeleteFromEachVolume(String pathName) throws IOException {
    boolean result = true;
    for (int i = 0; i < volumes.length; i++) {
        // Evaluate first so a failure on one volume does not skip the rest.
        boolean deleted = moveAndDeleteRelativePath(volumes[i], pathName);
        result = result && deleted;
    }
    return result;
}
python
def attriblist2str(discoursegraph):
    """
    converts all node/edge attributes whose values are lists into string
    values (e.g. to export them into the `gexf` and `graphml` formats).

    WARNING: This function iterates over all nodes and edges! You can
    speed up conversion by writing a custom function that only fixes
    those attributes that have lists (of strings) as values.

    Parameters
    ----------
    discoursegraph : DiscourseDocumentGraph
    """
    # NOTE(review): uses the legacy networkx 1.x API (``graph.node``,
    # ``graph.edge``, ``edges_iter``) — confirm the pinned networkx version.
    for node_id in discoursegraph:
        node_dict = discoursegraph.node[node_id]
        for attrib in node_dict:
            if isinstance(node_dict[attrib], list):
                # stringify in place so exporters accept the value
                node_dict[attrib] = str(node_dict[attrib])
    for (from_id, to_id) in discoursegraph.edges_iter():
        # there might be multiple edges between 2 nodes
        edge_dict = discoursegraph.edge[from_id][to_id]
        for edge_id in edge_dict:
            for attrib in edge_dict[edge_id]:
                if isinstance(edge_dict[edge_id][attrib], list):
                    edge_dict[edge_id][attrib] \
                        = str(edge_dict[edge_id][attrib])
python
def free_param_names(self):
    """Returns the names of the free hyperparameters.

    Concatenates the free-parameter names of the two child kernels
    ``k1`` and ``k2``, in that order.

    Returns
    -------
    free_param_names : :py:class:`Array`
        Array of the names of the free parameters, in order.
    """
    # NOTE(review): ``scipy.concatenate`` is a legacy alias of
    # ``numpy.concatenate`` that modern SciPy no longer exports — confirm
    # the SciPy version pinned by this project still provides it.
    return scipy.concatenate((self.k1.free_param_names, self.k2.free_param_names))
java
/**
 * Removes the data source identified by the {@code name} attribute,
 * persists the updated configuration, and broadcasts the change so other
 * cluster members pick it up.
 *
 * @throws PageException if the attribute is missing or the removal fails
 */
private void doRemoveDatasource() throws PageException {
    admin.removeDataSource(getString("admin", action, "name"));
    store();
    adminSync.broadcast(attributes, config);
}
java
/**
 * Convenience overload: renders the foreground image without a custom
 * color by delegating with a {@code null} color argument.
 *
 * @param WIDTH            target image width in pixels
 * @param WITH_CENTER_KNOB whether to draw the center knob
 * @param TYPE             the foreground style to render
 * @return the rendered foreground image
 */
protected BufferedImage create_FOREGROUND_Image(final int WIDTH, final boolean WITH_CENTER_KNOB, final ForegroundType TYPE) {
    return create_FOREGROUND_Image(WIDTH, WITH_CENTER_KNOB, TYPE, null);
}
java
/**
 * Deletes the named job, blocking until the (possibly long-running)
 * service operation finishes.
 *
 * @param resourceGroupName the resource group that contains the job
 * @param jobName           the name of the job to delete
 */
public void delete(String resourceGroupName, String jobName) {
    // last() waits for the final emission of the long-running operation.
    deleteWithServiceResponseAsync(resourceGroupName, jobName).toBlocking().last().body();
}
python
def _af_inv_scaled(x):
    """Scale a random vector for using the affinely invariant measures"""
    # Ensure a 2-d (observations x features) layout before computing the
    # sample covariance across columns.
    x = _transform_to_2d(x)
    cov_matrix = np.atleast_2d(np.cov(x, rowvar=False))
    # Whitening: multiplying by the inverse matrix square root of the
    # covariance makes the result invariant to affine transformations.
    cov_matrix_power = _mat_sqrt_inv(cov_matrix)
    return x.dot(cov_matrix_power)
python
def time2pbspro(timeval, unit="s"):
    """
    Convert a number representing a time value in the given unit (Default: seconds)
    to a string following the PbsPro convention: "hours:minutes:seconds".

    >>> assert time2pbspro(2, unit="d") == '48:0:0'
    """
    # Normalize the input to seconds using the project's Time helper.
    seconds = Time(timeval, unit).to("s")
    # Split into h:m:s (previous version reused a variable named
    # ``minutes`` to hold leftover seconds and carried an unused local).
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%d:%d:%d" % (hours, minutes, secs)
java
/**
 * Copies the remaining bytes of the given buffer into a fresh array.
 *
 * <p>The buffer is cloned first, so the caller's position and limit are
 * left untouched.</p>
 *
 * @param byteBuffer the buffer to read; not modified
 * @return a new array containing the buffer's remaining bytes
 */
public static byte[] asArray(ByteBuffer byteBuffer) {
    final ByteBuffer copy = ByteBufferUtil.clone(byteBuffer);
    final byte[] result = new byte[copy.remaining()];
    copy.get(result);
    return result;
}
java
/**
 * Returns the data provider supplying this audience segment, or
 * {@code null} if none has been set.
 */
public com.google.api.ads.admanager.axis.v201811.AudienceSegmentDataProvider getDataProvider() {
    return dataProvider;
}
java
/**
 * Loads the localized task names for the given locale and rebuilds the
 * reverse lookup from task name to its index.
 *
 * <p>Null entries in the localized name array are skipped, so they get no
 * entry in the lookup map.</p>
 *
 * @param locale the locale whose task names should be loaded
 */
void setLocale(Locale locale)
{
    m_taskNames = LocaleData.getStringArray(locale, LocaleData.TASK_NAMES);
    m_taskNumbers.clear();
    for (int index = 0; index < m_taskNames.length; index++)
    {
        String taskName = m_taskNames[index];
        if (taskName != null)
        {
            m_taskNumbers.put(taskName, Integer.valueOf(index));
        }
    }
}
java
/**
 * Adds the "failed to print thread dump" error message under the given
 * property key.
 *
 * @param property the property (field) name the message is attached to; must not be null
 * @return this instance, for call chaining
 */
public FessMessages addErrorsFailedToPrintThreadDump(String property) {
    assertPropertyNotNull(property);
    add(property, new UserMessage(ERRORS_failed_to_print_thread_dump));
    return this;
}
java
/**
 * Deprecated accessor kept for backwards compatibility; delegates to
 * {@code getAvailableZonesMap()}.
 *
 * @return map from zone id to its {@code ZoneMetadata}
 */
@java.lang.Deprecated
public java.util.Map<java.lang.String, com.google.cloud.redis.v1.ZoneMetadata> getAvailableZones() {
    return getAvailableZonesMap();
}
java
/**
 * Benchmark: reads every element of {@code vector} by index.
 *
 * <p>{@code vectorAdd} runs before each benchmark run to (re)populate the
 * vector; the return values are deliberately discarded — only the cost of
 * {@code get} is measured.</p>
 */
@Bench(runs = RUNS, beforeEachRun = "vectorAdd")
public void vectorGet() {
    for (int i = 0; i < vector.size(); i++) {
        vector.get(i);
    }
}
python
def _closeCompletion(self): """Close completion, if visible. Delete widget """ if self._widget is not None: self._widget.close() self._widget = None self._completionOpenedManually = False
python
def _ISO8601_to_UNIXtime(iso): """ Converts an ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00`` to the correspondant UNIXtime :param iso: the ISO8601-formatted string :type iso: string :returns: an int UNIXtime :raises: *TypeError* when bad argument types are provided, *ValueError* when the ISO8601 string is badly formatted """ try: d = datetime.strptime(iso, '%Y-%m-%d %H:%M:%S+00') except ValueError: raise ValueError(__name__ + ": bad format for input ISO8601 string, ' \ 'should have been: YYYY-MM-DD HH:MM:SS+00") return _datetime_to_UNIXtime(d)
python
def txt_line_iterator(path):
    """Iterate through lines of file.

    Args:
      path: path to a text file, opened via ``tf.gfile`` (so it may live on
        any filesystem TensorFlow supports).

    Yields:
      Each line of the file with surrounding whitespace (including the
      trailing newline) stripped.
    """
    with tf.gfile.Open(path) as f:
        for line in f:
            yield line.strip()
python
def run(self, host=None, port=None, debug=None, **options):
    """
    Start the AgoraApp expecting the provided config to have at least
    REDIS and PORT fields.

    :param host: ignored — the server always binds to 0.0.0.0.
    :param port: ignored — the port is taken from ``self.config['PORT']``.
    :param debug: ignored — debug mode is always enabled (reloader off).
    :param options: may contain ``tasks``, an iterable of callables that
        are registered with the module-level batch task list and executed
        by the background batch thread.
    """
    # Register every callable task with the module-level batch list.
    tasks = options.get('tasks', [])
    for task in tasks:
        if task is not None and hasattr(task, '__call__'):
            _batch_tasks.append(task)
    # Run the batch worker alongside the web server.
    thread = Thread(target=self.batch_work)
    thread.start()
    try:
        # host/port/debug arguments are intentionally overridden here.
        super(AgoraApp, self).run(host='0.0.0.0', port=self.config['PORT'], debug=True, use_reloader=False)
    except Exception, e:  # Python 2 syntax; this module targets Python 2
        print e.message
    # Ask the batch thread to stop and wait for it before returning.
    self._stop_event.set()
    if thread.isAlive():
        thread.join()
python
def compute_displays_sweep(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        params: Optional[study.Sweepable] = None,
        qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
        initial_state: Union[int, np.ndarray] = 0,
) -> List[study.ComputeDisplaysResult]:
    """Computes displays in the supplied Circuit or Schedule.

    In contrast to `compute_displays`, this allows for sweeping
    over different parameter values.

    Args:
        program: The circuit or schedule to simulate.
        params: Parameters to run with the program.
        qubit_order: Determines the canonical ordering of the qubits used to
            define the order of amplitudes in the wave function.
        initial_state: If an int, the state is set to the computational
            basis state corresponding to this state. Otherwise if it is a
            np.ndarray it is the full initial state, either a pure state
            or the full density matrix. If it is the pure state it must be
            the correct size, be normalized (an L2 norm of 1), and be
            safely castable to an appropriate dtype for the simulator.
            If it is a mixed state it must be correctly sized and
            positive semidefinite with trace one.

    Returns:
        List of ComputeDisplaysResults for this run, one for each
        possible parameter resolver.
    """
    # Schedules are lowered to a circuit before simulation.
    circuit = (program if isinstance(program, circuits.Circuit)
               else program.to_circuit())
    param_resolvers = study.to_resolvers(params or study.ParamResolver({}))
    qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
    qubits = qubit_order.order_for(circuit.all_qubits())

    compute_displays_results = []  # type: List[study.ComputeDisplaysResult]
    for param_resolver in param_resolvers:
        display_values = {}  # type: Dict[Hashable, Any]

        # Compute the displays in the first Moment.
        # NOTE(review): indexing circuit[0] assumes a non-empty circuit —
        # confirm callers never pass an empty one.
        moment = circuit[0]
        matrix = density_matrix_utils.to_valid_density_matrix(
            initial_state, num_qubits=len(qubits), dtype=self._dtype)
        qubit_map = {q: i for i, q in enumerate(qubits)}
        _enter_moment_display_values_into_dictionary(
            display_values, moment, matrix, qubit_order, qubit_map)

        # Compute the displays in the rest of the Moments: each step
        # result after moment i carries the density matrix used to
        # evaluate the displays of moment i+1.
        all_step_results = self.simulate_moment_steps(
            circuit,
            param_resolver,
            qubit_order,
            initial_state)
        for step_result, moment in zip(all_step_results, circuit[1:]):
            _enter_moment_display_values_into_dictionary(
                display_values,
                moment,
                step_result.density_matrix(),
                qubit_order,
                step_result._qubit_map)

        compute_displays_results.append(study.ComputeDisplaysResult(
            params=param_resolver,
            display_values=display_values))

    return compute_displays_results
java
/**
 * Returns true when the expression is a method call whose object
 * expression matches {@code methodObjectPattern} and whose method name
 * matches {@code methodNamePattern}.
 *
 * <p>The cast is safe because {@code isMethodCallOnObject} only returns
 * true for {@code MethodCallExpression} instances.</p>
 */
public static boolean isMethodCall(Expression expression, String methodObjectPattern, String methodNamePattern) {
    return isMethodCallOnObject(expression, methodObjectPattern)
        && isMethodNamed((MethodCallExpression) expression, methodNamePattern);
}
python
def Convert(self, metadata, value, token=None):
    """Converts a single ArtifactFilesDownloaderResult.

    Delegates to BatchConvert with a one-element batch and yields each
    converted item back to the caller.
    """
    results = self.BatchConvert([(metadata, value)], token=token)
    for converted in results:
        yield converted
python
def get_storage_controller_hotplug_capable(self, controller_type):
    """Returns whether the given storage controller supports
    hot-plugging devices.

    in controller_type of type :class:`StorageControllerType`
        The storage controller to check the setting for.

    return hotplug_capable of type bool
        Returned flag indicating whether the controller is hotplug capable

    :raises TypeError: if *controller_type* is not a
        :class:`StorageControllerType`.
    """
    if not isinstance(controller_type, StorageControllerType):
        raise TypeError("controller_type can only be an instance of type StorageControllerType")
    # Delegate to the underlying VirtualBox API call.
    hotplug_capable = self._call("getStorageControllerHotplugCapable",
                 in_p=[controller_type])
    return hotplug_capable
python
def body_block_content_render(tag, recursive=False, base_url=None):
    """
    Render the tag as body content and call recursively if
    the tag has child tags

    :param tag: BeautifulSoup tag to render
    :param recursive: True when called on a child tag during recursion
    :param base_url: base URL used when resolving relative links
    :returns: list of rendered content blocks (dicts)
    """
    block_content_list = []
    tag_content = OrderedDict()

    if tag.name == "p":
        # Paragraphs are rendered by the paragraph renderer; empty
        # fragments are dropped.
        for block_content in body_block_paragraph_render(tag, base_url=base_url):
            if block_content != {}:
                block_content_list.append(block_content)
    else:
        tag_content = body_block_content(tag, base_url=base_url)

    nodenames = body_block_nodenames()

    tag_content_content = []

    # Collect the content of the tag but only for some tags
    if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]:
        for child_tag in tag:
            if not(hasattr(child_tag, 'name')):
                # Skip NavigableString-like children; only real tags render.
                continue

            if child_tag.name == "p":
                # Ignore paragraphs that start with DOI:
                if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0:
                    continue
                for block_content in body_block_paragraph_render(child_tag, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)

            elif child_tag.name == "fig" and tag.name == "fig-group":
                # Do not fig inside fig-group a second time
                pass

            elif child_tag.name == "media" and tag.name == "fig-group":
                # Do not include a media video inside fig-group a second time
                if child_tag.get("mimetype") == "video":
                    pass

            else:
                # Recurse into the child and gather its rendered blocks.
                for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)

    if len(tag_content_content) > 0:
        if tag.name in nodenames or recursive is False:
            # Block-level tag: wrap collected children under "content".
            tag_content["content"] = []
            for block_content in tag_content_content:
                tag_content["content"].append(block_content)
            block_content_list.append(tag_content)
        else:
            # Not a block tag, e.g. a caption tag, let the content pass through
            block_content_list = tag_content_content
    else:
        block_content_list.append(tag_content)

    return block_content_list
java
/**
 * Attempts to move a class member function into a deeper chunk.
 *
 * <p>The move only happens when the class is a global, the member is a
 * plain member function (not a getter/setter), the target chunk depends on
 * the defining chunk, and the name has no unmovable redeclaration. With
 * stubs enabled a stub is left behind; otherwise the method is moved
 * outright.</p>
 *
 * @param nameInfo              analysis info for the member's name
 * @param deepestCommonModuleRef the deepest chunk shared by all references
 * @param classMemberFunction   the member function being considered
 */
private void tryToMoveMemberFunction(
    NameInfo nameInfo,
    JSModule deepestCommonModuleRef,
    ClassMemberFunction classMemberFunction) {
    // We should only move a property across chunks if:
    // 1) We can move it deeper in the chunk graph,
    // 2) and it's a normal member function, and not a GETTER_DEF or a SETTER_DEF,
    // 3) and the class is available in the global scope.
    Var rootVar = classMemberFunction.getRootVar();
    if (rootVar == null || !rootVar.isGlobal()) {
        return;
    }

    Node definitionNode = classMemberFunction.getDefinitionNode();
    // Only attempt to move normal member functions.
    // A getter or setter cannot be as easily defined outside of the class to which it belongs.
    if (!definitionNode.isMemberFunctionDef()) {
        return;
    }
    if (moduleGraph.dependsOn(deepestCommonModuleRef, classMemberFunction.getModule())) {
        if (hasUnmovableRedeclaration(nameInfo, classMemberFunction)) {
            // If it has been redeclared on the same object, skip it.
            return;
        }
        Node destinationParent = compiler.getNodeForCodeInsertion(deepestCommonModuleRef);
        String className = rootVar.getName();
        if (noStubFunctions) {
            moveClassInstanceMethodWithoutStub(className, definitionNode, destinationParent);
        } else {
            moveClassInstanceMethodWithStub(className, definitionNode, destinationParent);
        }
    }
}
python
def replace(self, repl_class, replacement, target_segment_name=None):
    """Replace a pipe segment, specified by its class, with another segment.

    :param repl_class: class of the pipe(s) to be replaced
    :param replacement: the segment instance to substitute in
    :param target_segment_name: when given, only that segment is modified;
        all other segments are left untouched.
    """
    for segment_name, pipes in iteritems(self):
        # Bug fix: the previous code raised a bare Exception for every
        # segment that did not match ``target_segment_name``, which made
        # targeted replacement impossible in multi-segment pipelines.
        # Non-matching segments are now simply skipped.
        if target_segment_name and segment_name != target_segment_name:
            continue

        repl_pipes = []
        found = False
        for pipe in pipes:
            if isinstance(pipe, repl_class):
                pipe = replacement
                found = True
            repl_pipes.append(pipe)

        # Only rewrite the segment when something actually changed.
        if found:
            self[segment_name] = repl_pipes
java
/**
 * Classifies this filter by which of its two date bounds are set.
 *
 * @return INOPERATIVE when neither bound is set, MIN_DATE when only the
 *         start is set, MAX_DATE when only the end is set, and TIMEFRAME
 *         when both are present
 */
public TimeFrameFilterType getType()
{
   if (startDate == null)
   {
      return endDate == null ? TimeFrameFilterType.INOPERATIVE : TimeFrameFilterType.MAX_DATE;
   }
   return endDate == null ? TimeFrameFilterType.MIN_DATE : TimeFrameFilterType.TIMEFRAME;
}
python
def read_string(self, content):
    """Parse a string containing C/C++ source code.

    :param content: C/C++ source code.
    :type content: str

    :rtype: Declarations
    """
    reader = source_reader.source_reader_t(
        self.__config, None, self.__decl_factory)
    decls = reader.read_string(content)
    # Remember which XML generator produced the intermediate file so
    # later passes can interpret it correctly.
    self.__xml_generator_from_xml_file = reader.xml_generator_from_xml_file
    return decls
java
/**
 * Maps a {@code ComparableFilter.Operator} to the equivalent App Engine
 * datastore {@code FilterOperator}.
 *
 * @param operator the abstract comparison operator to translate
 * @return the corresponding datastore operator
 * @throws UnsupportedOperationException for operators the datastore
 *         cannot express (including {@code null})
 */
public static FilterOperator toFilterOperator(ComparableFilter.Operator operator) {
    if (operator == ComparableFilter.Operator.EQUAL_TO) {
        return FilterOperator.EQUAL;
    } else if (operator == ComparableFilter.Operator.GREATER_THAN) {
        return FilterOperator.GREATER_THAN;
    } else if (operator == ComparableFilter.Operator.GREATER_THAN_OR_EQUAL_TO) {
        return FilterOperator.GREATER_THAN_OR_EQUAL;
    } else if (operator == ComparableFilter.Operator.LESS_THAN) {
        return FilterOperator.LESS_THAN;
    } else if (operator == ComparableFilter.Operator.LESS_THAN_OR_EQUAL_TO) {
        return FilterOperator.LESS_THAN_OR_EQUAL;
    } else if (operator == ComparableFilter.Operator.NOT_EQUAL_TO) {
        return FilterOperator.NOT_EQUAL;
    } else {
        throw new UnsupportedOperationException("Operator [" + operator + "] is not supported by Google App Engine Datastore");
    }
}
java
/**
 * Checks that the array contains no nulls and is strictly (monotonically)
 * increasing under the given comparator.
 *
 * <p>Each adjacent pair must be non-null and strictly increasing; any pair
 * failing either condition triggers the exception.</p>
 *
 * @param values     the array to validate
 * @param comparator ordering used for the comparison
 * @throws SketchesArgumentException if a null or non-increasing pair is found
 */
static final <T> void validateValues(final T[] values, final Comparator<? super T> comparator) {
    final int lenM1 = values.length - 1;
    for (int j = 0; j < lenM1; j++) {
        // The happy path continues; anything else (null or not strictly
        // increasing) falls through to the throw below.
        if ((values[j] != null) && (values[j + 1] != null)
            && (comparator.compare(values[j], values[j + 1]) < 0)) {
            continue;
        }
        throw new SketchesArgumentException(
            "Values must be unique, monotonically increasing and not null.");
    }
}
python
def __get_tags(vm_):
    '''
    Get configured tags.
    '''
    raw_tags = config.get_cloud_config_value(
        'tags', vm_, __opts__, default='[]', search_global=False)
    # Consider warning the user that the tags in the cloud profile
    # could not be interpreted, bad formatting?
    try:
        parsed = literal_eval(raw_tags)
    except Exception:  # pylint: disable=W0703
        parsed = None
    if parsed and isinstance(parsed, list):
        return parsed
    return None
java
/**
 * Sets the maximum duration after validating it against the allowed bound.
 *
 * <p>Fixes a typo in the exception message ("duaration" → "duration").</p>
 *
 * @param maximumDuration the new maximum; must not exceed 23:59:59
 * @throws IllegalArgumentException if the duration exceeds the bound
 */
public void setMaximumDuration(@NotNull final Duration maximumDuration) {
    if (maximumDuration.compareTo(MAXIMUM_DURATION) > 0) {
        throw new IllegalArgumentException("The maximum duration has to be smaller than 23:59:59.");
    }
    this.maximumDuration = durationToDate(maximumDuration);
}
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'synonyms') and self.synonyms is not None: _dict['synonyms'] = [x._to_dict() for x in self.synonyms] if hasattr(self, 'pagination') and self.pagination is not None: _dict['pagination'] = self.pagination._to_dict() return _dict
python
def _set_params(self, x): """set the value of the parameters.""" assert x.size == self.num_params self.varianceU = x[0] self.varianceY = x[1] self.lengthscaleU = x[2] self.lengthscaleY = x[3]
python
def rl_ellipsis(x):
    """
    Replace three dots to ellipsis
    """
    patterns = (
        # If there are more than three dots, do not replace them with an
        # ellipsis, so we avoid silly rewrites like ..... -> …..
        (r'([^\.]|^)\.\.\.([^\.]|$)', u'\\1\u2026\\2'),
        # An ellipsis at the start of a line or right after a quotation
        # mark marks a quotation; the space between the ellipsis and the
        # first word must be removed.
        (re.compile(u'(^|\\"|\u201c|\xab)\\s*\u2026\\s*([А-Яа-яA-Za-z])', re.UNICODE), u'\\1\u2026\\2'),
    )
    return _sub_patterns(patterns, x)
java
/**
 * Validates the value of the given field on {@code validationObject}
 * against every constraint annotation declared on the field.
 *
 * <p>Each recognized annotation dispatches to its dedicated validator.
 * Note that the result of each check overwrites the previous one, so the
 * returned value reflects the last recognized annotation on the field.</p>
 *
 * @param f                the field whose annotations are checked
 * @param validationObject the object holding the field value
 * @return the outcome of the last evaluated constraint; {@code true} when
 *         no recognized constraint annotation is present
 */
@Override
public boolean validate(Field f, Object validationObject) {
    boolean checkvalidation = true;
    for (Annotation annotation : f.getDeclaredAnnotations()) {
        AttributeConstraintType eruleType = getERuleType(annotation.annotationType().getSimpleName());
        if (eruleType != null) {
            Object fieldValue = PropertyAccessorHelper.getObject(validationObject, f);
            switch (eruleType) {
            case ASSERT_FALSE:
                checkvalidation = validateFalse(fieldValue, annotation);
                break;
            case ASSERT_TRUE:
                checkvalidation = validateTrue(fieldValue, annotation);
                break;
            case DECIMAL_MAX:
                checkvalidation = validateMaxDecimal(fieldValue, annotation);
                break;
            case DECIMAL_MIN:
                checkvalidation = validateMinDecimal(fieldValue, annotation);
                break;
            case DIGITS:
                checkvalidation = validateDigits(fieldValue, annotation);
                break;
            case FUTURE:
                checkvalidation = validateFuture(fieldValue, annotation);
                break;
            case MAX:
                checkvalidation = validateMaxValue(fieldValue, annotation);
                break;
            case MIN:
                checkvalidation = validateMinValue(fieldValue, annotation);
                break;
            case NOT_NULL:
                checkvalidation = validateNotNull(fieldValue, annotation);
                break;
            case NULL:
                checkvalidation = validateNull(fieldValue, annotation);
                break;
            case PAST:
                checkvalidation = validatePast(fieldValue, annotation);
                break;
            case PATTERN:
                checkvalidation = validatePattern(fieldValue, annotation);
                break;
            case SIZE:
                checkvalidation = validateSize(fieldValue, annotation);
                break;
            }
        }
    }
    return checkvalidation;
}
python
def commit(self, frame):
    """
    Handles COMMIT command: Commits specified transaction.

    Raises ProtocolError when the frame carries no transaction id or an
    unknown one. Every frame buffered under the transaction is replayed
    through normal processing (with its ``transaction`` header removed),
    queued transaction frames are cleared, and the transaction record is
    discarded.
    """
    if not frame.transaction:
        raise ProtocolError("Missing transaction for COMMIT command.")

    if not frame.transaction in self.engine.transactions:
        raise ProtocolError("Invalid transaction: %s" % frame.transaction)

    # Replay each buffered frame as a regular frame; the header is removed
    # so it is not treated as transactional a second time.
    for tframe in self.engine.transactions[frame.transaction]:
        del tframe.headers['transaction']
        self.process_frame(tframe)

    # Drop whatever the queue manager buffered for this transaction.
    self.engine.queue_manager.clear_transaction_frames(
        self.engine.connection, frame.transaction)
    del self.engine.transactions[frame.transaction]
python
def getsource(classorfunc):
    """
    Return the source code for a class or function.

    Notes:
        Returned source will not include any decorators for the object.
        This will only return the explicit declaration of the object, not
        any dependencies

    Args:
        classorfunc (type or function): the object to get the source code for

    Returns:
        str: text of source code (without any decorators). Note: in python 2,
           this returns unicode
    """
    if _isbuiltin(classorfunc):
        # Builtins have no retrievable Python source.
        return ''

    try:
        source = inspect.getsource(classorfunc)
    except TypeError:  # raised if defined in __main__ - use fallback to get the source instead
        source = getsourcefallback(classorfunc)

    declaration = []

    lines = source.splitlines()
    if PY2 and not isinstance(source, unicode):
        # Decode the raw source using the encoding declared in the file.
        encoding = detect_encoding(iter(lines).next)[0]
        sourcelines = (s.decode(encoding) for s in lines)
    else:
        sourcelines = iter(lines)

    # First, get the declaration: accumulate lines until the colon that
    # closes the def/class header is found.
    found_keyword = False
    for line in sourcelines:
        words = line.split()
        if not words:
            continue
        if words[0] in ('def', 'class'):
            found_keyword = True

        if found_keyword:
            cind = line.find(':')
            if cind > 0:
                declaration.append(line[:cind + 1])
                # Anything after the colon on the same line belongs to
                # the body and is re-attached later.
                after_decl = line[cind + 1:].strip()
                break
            else:
                declaration.append(line)

    bodylines = list(sourcelines)  # the rest of the lines are body

    # If it's a class, make sure we import its superclasses
    # Unfortunately, we need to modify the code to make sure the
    # parent classes have the correct names
    # TODO: find a better way to do this without having to parse code
    if type(classorfunc) == type:
        cls = classorfunc
        base_imports = {}
        for base in cls.__bases__:
            if base.__name__ == 'object' and base.__module__ == 'builtins':
                # don't import `object`
                continue
            if base in base_imports:
                continue
            if base.__module__ == '__main__':
                # main-module bases cannot be imported by module path
                continue
            base_imports[base] = 'from %s import %s' % (base.__module__, base.__name__)

        # Rewrite the class header so the bases use their bare names,
        # matching the import statements generated above.
        cind = declaration[0].index('class ')
        declstring = declaration[0][:cind] + 'class %s(%s):%s' % (
            cls.__name__,
            ','.join([base.__name__ for base in cls.__bases__]),
            after_decl)

        declaration = [impstring for c, impstring in base_imports.items()
                       if c.__module__ != '__builtin__']
        declaration.append(declstring)
    else:
        declaration[-1] += after_decl

    return '\n'.join(declaration + bodylines)
python
def save(self, fname):
    """
    Saves the dictionary in json format

    :param fname: file to save to
    """
    # Open in text mode: json.dump writes str, which a file opened in
    # binary mode ('wb') rejects under Python 3.
    with open(fname, 'w') as f:
        json.dump(self, f)
java
/**
 * Converts raw component-interleaved pixel data into a BufferedImage.
 *
 * <p>The buffer holds {@code format.getComponentCount()} bytes per pixel
 * in R,G,B,A order for whichever components the format declares. Rows are
 * flipped vertically (destination row {@code height - y - 1}), i.e. the
 * source data is bottom-up relative to the image. When the format has no
 * alpha channel, pixels are forced fully opaque.</p>
 *
 * @param imageData the raw pixel buffer
 * @param format    describes which color components the buffer contains
 * @param size      width and height of the image
 * @return a TYPE_INT_ARGB image populated from the buffer
 */
public static BufferedImage getImage(ByteBuffer imageData, Format format, Rectangle size) {
    final int width = size.getWidth();
    final int height = size.getHeight();
    final BufferedImage image = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
    // Write straight into the image's backing int array.
    final int[] pixels = ((DataBufferInt) image.getRaster().getDataBuffer()).getData();
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            final int srcIndex = (x + y * width) * format.getComponentCount();
            // Vertical flip: source row y maps to destination row height-y-1.
            final int destIndex = x + (height - y - 1) * width;
            if (format.hasRed()) {
                pixels[destIndex] |= (imageData.get(srcIndex) & 0xff) << 16;
            }
            if (format.hasGreen()) {
                pixels[destIndex] |= (imageData.get(srcIndex + 1) & 0xff) << 8;
            }
            if (format.hasBlue()) {
                pixels[destIndex] |= imageData.get(srcIndex + 2) & 0xff;
            }
            if (format.hasAlpha()) {
                pixels[destIndex] |= (imageData.get(srcIndex + 3) & 0xff) << 24;
            } else {
                // No alpha component: force full opacity.
                pixels[destIndex] |= 0xff000000;
            }
        }
    }
    return image;
}
java
/**
 * Returns a map from locale string to the locale-variant resource of the
 * current page.
 *
 * <p>Starts from the page resource's own locale map and adds an explicit
 * {@code null} entry for every possible locale without a variant, so
 * callers can distinguish "known locale with no content" from unknown
 * locales.</p>
 *
 * @return map of locale string to resource (or null when no variant exists)
 */
public Map<String, CmsJspResourceWrapper> getLocaleResource() {
    Map<String, CmsJspResourceWrapper> result = getPageResource().getLocaleResource();
    List<Locale> locales = CmsLocaleGroupService.getPossibleLocales(m_cms, getPageResource());
    for (Locale locale : locales) {
        if (!result.containsKey(locale.toString())) {
            result.put(locale.toString(), null);
        }
    }
    return result;
}
python
def report(self, device_name_filter=None, tensor_name_filter=None):
    """Get a report of offending device/tensor names.

    The report includes information about the device name, tensor name,
    first (earliest) timestamp of the alerting events from the tensor, in
    addition to counts of nan, positive inf and negative inf events.

    Args:
      device_name_filter: regex filter for device name, or None (not
        filtered).
      tensor_name_filter: regex filter for tensor name, or None (not
        filtered).

    Returns:
      A list of NumericsAlertReportRow, sorted by first_timestamp in
      ascending order.
    """
    rows = [
        NumericsAlertReportRow(
            device_name=device_name,
            tensor_name=tensor_name,
            first_timestamp=history.first_timestamp(),
            nan_event_count=history.event_count(constants.NAN_KEY),
            neg_inf_event_count=history.event_count(constants.NEG_INF_KEY),
            pos_inf_event_count=history.event_count(constants.POS_INF_KEY))
        for (device_name, tensor_name), history in self._data.items()
    ]

    if device_name_filter:
        device_pattern = re.compile(device_name_filter)
        rows = [row for row in rows if device_pattern.match(row.device_name)]
    if tensor_name_filter:
        tensor_pattern = re.compile(tensor_name_filter)
        rows = [row for row in rows if tensor_pattern.match(row.tensor_name)]

    # Sort results chronologically.
    return sorted(rows, key=lambda row: row.first_timestamp)
java
/**
 * EMF reflective setter: assigns the feature identified by
 * {@code featureID}. Only the {@code NAME} feature is handled here; all
 * other features are delegated to the superclass.
 */
@Override
public void eSet(int featureID, Object newValue) {
    switch (featureID) {
    case SimpleAntlrPackage.REFERENCE_OR_LITERAL__NAME:
        setName((String)newValue);
        return;
    }
    super.eSet(featureID, newValue);
}
java
/**
 * Exports the application's metamodel as a downloadable CSV.
 *
 * <p>Iterates every loaded object specification, collecting one row per
 * (non-excluded) property, collection and contributed action; rows are
 * sorted and serialized with a header line into a {@code metamodel.csv}
 * Clob.</p>
 *
 * @return a text/csv Clob describing the metamodel
 */
@Programmatic
public Clob downloadMetaModel() {
    final Collection<ObjectSpecification> specifications = specificationLoader.allSpecifications();

    final List<MetaModelRow> rows = Lists.newArrayList();
    for (final ObjectSpecification spec : specifications) {
        if (exclude(spec)) {
            continue;
        }
        // One row per property...
        final List<ObjectAssociation> properties = spec.getAssociations(Contributed.EXCLUDED, ObjectAssociation.Filters.PROPERTIES);
        for (final ObjectAssociation property : properties) {
            final OneToOneAssociation otoa = (OneToOneAssociation) property;
            if (exclude(otoa)) {
                continue;
            }
            rows.add(new MetaModelRow(spec, otoa));
        }
        // ...per collection...
        final List<ObjectAssociation> associations = spec.getAssociations(Contributed.EXCLUDED, ObjectAssociation.Filters.COLLECTIONS);
        for (final ObjectAssociation collection : associations) {
            final OneToManyAssociation otma = (OneToManyAssociation) collection;
            if (exclude(otma)) {
                continue;
            }
            rows.add(new MetaModelRow(spec, otma));
        }
        // ...and per action (contributed actions included).
        final List<ObjectAction> actions = spec.getObjectActions(Contributed.INCLUDED);
        for (final ObjectAction action : actions) {
            if (exclude(action)) {
                continue;
            }
            rows.add(new MetaModelRow(spec, action));
        }
    }

    Collections.sort(rows);

    final StringBuilder buf = new StringBuilder();
    buf.append(MetaModelRow.header()).append("\n");
    for (final MetaModelRow row : rows) {
        buf.append(row.asTextCsv()).append("\n");
    }
    return new Clob("metamodel.csv", mimeTypeTextCsv, buf.toString().toCharArray());
}
python
def _call(self, x, out=None):
    """Implement ``self(x[, out])``.

    Applies the wrapped operator to the element-wise product of *x* and
    the stored vector, either returning a fresh result or writing into
    *out*.
    """
    if out is None:
        return self.operator(x * self.vector)
    # In-place path: multiply into a scratch element, then apply the
    # operator writing directly into ``out``.
    scratch = self.domain.element()
    x.multiply(self.vector, out=scratch)
    self.operator(scratch, out=out)
python
def set_parent(self, task_id, params={}, **options):
    """Changes the parent of a task. Each task may only be a subtask of
    a single parent, or no parent task at all. Returns an empty data block.

    Parameters
    ----------
    task : {Id} Globally unique identifier for the task.
    [data] : {Object} Data for the request
      - parent : {Id} The new parent of the task, or `null` for no parent.
    """
    endpoint = '/tasks/{0}/setParent'.format(task_id)
    return self.client.post(endpoint, params, **options)
python
def _GetSignatureScanner(cls, specification_store):
    """Initializes a signature scanner based on a specification store.

    Args:
      specification_store (FormatSpecificationStore): specification store.

    Returns:
      pysigscan.scanner: signature scanner.
    """
    signature_scanner = pysigscan.scanner()
    signature_scanner.set_scan_buffer_size(cls._SCAN_BUFFER_SIZE)
    for format_specification in specification_store.specifications:
        for signature in format_specification.signatures:
            pattern_offset = signature.offset

            if pattern_offset is None:
                # Unanchored pattern: may occur anywhere in the data.
                signature_flags = pysigscan.signature_flags.NO_OFFSET
            elif pattern_offset < 0:
                # Negative offsets are measured back from the end of the
                # data, so flip the sign and mark the offset as end-relative.
                pattern_offset *= -1
                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
            else:
                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START

            signature_scanner.add_signature(
                signature.identifier, pattern_offset, signature.pattern,
                signature_flags)
    return signature_scanner
java
/**
 * Deletes the counter-limit entity with the given id (retrying on
 * transient datastore failures), then removes all of the counter's shards.
 *
 * @param id the counter id to delete
 * @throws IOException if the datastore operation ultimately fails
 */
void deleteResource(String id) throws IOException {
    storeWithRetries(() -> {
        datastore.delete(datastore.newKeyFactory().setKind(KIND_COUNTER_LIMIT).newKey(id));
        return null;
    });
    deleteShardsForCounter(id);
}
java
/**
 * Builds the local job-history details URL for the given job, or returns
 * {@code null} when the history file does not exist.
 *
 * <p>NOTE(review): uses the deprecated single-argument
 * {@code URLEncoder.encode}, which encodes with the platform default
 * charset — confirm that is acceptable or switch to the UTF-8 overload.</p>
 *
 * @param jobHistoryFileLocation path of the job's history file
 * @param jobId the job id to embed in the URL
 * @return the details URL, or null when the history file is missing
 * @throws IOException if the filesystem cannot be queried
 */
public static String urlInJobHistory(
    Path jobHistoryFileLocation, String jobId) throws IOException {
    try {
        FileSystem fs = jobHistoryFileLocation.getFileSystem(conf);
        // Existence check: getFileStatus throws when the file is absent.
        fs.getFileStatus(jobHistoryFileLocation);
    } catch (FileNotFoundException e) {
        return null;
    }
    return "http://" + LOCALMACHINE + ":" + LOCALPORT +
        "/coronajobdetailshistory.jsp?jobid=" + jobId + "&logFile=" +
        URLEncoder.encode(jobHistoryFileLocation.toString());
}
python
def _member_defs(self): """ A single string containing the aggregated member definitions section of the documentation page """ members = self._clsdict['__members__'] member_defs = [ self._member_def(member) for member in members if member.name is not None ] return '\n'.join(member_defs)
java
/**
 * Creates an {@code ExceptionRule} for the given recurrence, adds it to
 * this component, and returns the created rule.
 *
 * @param recur the recurrence the exception rule is built from
 * @return the newly created and added exception rule
 */
public ExceptionRule addExceptionRule(Recurrence recur) {
    ExceptionRule prop = new ExceptionRule(recur);
    addExceptionRule(prop);
    return prop;
}
java
/**
 * Builds a {@code REGR_INTERCEPT(arg1, arg2)} window expression — SQL's
 * linear-regression intercept aggregate over the argument pairs.
 *
 * @param arg1 first (dependent-variable) numeric expression
 * @param arg2 second (independent-variable) numeric expression
 * @return window function expression yielding a {@code Double}
 */
public static WindowOver<Double> regrIntercept(Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
        return new WindowOver<Double>(Double.class, SQLOps.REGR_INTERCEPT, arg1, arg2);
    }
java
/**
 * Registers a column-type name so it can be matched by name and mapped to
 * its JDBC type code.
 *
 * @param columnType database column type name to register
 * @param jdbcType   JDBC type code ({@code java.sql.Types}) for that column type
 */
protected void registerColumnType(String columnType, int jdbcType) {
    // Keep the name-based matcher and the JDBC mapping in sync.
    final ColumnTypeMatcher matcher = new ColumnTypeMatcher(columnType);
    columnTypeMatchers.add(matcher);
    jdbcTypes.put(columnType, jdbcType);
  }
java
/**
 * Builds the implementation-level DRF model from this schema.
 *
 * <p>Converts the schema parameters into their implementation counterpart
 * and wraps them, together with the model key, in a new {@code DRFModel}
 * whose output starts empty.
 *
 * @return a new {@code DRFModel} backed by this schema's parameters
 */
@Override
public DRFModel createImpl() {
    final DRFModel.DRFParameters implParams = this.parameters.createImpl();
    return new DRFModel(model_id.key(), implParams, new DRFModel.DRFOutput(null));
  }
python
def has_local_job_refs(io_hash):
    '''
    :param io_hash: input/output hash
    :type io_hash: dict
    :returns: boolean indicating whether any job-based object references
        (JBORs) pointing at local jobs (job IDs starting with "localjob")
        are found in *io_hash*
    '''
    # The original code repeated the same ref-check in three places (the
    # top-level dict, nested lists, nested dicts).  A single iterative
    # worklist covers all of them identically, without recursion.
    stack = list(io_hash.values())
    while stack:
        value = stack.pop()
        if is_job_ref(value):
            if get_job_from_jbor(value).startswith('localjob'):
                return True
        elif isinstance(value, (list, dict)):
            # Descend only into containers that are not themselves job refs.
            stack.extend(value.values() if isinstance(value, dict) else value)
    return False
java
/**
 * Creates a client interceptor that captures call metadata into the supplied
 * references (headers into {@code headersCapture}, trailers into
 * {@code trailersCapture}).
 *
 * @param headersCapture  reference that the interceptor fills with headers
 * @param trailersCapture reference that the interceptor fills with trailers
 * @return a {@code MetadataCapturingClientInterceptor} bound to the references
 */
public static ClientInterceptor newCaptureMetadataInterceptor(
      AtomicReference<Metadata> headersCapture, AtomicReference<Metadata> trailersCapture) {
    return new MetadataCapturingClientInterceptor(headersCapture, trailersCapture);
  }
java
/**
 * Begins a transaction under {@code parent} at the given isolation level.
 *
 * <p>NOTE(review): the {@code timeout} and {@code unit} arguments are
 * currently ignored — this overload delegates directly to the two-argument
 * {@code txn_begin(Txn, IsolationLevel)}. Confirm whether a timeout is
 * intended to be applied here.
 *
 * @param parent  parent transaction passed through to the delegate
 * @param level   isolation level passed through to the delegate
 * @param timeout ignored
 * @param unit    ignored
 * @return the transaction created by the two-argument overload
 * @throws Exception propagated from the delegate
 */
Txn txn_begin(Txn parent, IsolationLevel level, int timeout, TimeUnit unit) throws Exception {
        return txn_begin(parent, level);
    }
python
def get_float_info(self, field):
    """Get float property from the DMatrix.

    Parameters
    ----------
    field: str
        The field name of the information

    Returns
    -------
    info : numpy.ndarray
        a numpy array of float information of the data
    """
    # Out-parameters filled in by the C API: the number of entries and a
    # pointer to a float buffer owned by the native library.
    # NOTE(review): ctypes.c_ulong is only 32 bits on some platforms (e.g.
    # 64-bit Windows); confirm the C API's length parameter really is
    # `unsigned long` and not a fixed-width 64-bit integer.
    length = ctypes.c_ulong()
    ret = ctypes.POINTER(ctypes.c_float)()
    _check_call(_LIB.XGDMatrixGetFloatInfo(self.handle,
                                           c_str(field),
                                           ctypes.byref(length),
                                           ctypes.byref(ret)))
    # Copy the native buffer into a float32 numpy array of `length` elements.
    return ctypes2numpy(ret, length.value, np.float32)
python
def DOM_copyTo(self, nodeId, targetNodeId, **kwargs):
    """
    Function path: DOM.copyTo
            Domain: DOM
            Method name: copyTo

            WARNING: This function is marked 'Experimental'!

    Creates a deep copy of the specified node and places it into the target
    container before the given anchor.

            Required arguments:
                    'nodeId' (type: NodeId) -> Id of the node to copy.
                    'targetNodeId' (type: NodeId) -> Id of the element to drop the copy into.
            Optional arguments:
                    'insertBeforeNodeId' (type: NodeId) -> Drop the copy before this node (if absent, the copy becomes the last child of <code>targetNodeId</code>).
            Returns:
                    'nodeId' (type: NodeId) -> Id of the node clone.
    """
    # Reject any keyword argument the protocol method does not define.
    passed_keys = list(kwargs.keys())
    allowed = ['insertBeforeNodeId']
    assert all(key in allowed for key in passed_keys), \
        "Allowed kwargs are ['insertBeforeNodeId']. Passed kwargs: %s" % passed_keys
    return self.synchronous_command('DOM.copyTo', nodeId=nodeId,
        targetNodeId=targetNodeId, **kwargs)
java
/**
 * Builds the file name of a SWID tag in the form
 * {@code regId_productName-uniqueSoftwareId.extension}.
 *
 * <p>When {@code extension} is blank, the default
 * {@code SWIDTAG_FILE_EXTENSION} is appended instead.
 *
 * @param regId            registration id component
 * @param productName      product name component
 * @param uniqueSoftwareId unique software id component
 * @param extension        file extension to use; may be blank
 * @return the assembled SWID tag file name
 */
public static String generateSwidFileName(final String regId, final String productName,
            final String uniqueSoftwareId, final String extension) {
    final String suffix = StringUtils.isNotBlank(extension) ? extension : SWIDTAG_FILE_EXTENSION;
    return new StringBuilder()
            .append(regId).append('_')
            .append(productName).append('-')
            .append(uniqueSoftwareId).append('.')
            .append(suffix)
            .toString();
  }
python
def ssn(self):
    """
    Returns a 9 digit Dutch SSN called "burgerservicenummer (BSN)".

    A valid BSN must satisfy the "11-proef" check-digit test; this method
    draws eight random digits and derives a ninth so that the whole number
    passes the test, retrying until it does.
    """
    # Weights of the "11-proef": the last digit counts negatively.
    # See http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch).
    factors = (9, 8, 7, 6, 5, 4, 3, 2, -1)

    def _checksum(digits):
        return sum(d * f for d, f in zip(digits, factors))

    while True:
        # Eight distinct random digits form the candidate body of the BSN.
        digits = self.generator.random.sample(range(10), 8)
        # Pick the ninth digit so the weighted sum becomes divisible by 11.
        digits.append(_checksum(digits) % 11 % 10)
        # (s % 11) may be 10, in which case the %10 trick yields 0 and the
        # candidate can still fail — retry in that case.
        if _checksum(digits) % 11 == 0:
            break

    return "".join(str(d) for d in digits)
java
/**
 * Marshalls the given {@code CreateGeoMatchSetRequest} onto the protocol
 * marshaller, emitting its {@code Name} and {@code ChangeToken} members.
 *
 * @param createGeoMatchSetRequest request to marshall; must not be {@code null}
 * @param protocolMarshaller       target protocol marshaller
 * @throws SdkClientException if the request is {@code null}, or wrapping any
 *         failure raised while marshalling a member
 */
public void marshall(CreateGeoMatchSetRequest createGeoMatchSetRequest, ProtocolMarshaller protocolMarshaller) {

        if (createGeoMatchSetRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(createGeoMatchSetRequest.getName(), NAME_BINDING);
            protocolMarshaller.marshall(createGeoMatchSetRequest.getChangeToken(), CHANGETOKEN_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
java
/**
 * Looks up the first text node whose key equals the given key.
 *
 * @param key key to search for; compared via {@code key.equals(...)}
 * @return the first matching node, or {@code null} when none matches
 */
@Override
public ITextNode getTextNode(String key) {
    // Linear scan over the registered text nodes.
    for (final ITextNode candidate : textNodeList) {
        if (key.equals(candidate.getKey())) {
            return candidate;
        }
    }
    return null;
  }
java
/**
 * Extracts the value part of a {@code name=value} string, trimming
 * whitespace and stripping one pair of surrounding double quotes.
 *
 * @param str string of the form {@code name=value}
 * @return the extracted value, or {@code null} when there is no {@code '='}
 *         preceded by at least one name character
 */
protected String getValue(String str) {
    int index = str.indexOf('=');
    if (index <= 0) {
        return null;
    }
    String value = str.substring(index + 1).trim();
    // Guard against empty values ("name=") and a lone quote ("name=\""):
    // the original unconditionally called charAt(0)/substring and threw
    // StringIndexOutOfBoundsException. Strip only a matched quote pair.
    if (value.length() >= 2 && value.charAt(0) == '\"'
            && value.charAt(value.length() - 1) == '\"') {
        value = value.substring(1, value.length() - 1);
    }
    return value;
}
python
def lift(obj, memo=None):
    """Make a promise out of object `obj`, where `obj` may contain promises
    internally.

    :param obj: Any object.
    :param memo: used for internal caching (similar to :func:`deepcopy`).

    Resolution order, mirroring the branches below:

    * :class:`PromisedObject` and *pass-by-value* objects (:class:`str`,
      :class:`int`, :class:`float`, :class:`complex`) are returned as is.
    * If the object's `id` has an entry in `memo`, the cached value is
      returned.
    * If the object has a `__lift__` method, it is called with `memo`
      (entirely analogous to :func:`deepcopy`) to produce the promise.
    * Instances of the basic container types (list, dict, tuple, set) have
      their items lifted; if any item became a promise, the matching
      constructor (:func:`make_list`, :func:`make_dict`, :func:`make_tuple`,
      :func:`make_set`) builds a promised container, otherwise the object is
      returned unchanged.  For dictionaries the key/value tuples are lifted.
    * Instances of a *subclass* of a basic container type additionally have
      their `__dict__` lifted; :func:`set_dict` restores it on the result.
    * Any other ("reasonable") object has its `__dict__` lifted; if that is
      a promise, a promised object of the same class is created with
      :func:`create_object`.

    Since calling :func:`lift` is an explicit action, we do not require
    reasonable objects to be derived from :class:`Reasonable` as we do with
    serialisation, where such a default behaviour could lead to unexplicable
    bugs."""
    if memo is None:
        memo = {}

    # Promises and immutable scalars need no lifting.
    if isinstance(obj, (PromisedObject, str, int, float, complex)):
        return obj

    # Already lifted earlier in this traversal: reuse the cached result.
    if id(obj) in memo:
        return memo[id(obj)]

    # Objects may define their own lifting protocol.
    if hasattr(obj, '__lift__'):
        rv = obj.__lift__(memo)
        memo[id(obj)] = rv
        return rv

    # For each basic container type: how to enumerate its items, and which
    # promise-constructor rebuilds the container from lifted items.
    actions = {
        list: (lambda x: x, make_list),
        dict: (lambda x: list(x.items()), make_dict),
        tuple: (lambda x: x, make_tuple),
        set: (lambda x: x, make_set)
    }

    # Exact container types (not subclasses): lift the items, but rebuild
    # only when at least one item actually became a promise.
    if obj.__class__ in actions:
        items, construct = actions[obj.__class__]
        tmp = [lift(a, memo) for a in items(obj)]
        if any(isinstance(a, PromisedObject) for a in tmp):
            rv = construct(*tmp)
            memo[id(obj)] = rv
            return rv
        else:
            memo[id(obj)] = obj
            return obj

    # Subclass of a basic container: lift both the instance __dict__ and the
    # object viewed as its base container type.
    subclass = next(filter(
        lambda x: issubclass(obj.__class__, x),
        actions.keys()), None)

    if subclass:
        members = lift(obj.__dict__, memo)
        internal = lift(subclass(obj), memo)
        if isinstance(internal, PromisedObject):
            # NOTE(review): this branch uses `construct_object` while the
            # generic fallback below uses `create_object` — confirm both
            # helpers exist and the distinction is intentional.
            internal = construct_object(obj.__class__, internal)
            rv = set_dict(internal, members)
        elif isinstance(members, PromisedObject):
            rv = set_dict(obj.__class__(internal), members)
        else:
            # Neither part contained promises: return the object untouched.
            rv = obj
        memo[id(obj)] = rv
        return rv

    # Generic object: lift its __dict__; objects without a __dict__ raise
    # AttributeError and are returned as-is.
    try:
        members = lift(obj.__dict__, memo)
        if isinstance(members, PromisedObject):
            rv = create_object(obj.__class__, members)
        else:
            rv = obj
    except AttributeError:
        memo[id(obj)] = obj
        return obj

    memo[id(obj)] = rv
    return rv
python
def filter_host_by_regex(regex):
    """Filter for host

    Build a predicate that accepts items whose host name matches *regex*.

    :param regex: regex to filter
    :type regex: str
    :return: Filter
    :rtype: bool
    """
    # Compile once at filter-construction time; reused on every call.
    pattern = re.compile(regex)

    def inner_filter(items):
        """Inner filter for host. Accept if regex match host_name"""
        host = items["host"]
        if host is None:
            return False
        return pattern.match(host.host_name) is not None

    return inner_filter
java
/**
 * Convenience overload: checks whether the device exposes a step-detector
 * sensor feature, resolving the {@code PackageManager} from the given
 * context and delegating to the {@code PackageManager}-based overload.
 *
 * <p>Annotated {@code @TargetApi(KITKAT)}: the delegate relies on API
 * level 19 functionality.
 *
 * @param context context used to obtain the package manager
 * @return {@code true} if the step-detector sensor feature is present
 */
@TargetApi(Build.VERSION_CODES.KITKAT)
  public static boolean hasStepDetectorSensorFeature(Context context) {
    return hasStepDetectorSensorFeature(context.getPackageManager());
  }
python
def save_password(entry, password, username=None):
    """
    Saves the given password in the user's keychain.

    Best-effort: if the keyring backend is unavailable or rejects the write,
    the failure is logged and swallowed so the caller is never interrupted.

    :param entry: The entry in the keychain. This is a caller specific key.
    :param password: The password to save in the keychain.
    :param username: The username to get the password for. Default is the
        current user.
    """
    if username is None:
        username = get_username()

    has_keychain = initialize_keychain()
    if has_keychain:
        try:
            keyring.set_password(entry, username, password)
        except Exception as e:
            # Logger.warn() is a deprecated alias of Logger.warning();
            # keep the swallow-and-continue behaviour but use the
            # non-deprecated method and record the cause at debug level.
            log.warning("Unable to set password in keyring. Continuing..")
            log.debug(e)