Dataset schema — language: stringclasses (2 values: python, java) · func_code_string: string (lengths 63 to 466k)
python
def help_center_article_translation_create(self, article_id, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/translations#create-translation"
    api_path = "/api/v2/help_center/articles/{article_id}/translations.json"
    api_path = api_path.format(article_id=article_id)
    return self.call(api_path, method="POST", data=data, **kwargs)
java
public static ContextualLogger getLogger(Class clazz, LoggerContext context) {
    return new ContextualLogger(LoggerFactory.getLogger(clazz), context);
}
python
def on_down(self, host):
    """
    Called by the parent Cluster instance when a node is marked down.
    Only intended for internal use.
    """
    future = self.remove_pool(host)
    if future:
        future.add_done_callback(lambda f: self.update_created_pools())
python
def get_device_info(self) -> Mapping[str, str]:
    '''
    Queries Temp-Deck for its build version, model, and serial number

    returns: dict
        Where keys are the strings 'version', 'model', and 'serial',
        and each value is a string identifier

        {
            'serial': '1aa11bb22',
            'model': '1aa11bb22',
            'version': '1aa11bb22'
        }

    Example input from Temp-Deck's serial response:
        "serial:aa11bb22 model:aa11bb22 version:aa11bb22"
    '''
    try:
        return self._recursive_get_info(DEFAULT_COMMAND_RETRIES)
    except (TempDeckError, SerialException, SerialNoResponse) as e:
        return {'error': str(e)}
java
public ApiResponse<Void> deleteCharactersCharacterIdContactsWithHttpInfo(Integer characterId,
        List<Integer> contactIds, String datasource, String token) throws ApiException {
    com.squareup.okhttp.Call call = deleteCharactersCharacterIdContactsValidateBeforeCall(
            characterId, contactIds, datasource, token, null);
    return apiClient.execute(call);
}
java
@Override
public DescribeLoadBalancerTargetGroupsResult describeLoadBalancerTargetGroups(DescribeLoadBalancerTargetGroupsRequest request) {
    request = beforeClientExecution(request);
    return executeDescribeLoadBalancerTargetGroups(request);
}
java
public Object evaluate(Object pContext, VariableResolver pResolver, Map functions,
        String defaultPrefix, Logger pLogger) throws ELException {
    Object ret = mPrefix.evaluate(pContext, pResolver, functions, defaultPrefix, pLogger);

    // Apply the suffixes
    for (int i = 0; mSuffixes != null && i < mSuffixes.size(); i++) {
        ValueSuffix suffix = (ValueSuffix) mSuffixes.get(i);
        ret = suffix.evaluate(ret, pContext, pResolver, functions, defaultPrefix, pLogger);
    }

    return ret;
}
python
def hide_routemap_holder_route_map_content_set_origin_origin_igp(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder",
                                         xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    set = ET.SubElement(content, "set")
    origin = ET.SubElement(set, "origin")
    origin_igp = ET.SubElement(origin, "origin-igp")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def generate(self, tree):
    '''
    generates code based on templates and gen functions
    defined in the <x> lang generator
    '''
    for middleware in DEFAULT_MIDDLEWARES + self.middlewares:
        tree = middleware.process(tree)  # changed in place!!

    original = self._generate_node(tree)
    # first n lines n dependencies
    # after that additional code
    if self.a and tree.type == 'module':
        p = original.split('\n')
        r = '\n'.join(p[:len(tree.dependencies)] +
                      (['\n'] if tree.dependencies else []) +
                      self.a + ['\n'] +
                      p[len(tree.dependencies):]) + '\n'
    else:
        r = original
    r = re.sub(CLOSING_CURLY_ENDLINES, r'}\n\2}', r)
    r = re.sub(JS_BRACKET, r'}\1', r)
    return re.sub(TOO_MANY_ENDLINES, r'\n\n', r)
java
public ExecutionEnvironment.ExecutionState getExecutionState(String topologyName) {
    return awaitResult(delegate.getExecutionState(null, topologyName));
}
java
public String getVolumeLabel() {
    final StringBuilder sb = new StringBuilder();

    for (int i = 0; i < MAX_VOLUME_LABEL_LENGTH; i++) {
        final char c = (char) get8(VOLUME_LABEL_OFFSET + i);
        if (c != 0) {
            sb.append(c);
        } else {
            break;
        }
    }

    return sb.toString();
}
python
def file_to_url(self, file_rel_path):
    """Convert a relative file path to a file URL."""
    _abs_path = os.path.abspath(file_rel_path)
    return urlparse.urlparse(_abs_path, scheme='file').geturl()
java
private FileStatusEntry createPathEntry(final FileStatusEntry parent, final Path childPath) throws IOException {
    final FileStatusEntry entry = parent.newChildInstance(childPath);
    entry.refresh(childPath);
    final FileStatusEntry[] children = doListPathsEntry(childPath, entry);
    entry.setChildren(children);
    return entry;
}
java
public void writeHeader(List<? extends AttributeProvider> providers) throws IOException, AttributeException {
    if (this.columns == null) {
        this.columns = extractColumns(providers);
        writeDescriptionHeader(providers.size());
        writeColumns();
    }
}
python
def to_json(self, indent=None):
    """Serialize this workflow to JSON"""
    inputs = ParameterCollection(self.inputs)
    d = {
        'meta': {
            'name': self.name,
            'description': self.description
        },
        'inputs': [],
        'workflow': [],
        'outputs': [{'name': k, 'node': v} for k, v in six.iteritems(self.output_mapping)]
    }

    for parameter in self.inputs:
        input_info = {
            'name': parameter.name,
            'type': parameter.id
        }
        args, kwargs = parameter.serialize_args()
        args = list(args)
        args.pop(0)  # 'name' is already taken care of
        kwargs.pop('required', None)  # 'required' is assumed True for workflow inputs
        if args or kwargs:
            input_info['args'] = [args, kwargs]
        d['inputs'].append(input_info)

    for node in sorted(six.itervalues(self.nodes_by_id), key=lambda x: x.id):
        task_name = node.task.name
        if not task_name:
            raise ValueError('The task {0} does not have a name and therefore cannot be serialized.'.format(
                node.task.__class__.__name__))

        node_inputs = {}
        for input_name, (source, value) in six.iteritems(node.inputs):
            input_info = {'source': source}
            if source == 'input':
                input_info['input'] = inputs.by_name[value].name
            else:
                input_info['node'] = value
            node_inputs[input_name] = input_info

        d['workflow'].append({
            'id': node.id,
            'task': task_name,
            'inputs': node_inputs
        })

    return json.dumps(d, indent=indent)
python
def get_printer(colors: bool = True, width_limit: bool = True, disabled: bool = False) -> Printer:
    """
    Returns an already initialized instance of the printer.

    :param colors: If False, no colors will be printed.
    :param width_limit: If True, printing width will be limited by console width.
    :param disabled: If True, nothing will be printed.
    """
    global _printer
    global _colors

    # Make sure we can print colors if needed.
    colors = colors and _colors

    # If the printer was never defined before, or the settings have changed.
    if not _printer or (colors != _printer._colors) or (width_limit != _printer._width_limit):
        _printer = Printer(DefaultWriter(disabled=disabled), colors=colors, width_limit=width_limit)

    return _printer
python
def _ConvertDictToObject(self, json_dict):
    """Converts a JSON dict into a path specification object.

    The dictionary of the JSON serialized objects consists of:
    {
        '__type__': 'PathSpec'
        'type_indicator': 'OS'
        'parent': { ... }
        ...
    }

    Here '__type__' indicates the object base type in this case this should
    be 'PathSpec'. The rest of the elements of the dictionary make up the
    path specification object properties.

    Note that json_dict is a dict of dicts and the _ConvertDictToObject
    method will be called for every dict. That is how the path specification
    parent objects are created.

    Args:
        json_dict (dict[str, object]): JSON serialized objects.

    Returns:
        PathSpec: a path specification.

    Raises:
        TypeError: if the JSON serialized object does not contain a
            '__type__' attribute that contains 'PathSpec'.
    """
    # Use __type__ to indicate the object class type.
    class_type = json_dict.get('__type__', None)
    if class_type not in self._CLASS_TYPES:
        raise TypeError('Missing path specification object type.')

    # Remove the class type from the JSON dict since we cannot pass it.
    del json_dict['__type__']

    type_indicator = json_dict.get('type_indicator', None)
    if type_indicator:
        del json_dict['type_indicator']

    # Convert row_condition back to a tuple.
    if 'row_condition' in json_dict:
        json_dict['row_condition'] = tuple(json_dict['row_condition'])

    return path_spec_factory.Factory.NewPathSpec(type_indicator, **json_dict)
java
private static void parseWelcomeFiles(final WelcomeFileListType welcomeFileList, final WebApp webApp) {
    if (welcomeFileList != null && welcomeFileList.getWelcomeFile() != null
            && !welcomeFileList.getWelcomeFile().isEmpty()) {
        welcomeFileList.getWelcomeFile().forEach(webApp::addWelcomeFile);
    }
}
python
def get_nfc_chars(self):
    """
    Returns the set of IPA symbols that are precomposed (decomposable)
    chars. These should not be decomposed during string normalisation,
    because they will not be recognised otherwise.

    In IPA 2015 there is only one precomposed character: ç, the voiceless
    palatal fricative.
    """
    ex = []

    for char in self.ipa.keys():
        if len(char) == 1:
            decomp = unicodedata.normalize('NFD', char)
            if len(decomp) == 2:
                ex.append(char)

    return set(ex)
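A quick standalone check of the decomposition test above (editorial illustration, not part of the dataset row): NFD-normalising the one precomposed IPA 2015 character splits it into a base letter plus a combining mark, which is exactly the len == 2 condition the function looks for.

    import unicodedata

    decomp = unicodedata.normalize('NFD', '\u00e7')  # 'ç', LATIN SMALL LETTER C WITH CEDILLA
    print(len(decomp))                                # 2
    print([hex(ord(c)) for c in decomp])              # ['0x63', '0x327'] -> 'c' + COMBINING CEDILLA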
python
def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):
    """Performs forward pass through the layer weights at layer_index.

    Args:
        vector: vector that has to be passed through in forward pass
        layer_index: index of the layer
        is_transpose: whether the weights of the layer have to be transposed
        is_abs: whether to take the absolute value of the weights

    Returns:
        tensor that corresponds to the forward pass through the layer

    Raises:
        ValueError: if the layer_index is negative or more than num hidden layers
    """
    if layer_index < 0 or layer_index > self.num_hidden_layers:
        raise ValueError('Invalid layer index')

    layer_type = self.layer_types[layer_index]
    weight = self.weights[layer_index]
    if is_abs:
        weight = tf.abs(weight)
    if is_transpose:
        vector = tf.reshape(vector, self.output_shapes[layer_index])
    else:
        vector = tf.reshape(vector, self.input_shapes[layer_index])

    if layer_type in {'ff', 'ff_relu'}:
        if is_transpose:
            weight = tf.transpose(weight)
        return_vector = tf.matmul(weight, vector)
    elif layer_type in {'conv', 'conv_relu'}:
        if is_transpose:
            return_vector = tf.nn.conv2d_transpose(
                vector, weight,
                output_shape=self.input_shapes[layer_index],
                strides=[1, self.cnn_params[layer_index]['stride'],
                         self.cnn_params[layer_index]['stride'], 1],
                padding=self.cnn_params[layer_index]['padding'])
        else:
            return_vector = tf.nn.conv2d(
                vector, weight,
                strides=[1, self.cnn_params[layer_index]['stride'],
                         self.cnn_params[layer_index]['stride'], 1],
                padding=self.cnn_params[layer_index]['padding'])
    else:
        raise NotImplementedError('Unsupported layer type: {0}'.format(layer_type))

    if is_transpose:
        return tf.reshape(return_vector, (self.sizes[layer_index], 1))
    return tf.reshape(return_vector, (self.sizes[layer_index + 1], 1))
python
def get_display_list(brains_or_objects=None, none_item=False):
    """
    Returns a DisplayList with the items sorted by Title

    :param brains_or_objects: list of brains or objects
    :param none_item: adds an item with empty uid and text "Select.." in pos 0
    :return: DisplayList (uid, title) sorted by title ascending
    :rtype: DisplayList
    """
    if brains_or_objects is None:
        return get_display_list(list(), none_item)

    items = list()
    for brain in brains_or_objects:
        uid = api.get_uid(brain)
        if not uid:
            continue
        title = api.get_title(brain)
        items.append((uid, title))

    # Sort items by title ascending
    items.sort(lambda x, y: cmp(x[1], y[1]))

    # Add the first item?
    if none_item:
        items.insert(0, ('', t('Select...')))

    return DisplayList(items)
python
def stemmed(text):
    """
    Returns a list of simplified and stemmed down terms for the inputted
    text. This will remove common terms and words from the search and
    return only the important root terms. This is useful in searching
    algorithms.

    :param text | <str>

    :return [<str>, ..]
    """
    terms = re.split('\s*', toAscii(text))

    output = []
    for term in terms:
        # strip possessive apostrophes
        if term.endswith("'s"):
            stripped_term = term[:-2]
        else:
            stripped_term = term

        single_term = singularize(stripped_term)

        if term in COMMON_TERMS or stripped_term in COMMON_TERMS or single_term in COMMON_TERMS:
            continue

        output.append(single_term)

    return output
python
def get_metadata(self, key) -> str:
    """
    Get the value of a metadata entry. Returns None if the metadata does not exist.

    Args:
        key (str): name of the metadata

    Returns:
        str: the value of the metadata (or None)
    """
    return self.metadata[key] if key in self.metadata else None
python
def get_git_file_path(filename):
    """
    Get relative path for filename in git root.

    :param filename: File name
    :return: relative path, or an empty string if the file is not inside a git repository
    """
    git_root = get_git_root(filename)
    return relpath(filename, git_root).replace("\\", "/") if git_root else ''
java
@SuppressWarnings("unchecked") public EList<IfcRelSequence> getIsSuccessorFrom() { return (EList<IfcRelSequence>) eGet(Ifc2x3tc1Package.Literals.IFC_PROCESS__IS_SUCCESSOR_FROM, true); }
java
public void applyConfiguration(Configuration configuration) {
    if (plugins != null) {
        plugins.stream()
                .filter(pluginDefinition -> pluginDefinition.isConfigurationSupported(configuration))
                .forEach(pluginDefinition -> project.getDependencies().add(configuration.getName(),
                        generateDependencyNotation(configuration, pluginDefinition)));
    }
}
python
def render(self, **kwargs):
    """Renders the HTML representation of the element."""
    if isinstance(self._parent, GeoJson):
        keys = tuple(self._parent.data['features'][0]['properties'].keys())
        self.warn_for_geometry_collections()
    elif isinstance(self._parent, TopoJson):
        obj_name = self._parent.object_path.split('.')[-1]
        keys = tuple(self._parent.data['objects'][obj_name][
            'geometries'][0]['properties'].keys())
    else:
        raise TypeError('You cannot add a GeoJsonTooltip to anything else '
                        'than a GeoJson or TopoJson object.')
    keys = tuple(x for x in keys if x not in ('style', 'highlight'))
    for value in self.fields:
        assert value in keys, ('The field {} is not available in the data. '
                               'Choose from: {}.'.format(value, keys))
    super(GeoJsonTooltip, self).render(**kwargs)
python
async def workerTypeHealth(self, *args, **kwargs):
    """
    Look up the resource health for a workerType

    Return a view of the health of a given worker type

    This method gives output: ``v1/health.json#``

    This method is ``experimental``
    """
    return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs)
java
public void setEditable(String editable) {
    if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(editable)) {
        m_editable = Boolean.valueOf(editable).booleanValue();
    }
}
java
@Override
protected void initGraphics() {
    super.initGraphics();

    titleText = new Text();
    titleText.setFill(tile.getTitleColor());
    Helper.enableNode(titleText, !tile.getTitle().isEmpty());

    text = new Text(tile.getText());
    text.setFill(tile.getUnitColor());
    Helper.enableNode(text, tile.isTextVisible());

    LocalTime duration = tile.getDuration();

    leftText = new Text(Integer.toString(duration.getHour() > 0 ? duration.getHour() : duration.getMinute()));
    leftText.setFill(tile.getValueColor());
    leftUnit = new Text(duration.getHour() > 0 ? "h" : "m");
    leftUnit.setFill(tile.getValueColor());

    rightText = new Text(Integer.toString(duration.getHour() > 0 ? duration.getMinute() : duration.getSecond()));
    rightText.setFill(tile.getValueColor());
    rightUnit = new Text(duration.getHour() > 0 ? "m" : "s");
    rightUnit.setFill(tile.getValueColor());

    timeText = new TextFlow(leftText, leftUnit, rightText, rightUnit);
    timeText.setTextAlignment(TextAlignment.RIGHT);
    timeText.setPrefWidth(PREFERRED_WIDTH * 0.9);

    description = new Label(tile.getDescription());
    description.setAlignment(Pos.TOP_RIGHT);
    description.setWrapText(true);
    description.setTextFill(tile.getTextColor());
    Helper.enableNode(description, !tile.getDescription().isEmpty());

    getPane().getChildren().addAll(titleText, text, timeText, description);
}
python
def encode(self):
    """Encodes this SeqDelay to a binary bytearray."""
    delay_s = int(math.floor(self.delay))
    delay_ms = int((self.delay - delay_s) * 255.0)
    return struct.pack('>H', delay_s) + struct.pack('B', delay_ms)
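Working the packing above by hand for an illustrative delay of 2.5 s (editorial sketch, not part of the dataset row; note the code scales the fractional part by 255, so the third byte is in 1/255-second steps rather than true milliseconds):

    import math
    import struct

    delay = 2.5  # illustrative value for self.delay
    delay_s = int(math.floor(delay))           # 2
    delay_ms = int((delay - delay_s) * 255.0)  # 127
    print(struct.pack('>H', delay_s) + struct.pack('B', delay_ms))  # b'\x00\x02\x7f'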
python
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: RoomContext for this RoomInstance
    :rtype: twilio.rest.video.v1.room.RoomContext
    """
    if self._context is None:
        self._context = RoomContext(self._version, sid=self._solution['sid'], )
    return self._context
java
public Replication queryParams(Map<String, Object> queryParams) {
    this.replication = replication.queryParams(queryParams);
    return this;
}
python
def first_phase(invec, outvec, N1, N2):
    """
    This implements the first phase of the FFT decomposition, using
    the standard FFT many plans.

    Parameters
    ----------
    invec : array
        The input array.
    outvec : array
        The output array.
    N1 : int
        Number of rows.
    N2 : int
        Number of columns.
    """
    global _theplan
    if _theplan is None:
        _theplan = plan_first_phase(N1, N2)
    fexecute(_theplan, invec.ptr, outvec.ptr)
python
def transform_audio(self, y):
    '''Compute the HCQT

    Parameters
    ----------
    y : np.ndarray
        The audio buffer

    Returns
    -------
    data : dict
        data['mag'] : np.ndarray, shape = (n_frames, n_bins, n_harmonics)
            The CQT magnitude
        data['phase']: np.ndarray, shape = mag.shape
            The CQT phase
    '''
    cqtm, phase = [], []

    n_frames = self.n_frames(get_duration(y=y, sr=self.sr))

    for h in self.harmonics:
        C = cqt(y=y, sr=self.sr,
                hop_length=self.hop_length,
                fmin=self.fmin * h,
                n_bins=(self.n_octaves * self.over_sample * 12),
                bins_per_octave=(self.over_sample * 12))

        C = fix_length(C, n_frames)

        C, P = magphase(C)
        if self.log:
            C = amplitude_to_db(C, ref=np.max)
        cqtm.append(C)
        phase.append(P)

    cqtm = np.asarray(cqtm).astype(np.float32)
    phase = np.angle(np.asarray(phase)).astype(np.float32)

    return {'mag': self._index(cqtm), 'phase': self._index(phase)}
python
def websocket(self, uri, *args, **kwargs):
    """Create a websocket route from a decorated function

    :param uri: endpoint at which the socket endpoint will be accessible.
    :type uri: str
    :param args: captures all of the positional arguments passed in
    :type args: tuple(Any)
    :param kwargs: captures the keyword arguments passed in
    :type kwargs: dict(Any)
    :return: The exception function to use as the decorator
    :rtype: fn
    """
    kwargs.setdefault('host', None)
    kwargs.setdefault('strict_slashes', None)
    kwargs.setdefault('subprotocols', None)
    kwargs.setdefault('name', None)
    kwargs['with_context'] = True  # This is the whole point of this plugin

    def wrapper(handler_f):
        nonlocal self, uri, args, kwargs
        return super(Contextualize, self).websocket(uri, *args, **kwargs)(handler_f)

    return wrapper
python
def fetch_result_sets(cls, db, datasource_type):
    """Returns a list of tables [schema1.table1, schema2.table2, ...]

    Datasource_type can be 'table' or 'view'.
    Empty schema corresponds to the list of full names of all the
    tables or views: <schema>.<result_set_name>.
    """
    result_set_df = db.get_df(
        """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
           ORDER BY concat(table_schema, '.', table_name)""".format(
            datasource_type.upper(),
        ),
        None)
    result_sets = []
    for unused, row in result_set_df.iterrows():
        result_sets.append('{}.{}'.format(
            row['table_schema'], row['table_name']))
    return result_sets
python
def watch_dir(path: str) -> None:
    """Add ``path`` to watch for autoreload."""
    _compile_exclude_patterns()
    if config.autoreload or config.debug:
        # Add files to watch for autoreload
        p = pathlib.Path(path)
        # resolve() returns a new Path; the original discarded the result
        p = p.resolve()
        _add_watch_path(p)
java
private File findManifestFileThrowing() throws IOException, URISyntaxException {
    JavaFileObject dummySourceFile = filer.createSourceFile("dummy" + System.currentTimeMillis());
    String dummySourceFilePath = dummySourceFile.toUri().toString();

    if (dummySourceFilePath.startsWith("file:")) {
        if (!dummySourceFilePath.startsWith("file://")) {
            dummySourceFilePath = "file://" + dummySourceFilePath.substring("file:".length());
        }
    } else {
        dummySourceFilePath = "file://" + dummySourceFilePath;
    }

    log.debug("Dummy source file: " + dummySourceFilePath);

    URI cleanURI = new URI(dummySourceFilePath);
    File dummyFile = new File(cleanURI);
    File sourcesGenerationFolder = dummyFile.getParentFile();
    File projectRoot = sourcesGenerationFolder.getParentFile();
    File androidManifestFile = new File(projectRoot, "AndroidManifest.xml");

    for (int i = 0; i < MAX_PARENTS_FROM_SOURCE_FOLDER; i++) {
        if (androidManifestFile.exists()) {
            break;
        } else {
            if (projectRoot.getParentFile() != null) {
                projectRoot = projectRoot.getParentFile();
                androidManifestFile = new File(projectRoot, "AndroidManifest.xml");
            } else {
                break;
            }
        }
    }

    if (!androidManifestFile.exists()) {
        throw new IllegalStateException("Could not find the AndroidManifest.xml file, going up from path ["
                + sourcesGenerationFolder.getAbsolutePath() + "] found using dummy file ["
                + dummySourceFilePath + "] (max attempts: " + MAX_PARENTS_FROM_SOURCE_FOLDER + ")");
    } else {
        log.debug("AndroidManifest.xml file found: " + androidManifestFile.toString());
    }

    return androidManifestFile;
}
python
def lookup(domain):
    """Find the virNetwork object associated to the domain.

    If the domain has more than one network interface,
    the first one is returned.
    None is returned if the domain is not attached to any network.
    """
    xml = domain.XMLDesc(0)
    element = etree.fromstring(xml)
    subelm = element.find('.//interface[@type="network"]')
    if subelm is not None:
        network = subelm.find('.//source').get('network')
        hypervisor = domain.connect()
        return hypervisor.networkLookupByName(network)
    return None
java
public static void setReferences(IReferences references, Iterable<Object> components)
        throws ReferenceException, ConfigException {
    for (Object component : components)
        setReferencesForOne(references, component);
}

/**
 * Unsets references in specific component.
 *
 * To unset references components must implement IUnreferenceable interface. If
 * they don't the call to this method has no effect.
 *
 * @param component the component to unset references.
 *
 * @see IUnreferenceable
 */
public static void unsetReferencesForOne(Object component) {
    if (component instanceof IUnreferenceable)
        ((IUnreferenceable) component).unsetReferences();
}

/**
 * Unsets references in multiple components.
 *
 * To unset references components must implement IUnreferenceable interface. If
 * they don't the call to this method has no effect.
 *
 * @param components the list of components, whose references must be cleared.
 *
 * @see IUnreferenceable
 */
public static void unsetReferences(Iterable<Object> components) {
    for (Object component : components)
        unsetReferencesForOne(component);
}
java
public OvhOperation serviceName_output_graylog_dashboard_POST(String serviceName, Boolean autoSelectOption,
        String description, String optionId, String title) throws IOException {
    String qPath = "/dbaas/logs/{serviceName}/output/graylog/dashboard";
    StringBuilder sb = path(qPath, serviceName);
    HashMap<String, Object> o = new HashMap<String, Object>();
    addBody(o, "autoSelectOption", autoSelectOption);
    addBody(o, "description", description);
    addBody(o, "optionId", optionId);
    addBody(o, "title", title);
    String resp = exec(qPath, "POST", sb.toString(), o);
    return convertTo(resp, OvhOperation.class);
}
python
def _check_index_in_compilations(context: BaseContext, index: str):
    """Check whether a compilation flag is stored at the specified index in the context's shared data."""
    compilations = 'compilations'
    if compilations not in context.shared_data:
        return False
    return index in context.shared_data[compilations]
python
def get_json(self):
    """Create JSON for CNA port.

    :returns: JSON for CNA port as follows:
        {
            "@PortIdx": 1,
            "PortEnable": {
            },
            "Functions": {
            }
        }
    """
    port = self.get_basic_json()
    port['Functions'] = {
        'Function': [f.get_json() for f in self.functions.values()]
    }
    return port
python
def get_java_remote_console_url(self, ip=None):
    """
    Generates a Single Sign-On (SSO) session for the iLO Java Applet console
    and returns the URL to launch it. If the server hardware is unmanaged or
    unsupported, the resulting URL will not use SSO and the iLO Java Applet
    will prompt for credentials. This is not supported on G7/iLO3 or earlier
    servers.

    Args:
        ip: IP address or host name of the server's iLO management processor

    Returns:
        URL
    """
    uri = "{}/javaRemoteConsoleUrl".format(self.data["uri"])
    if ip:
        uri = "{}?ip={}".format(uri, ip)
    return self._helper.do_get(uri)
java
public java.util.List<String> getClassicLinkVPCSecurityGroups() {
    if (classicLinkVPCSecurityGroups == null) {
        classicLinkVPCSecurityGroups = new com.amazonaws.internal.SdkInternalList<String>();
    }
    return classicLinkVPCSecurityGroups;
}
java
@Override
public int compareTo(final Outcome other) {
    return Data.getTotalComparator().compare(this.invocationID, other.invocationID);
}
python
def flg(self, name, help, abbrev=None):
    """Describe a flag"""
    abbrev = abbrev or '-' + name[0]
    longname = '--' + name.replace('_', '-')
    self._add(name, abbrev, longname, action='store_true', help=help)
java
public CertificateListDescriptionInner listByIotHub(String resourceGroupName, String resourceName) {
    return listByIotHubWithServiceResponseAsync(resourceGroupName, resourceName).toBlocking().single().body();
}
java
public static Vector<Object> toXmlRpcReferencesParameters(Collection<Reference> references) {
    Vector<Object> referencesParams = new Vector<Object>();
    for (Reference reference : references) {
        referencesParams.add(reference.marshallize());
    }
    return referencesParams;
}
python
def log_params(params, name="params"): """Dumps the params with `logging.error`.""" for i, param in enumerate(params): if not param: # Empty tuple. continue if not isinstance(param, (list, tuple)): logging.error( "%s[%d] : (%s) = [%s]", name, i, param.shape, onp.array(param)) else: for j, p in enumerate(param): logging.error( "\t%s[%d, %d] = [%s]", name, i, j, onp.array(p))
python
def verify_gmt_integrity(gmt):
    """Make sure that set ids are unique.

    Args:
        gmt (GMT object): list of dicts

    Returns:
        None
    """
    # Verify that set ids are unique
    set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt]
    assert len(set(set_ids)) == len(set_ids), (
        "Set identifiers should be unique. set_ids: {}".format(set_ids))
java
@CheckReturnValue
public RoleData newRole() {
    final RoleData role = new RoleData(roles.size());
    this.roles.add(role);
    return role;
}
python
def supports_tagging(obj):
    """
    :param obj: a class or instance
    """
    if isinstance(obj, type):
        return issubclass(obj, SupportTagging)
    if not isinstance(obj, SupportTagging):
        return False
    if obj.id is None:
        return False
    return True
java
public static Iterable<Function<Object[], Object[]>> getTransformers(Constructor<?> constructor) {
    Iterable<ArgumentTransformer> transformers = Lists.newArrayList(
            new PrimitiveAwareVarArgsTransformer(constructor),
            new PrimitiveTransformer(constructor),
            new VarArgsTransformer(constructor));
    return ImmutableList
            .<Function<Object[], Object[]>>copyOf(filter(transformers, applicableFilter));
}
java
public final int[] getVariableSizes() {
    int[] sizes = new int[size()];
    for (int i = 0; i < vars.length; i++) {
        sizes[i] = ((DiscreteVariable) vars[i]).numValues();
    }
    return sizes;
}
python
def editContactItem(self, contactType, contactItem, contactInfo):
    """
    Edit the given contact item with the given contact type.  Broadcast the
    edit to all L{IOrganizerPlugin} powerups.

    @type contactType: L{IContactType}
    @param contactType: The contact type which will be used to edit the
        contact item.

    @param contactItem: The contact item to edit.

    @type contactInfo: C{dict}
    @param contactInfo: The contact information to use to edit the contact
        item.

    @return: C{None}
    """
    contactType.editContactItem(
        contactItem, **_stringifyKeys(contactInfo))
    self._callOnOrganizerPlugins('contactItemEdited', contactItem)
java
public int addElement(int element, int treap) {
    int treap_;
    if (treap == -1) {
        if (m_defaultTreap == nullNode())
            m_defaultTreap = createTreap(-1);
        treap_ = m_defaultTreap;
    } else {
        treap_ = treap;
    }
    return addElement_(element, 0, treap_);
}
python
def _add_slice(seq, slc):
    """
    Our textwrap routine deals in slices.  This function will concat
    contiguous slices as an optimization so lookup performance is faster.
    It expects a sequence (probably a list) to add the slice to, and will
    extend the last slice of the sequence if it ends where the new slice
    begins.
    """
    if seq and seq[-1].stop == slc.start:
        seq[-1] = slice(seq[-1].start, slc.stop)
    else:
        seq.append(slc)
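A tiny demonstration of the merging behaviour described in the docstring (editorial sketch; the variable values are illustrative, not part of the dataset row): adjacent slices collapse into one, non-adjacent slices are appended.

    seq = [slice(0, 5)]
    _add_slice(seq, slice(5, 9))    # contiguous: merged in place
    print(seq)                      # [slice(0, 9, None)]
    _add_slice(seq, slice(12, 20))  # gap at 9..12: appended as a new slice
    print(seq)                      # [slice(0, 9, None), slice(12, 20, None)]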
python
def parse_toplevel_config(self, config):
    """Parse @config to setup @self state."""
    if not Formatter.initialized:
        html_theme = config.get('html_theme', 'default')

        if html_theme != 'default':
            uri = urllib.parse.urlparse(html_theme)
            if not uri.scheme:
                html_theme = config.get_path('html_theme')
                debug("Using theme located at %s" % html_theme)
            elif uri.scheme.startswith('http'):
                html_theme = self.__download_theme(uri)

        if html_theme == 'default':
            default_theme = os.path.join(HERE, os.pardir,
                                         'hotdoc_bootstrap_theme', 'dist')
            html_theme = os.path.abspath(default_theme)
            debug("Using default theme")

        theme_meta_path = os.path.join(html_theme, 'theme.json')

        if os.path.exists(theme_meta_path):
            with open(theme_meta_path, 'r') as _:
                Formatter.theme_meta = json.loads(_.read())

        searchpath = []
        self.__load_theme_templates(searchpath, HERE)

        Formatter.theme_path = html_theme
        if html_theme:
            self.__load_theme_templates(searchpath, html_theme)

        Formatter.extra_theme_path = config.get_path('html_extra_theme')
        if Formatter.extra_theme_path:
            self.__load_theme_templates(searchpath, Formatter.extra_theme_path)

        Formatter.engine = Engine(
            loader=FileLoader(searchpath, encoding='UTF-8'),
            extensions=[CoreExtension(), CodeExtension()])
        Formatter.engine.global_vars.update({'e': html.escape})

        Formatter.initialized = True
java
public T findById(ID id) {
    T retVal = (T) getEntityManager().find(getEntityClass(), id);
    return retVal;
}
python
def down(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Press down key n times.

    Press the down arrow key n times.
    """
    self.delay(pre_dl)
    self.k.tap_key(self.k.down_key, n, interval)
    self.delay(post_dl)
java
private void writeModelDetails() throws IOException {
    ModelSchemaV3 modelSchema = (ModelSchemaV3) SchemaServer.schema(3, model).fillFromImpl(model);
    startWritingTextFile("experimental/modelDetails.json");
    writeln(modelSchema.toJsonString());
    finishWritingTextFile();
}
java
private void obtainPadding(@StyleRes final int themeResourceId) {
    TypedArray typedArray = getContext().getTheme().obtainStyledAttributes(themeResourceId,
            new int[]{R.attr.materialDialogPaddingLeft, R.attr.materialDialogPaddingTop,
                    R.attr.materialDialogPaddingRight, R.attr.materialDialogPaddingBottom});
    int defaultLeftPadding =
            getContext().getResources().getDimensionPixelSize(R.dimen.dialog_left_padding);
    int defaultTopPadding =
            getContext().getResources().getDimensionPixelSize(R.dimen.dialog_top_padding);
    int defaultRightPadding =
            getContext().getResources().getDimensionPixelSize(R.dimen.dialog_right_padding);
    int defaultBottomPadding =
            getContext().getResources().getDimensionPixelSize(R.dimen.dialog_bottom_padding);
    int left = typedArray.getDimensionPixelSize(0, defaultLeftPadding);
    int top = typedArray.getDimensionPixelSize(1, defaultTopPadding);
    int right = typedArray.getDimensionPixelSize(2, defaultRightPadding);
    int bottom = typedArray.getDimensionPixelSize(3, defaultBottomPadding);
    setPadding(left, top, right, bottom);
}
python
def read_document(fnm):
    """Read a document that is stored in a text file as JSON.

    Parameters
    ----------
    fnm: str
        The path of the document.

    Returns
    -------
    Text
    """
    with codecs.open(fnm, 'rb', 'ascii') as f:
        return Text(json.loads(f.read()))
python
def _calc(cls, **kwargs):
    """Calculate sunrise or sunset based on:

    Parameters:
        jd: Julian Day
        lat: latitude
        lon: longitude
        stage: sunrise or sunset
    """
    zenith = 90.833333  # official value

    jd = kwargs.get("jd", None)
    lat = kwargs.get("lat", None)
    lon = kwargs.get("lon", None)
    stage = kwargs.get("stage", None)

    if jd is None or stage is None or lat is None or lon is None:
        raise ValueError("Must supply a 'jd', 'lat', 'lon', and 'stage' parameter")

    if stage != SunCycles.RISING and stage != SunCycles.SETTING:
        raise ValueError("'stage' parameter must be %s or %s"
                         % (SunCycles.RISING, SunCycles.SETTING))

    longhr = lon / 15.

    if stage == SunCycles.RISING:
        apx = jd + ((6 - longhr) / 24)
    elif stage == SunCycles.SETTING:
        apx = jd + ((18 - longhr) / 24)

    sun_mean_anom = (0.9856 * apx) - 3.289  # sun's mean anomaly

    # sun's longitude
    sun_lon = sun_mean_anom + (1.916 * np.sin(np.radians(sun_mean_anom))) \
        + (0.02 * np.sin(np.radians(2 * sun_mean_anom))) + 282.634
    if sun_lon > 360:
        sun_lon = sun_lon - 360
    elif sun_lon < 0:
        sun_lon = sun_lon + 360

    # sun's right ascension
    right_ascension = np.degrees(np.arctan(0.91764 * np.tan(np.radians(sun_lon))))
    if right_ascension > 360:
        right_ascension = right_ascension - 360
    elif right_ascension < 0:
        right_ascension = right_ascension + 360

    # put sun's right ascension value in the same quadrant as the sun's
    # true longitude
    lQuad = 90. * np.floor(sun_lon / 90.)
    raQuad = 90. * np.floor(right_ascension / 90.)
    right_ascension = right_ascension + (lQuad - raQuad)
    right_ascension = right_ascension / 15.  # Convert to hours

    # Sun's declination
    sinDecl = 0.39782 * np.sin(np.radians(sun_lon))
    cosDecl = np.cos(np.arcsin(sinDecl))

    # Sun's local hour angle
    cosHr = (np.cos(np.radians(zenith)) - (sinDecl * np.sin(np.radians(lat)))) \
        / (cosDecl * np.cos(np.radians(lat)))

    if cosHr > 1:  # Sun doesn't rise at this location on this date
        return -1, -1
    elif cosHr < -1:  # Sun doesn't set at this location on this date
        return -1, -1
    elif stage == SunCycles.RISING:  # Sunrise
        hr = 360 - np.degrees(np.arccos(cosHr))
    elif stage == SunCycles.SETTING:  # Sunset
        hr = np.degrees(np.arccos(cosHr))

    hr = hr / 15.  # Convert angle to hours

    # local mean time of rise/set
    localTime = hr + right_ascension - (0.06571 * apx) - 6.622

    UTtime = localTime - longhr  # adjust to UTC
    if UTtime < 0:
        UTtime = UTtime + 24
    elif UTtime > 24:
        UTtime = UTtime - 24

    hour = np.floor(UTtime)
    minute = (UTtime - hour) * 60
    if minute == 60:
        hour = hour + 1
        minute = 0
    return hour, minute
python
def register(**criteria):
    """
    class decorator to add :class:`Part <cqparts.Part>` or
    :class:`Assembly <cqparts.Assembly>` to the ``cqparts`` search index:

    .. testcode::

        import cqparts
        from cqparts.params import *

        # Created Part or Assembly
        @cqparts.search.register(
            type='motor',
            current_class='dc',
            part_number='ABC123X',
        )
        class SomeMotor(cqparts.Assembly):
            shaft_diam = PositiveFloat(5)
            def make_components(self):
                return {}  # build assembly content

        motor_class = cqparts.search.find(part_number='ABC123X')
        motor = motor_class(shaft_diam=6.0)

    Then use :meth:`find` &/or :meth:`search` to instantiate it.

    .. warning::

        Multiple classes *can* be registered with identical criteria, but
        should be avoided.

        If multiple classes share the same criteria, :meth:`find` will never
        yield the part you want.

        Try adding unique criteria, such as *make*, *model*, *part number*,
        *library name*, &/or *author*.

        To avoid this, learn more in :ref:`tutorial_component-index`.
    """
    def inner(cls):
        # Add class references to search index
        class_list.add(cls)
        for (category, value) in criteria.items():
            index[category][value].add(cls)

        # Retain search criteria
        _entry = dict((k, set([v])) for (k, v) in criteria.items())
        if cls not in class_criteria:
            class_criteria[cls] = _entry
        else:
            for key in _entry.keys():
                class_criteria[cls][key] = class_criteria[cls].get(key, set()) | _entry[key]

        # Return class
        return cls
    return inner
python
def get_special_scen_code(regions, emissions):
    """
    Get special code for MAGICC6 SCEN files.

    At the top of every MAGICC6 and MAGICC5 SCEN file there is a two digit
    number. The first digit, the 'scenfile_region_code', tells MAGICC how
    many regions data is being provided for. The second digit, the
    'scenfile_emissions_code', tells MAGICC which gases are in the SCEN file.

    The variables which are part of ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1``
    are the emissions species which are expected when scenfile_emissions_code
    is 1. Similarly, ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0`` defines the
    emissions species which are expected when scenfile_emissions_code is 0.

    Having these definitions allows Pymagicc to check that the right set of
    emissions has been provided before writing SCEN files.

    Parameters
    ----------
    regions : list_like
        Regions to get code for.
    emissions : list-like
        Emissions to get code for.

    Raises
    ------
    ValueError
        If the special scen code cannot be determined.

    Returns
    -------
    int
        The special scen code for the regions-emissions combination provided.
    """
    if sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0)) == sorted(set(emissions)):
        scenfile_emissions_code = 0
    elif sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)) == sorted(set(emissions)):
        scenfile_emissions_code = 1
    else:
        msg = "Could not determine scen special code for emissions {}".format(emissions)
        raise ValueError(msg)

    if set(regions) == set(["WORLD"]):
        scenfile_region_code = 1
    elif set(regions) == set(["WORLD", "OECD90", "REF", "ASIA", "ALM"]):
        scenfile_region_code = 2
    elif set(regions) == set(["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"]):
        scenfile_region_code = 3
    elif set(regions) == set(
        ["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM", "BUNKERS"]
    ):
        scenfile_region_code = 4

    try:
        return scenfile_region_code * 10 + scenfile_emissions_code
    except NameError:
        msg = "Could not determine scen special code for regions {}".format(regions)
        raise ValueError(msg)
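For illustration only (editorial sketch; the contents of the emissions constants are defined elsewhere in Pymagicc and are not shown in this row): passing the five-region set together with emissions that exactly match the code-1 species would combine the two digits as 2 * 10 + 1.

    # Hypothetical call, assuming `emissions` exactly matches
    # PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1 (defined elsewhere in Pymagicc):
    code = get_special_scen_code(
        regions=["WORLD", "OECD90", "REF", "ASIA", "ALM"],
        emissions=PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1,
    )
    print(code)  # 21 -> region digit 2, emissions digit 1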
python
def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
    '''
    Traverse a dict or list using a colon-delimited (or otherwise delimited,
    using the ``delimiter`` param) target string. The target ``foo:bar:0``
    will return ``data['foo']['bar'][0]`` if this value exists, and will
    otherwise return the dict in the default argument.

    Function will automatically determine the target type. The target
    ``foo:bar:0`` will return data['foo']['bar'][0] if data like
    ``{'foo':{'bar':['baz']}}``, if data like ``{'foo':{'bar':{'0':'baz'}}}``
    then ``return data['foo']['bar']['0']``

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:bar:baz
    '''
    return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter)
python
def _expand_wildcard_action(action):
    """
    :param action: 'autoscaling:*'
    :return: A list of all autoscaling permissions matching the wildcard
    """
    if isinstance(action, list):
        expanded_actions = []
        for item in action:
            expanded_actions.extend(_expand_wildcard_action(item))
        return expanded_actions

    else:
        if '*' in action:
            expanded = [
                expanded_action.lower()
                for expanded_action in all_permissions
                if fnmatch.fnmatchcase(
                    expanded_action.lower(), action.lower()
                )
            ]

            # if we get a wildcard for a tech we've never heard of, just
            # return the wildcard
            if not expanded:
                return [action.lower()]

            return expanded
        return [action.lower()]
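A minimal standalone sketch of the matching step above (editorial illustration; the permission list here is made up): fnmatch.fnmatchcase does a case-sensitive glob comparison, which is why both sides are lower-cased first.

    import fnmatch

    all_permissions = {'autoscaling:DescribePolicies', 'autoscaling:PutScalingPolicy', 'ec2:RunInstances'}
    pattern = 'autoscaling:*'
    matches = [p.lower() for p in all_permissions
               if fnmatch.fnmatchcase(p.lower(), pattern.lower())]
    print(sorted(matches))  # ['autoscaling:describepolicies', 'autoscaling:putscalingpolicy']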
python
def destroy_s3(app='', env='dev', **_):
    """Destroy S3 Resources for _app_ in _env_.

    Args:
        app (str): Application name
        env (str): Deployment environment/account name

    Returns:
        boolean: True if destroyed successfully
    """
    session = boto3.Session(profile_name=env)
    client = session.resource('s3')

    generated = get_details(app=app, env=env)
    archaius = generated.archaius()

    bucket = client.Bucket(archaius['bucket'])

    for item in bucket.objects.filter(Prefix=archaius['path']):
        item.Object().delete()
        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)

    return True
python
def plot_fft(f, S, dt):
    '''Plot fft

    Args
    ----
    f: ndarray
        Array of frequencies produced with PSD
    S: ndarray
        Array of powers produced with PSD
    dt: ndarray
        Sampling rate of sensor
    '''
    import numpy
    import matplotlib.pyplot as plt

    # Number of samples; the original relied on an undefined global `N`
    N = len(S)
    xf = numpy.linspace(0.0, 1 / (2.0 * dt), N // 2)
    plt.plot(xf, 2.0 / N * numpy.abs(S[:N // 2]), linewidth=_linewidth)
    plt.show()

    return None
java
public Collection<Logger> getParentLoggers() {
    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    List<Logger> loggers = new ArrayList<>(ctx.getLoggers());
    Map<String, Logger> parentMap = new HashMap<>();
    try {
        for (Logger logger : loggers) {
            if (null != logger.getParent()
                    && parentMap.get(logger.getParent().getName()) == null) {
                parentMap.put(logger.getParent().getName(), logger.getParent());
            }
        }
        List<Logger> parents = new ArrayList<>(parentMap.values());
        Collections.sort(parents, LOGGER_COMP);
        return parents;
    } finally {
        loggers.clear();
        parentMap.clear();
    }
}
java
protected void set(PofValue target, Object value) {
    navigator.navigate(target).setValue(value);
}
java
public static Encoding getEncodingFromString(String encodingString) {
    if (encodingString == null || encodingString.isEmpty() || encodingString.equalsIgnoreCase("Plain")) {
        return Encoding.Plain;
    } else if (encodingString.equalsIgnoreCase("gzip")) {
        return Encoding.Gzip;
    } else {
        throw new IllegalArgumentException("Unsupported encoding");
    }
}
python
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Uses the count for each day in the date range to recalculate the
    counters for the months for the ``metric`` for ``unique_identifier``.
    Useful for updating the counters for week and month after using
    set_metric_by_day.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing for the setting of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing end
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(
        unique_identifier,
        (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier

    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    metric_key_date_range = self._get_weekly_date_range(
        first_of_month, relativedelta(months=num_months))

    month_date_generator = (first_of_month + relativedelta(months=i)
                            for i in itertools.count())
    # generate a list of first_of_month's in between the start date and the end date
    months_to_update = list(itertools.islice(month_date_generator, num_months))

    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                _, series_results = self.get_metric_by_day(
                    uid, single_metric, from_date=month,
                    limit=monthrange(month.year, month.month)[1])
                month_counter = sum([value for key, value in series_results.items()])

                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
java
public boolean dumpTo(File outFile) throws IOException {
    if (exists()) {
        dump(getOffset(), outFile);
        return true;
    } else {
        return false;
    }
}
java
protected String getArgumentValue(String arg, String[] args, ConsoleWrapper stdin, PrintStream stdout) {
    for (int i = 1; i < args.length; i++) {
        String key = args[i].split("=")[0];
        if (key.equals(arg)) {
            return getValue(args[i]);
        }
    }
    return null;
}
java
public WithCache.CacheState getCacheState() {
    return new WithCache.CacheState(size(), _max, _get, _hit, _rep);
}
java
List<String> split(String value) {
    if (value == null) {
        return Collections.emptyList();
    }
    String[] parts = value.split(",", -1);
    return Arrays.asList(parts);
}
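A standalone illustration of the -1 limit above (editorial sketch, not part of the dataset row): it keeps trailing empty strings, which String.split drops by default.

    import java.util.Arrays;

    public class SplitDemo {
        public static void main(String[] args) {
            System.out.println(Arrays.asList("a,b,,".split(",")));     // [a, b]
            System.out.println(Arrays.asList("a,b,,".split(",", -1))); // [a, b, , ]
        }
    }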
python
def _render_full_resource(self, instance, include, fields):
    """
    Generate a representation of a full resource to match JSON API spec.

    :param instance: The instance to serialize
    :param include: Dictionary of relationships to include
    :param fields: Dictionary of fields to filter
    """
    api_type = instance.__jsonapi_type__
    orm_desc_keys = instance.__mapper__.all_orm_descriptors.keys()
    to_ret = {
        'id': instance.id,
        'type': api_type,
        'attributes': {},
        'relationships': {},
        'included': {}
    }
    attrs_to_ignore = {'__mapper__', 'id'}
    if api_type in fields.keys():
        local_fields = list(map(
            (lambda x: instance.__jsonapi_map_to_py__[x]), fields[api_type]))
    else:
        local_fields = orm_desc_keys

    for key, relationship in instance.__mapper__.relationships.items():
        attrs_to_ignore |= set([c.name for c in relationship.local_columns]) | {key}
        api_key = instance.__jsonapi_map_to_api__[key]

        try:
            desc = get_rel_desc(instance, key, RelationshipActions.GET)
        except PermissionDeniedError:
            continue

        if relationship.direction == MANYTOONE:
            if key in local_fields:
                to_ret['relationships'][api_key] = {
                    'links': self._lazy_relationship(api_type, instance.id, api_key)
                }

            if api_key in include.keys():
                related = desc(instance)
                if related is not None:
                    perm = get_permission_test(related, None, Permissions.VIEW)
                if (key in local_fields
                        and (related is None or not perm(related))):
                    to_ret['relationships'][api_key]['data'] = None
                    continue
                if key in local_fields:
                    to_ret['relationships'][api_key]['data'] = self._render_short_instance(related)  # NOQA
                new_include = self._parse_include(include[api_key])
                built = self._render_full_resource(related, new_include, fields)
                included = built.pop('included')
                to_ret['included'].update(included)
                to_ret['included'][(related.__jsonapi_type__, related.id)] = built  # NOQA
        else:
            if key in local_fields:
                to_ret['relationships'][api_key] = {
                    'links': self._lazy_relationship(api_type, instance.id, api_key),
                }
            if api_key not in include.keys():
                continue
            if key in local_fields:
                to_ret['relationships'][api_key]['data'] = []
            related = desc(instance)
            for item in related:
                try:
                    check_permission(item, None, Permissions.VIEW)
                except PermissionDeniedError:
                    continue
                if key in local_fields:
                    to_ret['relationships'][api_key]['data'].append(
                        self._render_short_instance(item))
                new_include = self._parse_include(include[api_key])
                built = self._render_full_resource(item, new_include, fields)
                included = built.pop('included')
                to_ret['included'].update(included)
                to_ret['included'][(item.__jsonapi_type__, item.id)] = built  # NOQA

    for key in set(orm_desc_keys) - attrs_to_ignore:
        try:
            desc = get_attr_desc(instance, key, AttributeActions.GET)
            if key in local_fields:
                to_ret['attributes'][instance.__jsonapi_map_to_api__[key]] = desc(instance)  # NOQA
        except PermissionDeniedError:
            continue

    return to_ret
java
public String getText() {
    // convert textable to map of text
    Object key = null;
    try {
        // read bindTemplate
        String bindTemplate = getTemplate();

        Map<Object, String> textMap = new Hashtable<Object, String>();

        for (Map.Entry<String, Textable> entry : map.entrySet()) {
            key = entry.getKey();
            try {
                // convert to text
                textMap.put(key, (entry.getValue()).getText());
            } catch (Exception e) {
                throw new SystemException("Unable to build text for key:" + key
                        + " error:" + e.getMessage(), e);
            }
        }

        Debugger.println(this, "bindTemplate=" + bindTemplate);
        String formattedOutput = Text.format(bindTemplate, textMap);
        Debugger.println(this, "formattedOutput=" + formattedOutput);

        return formattedOutput;
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        throw new SetupException(e.getMessage(), e);
    }
}
python
def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):
    """
    Plot the seebeck coefficient as a function of Fermi level

    Args:
        temp: the temperature
        xlim: a list of min and max fermi energy by default (0, and band gap)

    Returns:
        a matplotlib object
    """
    import matplotlib.pyplot as plt
    plt.figure(figsize=(9, 7))
    seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[temp]
    plt.plot(self._bz.mu_steps, seebeck, linewidth=3.0)
    self._plot_bg_limits()
    self._plot_doping(temp)
    if output == 'eig':
        plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])
    if xlim is None:
        plt.xlim(-0.5, self._bz.gap + 0.5)
    else:
        plt.xlim(xlim[0], xlim[1])
    plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
python
def process_target(self):
    """Return target with transformations, if any"""
    if isinstance(self.target, str):
        # Replace single and double quotes with an escaped single-quote.
        # Note: the replacement must be "\\'" -- a bare "\'" is just "'",
        # which would leave single quotes unescaped.
        self.target = self.target.replace("'", "\\'").replace('"', "\\'")
        return "\"{target}\"".format(target=self.target)
    return self.target
python
def uma_rp_get_rpt(self, ticket, claim_token=None, claim_token_format=None,
                   pct=None, rpt=None, scope=None, state=None):
    """Function to be used by a UMA Requesting Party to get RPT token.

    Parameters:
        * **ticket (str, REQUIRED):** ticket
        * **claim_token (str, OPTIONAL):** claim token
        * **claim_token_format (str, OPTIONAL):** claim token format
        * **pct (str, OPTIONAL):** pct
        * **rpt (str, OPTIONAL):** rpt
        * **scope (list, OPTIONAL):** scope
        * **state (str, OPTIONAL):** state that is returned from
          `uma_rp_get_claims_gathering_url` command

    Returns:
        **dict:** The response from the OP.

        Success response::

            {
                "status":"ok",
                "data":{
                    "access_token":"SSJHBSUSSJHVhjsgvhsgvshgsv",
                    "token_type":"Bearer",
                    "pct":"c2F2ZWRjb25zZW50",
                    "upgraded":true
                }
            }

        NeedInfoError response::

            {
                "error":"need_info",
                "ticket":"ZXJyb3JfZGV0YWlscw==",
                "required_claims":[
                    {
                        "claim_token_format":[
                            "http://openid.net/specs/openid-connect-core-1_0.html#IDToken"
                        ],
                        "claim_type":"urn:oid:0.9.2342.19200300.100.1.3",
                        "friendly_name":"email",
                        "issuer":["https://example.com/idp"],
                        "name":"email23423453ou453"
                    }
                ],
                "redirect_user":"https://as.example.com/rqp_claims?id=2346576421"
            }

    Raises:
        **OxdServerError:** When oxd-server reports a generic internal_error

        **InvalidTicketError:** When the oxd server returns an "invalid_ticket" error
    """
    params = {
        "oxd_id": self.oxd_id,
        "ticket": ticket
    }
    if claim_token:
        params["claim_token"] = claim_token
    if claim_token_format:
        params["claim_token_format"] = claim_token_format
    if pct:
        params["pct"] = pct
    if rpt:
        params["rpt"] = rpt
    if scope:
        params["scope"] = scope
    if state:
        params["state"] = state

    logger.debug("Sending command `uma_rp_get_rpt` with params %s", params)
    response = self.msgr.request("uma_rp_get_rpt", **params)
    logger.debug("Received response: %s", response)

    if response['status'] == 'ok':
        return response['data']

    if response['data']['error'] == 'internal_error':
        raise OxdServerError(response['data'])

    if response['data']['error'] == 'need_info':
        return response['data']

    if response['data']['error'] == 'invalid_ticket':
        raise InvalidTicketError(response['data'])
java
protected void initialize(String pFileName, boolean parse) {
    this.fileName = pFileName;
    Path path = null;
    try {
        path = getPath(pFileName);
        List<String> lines = Files.readAllLines(path, Charset.defaultCharset());
        int i = 1;
        for (String line : lines) {
            processLine(line, i, parse);
            i++;  // advance the line counter (the original never incremented it)
        }
    } catch (URISyntaxException | IOException | FileSystemNotFoundException ex) {
        LOGGER.severe(String.format("Error with file: %s, path: %s.", pFileName, path));
        LOGGER.severe(ex.getLocalizedMessage());
    }
}
python
def annotated_dataset_path(cls, project, dataset, annotated_dataset):
    """Return a fully-qualified annotated_dataset string."""
    return google.api_core.path_template.expand(
        "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}",
        project=project,
        dataset=dataset,
        annotated_dataset=annotated_dataset,
    )
python
def replacebranch(idf, loop, branch,
                  listofcomponents, fluid=None,
                  debugsave=False,
                  testing=None):
    """It will replace the components in the branch with components in
    listofcomponents"""
    if fluid is None:
        fluid = ''
    # -------- testing ---------
    testn = 0
    # -------- testing ---------

    # join them into a branch
    # -----------------------
    # np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet
    # change the node names in the component
    # empty the old branch
    # fill in the new components with the node names into this branch
    listofcomponents = _clean_listofcomponents(listofcomponents)

    components = [item[0] for item in listofcomponents]
    connectcomponents(idf, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh3.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------

    fields = SomeFields.a_fields

    thebranch = branch
    componentsintobranch(idf, thebranch, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh4.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------

    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    if debugsave:
        idf.savecopy("hhh7.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------

    # check for the end nodes of the loop
    if loop.key == 'AIRLOOPHVAC':
        fields = SomeFields.a_fields
    if loop.key == 'PLANTLOOP':
        fields = SomeFields.p_fields
    if loop.key == 'CONDENSERLOOP':
        fields = SomeFields.c_fields
    # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields]

    if fluid.upper() == 'WATER':
        # Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name
        supplyconlistname = loop[flnames[3]]
    elif fluid.upper() == 'AIR':
        supplyconlistname = loop[flnames[1]]  # Connector_List_Name'
    supplyconlist = idf.getobject('CONNECTORLIST', supplyconlistname)

    for i in range(1, 100000):  # large range to hit end
        try:
            fieldname = 'Connector_%s_Object_Type' % (i,)
            ctype = supplyconlist[fieldname]
        except bunch_subclass.BadEPFieldError:
            break
        if ctype.strip() == '':
            break
        fieldname = 'Connector_%s_Name' % (i,)
        cname = supplyconlist[fieldname]
        connector = idf.getobject(ctype.upper(), cname)
        if connector.key == 'CONNECTOR:SPLITTER':
            firstbranchname = connector.Inlet_Branch_Name
            cbranchname = firstbranchname
            isfirst = True
        if connector.key == 'CONNECTOR:MIXER':
            lastbranchname = connector.Outlet_Branch_Name
            cbranchname = lastbranchname
            isfirst = False

        if cbranchname == thebranch.Name:
            # rename end nodes
            comps = getbranchcomponents(idf, thebranch)
            if isfirst:
                comp = comps[0]
                inletnodename = getnodefieldname(
                    comp, "Inlet_Node_Name", fluid)
                comp[inletnodename] = [
                    comp[inletnodename],
                    loop[flnames[0]]]  # Plant_Side_Inlet_Node_Name
            else:
                comp = comps[-1]
                outletnodename = getnodefieldname(
                    comp, "Outlet_Node_Name", fluid)
                comp[outletnodename] = [
                    comp[outletnodename],
                    loop[flnames[1]]]  # .Plant_Side_Outlet_Node_Name

    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------

    if fluid.upper() == 'WATER':
        demandconlistname = loop[flnames[7]]  # .Demand_Side_Connector_List_Name
        demandconlist = idf.getobject('CONNECTORLIST', demandconlistname)
        for i in range(1, 100000):  # large range to hit end
            try:
                fieldname = 'Connector_%s_Object_Type' % (i,)
                ctype = demandconlist[fieldname]
            except bunch_subclass.BadEPFieldError:
                break
            if ctype.strip() == '':
                break
            fieldname = 'Connector_%s_Name' % (i,)
            cname = demandconlist[fieldname]
            connector = idf.getobject(ctype.upper(), cname)
            if connector.key == 'CONNECTOR:SPLITTER':
                firstbranchname = connector.Inlet_Branch_Name
                cbranchname = firstbranchname
                isfirst = True
            if connector.key == 'CONNECTOR:MIXER':
                lastbranchname = connector.Outlet_Branch_Name
                cbranchname = lastbranchname
                isfirst = False
            if cbranchname == thebranch.Name:
                # rename end nodes
                comps = getbranchcomponents(idf, thebranch)
                if isfirst:
                    comp = comps[0]
                    inletnodename = getnodefieldname(
                        comp, "Inlet_Node_Name", fluid)
                    comp[inletnodename] = [
                        comp[inletnodename],
                        loop[flnames[4]]]  # .Demand_Side_Inlet_Node_Name
                if not isfirst:
                    comp = comps[-1]
                    outletnodename = getnodefieldname(
                        comp, "Outlet_Node_Name", fluid)
                    comp[outletnodename] = [
                        comp[outletnodename],
                        loop[flnames[5]]]  # .Demand_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------

    if debugsave:
        idf.savecopy("hhh8.idf")

    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh9.idf")
    return thebranch
java
public static FacesMessage getMessage(String messageId, String... params) {
    String summary = null;
    String detail = null;
    ResourceBundle bundle;
    String bundleName;

    FacesContext context = FacesContext.getCurrentInstance();
    Locale locale = context.getViewRoot().getLocale();

    // see if we have a user-provided bundle
    Application app = (FacesContext.getCurrentInstance().getApplication());
    if (null != (bundleName = app.getMessageBundle())) {
        if (null != (bundle = ResourceBundle.getBundle(bundleName, locale,
                Thread.currentThread().getContextClassLoader()))) {
            // see if we have a hit
            try {
                summary = bundle.getString(messageId);
                detail = bundle.getString(messageId + "_detail");
            } catch (MissingResourceException e) {
                // ignore
            }
        }
    }

    // we couldn't find a summary in the user-provided bundle
    if (null == summary) {
        // see if we have a summary in the app provided bundle
        bundle = ResourceBundle.getBundle(FacesMessage.FACES_MESSAGES, locale,
                Thread.currentThread().getContextClassLoader());
        if (null == bundle) {
            throw new NullPointerException();
        }
        // see if we have a hit
        try {
            summary = bundle.getString(messageId);
            detail = bundle.getString(messageId + "_detail");
        } catch (MissingResourceException e) {
            // ignore
        }
    }

    for (int i = 0; i < params.length; i++) {
        if (null != summary) {
            summary = summary.replace("{" + i + "}", params[i]);
        }
        if (null != detail) {
            detail = detail.replace("{" + i + "}", params[i]);
        }
    }

    // At this point, we have a summary and a bundle.
    FacesMessage ret = new FacesMessage(FacesMessage.SEVERITY_ERROR, summary, detail);
    ret.setSeverity(FacesMessage.SEVERITY_ERROR);
    return ret;
}
python
def purge_docs(cls, app, env, docname):  # pragma: no cover
    """Handler for Sphinx's env-purge-doc event.

    This event is emitted when all traces of a source file should be cleaned
    from the environment (that is, if the source file is removed, or before
    it is freshly read). This is for extensions that keep their own caches
    in attributes of the environment.

    For example, there is a cache of all modules on the environment. When a
    source file has been changed, the cache's entries for the file are
    cleared, since the module declarations could have been removed from the
    file.
    """
    state = getattr(env, cls.directive_name, None)
    if state and docname in state.doc_names:
        state.doc_names.remove(docname)
python
def setActiveModule(Module):
    r"""Helps with collecting the members of the imported modules.
    """
    module_name = Module.__name__

    if module_name not in ModuleMembers:
        ModuleMembers[module_name] = []
        ModulesQ.append(module_name)
        Group(Module, {})  # brand the module with __ec_member__

    state.ActiveModuleMemberQ = ModuleMembers[module_name]
python
def ping(host, timeout=False, return_boolean=False):
    '''
    Performs a ping to a host

    CLI Example:

    .. code-block:: bash

        salt '*' network.ping archlinux.org

    .. versionadded:: 2016.11.0

    Return a True or False instead of ping output.

    .. code-block:: bash

        salt '*' network.ping archlinux.org return_boolean=True

    Set the time to wait for a response in seconds.

    .. code-block:: bash

        salt '*' network.ping archlinux.org timeout=3
    '''
    if timeout:
        # Windows ping differs by having the timeout apply to individual
        # echo requests. Divide timeout by tries to mimic BSD behaviour.
        timeout = int(timeout) * 1000 // 4
        cmd = ['ping', '-n', '4', '-w', six.text_type(timeout),
               salt.utils.network.sanitize_host(host)]
    else:
        cmd = ['ping', '-n', '4', salt.utils.network.sanitize_host(host)]
    if return_boolean:
        ret = __salt__['cmd.run_all'](cmd, python_shell=False)
        if ret['retcode'] != 0:
            return False
        else:
            return True
    else:
        return __salt__['cmd.run'](cmd, python_shell=False)
java
public void reset(String name) {
    if (name.equals(Names.MAIN_BRUSH.toString()))
        mainBrushWorkTime = 0;
    if (name.equals(Names.SENSOR.toString()))
        sensorTimeSinceCleaning = 0;
    if (name.equals(Names.SIDE_BRUSH.toString()))
        sideBrushWorkTime = 0;
    if (name.equals(Names.FILTER.toString()))
        filterWorkTime = 0;
}
java
private Set<String> findResourceNamesFromFileSystem(String classPathRootOnDisk, String scanRootLocation, File folder) {
    LOGGER.debug("Scanning for resources in path: {} ({})", folder.getPath(), scanRootLocation);

    File[] files = folder.listFiles();
    if (files == null) {
        return emptySet();
    }

    Set<String> resourceNames = new TreeSet<>();
    for (File file : files) {
        if (file.canRead()) {
            if (file.isDirectory()) {
                resourceNames.addAll(findResourceNamesFromFileSystem(classPathRootOnDisk, scanRootLocation, file));
            } else {
                resourceNames.add(toResourceNameOnClasspath(classPathRootOnDisk, file));
            }
        }
    }
    return resourceNames;
}
java
public static void assertJsonEquals(Object expected, Object actual, Configuration configuration) {
    assertJsonPartEquals(expected, actual, ROOT, configuration);
}
java
public synchronized boolean consolidate() throws IOException {
    if (isTainted()) {
        // proceed with consolidation
        XSequentialEventBuffer nBuffer = new XSequentialEventBuffer(
                buffer.getProvider(), this.attributeMapSerializer);
        int overflowIndex = 0;
        int fileBufferIndex = 0;
        for (int i = 0; i < size; i++) {
            if (overflowIndex < overflowSize && overflowIndices[overflowIndex] == i) {
                nBuffer.append(overflowEntries[overflowIndex]);
                overflowIndex++;
            } else {
                while (holeFlags.get(fileBufferIndex) == true) {
                    fileBufferIndex++;
                }
                nBuffer.append(buffer.get(fileBufferIndex));
                fileBufferIndex++;
            }
        }
        buffer.cleanup();
        buffer = nBuffer;
        overflowSize = 0;
        holeFlags.clear();
        return true;
    } else {
        return false;
    }
}
java
public static List<Map<String, Object>> readExcelToMapList(File file, Integer scale) throws ReadExcelException {
    return XLSReader.readExcel(file, scale).getDatas();
}
java
@Pure
@SuppressWarnings("checkstyle:npathcomplexity")
public static URL replaceExtension(URL filename, String extension) {
    if (filename == null) {
        return null;
    }
    if (extension == null) {
        return filename;
    }
    String path = filename.getPath().replaceFirst(
            Pattern.quote(URL_PATH_SEPARATOR) + "+$", ""); //$NON-NLS-1$ //$NON-NLS-2$
    if (!path.isEmpty()) {
        int idx = path.lastIndexOf(URL_PATH_SEPARATOR);
        final StringBuilder buf = new StringBuilder((idx < 0) ? "" : //$NON-NLS-1$
                decodeHTMLEntities(path.substring(0, idx + 1)));
        final String largeBasename = decodeHTMLEntities(path.substring(idx + 1));
        idx = largeBasename.lastIndexOf(getFileExtensionCharacter());
        if (idx < 0) {
            buf.append(largeBasename);
        } else {
            buf.append(largeBasename.substring(0, idx));
        }
        if (!"".equals(extension) && !extension.startsWith(EXTENSION_SEPARATOR)) { //$NON-NLS-1$
            buf.append(EXTENSION_SEPARATOR);
        }
        buf.append(extension);
        path = buf.toString();
    }
    try {
        if (isJarURL(filename)) {
            return new URL(
                    filename.getProtocol(),
                    filename.getHost(),
                    filename.getPort(),
                    path);
        }
        return new URI(
                filename.getProtocol(),
                filename.getUserInfo(),
                filename.getHost(),
                filename.getPort(),
                path,
                encodeHTMLEntities(filename.getQuery()),
                filename.getRef()).toURL();
    } catch (AssertionError e) {
        throw e;
    } catch (Throwable exception) {
        //
    }
    try {
        return new URL(
                filename.getProtocol(),
                filename.getHost(),
                path);
    } catch (AssertionError e) {
        throw e;
    } catch (Throwable exception) {
        return null;
    }
}
java
public static boolean isBindable(Class<?> subClass) {
    return !subClass.isInterface()
            && !Modifier.isAbstract(subClass.getModifiers())
            && subClass.getTypeParameters().length == 0;
}
python
def apply_injectables(self, targets):
    """Given an iterable of `Target` instances, apply their transitive injectables."""
    target_types = {type(t) for t in targets}
    target_subsystem_deps = {s for s in itertools.chain(*(t.subsystems() for t in target_types))}
    for subsystem in target_subsystem_deps:
        # TODO: The is_initialized() check is primarily for tests and would be nice to do away with.
        if issubclass(subsystem, InjectablesMixin) and subsystem.is_initialized():
            subsystem.global_instance().injectables(self)