language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def adam(f, x, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, maxiter=1000, tol=1e-16, callback=None): r"""ADAM method to minimize an objective function. General implementation of ADAM for solving .. math:: \min f(x) where :math:`f` is a differentiable functional. The algorithm is described in [KB2015] (`arxiv <https://arxiv.org/abs/1412.6980>`_). All parameter names and default values are taken from the article. Parameters ---------- f : `Functional` Goal functional. Needs to have ``f.gradient``. x : ``f.domain`` element Starting point of the iteration, updated in place. learning_rate : positive float, optional Step length of the method. beta1 : float in [0, 1), optional Update rate for first order moment estimate. beta2 : float in [0, 1), optional Update rate for second order moment estimate. eps : positive float, optional A small constant for numerical stability. maxiter : int, optional Maximum number of iterations. tol : positive float, optional Tolerance that should be used for terminating the iteration. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. See Also -------- odl.solvers.smooth.gradient.steepest_descent : Simple gradient descent. odl.solvers.iterative.iterative.landweber : Optimized solver for the case ``f(x) = ||Ax - b||_2^2``. odl.solvers.iterative.iterative.conjugate_gradient : Optimized solver for the case ``f(x) = x^T Ax - 2 x^T b``. References ---------- [KB2015] Kingma, D P and Ba, J. *Adam: A Method for Stochastic Optimization*, ICLR 2015. """ grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `grad` {!r}' ''.format(x, grad.domain)) m = grad.domain.zero() v = grad.domain.zero() grad_x = grad.range.element() for _ in range(maxiter): grad(x, out=grad_x) if grad_x.norm() < tol: return m.lincomb(beta1, m, 1 - beta1, grad_x) v.lincomb(beta2, v, 1 - beta2, grad_x ** 2) step = learning_rate * np.sqrt(1 - beta2) / (1 - beta1) x.lincomb(1, x, -step, m / (np.sqrt(v) + eps)) if callback is not None: callback(x)
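For illustration only, here is a minimal NumPy-only sketch of the same update rule applied to a toy quadratic. It deliberately skips the ODL `Functional`, `domain`, and `lincomb` machinery the function above relies on, and it keeps the snippet's constant bias-correction factor rather than the per-iteration correction from the Adam paper; all names and values below are made up for the example.

```python
import numpy as np

def toy_gradient(x):
    # Gradient of f(x) = 0.5 * ||x - 3||^2, so the minimizer is x = 3.
    return x - 3.0

x = np.zeros(4)
m = np.zeros_like(x)
v = np.zeros_like(x)
learning_rate, beta1, beta2, eps = 1e-1, 0.9, 0.999, 1e-8

for _ in range(2000):
    g = toy_gradient(x)
    if np.linalg.norm(g) < 1e-10:
        break
    m = beta1 * m + (1 - beta1) * g       # first moment estimate
    v = beta2 * v + (1 - beta2) * g ** 2  # second moment estimate
    step = learning_rate * np.sqrt(1 - beta2) / (1 - beta1)
    x = x - step * m / (np.sqrt(v) + eps)

print(x)  # each entry should be close to 3
```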
java
public long factMatching(long n) { return match(n) .when(caseLong(0)).get(() -> 1L) .when(caseLong(any())).get(i -> i * factMatching(i - 1)) .getMatch(); }
python
def _get_auth(self): """Return the authorization data for an SNMP request. :returns: A :class:`pysnmp.entity.rfc3413.oneliner.cmdgen.CommunityData` object. """ if self.version == SNMP_V3: # Handling auth/encryption credentials is not (yet) supported. # This version supports a security name analogous to community. return cmdgen.UsmUserData(self.security) else: mp_model = 1 if self.version == SNMP_V2C else 0 return cmdgen.CommunityData(self.community, mpModel=mp_model)
python
def listify(values, N=1, delim=None): """Return an N-length list, with elements values, extrapolating as necessary. >>> listify("don't split into characters") ["don't split into characters"] >>> listify("len = 3", 3) ['len = 3', 'len = 3', 'len = 3'] >>> listify("But split on a delimiter, if requested.", delim=',') ['But split on a delimiter', ' if requested.'] >>> listify(["obj 1", "obj 2", "len = 4"], N=4) ['obj 1', 'obj 2', 'len = 4', 'len = 4'] >>> listify(iter("len=7"), N=7) ['l', 'e', 'n', '=', '7', '7', '7'] >>> listify(iter("len=5")) ['l', 'e', 'n', '=', '5'] >>> listify(None, 3) [[], [], []] >>> listify([None],3) [None, None, None] >>> listify([], 3) [[], [], []] >>> listify('', 2) ['', ''] >>> listify(0) [0] >>> listify(False, 2) [False, False] """ ans = [] if values is None else values # convert non-string non-list iterables into a list if hasattr(ans, '__iter__') and not isinstance(ans, basestring): ans = list(ans) else: # split the string (if possible) if isinstance(delim, basestring) and isinstance(ans, basestring): try: ans = ans.split(delim) except (IndexError, ValueError, AttributeError, TypeError): ans = [ans] else: ans = [ans] # pad the end of the list if a length has been specified if len(ans): if len(ans) < N and N > 1: ans += [ans[-1]] * (N - len(ans)) else: if N > 1: ans = [[]] * N return ans
python
def execute_r(prog, quiet): """Run the R code prog in an R subprocess @raises ValueError if the subprocess exits with non-zero status """ FNULL = open(os.devnull, 'w') if quiet else None try: input_proc = subprocess.Popen(["echo", prog], stdout=subprocess.PIPE) status = subprocess.call("R --no-save --quiet", stdin=input_proc.stdout, stdout=FNULL, stderr=subprocess.STDOUT, shell=True) # warning, this is a security problem if status != 0: raise ValueError("ggplot2 bridge failed for program: {}." " Check for an error".format(prog)) finally: if FNULL is not None: FNULL.close()
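The inline comment above flags `shell=True` as a security problem. As a hedged aside (not part of the original code), an equivalent call can pass the program on stdin without a shell or the `echo` subprocess; this assumes an `R` executable is on the PATH, and the helper name is hypothetical.

```python
import subprocess

def execute_r_sketch(prog, quiet=False):
    """Run the R program `prog` in an R subprocess, without invoking a shell."""
    result = subprocess.run(
        ["R", "--no-save", "--quiet"],
        input=prog.encode("utf-8"),
        stdout=subprocess.DEVNULL if quiet else None,
        stderr=subprocess.STDOUT,
    )
    if result.returncode != 0:
        raise ValueError(
            "ggplot2 bridge failed for program: {}. Check for an error".format(prog))
```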
java
public void extendedGet(String remoteFileName, long size, DataSink sink, MarkerListener mListener) throws IOException, ClientException, ServerException { extendedGet(remoteFileName, 0, size, sink, mListener); }
java
private void lexicalDescend(PageCompilingContext pc, Element element, boolean shouldPopScope) { //pop form if ("form".equals(element.tagName())) pc.form = null; //pop compiler if the scope ends if (shouldPopScope) { pc.lexicalScopes.pop(); } }
java
public void resetRow(int filepos, int rowsize) throws IOException { mark = 0; reset(); if (buf.length < rowsize) { buf = new byte[rowsize]; } filePos = filepos; size = count = rowsize; pos = 4; buf[0] = (byte) ((rowsize >>> 24) & 0xFF); buf[1] = (byte) ((rowsize >>> 16) & 0xFF); buf[2] = (byte) ((rowsize >>> 8) & 0xFF); buf[3] = (byte) ((rowsize >>> 0) & 0xFF); }
java
public void reinitializeClientDefaultSSLProperties() { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) Tr.entry(tc, "reinitializeClientDefaultSSLProperties"); if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) Tr.exit(tc, "reinitializeClientDefaultSSLProperties"); }
python
def timer(diff, processed): """Return the passed time.""" # Changes seconds into minutes and seconds minutes, seconds = divmod(diff, 60) try: # Finds average time taken by requests time_per_request = diff / float(len(processed)) except ZeroDivisionError: time_per_request = 0 return minutes, seconds, time_per_request
java
public Observable<TransformationInner> updateAsync(String resourceGroupName, String jobName, String transformationName, TransformationInner transformation, String ifMatch) { return updateWithServiceResponseAsync(resourceGroupName, jobName, transformationName, transformation, ifMatch).map(new Func1<ServiceResponseWithHeaders<TransformationInner, TransformationsUpdateHeaders>, TransformationInner>() { @Override public TransformationInner call(ServiceResponseWithHeaders<TransformationInner, TransformationsUpdateHeaders> response) { return response.body(); } }); }
java
protected void updateTransientActions() { Vector<Action> ta = new Vector<>(); for (LabelAtomProperty p : properties) ta.addAll(p.getActions(this)); transientActions = ta; }
java
protected static void invalidateSwitchPoints() { if (LOG_ENABLED) { LOG.info("invalidating switch point"); } SwitchPoint old = switchPoint; switchPoint = new SwitchPoint(); synchronized(IndyInterface.class) { SwitchPoint.invalidateAll(new SwitchPoint[]{old}); } }
python
def _merge(self, value): """ Returns a list based on `value`: * missing required value is converted to an empty list; * missing required items are never created; * nested items are merged recursively. """ if not value: return [] if value is not None and not isinstance(value, list): # bogus value; will not pass validation but should be preserved return value item_spec = self._nested_validator return [x if x is None else item_spec.get_default_for(x) for x in value]
python
def process_header(self, data): """ Reads the first part of the file to get some essential metadata # Returns return (dict): the metadata in the header """ metadata = { "datacolumns": data.read_chunk("I"), "firstyear": data.read_chunk("I"), "lastyear": data.read_chunk("I"), "annualsteps": data.read_chunk("I"), } if metadata["annualsteps"] != 1: raise InvalidTemporalResError( "{}: Only annual files can currently be processed".format(self.filepath) ) return metadata
python
async def start(self): """Start process execution.""" # arguments passed to the Docker command command_args = { 'command': self.command, 'container_image': self.requirements.get('image', constants.DEFAULT_CONTAINER_IMAGE), } # Get limit defaults. limit_defaults = SETTINGS.get('FLOW_PROCESS_RESOURCE_DEFAULTS', {}) # Set resource limits. limits = [] # Each core is equivalent to 1024 CPU shares. The default for Docker containers # is 1024 shares (we don't need to explicitly set that). limits.append('--cpu-shares={}'.format(int(self.process['resource_limits']['cores']) * 1024)) # Some SWAP is needed to avoid OOM signal. Swappiness is low to prevent # extensive usage of SWAP (this would reduce the performance). memory = self.process['resource_limits']['memory'] + DOCKER_MEMORY_HARD_LIMIT_BUFFER memory_swap = int(memory * DOCKER_MEMORY_SWAP_RATIO) limits.append('--memory={}m'.format(memory)) limits.append('--memory-swap={}m'.format(memory_swap)) limits.append('--memory-reservation={}m'.format(self.process['resource_limits']['memory'])) limits.append('--memory-swappiness={}'.format(DOCKER_MEMORY_SWAPPINESS)) # Set ulimits for interactive processes to prevent them from running too long. if self.process['scheduling_class'] == PROCESS_META['SCHEDULING_CLASS_INTERACTIVE']: # TODO: This is not very good as each child gets the same limit. limits.append('--ulimit cpu={}'.format(limit_defaults.get('cpu_time_interactive', 30))) command_args['limits'] = ' '.join(limits) # set container name self.container_name_prefix = SETTINGS.get('FLOW_EXECUTOR', {}).get('CONTAINER_NAME_PREFIX', 'resolwe') command_args['container_name'] = '--name={}'.format(self._generate_container_name()) if 'network' in self.resources: # Configure Docker network mode for the container (if specified). # By default, current Docker versions use the 'bridge' mode which # creates a network stack on the default Docker bridge. network = SETTINGS.get('FLOW_EXECUTOR', {}).get('NETWORK', '') command_args['network'] = '--net={}'.format(network) if network else '' else: # No network if not specified. command_args['network'] = '--net=none' # Security options. security = [] # Generate and set seccomp policy to limit syscalls. policy_file = tempfile.NamedTemporaryFile(mode='w') json.dump(SECCOMP_POLICY, policy_file) policy_file.file.flush() if not SETTINGS.get('FLOW_DOCKER_DISABLE_SECCOMP', False): security.append('--security-opt seccomp={}'.format(policy_file.name)) self.temporary_files.append(policy_file) # Drop all capabilities and only add ones that are needed. security.append('--cap-drop=all') command_args['security'] = ' '.join(security) # Setup Docker volumes. def new_volume(kind, base_dir_name, volume, path=None, read_only=True): """Generate a new volume entry. :param kind: Kind of volume, which is used for getting extra options from settings (the ``FLOW_DOCKER_VOLUME_EXTRA_OPTIONS`` setting) :param base_dir_name: Name of base directory setting for volume source path :param volume: Destination volume mount point :param path: Optional additional path atoms appended to source path :param read_only: True to make the volume read-only """ if path is None: path = [] path = [str(atom) for atom in path] options = set(SETTINGS.get('FLOW_DOCKER_VOLUME_EXTRA_OPTIONS', {}).get(kind, '').split(',')) options.discard('') # Do not allow modification of read-only option. 
options.discard('ro') options.discard('rw') if read_only: options.add('ro') else: options.add('rw') return { 'src': os.path.join(SETTINGS['FLOW_EXECUTOR'].get(base_dir_name, ''), *path), 'dest': volume, 'options': ','.join(options), } volumes = [ new_volume( 'data', 'DATA_DIR', constants.DATA_VOLUME, [DATA_LOCATION['subpath']], read_only=False ), new_volume('data_all', 'DATA_DIR', constants.DATA_ALL_VOLUME), new_volume('upload', 'UPLOAD_DIR', constants.UPLOAD_VOLUME, read_only=False), new_volume( 'secrets', 'RUNTIME_DIR', constants.SECRETS_VOLUME, [DATA_LOCATION['subpath'], ExecutorFiles.SECRETS_DIR] ), ] # Generate dummy passwd and create mappings for it. This is required because some tools # inside the container may try to lookup the given UID/GID and will crash if they don't # exist. So we create minimal user/group files. passwd_file = tempfile.NamedTemporaryFile(mode='w') passwd_file.write('root:x:0:0:root:/root:/bin/bash\n') passwd_file.write('user:x:{}:{}:user:/:/bin/bash\n'.format(os.getuid(), os.getgid())) passwd_file.file.flush() self.temporary_files.append(passwd_file) group_file = tempfile.NamedTemporaryFile(mode='w') group_file.write('root:x:0:\n') group_file.write('user:x:{}:user\n'.format(os.getgid())) group_file.file.flush() self.temporary_files.append(group_file) volumes += [ new_volume('users', None, '/etc/passwd', [passwd_file.name]), new_volume('users', None, '/etc/group', [group_file.name]), ] # Create volumes for tools. # NOTE: To prevent processes tampering with tools, all tools are mounted read-only self.tools_volumes = [] for index, tool in enumerate(self.get_tools_paths()): self.tools_volumes.append(new_volume( 'tools', None, os.path.join('/usr/local/bin/resolwe', str(index)), [tool] )) volumes += self.tools_volumes # Create volumes for runtime (all read-only). runtime_volume_maps = SETTINGS.get('RUNTIME_VOLUME_MAPS', None) if runtime_volume_maps: for src, dst in runtime_volume_maps.items(): volumes.append(new_volume( 'runtime', 'RUNTIME_DIR', dst, [DATA_LOCATION['subpath'], src], )) # Add any extra volumes verbatim. volumes += SETTINGS.get('FLOW_DOCKER_EXTRA_VOLUMES', []) # Make sure that tmp dir exists. os.makedirs(constants.TMPDIR, mode=0o755, exist_ok=True) # Create Docker --volume parameters from volumes. command_args['volumes'] = ' '.join(['--volume="{src}":"{dest}":{options}'.format(**volume) for volume in volumes]) # Set working directory to the data volume. command_args['workdir'] = '--workdir={}'.format(constants.DATA_VOLUME) # Change user inside the container. command_args['user'] = '--user={}:{}'.format(os.getuid(), os.getgid()) # A non-login Bash shell should be used here (a subshell will be spawned later). command_args['shell'] = '/bin/bash' # Check if image exists locally. 
If not, command will exit with non-zero returncode check_command = '{command} image inspect {container_image}'.format(**command_args) logger.debug("Checking existence of docker image: {}".format(command_args['container_image'])) check_proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(check_command), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) await check_proc.communicate() if check_proc.returncode != 0: pull_command = '{command} pull {container_image}'.format(**command_args) logger.info("Pulling docker image: {}".format(command_args['container_image'])) pull_proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(pull_command), stdout=subprocess.PIPE, stderr=subprocess.PIPE ) _, stderr = await pull_proc.communicate() if pull_proc.returncode != 0: error_msg = "Docker failed to pull {} image.".format(command_args['container_image']) if stderr: error_msg = '\n'.join([error_msg, stderr.decode('utf-8')]) raise RuntimeError(error_msg) docker_command = ( '{command} run --rm --interactive {container_name} {network} {volumes} {limits} ' '{security} {workdir} {user} {container_image} {shell}'.format(**command_args) ) logger.info("Starting docker container with command: {}".format(docker_command)) start_time = time.time() # Workaround for pylint issue #1469 # (https://github.com/PyCQA/pylint/issues/1469). self.proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member *shlex.split(docker_command), limit=4 * (2 ** 20), # 4MB buffer size for line buffering stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) stdout = [] async def wait_for_container(): """Wait for Docker container to start to avoid blocking the code that uses it.""" self.proc.stdin.write(('echo PING' + os.linesep).encode('utf-8')) await self.proc.stdin.drain() while True: line = await self.proc.stdout.readline() stdout.append(line) if line.rstrip() == b'PING': break if self.proc.stdout.at_eof(): raise RuntimeError() try: await asyncio.wait_for(wait_for_container(), timeout=DOCKER_START_TIMEOUT) except (asyncio.TimeoutError, RuntimeError): error_msg = "Docker container has not started for {} seconds.".format(DOCKER_START_TIMEOUT) stdout = ''.join([line.decode('utf-8') for line in stdout if line]) if stdout: error_msg = '\n'.join([error_msg, stdout]) raise RuntimeError(error_msg) end_time = time.time() logger.info("It took {:.2f}s for Docker container to start".format(end_time - start_time)) self.stdout = self.proc.stdout
python
def draft(self, **kwargs): '''Allows for easily re-drafting a policy After a policy has been created, it was not previously possible to re-draft the published policy. This method makes it possible for a user with existing, published, policies to create drafts from them so that they are modifiable. See https://github.com/F5Networks/f5-common-python/pull/1099 :param kwargs: :return: ''' tmos_ver = self._meta_data['bigip']._meta_data['tmos_version'] legacy = kwargs.pop('legacy', False) if LooseVersion(tmos_ver) < LooseVersion('12.1.0') or legacy: raise DraftPolicyNotSupportedInTMOSVersion( "Drafting on this version of BIG-IP is not supported" ) kwargs = dict( createDraft=True ) super(Policy, self)._modify(**kwargs) get_kwargs = { 'name': self.name, 'partition': self.partition, 'uri_as_parts': True, 'subPath': 'Drafts' } base_uri = self._meta_data['container']._meta_data['uri'] session = self._meta_data['bigip']._meta_data['icr_session'] response = session.get(base_uri, **get_kwargs) json_data = response.json() self._local_update(json_data) self._activate_URI(json_data['selfLink'])
python
def extract_now_state(self): ''' Extract now map state. Returns: `np.ndarray` of state. ''' x, y = self.__agent_pos state_arr = np.zeros(self.__map_arr.shape) state_arr[x, y] = 1 return np.expand_dims(state_arr, axis=0)
python
def redraw(self, whence=0): """Redraw the canvas. Parameters ---------- whence See :meth:`get_rgb_object`. """ with self._defer_lock: whence = min(self._defer_whence, whence) if not self.defer_redraw: if self._hold_redraw_cnt == 0: self._defer_whence = self._defer_whence_reset self.redraw_now(whence=whence) else: self._defer_whence = whence return elapsed = time.time() - self.time_last_redraw # If there is no redraw scheduled, or we are overdue for one: if (not self._defer_flag) or (elapsed > self.defer_lagtime): # If more time than defer_lagtime has passed since the # last redraw then just do the redraw immediately if elapsed > self.defer_lagtime: if self._hold_redraw_cnt > 0: #self._defer_flag = True self._defer_whence = whence return self._defer_whence = self._defer_whence_reset self.logger.debug("lagtime expired--forced redraw") self.redraw_now(whence=whence) return # Indicate that a redraw is necessary and record whence self._defer_flag = True self._defer_whence = whence # schedule a redraw by the end of the defer_lagtime secs = self.defer_lagtime - elapsed self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % ( whence, secs)) self.reschedule_redraw(secs) else: # A redraw is already scheduled. Just record whence. self._defer_whence = whence self.logger.debug("update whence=%.2f" % (whence))
java
protected long getBlockSize(final int idNamespace) { Preconditions.checkArgument(blockSizer != null, "Blocksizer has not yet been initialized"); isActive = true; long blockSize = blockSizer.getBlockSize(idNamespace); Preconditions.checkArgument(blockSize>0,"Invalid block size: %s",blockSize); Preconditions.checkArgument(blockSize<getIdUpperBound(idNamespace), "Block size [%s] cannot be larger than upper bound [%s] for partition [%s]",blockSize,getIdUpperBound(idNamespace),idNamespace); return blockSize; }
python
def register_task_with_maintenance_window(WindowId=None, Targets=None, TaskArn=None, ServiceRoleArn=None, TaskType=None, TaskParameters=None, Priority=None, MaxConcurrency=None, MaxErrors=None, LoggingInfo=None, ClientToken=None): """ Adds a new task to a Maintenance Window. See also: AWS API Documentation :example: response = client.register_task_with_maintenance_window( WindowId='string', Targets=[ { 'Key': 'string', 'Values': [ 'string', ] }, ], TaskArn='string', ServiceRoleArn='string', TaskType='RUN_COMMAND', TaskParameters={ 'string': { 'Values': [ 'string', ] } }, Priority=123, MaxConcurrency='string', MaxErrors='string', LoggingInfo={ 'S3BucketName': 'string', 'S3KeyPrefix': 'string', 'S3Region': 'string' }, ClientToken='string' ) :type WindowId: string :param WindowId: [REQUIRED] The id of the Maintenance Window the task should be added to. :type Targets: list :param Targets: [REQUIRED] The targets (either instances or tags). Instances are specified using Key=instanceids,Values=instanceid1,instanceid2. Tags are specified using Key=tag name,Values=tag value. (dict) --An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. Key (string) --User-defined criteria for sending commands that target instances that meet the criteria. Key can be tag:Amazon EC2 tagor InstanceIds. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . Values (list) --User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, you could specify value:WebServer to execute a command on instances that include Amazon EC2 tags of ServerRole,WebServer. For more information about how to send commands that target instances using Key,Value parameters, see Executing a Command Using Systems Manager Run Command . (string) -- :type TaskArn: string :param TaskArn: [REQUIRED] The ARN of the task to execute :type ServiceRoleArn: string :param ServiceRoleArn: [REQUIRED] The role that should be assumed when executing the task. :type TaskType: string :param TaskType: [REQUIRED] The type of task being registered. :type TaskParameters: dict :param TaskParameters: The parameters that should be passed to the task when it is executed. (string) -- (dict) --Defines the values for a task parameter. Values (list) --This field contains an array of 0 or more strings, each 1 to 255 characters in length. (string) -- :type Priority: integer :param Priority: The priority of the task in the Maintenance Window, the lower the number the higher the priority. Tasks in a Maintenance Window are scheduled in priority order with tasks that have the same priority scheduled in parallel. :type MaxConcurrency: string :param MaxConcurrency: [REQUIRED] The maximum number of targets this task can be run for in parallel. :type MaxErrors: string :param MaxErrors: [REQUIRED] The maximum number of errors allowed before this task stops being scheduled. :type LoggingInfo: dict :param LoggingInfo: A structure containing information about an Amazon S3 bucket to write instance-level logs to. S3BucketName (string) -- [REQUIRED]The name of an Amazon S3 bucket where execution logs are stored . S3KeyPrefix (string) --(Optional) The Amazon S3 bucket subfolder. S3Region (string) -- [REQUIRED]The region where the Amazon S3 bucket is located. :type ClientToken: string :param ClientToken: User-provided idempotency token. 
This field is autopopulated if not provided. :rtype: dict :return: { 'WindowTaskId': 'string' } """ pass
java
public ResultList<MovieBasic> getGenreMovies(int genreId, String language, Integer page, Boolean includeAllMovies, Boolean includeAdult) throws MovieDbException { return tmdbGenre.getGenreMovies(genreId, language, page, includeAllMovies, includeAdult); }
java
@Override public OutputStream getOutputStream() throws IOException { URLConnection connection = url.openConnection(); if (connection == null) return null; connection.setDoOutput(true); // is it necessary? return connection.getOutputStream(); }
python
def _format_operation_dict(operation, parameters): """Formats parameters in operation in the way BigQuery expects. The input operation will be a query like ``SELECT %(namedparam)s`` and the output will be a query like ``SELECT @namedparam``. :type operation: str :param operation: A Google BigQuery query string. :type parameters: Mapping[str, Any] :param parameters: Dictionary of parameter values. :rtype: str :returns: A formatted query string. :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` if a parameter used in the operation is not found in the ``parameters`` argument. """ formatted_params = {} for name in parameters: escaped_name = name.replace("`", r"\`") formatted_params[name] = "@`{}`".format(escaped_name) try: return operation % formatted_params except KeyError as exc: raise exceptions.ProgrammingError(exc)
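A self-contained sketch of the substitution the function above performs; the `google.cloud` import and the `ProgrammingError` wrapping are omitted, and the helper name is hypothetical.

```python
def format_named_params_sketch(operation, parameters):
    # Rewrite pyformat-style named parameters (%(name)s) as BigQuery
    # named parameters (@`name`), escaping any backticks in the name.
    formatted = {}
    for name in parameters:
        escaped = name.replace("`", r"\`")
        formatted[name] = "@`{}`".format(escaped)
    return operation % formatted

print(format_named_params_sketch(
    "SELECT * FROM t WHERE x = %(limit)s", {"limit": 10}))
# SELECT * FROM t WHERE x = @`limit`
```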
python
def _pfp__process_metadata(self): """Process the metadata once the entire struct has been declared. """ if self._pfp__metadata_processor is None: return metadata_info = self._pfp__metadata_processor() if isinstance(metadata_info, list): for metadata in metadata_info: if metadata["type"] == "watch": self._pfp__set_watch( metadata["watch_fields"], metadata["update_func"], *metadata["func_call_info"] ) elif metadata["type"] == "packed": del metadata["type"] self._pfp__set_packer(**metadata) if self._pfp__can_unpack(): self._pfp__unpack_data(self.raw_data)
java
private static <T> T[] grow(T[] array, int required) { int oldCapacity = array.length; // x1.5: 20, 30, 45, 67, 100, 150, 225, 337, 505, etc int newCapacity = oldCapacity == 0 ? DEFAULT_CAPACITY : oldCapacity + (oldCapacity >> 1); if (newCapacity < required) newCapacity = required; return Arrays.copyOf(array, newCapacity); }
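The growth schedule in the comment above ("x1.5: 20, 30, 45, 67, ...") can be reproduced in a few lines; the sketch below is in Python for brevity, and the starting capacity of 20 is taken from the comment rather than from the surrounding Java class.

```python
cap = 20
schedule = [cap]
for _ in range(8):
    cap = cap + (cap >> 1)  # old capacity plus half of it (integer division)
    schedule.append(cap)
print(schedule)  # [20, 30, 45, 67, 100, 150, 225, 337, 505]
```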
java
@Override public IIOMetadata getImageMetadata(final int imageIndex) throws IOException { checkBounds(imageIndex); readHeader(); return new TGAMetadata(header, extensions); }
java
public void marshall(DescribeConfigurationsRequest describeConfigurationsRequest, ProtocolMarshaller protocolMarshaller) { if (describeConfigurationsRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(describeConfigurationsRequest.getConfigurationIds(), CONFIGURATIONIDS_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def _parse_siz_segment(cls, fptr): """Parse the SIZ segment. Parameters ---------- fptr : file Open file object. Returns ------- SIZSegment The current SIZ segment. """ offset = fptr.tell() - 2 read_buffer = fptr.read(2) length, = struct.unpack('>H', read_buffer) read_buffer = fptr.read(length - 2) data = struct.unpack_from('>HIIIIIIIIH', read_buffer) rsiz = data[0] if rsiz not in _KNOWN_PROFILES: msg = "Invalid profile: (Rsiz={rsiz}).".format(rsiz=rsiz) warnings.warn(msg, UserWarning) xysiz = (data[1], data[2]) xyosiz = (data[3], data[4]) xytsiz = (data[5], data[6]) xytosiz = (data[7], data[8]) # Csiz is the number of components Csiz = data[9] data = struct.unpack_from('>' + 'B' * (length - 36 - 2), read_buffer, offset=36) bitdepth = tuple(((x & 0x7f) + 1) for x in data[0::3]) signed = tuple(((x & 0x80) > 0) for x in data[0::3]) xrsiz = data[1::3] yrsiz = data[2::3] for j, subsampling in enumerate(zip(xrsiz, yrsiz)): if 0 in subsampling: msg = ("Invalid subsampling value for component {comp}: " "dx={dx}, dy={dy}.") msg = msg.format(comp=j, dx=subsampling[0], dy=subsampling[1]) warnings.warn(msg, UserWarning) try: num_tiles_x = (xysiz[0] - xyosiz[0]) / (xytsiz[0] - xytosiz[0]) num_tiles_y = (xysiz[1] - xyosiz[1]) / (xytsiz[1] - xytosiz[1]) except ZeroDivisionError: msg = ("Invalid tile specification: " "size of {num_tile_rows} x {num_tile_cols}, " "offset of {row_offset} x {col_offset}.") msg = msg.format(num_tile_rows=xytsiz[1], num_tile_cols=xytsiz[0], row_offset=xytosiz[1], col_offset=xytosiz[0]) warnings.warn(msg, UserWarning) else: numtiles = math.ceil(num_tiles_x) * math.ceil(num_tiles_y) if numtiles > 65535: msg = "Invalid number of tiles: ({numtiles})." msg = msg.format(numtiles=numtiles) warnings.warn(msg, UserWarning) kwargs = { 'rsiz': rsiz, 'xysiz': xysiz, 'xyosiz': xyosiz, 'xytsiz': xytsiz, 'xytosiz': xytosiz, 'Csiz': Csiz, 'bitdepth': bitdepth, 'signed': signed, 'xyrsiz': (xrsiz, yrsiz), 'length': length, 'offset': offset } segment = SIZsegment(**kwargs) # Need to keep track of the number of components from SIZ for # other segments. cls._csiz = Csiz return segment
python
def dismiss(self, userId, groupId): """ Dismiss a group. (Disbands the group; all users stop receiving its messages.) @param userId: Id of the user performing the dismissal. (required) @param groupId: Id of the group to dismiss. (required) @return code: return code, 200 means success. @return errorMessage: error message. """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/group/dismiss.json', params={"userId": userId, "groupId": groupId}) return Response(r, desc)
java
public Matrix3x2d scaleAround(double factor, double ox, double oy, Matrix3x2d dest) { return scaleAround(factor, factor, ox, oy, dest); }
java
private void finishCompletion() { // assert state > COMPLETING; for (WaitNode q; (q = waiters) != null;) { if (U.compareAndSwapObject(this, WAITERS, q, null)) { for (;;) { Thread t = q.thread; if (t != null) { q.thread = null; LockSupport.unpark(t); } WaitNode next = q.next; if (next == null) break; q.next = null; // unlink to help gc q = next; } break; } } done(); callable = null; // to reduce footprint }
python
def commit(self): """ Commit this transaction. """ if not self._parent._is_active: raise exc.InvalidRequestError("This transaction is inactive") yield from self._do_commit() self._is_active = False
java
protected void loadBasicStyle() { ctx.updateForGraphics(style, g); display = style.getProperty("display"); if (display == null) display = CSSProperty.Display.INLINE; CSSProperty.Float floating = style.getProperty("float"); if (floating == null) floating = BlockBox.FLOAT_NONE; position = style.getProperty("position"); if (position == null) position = BlockBox.POS_STATIC; //apply combination rules //http://www.w3.org/TR/CSS21/visuren.html#dis-pos-flo if (display == ElementBox.DISPLAY_NONE) { position = BlockBox.POS_STATIC; floating = BlockBox.FLOAT_NONE; } else if (position == BlockBox.POS_ABSOLUTE || position == BlockBox.POS_FIXED) { floating = BlockBox.FLOAT_NONE; } //compute the display computed value if (floating != BlockBox.FLOAT_NONE || position == BlockBox.POS_ABSOLUTE || position == BlockBox.POS_FIXED || isRootElement()) { if (display == DISPLAY_INLINE_TABLE) display = DISPLAY_TABLE; else if (display == DISPLAY_INLINE || display == DISPLAY_RUN_IN || display == DISPLAY_TABLE_ROW_GROUP || display == DISPLAY_TABLE_COLUMN || display == DISPLAY_TABLE_COLUMN_GROUP || display == DISPLAY_TABLE_HEADER_GROUP || display == DISPLAY_TABLE_FOOTER_GROUP || display == DISPLAY_TABLE_ROW || display == DISPLAY_TABLE_CELL || display == DISPLAY_TABLE_CAPTION || display == DISPLAY_INLINE_BLOCK) display = DISPLAY_BLOCK; } isblock = (display == DISPLAY_BLOCK); displayed = (display != DISPLAY_NONE && display != DISPLAY_TABLE_COLUMN); visible = (style.getProperty("visibility") != CSSProperty.Visibility.HIDDEN); //line height CSSProperty.LineHeight lh = style.getProperty("line-height"); if (lh == null || lh == CSSProperty.LineHeight.NORMAL) lineHeight = Math.round(DEFAULT_LINE_HEIGHT * ctx.getFontHeight()); else if (lh == CSSProperty.LineHeight.length) { TermLength len = style.getValue(TermLength.class, "line-height"); lineHeight = (int) ctx.pxLength(len); } else if (lh == CSSProperty.LineHeight.percentage) { TermPercent len = style.getValue(TermPercent.class, "line-height"); lineHeight = (int) ctx.pxLength(len, ctx.getFontHeight()); } else //must be INTEGER or NUMBER { Term<?> len = style.getValue("line-height", true); float r; if (len instanceof TermInteger) r = ((TermInteger) len).getValue(); else r = ((TermNumber) len).getValue(); lineHeight = Math.round(r * ctx.getFontHeight()); } //whitespace whitespace = style.getProperty("white-space"); if (whitespace == null) whitespace = WHITESPACE_NORMAL; //background loadBackground(); //z-index CSSProperty.ZIndex z = style.getProperty("z-index"); if (z != null && z != ZIndex.AUTO) { zset = true; Term<?> zterm = style.getValue("z-index", true); if (zterm instanceof TermInteger) zIndex = ((TermInteger) zterm).getValue().intValue(); else zset = false; } else zset = false; //transformations -- applied on block-level or atomic inline-level elements only if (isBlock() || isReplaced()) transform = style.getProperty("transform"); if (transform == null) transform = CSSProperty.Transform.NONE; }
java
@Override public IPromise timeoutIn(long millis) { final Actor actor = Actor.sender.get(); if ( actor != null ) actor.delayed(millis, ()-> timedOut(Timeout.INSTANCE)); else { Actors.delayedCalls.schedule( new TimerTask() { @Override public void run() { timedOut(Timeout.INSTANCE); } },millis); } return this; }
java
public static AtomixConfig config(AtomixRegistry registry) { return config(Thread.currentThread().getContextClassLoader(), null, registry); }
java
@Override public void sawOpcode(int seen) { if ((seen == Const.INVOKEVIRTUAL) && "printStackTrace".equals(getNameConstantOperand()) && SignatureBuilder.SIG_VOID_TO_VOID.equals(getSigConstantOperand())) { bugReporter .reportBug(new BugInstance(this, BugType.IMC_IMMATURE_CLASS_PRINTSTACKTRACE.name(), NORMAL_PRIORITY) .addClass(this).addMethod(this).addSourceLine(this)); } }
python
def getWinner(self, type = 'activation'): """ Returns the winner of the type specified {'activation' or 'target'}. """ maxvalue = -10000 maxpos = -1 ttlvalue = 0 if type == 'activation': ttlvalue = Numeric.add.reduce(self.activation) maxpos = Numeric.argmax(self.activation) maxvalue = self.activation[maxpos] elif type == 'target': # note that backprop() resets self.targetSet flag if self.verify and self.targetSet == 0: raise LayerError('getWinner() called with \'target\' but target has not been set.', \ self.targetSet) ttlvalue = Numeric.add.reduce(self.target) maxpos = Numeric.argmax(self.target) maxvalue = self.target[maxpos] else: raise LayerError('getWinner() called with unknown layer attribute.', \ type) if self.size > 0: avgvalue = ttlvalue / float(self.size) else: raise LayerError('getWinner() called for layer of size zero.', \ self.size) return maxpos, maxvalue, avgvalue
java
public <R> R withCommitTransaction(TransFunc<R> transFunc) throws IOException { return withTransaction(transFunc, true); }
python
def press_enter(multiple=False, silent=False): """Return a generator function which yields every time the user presses return.""" def f(): try: while True: if silent: yield input() else: sys.stderr.write("<press enter> ") sys.stderr.flush() yield input() if not multiple: break except (EOFError, KeyboardInterrupt): # User Ctrl+D or Ctrl+C'd if not silent: # Prevents the user's terminal getting clobbered sys.stderr.write("\n") sys.stderr.flush() return return f
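A hedged usage sketch for the factory above: `press_enter` returns a generator function, and calling that function yields once per press of Enter. This assumes the function above is in scope and that stdin is interactive.

```python
wait_for_enter = press_enter(multiple=True)
for i, text in enumerate(wait_for_enter()):
    print("got press #{} (text: {!r})".format(i + 1, text))
    if i >= 2:
        break  # stop after three presses
```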
java
public static String convertFormat(String format) { if (format == null) return null; else { // day of week format = format.replaceAll("EEE", "D"); // year format = format.replaceAll("yy", "y"); // month if (format.indexOf("MMM") != -1) { format = format.replaceAll("MMM", "M"); } else { format = format.replaceAll("M", "m"); } return format; } }
java
private void addJSONElement(Map<String, Object> siteData, JSONElement data) { if(EdenUtils.elementIsObject(data)) { addJSONObject(siteData, (JSONObject) data.getElement()); } else if(EdenUtils.elementIsArray(data)) { addJSONArray(siteData, (JSONArray) data.getElement()); } }
python
def write_or_delete_file(self, what, filename, data, force=False): """Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)`. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or deleted (if `force` is true). """ if data: self.write_file(what, filename, data) elif os.path.exists(filename): if data is None and not force: log.warn( "%s not set in setup(), but %s exists", what, filename ) return else: self.delete_file(filename)
java
public void setName(String name) { super.setName(name); m_tf.setName(name); if (m_button != null) m_button.setName(name); if (m_buttonTime != null) m_buttonTime.setName(name); }
java
public void objectAvailable (ConfigObject object) { // keep this for later _object = object; // create our field editors try { Field[] fields = object.getClass().getFields(); for (Field field : fields) { // if the field is anything but a plain old public field, // we don't want to edit it if (field.getModifiers() == Modifier.PUBLIC) { add(_object.getEditor(_ctx, field)); } } } catch (SecurityException se) { log.warning("Unable to introspect DObject!? " + se); } SwingUtil.refresh(this); }
java
private void addPostParams(final Request request) { if (from != null) { request.addPostParam("From", from.toString()); } if (to != null) { request.addPostParam("To", to.toString()); } if (statusCallback != null) { request.addPostParam("StatusCallback", statusCallback.toString()); } if (statusCallbackMethod != null) { request.addPostParam("StatusCallbackMethod", statusCallbackMethod.toString()); } if (statusCallbackEvent != null) { for (String prop : statusCallbackEvent) { request.addPostParam("StatusCallbackEvent", prop); } } if (timeout != null) { request.addPostParam("Timeout", timeout.toString()); } if (record != null) { request.addPostParam("Record", record.toString()); } if (muted != null) { request.addPostParam("Muted", muted.toString()); } if (beep != null) { request.addPostParam("Beep", beep); } if (startConferenceOnEnter != null) { request.addPostParam("StartConferenceOnEnter", startConferenceOnEnter.toString()); } if (endConferenceOnExit != null) { request.addPostParam("EndConferenceOnExit", endConferenceOnExit.toString()); } if (waitUrl != null) { request.addPostParam("WaitUrl", waitUrl.toString()); } if (waitMethod != null) { request.addPostParam("WaitMethod", waitMethod.toString()); } if (earlyMedia != null) { request.addPostParam("EarlyMedia", earlyMedia.toString()); } if (maxParticipants != null) { request.addPostParam("MaxParticipants", maxParticipants.toString()); } if (conferenceRecord != null) { request.addPostParam("ConferenceRecord", conferenceRecord); } if (conferenceTrim != null) { request.addPostParam("ConferenceTrim", conferenceTrim); } if (conferenceStatusCallback != null) { request.addPostParam("ConferenceStatusCallback", conferenceStatusCallback.toString()); } if (conferenceStatusCallbackMethod != null) { request.addPostParam("ConferenceStatusCallbackMethod", conferenceStatusCallbackMethod.toString()); } if (conferenceStatusCallbackEvent != null) { for (String prop : conferenceStatusCallbackEvent) { request.addPostParam("ConferenceStatusCallbackEvent", prop); } } if (recordingChannels != null) { request.addPostParam("RecordingChannels", recordingChannels); } if (recordingStatusCallback != null) { request.addPostParam("RecordingStatusCallback", recordingStatusCallback.toString()); } if (recordingStatusCallbackMethod != null) { request.addPostParam("RecordingStatusCallbackMethod", recordingStatusCallbackMethod.toString()); } if (sipAuthUsername != null) { request.addPostParam("SipAuthUsername", sipAuthUsername); } if (sipAuthPassword != null) { request.addPostParam("SipAuthPassword", sipAuthPassword); } if (region != null) { request.addPostParam("Region", region); } if (conferenceRecordingStatusCallback != null) { request.addPostParam("ConferenceRecordingStatusCallback", conferenceRecordingStatusCallback.toString()); } if (conferenceRecordingStatusCallbackMethod != null) { request.addPostParam("ConferenceRecordingStatusCallbackMethod", conferenceRecordingStatusCallbackMethod.toString()); } if (recordingStatusCallbackEvent != null) { for (String prop : recordingStatusCallbackEvent) { request.addPostParam("RecordingStatusCallbackEvent", prop); } } if (conferenceRecordingStatusCallbackEvent != null) { for (String prop : conferenceRecordingStatusCallbackEvent) { request.addPostParam("ConferenceRecordingStatusCallbackEvent", prop); } } if (coaching != null) { request.addPostParam("Coaching", coaching.toString()); } if (callSidToCoach != null) { request.addPostParam("CallSidToCoach", callSidToCoach); } }
python
def _inherited_row(row, base_rows_from_pillar, ret): '''Return a row with properties from parents.''' base_rows = [] for base_row_from_pillar in base_rows_from_pillar: base_row = __salt__['pillar.get'](base_row_from_pillar) if base_row: base_rows.append(base_row) elif base_row_from_pillar != _DEFAULT_ROW_PILLAR: ret.setdefault('warnings', []) warning_message = 'Cannot find row pillar "{0}".'.format( base_row_from_pillar) if warning_message not in ret['warnings']: ret['warnings'].append(warning_message) base_rows.append(row) result_row = {} for row in base_rows: result_row.update(row) return result_row
java
public void trigger(String vaultName, String resourceGroupName, String fabricName, String containerName, String protectedItemName, String recoveryPointId, RestoreRequestResource resourceRestoreRequest) { triggerWithServiceResponseAsync(vaultName, resourceGroupName, fabricName, containerName, protectedItemName, recoveryPointId, resourceRestoreRequest).toBlocking().single().body(); }
python
def manifest(self): """The manifest definition of the stencilset as a dict.""" if not self._manifest: with open(self.manifest_path) as man: self._manifest = json.load(man) return self._manifest
java
@Override public EClass getPercentageChange() { if (percentageChangeEClass == null) { percentageChangeEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(StorePackage.eNS_URI).getEClassifiers().get(85); } return percentageChangeEClass; }
java
@Override public int getOffset(int era, int year, int month, int day, int dayOfWeek, int milliseconds) { if (era == GregorianCalendar.BC) { // Convert to extended year year = 1 - year; } long time = Grego.fieldsToDay(year, month, day) * Grego.MILLIS_PER_DAY + milliseconds; int[] offsets = new int[2]; getOffset(time, true, LOCAL_DST, LOCAL_STD, offsets); return (offsets[0] + offsets[1]); }
python
def cat_adb_log(self, tag, begin_time): """Takes an excerpt of the adb logcat log from a certain time point to current time. Args: tag: An identifier of the time period, usually the name of a test. begin_time: Logline format timestamp of the beginning of the time period. """ if not self.adb_logcat_file_path: raise Error( self._ad, 'Attempting to cat adb log when none has been collected.') end_time = mobly_logger.get_log_line_timestamp() self._ad.log.debug('Extracting adb log from logcat.') adb_excerpt_path = os.path.join(self._ad.log_path, 'AdbLogExcerpts') utils.create_dir(adb_excerpt_path) f_name = os.path.basename(self.adb_logcat_file_path) out_name = f_name.replace('adblog,', '').replace('.txt', '') out_name = ',%s,%s.txt' % (begin_time, out_name) out_name = out_name.replace(':', '-') tag_len = utils.MAX_FILENAME_LEN - len(out_name) tag = tag[:tag_len] out_name = tag + out_name full_adblog_path = os.path.join(adb_excerpt_path, out_name) with io.open(full_adblog_path, 'w', encoding='utf-8') as out: in_file = self.adb_logcat_file_path with io.open( in_file, 'r', encoding='utf-8', errors='replace') as f: in_range = False while True: line = None try: line = f.readline() if not line: break except: continue line_time = line[:mobly_logger.log_line_timestamp_len] if not mobly_logger.is_valid_logline_timestamp(line_time): continue if self._is_timestamp_in_range(line_time, begin_time, end_time): in_range = True if not line.endswith('\n'): line += '\n' out.write(line) else: if in_range: break
java
private static MonetaryRoundingsSingletonSpi monetaryRoundingsSingletonSpi() { try { return Optional.ofNullable(Bootstrap .getService(MonetaryRoundingsSingletonSpi.class)) .orElseGet(DefaultMonetaryRoundingsSingletonSpi::new); } catch (Exception e) { Logger.getLogger(Monetary.class.getName()) .log(Level.SEVERE, "Failed to load MonetaryCurrenciesSingletonSpi, using default.", e); return new DefaultMonetaryRoundingsSingletonSpi(); } }
java
private void parse(Reader reader, char separator) throws IOException, ParseException { LOG.info("Parsing CSV file..."); /* * Ideally one would configure the format using withHeader(String ...) but since the master column is optional * this is of no use. Therefore, you'll get null if you were to call getHeaderMap() later on the parser. The header * is returned as the 1st record (i.e. row). */ CSVParser csvParser = CSVFormat.DEFAULT.withDelimiter(separator).withIgnoreSurroundingSpaces(true).parse(reader); List<CSVRecord> records = csvParser.getRecords(); String[] header = transformHeaderMapToArray(records.get(0)); verifyHeader(header); for (int i = 1; i < records.size(); i++) { CSVRecord csvRecord = records.get(i); if (csvRecord.size() != header.length) { throw new ParseException(String.format("Expected %d columns but got %d.", header.length, csvRecord.size()), i + 1); } String key = csvRecord.get(0); Status status = Status.valueOf(csvRecord.get(1)); String masterValue = null; String value; if (hasMasterLanguage()) { masterValue = csvRecord.get(2); value = csvRecord.get(3); } else { value = csvRecord.get(2); } // the context is irrelevant, so it is not added add(key, status, masterValue, value); } LOG.info("Parsing of CSV file finished."); }
python
def render(self, template, context_stack, delimiters=None): """ Render a unicode template string, and return as unicode. Arguments: template: a template string of type unicode (but not a proper subclass of unicode). context_stack: a ContextStack instance. """ parsed_template = parse(template, delimiters) return parsed_template.render(self, context_stack)
java
public void reference (Object object) { if (copyDepth > 0) { if (needsCopyReference != null) { if (object == null) throw new IllegalArgumentException("object cannot be null."); originalToCopy.put(needsCopyReference, object); needsCopyReference = null; } } else if (references && object != null) { int id = readReferenceIds.pop(); if (id != NO_REF) referenceResolver.setReadObject(id, object); } }
python
def _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars): """ Find and return continuum pixels given the flux and sigma cut Parameters ---------- f_cut: float the upper limit imposed on the quantity (fbar-1) sig_cut: float the upper limit imposed on the quantity (f_sig) wl: numpy ndarray of length npixels rest-frame wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape nstars, npixels inverse variances, parallel to fluxes Returns ------- contmask: boolean mask of length npixels True indicates that the pixel is continuum """ f_bar = np.median(fluxes, axis=0) sigma_f = np.var(fluxes, axis=0) bad = np.logical_and(f_bar==0, sigma_f==0) cont1 = np.abs(f_bar-1) <= f_cut cont2 = sigma_f <= sig_cut contmask = np.logical_and(cont1, cont2) contmask[bad] = False return contmask
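A small synthetic sanity check for the function above, assuming it is importable; the flux values are invented so that pixels 0-2 look like continuum, pixel 3 is a deep line, and pixel 4 is noisy.

```python
import numpy as np

wl = np.linspace(5000.0, 5004.0, 5)
fluxes = np.array([
    [1.00, 1.01, 0.99, 0.60, 1.30],
    [1.01, 0.99, 1.00, 0.55, 0.70],
    [0.99, 1.00, 1.01, 0.65, 1.00],
])
ivars = np.ones_like(fluxes)

mask = _find_contpix_given_cuts(f_cut=0.05, sig_cut=0.01, wl=wl,
                                fluxes=fluxes, ivars=ivars)
print(mask)  # expected: [ True  True  True False False]
```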
java
public OvhMigration project_serviceName_migration_migrationId_GET(String serviceName, String migrationId) throws IOException { String qPath = "/cloud/project/{serviceName}/migration/{migrationId}"; StringBuilder sb = path(qPath, serviceName, migrationId); String resp = exec(qPath, "GET", sb.toString(), null); return convertTo(resp, OvhMigration.class); }
python
def rebuild(mode=''): """Rebuild the site with a nice UI.""" scan_site() # for good measure if not current_user.can_rebuild_site: return error('You are not permitted to rebuild the site.</p>' '<p class="lead">Contact an administrator for ' 'more information.', 401) if db is not None: db.set('site:needs_rebuild', '-1') if not q.fetch_job('build') and not q.fetch_job('orphans'): b = q.enqueue_call(func=coil.tasks.build, args=(app.config['REDIS_URL'], app.config['NIKOLA_ROOT'], mode), job_id='build') q.enqueue_call(func=coil.tasks.orphans, args=(app.config['REDIS_URL'], app.config['NIKOLA_ROOT']), job_id='orphans', depends_on=b) return render('coil_rebuild.tmpl', {'title': 'Rebuild'}) else: status, outputb = coil.tasks.build_single(mode) _, outputo = coil.tasks.orphans_single() site.coil_needs_rebuild = '0' return render('coil_rebuild_single.tmpl', {'title': 'Rebuild', 'status': '1' if status else '0', 'outputb': outputb, 'outputo': outputo})
java
public XAnnotation<javax.persistence.OneToOne> createOneToOne( OneToOne cOneToOne) { return cOneToOne == null ? null : // new XAnnotation<javax.persistence.OneToOne>( javax.persistence.OneToOne.class, // cOneToOne.getTargetEntity() == null ? null : new XSingleAnnotationField<Class<Object>>( "targetEntity", Class.class, new XClassByNameAnnotationValue<Object>( cOneToOne.getTargetEntity())), // AnnotationUtils.create("cascade", getCascadeType(cOneToOne.getCascade())), // AnnotationUtils.create("fetch", getFetchType(cOneToOne.getFetch())), // AnnotationUtils.create("optional", cOneToOne.isOptional()), // AnnotationUtils.create("mappedBy", cOneToOne.getMappedBy()), // AnnotationUtils.create("orphanRemoval", cOneToOne.isOrphanRemoval()) // ); }
java
public static void doByteBufferPutCompressed(INDArray arr, ByteBuffer allocated, boolean rewind) { CompressedDataBuffer compressedDataBuffer = (CompressedDataBuffer) arr.data(); CompressionDescriptor descriptor = compressedDataBuffer.getCompressionDescriptor(); ByteBuffer codecByteBuffer = descriptor.toByteBuffer(); ByteBuffer buffer = arr.data().pointer().asByteBuffer().order(ByteOrder.nativeOrder()); ByteBuffer shapeBuffer = arr.shapeInfoDataBuffer().pointer().asByteBuffer().order(ByteOrder.nativeOrder()); allocated.putInt(arr.rank()); //put data opType next so its self describing allocated.putInt(arr.data().dataType().ordinal()); //put shape next allocated.put(shapeBuffer); //put codec information next allocated.put(codecByteBuffer); //finally put the data allocated.put(buffer); if (rewind) allocated.rewind(); }
java
public void invokeRequest (byte[] arg1, InvocationService.ResultListener arg2) { InvocationMarshaller.ResultMarshaller listener2 = new InvocationMarshaller.ResultMarshaller(); listener2.listener = arg2; sendRequest(INVOKE_REQUEST, new Object[] { arg1, listener2 }); }
python
def fail_api(channel): """Creates an embed UI for when the API call didn't work Args: channel (discord.Channel): The Discord channel to bind the embed to Returns: ui (ui_embed.UI): The embed UI object """ gui = ui_embed.UI( channel, "Couldn't get stats off RLTrackerNetwork.", "Maybe the API changed, please tell Infraxion.", modulename=modulename, colour=0x0088FF ) return gui
java
public void setDeclaringType(JvmDeclaredType newDeclaringType) { if (newDeclaringType != eInternalContainer() || (eContainerFeatureID() != TypesPackage.JVM_MEMBER__DECLARING_TYPE && newDeclaringType != null)) { if (EcoreUtil.isAncestor(this, newDeclaringType)) throw new IllegalArgumentException("Recursive containment not allowed for " + toString()); NotificationChain msgs = null; if (eInternalContainer() != null) msgs = eBasicRemoveFromContainer(msgs); if (newDeclaringType != null) msgs = ((InternalEObject)newDeclaringType).eInverseAdd(this, TypesPackage.JVM_DECLARED_TYPE__MEMBERS, JvmDeclaredType.class, msgs); msgs = basicSetDeclaringType(newDeclaringType, msgs); if (msgs != null) msgs.dispatch(); } else if (eNotificationRequired()) eNotify(new ENotificationImpl(this, Notification.SET, TypesPackage.JVM_MEMBER__DECLARING_TYPE, newDeclaringType, newDeclaringType)); }
python
def _parseBoundImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the bound import directory. @type rva: int @param rva: The RVA where the bound import directory starts. @type size: int @param size: The size of the bound import directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBoundImportDescriptor} @return: A new L{ImageBoundImportDescriptor} object. """ data = self.getDataAtRva(rva, size) rd = utils.ReadData(data) boundImportDirectory = directories.ImageBoundImportDescriptor.parse(rd) # parse the name of every bounded import. for i in range(len(boundImportDirectory) - 1): if hasattr(boundImportDirectory[i], "forwarderRefsList"): if boundImportDirectory[i].forwarderRefsList: for forwarderRefEntry in boundImportDirectory[i].forwarderRefsList: offset = forwarderRefEntry.offsetModuleName.value forwarderRefEntry.moduleName = self.readStringAtRva(offset + rva) offset = boundImportDirectory[i].offsetModuleName.value boundImportDirectory[i].moduleName = self.readStringAtRva(offset + rva) return boundImportDirectory
python
def get_nulldata(self, rawtx): """Returns nulldata from <rawtx> as hexdata.""" tx = deserialize.tx(rawtx) index, data = control.get_nulldata(tx) return serialize.data(data)
python
def match_filename( filename ): """ Checks whether a file exists, either as named, or as a gzipped file (filename.gz) Args: filename (Str): The root filename. Returns: (Str|None): if the file exists (either as the root filename, or gzipped), the return value will be the actual filename. If no matching filename is found the return value is set to None """ f = next( ( '{}{}'.format( filename, extension ) for extension in [ '', '.gz' ] if Path( '{}{}'.format( filename, extension ) ).is_file() ), None ) return f
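A hedged usage sketch, assuming the function above is importable; the temporary directory and file name are made up for the example.

```python
import gzip
import os
import tempfile

workdir = tempfile.mkdtemp()
os.chdir(workdir)
with gzip.open('data.out.gz', 'wt') as f:
    f.write('payload\n')

print(match_filename('data.out'))      # -> 'data.out.gz' (only the gzipped file exists)
print(match_filename('missing.file'))  # -> None
```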
java
private void deleteFiles(Predicate<File> predicate) { directory.mkdirs(); // Iterate through all files in the storage directory. for (File file : directory.listFiles(f -> f.isFile() && predicate.test(f))) { try { Files.delete(file.toPath()); } catch (IOException e) { // Ignore the exception. } } }
python
def ext(self): """ Canonical file extension for this image e.g. ``'png'``. The returned extension is all lowercase and is the canonical extension for the content type of this image, regardless of what extension may have been used in its filename, if any. """ ext_map = { 'BMP': 'bmp', 'GIF': 'gif', 'JPEG': 'jpg', 'PNG': 'png', 'TIFF': 'tiff', 'WMF': 'wmf' } format = self._format if format not in ext_map: tmpl = "unsupported image format, expected one of: %s, got '%s'" raise ValueError(tmpl % (ext_map.keys(), format)) return ext_map[format]
python
def list_from_content(cls, content): """ Gets a list of guilds from the HTML content of the world guilds' page. Parameters ---------- content: :class:`str` The HTML content of the page. Returns ------- :class:`list` of :class:`ListedGuild` List of guilds in the current world. ``None`` if it's the list of a world that doesn't exist. Raises ------ InvalidContent If content is not the HTML of a guild's page. """ parsed_content = parse_tibiacom_content(content) selected_world = parsed_content.find('option', selected=True) try: if "choose world" in selected_world.text: # It belongs to a world that doesn't exist return None world = selected_world.text except AttributeError: raise InvalidContent("Content does not belong to world guild list.") # First TableContainer contains world selector. _, *containers = parsed_content.find_all('div', class_="TableContainer") guilds = [] for container in containers: header = container.find('div', class_="Text") active = "Active" in header.text header, *rows = container.find_all("tr", {'bgcolor': ["#D4C0A1", "#F1E0C6"]}) for row in rows: columns = row.find_all('td') logo_img = columns[0].find('img')["src"] description_lines = columns[1].get_text("\n").split("\n", 1) name = description_lines[0] description = None if len(description_lines) > 1: description = description_lines[1].replace("\r", "").replace("\n", " ") guild = cls(name, world, logo_img, description, active) guilds.append(guild) return guilds
java
private void addDependencyToGraph(final DbDependency dependency, final AbstractGraph graph, final int depth, final String parentId) { // In that case of Axway artifact we will add a module to the graph if (filters.getCorporateFilter().filter(dependency)) { final DbModule dbTarget = repoHandler.getModuleOf(dependency.getTarget()); // if there is no module, add the artifact to the graph if(dbTarget == null){ LOG.error("Got missing reference: " + dependency.getTarget()); final DbArtifact dbArtifact = DataUtils.createDbArtifact(dependency.getTarget()); final String targetElementId = graph.getId(dbArtifact); graph.addElement(targetElementId, dbArtifact.getVersion(), false); graph.addDependency(parentId, targetElementId, dependency.getScope()); return; } // Add the element to the graph addModuleToGraph(dbTarget, graph, depth + 1); //Add the dependency to the graph final String moduleElementId = graph.getId(dbTarget); graph.addDependency(parentId, moduleElementId, dependency.getScope()); } // In case a third-party we will add an artifact else { final DbArtifact dbTarget = repoHandler.getArtifact(dependency.getTarget()); if(dbTarget == null){ LOG.error("Got missing artifact: " + dependency.getTarget()); return; } if(!graph.isTreated(graph.getId(dbTarget))){ final ModelMapper modelMapper = new ModelMapper(repoHandler); final Artifact target = modelMapper.getArtifact(dbTarget); final String targetElementId = graph.getId(target); graph.addElement(targetElementId, target.getVersion(), false); graph.addDependency(parentId, targetElementId, dependency.getScope()); } } }
python
def surfacemass(self,R,log=False): """ NAME: surfacemass PURPOSE: return the surface density profile at this R INPUT: R - Galactocentric radius (/ro) log - if True, return the log (default: False) OUTPUT: Sigma(R) HISTORY: 2010-03-26 - Written - Bovy (NYU) """ if log: return -R/self._params[0] else: return sc.exp(-R/self._params[0])
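A minimal sketch of the exponential-disk relation this encodes, assuming `dfc` is an instance of the enclosing distribution-function class whose first parameter is the radial scale length, so Sigma(R) = exp(-R / h_R):

import numpy as np

R = 1.5                               # Galactocentric radius in units of ro
sigma = dfc.surfacemass(R)            # exp(-R / h_R)
log_sigma = dfc.surfacemass(R, log=True)
assert np.isclose(np.log(sigma), log_sigma)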
python
def file_copy(filename, settings): """ Copies a file. {'_file_copy': {'dest': 'new_file_name'}} Args: filename (str): Filename. settings (dict): Must be {"dest": path of new file} """ for k, v in settings.items(): if k.startswith("dest"): shutil.copyfile(filename, v)
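Two sketch calls showing the expected `settings` shape; the file names here are illustrative assumptions.

file_copy("INCAR", {"dest": "INCAR.orig"})           # copies INCAR -> INCAR.orig
file_copy("INCAR", {"dest_backup": "INCAR.bak"})     # any key starting with "dest" triggers a copy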
java
public Annotation createAnnotation(@NonNull AnnotationType type,
                                      int start,
                                      int end,
                                      @NonNull Map<AttributeType, ?> attributeMap) {
      Preconditions.checkArgument(start >= start(),
                                  "Annotation must have a starting position >= the start of the document");
      Preconditions.checkArgument(end <= end(), "Annotation must have an ending position <= the end of the document");
      Annotation annotation = new Annotation(this, type, start, end);
      annotation.setId(idGenerator.getAndIncrement());
      annotation.putAll(attributeMap);
      annotationSet.add(annotation);
      return annotation;
   }
python
def paging(self): """ Gets the pagination type; compatible with entry.archive(page_type=...) """ if 'date' in self.spec: _, date_span, _ = utils.parse_date(self.spec['date']) return date_span return 'offset'
java
public static <W extends WitnessType<W>,A> EvalT<W,A> of(final AnyM<W,Eval<A>> monads) { return new EvalT<>( monads); }
java
public static Object getFor(int status, Request request, Response response) { Object customRenderer = CustomErrorPages.getInstance().customPages.get(status); Object customPage = CustomErrorPages.getInstance().getDefaultFor(status); if (customRenderer instanceof String) { customPage = customRenderer; } else if (customRenderer instanceof Route) { try { customPage = ((Route) customRenderer).handle(request, response); } catch (Exception e) { // The custom page renderer is causing an internal server error. Log exception as a warning and use default page instead LOG.warn("Custom error page handler for status code {} has thrown an exception: {}. Using default page instead.", status, e.getMessage()); } } return customPage; }
python
def ignore(code): """Should this code be ignored. :param str code: Error code (e.g. D201). :return: True if code should be ignored, False otherwise. :rtype: bool """ if code in Main.options['ignore']: return True if any(c in code for c in Main.options['ignore']): return True return False
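A sketch of the two match paths, assuming `Main.options` is the module's option store populated by the surrounding tool's argument parsing:

Main.options = {'ignore': ['D2', 'D301']}   # assumption: normally set up by the tool, not by hand

ignore('D301')   # True  - exact match against the ignore list
ignore('D203')   # True  - 'D2' occurs inside 'D203'
ignore('D100')   # False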
python
def done(self, *args, **kwargs): """Mark the whole ProgressSection as done""" kwargs['state'] = 'done' pr_id = self.add(*args, log_action='done', **kwargs) self._session.query(Process).filter(Process.group == self._group).update({Process.state: 'done'}) self.start.state = 'done' self._session.commit() return pr_id
java
public final Boolean evaluateBoolean( Message message ) throws JMSException { Object value = evaluate(message); if (value == null) return null; if (value instanceof Boolean) return (Boolean)value; throw new FFMQException("Expected a boolean but got : "+value.toString(),"INVALID_SELECTOR_EXPRESSION"); }
java
public ByteArrayInputStream output2InputStream(final OutputStream out) { if (out == null) return null; return new ByteArrayInputStream(((ByteArrayOutputStream) out).toByteArray()); }
python
def ynticks(self, nticks, index=1): """Set the number of ticks.""" self.layout['yaxis' + str(index)]['nticks'] = nticks return self
java
@SuppressWarnings("unchecked") public static <E, C extends Counter<E>> C scale(C c, double s) { C scaled = (C) c.getFactory().create(); for (E key : c.keySet()) { scaled.setCount(key, c.getCount(key) * s); } return scaled; }
java
public static JTextComponent textComponentAsLabel(JTextComponent textcomponent) { // Make the text component non editable textcomponent.setEditable(false); // Make the text area look like a label textcomponent.setBackground((Color)UIManager.get("Label.background")); textcomponent.setForeground((Color)UIManager.get("Label.foreground")); textcomponent.setBorder(null); return textcomponent; }
python
def parse(s):
    r"""
    Returns a list of strings or format dictionaries to describe the strings.

    May raise a ValueError if it can't be parsed.

    >>> parse(">>> []")
    ['>>> []']
    >>> #parse("\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m")
    """
    stuff = []
    rest = s
    while True:
        front, token, rest = peel_off_esc_code(rest)
        if front:
            stuff.append(front)
        if token:
            try:
                tok = token_type(token)
                if tok:
                    stuff.extend(tok)
            except ValueError:
                raise ValueError("Can't parse escape sequence: %r %r %r %r"
                                 % (s, front, token, rest))
        if not rest:
            break
    return stuff
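A sketch of the expected behaviour, assuming the module-level helpers `peel_off_esc_code` and `token_type` translate ANSI colour escapes into format dictionaries (the exact dictionary shape depends on `token_type`):

parse(">>> []")
# -> ['>>> []']                      (plain text passes through unchanged)

parse("\x1b[33mhello\x1b[39m")
# -> e.g. [{'fg': 'yellow'}, 'hello', {'fg': 'default'}]   (illustrative output only)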
java
public <E> Concatenator NOT_EQUALS(E value) { getBooleanOp().setOperator(Operator.NOT_EQUALS); return this.operateOn(value); }
java
public static <T, IV> int detectIndexWith( T[] objectArray, Predicate2<? super T, IV> predicate, IV injectedValue) { if (objectArray == null) { throw new IllegalArgumentException("Cannot perform a detectIndexWith on null"); } for (int i = 0; i < objectArray.length; i++) { if (predicate.accept(objectArray[i], injectedValue)) { return i; } } return -1; }
python
def _norm_include(self, record, hist=None):
        """
        Apply the 'normIncludes' normalization: replace near-match ('almost') values based on
        at least one of the following: included strings, excluded strings, a starts-with string,
        or an ends-with string.

        :param dict record: dictionary of values to validate
        :param dict hist: existing dictionary of history values
        """
        if hist is None:
            hist = {}
        for field in record:
            if record[field] != '' and record[field] is not None:
                if field in self.fields:
                    if 'normIncludes' in self.fields[field]['lookup']:
                        field_val_new, hist, _ = IncludesLookup(
                            fieldVal=record[field],
                            lookupType='normIncludes',
                            db=self.mongo,
                            fieldName=field,
                            histObj=hist)
                        record[field] = field_val_new
        return record, hist
java
public static Single<String> read(String path) { return SingleRxXian.call("cosService", "cosRead", new JSONObject() {{ put("path", path); }}).flatMap(response -> { response.throwExceptionIfNotSuccess(); return Single.just(Objects.requireNonNull(response.dataToStr())); }); }
java
private EndPointInfoImpl updateEndpointMBean(String name, String host, int port) { EndPointInfoImpl existingEP = endpoints.get(name); existingEP.updateHost(host); existingEP.updatePort(port); return existingEP; }
python
def list_extensions(request): """List all nova extensions, except the ones in the blacklist.""" blacklist = set(getattr(settings, 'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', [])) nova_api = _nova.novaclient(request) return tuple( extension for extension in nova_list_extensions.ListExtManager(nova_api).show_all() if extension.name not in blacklist )
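A sketch of the matching Django setting and a call site; the extension names and the availability of a Horizon `request` object are assumptions made for illustration:

# settings.py (assumed names, for illustration only)
OPENSTACK_NOVA_EXTENSIONS_BLACKLIST = ['SimpleTenantUsage', 'Shelve']

# in a view or API wrapper that has a Horizon request available
extensions = list_extensions(request)
names = {ext.name for ext in extensions}   # blacklisted names never appear here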
java
@Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { if (getEmbeddedHttp2Exception(cause) != null) { // Some exception in the causality chain is an Http2Exception - handle it. onError(ctx, false, cause); } else { super.exceptionCaught(ctx, cause); } }
python
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Link to a GitHub user. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ app = inliner.document.settings.env.app #app.info('user link %r' % text) ref = 'https://www.github.com/' + text node = nodes.reference(rawtext, text, refuri=ref, **options) return [node], []
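To make the role usable in reST, it still has to be registered with Sphinx; a minimal `setup()` sketch using the standard `Sphinx.add_role` API (the returned metadata dict is optional):

def setup(app):
    app.add_role('ghuser', ghuser_role)
    return {'parallel_read_safe': True}

# In a document:  see :ghuser:`octocat` for an example profile link.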
python
def _get_element_attr_or_none(document, selector, attribute): """ Using a CSS selector, get the element and return the given attribute value, or None if no element. Args: document (HTMLElement) - HTMLElement document selector (str) - CSS selector attribute (str) - The attribute to get from the element """ element = document.cssselect(selector) if element: return element[0].get(attribute) return None
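A small self-contained sketch, assuming the document is an lxml HTML element (its `.cssselect()` matches the call made above; the `cssselect` package must be installed alongside lxml):

from lxml import html

doc = html.fromstring('<html><body><a id="home" href="/index.html">Home</a></body></html>')
_get_element_attr_or_none(doc, 'a#home', 'href')   # -> '/index.html'
_get_element_attr_or_none(doc, 'a#nav', 'href')    # -> None (no such element)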
java
public void updateContentList(List<CmsTreeItem> treeItemsToShow) { m_scrollList.clearList(); if ((treeItemsToShow != null) && !treeItemsToShow.isEmpty()) { for (CmsTreeItem dataValue : treeItemsToShow) { dataValue.removeOpener(); m_scrollList.add(dataValue); CmsScrollPanel scrollparent = (CmsScrollPanel)m_scrollList.getParent(); scrollparent.onResizeDescendant(); } } else { showIsEmptyLabel(); } scheduleResize(); }
python
def filter_curriculum(curriculum, week, weekday=None):
    """
    Filter the curriculum down to a given week (and, optionally, a given weekday).

    :param curriculum: the timetable data
    :param week: the week to filter on, a positive integer representing the week number
    :param weekday: the day of the week, an integer from 1 to 7 corresponding to Monday through Sunday
    :return: if ``weekday`` is not given, a structure in the same format as the original timetable
             but containing only the courses taught in the specified week; otherwise, the courses
             of that single day in the specified week
    """
    if weekday:
        c = [deepcopy(curriculum[weekday - 1])]
    else:
        c = deepcopy(curriculum)
    for d in c:
        l = len(d)
        for t_idx in range(l):
            t = d[t_idx]
            if t is None:
                continue
            # Courses normally do not overlap within the same time slot; warn when they do
            t = list(filter(lambda k: week in k['上课周数'], t)) or None
            if t is not None and len(t) > 1:
                logger.warning('Week %d, weekday %d, period %d has conflicting courses: %s',
                               week, weekday or c.index(d) + 1, t_idx + 1, t)
            d[t_idx] = t
    return c[0] if weekday else c
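A sketch of the data shape the function appears to expect (inferred from the code, so treat it as an assumption): seven days, each a list of time slots, each slot either ``None`` or a list of course dicts carrying the key ``'上课周数'`` (the weeks in which the course is taught):

course = {'name': 'Example course', '上课周数': [1, 2, 3, 5]}   # the 'name' key is illustrative
curriculum = [[[course], None] for _ in range(7)]               # toy timetable: 7 days, 2 slots each

week3 = filter_curriculum(curriculum, week=3)                    # full week, week-3 courses only
monday_week4 = filter_curriculum(curriculum, week=4, weekday=1)  # Monday of week 4 (slot 0 becomes None)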
java
public static Response getExceptionResponse(final int status, final String msg) { return Response.status(status).entity(new LinkedHashMap<String, Object>() { private static final long serialVersionUID = 1L; { put("code", status); put("message", msg); } }).type(MediaType.APPLICATION_JSON).build(); }
java
public int executeUpdateDeleteQuery(String cqlQuery) {
        if (log.isDebugEnabled()) {
            log.debug("Executing cql query {}.", cqlQuery);
        }
        try {
            CqlResult result = (CqlResult) executeCQLQuery(cqlQuery, true);
            return result.getNum();
        } catch (Exception e) {
            log.error("Error while executing update/delete query: {}, Caused by: {}.", cqlQuery, e);
            return 0;
        }
    }
python
def unzip(archive, destination, filenames=None):
    """Unzip a zip archive into destination directory.

    It unzips either the whole archive or specific file(s) from the archive.

    Usage:
        >>> output = os.path.join(os.getcwd(), 'output')
        >>> # Archive can be an instance of a ZipFile class
        >>> archive = zipfile.ZipFile('test.zip', 'r')
        >>> # Or just a filename
        >>> archive = 'test.zip'
        >>> # Extracts all files
        >>> unzip(archive, output)
        >>> # Extract only one file
        >>> unzip(archive, output, 'my_file.txt')
        >>> # Extract a list of files
        >>> unzip(archive, output, ['my_file1.txt', 'my_file2.txt'])

    Args:
        archive (zipfile.ZipFile or str): Zipfile object to extract from or
            path to the zip archive.
        destination (str): Path to the output directory.
        filenames (str or list of str or None): Path(s) to the filename(s)
            inside the zip archive that you want to extract.
    """
    close = False
    try:
        if not isinstance(archive, zipfile.ZipFile):
            archive = zipfile.ZipFile(archive, "r", allowZip64=True)
            close = True

        logger.info("Extracting: %s -> %s" % (archive.filename, destination))

        if isinstance(filenames, str):
            filenames = [filenames]

        if filenames is None:  # extract all
            filenames = archive.namelist()

        for filename in filenames:
            if filename.endswith("/"):  # it's a directory
                shell.mkdir(os.path.join(destination, filename))
            else:
                if not _extract_file(archive, destination, filename):
                    raise Exception("Extraction of %s failed" % filename)

        logger.info('Extracting zip archive "%s" succeeded' % archive.filename)
        return True
    except Exception:
        logger.exception("Error while unzipping archive %s" % archive.filename)
        return False
    finally:
        if close:
            archive.close()
java
private Query getQueryBySqlCount(QueryBySQL aQuery) { String countSql = aQuery.getSql(); int fromPos = countSql.toUpperCase().indexOf(" FROM "); if(fromPos >= 0) { countSql = "select count(*)" + countSql.substring(fromPos); } int orderPos = countSql.toUpperCase().indexOf(" ORDER BY "); if(orderPos >= 0) { countSql = countSql.substring(0, orderPos); } return new QueryBySQL(aQuery.getSearchClass(), countSql); }