function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def find_answer_spans_sentencepiece(tokenized_context: TokenizedText,
                                    answer: str) -> List[Tuple[int, int]]:
  """Finds all answer occurrence SentencePiece token spans (inclusive).

  Args:
    tokenized_context: SentencePiece tokenized context with indices mapping.
    answer: Answer string.

  Returns:
    A list of (begin, end) SentencePiece token level indices (inclusive) of
    all the answer occurrences in the context. If the answer is empty or there
    is no answer occurrence in the context, return empty list.
  """
  # The answer occurrence always corresponds to char level occurrence.
  # This is to avoid the following case,
  # context: "..Italian composer who wrote 39 operas.."
  # answer: "opera"
  # Since both "operas" and "opera" are in the vocab, simply searching token
  # level spans will miss such kind of occurrence.
  token_spans = []
  for char_begin, char_end in find_char_spans(tokenized_context.text, answer):
    # Map each char-level span back to token indices; both ends inclusive.
    token_spans.append((tokenized_context.chars_to_tokens[char_begin],
                        tokenized_context.chars_to_tokens[char_end]))
  return token_spans
27788,
6881,
27788,
944,
1538678568
] |
def __init__(self, image, configHolder):
    """Initialise the image-creator state.

    image: identifier of the base image to customise.
    configHolder: configuration container; its options are merged with the
        VmManager defaults and then assigned onto this instance.
    """
    self.image = image
    self.configHolder = configHolder

    # Metadata describing the new image/manifest to be produced.
    self.newImageGroupName = ''
    self.newInstalledSoftwareName = ''
    self.newInstalledSoftwareVersion = ''
    self.newImageGroupVersion = ''
    self.newImageGroupVersionWithManifestId = False
    self.author = ''
    self.title = ''
    self.comment = ''
    self.os = ''
    self.authorEmail = ''
    self.marketplaceEndpointNewimage = ''
    self.endpoint = ''

    # Customisation inputs (comma-separated strings / script paths).
    self.extraOsReposUrls = ''
    self.packages = ''
    self.scripts = ''
    self.prerecipe = ''
    self.recipe = ''
    self.verboseLevel = ''
    self.shutdownVm = True
    self.signManifest = True

    # Timeouts come from class-level constants (defined outside this view).
    self.vmStartTimeout = self.VM_START_TIMEOUT
    self.vmPingTimeout = self.VM_PING_TIMEOUT

    # Merge run options both ways so configHolder and self.options agree,
    # then copy each option onto an attribute of this instance.
    self.options = VmManager.defaultRunOptions()
    self.options.update(configHolder.options)
    self.configHolder.options.update(self.options)
    configHolder.assign(self)
    self._set_stdouterr()

    # Cloud connector authenticated with credentials derived from self.
    credentials = AuthnFactory.getCredentials(self)
    self.cloud = CloudConnectorFactory.getCloud(credentials)
    self.cloud.setEndpoint(self.endpoint)

    # Runtime state of the helper VM used to build the image.
    self.runner = None
    self.vmAddress = None
    self.vmId = None
    self.vmIp = None
    self.vmName = 'creator'

    self.userPublicKeyFile = self.options.get('userPublicKeyFile',
                                              Defaults.sshPublicKeyLocation)
    # BUGFIX: str.strip('.pub') strips any of the characters '.', 'p', 'u',
    # 'b' from BOTH ends (e.g. 'backup.pub' -> 'ackup'), not the literal
    # '.pub' suffix. Remove the suffix explicitly instead.
    if self.userPublicKeyFile.endswith('.pub'):
        self.userPrivateKeyFile = self.userPublicKeyFile[:-len('.pub')]
    else:
        self.userPrivateKeyFile = self.userPublicKeyFile

    self.mainDisk = ''
    self.extraDisk = ''
    self.mountPointExtraDisk = '/media'
    self.imageFile = ''
    self.imageFileBundled = ''

    self.excludeFromCreatedImage = \
        self.excludeFromCreatedImageDefault + \
        self.options.get('excludeFromCreatedImage', '').split(',')

    self.installer = self.options.get('installer')

    self.targetImageUri = ''
    self.targetManifestUri = ''

    self.manifest = ''
    self.manifestObject = None
    self.newManifestFileName = None
    self.manifestLocalFileName = ''

    # Private notification listener; replaced via setListener elsewhere.
    self.__listener = CreatorBaseListener()
2,
1,
2,
1,
1335119615
] |
def printDetail(self, msg):
    """Emit *msg* at NORMAL detail, honouring the configured verbosity level."""
    level = self.verboseLevel
    return Util.printDetail(msg, level, Util.VERBOSE_LEVEL_NORMAL)
2,
1,
2,
1,
1335119615
] |
def startNode(self):
    """Bring up the base VM used for image creation.

    Verifies the image, loads its manifest, configures the runner, boots
    the machine, then blocks until the network is up and SSH is reachable.
    """
    self._imageExists()
    self._retrieveManifest()
    self.__setAttributesFromManifest()
    self.__createRunner()
    self._startMachine()
    self._waitMachineNetworkUpOrAbort()
    self._checkIfCanConnectToMachine()
2,
1,
2,
1,
1335119615
] |
def _printAction(self, msg):
    """Print an action message and forward it to the listener."""
    Util.printAction(msg)
    self._notifyOnAction(msg)
2,
1,
2,
1,
1335119615
] |
def _printError(self, msg):
    """Notify the listener of an error, then print it.

    Note: notification happens first because Util.printError typically
    aborts execution — confirm against Util's implementation.
    """
    self._notifyOnError(msg)
    Util.printError(msg)
2,
1,
2,
1,
1335119615
] |
def _notifyOnAction(self, note):
    """Forward an action note to the listener via the generic _notify hook."""
    self._notify('Action', note)
2,
1,
2,
1,
1335119615
] |
def _notifyOnError(self, note):
    """Forward an error note to the listener via the generic _notify hook."""
    self._notify('Error', note)
2,
1,
2,
1,
1335119615
] |
def callListener():
    """Invoke the listener callback named by *onOperation* with *note*.

    Closure helper: onOperation and note come from the enclosing scope
    (presumably _notify — not fully visible here).
    """
    notifyFunction = getattr(self.__listener, onOperation)
    notifyFunction(note)
2,
1,
2,
1,
1335119615
] |
def _checkIfCanConnectToMachine(self):
    """Block until an SSH connection to the VM succeeds.

    Runs a no-op command ('true') over SSH; on failure retries every
    6 seconds up to 40 times, then re-raises wrapped ExecutionException.
    """
    self._printStep('Check if we can connect to the machine')
    cmd = 'true'
    try:
        self._sshCmdWithOutputVerb(cmd)
    except ExecutionException:
        sleepTime = 6
        maxCount = 40
        counter = 0
        while True:
            try:
                self.printDetail('Sleeping %i sec. Retry %i out of %i.' % (sleepTime, counter + 1, maxCount))
                time.sleep(sleepTime)
                self._sshCmdWithOutputVerb(cmd)
                break
            # Python 2 except syntax kept as-is (file is Python 2).
            except ExecutionException, e:
                if counter >= maxCount:
                    raise ExecutionException(e)
                counter += 1
2,
1,
2,
1,
1335119615
] |
def _checkImageExists(self):
    """Delegate existence checking of self.image to an Image helper."""
    image = Image(self.configHolder)
    image.checkImageExists(self.image)
2,
1,
2,
1,
1335119615
] |
def createRunner(self):
    """Public wrapper around the private runner construction."""
    self.__createRunner()
2,
1,
2,
1,
1335119615
] |
def _startMachine(self):
    """Start the base VM and wait for it to reach the running state.

    On any failure the error is routed through _printError (which also
    notifies the listener); a boot timeout or 'Failed' state kills the VM.
    """
    self._printStep('Starting base image')
    try:
        # runInstance() returns a sequence; the first element is the VM id.
        self.vmId = self.runner.runInstance()[0]
    # Python 2 except syntax kept as-is (file is Python 2).
    except Exception, msg:
        self._printError('An error occurred while starting machine: \n\t%s' % msg)
    try:
        _, self.vmIp = self.runner.getNetworkDetail(self.vmId)
        self.vmAddress = self.vmIp
    except Exception, e:
        self._printError('An error occurred while getting machine network details: \n\t%s' % str(e))
    self._printStep('Waiting for machine to boot')
    vmStarted = self.runner.waitUntilVmRunningOrTimeout(self.vmId,
                                                        self.vmStartTimeout,
                                                        failOn='Failed')
    if not vmStarted:
        if self.runner.getVmState(self.vmId) == 'Failed':
            msg = 'Failed to start VM (id=%s, ip=%s): %s' % \
                (self.vmId, self.vmAddress,
                 self._getVmFailureMessage(self.vmId))
        else:
            msg = 'Failed to start VM within %i seconds (id=%s, ip=%s)' % \
                (self.vmStartTimeout, self.vmId, self.vmAddress)
        self.printDetail(msg)
        # Clean up the half-started VM before aborting.
        self._killMachine()
        self._printError(msg)
2,
1,
2,
1,
1335119615
] |
def _killMachine(self):
    """Kill the helper VM if we know its id; otherwise just warn."""
    self._printStep('Killing machine')
    if self.vmId:
        self.cloud.vmKill(self.vmId)
    else:
        Util.printWarning('Undefined VM ID, when trying to kill machine.')
2,
1,
2,
1,
1335119615
] |
def _shutdownNode(self):
    """Stop the VM, or leave it running with a reminder when shutdownVm is off."""
    if self.shutdownVm:
        self._stopMachine()
    else:
        self._printStep('Machine ready for use')
        msg = '\n\tMachine IP: %s\tRemember to stop the machine when finished' % self.vmIp
        Util.printInfo(msg)
2,
1,
2,
1,
1335119615
] |
def _getPublicAddress(self):
    """Return the VM's public IP (None until the machine is started)."""
    return self.vmIp
2,
1,
2,
1,
1335119615
] |
def __setAttributesFromManifest(self):
    """Derive OS and installer settings from the retrieved manifest."""
    self._setOsFromManifest()
    self._setInstallerBasedOnOs()
2,
1,
2,
1,
1335119615
] |
def _setInstallerBasedOnOs(self):
    """Pick a package installer from the OS unless one was configured."""
    if not self.installer:
        self.installer = Systems.getInstallerBasedOnOs(self.os)
2,
1,
2,
1,
1335119615
] |
def _installPackages(self):
    """Install the user-requested packages on the running VM.

    Sets up extra repositories first, refreshes the installer metadata
    (best effort), then installs the comma-separated self.packages;
    aborts via _printError on a non-zero install exit code.
    """
    self._printStep('Installing user packages')
    if not self.packages:
        self.printDetail('No packages to install')
        return
    self._setUpExtraRepositories()
    self.printDetail('Updating installer')
    # The update return code was previously bound to a variable that was
    # immediately overwritten without being checked; the update is
    # deliberately best-effort, so the result is now explicitly ignored.
    self._doInstallerUpdate()
    self.printDetail('Installing packages: %s' % self.packages)
    ret = self._doInstallPackagesRemotly(self.packages)
    if ret != 0:
        self._printError('An error occurred while installing packages')
2,
1,
2,
1,
1335119615
] |
def _doInstallPackagesRemotly(self, packages):
    """Run the installer on the VM for comma-separated *packages*; return exit code."""
    package_names = packages.split(',')
    command = self._buildInstallerCommand() + ' ' + ' '.join(package_names)
    return self._sshCmd(command, stderr=self.stderr, stdout=self.stdout)
2,
1,
2,
1,
1335119615
] |
def _buildInstallerCommand(self):
    """Return the remote install command for the configured installer.

    Implicitly returns None for installers other than 'yum'/'apt'.
    """
    installer = self.installer
    if installer == 'yum':
        return yumInstallCmd
    if installer == 'apt':
        return aptInstallCmd
2,
1,
2,
1,
1335119615
] |
def _buildPackageCacheCleanerCommand(self):
    """Return the remote package-cache cleanup command for the installer.

    Implicitly returns None for installers other than 'yum'/'apt'.
    """
    installer = self.installer
    if installer == 'yum':
        return yumCleanPackageCacheCmd
    if installer == 'apt':
        return aptCleanPackageCacheCmd
2,
1,
2,
1,
1335119615
] |
def _uploadAndExecuteRemoteScript(self, script):
    """Copy a local script to the VM's /tmp and execute it there.

    *script* may include arguments after the script path; they are split
    off, the file is scp'd to /tmp, chmod'd 0755, then run via ssh.
    """
    def __tellScriptNameAndArgs(script):
        # Split "name args..." into a two-element [name, args] list.
        scriptNameAndArgs = os.path.basename(script)
        scriptNameAndArgsList = scriptNameAndArgs.split(' ', 1)
        if len(scriptNameAndArgsList) == 1: # no arguments given
            scriptNameAndArgsList = scriptNameAndArgsList + ['']
        return scriptNameAndArgsList
    def _uploadScript(script):
        scriptName, args = __tellScriptNameAndArgs(script)
        scriptDirectory = Util.sanitizePath(os.path.dirname(script))
        scriptPathLocal = os.path.abspath(os.path.join(scriptDirectory, scriptName))
        scriptPathRemote = '/tmp/%s' % scriptName
        rc, output = self._scpWithOutput(scriptPathLocal, 'root@%s:%s' % (self.vmAddress, scriptPathRemote))
        if rc != 0:
            self._printError('An error occurred while uploading script %s\n%s' % (script, output))
        # Make the uploaded script executable.
        self._sshCmdWithOutput('chmod 0755 %s' % scriptPathRemote)
        return scriptPathRemote, args
    def _executeRemoteScript(scriptPathRemote, args=''):
        # pseudoTTY so interactive-ish scripts behave; errors handled here.
        rc = self._sshCmd('%s %s' % (scriptPathRemote, args), throwOnError=False,
                          pseudoTTY=True)
        if rc != 0:
            self._printError('An error occurred while executing script %s' % script)
    scriptPathRemote, args = _uploadScript(script)
    _executeRemoteScript(scriptPathRemote, args)
2,
1,
2,
1,
1335119615
] |
def _executeRecipe(self):
    """Upload and run the user-provided recipe script on the VM, if any."""
    self._printStep('Executing user recipe')
    if len(self.recipe) == 0:
        self.printDetail('No recipe to execute')
        return
    self._uploadAndExecuteRemoteRecipe(self.recipe)
2,
1,
2,
1,
1335119615
] |
def _localCleanUp(self):
    """Remove the locally cached manifest file (best effort, rm -rf)."""
    Util.execute(['rm', '-rf', self.manifestLocalFileName])
2,
1,
2,
1,
1335119615
] |
def _scpWithOutput(self, src, dst):
    """scp *src* to *dst*, returning (rc, output) instead of printing."""
    return self._scp(src, dst, withOutput=True)
2,
1,
2,
1,
1335119615
] |
def _sshCmdWithOutput(self, cmd, throwOnError=True, **kwargs):
    """Run *cmd* on the VM over SSH and return (rc, output).

    Raises ExecutionException on a non-zero rc unless throwOnError is False.
    """
    rc, output = sshCmdWithOutput(cmd, self.vmAddress,
                                  sshKey=self.userPrivateKeyFile,
                                  verboseLevel=self.verboseLevel,
                                  verboseThreshold=Util.VERBOSE_LEVEL_DETAILED,
                                  **kwargs)
    if rc and throwOnError:
        raise ExecutionException('Error executing command: %s\n%s' % (cmd, output))
    return rc, output
2,
1,
2,
1,
1335119615
] |
def _sshCmdWithOutputQuiet(self, cmd, **kwargs):
    """Like _sshCmdWithOutput, but with ssh's own chatter suppressed."""
    return self._sshCmdWithOutput(cmd, sshQuiet=True, **kwargs)
2,
1,
2,
1,
1335119615
] |
def getVmId(self):
    """Return the identifier of the helper VM (None before it is started)."""
    vm_id = self.vmId
    return vm_id
2,
1,
2,
1,
1335119615
] |
def __init__(self, verbose=False):
    """Listener that only emits notifications when *verbose* is set.

    When verbose, self.write is rebound to the printing implementation;
    otherwise the class-level write (presumably a no-op defined outside
    this view — confirm) is used.
    """
    if verbose:
        self.write = self.__beVerbose
2,
1,
2,
1,
1335119615
] |
def __beVerbose(self, msg):
    """Print the notification message to stdout (Python 2 print statement)."""
    print msg
2,
1,
2,
1,
1335119615
] |
def onStep(self, msg):
    """Forward a step notification to the configured writer."""
    text = 'step: %s' % msg
    self.write(text)
2,
1,
2,
1,
1335119615
] |
def explicit_terms(self, state: PyTreeState) -> PyTreeState:
  """Evaluates explicit terms in the ODE.

  Abstract hook: subclasses must override; the base implementation raises
  NotImplementedError.
  """
  raise NotImplementedError
434,
58,
434,
34,
1616435172
] |
def implicit_solve(
self, state: PyTreeState, step_size: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def backward_forward_euler(
equation: ImplicitExplicitODE, time_step: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def step_fn(u0):
  """One backward-forward Euler step (F, G_inv, dt from the enclosing factory)."""
  # Forward Euler on the explicit terms F.
  g = u0 + dt * F(u0)
  # Backward Euler on the implicit terms via the implicit solver.
  u1 = G_inv(g, dt)
  return u1
434,
58,
434,
34,
1616435172
] |
def crank_nicolson_rk2(
equation: ImplicitExplicitODE, time_step: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def step_fn(u0):
  """One Crank-Nicolson / RK2 step (F, G, G_inv, dt from the enclosing factory)."""
  # Crank-Nicolson: half-step of the implicit terms G applied explicitly...
  g = u0 + 0.5 * dt * G(u0)
  h1 = F(u0)
  # ...and the other half inside the implicit solve.
  u1 = G_inv(g + dt * h1, 0.5 * dt)
  # Heun-style corrector: average explicit tendencies at u0 and u1.
  h2 = 0.5 * (F(u1) + h1)
  u2 = G_inv(g + dt * h2, 0.5 * dt)
  return u2
434,
58,
434,
34,
1616435172
] |
def low_storage_runge_kutta_crank_nicolson(
alphas: Sequence[float],
betas: Sequence[float],
gammas: Sequence[float],
equation: ImplicitExplicitODE,
time_step: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def step_fn(u):
  """One low-storage RK / Crank-Nicolson step (α, β, γ, dt, F, G, G_inv from closure)."""
  h = 0
  for k in range(len(β)):
    # Low-storage accumulation of the explicit tendency.
    h = F(u) + β[k] * h
    µ = 0.5 * dt * (α[k + 1] - α[k])
    # Semi-implicit substage update: Crank-Nicolson on the implicit terms G.
    u = G_inv(u + γ[k] * dt * h + µ * G(u), µ)
  return u
434,
58,
434,
34,
1616435172
] |
def crank_nicolson_rk3(
equation: ImplicitExplicitODE, time_step: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def crank_nicolson_rk4(
equation: ImplicitExplicitODE, time_step: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def __post_init__(self):
  """Validate that the IMEX Butcher tableaux have consistent sizes.

  a_ex/a_im carry one row fewer than the number of stages, so each
  length+1 must equal len(b_ex) and len(b_im); collapsing all four sizes
  into a set must therefore leave a single element.
  """
  if len({len(self.a_ex) + 1,
          len(self.a_im) + 1,
          len(self.b_ex),
          len(self.b_im)}) > 1:
    raise ValueError("inconsistent Butcher tableau")
434,
58,
434,
34,
1616435172
] |
def step_fn(y0):
  """One IMEX Runge-Kutta step driven by tableaux a_ex/a_im/b_ex/b_im (from closure)."""
  f = [None] * num_steps  # explicit stage derivatives F(Y_i)
  g = [None] * num_steps  # implicit stage derivatives G(Y_i)
  f[0] = F(y0)
  g[0] = G(y0)
  for i in range(1, num_steps):
    # Stage value from previously computed stages (skip zero coefficients).
    ex_terms = dt * sum(a_ex[i-1][j] * f[j] for j in range(i) if a_ex[i-1][j])
    im_terms = dt * sum(a_im[i-1][j] * g[j] for j in range(i) if a_im[i-1][j])
    Y_star = y0 + ex_terms + im_terms
    # Implicit solve with the tableau's diagonal coefficient.
    Y = G_inv(Y_star, dt * a_im[i-1][i])
    # Only evaluate F/G at this stage if a later stage or the output needs it.
    if any(a_ex[j][i] for j in range(i, num_steps - 1)) or b_ex[i]:
      f[i] = F(Y)
    if any(a_im[j][i] for j in range(i, num_steps - 1)) or b_im[i]:
      g[i] = G(Y)
  # Final combination with the output weights b_ex/b_im.
  ex_terms = dt * sum(b_ex[j] * f[j] for j in range(num_steps) if b_ex[j])
  im_terms = dt * sum(b_im[j] * g[j] for j in range(num_steps) if b_im[j])
  y_next = y0 + ex_terms + im_terms
  return y_next
434,
58,
434,
34,
1616435172
] |
def imex_rk_sil3(
equation: ImplicitExplicitODE, time_step: float, | google/jax-cfd | [
434,
58,
434,
34,
1616435172
] |
def configure_loader_modules():
    """Provide the salt __opts__ dunder for the openvswitch_port module under test."""
    return {openvswitch_port: {"__opts__": {"test": False}}}
13089,
5388,
13089,
3074,
1298233016
] |
def parse_schema(yaml_schema: str) -> Tuple[str, Dict[str, SchemaFieldType]]:
    """Parses yaml schema.

    Ensures that schema is well-formed and returns dictionary of properties and
    its type for type-checking.

    Args:
        yaml_schema: Yaml schema to be parsed.

    Returns:
        str: Title set in the schema.
        Dict: Property name to SchemaFieldType enum.

    Raises:
        ValueError if title field is not set in schema or an
            unsupported(i.e. not defined in SchemaFieldType)
            type is specified for the field.
    """
    schema = yaml.full_load(yaml_schema)
    # Idiom: membership test directly on the dict instead of on .keys().
    if 'title' not in schema:
        raise ValueError('Invalid _schema, title must be set. \
            Got: {}'.format(yaml_schema))
    title = schema['title']
    properties = {}
    if 'properties' in schema:
        # 'properties: ' with no entries parses as None; normalise to {}.
        schema_properties = schema['properties'] or {}
        for property_name, property_def in schema_properties.items():
            try:
                properties[property_name] = SchemaFieldType(
                    property_def['type'])
            except ValueError:
                # Re-raise with the offending field for a clearer message.
                raise ValueError('Unsupported type:{} specified for field: {} \
                    in schema'.format(property_def['type'], property_name))
    return title, properties
3125,
1400,
3125,
892,
1526085107
] |
def compare(self, a, b):
    """Return True if *a* is contained in *b* (membership/substring test)."""
    contained = a in b
    return contained
12158,
2099,
12158,
303,
1456755346
] |
def exit_function(signal_number=0,
                  frame=None):
    r"""
    Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).
    """
    # Debug-print the executing function and triggering signal, then emit
    # the standard program footer (gen_print-style helpers defined elsewhere).
    dprint_executing()
    dprint_var(signal_number)
    qprint_pgm_footer()
75,
81,
75,
51,
1450462058
] |
def validate_parms():
    r"""
    Validate program parameters, etc. Return True or False accordingly.
    """
    # Register the exit/signal handlers as part of the standard post-validation.
    gen_post_validation(exit_function, signal_handler)
    return True
75,
81,
75,
51,
1450462058
] |
def __init__(self, handle=0):
    """Wrap an existing GXRA handle; the default 0 yields an undefined instance."""
    super(GXRA, self).__init__(GXContext._get_tls_geo(), handle)
27,
28,
27,
30,
1476726730
] |
def null(cls):
"""
A null (undefined) instance of `GXRA <geosoft.gxapi.GXRA>` | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def is_null(self):
"""
Check if this is a null (undefined) instance | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def create(cls, file):
"""
Creates `GXRA <geosoft.gxapi.GXRA>` | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def create_sbf(cls, sbf, file):
"""
Creates `GXRA <geosoft.gxapi.GXRA>` on an `GXSBF <geosoft.gxapi.GXSBF>` | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def gets(self, strbuff):
"""
Get next full line from `GXRA <geosoft.gxapi.GXRA>` | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def len(self):
"""
Returns the total number of lines in `GXRA <geosoft.gxapi.GXRA>` | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def line(self):
"""
Returns current line #, 0 is the first | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def seek(self, line):
"""
Position next read to specified line # | GeosoftInc/gxpy | [
27,
28,
27,
30,
1476726730
] |
def inactive_registrations(self):
    """Queryset of this object's registrations that have been canceled."""
    return self.registrations.filter(canceled__isnull=False)
6,
1,
6,
29,
1458950472
] |
def copy_to_school_year(old, school_year):
    """Clone an Orderable into *school_year* as a non-public draft.

    Clears id/pk so save() performs an INSERT, resets evaluation/note,
    copies all M2M relations and related objects, and registers the
    leaders with the target school year. Returns the new instance.
    """
    new = Orderable.objects.get(id=old.id)
    # Dropping both id and pk makes save() insert a fresh row (a copy).
    new.id, new.pk = None, None
    new.school_year = school_year
    new.public = False
    new.evaluation = ""
    new.note = ""
    new.save()
    new.groups.set(old.groups.all())
    new.age_groups.set(old.age_groups.all())
    new.target_groups.set(old.target_groups.all())
    # Make sure every leader of the old object belongs to the new year.
    for leader in old.all_leaders:
        school_year.leaders.add(leader)
    new.leaders.set(old.all_leaders)
    new.questions.set(old.questions.all())
    copy_related_objects(
        new,
        attachments=old.attachments,
        times=old.times,
        variants=old.variants,
    )
    return new
6,
1,
6,
29,
1458950472
] |
def get_payment_status(self, d=None):
    """Build the PaymentStatus as of date *d* (today when d is None).

    Discounts/receipts/returns are evaluated up to *d*; the due dates are
    derived from payment_requested and the orderable's due_* settings.
    """
    return PaymentStatus(
        price=self.price,
        discount=self.get_discounted(d),
        # Join explanations of discounts accounted on or before *d*.
        explanation=",\n".join(
            discount.explanation.strip()
            for discount in self.all_discounts
            if (d is None or discount.accounted.date() <= d) and discount.explanation.strip()
        ),
        received=self.get_received(d),
        returned=self.get_returned(d),
        current_date=d or date.today(),
        # due_from: None until payment is requested (the `and` short-circuit).
        due_from=self.payment_requested
        and (
            self.payment_requested.date()
            if self.subject.orderable.due_from_days is None
            else max(
                self.start_date - timedelta(days=self.subject.orderable.due_from_days),
                self.payment_requested.date(),
            )
        ),
        # due_date: likewise None until payment is requested.
        due_date=self.payment_requested
        and max(
            self.start_date - timedelta(days=self.subject.orderable.due_date_days),
            self.payment_requested.date() + timedelta(days=self.subject.min_due_date_days),
        ),
    )
6,
1,
6,
29,
1458950472
] |
def end_date(self):
    """End date of the event: start plus the orderable's duration."""
    duration = self.subject.orderable.duration
    if not self.start_time:
        # All-day style event: duration added directly to the date.
        return self.start_date + duration
    return (datetime.combine(self.start_date, self.start_time) + duration).date()
6,
1,
6,
29,
1458950472
] |
def end_time(self):
    """End time of the event, or None when no start_time is set."""
    if not self.start_time:
        return None
    combined = datetime.combine(self.start_date, self.start_time)
    return (combined + self.subject.orderable.duration).time()
6,
1,
6,
29,
1458950472
] |
def copy_relations(self, oldinstance):
    """Copy all many-to-many relations from *oldinstance* onto this instance.

    BUGFIX: target_groups was previously populated from
    oldinstance.age_groups (copy-paste error); it now copies
    oldinstance.target_groups, matching copy_to_school_year elsewhere
    in this module.
    """
    self.departments.set(oldinstance.departments.all())
    self.event_types.set(oldinstance.event_types.all())
    self.groups.set(oldinstance.groups.all())
    self.age_groups.set(oldinstance.age_groups.all())
    self.target_groups.set(oldinstance.target_groups.all())
    self.leaders.set(oldinstance.leaders.all())
6,
1,
6,
29,
1458950472
] |
def all_departments(self):
    """Materialise the departments queryset as a plain list."""
    departments = self.departments.all()
    return list(departments)
6,
1,
6,
29,
1458950472
] |
def all_event_types(self):
    """Materialise the event_types queryset as a plain list."""
    event_types = self.event_types.all()
    return list(event_types)
6,
1,
6,
29,
1458950472
] |
def all_groups(self):
    """Materialise the groups queryset as a plain list."""
    groups = self.groups.all()
    return list(groups)
6,
1,
6,
29,
1458950472
] |
def all_age_groups(self):
    """Materialise the age_groups queryset as a plain list."""
    age_groups = self.age_groups.all()
    return list(age_groups)
6,
1,
6,
29,
1458950472
] |
def all_target_groups(self):
    """Materialise the target_groups queryset as a plain list."""
    target_groups = self.target_groups.all()
    return list(target_groups)
6,
1,
6,
29,
1458950472
] |
def all_leaders(self):
    """Materialise the leaders queryset as a plain list."""
    leaders = self.leaders.all()
    return list(leaders)
6,
1,
6,
29,
1458950472
] |
def render(self, context):
    """Build the template context for the orderable-events plugin.

    Resolves the school year (explicit > request attribute > current),
    filters public Orderable events by each configured relation, and
    produces per-group event listings.
    """
    school_year = (
        self.school_year or getattr(context.get("request"), "school_year") or SchoolYear.objects.get_current()
    )
    events = Orderable.objects.filter(school_year=school_year, public=True).distinct()
    # Each configured relation narrows the queryset; empty lists mean no filter.
    if self.all_departments:
        events = events.filter(department__in=self.all_departments)
    if self.all_event_types:
        events = events.filter(subject_type__in=self.all_event_types)
    if self.all_age_groups:
        events = events.filter(age_groups__in=self.all_age_groups)
    if self.all_target_groups:
        events = events.filter(target_groups__in=self.all_target_groups)
    if self.all_leaders:
        events = events.filter(leaders__in=self.all_leaders)
    # Grouping: explicit groups win, else groups of the chosen event types,
    # else all subject groups.
    if self.all_groups:
        events = events.filter(groups__in=self.all_groups)
        groups = self.all_groups
    elif self.all_event_types:
        groups = SubjectGroup.objects.filter(subject_types__in=self.all_event_types)
    else:
        groups = SubjectGroup.objects.all()
    context.update(
        {
            "school_year": school_year,
            "events": events,
            # Lazily evaluated (group, matching events) pairs.
            "groups": (self.Group(group=group, objects=events.filter(groups=group)) for group in groups),
        }
    )
    return context
6,
1,
6,
29,
1458950472
] |
def copy_relations(self, oldinstance):
    """Copy the event_types relation when the CMS clones this plugin.

    NOTE(review): direct assignment to a M2M manager only works on
    Django < 2.0; newer Django requires .set(...) as used elsewhere in
    this file — confirm the project's Django version.
    """
    self.event_types = oldinstance.event_types.all()
6,
1,
6,
29,
1458950472
] |
def all_event_types(self):
    """Return the related event types as a concrete list."""
    related = self.event_types.all()
    return list(related)
6,
1,
6,
29,
1458950472
] |
def __init__(self):
    """Initialise an empty fitness-over-time recording."""
    self.best_fitnesses = []  # best raw fitness values seen so far, in order
    self.timestamps = []      # seconds elapsed since start_time per result
    self.start_time = None    # presumably set when the run starts — confirm
    self.current_best = None  # best fitness object seen so far
    self.return_spec = None
21,
3,
21,
11,
1382447731
] |
def on_result(self, invocation):
    """Record the running best fitness and the elapsed time of this result."""
    fitness = invocation.current_result
    improved = self.current_best is None or fitness < self.current_best
    if improved:
        self.current_best = fitness
    self.best_fitnesses.append(self.current_best.raw_values)
    elapsed = datetime.now() - self.start_time
    self.timestamps.append(elapsed.total_seconds())
21,
3,
21,
11,
1382447731
] |
def show_fitness_time_plot(self):
    """Show a fitness--time plot"""
    # Requires matplotlib.pyplot imported as plt elsewhere in the module.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel("Time")
    ax.set_ylabel(self.get_y_label())
    # Data recorded incrementally by on_result().
    ax.plot(self.timestamps, self.best_fitnesses)
    plt.show()
21,
3,
21,
11,
1382447731
] |
def __init__(self, *args, **kwargs):
    """Relabel the upcoming-meeting fields with translated captions."""
    super(EditUpcomingMeetingForm, self).__init__(*args, **kwargs)
    self.fields['upcoming_meeting_title'].label = _('Title')
    self.fields['upcoming_meeting_scheduled_at'].label = _('Scheduled at')
    self.fields['upcoming_meeting_location'].label = _('Location')
    self.fields['upcoming_meeting_comments'].label = _('Background')
8,
16,
8,
18,
1372881176
] |
def clean(self):
#prevent voting end time from illegal values (past time,
#time after meeting schedule) | hasadna/OpenCommunity | [
8,
16,
8,
18,
1372881176
] |
def save(self):
    """Persist the form and stamp the community's voting end time.

    NOTE(review): voting_ends_at is hard-coded to 2025-01-01 12:00 —
    this looks like a placeholder; confirm it should not come from a
    cleaned form field instead.
    """
    c = super(EditUpcomingMeetingForm, self).save()
    c.voting_ends_at = datetime.combine(date(2025, 1, 1), time(12, 0, 0))
    c.save()
    return c
8,
16,
8,
18,
1372881176
] |
def __init__(self, *args, **kwargs):
    """Populate board choices and preselect current meeting participants."""
    super(UpcomingMeetingParticipantsForm, self).__init__(*args, **kwargs)
    # Ids of members already registered for the upcoming meeting.
    participants = self.instance.upcoming_meeting_participants.values_list(
        'id', flat=True)
    board_in = []
    board_choices = []
    for b in self.instance.get_board_members():
        board_choices.append((b.id, b.display_name,))
        # Preselect board members who are already participants.
        if b.id in participants:
            board_in.append(b.id)
    self.fields['board'].choices = board_choices
    self.initial['board'] = board_in
    self.fields['upcoming_meeting_participants'].queryset = self.instance.get_members()
    self.fields['upcoming_meeting_participants'].label = ""
8,
16,
8,
18,
1372881176
] |
def _bifur_ ( self , mu , sigma1 , sigma2 ) :
"""Generate the bifurcated gaussian
>>> value = bifur ( 0 , -1 , +2 )
"""
if sigma1 * sigma2 > 0.0 :
raise ValueError( 'Lower and upper errors must have opposite signs' ) | OstapHEP/ostap | [
15,
10,
15,
9,
1486653996
] |
def _cauchy_ ( self , mu , gamma ) :
    """Generate Cauchy random numbers
    - rely on the distribution of the ratio for two Gaussian variables
    - see https://en.wikipedia.org/wiki/Cauchy_distribution
    """
    # Redraw the denominator while it is too close to zero (|g1| < _fmin)
    # to avoid division blow-ups.
    g1 = self.gauss ( 0.0 , 1.0 )
    while abs ( g1 ) < _fmin : g1 = self.gauss ( 0.0 , 1.0 )
    g2 = self.gauss ( 0.0 , 1.0 )
    # Ratio of two standard normals is standard Cauchy; shift/scale it.
    return 1.0 * mu + ( 1.0 * g2 / g1 ) * gamma
15,
10,
15,
9,
1486653996
] |
def _ve_gauss_ ( self , val ) :
    """Generate the gaussian according to Ostap.Math.ValueWithError
    >>> ve = VE ( 1 , 2 )
    >>> value = ve_gauss ( ve )
    """
    # Use the value as the mean and the (symmetric) error as the sigma.
    mean = val.value ()
    sigma = val.error ()
    return self.gauss ( mean , sigma )
15,
10,
15,
9,
1486653996
] |
# Delegate Poisson generation to the module-level _poisson helper.
def _poisson_ ( self , mu ) : return _poisson ( mu )
15,
10,
15,
9,
1486653996
] |
# Delegate Poisson generation to the module-level _poisson helper.
def _poisson_ ( self , mu ) : return _poisson ( mu )
15,
10,
15,
9,
1486653996
] |
def _poisson_ ( self , mu ) :
    """Generate a Poisson-distributed integer with mean *mu*.

    For large mu (>= _MAX) a rounded Gaussian approximation
    N(mu, sqrt(mu)) is used; otherwise inversion by sequential search
    of the cumulative distribution.
    """
    mu = float ( mu )
    if _MAX <= mu :
        # Gaussian approximation: redraw until non-negative, then round.
        r = -1
        while r < 0 : r = self.gauss ( mu , _sqrt( mu ) )
        return max ( _round ( r ) , 0 )
    # Inversion: accumulate pmf terms p into the cdf s until s >= u.
    x = 0
    p = _exp ( -mu )
    s = p
    u = self.uniform ( 0 , 1 )
    while s < u :
        x += 1
        p *= mu / x
        s += p
    return x
15,
10,
15,
9,
1486653996
] |
def ValidateInput(compiler, parallelism, sourcefile_path, cflags,
                  resultfile_path):
  """Sanity-check the driver arguments.

  Asserts (rather than raising) because these are harness preconditions:
  the compiler must be an existing file, parallelism a positive count,
  and the path/flag arguments plain strings.
  """
  assert os.path.isfile(compiler)
  assert parallelism >= 1
  assert type(sourcefile_path) is str
  assert type(cflags) is list
  # Every flag must be exactly a str (exact type check, as before).
  assert all(type(flag) is str for flag in cflags)
  assert type(resultfile_path) is str
21,
16,
21,
3,
1435959644
] |
def ExtractTestConfigs(sourcefile_path, suite_name):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
  they demark one no-compile test and try to extract the test configuration
  from that.

  Args:
    sourcefile_path: The path to the source file.
    suite_name: The name of the test suite.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'NCTEST_NAME'
        suite_name: 'SOURCE_FILE_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler.  If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded.  If the list is empty, than we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is actually None, then this specifies a compiler sanity check test, which
    should expect a SUCCESSFUL compilation.
  """
  # Start with at least the compiler sanity test. You need to always have one
  # sanity test to show that compiler flags and configuration are not just
  # wrong. Otherwise, having a misconfigured compiler, or an error in the
  # shared portions of the .nc file would cause all tests to erroneously pass.
  test_configs = []
  # Context manager guarantees the file is closed even if parsing raises
  # (the original opened and closed the file manually).
  with open(sourcefile_path, 'r') as sourcefile:
    for line in sourcefile:
      match_result = NCTEST_CONFIG_RE.match(line)
      if not match_result:
        continue
      groups = match_result.groups()
      # Grab the name and remove the defined() predicate if there is one.
      name = groups[0]
      strip_result = STRIP_DEFINED_RE.match(name)
      if strip_result:
        name = strip_result.group(1)
      # Read expectations if there are any.
      test_configs.append({'name': name,
                           'suite_name': suite_name,
                           'expectations': ParseExpectation(groups[1])})
  return test_configs
21,
16,
21,
3,
1435959644
] |
def PassTest(resultfile, resultlog, test):
  """Logs the result of a test started by StartTest(), or a disabled test
  configuration.

  Args:
    resultfile: File object for .cc file that results are written to.
    resultlog: File object for the log file.
    test: An instance of the dictionary returned by StartTest(), a
          configuration from ExtractTestConfigs().
  """
  # Emit a passing gtest stub for this (suite, test) pair.
  resultfile.write(GUNIT_TEMPLATE % (
      test['suite_name'], test['name']))

  # The 'started_at' key is only added if a test has been started.
  if 'started_at' in test:
    # Log the wall-clock duration and the start/finish timestamps.
    resultlog.write(LOG_TEMPLATE % (
        test['suite_name'], test['name'],
        test['finished_at'] - test['started_at'],
        test['started_at'], test['finished_at']))
21,
16,
21,
3,
1435959644
] |
def WriteStats(resultlog, suite_name, timings):
"""Logs the peformance timings for each stage of the script.
Args:
resultlog: File object for the log file.
suite_name: The name of the GUnit suite this test belongs to.
timings: Dictionary with timestamps for each stage of the script run.
"""
stats_template = """ | endlessm/chromium-browser | [
21,
16,
21,
3,
1435959644
] |
def ExtractTestOutputAndCleanup(test):
  """Test output is in temp files. Read those and delete them.

  Returns: a [stdout, stderr] list.

  NOTE(review): the original docstring claimed "(stderr, stdout)", but the
  loop fills index 0 from the 'stdout' stream and index 1 from 'stderr' —
  callers should rely on the [stdout, stderr] order the code implements.
  """
  outputs = [None, None]
  for i, stream_name in ((0, "stdout"), (1, "stderr")):
    stream = test[stream_name]
    # Rewind before reading: the subprocess wrote from the current offset.
    stream.seek(0)
    outputs[i] = stream.read()
    stream.close()

  return outputs
21,
16,
21,
3,
1435959644
] |
def CompleteAtLeastOneTest(executing_tests):
  """Blocks until at least one task is removed from executing_tests.

  This function removes completed tests from executing_tests, logging failures
  and output. If no tests can be removed, it will enter a poll-loop until one
  test finishes or times out. On a timeout, this function is responsible for
  terminating the process in the appropriate fashion.

  Args:
    executing_tests: A dict mapping a string containing the test name to the
                     test dict return from StartTest().

  Returns:
    A list of tests that have finished.
  """
  finished_tests = []
  busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while len(finished_tests) == 0:
    # If we don't make progress for too long, assume the code is just dead.
    assert busy_loop_timeout > time.time()

    # Select on the output files to block until we have something to
    # do. We ignore the return value from select and just poll all
    # processes.
    read_set = []
    for test in executing_tests.values():
      read_set.extend([test['stdout'], test['stderr']])
    select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)

    # Now attempt to process results.
    now = time.time()
    for test in executing_tests.values():
      proc = test['proc']
      if proc.poll() is not None:
        # Process exited: record finish time and collect it.
        test['finished_at'] = now
        finished_tests.append(test)
      elif test['terminate_timeout'] < now:
        # Soft deadline passed: ask the process to terminate.
        proc.terminate()
        test['aborted_at'] = now
      elif test['kill_timeout'] < now:
        # Hard deadline passed: kill outright.
        proc.kill()
        test['aborted_at'] = now

    if len(finished_tests) == 0:
      # We had output from some process but no process had
      # finished. To avoid busy looping while waiting for a process to
      # finish, insert a small 100 ms delay here.
      time.sleep(0.1)

  # Remove the finished entries from the executing map before returning.
  for test in finished_tests:
    del executing_tests[test['name']]
  return finished_tests
21,
16,
21,
3,
1435959644
] |
def create(kernel):
	"""Build the Intangible template for this draft schematic.

	NOTE(review): no value is returned here; sibling factories typically
	end with `return result` — confirm this is not truncated.
	"""
	result = Intangible()

	result.template = "object/draft_schematic/clothing/shared_clothing_boots_casual_12.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")
62,
37,
62,
37,
1297996365
] |
def create(kernel):
	"""Build the Ship template object for this blacksun vessel.

	NOTE(review): no value is returned here; sibling factories typically
	end with `return result` — confirm this is not truncated.
	"""
	result = Ship()

	result.template = "object/ship/shared_blacksun_medium_s03_tier2.iff"
	result.attribute_template_id = -1
	result.stfName("","")
62,
37,
62,
37,
1297996365
] |
def create(kernel):
	"""Build the Intangible template for this draft schematic.

	NOTE(review): no value is returned here; sibling factories typically
	end with `return result` — confirm this is not truncated.
	"""
	result = Intangible()

	result.template = "object/draft_schematic/clothing/shared_clothing_armor_mandalorian_bracer_r.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")
62,
37,
62,
37,
1297996365
] |
def module_tree(name,url,iconimage,mode,parser,parserfunction):
    """Dispatch Torrent-TV browsing: no parserfunction lists categories,
    'channels' lists the channels of the selected category.

    NOTE(review): iconimage, mode and parser are unused here; they appear
    to belong to a generic parser-dispatch signature shared across modules.
    """
    if not parserfunction: torrenttv()
    elif parserfunction == 'channels': torrenttv_play(name,url)
1,
8,
1,
4,
1493763534
] |
def torrenttv():
    """Parse the Torrent-TV playlist into categories and add a directory per category.

    Python 2 code (xrange). Category names are Russian; two adult categories
    are skipped when the 'hide_porn' setting is enabled.
    """
    dict_torrent = {}
    html_source = get_page_source(base_url)
    # Playlist entries: "#EXTINF:-1,<title>\n<acehash>".
    match = re.compile('#EXTINF:-1,(.+?)\n(.*)').findall(html_source)
    for title, acehash in match:
        # Channel name is the part before the first "(category)" suffix.
        channel_name = re.compile('(.+?) \(').findall(title)
        match_cat = re.compile('\((.+?)\)').findall(title)
        for i in xrange(0,len(match_cat)):
            if match_cat[i] == "Для взрослых" and settings.getSetting('hide_porn') == "true":
                pass
            elif match_cat[i] == "Ночной канал" and settings.getSetting('hide_porn') == "true":
                pass
            else:
                # Optionally translate the category name to the UI language.
                if settings.getSetting('russian_translation') == "true": categorie = russiandictionary(match_cat[i])
                else: categorie=match_cat[i]
                # Group (channel, hash) pairs by category; ignore parse misses.
                if categorie not in dict_torrent.keys():
                    try:
                        dict_torrent[categorie] = [(channel_name[0],acehash)]
                    except: pass
                else:
                    try:
                        dict_torrent[categorie].append((channel_name[0],acehash))
                    except: pass
    # One directory entry per category; the whole dict is serialised as the url.
    for categories in dict_torrent.keys():
        addDir(categories,str(dict_torrent),401,os.path.join(current_dir,"icon.png"),401,True,parser="torrenttvruall",parserfunction="channels")
1,
8,
1,
4,
1493763534
] |
def torrenttv_play(name,url):
    """List the channels of category *name*; *url* is the repr of the category dict."""
    # The category map was serialised with str() in torrenttv(); eval restores it.
    dict_torrent=eval(url)
    for channel in dict_torrent[name]:
        try: addDir(channel[0],channel[1],1,os.path.join(current_dir,"icon.png"),2,False)
        except:pass
1,
8,
1,
4,
1493763534
] |
def isFunction(v):
    """Return True iff *v* is a plain Python function (def or lambda).

    Builtins (e.g. len) and classes are NOT plain functions and yield False.
    """
    # Local import keeps this a drop-in replacement for the original,
    # which compared type(v) against the type of a sample function.
    import types
    # types.FunctionType cannot be subclassed, so isinstance is an exact
    # match here and reads better than a type() equality test.
    return isinstance(v, types.FunctionType)
2,
2,
2,
1,
1434186057
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.