| id (string, 1–7 chars) | text (string, 6–1.03M chars) | dataset_id (1 class) |
|---|---|---|
3343876 | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from typing import Any, Dict, List, Type
from torch.distributions import kl_divergence
from tianshou.policy import A2CPolicy
from tianshou.data import Batch, ReplayBuffer
class NPGPolicy(A2CPolicy):
    """Implementation of Natural Policy Gradient.

    https://proceedings.neurips.cc/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf

    :param torch.nn.Module actor: the actor network following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param torch.nn.Module critic: the critic network. (s -> V(s))
    :param torch.optim.Optimizer optim: the optimizer for actor and critic network.
    :param dist_fn: distribution class for computing the action.
    :type dist_fn: Type[torch.distributions.Distribution]
    :param bool advantage_normalization: whether to do per mini-batch advantage
        normalization. Default to True.
    :param int optim_critic_iters: Number of times to optimize critic network per
        update. Default to 5.
    :param float gae_lambda: in [0, 1], param for Generalized Advantage Estimation.
        Default to 0.95.
    :param bool reward_normalization: normalize estimated values to have std close to
        1. Default to False.
    :param int max_batchsize: the maximum size of the batch when computing GAE,
        depends on the size of available memory and the memory cost of the
        model; should be as large as possible within the memory constraint.
        Default to 256.
    :param bool action_scaling: whether to map actions from range [-1, 1] to range
        [action_spaces.low, action_spaces.high]. Default to True.
    :param str action_bound_method: method to bound action to range [-1, 1], can be
        either "clip" (for simply clipping the action), "tanh" (for applying tanh
        squashing) for now, or empty string for no bounding. Default to "clip".
    :param Optional[gym.Space] action_space: env's action space, mandatory if you want
        to use option "action_scaling" or "action_bound_method". Default to None.
    :param lr_scheduler: a learning rate scheduler that adjusts the learning rate in
        optimizer in each policy.update(). Default to None (no lr_scheduler).
    :param bool deterministic_eval: whether to use deterministic action instead of
        stochastic action sampled by the policy. Default to False.
    """

    def __init__(
        self,
        actor: torch.nn.Module,
        critic: torch.nn.Module,
        optim: torch.optim.Optimizer,
        dist_fn: Type[torch.distributions.Distribution],
        advantage_normalization: bool = True,
        optim_critic_iters: int = 5,
        actor_step_size: float = 0.5,
        **kwargs: Any,
    ) -> None:
        super().__init__(actor, critic, optim, dist_fn, **kwargs)
        # NPG computes its own natural-gradient actor step, so A2C's loss
        # weights and gradient-clipping attributes are unused here.
        del self._weight_vf, self._weight_ent, self._grad_norm
        self._norm_adv = advantage_normalization
        self._optim_critic_iters = optim_critic_iters
        self._step_size = actor_step_size
        # adjusts Hessian-vector product calculation for numerical stability
        self._damping = 0.1

    def process_fn(
        self, batch: Batch, buffer: ReplayBuffer, indices: np.ndarray
    ) -> Batch:
        """Compute advantages via A2C, then cache old-policy log-probs
        (``batch.logp_old``) and optionally normalize advantages."""
        batch = super().process_fn(batch, buffer, indices)
        old_log_prob = []
        with torch.no_grad():
            # split into bounded-size minibatches to cap peak memory usage
            for b in batch.split(self._batch, shuffle=False, merge_last=True):
                old_log_prob.append(self(b).dist.log_prob(b.act))
        batch.logp_old = torch.cat(old_log_prob, dim=0)
        if self._norm_adv:
            batch.adv = (batch.adv - batch.adv.mean()) / batch.adv.std()
        return batch

    def learn(  # type: ignore
        self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
    ) -> Dict[str, List[float]]:
        """One NPG update: natural-gradient actor step per minibatch, then
        ``_optim_critic_iters`` critic optimization steps."""
        actor_losses, vf_losses, kls = [], [], []
        for step in range(repeat):
            for b in batch.split(batch_size, merge_last=True):
                # optimize actor
                # direction: calculate vanilla (policy-gradient) direction
                dist = self(b).dist
                log_prob = dist.log_prob(b.act)
                log_prob = log_prob.reshape(log_prob.size(0), -1).transpose(0, 1)
                actor_loss = -(log_prob * b.adv).mean()
                flat_grads = self._get_flat_grad(
                    actor_loss, self.actor, retain_graph=True).detach()
                # direction: calculate natural gradient
                with torch.no_grad():
                    old_dist = self(b).dist
                kl = kl_divergence(old_dist, dist).mean()
                # calculate first order gradient of kl with respect to theta
                flat_kl_grad = self._get_flat_grad(kl, self.actor, create_graph=True)
                # conjugate gradients approximate F^-1 * grad (F: Fisher matrix)
                search_direction = -self._conjugate_gradients(
                    flat_grads, flat_kl_grad, nsteps=10)
                # step: move parameters along the natural-gradient direction
                with torch.no_grad():
                    flat_params = torch.cat([param.data.view(-1)
                                             for param in self.actor.parameters()])
                    new_flat_params = flat_params + self._step_size * search_direction
                    self._set_from_flat_params(self.actor, new_flat_params)
                    new_dist = self(b).dist
                    kl = kl_divergence(old_dist, new_dist).mean()
                # optimize critic
                for _ in range(self._optim_critic_iters):
                    value = self.critic(b.obs).flatten()
                    vf_loss = F.mse_loss(b.returns, value)
                    self.optim.zero_grad()
                    vf_loss.backward()
                    self.optim.step()
                actor_losses.append(actor_loss.item())
                vf_losses.append(vf_loss.item())
                kls.append(kl.item())
        # update learning rate if lr_scheduler is given
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        return {
            "loss/actor": actor_losses,
            "loss/vf": vf_losses,
            "kl": kls,
        }

    def _MVP(self, v: torch.Tensor, flat_kl_grad: torch.Tensor) -> torch.Tensor:
        """Matrix vector product (Fisher-vector product via double backprop)."""
        # calculate second order gradient of kl with respect to theta
        kl_v = (flat_kl_grad * v).sum()
        flat_kl_grad_grad = self._get_flat_grad(
            kl_v, self.actor, retain_graph=True).detach()
        # damping term improves numerical stability (see __init__)
        return flat_kl_grad_grad + v * self._damping

    def _conjugate_gradients(
        self,
        b: torch.Tensor,
        flat_kl_grad: torch.Tensor,
        nsteps: int = 10,
        residual_tol: float = 1e-10
    ) -> torch.Tensor:
        """Iteratively solve ``F x = b`` where ``F`` is applied via ``_MVP``."""
        x = torch.zeros_like(b)
        r, p = b.clone(), b.clone()
        # Note: should be 'r, p = b - MVP(x)', but for x=0, MVP(x)=0.
        # Change if doing warm start.
        rdotr = r.dot(r)
        for i in range(nsteps):
            z = self._MVP(p, flat_kl_grad)
            alpha = rdotr / p.dot(z)
            x += alpha * p
            r -= alpha * z
            new_rdotr = r.dot(r)
            if new_rdotr < residual_tol:
                break
            p = r + new_rdotr / rdotr * p
            rdotr = new_rdotr
        return x

    def _get_flat_grad(
        self, y: torch.Tensor, model: nn.Module, **kwargs: Any
    ) -> torch.Tensor:
        """Return d(y)/d(model parameters) flattened into one 1-D tensor."""
        grads = torch.autograd.grad(y, model.parameters(), **kwargs)  # type: ignore
        return torch.cat([grad.reshape(-1) for grad in grads])

    def _set_from_flat_params(
        self, model: nn.Module, flat_params: torch.Tensor
    ) -> nn.Module:
        """Copy a flat parameter vector back into the model's parameters in place."""
        prev_ind = 0
        for param in model.parameters():
            flat_size = int(np.prod(list(param.size())))
            param.data.copy_(
                flat_params[prev_ind:prev_ind + flat_size].view(param.size()))
            prev_ind += flat_size
        return model
| StarcoderdataPython |
3321424 | print("Successfully imported 'other_script.py'")
| StarcoderdataPython |
3200078 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""This module provides functions and constants used in other modules in this package."""
import asyncio
import json
from typing import Union
import httpx
import requests
from privatebinapi.exceptions import BadServerResponseError, PrivateBinAPIError
__all__ = ('get_loop', 'verify_response', 'DEFAULT_HEADERS')
DEFAULT_HEADERS = {'X-Requested-With': 'JSONHttpRequest'}
def verify_response(response: Union[requests.Response, httpx.Response]) -> dict:
    """Validate an HTTP response from a PrivateBin host and return its JSON payload.

    :param response: An HTTP response from a PrivateBin host.
    :return: The JSON data included in the response.
    :raises BadServerResponseError: if the body cannot be parsed as JSON.
    :raises PrivateBinAPIError: if the server reports a non-zero status.
    """
    try:
        payload = response.json()
    except json.JSONDecodeError as error:
        message = 'Unable to parse response from %s' % response.url
        raise BadServerResponseError(message) from error
    if payload['status'] == 0:
        return payload
    raise PrivateBinAPIError(payload['message'])
def get_loop():
    """Return the running event loop.

    On Python 3.6, where ``asyncio.get_running_loop`` does not exist,
    fall back to ``asyncio.get_event_loop``.

    :return: The currently running event loop
    """
    getter = getattr(asyncio, 'get_running_loop', None)
    if getter is None:
        # Python 3.6 fallback
        return asyncio.get_event_loop()
    return getter()
| StarcoderdataPython |
3203852 | # Generated by Django 2.2.5 on 2020-02-18 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``user.last_name_kana`` to a blank-able CharField (max_length=30)."""

    dependencies = [
        ('users', '0021_auto_20200127_2207'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='last_name_kana',
            field=models.CharField(blank=True, max_length=30, verbose_name='姓(カナ)'),
        ),
    ]
| StarcoderdataPython |
102815 | <reponame>pootle/pimotors
#!/usr/bin/python3
"""
This module provides feedback control for motors
So far a PID controller......
"""
class PIDfeedback():
    """
    This class can be used as part of a dc motor controller. It provides feedback
    control using a PID controller (https://en.wikipedia.org/wiki/PID_controller).

    It handles only the error value; the calling software works out what the error
    is, to allow it to be calculated or measured in different ways.
    """

    def __init__(self, timenow, Pfact, Ifact, Dfact):
        """
        timenow : timestamp of initial reading / setup
        Pfact   : Proportional factor
        Ifact   : Integral factor
        Dfact   : Derivative (slope) factor
        """
        self.timeprev = timenow
        self.timestart = timenow
        self.errorprev = 0
        self.errortotal = 0
        self.Pfact = Pfact
        self.Ifact = Ifact
        self.Dfact = Dfact

    def reset(self, timenow, errornow):
        """
        Simple reset function to save having to discard and recreate an instance.

        timenow  : timestamp of the reset
        errornow : kept for interface compatibility; the accumulated error state
                   is simply zeroed.
        """
        self.timeprev = timenow
        self.timestart = timenow
        self.errorprev = 0
        self.errortotal = 0

    def factors(self, Pfact=None, Ifact=None, Dfact=None):
        """
        Set and return any combination of the factors.

        Any parameter not None updates that factor to the supplied value.
        Returns a 3-tuple (Pfact, Ifact, Dfact) of the current values.
        """
        if Pfact is not None:
            self.Pfact = Pfact
        if Ifact is not None:
            # bug fix: previously assigned Pfact to self.Ifact
            self.Ifact = Ifact
        if Dfact is not None:
            # bug fix: previously assigned Pfact to self.Dfact
            self.Dfact = Dfact
        return self.Pfact, self.Ifact, self.Dfact

    def onefact(self, factor, newvalue):
        """
        Set and return a single factor.

        factor   : 'P', 'I', or 'D'
        newvalue : None to just return the current value, or a number (or numeric
                   string) to set and return the new value
        """
        assert newvalue is None or isinstance(newvalue, (int, float, str)), 'Value is not a number'
        value = float(newvalue) if isinstance(newvalue, str) else newvalue
        if factor == 'P':
            if value is not None:
                self.Pfact = value
            return self.Pfact
        elif factor == 'I':
            if value is not None:
                self.Ifact = value
            return self.Ifact
        elif factor == 'D':
            if value is not None:
                # bug fix: was 'self.Dfact=='D'' - a no-op comparison that left
                # the derivative factor unchanged
                self.Dfact = value
            return self.Dfact
        else:
            # bug fix: '%S' is an invalid format character and raised the wrong
            # ValueError; use '%s'
            raise ValueError('factor should be "P", "I" or "D"; not %s' % str(factor))

    def ticker(self, timenow, errornow):
        """
        Called on a regular basis; calculates the correction factor to be applied.

        As long as ticks are regular we don't need to use the elapsed time as part
        of the slope (derivative) calculation.

        timenow  : time at which the position was measured
        errornow : the absolute error at that time - note this is the error in
                   position, not the error in velocity
        """
        slope = errornow - self.errorprev
        self.errortotal += errornow
        self.errorprev = errornow
        self.timeprev = timenow
        return self.Pfact * errornow + self.Ifact * self.errortotal + self.Dfact * slope

    def odef(self):
        """Return a dict describing this instance (class name and PID factors)."""
        return {'className': type(self).__name__, 'Pfact': self.Pfact, 'Ifact': self.Ifact, 'Dfact': self.Dfact}
| StarcoderdataPython |
1693586 | <gh_stars>1-10
# -*- coding: utf-8 -*-
###############################################################################
#
# Person
# Returns members of Congress and U.S. Presidents since the founding of the nation.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Person(Choreography):
    """Generated Temboo Choreo wrapper for /Library/GovTrack/Person."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the Person Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Person, self).__init__(temboo_session, '/Library/GovTrack/Person')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return PersonInputSet()

    def _make_result_set(self, result, path):
        # Wraps a raw execution result in the Choreo-specific result set.
        return PersonResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for an asynchronous execution of this Choreo.
        return PersonChoreographyExecution(session, exec_id, path)
class PersonInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Person
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # NOTE: generated code - each setter simply forwards a named input value
    # to the base InputSet via _set_input.

    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return in the response. Use double-underscores to span relationships (e.g. person__firstname).)
        """
        super(PersonInputSet, self)._set_input('Fields', value)

    def set_Gender(self, value):
        """
        Set the value of the Gender input for this Choreo. ((optional, string) The person's gender (male or female). For historical data, gender is sometimes not specified. Filter operators allowed. Sortable.)
        """
        super(PersonInputSet, self)._set_input('Gender', value)

    def set_LastName(self, value):
        """
        Set the value of the LastName input for this Choreo. ((optional, string) The representative's last name. Filter operators allowed. Sortable.)
        """
        super(PersonInputSet, self)._set_input('LastName', value)

    def set_Limit(self, value):
        """
        Set the value of the Limit input for this Choreo. ((optional, integer) Results are paged 100 per call by default. Set the limit input to a high value to get all of the results at once.)
        """
        super(PersonInputSet, self)._set_input('Limit', value)

    def set_Offset(self, value):
        """
        Set the value of the Offset input for this Choreo. ((optional, integer) Offset the results by the number given, useful for paging through results.)
        """
        super(PersonInputSet, self)._set_input('Offset', value)

    def set_PersonID(self, value):
        """
        Set the value of the PersonID input for this Choreo. ((optional, integer) The ID number for a person to retrieve. When using this input, all other filter parameters are ignored, and a single record is returned.)
        """
        super(PersonInputSet, self)._set_input('PersonID', value)

    def set_Query(self, value):
        """
        Set the value of the Query input for this Choreo. ((conditional, string) Filters according to a full-text search on the object.)
        """
        super(PersonInputSet, self)._set_input('Query', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(PersonInputSet, self)._set_input('ResponseFormat', value)

    def set_Sort(self, value):
        """
        Set the value of the Sort input for this Choreo. ((optional, string) You can order the results using fieldname (ascending) or -fieldname (descending) where "fieldname" is one of the variables that is listed as 'Sortable' in the description. Ex: '-lastname')
        """
        super(PersonInputSet, self)._set_input('Sort', value)
class PersonResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Person Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        """Deserialize a JSON document string into a Python object."""
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from GovTrack.)
        """
        return self._output.get('Response')
class PersonChoreographyExecution(ChoreographyExecution):
    # Execution handle for an asynchronous run of the Person Choreo.

    def _make_result_set(self, response, path):
        # Wraps the raw response in the Choreo-specific result set.
        return PersonResultSet(response, path)
| StarcoderdataPython |
4841605 | <filename>setup.py<gh_stars>1-10
import os
from setuptools import setup, find_packages
# Load the long description for the package metadata from the README.
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()
def read(rel_path):
    """Return the text of *rel_path*, resolved relative to this file's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    # intentionally *not* adding an encoding option to open, See:
    # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
    with open(os.path.join(here, rel_path), 'r') as fp:
        return fp.read()


def get_version(rel_path):
    """Extract the package version from the ``__version__ = '...'`` line of *rel_path*.

    :raises RuntimeError: if no ``__version__`` line is found.
    """
    for line in read(rel_path).splitlines():
        if not line.startswith('__version__'):
            continue
        return line.split("'", 2)[1]
    raise RuntimeError('Unable to find version string.')
# requirements.txt doubles as install_requires, read at build time.
with open('requirements.txt', 'r') as requirements:
    setup(
        name='dbify',
        version=get_version('dbify/__init__.py'),
        install_requires=list(requirements.read().splitlines()),
        packages=find_packages(),
        description='decorator for storing function results in a database',
        python_requires='>=3.6',
        author='<NAME>',
        author_email='<EMAIL>',
        classifiers=[
            'Programming Language :: Python :: 3',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent'],
        long_description=long_description,
        long_description_content_type='text/markdown',
        setup_requires=['setuptools_scm'],
    )
| StarcoderdataPython |
139708 | <reponame>xe1gyq/metal
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from oslo_config import cfg
from oslo_utils._i18n import _
# Configuration options for the inventory LLDP agent, registered under the
# [lldp] group of the oslo.config global CONF object.
INVENTORY_LLDP_OPTS = [
    cfg.ListOpt('drivers',
                default=['lldpd'],
                help=_("An ordered list of inventory LLDP driver "
                       "entrypoints to be loaded from the "
                       "inventory.agent namespace.")),
]

cfg.CONF.register_opts(INVENTORY_LLDP_OPTS, group="lldp")
| StarcoderdataPython |
3335861 | import logging
import socket
from datetime import datetime
from os import path, remove
from subprocess import DEVNULL, CalledProcessError, check_call
from time import sleep
from typing import Union
from urllib.parse import urlparse
import redis
from azure.storage.blob import BlobServiceClient
from pythonjsonlogger import jsonlogger
from settings import settings
# Module-wide logging: attach a JSON formatter (one JSON object per record,
# with a timestamp) to the root logger's stream handler.
logger = logging.getLogger()
logHandler = logging.StreamHandler()
logFmt = jsonlogger.JsonFormatter(timestamp=True)
logHandler.setFormatter(logFmt)
logger.addHandler(logHandler)
def is_leader(dbname):
    """Decide whether this host should perform the backup for *dbname*.

    When leader election is enabled, a short-lived Redis key named
    ``postgres-to-azureblob-<dbname>`` acts as a lock: the current holder
    (or nobody) may (re)claim it for 10 seconds.  With election disabled,
    every host is considered the leader.
    """
    if settings.leader_election_enabled:
        r = redis.Redis.from_url(settings.redis_url)
        lock_key = f"postgres-to-azureblob-{dbname}"
        hostname = socket.gethostname()
        is_leader = False
        with r.pipeline() as pipe:
            try:
                # WATCH/MULTI makes the read-then-set atomic: if another host
                # touches the key between GET and EXEC, WatchError aborts.
                pipe.watch(lock_key)
                leader_host = pipe.get(lock_key)
                # claim the lock if we already hold it or it is unclaimed
                if leader_host in (hostname.encode(), None):
                    pipe.multi()
                    pipe.setex(lock_key, 10, hostname)
                    pipe.execute()
                    is_leader = True
            except redis.WatchError:
                # another host won the race; stay non-leader this round
                pass
    else:
        is_leader = True
    return is_leader
def cleanup(filename: str) -> None:
    """Delete *filename* from disk; a missing file is silently ignored.

    :param filename: path of the local dump file to remove.
    """
    logging.warning(msg="Performing Cleanup", extra={"file": filename})
    try:
        remove(filename)
    except FileNotFoundError:
        pass
    return None
def upload_blob(filename: str, date: datetime) -> None:
    """Upload *filename* to the configured Azure Blob Storage container.

    The blob key is ``year/month/day/basename``, optionally prefixed with
    ``settings.blob_storage_path_prefix``.

    :param filename: local path of the file to upload.
    :param date: timestamp used to build the date-partitioned blob name.
    """
    service = BlobServiceClient.from_connection_string(settings.blob_storage_connection_string)
    parts = [str(date.year), str(date.month), str(date.day), path.basename(filename)]
    blobname = "/".join(parts)
    if settings.blob_storage_path_prefix:
        blobname = f"{settings.blob_storage_path_prefix}/{blobname}"
    logging.warning(
        msg="Uploading blob",
        extra={"local_file": filename, "remote_container": settings.blob_storage_container, "remote_file": blobname},
    )
    blob = service.get_blob_client(container=settings.blob_storage_container, blob=blobname)
    with open(filename, "rb") as f:
        blob.upload_blob(f)
    return None
def dump_database() -> Union[dict, None]:
    """Run ``pg_dump`` for the configured database, with retries.

    Only the elected leader host performs the dump.  On success, returns a dict
    with ``filename`` (path of the custom-format dump under /tmp) and ``date``
    (UTC timestamp used later for blob naming); returns None when not the
    leader or when all attempts fail.
    """
    date = datetime.utcnow()
    # the database name is the path component of the connection URL
    database_name = urlparse(settings.psql_connection_string).path[1:]
    filename = f"/tmp/{date.strftime('%FT%H%M%SZ')}-{database_name}.psql"
    command = ["pg_dump", "--format=custom", settings.psql_connection_string]
    if is_leader(dbname=database_name):
        for i in range(settings.retry_count):
            # reopening in "wb" mode truncates any partial dump from a
            # previous failed attempt
            with open(filename, "wb") as f:
                attempt = i + 1
                log_extras = {
                    "database": database_name,
                    "file": filename,
                    "retry_count": settings.retry_count,
                    "attempt": attempt,
                }
                try:
                    logging.warning(msg="Export start", extra=log_extras)
                    check_call(command, stdout=f, stderr=DEVNULL)
                    logging.warning(msg="Export complete", extra=log_extras)
                    return {"filename": filename, "date": date}
                except CalledProcessError:
                    if attempt < settings.retry_count:
                        logging.error(msg="Export failed, retrying", extra=log_extras)
                        sleep(settings.retry_delay)
                        continue
                    else:
                        # final attempt failed; caller receives None
                        logging.error(msg="Export failed, giving up", extra=log_extras)
                        return None
    else:
        logging.warning(msg="Leader Election Failed, Skipping", extra={"database": database_name})
if __name__ == "__main__":
    # dump -> upload -> local cleanup; does nothing when not the leader
    # or when the dump failed (dump_database returns None).
    dump = dump_database()
    if dump is not None:
        upload_blob(filename=dump.get("filename"), date=dump.get("date"))
        cleanup(filename=dump.get("filename"))
| StarcoderdataPython |
162899 | <filename>conanfile.py
from conans import ConanFile, CMake, tools
class LibCommuniConan(ConanFile):
    """Conan recipe for Communi, a cross-platform Qt IRC framework, built with CMake."""
    name = "communi"
    version = "3.6.0"
    license = "MIT"
    author = "Edgar <EMAIL>"
    url = "https://github.com/AnotherFoxGuy/libcommuni-cmake"
    description = "A cross-platform IRC framework written with Qt"
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"
    # ship headers, sources and the build script with the recipe
    exports_sources = "include*", "src*", "CMakeLists.txt"

    def build(self):
        # standard CMake configure + build
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        # rely on the project's CMake install rules for packaging
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        # consumers need the per-module include dirs used by Communi headers
        self.cpp_info.includedirs = ['include',
                                     'include/IrcUtil',
                                     'include/IrcModel',
                                     'include/IrcCore'
                                     ]
        self.cpp_info.libs = tools.collect_libs(self)
| StarcoderdataPython |
3383696 | # Generated by Django 2.0.5 on 2019-04-23 15:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make ``lock.requester`` optional and SET_NULL when the user is deleted."""

    dependencies = [
        ('hostlock', '0002_host_owner'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lock',
            name='requester',
            field=models.ForeignKey(blank=True, help_text='user requesting this lock', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| StarcoderdataPython |
2528 | # encoding: utf-8
#
# Copyright (C) 2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest import ( assert_that,
contains,
contains_string,
equal_to,
has_entries,
has_entry,
matches_regexp )
from pprint import pprint
import requests
import os.path
from ycmd.tests.clangd import ( IsolatedYcmd,
SharedYcmd,
PathToTestFile,
RunAfterInitialized )
from ycmd.tests.test_utils import ( BuildRequest,
ChunkMatcher,
CombineRequest,
LineColMatcher,
LocationMatcher,
ErrorMatcher,
WithRetry,
WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile
# This test is isolated to trigger objcpp hooks, rather than fetching completer
# from cache.
@IsolatedYcmd()
def Subcommands_DefinedSubcommands_test( app ):
  """The clangd completer must report exactly this sorted set of subcommands."""
  file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' )
  RunAfterInitialized( app, {
    'request': {
      'completer_target': 'filetype_default',
      'line_num': 10,
      'column_num': 3,
      'filetype': 'objcpp',
      'filepath': file_path
    },
    'expect': {
      'response': requests.codes.ok,
      'data': contains( *sorted( [ 'ExecuteCommand',
                                   'FixIt',
                                   'Format',
                                   'GetDoc',
                                   'GetDocImprecise',
                                   'GetType',
                                   'GetTypeImprecise',
                                   'GoTo',
                                   'GoToDeclaration',
                                   'GoToDefinition',
                                   'GoToImprecise',
                                   'GoToInclude',
                                   'GoToReferences',
                                   'RefactorRename',
                                   'RestartServer' ] ) )
    },
    'route': '/defined_subcommands',
  } )
@SharedYcmd
def Subcommands_GoTo_ZeroBasedLineAndColumn_test( app ):
  """GoToDefinition from (10, 3) must land on (2, 8) of the same test file."""
  file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' )
  RunAfterInitialized( app, {
    'request': {
      'contents': ReadFile( file_path ),
      'completer_target': 'filetype_default',
      'command_arguments': [ 'GoToDefinition' ],
      'line_num': 10,
      'column_num': 3,
      'filetype': 'cpp',
      'filepath': file_path
    },
    'expect': {
      'response': requests.codes.ok,
      'data': {
        'filepath': os.path.abspath( file_path ),
        'line_num': 2,
        'column_num': 8
      }
    },
    'route': '/run_completer_command',
  } )
@SharedYcmd
def RunGoToTest_all( app, folder, command, test ):
  """Driver: run one GoTo-style subcommand request and check the response.

  ``test[ 'req' ]`` is (filename, line, column); ``test[ 'res' ]`` is either a
  list of expected locations, a single expected location tuple, or an error
  message string for an expected failure.
  """
  filepath = PathToTestFile( folder, test[ 'req' ][ 0 ] )
  common_request = {
    'completer_target' : 'filetype_default',
    'filepath'         : filepath,
    'command_arguments': [ command ],
    'contents'         : ReadFile( filepath ),
    'filetype'         : 'cpp'
  }
  request = common_request
  request.update( {
    'line_num' : test[ 'req' ][ 1 ],
    'column_num': test[ 'req' ][ 2 ],
  } )
  response = test[ 'res' ]
  if isinstance( response, list ):
    # multiple expected locations (e.g. GoToReferences)
    expect = {
      'response': requests.codes.ok,
      'data': contains( *[
        LocationMatcher(
          PathToTestFile( folder, os.path.normpath( location[ 0 ] ) ),
          location[ 1 ],
          location[ 2 ]
        ) for location in response
      ] )
    }
  elif isinstance( response, tuple ):
    # a single expected location
    expect = {
      'response': requests.codes.ok,
      'data': LocationMatcher(
        PathToTestFile( folder, os.path.normpath( response[ 0 ] ) ),
        response[ 1 ],
        response[ 2 ]
      )
    }
  else:
    # a string means the request is expected to fail with that message
    expect = {
      'response': requests.codes.internal_server_error,
      'data': ErrorMatcher( RuntimeError, test[ 'res' ] )
    }
  RunAfterInitialized( app, {
    'request': request,
    'route' : '/run_completer_command',
    'expect' : expect
  } )
def Subcommands_GoTo_all_test():
  """Table-driven cases; each runs for GoToDefinition, GoTo and GoToImprecise."""
  tests = [
    # Local::x -> definition/declaration of x
    { 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) },
    # Local::in_line -> definition/declaration of Local::in_line
    { 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) },
    # Local -> definition/declaration of Local
    { 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) },
    # Local::out_of_line -> definition of Local::out_of_line
    { 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 14, 13 ) },
    # GoToDeclaration alternates between definition and declaration
    { 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) },
    { 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) },
    # test -> definition and declaration of test
    { 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) },
    { 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) },
    # Unicøde
    { 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) },
    # Another_Unicøde
    { 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) },
    { 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) },
    { 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) },
    # Expected failures
    { 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' },
    { 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' },
  ]
  for test in tests:
    for cmd in [ 'GoToDefinition', 'GoTo', 'GoToImprecise' ]:
      yield RunGoToTest_all, '', cmd, test
def Subcommands_GoToDeclaration_all_test():
  """Same table as the GoTo cases, but jumping to declarations."""
  tests = [
    # Local::x -> definition/declaration of x
    { 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) },
    # Local::in_line -> definition/declaration of Local::in_line
    { 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) },
    # Local -> definition/declaration of Local
    { 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) },
    # Local::out_of_line -> declaration of Local::out_of_line
    { 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 11, 10 ) },
    # GoToDeclaration alternates between definition and declaration
    { 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) },
    { 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) },
    # test -> definition and declaration of test
    { 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) },
    { 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) },
    # Unicøde
    { 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) },
    # Another_Unicøde
    { 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) },
    { 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) },
    { 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) },
    # Expected failures
    { 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' },
    { 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' },
  ]
  for test in tests:
    yield RunGoToTest_all, '', 'GoToDeclaration', test
def Subcommands_GoToInclude_test():
  """Jumping from #include lines in main.cpp to the included headers."""
  tests = [
    { 'req': ( 'main.cpp', 1, 6 ), 'res': ( 'a.hpp', 1, 1 ) },
    { 'req': ( 'main.cpp', 2, 14 ), 'res': ( 'system/a.hpp', 1, 1 ) },
    { 'req': ( 'main.cpp', 3, 1 ), 'res': ( 'quote/b.hpp', 1, 1 ) },
    # FIXME: should fail since b.hpp is included with angled brackets but its
    # folder is added with -iquote.
    { 'req': ( 'main.cpp', 4, 10 ), 'res': ( 'quote/b.hpp', 1, 1 ) },
    { 'req': ( 'main.cpp', 5, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) },
    { 'req': ( 'main.cpp', 6, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) },
    # Expected failures
    { 'req': ( 'main.cpp', 7, 1 ), 'res': 'Cannot jump to location' },
    { 'req': ( 'main.cpp', 10, 13 ), 'res': 'Cannot jump to location' },
  ]
  for test in tests:
    for cmd in [ 'GoToInclude', 'GoTo', 'GoToImprecise' ]:
      yield RunGoToTest_all, 'test-include', cmd, test
def Subcommands_GoToReferences_test():
  """GoToReferences returns every usage of the identifier under the cursor."""
  tests = [
    # Function
    { 'req': ( 'goto.cc', 14, 21 ), 'res': [ ( 'goto.cc', 11, 10 ),
                                             ( 'goto.cc', 14, 13 ),
                                             ( 'goto.cc', 25, 22 ) ] },
    # Namespace
    { 'req': ( 'goto.cc', 24, 17 ), 'res': [ ( 'goto.cc', 2, 11 ),
                                             ( 'goto.cc', 14, 6 ),
                                             ( 'goto.cc', 23, 14 ),
                                             ( 'goto.cc', 24, 15 ),
                                             ( 'goto.cc', 25, 15 ) ] },
    # Expected failure
    { 'req': ( 'goto.cc', 27, 8 ), 'res': 'Cannot jump to location' },
  ]
  for test in tests:
    yield RunGoToTest_all, '', 'GoToReferences', test
@SharedYcmd
def RunGetSemanticTest( app,
                        filepath,
                        filetype,
                        test,
                        command,
                        response = requests.codes.ok ):
  """Driver: run a semantic-info subcommand (e.g. GetType) and check the message.

  ``test[ 0 ]`` holds request overrides (line/column); ``test[ 1 ]`` is the
  expected message substring, a hamcrest matcher, or - for non-ok responses -
  the full expected error body.
  """
  contents = ReadFile( filepath )
  common_args = {
    'completer_target' : 'filetype_default',
    'command_arguments': command,
    'line_num'         : 10,
    'column_num'       : 3,
    'filepath'         : filepath,
    'contents'         : contents,
    'filetype'         : filetype
  }
  args = test[ 0 ]
  if response == requests.codes.ok:
    # plain strings become substring matchers; matchers are used as-is
    if not isinstance( test[ 1 ], BaseMatcher ):
      expected = has_entry( 'message', contains_string( test[ 1 ] ) )
    else:
      expected = has_entry( 'message', test[ 1 ] )
  else:
    expected = test[ 1 ]
  request = common_args
  request.update( args )
  test = { 'request': request,
           'route': '/run_completer_command',
           'expect': { 'response': response,
                       'data': expected } }
  RunAfterInitialized( app, test )
def Subcommands_GetType_test():
  """Drive GetType/GetTypeImprecise over positions in GetType_Clang_test.cc.

  Each entry pairs request arguments (a position in the fixture file) with the
  expected type string or matcher. NOTE(review): the commented-out entries
  appear to be positions where clangd's answer differs from the historical
  libclang behaviour - kept for reference, not run.
  """
  tests = [
    # Basic pod types
    [ { 'line_num': 24, 'column_num': 3 }, 'Foo' ],
    # [ { 'line_num': 12, 'column_num': 2 }, 'Foo' ],
    [ { 'line_num': 12, 'column_num': 8 }, 'Foo' ],
    [ { 'line_num': 12, 'column_num': 9 }, 'Foo' ],
    [ { 'line_num': 12, 'column_num': 10 }, 'Foo' ],
    # [ { 'line_num': 13, 'column_num': 3 }, 'int' ],
    [ { 'line_num': 13, 'column_num': 7 }, 'int' ],
    # [ { 'line_num': 15, 'column_num': 7 }, 'char' ],

    # Function
    # [ { 'line_num': 22, 'column_num': 2 }, 'int main()' ],
    [ { 'line_num': 22, 'column_num': 6 }, 'int main()' ],

    # Declared and canonical type
    # On Ns::
    [ { 'line_num': 25, 'column_num': 3 }, 'namespace Ns' ],
    # On Type (Type)
    # [ { 'line_num': 25, 'column_num': 8 },
    #   'Ns::Type => Ns::BasicType<char>' ],
    # On "a" (Ns::Type)
    # [ { 'line_num': 25, 'column_num': 15 },
    #   'Ns::Type => Ns::BasicType<char>' ],
    # [ { 'line_num': 26, 'column_num': 13 },
    #   'Ns::Type => Ns::BasicType<char>' ],

    # Cursor on decl for refs & pointers
    [ { 'line_num': 39, 'column_num': 3 }, 'Foo' ],
    [ { 'line_num': 39, 'column_num': 11 }, 'Foo &' ],
    [ { 'line_num': 39, 'column_num': 15 }, 'Foo' ],
    [ { 'line_num': 40, 'column_num': 3 }, 'Foo' ],
    [ { 'line_num': 40, 'column_num': 11 }, 'Foo *' ],
    [ { 'line_num': 40, 'column_num': 18 }, 'Foo' ],
    # [ { 'line_num': 42, 'column_num': 3 }, 'const Foo &' ],
    [ { 'line_num': 42, 'column_num': 16 }, 'const struct Foo &' ],
    # [ { 'line_num': 43, 'column_num': 3 }, 'const Foo *' ],
    [ { 'line_num': 43, 'column_num': 16 }, 'const struct Foo *' ],

    # Cursor on usage
    [ { 'line_num': 45, 'column_num': 13 }, 'const struct Foo' ],
    # [ { 'line_num': 45, 'column_num': 19 }, 'const int' ],
    [ { 'line_num': 46, 'column_num': 13 }, 'const struct Foo *' ],
    # [ { 'line_num': 46, 'column_num': 20 }, 'const int' ],
    [ { 'line_num': 47, 'column_num': 12 }, 'Foo' ],
    [ { 'line_num': 47, 'column_num': 17 }, 'int' ],
    [ { 'line_num': 48, 'column_num': 12 }, 'Foo *' ],
    [ { 'line_num': 48, 'column_num': 18 }, 'int' ],

    # Auto in declaration
    # [ { 'line_num': 28, 'column_num': 3 }, 'struct Foo &' ],
    # [ { 'line_num': 28, 'column_num': 11 }, 'struct Foo &' ],
    [ { 'line_num': 28, 'column_num': 18 }, 'struct Foo' ],
    # [ { 'line_num': 29, 'column_num': 3 }, 'Foo *' ],
    # [ { 'line_num': 29, 'column_num': 11 }, 'Foo *' ],
    [ { 'line_num': 29, 'column_num': 18 }, 'Foo' ],
    # [ { 'line_num': 31, 'column_num': 3 }, 'const Foo &' ],
    # [ { 'line_num': 31, 'column_num': 16 }, 'const Foo &' ],
    # [ { 'line_num': 32, 'column_num': 3 }, 'const Foo *' ],
    # [ { 'line_num': 32, 'column_num': 16 }, 'const Foo *' ],

    # Auto in usage
    # [ { 'line_num': 34, 'column_num': 14 }, 'const Foo' ],
    # [ { 'line_num': 34, 'column_num': 21 }, 'const int' ],
    # [ { 'line_num': 35, 'column_num': 14 }, 'const Foo *' ],
    # [ { 'line_num': 35, 'column_num': 22 }, 'const int' ],
    [ { 'line_num': 36, 'column_num': 13 }, 'Foo' ],
    [ { 'line_num': 36, 'column_num': 19 }, 'int' ],
    # [ { 'line_num': 37, 'column_num': 13 }, 'Foo *' ],
    [ { 'line_num': 37, 'column_num': 20 }, 'int' ],

    # Unicode
    [ { 'line_num': 51, 'column_num': 13 }, 'Unicøde *' ],

    # Bound methods
    # On Win32, methods pick up an __attribute__((thiscall)) to annotate their
    # calling convention. This shows up in the type, which isn't ideal, but
    # also prohibitively complex to try and strip out.
    [ { 'line_num': 53, 'column_num': 15 },
      matches_regexp(
          r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ],
    [ { 'line_num': 54, 'column_num': 18 },
      matches_regexp(
          r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ],
  ]

  for subcommand in [ 'GetType', 'GetTypeImprecise' ]:
    for test in tests:
      yield ( RunGetSemanticTest,
              PathToTestFile( 'GetType_Clang_test.cc' ),
              'cpp',
              test,
              [ subcommand ] )
def Subcommands_GetDoc_test():
  """Drive GetDoc/GetDocImprecise over positions in GetDoc_Clang_test.cc."""
  tests = [
    # Documented entity from the local file.
    [ { 'line_num': 5, 'column_num': 10 }, 'docstring', requests.codes.ok ],
    # Documented entity pulled in from a header.
    [ { 'line_num': 6, 'column_num': 10 }, 'docstring', requests.codes.ok ],
    # No docstring: hover falls back to the declaration text.
    [ { 'line_num': 7, 'column_num': 7 }, 'int x = 3', requests.codes.ok ],
    # Position with no hover information at all.
    [ { 'line_num': 8, 'column_num': 1 },
      ErrorMatcher( RuntimeError, 'No hover information.' ),
      requests.codes.server_error ]
  ]

  for subcommand in [ 'GetDoc', 'GetDocImprecise' ]:
    for test in tests:
      # The third element is the expected HTTP response code.
      yield ( RunGetSemanticTest,
              PathToTestFile( 'GetDoc_Clang_test.cc' ),
              'cpp',
              test,
              [ subcommand ],
              test[ 2 ] )
@SharedYcmd
def RunFixItTest( app, line, column, lang, file_path, check ):
  """Request FixIt at ( line, column ) of |file_path| and validate via |check|.

  |lang| selects the filetype ( 'cpp11', 'cuda' or 'objective-c' ); |check| is
  a callable receiving the parsed /run_completer_command response.
  """
  filetype_for_lang = {
    'cpp11'      : 'cpp',
    'cuda'       : 'cuda',
    'objective-c': 'objc',
  }

  args = {
    'completer_target' : 'filetype_default',
    'contents'         : ReadFile( file_path ),
    'filepath'         : file_path,
    'command_arguments': [ 'FixIt' ],
    'line_num'         : line,
    'column_num'       : column,
    'filetype'         : filetype_for_lang[ lang ],
  }

  diag_request = { 'request': args, 'route': '/detailed_diagnostic' }
  # Poll until diagnostics for the file are ready; while pending the
  # detailed-diagnostic message mentions "diagnostics".
  # NOTE(review): unbounded loop if diagnostics never arrive - presumably the
  # surrounding test harness times out.
  diags = RunAfterInitialized( app, diag_request )
  while 'message' in diags and 'diagnostics' in diags[ 'message' ].lower():
    RunAfterInitialized( app, { 'request': args,
                                'route': '/receive_messages' } )
    diags = RunAfterInitialized( app, diag_request )

  results = app.post_json( '/run_completer_command',
                           BuildRequest( **args ) ).json
  pprint( results )
  check( results )
def FixIt_Check_cpp11_Ins( results ):
  """One fix-it with two insertions wrapping the switch argument.

  Fixture: switch(A()) { // expected-error{{explicit conversion to}}
  """
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  def Chunk( text, start, end ):
    return has_entries( {
      'replacement_text': equal_to( text ),
      'range': has_entries( { 'start': Pos( *start ), 'end': Pos( *end ) } ),
    } )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'chunks': contains(
        Chunk( 'static_cast<int>(', ( 16, 10 ), ( 16, 10 ) ),
        Chunk( ')', ( 16, 13 ), ( 16, 13 ) )
      ),
      # NOTE(review): column 0 looks 0-based while other locations in this
      # file are 1-based - value kept exactly as before.
      'location': Pos( 16, 0 )
    } ) )
  } ) )
def FixIt_Check_cpp11_InsMultiLine( results ):
  """Like FixIt_Check_cpp11_Ins, but the two insertions span lines 26-28."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  def Chunk( text, start, end ):
    return has_entries( {
      'replacement_text': equal_to( text ),
      'range': has_entries( { 'start': Pos( *start ), 'end': Pos( *end ) } ),
    } )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'chunks': contains(
        Chunk( 'static_cast<int>(', ( 26, 7 ), ( 26, 7 ) ),
        Chunk( ')', ( 28, 2 ), ( 28, 2 ) )
      ),
      'location': Pos( 25, 14 )
    } ) )
  } ) )
def FixIt_Check_cpp11_Del( results ):
  """A pure deletion fix-it: removal of a stray '::'."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  deletion = has_entries( {
    'replacement_text': equal_to( '' ),
    'range': has_entries( { 'start': Pos( 35, 7 ), 'end': Pos( 35, 9 ) } ),
  } )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'chunks': contains( deletion ),
      'location': Pos( 35, 7 )
    } ) )
  } ) )
def FixIt_Check_cpp11_Repl( results ):
  """A single replacement fix-it: the token at 40,6-40,9 becomes 'foo'."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  replacement = has_entries( {
    'replacement_text': equal_to( 'foo' ),
    'range': has_entries( { 'start': Pos( 40, 6 ), 'end': Pos( 40, 9 ) } ),
  } )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'chunks': contains( replacement ),
      'location': Pos( 40, 6 )
    } ) )
  } ) )
def FixIt_Check_cpp11_DelAdd( results ):
  """Two alternative fix-its for the same diagnostic on line 48."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  def Chunk( text, start, end ):
    return has_entries( {
      'replacement_text': equal_to( text ),
      'range': has_entries( { 'start': Pos( *start ), 'end': Pos( *end ) } ),
    } )

  # Alternative 1: delete one character and insert a '~'.
  del_add_fixit = has_entries( {
    'chunks': contains(
      Chunk( '', ( 48, 3 ), ( 48, 4 ) ),
      Chunk( '~', ( 48, 9 ), ( 48, 9 ) ),
    ),
    'location': Pos( 48, 3 )
  } )
  # Alternative 2: replace the body with '= default;'.
  default_fixit = has_entries( {
    'chunks': contains(
      Chunk( '= default;', ( 48, 15 ), ( 48, 17 ) ),
    ),
    'location': Pos( 48, 3 )
  } )

  assert_that( results, has_entries( {
    'fixits': contains( del_add_fixit, default_fixit )
  } ) )
def FixIt_Check_objc( results ):
  """Objective-C: a single insertion of 'id' at 5,3."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  insertion = has_entries( {
    'replacement_text': equal_to( 'id' ),
    'range': has_entries( { 'start': Pos( 5, 3 ), 'end': Pos( 5, 3 ) } ),
  } )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'chunks': contains( insertion ),
      'location': Pos( 5, 3 )
    } ) )
  } ) )
def FixIt_Check_objc_NoFixIt( results ):
  """A warning that offers no fix-its yields an empty 'fixits' list."""
  expected = { 'fixits': [] }
  assert_that( results, equal_to( expected ) )
def FixIt_Check_cpp11_MultiFirst( results ):
  """Multiple errors on line 54; request made near the first one (col 15)."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  def Chunk( text, start, end ):
    return has_entries( {
      'replacement_text': equal_to( text ),
      'range': has_entries( { 'start': Pos( *start ), 'end': Pos( *end ) } ),
    } )

  # All three fix-its report the first diagnostic's location.
  loc = Pos( 54, 15 )
  assert_that( results, has_entries( {
    'fixits': contains(
      # First fix-it at 54,16: replace with 'foo'.
      has_entries( {
        'chunks': contains( Chunk( 'foo', ( 54, 16 ), ( 54, 19 ) ) ),
        'location': loc
      } ),
      # Second fix-it at 54,52: delete a char and insert '~'.
      has_entries( {
        'chunks': contains(
          Chunk( '', ( 54, 52 ), ( 54, 53 ) ),
          Chunk( '~', ( 54, 58 ), ( 54, 58 ) ),
        ),
        'location': loc
      } ),
      # Third fix-it: '= default;'.
      has_entries( {
        'chunks': contains( Chunk( '= default;', ( 54, 64 ), ( 54, 67 ) ) ),
        'location': loc
      } ),
    )
  } ) )
def FixIt_Check_cpp11_MultiSecond( results ):
  """Same fix-its as MultiFirst, but requested near the second error (col 51)."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  def Chunk( text, start, end ):
    return has_entries( {
      'replacement_text': equal_to( text ),
      'range': has_entries( { 'start': Pos( *start ), 'end': Pos( *end ) } ),
    } )

  # All three fix-its report the request location near the second error.
  loc = Pos( 54, 51 )
  assert_that( results, has_entries( {
    'fixits': contains(
      # First fix-it at 54,16: replace with 'foo'.
      has_entries( {
        'chunks': contains( Chunk( 'foo', ( 54, 16 ), ( 54, 19 ) ) ),
        'location': loc
      } ),
      # Second fix-it at 54,52: delete a char and insert '~'.
      has_entries( {
        'chunks': contains(
          Chunk( '', ( 54, 52 ), ( 54, 53 ) ),
          Chunk( '~', ( 54, 58 ), ( 54, 58 ) ),
        ),
        'location': loc
      } ),
      # Third fix-it: '= default;'.
      has_entries( {
        'chunks': contains( Chunk( '= default;', ( 54, 64 ), ( 54, 67 ) ) ),
        'location': loc
      } ),
    )
  } ) )
def FixIt_Check_unicode_Ins( results ):
  """Fix-it on a line containing unicode characters: '=' replacing 21,9-21,11."""
  def Pos( line, col ):
    return has_entries( { 'line_num': line, 'column_num': col } )

  replacement = has_entries( {
    'replacement_text': equal_to( '=' ),
    'range': has_entries( { 'start': Pos( 21, 9 ), 'end': Pos( 21, 11 ) } ),
  } )

  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'chunks': contains( replacement ),
      'location': Pos( 21, 16 )
    } ) )
  } ) )
def FixIt_Check_cpp11_Note( results ):
  """Fix-its attached to child diagnostics (notes), plus an unresolved tweak."""
  # First note: put parentheses around the assignment.
  parenthesise_note = has_entries( {
    'text': contains_string( 'parentheses around the assignment' ),
    'chunks': contains(
      ChunkMatcher( '(',
                    LineColMatcher( 59, 8 ),
                    LineColMatcher( 59, 8 ) ),
      ChunkMatcher( ')',
                    LineColMatcher( 61, 12 ),
                    LineColMatcher( 61, 12 ) )
    ),
    'location': LineColMatcher( 60, 1 ),
  } )
  # Second note: change '=' to '=='.
  equality_note = has_entries( {
    'text': contains_string( '==' ),
    'chunks': contains(
      ChunkMatcher( '==',
                    LineColMatcher( 60, 8 ),
                    LineColMatcher( 60, 9 ) )
    ),
    'location': LineColMatcher( 60, 1 ),
  } )
  # Tweak that still needs a /resolve_fixit round-trip.
  unresolved_tweak = has_entries( {
    'text': 'Extract subexpression to variable',
    'resolve': True,
    'command': has_entries( { 'command': 'clangd.applyTweak' } )
  } )

  assert_that( results, has_entries( {
    'fixits': contains( parenthesise_note, equality_note, unresolved_tweak )
  } ) )
def FixIt_Check_cpp11_SpellCheck( results ):
  """Fix-it produced by forced spell checking: correct the misspelt name."""
  expected_fixit = has_entries( {
    'text': contains_string( "change 'SpellingIsNotMyStringPiont' to "
                             "'SpellingIsNotMyStrongPoint'" ),
    'chunks': contains(
      ChunkMatcher( 'SpellingIsNotMyStrongPoint',
                    LineColMatcher( 72, 9 ),
                    LineColMatcher( 72, 35 ) )
    ),
    'location': LineColMatcher( 72, 9 ),
  } )
  assert_that( results,
               has_entries( { 'fixits': contains( expected_fixit ) } ) )
def FixIt_Check_cuda( results ):
  """CUDA fix-it: change the kernel's return type from 'int' to 'void'."""
  expected_fixit = has_entries( {
    'text': contains_string( "change 'int' to 'void'" ),
    'chunks': contains(
      ChunkMatcher( 'void',
                    LineColMatcher( 3, 12 ),
                    LineColMatcher( 3, 15 ) )
    ),
    'location': LineColMatcher( 3, 12 ),
  } )
  assert_that( results,
               has_entries( { 'fixits': contains( expected_fixit ) } ) )
def FixIt_Check_SubexprExtract_Resolved( results ):
  """Resolved 'extract subexpression' tweak: declare 'dummy', then use it."""
  expected_chunks = [
    ChunkMatcher( 'auto dummy = foo(i + 3);\n  ',
                  LineColMatcher( 84, 3 ),
                  LineColMatcher( 84, 3 ) ),
    ChunkMatcher( 'dummy',
                  LineColMatcher( 84, 10 ),
                  LineColMatcher( 84, 22 ) ),
  ]
  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'text': 'Extract subexpression to variable',
      'chunks': contains( *expected_chunks )
    } ) )
  } ) )
def FixIt_Check_RawStringReplace_Resolved( results ):
  """Resolved 'convert to raw string' tweak over 80,19-80,36."""
  # The replacement text contains literal backslashes, an embedded newline
  # and a real '\a' escape - reproduced exactly.
  raw_string_chunk = ChunkMatcher( 'R"(\\\\r\\asd\n\\v)"',
                                   LineColMatcher( 80, 19 ),
                                   LineColMatcher( 80, 36 ) )
  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'text': 'Convert to raw string',
      'chunks': contains( raw_string_chunk )
    } ) )
  } ) )
def FixIt_Check_MacroExpand_Resolved( results ):
  """Resolved 'expand macro' tweak: DECLARE_INT expands to 'int i'."""
  expansion_chunk = ChunkMatcher( 'int i',
                                  LineColMatcher( 83, 3 ),
                                  LineColMatcher( 83, 17 ) )
  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'text': "Expand macro 'DECLARE_INT'",
      'chunks': contains( expansion_chunk )
    } ) )
  } ) )
def FixIt_Check_AutoExpand_Resolved( results ):
  """Resolved 'expand auto' tweak: 'auto' becomes the deduced 'const char *'."""
  deduced_type_chunk = ChunkMatcher( 'const char *',
                                     LineColMatcher( 80, 1 ),
                                     LineColMatcher( 80, 6 ) )
  assert_that( results, has_entries( {
    'fixits': contains( has_entries( {
      'text': "Expand auto type",
      'chunks': contains( deduced_type_chunk )
    } ) )
  } ) )
def Subcommands_FixIt_all_test():
  """Table-driven FixIt tests: ( line, column, language, file, checker )."""
  cfile = PathToTestFile( 'FixIt_Clang_cpp11.cpp' )
  mfile = PathToTestFile( 'objc', 'FixIt_Clang_objc.m' )
  cufile = PathToTestFile( 'cuda', 'fixit_test.cu' )
  ufile = PathToTestFile( 'unicode.cc' )

  tests = [
    ( 16, 0, 'cpp11', cfile, FixIt_Check_cpp11_Ins ),
    ( 25, 14, 'cpp11', cfile, FixIt_Check_cpp11_InsMultiLine ),
    ( 35, 7, 'cpp11', cfile, FixIt_Check_cpp11_Del ),
    ( 40, 6, 'cpp11', cfile, FixIt_Check_cpp11_Repl ),
    ( 48, 3, 'cpp11', cfile, FixIt_Check_cpp11_DelAdd ),

    ( 5, 3, 'objective-c', mfile, FixIt_Check_objc ),
    ( 7, 1, 'objective-c', mfile, FixIt_Check_objc_NoFixIt ),

    ( 3, 12, 'cuda', cufile, FixIt_Check_cuda ),

    # Multiple errors on a single line; both with fixits.
    ( 54, 15, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ),
    # Should put closest fix-it first?
    ( 54, 51, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ),

    # Unicode in line for fixit.
    ( 21, 16, 'cpp11', ufile, FixIt_Check_unicode_Ins ),

    # FixIt attached to a "child" diagnostic (i.e. a Note).
    ( 60, 1, 'cpp11', cfile, FixIt_Check_cpp11_Note ),

    # FixIt due to forced spell checking.
    ( 72, 9, 'cpp11', cfile, FixIt_Check_cpp11_SpellCheck ),
  ]

  for line, column, lang, filepath, checker in tests:
    yield RunFixItTest, line, column, lang, filepath, checker
@WithRetry
@SharedYcmd
def RunRangedFixItTest( app, rng, expected ):
  """Request FixIt over range |rng|, resolve the first fix-it, check |expected|."""
  filepath = PathToTestFile( 'FixIt_Clang_cpp11.cpp' )
  args = {
    'completer_target' : 'filetype_default',
    'contents'         : ReadFile( filepath ),
    'filepath'         : filepath,
    'command_arguments': [ 'FixIt' ],
    'range'            : rng,
    'filetype'         : 'cpp'
  }
  app.post_json( '/event_notification',
                 CombineRequest( args, {
                   'event_name': 'FileReadyToParse',
                 } ),
                 expect_errors = True )
  WaitUntilCompleterServerReady( app, 'cpp' )

  fixits_response = app.post_json( '/run_completer_command',
                                   BuildRequest( **args ) ).json
  # Ranged fix-its (tweaks) come back unresolved; resolve the first one.
  args[ 'fixit' ] = fixits_response[ 'fixits' ][ 0 ]
  resolved = app.post_json( '/resolve_fixit',
                            BuildRequest( **args ) ).json
  print( 'Resolved fixit response = ' )
  print( resolved )
  expected( resolved )
def Subcommands_FixIt_Ranged_test():
  """Ranged FixIt (clangd tweak) requests, each resolved and checked."""
  def Range( start_line, start_col, end_line, end_col ):
    return {
      'start': { 'line_num': start_line, 'column_num': start_col },
      'end': { 'line_num': end_line, 'column_num': end_col },
    }

  tests = [
    ( Range( 80, 1, 80, 4 ), FixIt_Check_AutoExpand_Resolved ),
    ( Range( 83, 3, 83, 13 ), FixIt_Check_MacroExpand_Resolved ),
    ( Range( 84, 14, 84, 20 ), FixIt_Check_SubexprExtract_Resolved ),
    ( Range( 80, 19, 80, 35 ), FixIt_Check_RawStringReplace_Resolved ),
  ]
  for rng, checker in tests:
    yield RunRangedFixItTest, rng, checker
@WithRetry
@SharedYcmd
def Subcommands_FixIt_AlreadyResolved_test( app ):
  """Resolving a fix-it that required no resolution returns it unchanged.

  Requests FixIt at 16,1 of FixIt_Clang_cpp11.cpp, then feeds the first
  returned fix-it back through /resolve_fixit and asserts the response is
  identical to the original.
  """
  filename = PathToTestFile( 'FixIt_Clang_cpp11.cpp' )
  request = {
    'completer_target' : 'filetype_default',
    'contents'         : ReadFile( filename ),
    'filepath'         : filename,
    'command_arguments': [ 'FixIt' ],
    'line_num'         : 16,
    'column_num'       : 1,
    'filetype'         : 'cpp'
  }
  # Warm up the completer for this file before issuing commands.
  app.post_json( '/event_notification',
                 CombineRequest( request, {
                   'event_name': 'FileReadyToParse',
                 } ),
                 expect_errors = True )
  WaitUntilCompleterServerReady( app, 'cpp' )
  expected = app.post_json( '/run_completer_command',
                            BuildRequest( **request ) ).json
  print( 'expected = ' )
  print( expected )
  request[ 'fixit' ] = expected[ 'fixits' ][ 0 ]
  actual = app.post_json( '/resolve_fixit',
                          BuildRequest( **request ) ).json
  print( 'actual = ' )
  print( actual )
  assert_that( actual, equal_to( expected ) )
@SharedYcmd
def Subcommands_RefactorRename_test( app ):
  """RefactorRename 'Foo' -> 'Bar' in basic.cpp touches every occurrence."""
  basic_cpp = PathToTestFile( 'basic.cpp' )
  # ( replacement_text, ( start_line, start_col ), ( end_line, end_col ) )
  expected_chunks = [
    ( 'Bar', ( 1, 8 ), ( 1, 11 ) ),
    ( 'Bar', ( 9, 3 ), ( 9, 6 ) ),
    ( '\n\n', ( 12, 2 ), ( 15, 1 ) ),
    ( 'Bar', ( 15, 8 ), ( 15, 11 ) ),
    ( ' ', ( 15, 46 ), ( 16, 1 ) ),
    ( 'Bar', ( 17, 3 ), ( 17, 6 ) ),
    ( '', ( 17, 14 ), ( 17, 15 ) ),
    ( ' ', ( 17, 17 ), ( 17, 17 ) ),
    ( ' ', ( 17, 19 ), ( 17, 19 ) ),
  ]
  chunk_matchers = [
    ChunkMatcher( text, LineColMatcher( *start ), LineColMatcher( *end ) )
    for text, start, end in expected_chunks
  ]

  test = {
    'request': {
      'filetype': 'cpp',
      'completer_target': 'filetype_default',
      'contents': ReadFile( basic_cpp ),
      'filepath': basic_cpp,
      'command_arguments': [ 'RefactorRename', 'Bar' ],
      'line_num': 17,
      'column_num': 4,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'fixits': contains( has_entries( {
          'chunks': contains( *chunk_matchers )
        } ) )
      } )
    },
    'route': '/run_completer_command'
  }
  RunAfterInitialized( app, test )
| StarcoderdataPython |
3218996 | import logging
# Configure the root logger to emit bare messages at INFO level, then expose a
# package-level logger for the rest of the package to import.
logging.basicConfig(format='%(message)s', level=logging.INFO)
logger = logging.getLogger(__package__)
| StarcoderdataPython |
3346985 | # -*- coding: utf-8 -*-
"""
Created on Sun May 10 20:03:20 2020
@author: hexx
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shutil import copyfile
from myFunctions import createFolder
# --- Load the PODA model snapshot and configuration -------------------------
today = pd.to_datetime('today')
today =today.strftime("%Y-%m-%d")

# NOTE(review): hard-coded date overrides the computed "today" above -
# presumably pinned to reproduce a specific model snapshot.
today = '2020-09-12'

# Model_Date = np.load("./Model_Parameter.npy",allow_pickle='TRUE').item()
PODA_Model = np.load("./PODA_Model_"+today+".npy",allow_pickle='TRUE').item()

google_Mobility_Day = PODA_Model['ML_File_Date']
start_Date = '03-03-2020'
YYG_projection_Date=PODA_Model['YYG_File_Date']
ML_Model=PODA_Model['ML_File_Date']
fuel_mobility_factor_file = ML_Model
apple_fuel_Factor_file = PODA_Model['ML_File_Date']

model_mark =''
isopen='' #'_noreopen'

# EIA weekly fuel demand, re-indexed by date for plotting against predictions.
fuel_Demand_EIA = PODA_Model['Fuel_Demand_EIA'].reset_index()
fuel_Demand_EIA = fuel_Demand_EIA.set_index('Date')

# Shared figures: daily confirmed cases (fig2) and deaths (fig3) per case.
fig2 = plt.figure(figsize=(6, 5))
fig3 = plt.figure(figsize=(6, 5))
ax2 = fig2.add_subplot(1, 1, 1)
ax3 = fig3.add_subplot(1, 1, 1)
Line_Style =['.-m', '-.r', '-b', '--g']

# One pass per epidemiological projection scenario.
caseID =['lower', 'mean', 'upper', 'MIT']
for case_i, case in enumerate(caseID):
    if case == 'mean':
        caseLabel = 'Reference'
    else:
        caseLabel = case

    COVID = PODA_Model['Data_for_Mobility_Projection_'+case]
    COVID = COVID[COVID['State Name']== 'Michigan']

    data_used = PODA_Model['Google_Apple_Mobility_Projection_'+case].reset_index()
    data_used = data_used[(data_used['date']> pd.to_datetime(start_Date))]
    data_used = data_used.set_index('date')

    NHTS_Category_Share = PODA_Model['NHTS Category Share']
    NHTS_State_Fuel_Share = PODA_Model['NHTS State Fuel Share']
    df_StateName_Code = PODA_Model['StateName_StateCode']

    # Attach state codes and NHTS trip-category shares to the mobility frame.
    cols = ['State Name']
    data_used = data_used.join(df_StateName_Code.set_index(cols), on=cols, how='left')
    data_used = data_used.join(NHTS_Category_Share.set_index('State Code'), on='State Code', how='left')

    '''
    #Google mobility-fuel correlation model
    '''
    #load model correlation factors
    factor = PODA_Model['Google_Mobility_EIA_Factor']

    # data_used['work factor'] = 1 + data_used['Workplaces']/100*factor[0]
    # data_used['school factor'] = 1 + data_used['Workplaces']/100*factor[1]
    # data_used['medical factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[2]
    # data_used['shopping factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[3]
    # data_used['social factor'] = 1 + data_used['Retail and Recreation']/100*factor[4]
    # data_used['park factor'] = 1 + data_used['Parks']/100*factor[5]
    # data_used['transport someone factor'] = 1+ data_used['Retail and Recreation']/100*factor[7]
    # data_used['meals factor'] = 1 + data_used['Retail and Recreation']/100*factor[6]
    # data_used['else factor'] = 1+ data_used['Retail and Recreation']/100*factor[7]

    # Per-category adjustment factors from the fitted Google-mobility model.
    data_used['work factor'] = 1 + data_used['Workplaces']/100*factor[0]
    data_used['school factor'] = 1 + data_used['Workplaces']/100*factor[1]
    data_used['medical factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[2]
    data_used['shopping factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[3]
    data_used['social factor'] = 1 + data_used['Retail and Recreation']/100*factor[4]
    data_used['park factor'] = 1 + data_used['Parks']/100*factor[5]
    data_used['transport someone factor'] = 1+ data_used['Retail and Recreation']/100*factor[7] #Workplaces
    data_used['meals factor'] = 1 + data_used['Retail and Recreation']/100*factor[6]
    data_used['else factor'] = 1+ data_used['Retail and Recreation']/100*factor[7] #workplace

    # Weighted sum of NHTS trip-category shares times their mobility factors;
    # factor[8] splits 'Social/Recreational' between social and park terms,
    # factor[9] is an additive offset.
    data_used['Google State Mobility Predict'] = (data_used['Work']*data_used['work factor'] + \
        data_used['School/Daycare/Religious activity']*data_used['school factor'] + \
        data_used['Medical/Dental services']*data_used['medical factor'] + \
        data_used['Shopping/Errands']*data_used['shopping factor'] + \
        data_used['Social/Recreational']*factor[8]*data_used['social factor'] + \
        data_used['Social/Recreational']*(1-factor[8])*data_used['park factor'] + \
        data_used['Meals']*data_used['meals factor'] +\
        data_used['Transport someone']*data_used['transport someone factor'] + \
        data_used['Something else']*data_used['else factor'])/100 + factor[9]

    # Weight state mobility by each state's share of national gasoline use.
    aa = data_used.join(NHTS_State_Fuel_Share.set_index('State Name'), on='State Name', how='left')
    aa['Google fuel factor'] = aa['Google State Mobility Predict']*aa['Percentage gasoline']
    aa['Apple fuel factor']=aa['Apple State Mobility Predict']*aa['Percentage gasoline']
    aa['Date'] = aa.index

    day_Shift = int(factor[10])
    # Sum across states to get the national fuel factors per date.
    x = aa.sum(level='date')
    x = x[['Google fuel factor', 'Apple fuel factor']]
    # x['Date'] =x.index+pd.DateOffset(days=day_Shift)

    '''
    apple mobility-fuel correlation
    '''
    apple_x = x['Apple fuel factor'].to_numpy()
    apple_x_length = len(apple_x)
    apple_x=apple_x.reshape(apple_x_length, 1)
    regr = PODA_Model['Apple_EIA_Regression']
    # regr_coef = regr.coef_
    # print('reg_coeff: ', regr_coef)
    # regr_interp = regr.intercept_
    # print('reg_interp: ', regr_interp)
    Apple_fuel_Demand_Pred = regr.predict(apple_x)
    # aa['Apple Fuel Demand Predict'] = fuel_Demand_Apple_Pred

    baseline = 8722 #average of EIA between Jan 03-Feb 07(thousand bpd)
    PODA_Model['EIA_Baseline'] = baseline

    # NOTE(review): data_save is a slice of aa; the in-place scaling below
    # may raise pandas' SettingWithCopyWarning - confirm intended.
    data_save = aa[['Date', 'State Name', 'State Code', 'Google State Mobility Predict', 'Apple State Mobility Predict']]
    data_save['Google State Mobility Predict'] = data_save['Google State Mobility Predict']*100
    data_save.to_excel('./Fuel Demand Projection/Mobility_State_'+YYG_projection_Date+case+isopen+'.xlsx')

    x['Google Fuel Demand Predict'] = x['Google fuel factor']*baseline
    x['Apple Fuel Demand Predict'] = Apple_fuel_Demand_Pred
    # x.to_excel('./Fuel Demand Projection/Mobility_US_'+YYG_projection_Date+case+isopen+'.xlsx')
    PODA_Model['Fuel_Demand_Projection_'+case]=x
    PODA_Model['Mobility_State_Level_Projection_'+case]=data_save

    # Per-case figure: predicted demand (Google & Apple) vs shifted EIA data.
    fig1 = plt.figure(figsize=(6, 5))
    ax1 = fig1.add_subplot(1, 1, 1)
    ax1.plot(x.index, x['Google Fuel Demand Predict'], '-',
             label='Google Mobility (Predicted')
    ax1.plot(x.index, x['Apple Fuel Demand Predict'], '--g',
             label='Apple Mobility (Predicted')
    ax1.plot(fuel_Demand_EIA.index - pd.DateOffset(days=day_Shift),
             fuel_Demand_EIA['Gasoline'],
             '--s', label='EIA Weekly Fuel Demand')
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Daily Motor Gasoline Demand (thousand BPD)')
    # ax1.set_ylim(4000, 10000)
    ax1.set_title('Fuel Demand: '+model_mark+case+' YYG:'+YYG_projection_Date + ' MLmodel:' +ML_Model+isopen)
    ax1.legend()

    ax2.plot(COVID.index, COVID['US Daily Confirmed'], Line_Style[case_i], label=case)
    ax3.plot(COVID.index, COVID['US Daily Death'], Line_Style[case_i], label=case)

    # if (case == 'mean') & (isopen ==''):
    #     data_save.to_excel('C:/Users/hexx/Box Sync/Energy-COVID-19/Data for Website/Mobility_State_'+YYG_projection_Date+case+'.xlsx')

ax2.legend()
ax3.legend()

# Persist the updated model and mirror it into ./PODA_Model/.
np.save(("./PODA_Model_"+today+".npy"), PODA_Model)
createFolder('./PODA_Model')
copyfile('./PODA_Model_'+today+'.npy', './PODA_Model/PODA_Model_'+today+'.npy')
| StarcoderdataPython |
3396779 | <gh_stars>10-100
import numpy as np
import os
import pdb
from PIL import Image, ImageDraw
import cv2
import glob as gb
# import av
from skvideo.io import ffprobe
import pandas as pd
import sys
"""
Add keys for videos without any detections, add also frame keys for those videos
"""
# Add dictionary keys for videos (and their frames) that have no detections,
# so every video in the reference listfile is present in the output file.
split = 'train'
src_det_file_path = '/home/jinchoi/src/rehab/dataset/action/kinetics/detectron_results/kinetics100/kinetics100_{}_detections_height_256pixels.npy'.format(split)
# NOTE(review): both branches build the identical path - the split is already
# interpolated via format(split), so the if/else is redundant as written.
if split == 'val':
    ref_listfile_path = '/home/jinchoi/src/video-data-aug/data/kinetics400/videossl_splits/kinetics100_{}_100_percent_labeled_rawframes.txt'.format(split)
else:
    ref_listfile_path = '/home/jinchoi/src/video-data-aug/data/kinetics400/videossl_splits/kinetics100_{}_100_percent_labeled_rawframes.txt'.format(split)
tgt_det_file_path = '/home/jinchoi/src/rehab/dataset/action/kinetics/detectron_results/kinetics100/kinetics100_{}_no_missing_keys_detections_height_256pixels.npy'.format(split)

# Read the original detection file.
dets = np.load(src_det_file_path, allow_pickle=True)
dets = dets.item()
video_res_info = dets['video_res_info']
dets = dets['dets']
print('Done with reading the org detection numpy file: {}'.format(src_det_file_path))

# Read the reference video listfile: rows of "class/video_frames num_frames label".
df = pd.read_csv(ref_listfile_path, header=None, sep=' ')
ref_data = df.values

# NOTE(review): the first assignment is immediately overwritten - redundant.
new_dets = dict()
new_dets = {}
ref_cls_list = []
missing_det_vid_cnt = 0
# Construct a dictionary keyed by class, then by (11-char) video id, then by
# 1-based frame index; frames default to an empty (0, 5) float32 box array.
for i,row in enumerate(ref_data):
    if i%1000 == 0:
        print('Processing {}/{} videos in the ref. listfile'.format(i+1, ref_data.shape[0]))
    # cur_key = row[0].split('/')[0] + '/' + row[0].split('/')[1][:11]
    cur_cls = row[0].split('/')[0]
    # First 11 chars of the directory name (the YouTube video id).
    cur_vid = row[0].split('/')[1][:11]
    num_frms = row[1]

    if cur_cls not in new_dets:
        new_dets[cur_cls] = dict()

    if cur_vid not in new_dets[cur_cls]:
        new_dets[cur_cls][cur_vid] = dict()
        # Pre-fill every frame with an empty detection entry.
        for idx in range(num_frms):
            idx_one_based = idx + 1
            new_dets[cur_cls][cur_vid][idx_one_based] = {'frame'
            : idx_one_based, 'human_boxes': np.zeros([0,5]).astype(np.float32)}

    # If real detections exist for this video, copy them over (frame counts
    # must agree) and remove them from the source dict so leftovers can be
    # detected below; otherwise count the video as missing detections.
    if cur_vid in dets[cur_cls]:
        assert len(new_dets[cur_cls][cur_vid]) == len(dets[cur_cls][cur_vid])
        new_dets[cur_cls][cur_vid] = dets[cur_cls][cur_vid]
        dets[cur_cls].pop(cur_vid,None)
    else:
        missing_det_vid_cnt += 1
        print(i, cur_vid)

    if cur_cls not in ref_cls_list:
        ref_cls_list.append(cur_cls)
    sys.stdout.flush()
print('Done with adding missing vid keys and frame keys by comparing {} and {}'.format(ref_listfile_path, src_det_file_path))

# Validate that all the existing dets were copied into new_dets; drop into
# the debugger if any source entries remain unconsumed.
for cur_cls,cur_data in dets.items():
    if len(cur_data.keys()) > 0:
        pdb.set_trace()

wrapped_dets = dict(
    video_res_info=video_res_info,
    dets = new_dets
)
np.save(tgt_det_file_path, wrapped_dets)
print('Detection results saved to {}'.format(tgt_det_file_path)) | StarcoderdataPython |
4841645 | <gh_stars>10-100
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock:
CNCore.listNodes() → NodeList
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html
#CNCore.listNodes
Note: CN /node returns a list of Node elements, while MN /node returns a single
Node element. Since the endpoint is the same, care must be taken to not add
callbacks for both getCapabilities() and listNodes() within the same test.
A DataONEException can be triggered by adding a custom header. See
d1_exception.py
"""
import logging
import os
import re
import responses
import d1_common.const
import d1_common.url
import d1_common.utils.filesystem
import d1_common.utils.ulog
import d1_test.mock_api.d1_exception
import d1_test.mock_api.util
# Config
N_TOTAL = 100
LIST_NODES_ENDPOINT_RX = r"v([123])/node"
def add_callback(base_url):
    """Register the mocked GET /node endpoint with the ``responses`` library.

    Matches v1/v2/v3 listNodes() URLs under ``base_url`` and routes them to
    ``_request_callback``.
    """
    endpoint_rx = re.compile(
        r"^" + d1_common.url.joinPathElements(base_url, LIST_NODES_ENDPOINT_RX)
    )
    responses.add_callback(
        responses.GET, endpoint_rx, callback=_request_callback, content_type=""
    )
def _request_callback(request):
    """Serve a canned NodeList document, or a triggered DataONEException.

    Returns a ``(status, header_dict, body)`` tuple as expected by
    ``responses``.
    """
    logging.debug('Received callback. url="{}"'.format(request.url))
    # A custom request header can force a DataONEException response instead.
    # See d1_exception.py.
    exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
    if exc_response_tup:
        return exc_response_tup
    # Regular response: pick the canned XML document for the API version.
    version_tag = _parse_url(request.url)
    type_doc_name_by_tag = {
        "v1": "node_list_1_0.xml",
        "v2": "node_list_2_0.xml",
    }
    assert version_tag in type_doc_name_by_tag, 'Type doc not available for version. tag="{}"'.format(
        version_tag
    )
    node_list_xml_path = d1_common.utils.filesystem.abs_path(
        os.path.join("type_docs", type_doc_name_by_tag[version_tag])
    )
    with open(node_list_xml_path, "rb") as f:
        node_list_xml = f.read()
    return 200, {"Content-Type": d1_common.const.CONTENT_TYPE_XML}, node_list_xml
def _parse_url(url):
    """Extract and sanity-check the DataONE API version tag from ``url``."""
    # parse_rest_url returns a 5-tuple; only the first three fields are needed.
    version_tag, endpoint_str, param_list, _query_dict, _client = (
        d1_test.mock_api.util.parse_rest_url(url)
    )
    assert endpoint_str == "node"
    assert not param_list, "listNodes() accepts no parameters"
    return version_tag
| StarcoderdataPython |
3368921 | import numpy
from tkinter import *
from tkinter import ttk
from Constants import Constant_Images
from PIL import Image, ImageTk
# Top-level window for the save dialog.
# NOTE(review): Toplevel() assumes a Tk root window already exists elsewhere
# in the application — confirm against the caller.
save = Toplevel()
save.title("Save Page")
save.geometry("300x300")
# for the radio button values
var = IntVar()
var.set(2)
def combo():
    """
    Shows the combobox on the screen. The events that fire when an item is
    selected are also handled here. (Translated from Turkish.)
    """
    combo_frame = Frame(save)
    combo_frame.pack()
    labelTop = Label(combo_frame,
                     text="Lütfen İşlem Seçiniz :")
    labelTop.grid(column=0, row=0)
    combobox = ttk.Combobox(combo_frame,
                            values=[
                                "Format Seçiniz..",
                                "JPG",
                                "PNG",
                                "BITMAP"])
    combobox.grid(column=0, row=1)
    combobox.current(0)
    def callbackfunc(cmb):
        """
        Handles the state coming from the combobox and records the chosen
        output format in Constant_Images.image_type. (Translated from Turkish;
        the original also mentions passing the image to show_image.)
        """
        result_image = Constant_Images.image
        print(combobox.current(), combobox.get())
        choice = combobox.current()
        # NOTE(review): choice 3 ("BITMAP") is offered in the combobox but is
        # not handled here, so selecting it leaves image_type unchanged.
        if (choice == 1):
            print("JPG")
            Constant_Images.image_type = "JPEG"
        elif (choice == 2):
            print("PNG")
            Constant_Images.image_type = "PNG"
    combobox.bind("<<ComboboxSelected>>", callbackfunc)
combo()
def save_image():
    """Save the current image to the configured directory in the selected format.

    Reads the target format from ``Constant_Images.image_type`` (set by the
    combobox callback) and writes ``my_saved_photo.<ext>`` into
    ``Constant_Images.save_directory``. Formats other than JPEG/PNG are
    silently ignored, matching the combobox callback which only sets these two.
    """
    # Map the Pillow format name to the file extension used for the output.
    # (Replaces the duplicated save branches and a stray no-op print().)
    extensions = {"JPEG": "jpeg", "PNG": "png"}
    ext = extensions.get(Constant_Images.image_type)
    if ext is None:
        return
    target = Constant_Images.save_directory + '/my_saved_photo.' + ext
    Constant_Images.image.convert('RGB').save(target, Constant_Images.image_type)
# "Next" button (original Turkish comment: İleri Butonu) — triggers the save.
next_page = Button(save, text="Save", fg="white",bg='blue', height=3, width=15,command=save_image)
next_page.pack()
save.mainloop()
| StarcoderdataPython |
1736535 | <filename>flavio/physics/bdecays/bvll/observables_bs.py
"""Functions for exclusive $B_s\to V\ell^+\ell^-$ decays, taking into account
the finite life-time difference between the $B_s$ mass eigenstates,
see arXiv:1502.05509."""
import flavio
from . import observables
from flavio.classes import Observable, Prediction
import cmath
def bsvll_obs(function, q2, wc_obj, par, B, V, lep):
    r"""Compute a $B_s\to V\ell^+\ell^-$ observable at a given $q^2$, including
    the finite width difference of the $B_s$ system (arXiv:1502.05509).

    ``function`` is called as ``function(y, J, J_bar, J_h)`` where ``y`` is
    half the relative width difference, ``J``/``J_bar`` are the angular
    coefficients of the $B_s$/$\bar B_s$ decay, and ``J_h`` the interference
    coefficients.
    """
    ml = par['m_'+lep]
    mB = par['m_'+B]
    mV = par['m_'+V]
    # y = (DeltaGamma/Gamma)/2: half the relative width difference
    y = par['DeltaGamma/Gamma_'+B]/2.
    # outside the kinematically allowed q2 region the observable vanishes
    if q2 < 4*ml**2 or q2 > (mB-mV)**2:
        return 0
    scale = flavio.config['renormalization scale']['bvll']
    mb = flavio.physics.running.running.get_mb(par, scale)
    ff = flavio.physics.bdecays.bvll.amplitudes.get_ff(q2, par, B, V)
    h = flavio.physics.bdecays.bvll.amplitudes.helicity_amps(q2, ff, wc_obj, par, B, V, lep)
    h_bar = flavio.physics.bdecays.bvll.amplitudes.helicity_amps_bar(q2, ff, wc_obj, par, B, V, lep)
    J = flavio.physics.bdecays.angular.angularcoeffs_general_v(h, q2, mB, mV, mb, 0, ml, ml)
    J_bar = flavio.physics.bdecays.angular.angularcoeffs_general_v(h_bar, q2, mB, mV, mb, 0, ml, ml)
    # CP-conjugate amplitudes with swapped helicities and a sign-flipped scalar
    # part, used for the interference (J_h) coefficients.
    h_tilde = h_bar.copy()
    h_tilde[('pl', 'V')] = h_bar[('mi', 'V')]
    h_tilde[('pl', 'A')] = h_bar[('mi', 'A')]
    h_tilde[('mi', 'V')] = h_bar[('pl', 'V')]
    h_tilde[('mi', 'A')] = h_bar[('pl', 'A')]
    h_tilde['S'] = -h_bar['S']
    q_over_p = flavio.physics.mesonmixing.observables.q_over_p(wc_obj, par, B)
    phi = cmath.phase(-q_over_p) # the phase of -q/p
    J_h = flavio.physics.bdecays.angular.angularcoeffs_h_v(phi, h, h_tilde, q2, mB, mV, mb, 0, ml, ml)
    return function(y, J, J_bar, J_h)
def S_theory_num_Bs(y, J, J_bar, J_h, i):
    """Numerator of the time-integrated angular observable $S_i$ in the theory
    sign convention."""
    # (42) of 1502.05509
    flavio.citations.register("Descotes-Genon:2015hea")
    return 1/(1-y**2) * (J[i] + J_bar[i]) - y/(1-y**2) * J_h[i]
def S_experiment_num_Bs(y, J, J_bar, J_h, i):
    """Numerator of $S_i$ in the LHCb experimental convention, which flips the
    sign of the theory convention for i in {4, 6s, 6c, 7, 9}."""
    sign = -1 if i in (4, '6s', '6c', 7, 9) else 1
    return sign * S_theory_num_Bs(y, J, J_bar, J_h, i)
def S_experiment_Bs(y, J, J_bar, J_h, i):
    r"""CP-averaged angular observable $S_i$ in the LHCb convention.
    See eq. (C.8) of arXiv:1506.03970v2.
    """
    numerator = S_experiment_num_Bs(y, J, J_bar, J_h, i)
    denominator = SA_den_Bs(y, J, J_bar, J_h)
    return numerator/denominator
def dGdq2_ave_Bs(y, J, J_bar, J_h):
    """Time-integrated, CP-averaged differential decay rate."""
    # (48) of 1502.05509
    flavio.citations.register("Descotes-Genon:2015hea")
    return (1/(1-y**2) * (observables.dGdq2(J) + observables.dGdq2(J_bar))
            - y/(1-y**2) * observables.dGdq2(J_h))/2.
# denominator of S_i and A_i observables
def SA_den_Bs(y, J, J_bar, J_h):
    """Common denominator of the $S_i$ and $A_i$ observables: twice the
    time-integrated, CP-averaged rate."""
    rate = dGdq2_ave_Bs(y, J, J_bar, J_h)
    return 2*rate
def FL_Bs(y, J, J_bar, J_h):
    r"""Longitudinal polarization fraction $F_L$"""
    numerator = FL_num_Bs(y, J, J_bar, J_h)
    denominator = SA_den_Bs(y, J, J_bar, J_h)
    return numerator/denominator
def FL_num_Bs(y, J, J_bar, J_h):
    """Numerator of the longitudinal polarization fraction $F_L$."""
    return -S_theory_num_Bs(y, J, J_bar, J_h, '2c')
def bsvll_dbrdq2(q2, wc_obj, par, B, V, lep):
    """Time-integrated differential branching ratio dBR/dq2 (rate times lifetime)."""
    tauB = par['tau_'+B]
    return tauB * bsvll_obs(dGdq2_ave_Bs, q2, wc_obj, par, B, V, lep)
def bsvll_obs_int(function, q2min, q2max, wc_obj, par, B, V, lep, epsrel=0.005):
    r"""Integrate the observable defined by ``function`` over the $q^2$ bin."""
    integrand = lambda q2: bsvll_obs(function, q2, wc_obj, par, B, V, lep)
    return flavio.physics.bdecays.bvll.observables.nintegrate_pole(
        integrand, q2min, q2max, epsrel=epsrel)
def bsvll_dbrdq2_int(q2min, q2max, wc_obj, par, B, V, lep, epsrel=0.005):
    r"""Bin-averaged time-integrated differential branching ratio."""
    integrand = lambda q2: bsvll_dbrdq2(q2, wc_obj, par, B, V, lep)
    integral = flavio.physics.bdecays.bvll.observables.nintegrate_pole(
        integrand, q2min, q2max, epsrel=epsrel)
    return integral/(q2max-q2min)
# Functions returning functions needed for Prediction instances
def bsvll_dbrdq2_int_func(B, V, lep):
    """Return a function of (wc_obj, par, q2min, q2max) for use as a Prediction."""
    def fct(wc_obj, par, q2min, q2max):
        return bsvll_dbrdq2_int(q2min, q2max, wc_obj, par, B, V, lep)
    return fct
def bsvll_dbrdq2_func(B, V, lep):
    """Return a function of (wc_obj, par, q2) for use as a Prediction."""
    def fct(wc_obj, par, q2):
        return bsvll_dbrdq2(q2, wc_obj, par, B, V, lep)
    return fct
def bsvll_obs_int_ratio_func(func_num, func_den, B, V, lep):
    """Return a Prediction function computing the binned ratio func_num/func_den."""
    def fct(wc_obj, par, q2min, q2max):
        numerator = bsvll_obs_int(func_num, q2min, q2max, wc_obj, par, B, V, lep)
        # Skip the second integral entirely for a vanishing numerator.
        if numerator == 0:
            return 0
        denominator = bsvll_obs_int(func_den, q2min, q2max, wc_obj, par, B, V, lep)
        return numerator/denominator
    return fct
def bsvll_obs_int_ratio_leptonflavour(func, B, V, l1, l2):
    """Return a Prediction function for the l1/l2 lepton-flavour ratio of ``func``."""
    def fct(wc_obj, par, q2min, q2max):
        numerator = bsvll_obs_int(func, q2min, q2max, wc_obj, par, B, V, l1, epsrel=0.0005)
        if numerator == 0:
            return 0
        denominator = bsvll_obs_int(func, q2min, q2max, wc_obj, par, B, V, l2, epsrel=0.0005)
        return numerator/denominator
    return fct
def bsvll_obs_ratio_func(func_num, func_den, B, V, lep):
    """Return a Prediction function computing the differential ratio at a given q2."""
    def fct(wc_obj, par, q2):
        numerator = bsvll_obs(func_num, q2, wc_obj, par, B, V, lep)
        if numerator == 0:
            return 0
        denominator = bsvll_obs(func_den, q2, wc_obj, par, B, V, lep)
        return numerator/denominator
    return fct
# Observable and Prediction instances
_tex = {'e': 'e', 'mu': '\mu', 'tau': r'\tau'}
# Mapping from observable key to its numerator function, LaTeX label and
# description. All observables share the common denominator SA_den_Bs.
_observables = {
    'FL': {'func_num': FL_num_Bs, 'tex': r'\overline{F_L}', 'desc': 'Time-averaged longitudinal polarization fraction'},
    'S3': {'func_num': lambda y, J, J_bar, J_h: S_experiment_num_Bs(y, J, J_bar, J_h, 3), 'tex': r'\overline{S_3}', 'desc': 'Time-averaged, CP-averaged angular observable'},
    'S4': {'func_num': lambda y, J, J_bar, J_h: S_experiment_num_Bs(y, J, J_bar, J_h, 4), 'tex': r'\overline{S_4}', 'desc': 'Time-averaged, CP-averaged angular observable'},
    'S7': {'func_num': lambda y, J, J_bar, J_h: S_experiment_num_Bs(y, J, J_bar, J_h, 7), 'tex': r'\overline{S_7}', 'desc': 'Time-averaged, CP-averaged angular observable'},
}
# Hadronic transitions covered by this module.
_hadr = {
    'Bs->phi': {'tex': r"B_s\to \phi ", 'B': 'Bs', 'V': 'phi', },
}
# Register binned and differential observables for every lepton flavour and
# hadronic transition.
for l in ['e', 'mu', 'tau']:
    for M in _hadr.keys():
        _process_tex = _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+r"^-"
        _process_taxonomy = r'Process :: $b$ hadron decays :: FCNC decays :: $B\to V\ell^+\ell^-$ :: $' + _process_tex + r"$"
        for obs in sorted(_observables.keys()):
            # binned angular observables
            _obs_name = "<" + obs + ">("+M+l+l+")"
            _obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
            _obs.set_description('Binned ' + _observables[obs]['desc'] + r" in $" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-$")
            _obs.tex = r"$\langle " + _observables[obs]['tex'] + r"\rangle(" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-)$"
            _obs.add_taxonomy(_process_taxonomy)
            Prediction(_obs_name, bsvll_obs_int_ratio_func(_observables[obs]['func_num'], SA_den_Bs, _hadr[M]['B'], _hadr[M]['V'], l))
            # differential angular observables
            _obs_name = obs + "("+M+l+l+")"
            _obs = Observable(name=_obs_name, arguments=['q2'])
            _obs.set_description(_observables[obs]['desc'][0].capitalize() + _observables[obs]['desc'][1:] + r" in $" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-$")
            _obs.tex = r"$" + _observables[obs]['tex'] + r"(" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-)$"
            _obs.add_taxonomy(_process_taxonomy)
            Prediction(_obs_name, bsvll_obs_ratio_func(_observables[obs]['func_num'], SA_den_Bs, _hadr[M]['B'], _hadr[M]['V'], l))
            # binned branching ratio
            _obs_name = "<dBR/dq2>("+M+l+l+")"
            _obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
            _obs.set_description(r"Binned time-integrated differential branching ratio of $" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-$")
            _obs.tex = r"$\langle \frac{d\overline{\text{BR}}}{dq^2} \rangle(" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-)$"
            _obs.add_taxonomy(_process_taxonomy)
            Prediction(_obs_name, bsvll_dbrdq2_int_func(_hadr[M]['B'], _hadr[M]['V'], l))
            # differential branching ratio
            _obs_name = "dBR/dq2("+M+l+l+")"
            _obs = Observable(name=_obs_name, arguments=['q2'])
            _obs.set_description(r"Differential time-integrated branching ratio of $" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-$")
            _obs.tex = r"$\frac{d\overline{\text{BR}}}{dq^2}(" + _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+"^-)$"
            _obs.add_taxonomy(_process_taxonomy)
            Prediction(_obs_name, bsvll_dbrdq2_func(_hadr[M]['B'], _hadr[M]['V'], l))
# Lepton flavour ratios
for l in [('mu','e'), ('tau','mu'),]:
    for M in _hadr.keys():
        # binned ratio of BRs
        _obs_name = "<R"+l[0]+l[1]+">("+M+"ll)"
        _obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
        _obs.set_description(r"Ratio of partial branching ratios of $" + _hadr[M]['tex'] +_tex[l[0]]+r"^+ "+_tex[l[0]]+r"^-$" + " and " + r"$" + _hadr[M]['tex'] +_tex[l[1]]+r"^+ "+_tex[l[1]]+"^-$")
        _obs.tex = r"$\langle R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"} \rangle(" + _hadr[M]['tex'] + r"\ell^+\ell^-)$"
        for li in l:
            # add taxonomy for both processes (e.g. Bs->Vee and Bs->Vmumu)
            _obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to V\ell^+\ell^-$ :: $' + _hadr[M]['tex'] +_tex[li]+r"^+"+_tex[li]+r"^-$")
        Prediction(_obs_name, bsvll_obs_int_ratio_leptonflavour(dGdq2_ave_Bs, _hadr[M]['B'], _hadr[M]['V'], *l))
| StarcoderdataPython |
1629985 | <reponame>prihoda/bgc-pipeline-1<filename>bgc_detection/evaluation/confusion_matrix.py
#!/usr/bin/env python
# <NAME>
# Plot confusion matrix from a given Domain CSV prediction file
# and prediction threshold defined by the TPR or FPR values to be achieved
import argparse
import pandas as pd
import numpy as np
from sklearn import metrics
import seaborn as sns
import matplotlib.pyplot as plt
def format_confusion_matrix(matrix, classes, figsize=(8, 4.5), title='', **kwargs):
    """
    Plot confusion matrix using provided sklearn metrics.confusion_matrix values
    :param matrix: Numpy array with values from sklearn metrics.confusion_matrix
    :param classes: Tuple for (negative, positive) class labels
    :param figsize: Figure size
    :param title: Figure title
    :param kwargs: Arguments to pass to the plot function
    :return: figure with confusion matrix plot
    """
    # Unpack counts in sklearn's (tn, fp, fn, tp) ravel order.
    tn, fp, fn, tp = matrix.ravel()
    # Cell annotations: raw counts plus derived precision/TPR/FPR summaries.
    labels = np.array([
        [
            'TP = {}'.format(tp),
            'FP = {}'.format(fp),
            'Precision = {:.2f}%'.format(tp / (tp + fp) * 100)
        ],
        [
            'FN = {}'.format(fn),
            'TN = {}'.format(tn),
            ''
        ],
        [
            'TPR = {:.2f}%'.format(tp / (tp + fn) * 100),
            'FPR = {:.2f}%'.format(fp / (fp + tn) * 100),
            ''
        ]
    ])
    #sns.set_style("ticks", {"xtick.major.size": 0, "ytick.major.size": 0})
    #sns.set(font_scale=1.2)
    columns = ['Labelled ' + classes[1], 'Labelled ' + classes[0], '']
    index = ['Predicted ' + classes[1], 'Predicted ' + classes[0], '']
    # The third row/column only hosts the summary annotations (value 0).
    vals = np.array([[tp, fp, 0], [fn, tn, 0], [0, 0, 0]])
    template = pd.DataFrame(vals, index=index, columns=columns)
    print(template)
    # Scale the colormap by the total count so cells shade by their share.
    vmax = np.sum(vals)
    cmap = sns.blend_palette(['white', '#0066cc'], as_cmap=True)
    fig, ax = plt.subplots(1,1,figsize=figsize)
    sns.heatmap(template, ax=ax, annot=labels, fmt='', vmax=vmax, cbar=False, cmap=cmap, linewidths=1, **kwargs)
    # Show the "Labelled ..." tick labels above the matrix.
    ax.xaxis.tick_top()
    plt.suptitle(title, fontsize=13)
    plt.yticks(rotation=0)
    ax.tick_params(labelsize=15)
    fig.tight_layout()
    fig.subplots_adjust(top=0.77)
    return fig
def get_threshold(true_values, predictions, target_fpr=None, target_tpr=None):
    """
    Calculate the threshold that achieves a given FPR or TPR value, based on
    given true values and predictions.
    Can be seen as a horizontal or vertical cut of a ROC curve
    :param true_values: Series of true values
    :param predictions: Series of predictions
    :param target_fpr: Target FPR to be achieved (or None to ignore)
    :param target_tpr: Target TPR to be achieved (or None to ignore)
    :return: threshold that achieves the given FPR or TPR value
    """
    # (The original docstring had the FPR/TPR parameter descriptions swapped.)
    if target_fpr is None and target_tpr is None:
        raise AttributeError('Specify one of TPR and FPR')
    # Use explicit None checks so a legitimate target of 0.0 (which is falsy)
    # is not mistaken for "not specified".
    if target_fpr is not None and target_tpr is not None:
        raise AttributeError('Specify only one of TPR and FPR')
    prev_threshold = None
    fprs, tprs, thresholds = metrics.roc_curve(true_values, predictions)
    for fpr, tpr, threshold in zip(fprs, tprs, thresholds):
        # Stop at the first ROC point that overshoots the target; the previous
        # threshold is then the last one still satisfying the constraint.
        if target_fpr is not None and fpr > target_fpr:
            break
        if target_tpr is not None and tpr > target_tpr:
            break
        prev_threshold = threshold
    # Explicit None check: a threshold of 0.0 is a valid result.
    if prev_threshold is None:
        raise AttributeError('Target FPR or TPR not achievable')
    return prev_threshold
def confusion_matrix(true_values, predictions, threshold, title, **kwargs):
    """
    Plot confusion matrix from a given Domain CSV prediction file and prediction threshold
    :param true_values: Series of true values
    :param predictions: Series of predictions
    :param threshold: Inclusive prediction threshold to use
    :param title: Plot title
    :param kwargs: Additional arguments for plot function
    :return: Figure with confusion matrix
    """
    # Binarize predictions with the (inclusive) threshold before tabulating.
    matrix = metrics.confusion_matrix(true_values, predictions >= threshold)
    title = title + ' (threshold {:.5f})'.format(threshold)
    print(title)
    return format_confusion_matrix(matrix, title=title, classes=['non-BGC', 'BGC'], **kwargs)
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("--fpr", dest="fpr", default=None, type=float,
help="Target validation FPR used to select threshold.", metavar="FLOAT")
parser.add_argument("--tpr", dest="tpr", default=None, type=float,
help="Target validation TPR used to select threshold.", metavar="FLOAT")
parser.add_argument("--threshold", dest="threshold", required=False, type=float,
help="Prediction threshold to select domains.", metavar="INT")
parser.add_argument("-t", "--title", dest="title", required=True,
help="Confusion matrix plot title.", metavar="STRING")
parser.add_argument("-o", "--output", dest="output", required=True,
help="Output file path.", metavar="FILE")
parser.add_argument(dest='predictions', nargs='+',
help="Paths to CSV prediction files.", metavar="FILE")
options = parser.parse_args()
predictions = [pd.read_csv(path) for path in options.predictions]
merged_true_values = np.concatenate([p['in_cluster'] for p in predictions])
merged_predictions = np.concatenate([p['prediction'] for p in predictions])
if options.threshold:
threshold = options.threshold
elif options.fpr or options.tpr:
threshold = get_threshold(merged_true_values, merged_predictions, target_fpr=options.fpr,
target_tpr=options.tpr)
else:
raise AttributeError('Specify either threshold or target TPR/FPR')
fig = confusion_matrix(merged_true_values, merged_predictions, threshold, title=options.title)
fig.savefig(options.output, dpi=100)
print('Saved plot to {}'.format(options.output)) | StarcoderdataPython |
37114 | import pytest
from django.urls import reverse
class TestImageUpload:
    """Integration tests for the cast image upload API endpoint."""

    @pytest.mark.django_db
    def test_upload_image_not_authenticated(self, client, small_jpeg_io):
        """Anonymous uploads must be redirected to the login page."""
        upload_url = reverse("cast:api:upload_image")
        small_jpeg_io.seek(0)
        r = client.post(upload_url, {"original": small_jpeg_io})
        # redirect to login
        assert r.status_code == 302

    @pytest.mark.django_db
    def test_upload_image_authenticated(self, client, user, small_jpeg_io):
        """Authenticated uploads succeed and return the created image's id."""
        # login
        # FIX: the password literal was redacted in the original source and did
        # not parse (`password=<PASSWORD>`). NOTE(review): assumes the `user`
        # fixture keeps the raw password in `user._password` (the attribute
        # Django's set_password stores in memory) — confirm against the fixture.
        r = client.login(username=user.username, password=user._password)
        # upload
        upload_url = reverse("cast:api:upload_image")
        small_jpeg_io.seek(0)
        r = client.post(upload_url, {"original": small_jpeg_io})
        assert r.status_code == 201
        # the response body is the numeric id of the created image
        assert int(r.content.decode("utf-8")) > 0
| StarcoderdataPython |
3231028 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from path import path
from cloudify import ctx
from cloudify import exceptions
from cloudify.decorators import operation
@operation
def create_idea_project(virtualenv_name, **_):
    """Generate IntelliJ IDEA project files for this node's related repos.

    Creates the .idea directory files (.name, <project>.iml, misc.xml,
    modules.xml, vcs.xml) plus one module .iml per resource directory and
    python package. Existing files are never overwritten; each is only
    rendered when missing. Does nothing if no related repo is flagged as
    the project dir.

    :param virtualenv_name: name written to .name and rendered into misc.xml
    """
    project_dir, repo_paths, package_paths, resource_dirs = _extract_dirs()
    if not project_dir:
        ctx.logger.info('No project dir configured.')
        return
    project_dir = path(project_dir)
    idea_dir = project_dir / '.idea'
    name_file = idea_dir / '.name'
    project_iml = idea_dir / '{}.iml'.format(project_dir.basename())
    misc_xml = idea_dir / 'misc.xml'
    modules_xml = idea_dir / 'modules.xml'
    vcs_xml = idea_dir / 'vcs.xml'
    module_paths = []
    # One WEB_MODULE .iml per resource directory.
    for resource_dir in resource_dirs:
        # Strip a trailing slash so basename() yields the directory name.
        if resource_dir.endswith('/'):
            resource_dir = resource_dir[:-1]
        resource_dir = path(resource_dir)
        iml_path = resource_dir / '{}.iml'.format(resource_dir.basename())
        module_paths.append(iml_path)
        if not iml_path.exists():
            ctx.logger.info('Adding resource module: {}'.format(iml_path))
            ctx.download_resource_and_render(
                resource_path='resources/idea/module.xml',
                target_path=iml_path,
                template_variables={'module_type': 'WEB_MODULE'})
    # One PYTHON_MODULE .iml per python package.
    for package_path in package_paths:
        if package_path.endswith('/'):
            package_path = package_path[:-1]
        package_path = path(package_path)
        iml_path = package_path / '{}.iml'.format(package_path.basename())
        module_paths.append(iml_path)
        if not iml_path.exists():
            ctx.logger.info('Adding python module: {}'.format(iml_path))
            ctx.download_resource_and_render(
                resource_path='resources/idea/module.xml',
                target_path=iml_path,
                template_variables={'module_type': 'PYTHON_MODULE'})
    # Render the top-level .idea files, only if they do not exist yet.
    if not idea_dir.exists():
        idea_dir.mkdir_p()
    if not name_file.exists():
        name_file.write_text(virtualenv_name)
    if not project_iml.exists():
        ctx.download_resource_and_render(
            resource_path='resources/idea/module.xml',
            target_path=project_iml,
            template_variables={'module_type': 'JAVA_MODULE'})
    if not misc_xml.exists():
        ctx.download_resource_and_render(
            resource_path='resources/idea/misc.xml',
            target_path=misc_xml,
            template_variables={'virtualenv_name': virtualenv_name})
    if not modules_xml.exists():
        ctx.download_resource_and_render(
            resource_path='resources/idea/modules.xml',
            target_path=modules_xml,
            template_variables={'module_paths': module_paths})
    if not vcs_xml.exists():
        ctx.download_resource_and_render(
            resource_path='resources/idea/vcs.xml',
            target_path=vcs_xml,
            template_variables={'repo_paths': repo_paths})
def _extract_dirs():
    """Collect directory information from this node instance's relationships.

    Returns a 4-tuple ``(project_dir, repo_paths, package_paths,
    resource_paths)`` where ``project_dir`` is the location of the repo
    flagged as the IDEA project root (or None if no repo is flagged).

    :raises NonRecoverableError: if more than one repo is flagged as project dir
    """
    project_dir = None
    repo_paths = []
    package_paths = []
    resource_paths = []
    for rel in ctx.instance.relationships:
        node = rel.target.node
        instance = rel.target.instance
        props = node.properties
        runtime_props = instance.runtime_properties
        if node.type == 'git_repo':
            repo_location = runtime_props['repo_location']
            repo_paths.append(repo_location)
            if props['project_dir'] is True:
                if project_dir:
                    raise exceptions.NonRecoverableError(
                        'Cannot configure more than one project dir')
                project_dir = repo_location
            resources = props['resources'] or []
            # A literal True means "the whole repo is a resource root".
            if resources is True:
                resources = ['']
            repo_location = path(repo_location)
            for resource_path in resources:
                resource_paths.append(repo_location / resource_path)
        elif node.type == 'python_package':
            package_paths.append(runtime_props['package_path'])
    return project_dir, repo_paths, package_paths, resource_paths
# Basic String Operations (Title)
# Reading
# Iterating over a String with the 'for' Loop (section)
# General Format:
#   for variable in string:
#       statement
#       statement
#       etc.
name = 'Juliet'
# Prints one character of the name per line.
for ch in name:
    print(ch)
# This program counts the number of times the letter T
# (uppercase or lowercase) appears in a string.
# (with 'for' loop)
def main():
    """Count occurrences of the letter T (either case) in a sentence read from stdin."""
    sentence = input('Enter a sentence: ')
    count = sum(1 for ch in sentence if ch in ('T', 't'))
    print(f'The letter T appears {count} times.')


if __name__ == '__main__':
    main()
# Indexing (section)
my_string = 'Roses are red'
ch = my_string[6]
my_string = 'Roses are red'
print(my_string[0], my_string[6], my_string[10])
# negative numbers
my_string = 'Roses are red'
print(my_string[-1], my_string[-2], my_string[-13])
# IndexError Exceptions (section)
# Occur if index out of range for a particular string
city = 'Boston'
# NOTE: the next line raises IndexError (index 6 is past the last character,
# index 5) — this is the tutorial's deliberate demonstration.
print(city[6])
city = 'Boston'
index = 0
# NOTE: this loop also raises IndexError on its last iteration (index 6) —
# deliberate demonstration of iterating beyond the end of a string.
while index < 7:
    print(city[index])
    index += 1
# The 'len' Function (section)
# useful to prevent loops from iterating beyond the end
# of a string.
city = 'Boston'
size = len(city)
print(size)
city = 'Boston'
index = 0
while index < len(city):
    print(city[index])
    index += 1
# String Concatenation (section)
name = 'Kelly'
name += ' '
name += 'Yvonne'
name += ' '
name += 'Smith'
print(name)
# Strings are immutable (section)
# This program concatenates strings.
def main():
    name = 'Carmen'
    print(f'The name is: {name}')
    name = name + ' Brown'
    print(f'Now the name is: {name}')
if __name__ == '__main__':
    main()
# no string[index] on left side of an assignment operator
# Error below
friend = 'Bill'
# NOTE: raises TypeError — strings are immutable (deliberate demonstration).
friend[0] = 'J'
# End
# Checkpoint
# 8.1 Assume the variable 'name' references a string. Write a
#     'for' loop that prints each character in the string.
name = 'name'
for letter in name:
    print(letter)
# 8.2 What is the index of the first character in a string?
# A. 0
# 8.3 If a string has 10 characters, what is the index of the
#     last character?
# A. 9
# 8.4 What happens if you try to use an invalid index to
#     access a character in a string?
# A. An IndexError exception will occur if you try to use an
#    index that is out of range for a particular string.
# 8.5 How do you find the length of a string?
# A. Use the built-in len function.
# 8.6 What is wrong with the following code?
animal = 'Tiger'
# NOTE: this line raises TypeError when executed — it is the checkpoint's
# intentional example of invalid code.
animal [0] = 'L'
# A. The second statement attempts to assign a value to an
#    individual character in the string. Strings are immutable,
#    however, so the expression animal [0] cannot appear on the
#    left side of an assignment operator.
# End
1706547 | <filename>BotErrors/RepeatedUserError.py
class RepeatedUserError(Exception):
    """Bot error raised for a repeated user."""
| StarcoderdataPython |
1763230 | import logging
import telegrampy
from telegrampy.ext import commands
# Configure root logging and grab the library's logger for visibility.
logging.basicConfig(level=logging.INFO, format="(%(asctime)s) %(levelname)s %(message)s", datefmt="%m/%d/%y - %H:%M:%S %Z")
logger = logging.getLogger("telegrampy")
# Make sure to never share your token
bot = commands.Bot("token here")
# Create and register a simple command called "hi"
# This is invoked with "/hi" and the bot will respond with "Hello"
@bot.command()
async def hi(ctx):
    await ctx.send("Hello")
# Start polling updates from Telegram
bot.run()
| StarcoderdataPython |
1665884 | #! /usr/bin/env python3.6
"""
server.py
Stripe Sample.
Python 3.6 or newer required.
"""
import json
import os
import random
import string
import stripe
from dotenv import load_dotenv, find_dotenv
from flask import Flask, jsonify, render_template, redirect, request, session, send_from_directory, Response
import urllib
# Setup Stripe python client library
load_dotenv(find_dotenv())
stripe.api_key = os.getenv('STRIPE_SECRET_KEY')
stripe.api_version = os.getenv('STRIPE_API_VERSION', '2019-12-03')
static_dir = str(os.path.abspath(os.path.join(__file__ , "..", os.getenv("STATIC_DIR"))))
app = Flask(__name__, static_folder=static_dir,
            static_url_path="", template_folder=static_dir)
# Set the secret key to some random bytes. Keep this really secret!
# This enables Flask sessions.
# SECURITY(review): the session secret is hardcoded in source; it should be
# loaded from the environment rather than committed to version control.
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route('/', methods=['GET'])
def get_example():
    """Serve the sample's index page."""
    return render_template('index.html')
@app.route("/transfer", methods=["POST"])
def transfer():
data = json.loads(request.data)
transfer = stripe.Transfer.create(
amount=data['amount'],
currency='usd',
destination=data['account'],
)
return jsonify({
'transfer': transfer
})
def get_balance_usd():
    """Return the platform account's available USD balance as ``{'balance': amount}``."""
    balance = stripe.Balance.retrieve()
    # Take the first USD entry; a missing USD entry means a zero balance.
    usd_amounts = [b['amount'] for b in balance['available'] if b['currency'] == 'usd']
    usd_balance = usd_amounts[0] if usd_amounts else 0
    return {
        'balance': usd_balance
    }
@app.route("/platform-balance", methods=["GET"])
def platform_balance():
return jsonify(get_balance_usd())
@app.route("/add-platform-balance", methods=["POST"])
def add_platform_balance():
data = json.loads(request.data)
try:
stripe.Topup.create(
amount=data['amount'],
currency='usd',
description='Stripe sample top-up',
statement_descriptor='Stripe sample',
)
except stripe.error.StripeError as e:
return jsonify({'error': str(e)})
return jsonify(get_balance_usd())
@app.route("/recent-accounts", methods=["GET"])
def get_accounts():
accounts = stripe.Account.list(limit=10)
return jsonify({'accounts': accounts})
@app.route("/express-dashboard-link", methods=["GET"])
def get_express_dashboard_link():
account_id = request.args.get('account_id')
link = stripe.Account.create_login_link(account_id, redirect_url=(request.url_root))
return jsonify({'url': link.url})
if __name__== '__main__':
    # Run the Flask development server on the port expected by the sample.
    app.run(port=4242)
| StarcoderdataPython |
1748719 | <gh_stars>0
import tensorflow as tf
import sys
# Ids of the special vocabulary tokens prepended to the input positions.
START_ID=0
PAD_ID = 1
END_ID=2
class PointerWrapper(tf.contrib.seq2seq.AttentionWrapper):
    """Customized AttentionWrapper for PointerNet.

    Instead of emitting an attention-weighted output, this wrapper emits the
    attention alignments themselves, which the pointer network interprets as
    scores over input positions.
    """
    def __init__(self,cell,attention_size,memory,initial_cell_state=None,name=None):
        """Wrap ``cell`` with Bahdanau attention over ``memory``.

        :param cell: inner RNN cell to wrap
        :param attention_size: depth of the attention mechanism
        :param memory: encoder outputs to point over
        :param initial_cell_state: optional initial state of the inner cell
        :param name: optional name scope
        """
        # In the paper, Bahdanau Attention Mechanism is used
        # We want the scores rather than the probabilities of alignments
        # Hence, we customize the probability_fn to return scores directly
        attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attention_size, memory, probability_fn=lambda x: x )
        # According to the paper, no need to concatenate the input and attention
        # Therefore, we make cell_input_fn to return input only
        cell_input_fn=lambda input, attention: input
        # Call super __init__
        super(PointerWrapper, self).__init__(cell,
                                             attention_mechanism=attention_mechanism,
                                             attention_layer_size=None,
                                             alignment_history=False,
                                             cell_input_fn=cell_input_fn,
                                             output_attention=True,
                                             initial_cell_state=initial_cell_state,
                                             name=name)
    @property
    def output_size(self):
        # The wrapper's output is the alignments vector (one score per input position).
        return self.state_size.alignments
    def call(self, inputs, state):
        # Discard the standard attention output; return the alignments instead.
        _, next_state = super(PointerWrapper, self).call(inputs, state)
        return next_state.alignments, next_state
class PointerNet(object):
""" Pointer Net Model
This class implements a multi-layer Pointer Network
aimed to solve the Convex Hull problem. It is almost
the same as the model described in this paper:
https://arxiv.org/abs/1506.03134.
"""
def __init__(self, batch_size=128, max_input_sequence_len=5, max_output_sequence_len=7,
rnn_size=128, attention_size=128, num_layers=2, beam_width=2,
learning_rate=0.001, max_gradient_norm=5, forward_only=False,slice_length=1439):
"""Create the model.
Args:
batch_size: the size of batch during training
max_input_sequence_len: the maximum input length
max_output_sequence_len: the maximum output length
rnn_size: the size of each RNN hidden units
attention_size: the size of dimensions in attention mechanism
num_layers: the number of stacked RNN layers
beam_width: the width of beam search
learning_rate: the initial learning rate during training
max_gradient_norm: gradients will be clipped to maximally this norm.
forward_only: whether the model is forwarding only
"""
self.batch_size = batch_size
self.max_input_sequence_len = max_input_sequence_len
self.max_output_sequence_len = max_output_sequence_len
self.forward_only = forward_only
self.init_learning_rate = learning_rate
# Note we have three special tokens namely 'START', 'END'
# Here the size of vocab need be added by 3
self.vocab_size = max_input_sequence_len+3
# Global step
self.global_step = tf.Variable(0, trainable=False)
# Choose LSTM Cell
cell = tf.contrib.rnn.LSTMCell
# Create placeholders
self.inputs = tf.placeholder(tf.float32, shape=[self.batch_size,self.max_input_sequence_len,slice_length], name="inputs")
self.outputs = tf.placeholder(tf.int32, shape=[self.batch_size,self.max_output_sequence_len+1], name="outputs")
self.enc_input_weights = tf.placeholder(tf.int32,shape=[self.batch_size,self.max_input_sequence_len], name="enc_input_weights")
self.dec_input_weights = tf.placeholder(tf.int32,shape=[self.batch_size,self.max_output_sequence_len], name="dec_input_weights")
# Calculate the lengths
enc_input_lens=tf.reduce_sum(self.enc_input_weights,axis=1)
dec_input_lens=tf.reduce_sum(self.dec_input_weights,axis=1)
# Special token embedding
special_token_embedding = tf.get_variable("special_token_embedding", [3,slice_length], tf.float32, tf.contrib.layers.xavier_initializer())
# Embedding_table
# Shape: [batch_size,vocab_size,features_size]
embedding_table = tf.concat([tf.tile(tf.expand_dims(special_token_embedding,0),[self.batch_size,1,1]), self.inputs],axis=1)
# Unstack embedding_table
# Shape: batch_size*[vocab_size,features_size]
embedding_table_list = tf.unstack(embedding_table, axis=0)
# Unstack outputs
# Shape: (max_output_sequence_len+1)*[batch_size]
outputs_list = tf.unstack(self.outputs, axis=1)
# targets
# Shape: [batch_size,max_output_sequence_len]
self.targets = tf.stack(outputs_list[1:],axis=1)
# decoder input ids
# Shape: batch_size*[max_output_sequence_len,1]
dec_input_ids = tf.unstack(tf.expand_dims(tf.stack(outputs_list[:-1],axis=1),2),axis=0)
# encoder input ids
# Shape: batch_size*[max_input_sequence_len+1,1]
enc_input_ids = [tf.expand_dims(tf.range(2,self.vocab_size),1)]*self.batch_size
# Look up encoder and decoder inputs
encoder_inputs = []
decoder_inputs = []
for i in range(self.batch_size):
encoder_inputs.append(tf.gather_nd(embedding_table_list[i], enc_input_ids[i]))
decoder_inputs.append(tf.gather_nd(embedding_table_list[i], dec_input_ids[i]))
# Shape: [batch_size,max_input_sequence_len+1,2]
encoder_inputs = tf.stack(encoder_inputs,axis=0)
# Shape: [batch_size,max_output_sequence_len,2]
decoder_inputs = tf.stack(decoder_inputs,axis=0)
# Stack encoder cells if needed
if num_layers > 1:
fw_enc_cell = tf.contrib.rnn.MultiRNNCell([cell(rnn_size) for _ in range(num_layers)])
bw_enc_cell = tf.contrib.rnn.MultiRNNCell([cell(rnn_size) for _ in range(num_layers)])
else:
fw_enc_cell = cell(rnn_size)
bw_enc_cell = cell(rnn_size)
# Tile inputs if forward only
if self.forward_only:
# Tile encoder_inputs and enc_input_lens
encoder_inputs = tf.contrib.seq2seq.tile_batch(encoder_inputs,beam_width)
enc_input_lens = tf.contrib.seq2seq.tile_batch(enc_input_lens,beam_width)
# Encode input to obtain memory for later queries
memory,_ = tf.nn.bidirectional_dynamic_rnn(fw_enc_cell, bw_enc_cell, encoder_inputs, enc_input_lens, dtype=tf.float32)
# Shape: [batch_size(*beam_width), max_input_sequence_len+1, 2*rnn_size]
memory = tf.concat(memory, 2)
# PointerWrapper
pointer_cell = PointerWrapper(cell(rnn_size), attention_size, memory)
# Stack decoder cells if needed
if num_layers > 1:
dec_cell = tf.contrib.rnn.MultiRNNCell([cell(rnn_size) for _ in range(num_layers-1)]+[pointer_cell])
else:
dec_cell = pointer_cell
# Different decoding scenario
if self.forward_only:
# Tile embedding_table
tile_embedding_table = tf.tile(tf.expand_dims(embedding_table,1),[1,beam_width,1,1])
# Customize embedding_lookup_fn
def embedding_lookup(ids):
# Note the output value of the decoder only ranges 0 to max_input_sequence_len
# while embedding_table contains two more tokens' values
# To get around this, shift ids
# Shape: [batch_size,beam_width]
ids = ids+2
# Shape: [batch_size,beam_width,vocab_size]
one_hot_ids = tf.cast(tf.one_hot(ids,self.vocab_size), dtype=tf.float32)
# Shape: [batch_size,beam_width,vocab_size,1]
one_hot_ids = tf.expand_dims(one_hot_ids,-1)
# Shape: [batch_size,beam_width,features_size]
next_inputs = tf.reduce_sum(one_hot_ids*tile_embedding_table, axis=2)
return next_inputs
# Do a little trick so that we can use 'BeamSearchDecoder'
shifted_START_ID = START_ID - 2
shifted_END_ID = END_ID - 2
# Beam Search Decoder
decoder = tf.contrib.seq2seq.BeamSearchDecoder(dec_cell, embedding_lookup,
tf.tile([shifted_START_ID],[self.batch_size]), shifted_END_ID,
dec_cell.zero_state(self.batch_size*beam_width,tf.float32), beam_width)
# Decode
outputs, a, b = tf.contrib.seq2seq.dynamic_decode(decoder)
self.op = outputs
# predicted_ids
# Shape: [batch_size, max_output_sequence_len, beam_width]
predicted_ids = outputs.predicted_ids
# Transpose predicted_ids
# Shape: [batch_size, beam_width, max_output_sequence_len]
self.predicted_ids = tf.transpose(predicted_ids,[0,2,1])
else:
# Get the maximum sequence length in current batch
cur_batch_max_len = tf.reduce_max(dec_input_lens)
# Training Helper
helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs, dec_input_lens)
# Basic Decoder
decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, dec_cell.zero_state(self.batch_size,tf.float32))
# Decode
outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder,impute_finished=True)
# logits
logits = outputs.rnn_output
self.rnn_output = outputs.rnn_output
# predicted_ids_with_logits
self.predicted_ids_with_logits=tf.nn.top_k(logits)
# Pad logits to the same shape as targets
logits = tf.concat([logits,tf.ones([self.batch_size,self.max_output_sequence_len-cur_batch_max_len,self.max_input_sequence_len+1])],axis=1)
# Subtract target values by 2
# because prediction output ranges from 0 to max_input_sequence_len+1
# while target values are from 0 to max_input_sequence_len + 3
self.shifted_targets = (self.targets - 2)*self.dec_input_weights
# Losses
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.shifted_targets, logits=logits)
# Total loss
self.loss = tf.reduce_sum(losses*tf.cast(self.dec_input_weights,tf.float32))/self.batch_size
# Get all trainable variables
parameters = tf.trainable_variables()
# Calculate gradients
gradients = tf.gradients(self.loss, parameters)
# Clip gradients
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm)
# Optimization
#optimizer = tf.train.GradientDescentOptimizer(self.init_learning_rate)
optimizer = tf.train.AdamOptimizer(self.init_learning_rate)
# Update operator
self.update = optimizer.apply_gradients(zip(clipped_gradients, parameters),global_step=self.global_step)
# Summarize
tf.summary.scalar('loss',self.loss)
for p in parameters:
tf.summary.histogram(p.op.name,p)
for p in gradients:
tf.summary.histogram(p.op.name,p)
# Summarize operator
self.summary_op = tf.summary.merge_all()
#DEBUG PART
self.debug_var = logits
#/DEBUG PART
# Saver
self.saver = tf.train.Saver(tf.global_variables(),max_to_keep=0)
def step(self, session, inputs, enc_input_weights, outputs=None, dec_input_weights=None, update=True):
    """Run a single forward/training step of the model.

    Args:
        session: TensorFlow session to run the fetches in.
        inputs: point positions in 2D coordinates, shape [batch_size, max_input_sequence_len, 2].
        enc_input_weights: encoder input weights, shape [batch_size, max_input_sequence_len].
        outputs: target point indexes, shape [batch_size, max_output_sequence_len+1] (training only).
        dec_input_weights: decoder input weights, shape [batch_size, max_output_sequence_len] (training only).
        update: whether to apply the gradient-update op (training mode only).

    Returns:
        Training with update: (summary, loss, predicted_ids_with_logits, shifted_targets, debug_var, rnn_output).
        Training without update: (summary, loss, predicted_ids_with_logits, shifted_targets, debug_var).
        Evaluation (forward_only, update=False): (predicted_ids, decode_op_output).
        Otherwise: None (no fetch plan matches forward_only with update=True).
    """
    # Assemble the feed dict; decoder-side placeholders exist only in training graphs.
    feed = {self.inputs: inputs, self.enc_input_weights: enc_input_weights}
    training = not self.forward_only
    if training:
        feed[self.outputs] = outputs
        feed[self.dec_input_weights] = dec_input_weights
    # Choose the fetch plan for this mode.
    fetches = []
    if self.forward_only and not update:
        fetches = [self.predicted_ids, self.op]
    elif training and update:
        fetches = [self.update, self.summary_op, self.loss, self.predicted_ids_with_logits,
                   self.shifted_targets, self.debug_var, self.rnn_output]
    elif training and not update:
        fetches = [self.summary_op, self.loss, self.predicted_ids_with_logits,
                   self.shifted_targets, self.debug_var]
    results = session.run(fetches, feed)
    # Unpack per mode; the train op (results[0]) evaluates to None and is dropped.
    if training and update:
        return results[1], results[2], results[3], results[4], results[5], results[6]
    if self.forward_only and not update:
        return results[0], results[1]
    if training and not update:
        return results[0], results[1], results[2], results[3], results[4]
| StarcoderdataPython |
129278 | # python3
# coding=<UTF-8>
import os
import re
from lxml.etree import parse, HTMLParser
from urllib.request import quote
from ..params_container import Container
from ..target import Target
from ..exceptions import EmptyPageException
__author__ = 'akv17'
__doc__ = \
"""
National Corpus of Russian
==========================
API for National Corpus of Russian (http://ruscorpora.ru/index.html)
**Search Parameters**
query: str or list([str])
query or queries (currently only exact search by word or phrase is available)
num_results: int, default 100
number of results wanted
kwic: bool, default True
kwic format (True) or a sentence (False)
get_analysis: bool, default False
whether to collect grammatical tags for target word or not
subcorpus: str, default 'main'
subcorpus.
Valid: ['main', 'syntax', 'paper', 'regional', 'school',
'dialect', 'poetic', 'spoken', 'accent', 'murco',
'multiparc', 'old_rus', 'birchbark', 'mid_rus', 'orthlib']
Example
-------
.. code-block:: python
corp = lingcorpora.Corpus('rus')
results = corp.search('сердце', n_results=10, subcorpus='poetic')
for result in results:
for i, target in enumerate(result):
print(i+1, target.text)
.. parsed-literal::
"сердце": 100%|██████████| 10/10 [00:00<00:00, 23.94docs/s]
1 Не будильник поставлен на шесть, а колотится сердце быстрей.
2 Да и сердце легче бьется ― поддается уговорам.
3 Прелесть, от нее дрожит сердце возле птиц или ночниц бледных.
4 Чтоб они стали перинами белыми с мягкой опорой на дне, и невредимыми съехали, целыми дети на той стороне. Сердце привязано ниткой невидимой. Нить коротка, а земля велика.
5 Моя мама умерла девятого мая, когда всюду день-деньской надрывают сердце «аты-баты» ― коллективный катарсис такой.
6 Чтобы скорей, скорей горло его достать. Сердце его потрогать.
7 И ― прочь через площадь в закатных лучах В какой-нибудь Чехии, Польше… Разбитое сердце, своя голова на плечах ― Чего тебе больше?
8 «Хотел бы я знать, если Бог повелит, О чем твое старое сердце болит».
9 Когда уйдет последний друг И в сердце перемрут подруги, Я очерчу незримый круг И лиру заключу в том круге.
10 О том не надо вспоминать, Но что-то в сердце изломилось: ― Не узнаю родную мать.
"""
GR_TAGS_INFO = \
"""
Часть речи
существительное: S
прилагательное: A
числительное: NUM
числприл: ANUM
глагол: V
наречие: ADV
предикатив: PRAEDIC
вводное слово: PARENTH
местсущ: SPRO
местприл: APRO
местпредикатив: PRAEDICPRO
местоименное наречие: ADVPRO
предлог: PR
союз: CONJ
частица: PART
междометие: INTJ
Падеж
именительный: nom
звательный: voc
родительный: gen
родительный 2: gen2
дательный: dat
винительный: acc
винительный 2: acc2
творительный: ins
предложный: loc
предложный 2: loc2
счётная форма: adnum
Наклонение / Форма
изъявительное: indic
повелительное: imper
повелительное 2: imper2
инфинитив: inf
причастие: partcp
деепричастие: ger
Степень / Краткость
сравнительная: comp
сравнительная 2: comp2
превосходная: supr
полная форма: plen
краткая форма: brev
Время
настоящее: praes
будущее: fut
прошедшее: praet
Переходность
переходный: tran
непереходный: intr
Число
единственное: sg
множественное: pl
Лицо
первое: 1p
второе: 2p
третье: 3p
Прочее
цифровая запись: ciph
аномальная форма: anom
искаженная форма: distort
инициал: INIT
сокращение: abbr
несклоняемое: 0
топоним: topon
Имена собственные
фамилия: famn
имя: persn
отчество: patrn
Род
мужской: m
женский: f
средний: n
общий: mf
Залог
действительный: act
страдательный: pass
медиальный: med
Одушевленность
одушевленное: anim
неодушевленное: inan
Вид
совершенный: pf
несовершенный: ipf
"""
TEST_DATA = {'test_single_query': {'query': 'фонема'},
'test_multi_query': {'query': ['фонема', 'морфема']}
}
class PageParser(Container):
    """Page-by-page crawler for the ruscorpora.ru search API.

    Iterates over result pages of one query, parses each page's markup into
    ``Target`` objects, and stops when ``n_results`` hits have been yielded,
    a page comes back empty, or the server starts repeating the first page.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Set once crawling must stop (empty page, repeated page, or quota reached).
        self.__stop_flag = False
        # Index of the next result page to request (0-based).
        self.__page_num = 0
        # Number of Target hits yielded so far, compared against self.n_results.
        self.__targets_seen = 0
        self.subcorpus = self.subcorpus if self.subcorpus is not None else 'main'
        self.__seed = ''
        self.__xpath = '/page/searchresult/body/result/document'
        # dpp: documents per page; spd: snippets per document.
        self.__dpp = 50
        self.__spd = 100
        self.__url = 'http://processing.ruscorpora.ru/search.xml?'
        if 'accentize' in kwargs and kwargs['accentize']:
            # Accented output: the "nodia" (no diacritics) flag is omitted.
            self.__request = 'env=alpha&mode=%s&text=lexform&sort=i_grtagging&dpp=%s&spd=%s&req=%s&p=%s'
            self.__request_gr = 'env=alpha&mode=%s&text=lexgramm&sort=i_grtagging&seed=%s&dpp=%s&lex1=%s&gramm1=%s&p=%s'
        else:
            self.__request = 'env=alpha&nodia=1&mode=%s&text=lexform&sort=i_grtagging&seed=%s&dpp=%s&req=%s&p=%s'
            self.__request_gr = 'env=alpha&nodia=1&mode=%s&text=lexgramm&sort=i_grtagging&seed=%s&dpp=%s&lex1=%s&gramm1=%s&p=%s'

    def __get_ana(self, word):
        """Parse one word's grammatical analysis.

        Returns a dict mapping each annotation category name (lex, sem, m, ...)
        to the list of its atomic values found under that category.
        """
        ana = dict()
        for _ana in word.findall('ana'):
            # iter over values of current ana of target (lex, sem, m, ...)
            for ana_type in _ana.findall('el'):
                ana[ana_type.attrib['name']] = [x.text for x in ana_type.findall('el-group/el-atom')]
        return ana

    def __parse_docs(self, docs, analysis=True):
        """A generator over the etree of documents.

        Yields 5-tuples (text, target_index_pair_or_None, meta, [analysis] or
        None, gr_tags) — the positional arguments of ``Target``.
        """
        # iter over docs
        for doc in docs:
            meta = doc.xpath('span')[0].text
            # Disabled filter: stop once documents are no longer
            # hand-disambiguated ('омонимия снята' = "homonymy resolved").
            # if 'омонимия снята' not in doc.xpath('span')[1].text:
            #     self.__stop_flag = True
            #     print('конец снятой омонимии', self.__page_num)
            #     break
            # iter over snippets in *doc*
            for snip in doc.xpath('.//li'):
                text = str()
                _len = 0
                target_idxs = list()
                ana = list()
                # iter over words in cur example
                for word in snip.getchildren()[:-3]:
                    # nonalpha and unknown tokens
                    if word.tag == 'span':
                        text += word.text + (word.tail if word.tail is not None and word.tail != ' ' else '')
                        # NOTE(review): this adds the length of the whole
                        # accumulated text, not of the token just appended —
                        # looks like an offset bug; confirm before relying on
                        # target index positions.
                        _len += len(text)
                        continue
                    # lexical tokens
                    # NOTE(review): "and False" makes this branch unreachable
                    # (leftover debug toggle?); no target is ever collected
                    # while it stands — confirm against upstream lingcorpora.
                    if word.attrib and False:
                        text += word.attrib['text']
                        # process target
                        if word.attrib.get('target') is not None:
                            target_idxs.append((_len, _len + len(word.attrib['text'])))
                            ana.append(self.__get_ana(word) if analysis else dict())
                        _len += len(word.attrib['text'])
                if target_idxs:
                    for i, idxs in enumerate(target_idxs):
                        # Skip a hit that is immediately continued by the next
                        # one (adjacent index ranges), keeping only range ends.
                        if i + 1 == len(target_idxs) or idxs[1] + 1 != target_idxs[i + 1][0]:
                            yield text, idxs, meta, [ana[i]], self.gr_tags
                else:
                    # continue
                    yield text, None, meta, None, self.gr_tags

    def __get_page(self, page_num):
        """Fetch and parse one result page.

        return: etree of the page
        """
        # NOTE(review): with the seed argument commented out, the "gr" branch
        # supplies 5 values to a template with 6 placeholders (and the plain
        # nodia template's seed/dpp slots receive dpp/spd) — verify these
        # query strings against a live request before changing anything.
        if self.gr_tags is not None:
            arguments = (self.subcorpus,
                         # self.__seed,
                         self.__dpp,
                         quote(self.query),
                         quote(self.gr_tags),
                         page_num
                         )
        else:
            arguments = (self.subcorpus,
                         # self.__seed,
                         self.__dpp,
                         self.__spd,
                         quote(self.query),
                         page_num
                         )
        request = self.__request_gr if self.gr_tags is not None else self.__request
        request = request % (arguments)
        return parse(self.__url + request, HTMLParser(encoding='utf-8'))

    def __get_results(self, page):
        """Yield Target objects from one page, honouring the n_results quota."""
        docs_tree = page.xpath('//ol/li')
        if not docs_tree:
            # raise EmptyPageException
            self.__stop_flag = True
            return
        for doc in self.__parse_docs(docs_tree, self.get_analysis):
            self.__targets_seen += 1
            # n_results == -1 means "no limit".
            if self.n_results == -1 or self.__targets_seen <= self.n_results:
                yield Target(*doc)
            else:
                self.__stop_flag = True
                return

    def extract(self):
        """A streamer to Corpus: yields Targets page after page until stopped."""
        while not self.__stop_flag:
            try:
                page = self.__get_page(self.__page_num)
                if self.__page_num == 0:
                    # Remember the titles on page 0 so a later identical page
                    # (the server repeating itself past the end) can be detected.
                    self.zero_page_first_titles = [x.xpath('span')[0].text for x in page.xpath('//ol/li')]
                else:
                    lis = page.xpath('//ol/li')
                    if len(lis) == len(self.zero_page_first_titles) and all([self.zero_page_first_titles[i] == lis[i].xpath('span')[0].text for i in range(len(lis))]):
                        self.__stop_flag = True
                        break
                yield from self.__get_results(page)
                self.__page_num += 1
            except EmptyPageException:
                self.__stop_flag = True
            except OSError:
                # Transient network failure: back off briefly and retry the same page.
                import time
                time.sleep(3)
| StarcoderdataPython |
1680731 | import boto3
from botocore.exceptions import ClientError
client = boto3.client('rds')
def get_parameter_group_family(engine_name, engine_version):
    """Look up the DB parameter group family for an RDS engine/version pair.

    Prints and returns the family name (e.g. "mysql8.0"); on an AWS API
    error, prints the error code and returns None.
    """
    try:
        versions = client.describe_db_engine_versions(
            Engine=engine_name,
            EngineVersion=engine_version
        )
        # The first matching engine version carries the family we need.
        family = versions['DBEngineVersions'][0]['DBParameterGroupFamily']
        print(family)
        return family
    except ClientError as e:
        print("The utility failed with the error " + e.response['Error']['Code'])
        return None
1633619 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import multiprocessing
class BaseExtractor(object):
    """Template for package extractors.

    Subclasses supply ``extract``, ``wait_complete`` and ``shutdown``;
    ``run`` drives the lifecycle, turns Ctrl-C into an orderly shutdown,
    and guarantees ``final`` always executes.
    """

    def __init__(self, names, max_workers=None):
        self._names = names
        if max_workers:
            self._max_workers = max_workers
        else:
            # Default sized for I/O-bound work: 4 workers per CPU core.
            self._max_workers = multiprocessing.cpu_count() * 4

    def run(self, job):
        """Execute *job* through the extract / wait / finalize lifecycle."""
        try:
            self.extract(job)
            self.wait_complete()
        except KeyboardInterrupt:
            print('** Shutting down ...')
            self.shutdown()
        else:
            print('^.^ Extracting all packages done!')
        finally:
            self.final()

    def extract(self, job):
        """Start extraction work for *job* (must be overridden)."""
        raise NotImplementedError

    def wait_complete(self):
        """Block until all in-flight work finishes (must be overridden)."""
        raise NotImplementedError

    def shutdown(self):
        """Abort in-flight work on interrupt (must be overridden)."""
        raise NotImplementedError

    def final(self):
        """Optional cleanup hook; runs unconditionally after run()."""
| StarcoderdataPython |
1739238 | <gh_stars>0
#!/usr/bin/python
import os
import shutil
# Obtain and import the installLib library - should work on Windows and Linux / MacOS.
if os.path.exists("install-lib"):
os.system("git -C install-lib pull https://github.com/dhicks6345789/install-lib.git")
else:
os.system("git clone https://github.com/dhicks6345789/install-lib.git")
shutil.copyfile("install-lib/installLib.py", "./installLib.py")
import installLib
# A function to write a bunch of commands to a batch file and then run.
def runAsBatchFile(theBatchFileLines):
outputHandle = open("temp.bat", "w")
outputHandle.write("@echo off\n")
for batchFileLine in theBatchFileLines:
outputHandle.write(batchFileLine + "\n")
outputHandle.close()
os.system("temp.bat")
os.remove("temp.bat")
# Register the options the installer accepts, then prompt interactively for
# any value not already supplied on the command line.
installLib.validValueOptions = ["-googleClientID", "-googleClientSecret", "-configFolder", "-dataFolder"]
installLib.getUserOption("-googleClientID", "Enter the Google Client ID used to connect rclone")
installLib.getUserOption("-googleClientSecret", "Enter the Google Client Secret used to connect rclone")
installLib.getUserOption("-configFolder", "Enter the Google Drive folder that contains the Data Tools config")
installLib.getUserOption("-dataFolder", "Enter the (writeable) Google Drive folder that contains the MIS Data")
# Check if we're on Windows.
# NOTE(review): the nesting below is reconstructed from context — everything
# down to the pip installs uses Windows paths / the "py" launcher, so it is
# placed inside this branch; confirm against the original script.
if os.name == "nt":
    # Make sure PExpect is installed.
    installLib.runIfPathMissing(installLib.pythonHome + os.sep + "Lib" + os.sep + "site-packages" + os.sep + "pexpect", "\"" + installLib.pipExe + "\" install pexpect")
    import pexpect
    import pexpect.popen_spawn
    # Code goes here - check for rclone, install it if needed.
    rclonePath = "C:\\Program Files\\rclone\\rclone.exe"
    # Make sure rclone is configured.
    if not os.path.exists(installLib.userHome + os.sep + ".config" + os.sep + "rclone" + os.sep + "rclone.conf"):
        # Drive "rclone config" interactively: create a new Google Drive remote
        # named "drive" with the supplied client ID / secret, accept defaults
        # elsewhere, and decline auto-config / team-drive setup.
        rclone = pexpect.popen_spawn.PopenSpawn(rclonePath.replace("\\", "/") + " config")
        rclone.expect("n/s/q>")
        rclone.send("n\n")
        rclone.expect("name>")
        rclone.send("drive\n")
        rclone.expect("Storage>")
        rclone.send("drive\n")
        rclone.expect("client_id>")
        rclone.send(installLib.userOptions["-googleClientID"] + "\n")
        rclone.expect("client_secret>")
        rclone.send(installLib.userOptions["-googleClientSecret"] + "\n")
        rclone.expect("scope>")
        rclone.send("drive\n")
        rclone.expect("root_folder_id>")
        rclone.send("\n")
        rclone.expect("service_account_file>")
        rclone.send("\n")
        rclone.expect("y/n>")
        rclone.send("n\n")
        rclone.expect("y/n>")
        rclone.send("y\n")
        rclone.expect("y/n>")
        rclone.send("n\n")
        rclone.expect("y/e/d>")
        rclone.send("y\n")
        rclone.expect("e/n/d/r/c/s/q>")
        rclone.send("q\n")
    # At this point, we should be able to get data from Google Drive.
    runAsBatchFile(["echo Getting data from Google Drive.",
                    "\"" + rclonePath + "\" sync \"drive:" + installLib.userOptions["-configFolder"] + "\" config",
                    "\"" + rclonePath + "\" sync \"drive:" + installLib.userOptions["-dataFolder"] + "\" \"..\\Documents\\User Tools Data\""])
    # Make sure XLRD (Python library for handling Excel files, required for Excel support in Pandas) is installed.
    installLib.runIfPathMissing("C:\\Program Files\\"+installLib.pythonVersion+"\\Lib\\site-packages\\xlrd", "py -m pip install xlrd")
    # Make sure OpenPyXL (Python library for handling Excel files, required for Excel support in Pandas) is installed.
    installLib.runIfPathMissing("C:\\Program Files\\"+installLib.pythonVersion+"\\Lib\\site-packages\\openpyxl", "py -m pip install openpyxl")
    # Make sure Pandas (Python data-analysis library) is installed.
    installLib.runIfPathMissing("C:\\Program Files\\"+installLib.pythonVersion+"\\Lib\\site-packages\\pandas", "py -m pip install pandas")
    # Make sure Numpy (Python maths library) is installed.
    installLib.runIfPathMissing("C:\\Program Files\\"+installLib.pythonVersion+"\\Lib\\site-packages\\numpy", "py -m pip install numpy")
    # Make sure ReportLab (Python PDF creation library) is installed.
    installLib.runIfPathMissing("C:\\Program Files\\"+installLib.pythonVersion+"\\Lib\\site-packages\\reportlab", "py -m pip install reportlab")
    # Make sure PyPDF2 (Python PDF manipulation library) is installed.
    installLib.runIfPathMissing("C:\\Program Files\\"+installLib.pythonVersion+"\\Lib\\site-packages\\PyPDF2", "py -m pip install PyPDF2")
# Code goes here - check for GAM, install it and set it up if needed.
100755 | """
<NAME>, 2018
All rights reserved
"""
import torch
from torchvision import datasets, transforms
def load_training_data(args, kwargs):
    """Build a shuffling DataLoader over the MNIST training split.

    Args:
        args: namespace providing ``batch_size``.
        kwargs: extra DataLoader keyword arguments (e.g. num_workers,
            pin_memory) expanded into the constructor.

    Downloads MNIST into ``../data`` on first use.
    """
    dataset = datasets.MNIST('../data', train=True, download=True,
                             transform=transforms.ToTensor())
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True, **kwargs)
def load_test_data(args, kwargs):
    """Build a shuffling DataLoader over the MNIST test split.

    Args:
        args: namespace providing ``batch_size``.
        kwargs: extra DataLoader keyword arguments expanded into the
            constructor.

    Note: unlike the training loader, no download flag is passed — the data
    is expected to already exist under ``../data``.
    """
    dataset = datasets.MNIST('../data', train=False,
                             transform=transforms.ToTensor())
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=args.batch_size,
                                       shuffle=True, **kwargs)
1706158 | <gh_stars>0
import decimal
from django.conf import settings
from django.db import models
from shop.models import Product
from django.core.validators import MinValueValidator, MaxValueValidator
class Order(models.Model):
    """A customer's order: billing details, Stripe reference, and payment state.

    Line items live in the related ``OrderItem`` model (reachable as
    ``self.items``).
    """

    # Owning customer; a user's orders are reachable via user.orders.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='orders')
    billing_name = models.CharField(max_length=100, null=True, verbose_name='Name')
    billing_address = models.CharField(max_length=255, null=True, verbose_name='Address')
    billing_email_address = models.EmailField(max_length=255, null=True, verbose_name='Email Address')
    billing_city = models.CharField(max_length=50, null=True, verbose_name='City')
    billing_country = models.CharField(max_length=100, null=True, verbose_name='Country')
    billing_post_code = models.CharField(max_length=30, null=True, verbose_name='Post Code')
    # Stripe payment identifier, set once the charge is processed.
    stripe_id = models.CharField(max_length=50, null=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    paid = models.BooleanField(default=False)
    # Percentage discount (0-100); None or 0 means no discount.
    discount = models.IntegerField(blank=True, null=True, default=None)

    class Meta:
        # Newest orders first by default.
        ordering = ('-created',)

    def __str__(self):
        return f'{self.user} - {str(self.id)}'

    def get_total_price(self):
        """Return the total cost of all items with the percentage discount applied.

        NOTE(review): ``self.discount / 100`` is float division whose result is
        then wrapped in Decimal and the final value truncated with int();
        consider ``Decimal(self.discount) / 100`` and explicit quantization —
        confirm the intended rounding before changing.
        """
        total = sum(item.get_cost() for item in self.items.all())
        if self.discount:
            discount_price = decimal.Decimal(self.discount / 100) * total
            return int(total - discount_price)
        return total

    def get_total_without_discount(self):
        """Return the sum of line-item costs, ignoring any discount."""
        total = sum(item.get_cost() for item in self.items.all())
        return total
class OrderItem(models.Model):
    """A single product line within an Order (product, unit price, quantity)."""

    order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='items')
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='order_items')
    # Unit price captured at purchase time (independent of later product price changes).
    price = models.DecimalField(decimal_places=2, max_digits=6)
    quantity = models.PositiveSmallIntegerField(default=1)

    def __str__(self):
        return str(self.id)

    def get_cost(self):
        """Return the line total: unit price times quantity."""
        return self.price * self.quantity
class Coupon(models.Model):
    """A discount code valid within a date window.

    ``discount`` is a whole-number percentage (0-100); ``active`` gates
    whether the code can currently be redeemed.
    """

    code = models.CharField(max_length=30, unique=True)
    valid_from = models.DateTimeField()
    valid_to = models.DateTimeField()
    # Percentage off, constrained to 0-100 by validators.
    discount = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)])
    active = models.BooleanField(default=False)

    def __str__(self):
        return self.code
1762584 | <reponame>cristi2019255/LocalSearchAlgorithms
import numpy as np
import math
def generate_graph(size=10, fully_connected=False, random=False):
    """Generate a graph for the graph-bipartitioning problem.

    Vertices are placed evenly on a circle of radius 1 centred at (0.5, 0.5).
    By default the graph is a simple ring (vertex i connects to i-1 and i+1);
    with ``fully_connected`` every pair of vertices is joined; with ``random``
    each vertex except the last additionally gains edges to 3 randomly chosen
    vertices (self-loops and duplicates are skipped; edges stay symmetric).

    Args:
        size: number of vertices. Defaults to 10.
        fully_connected: join every vertex pair when True. Defaults to False.
        random: add random extra edges when True. Defaults to False.

    Returns:
        (adjacency, positions): per-vertex neighbour lists and the (x, y)
        plane coordinates of each vertex.
    """
    two_pi_step = 2 * math.pi / size
    angle = 0
    adjacency = []
    coords = []
    cx = np.cos(angle) + 0.5
    cy = np.sin(angle) + 0.5
    for i in range(size):
        if fully_connected:
            # Every other vertex is a neighbour.
            neighbours = list(np.append(np.arange(i), np.arange(i + 1, size)))
        elif i == size - 1:
            # Close the ring back to vertex 0.
            neighbours = [i - 1, 0]
        elif i == 0:
            neighbours = [size - 1, i + 1]
        else:
            neighbours = [i - 1, i + 1]
        adjacency.append(neighbours)
        coords.append((cx, cy))
        # Advance around the circle by accumulating the angle step.
        angle += two_pi_step
        cx = np.cos(angle) + 0.5
        cy = np.sin(angle) + 0.5
    if random:
        for i in range(size):
            if i < size - 1:  # the last vertex draws no extra edges
                extra = np.random.randint(0, high=size, size=3)
                for v in extra:
                    if not (v == i):  # skip self-loops
                        if v not in adjacency[i]:
                            adjacency[i].append(v)
                        if i not in adjacency[v]:
                            adjacency[v].append(i)
    return adjacency, coords
def save_graph(file_name, graph, pos):
    """Write the graph to a text file, one vertex per line.

    Line format: ``<id> (<x>,<y>) <degree> <neighbour ids...>`` with all
    vertex ids 1-based.

    Args:
        file_name: destination path.
        graph: adjacency list; graph[i] holds the neighbours of vertex i (0-based).
        pos: pos[i] is the (x, y) plane coordinate of vertex i.
    """
    with open(file_name, 'w') as f:
        for i in range(len(graph)):
            # Close the coordinate tuple before the degree — the original
            # omitted "') '", fusing the y-coordinate and the degree together.
            line = str(i + 1) + ' (' + str(pos[i][0]) + ',' + str(pos[i][1]) + ') ' + str(len(graph[i]))
            for v in graph[i]:
                line += ' ' + str(v + 1)
            f.write(line + '\n')
        # No explicit f.close(): the with-block closes the file (the original
        # called close() redundantly inside the context manager).
| StarcoderdataPython |
4499 | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype validators.**
This submodule publishes a PEP-compliant hierarchy of subscriptable (indexable)
classes enabling callers to validate the internal structure of arbitrarily
complex scalars, data structures, and third-party objects. Like annotation
objects defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these
classes dynamically generate PEP-compliant type hints when subscripted
(indexed) and are thus intended to annotate callables and variables. Unlike
annotation objects defined by the :mod:`typing` module, these classes are *not*
explicitly covered by existing PEPs and thus *not* directly usable as
annotations.
Instead, callers are expected to (in order):
#. Annotate callable parameters and returns to be validated with
:pep:`593`-compliant :attr:`typing.Annotated` type hints.
#. Subscript those hints with (in order):
#. The type of those parameters and returns.
#. One or more subscriptions of classes declared by this submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.vale._is._valeis import _IsFactory
from beartype.vale._is._valeistype import (
_IsInstanceFactory,
_IsSubclassFactory,
)
from beartype.vale._is._valeisobj import _IsAttrFactory
from beartype.vale._is._valeisoper import _IsEqualFactory
# ....................{ SINGLETONS                        }....................
# Public factory singletons instantiating these private factory classes.
# Users subscript these (e.g. ``Is[lambda obj: bool(obj)]``) to synthesize
# beartype validators; ``basename`` supplies the human-readable name used in
# each factory's representations and error messages.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')

# Delete all private factory classes imported above for safety, ensuring only
# the public singletons remain in this module's namespace.
del (
    _IsFactory,
    _IsAttrFactory,
    _IsEqualFactory,
    _IsInstanceFactory,
    _IsSubclassFactory,
)
# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes support constraints resembling:
#
#* String constraints:
# * Email.
# * Uuid.
# * Choice.
# * Language.
# * Locale.
# * Country.
# * Currency.
#* Comparison constraints
# * IdenticalTo.
# * NotIdenticalTo.
# * LessThan.
# * GreaterThan.
# * Range.
# * DivisibleBy.
#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a new
# mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter, which
# that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each "_BeartypeValidatorFactoryABC" subclass will need
# to additionally define and pass that callable when creating and returning
# its "BeartypeValidator" instance.
#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported by
#any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
# from beartype import beartype
# from beartype.is import Portable
# NonEmptyStringTest = Is[lambda text: bool(text)]
# NonEmptyString = Portable[str, NonEmptyStringTest]
# @beartype
# def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* This new metaclass of this new class simply defines an __isinstancecheck__()
# dunder method. For the above example, this would be:
# class NonEmptyStringMetaclass(object):
# def __isinstancecheck__(cls, obj) -> bool:
# return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
# would be:
# class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
# pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.
| StarcoderdataPython |
1729035 | <reponame>ilopezgp/human_impacts
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import anthro.viz
import anthro.io
from pywaffle import Waffle
# Apply the project's plotting style (side effect on matplotlib rcParams).
colors = anthro.viz.plotting_style()
data = pd.read_csv('../../../data/agriculture/FAO_fish_production_quantities/processed/FAO_FishStatJ_total_mass_species.csv')
# Restrict to aquaculture ("cultured") production and separate by catch type.
# NOTE(review): an earlier comment said "restrict to most recent year", but
# the loop below iterates over *all* years — confirm intent.
culture = data[data['source']=='cultured']
# Do manual categorization: per year, bucket species into carp, tilapia,
# shrimp/prawns, shellfish, and "other", estimating standing population as
# residence_time [yr] × annual production [kg] / individual mass [kg].
dfs = []
for g, d in culture.groupby(['year']):
    # Carp (the trailing-space pattern plus the 'carpet' exclusion avoids
    # matching carpet-shell clams).
    _carp = d[d['species'].str.contains(' carp') &
              ~d['species'].str.contains('carpet')]
    carp_species = _carp['species'].values
    # Convert tonnes to kg (1 t = 1e3 kg).
    carp = pd.DataFrame([_carp['produced_mass_t'].sum() * 1E3], columns=['produced_mass_kg'])
    carp['category'] = 'carp'
    carp['approx_indiv_mass_kg'] = 3
    carp['approx_res_time_yr'] = 0.7
    carp['population'] = carp['approx_res_time_yr'].values * carp['produced_mass_kg'].values\
        / carp['approx_indiv_mass_kg'].values
    carp['biomass_kg'] = carp['population'].values * carp['approx_indiv_mass_kg']

    # Tilapia
    _tilapia = d[d['species'].str.contains('tilapia')]
    tilapia_species = _tilapia['species'].values
    tilapia = pd.DataFrame([_tilapia['produced_mass_t'].sum() * 1E3], columns=['produced_mass_kg'])
    tilapia['category'] = 'tilapia'
    tilapia['approx_indiv_mass_kg'] = 2
    tilapia['approx_res_time_yr'] = 0.7
    tilapia['population'] = tilapia['approx_res_time_yr'].values * tilapia['produced_mass_kg'].values\
        / tilapia['approx_indiv_mass_kg'].values
    tilapia['biomass_kg'] = tilapia['population'].values * tilapia['approx_indiv_mass_kg']

    # Shrimp and prawns
    _shrimp = d[d['species'].str.contains('shrimp') |
                d['species'].str.contains('prawn')]
    shrimp_species = _shrimp['species'].values
    shrimp = pd.DataFrame([_shrimp['produced_mass_t'].sum() * 1E3], columns=['produced_mass_kg'])
    shrimp['category'] = 'shrimp & prawns'
    shrimp['approx_indiv_mass_kg'] = 0.05
    shrimp['approx_res_time_yr'] = 0.5
    shrimp['population'] = shrimp['approx_res_time_yr'].values * shrimp['produced_mass_kg'].values\
        / shrimp['approx_indiv_mass_kg'].values
    shrimp['biomass_kg'] = shrimp['population'].values * shrimp['approx_indiv_mass_kg']
    # (Bare expression below — a notebook-style echo; no effect as a script.)
    shrimp

    # Shellfish: clams, carpet shells, mussels, cupped oysters, abalone,
    # scallops, oysters.
    _shells = d[d['species'].str.contains('clam') |
                d['species'].str.contains('carpet') |
                d['species'].str.contains('mussel') |
                d['species'].str.contains('cupped') |
                d['species'].str.contains('abalone') |
                d['species'].str.contains('scallop') |
                d['species'].str.contains('oyster')]
    shell_species = _shells['species'].values
    shells = pd.DataFrame([_shells['produced_mass_t'].sum() * 1E3], columns=['produced_mass_kg'])
    shells['category'] = 'oysters, clams, & mussels'
    shells['approx_indiv_mass_kg'] = 0.1
    shells['approx_res_time_yr'] = 2
    shells['population'] = shells['approx_res_time_yr'].values * shells['produced_mass_kg'].values\
        / shells['approx_indiv_mass_kg'].values
    shells['biomass_kg'] = shells['population'].values * shells['approx_indiv_mass_kg']

    # Concatenate these named categories, then lump everything else as "other".
    categorized = pd.concat([carp, tilapia, shrimp, shells], sort=False)
    cat_species = list(np.concatenate([carp_species, tilapia_species, shrimp_species, shell_species]))
    _others = d[~d['species'].isin(cat_species)]
    other = pd.DataFrame([_others['produced_mass_t'].sum() * 1E3], columns=['produced_mass_kg'])
    other['category'] = 'other'
    other['approx_indiv_mass_kg'] = 3
    other['approx_res_time_yr'] = 1
    other['population'] = other['approx_res_time_yr'].values * other['produced_mass_kg'].values\
        / other['approx_indiv_mass_kg'].values
    other['biomass_kg'] = other['population'].values * other['approx_indiv_mass_kg'].values

    # Merge everything for this year and stamp the year.
    categorized = pd.concat([categorized, other], sort=False)
    categorized['year'] = g
    dfs.append(categorized)
categorized = pd.concat(dfs, sort=False)
categorized.to_csv('./aquaculture_categorized.csv', index=False)
# %%
| StarcoderdataPython |
4824689 | <filename>mailchimp3/entities/campaignfolders.py
# coding=utf-8
"""
The Campaign Folders API endpoints
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/campaign-folders/
Schema: https://api.mailchimp.com/schema/3.0/CampaignFolders/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
class CampaignFolders(BaseApi):
    """
    Organize your campaigns using folders.
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize the endpoint and reset the cached folder id.
        """
        super(CampaignFolders, self).__init__(*args, **kwargs)
        self.endpoint = 'campaign-folders'
        self.folder_id = None
    def create(self, data):
        """
        Create a new campaign folder.
        :param data: The request body parameters
        :type data: :py:class:`dict`
        data = {
            "name": string*
        }
        """
        # 'name' is the only required field for this endpoint; fail fast
        # client-side before issuing the request.
        if 'name' not in data:
            raise KeyError('The campaign folder must have a name')
        response = self._mc_client._post(url=self._build_path(), data=data)
        # Cache the id of the freshly created folder for follow-up calls.
        self.folder_id = response['id'] if response is not None else None
        return response
    def all(self, get_all=False, **queryparams):
        """
        Get all folders used to organize campaigns.
        :param get_all: Should the query get all results
        :type get_all: :py:class:`bool`
        :param queryparams: The query string parameters
        queryparams['fields'] = []
        queryparams['exclude_fields'] = []
        queryparams['count'] = integer
        queryparams['offset'] = integer
        """
        self.folder_id = None
        # Either page through every result or fetch a single page; both
        # callables take the same (url, **queryparams) signature.
        fetch = self._iterate if get_all else self._mc_client._get
        return fetch(url=self._build_path(), **queryparams)
    def get(self, folder_id, **queryparams):
        """
        Get information about a specific folder used to organize campaigns.
        :param folder_id: The unique id for the campaign folder.
        :type folder_id: :py:class:`str`
        :param queryparams: The query string parameters
        queryparams['fields'] = []
        queryparams['exclude_fields'] = []
        """
        # Remember which folder was last addressed, then fetch it.
        self.folder_id = folder_id
        return self._mc_client._get(url=self._build_path(folder_id), **queryparams)
    def update(self, folder_id, data):
        """
        Update a specific folder used to organize campaigns.
        :param folder_id: The unique id for the campaign folder.
        :type folder_id: :py:class:`str`
        :param data: The request body parameters
        :type data: :py:class:`dict`
        data = {
            "name": string*
        }
        """
        self.folder_id = folder_id
        # A folder must always carry a name; reject nameless updates early.
        if 'name' not in data:
            raise KeyError('The campaign folder must have a name')
        return self._mc_client._patch(url=self._build_path(folder_id), data=data)
    def delete(self, folder_id):
        """
        Delete a specific campaign folder; the campaigns it contained are
        marked as 'unfiled' by the API.
        :param folder_id: The unique id for the campaign folder.
        :type folder_id: :py:class:`str`
        """
        self.folder_id = folder_id
        return self._mc_client._delete(url=self._build_path(folder_id))
| StarcoderdataPython |
3262956 | '''
Description:
Version: 1.0
Autor: Zhangzixu
Date: 2022-01-02 18:46:09
LastEditors: Zhangzixu
LastEditTime: 2022-01-10 13:25:22
'''
# optimizer
# SGD with momentum; the low base LR (1e-4) is presumably tuned for this
# experiment — confirm before reusing elsewhere.
optimizer = dict(type='SGD', lr=1e-4, momentum=0.90, weight_decay=5e-4)
# No gradient clipping.
optimizer_config = dict(grad_clip=None)
# Polynomial LR decay (power 0.9) down to min_lr, stepped per epoch.
lr_config = dict(policy='poly', power=0.9, min_lr=1e-6, by_epoch=True)
total_epochs = 1500
| StarcoderdataPython |
83182 | <filename>practice/string/string/string/string.py
# Demonstrates that single-quoted strings may embed double quotes without
# escaping, while \n escapes still produce a multi-line result when printed.
print(' a string that you "dont" have to escape \n This \n is a multi-line \n heredoc string -------> example')
| StarcoderdataPython |
1703680 | # File: api_caller.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
import json
class ApiCaller:
    """Base class for Falcon Sandbox API endpoint callers.

    Subclasses declare ``request_method_name`` and ``endpoint_url``; this
    class fills ``@placeholder`` URL segments, performs the HTTP request
    through a supplied request handler and normalizes the response into a
    user-facing status message.
    """

    CONST_REQUEST_METHOD_GET = 'get'
    CONST_REQUEST_METHOD_POST = 'post'

    CONST_EXPECTED_DATA_TYPE_JSON = 'json'
    CONST_EXPECTED_DATA_TYPE_FILE = 'file'

    CONST_API_AUTH_LEVEL_RESTRICTED = 1
    CONST_API_AUTH_LEVEL_DEFAULT = 100
    CONST_API_AUTH_LEVEL_ELEVATED = 500
    CONST_API_AUTH_LEVEL_SUPER = 1000

    # Class-level defaults. The mutable ones (data/params/files/
    # api_response_json) are re-created per instance in __init__ so that
    # instances never share — and concurrently mutate — the same dicts.
    api_key = ''
    server = ''
    endpoint_url = ''
    endpoint_auth_level = CONST_API_AUTH_LEVEL_RESTRICTED
    data = {}
    params = {}
    files = {}
    request_method_name = ''
    api_result_msg = ''
    api_unexpected_error_msg = 'Unexpected error has occurred (HTTP code: {}). Please try again later or connect with the support'
    api_unexpected_error_404_msg = 'Unexpected error has occurred (HTTP code: {}). This error is mostly occurring when called webservice is outdated and so does not support current action. If you believe it is an error, please contact with the support'
    api_success_msg = 'Your request was successfully processed by Falcon Sandbox'
    api_expected_error_msg = 'API error has occurred. HTTP code: {}, message: \'{}\''
    api_response = None
    api_expected_data_type = CONST_EXPECTED_DATA_TYPE_JSON
    api_response_json = {}

    def __init__(self, api_key, server, phantom):
        """
        :param api_key: Falcon Sandbox API key, sent as the ``api-key`` header.
        :param server: base server URL (scheme + host), no trailing slash.
        :param phantom: Phantom connector object used for debug logging.
        """
        self.api_key = api_key
        self.server = server
        self.phantom = phantom
        # Per-instance mutable state; previously these were only the shared
        # class-level dicts above and got mutated across instances.
        self.data = {}
        self.params = {}
        self.files = {}
        self.api_response = None
        self.api_response_json = {}
        self.api_result_msg = ''
        self.check_class_options()

    def check_class_options(self):
        """Ensure the subclass declared the mandatory class attributes."""
        requested_fields = ['request_method_name', 'endpoint_url']
        for requested_field in requested_fields:
            if getattr(self, requested_field) == '':
                raise Exception('Value for \'{}\' should be declared in class \'{}\'.'.format(requested_field, self.__class__.__name__))

    def call(self, request_handler, headers=None, verify_server=False):
        """Perform the HTTP request for this endpoint.

        :param request_handler: object exposing ``get``/``post`` (e.g. the
            ``requests`` module or a session).
        :param headers: optional extra HTTP headers; defaults to a fresh
            ``{'User-agent': 'VxPhantom'}`` dict on every call.
        :param verify_server: TLS certificate verification flag.
        """
        if headers is None:
            # Built fresh per call: the previous mutable default dict was
            # shared between calls and had the api-key written into it below.
            headers = {'User-agent': 'VxPhantom'}
        if '@' in self.endpoint_url:
            raise Exception('Can\'t call API endpoint with url \'{}\', when some placeholders are still not filled.'.format(self.endpoint_url))
        self.params.update({'is_phantom': '1'})
        request_url = '{}/api/v2{}'.format(self.server, self.endpoint_url)
        self.phantom.debug_print('request_method', self.request_method_name)
        self.phantom.debug_print('request_url', request_url)
        self.phantom.debug_print('request_params', self.params)
        self.phantom.debug_print('request_data', self.data)
        caller_function = getattr(request_handler, self.request_method_name)
        headers['api-key'] = self.api_key
        self.phantom.debug_print('sent_headers', headers)
        self.api_response = caller_function(request_url, data=self.data, params=self.params, files=self.files, headers=headers, verify=verify_server)
        self.phantom.debug_print('request_headers', self.api_response.headers)
        self.phantom.debug_print('request_content_type', self.api_response.headers['Content-Type'])
        self.phantom.debug_print('request_code', self.api_response.status_code)
        if self.api_expected_data_type == self.CONST_EXPECTED_DATA_TYPE_JSON:
            self.phantom.debug_print('request_content', self.api_response.text)
        self.api_result_msg = self.prepare_response_msg()

    def get_api_response(self):
        """Return the raw response object of the last request."""
        if self.api_response is None:
            raise Exception('It\'s not possible to get api response before doing request.')
        return self.api_response

    def attach_data(self, options):
        """Attach request-body data and resolve URL placeholders from it."""
        self.data = self.modify_params(options)
        self.build_url(self.data)

    def attach_params(self, params):
        """Attach query-string params and resolve URL placeholders from them."""
        self.params = self.modify_params(params)
        self.build_url(self.params)

    def modify_params(self, params):
        """Strip Phantom's 'context' key and apply the optional map_params hook."""
        params.pop('context', None)
        if hasattr(self, 'map_params'):
            params = self.map_params(params)
        return params

    def attach_files(self, files):
        """Attach files for a multipart request."""
        self.files = files

    def if_request_success(self):
        # Only 2xx status codes count as success. The previous
        # int(status / 200) == 1 check wrongly accepted 3xx codes too.
        return int(self.api_response.status_code) // 100 == 2

    def prepare_response_msg(self):
        """Build the human-readable result message for the last response."""
        if self.api_response is None:
            raise Exception('It\'s not possible to get response message since API was not called.')
        if self.if_request_success() is True:
            if self.api_expected_data_type == self.CONST_EXPECTED_DATA_TYPE_JSON:
                self.api_response_json = self.get_response_json()
            self.api_result_msg = self.api_success_msg
        else:
            if self.api_response.headers['Content-Type'] == 'application/json':
                # Controlled API error: surface the server-provided message.
                self.api_response_json = self.api_response.json()
                self.api_result_msg = self.api_expected_error_msg.format(self.api_response.status_code, self.api_response_json['message'])
            else:
                if self.api_response.status_code == 404:
                    self.api_result_msg = self.api_unexpected_error_404_msg.format(self.api_response.status_code)
                else:
                    self.api_result_msg = self.api_unexpected_error_msg.format(self.api_response.status_code)
        return self.api_result_msg

    def get_response_status_code(self):
        """Return the HTTP status code of the last response."""
        if self.api_response is None:
            raise Exception('It\'s not possible to get response code since API was not called.')
        return self.api_response.status_code

    def get_prepared_response_msg(self):
        """Return the cached result message, building it on first access."""
        if self.api_result_msg == '':
            self.api_result_msg = self.prepare_response_msg()
        return self.api_result_msg

    def get_response_json(self):
        """Return the parsed JSON body of the last response (cached)."""
        if self.api_response is None:
            raise Exception('It\'s not possible to get response json since API was not called.')
        elif bool(self.api_response_json) is False:
            try:
                if self.api_response.headers['Content-Type'] == 'application/json':
                    self.api_response_json = self.api_response.json()
                elif self.api_response.headers['Content-Type'].startswith('text/html'):
                    # let's be more tolerant and accept situation when content type is not valid, but response has proper json
                    self.api_response_json = json.loads(self.api_response.text)
                else:
                    '''
                    Some of endpoints can return mixed content type - like file type(success) and json(controlled errors).
                    Let's return there empty dictionary, as it's already properly handled by other project parts.
                    '''
                    self.api_response_json = {}
            except ValueError:
                '''
                When response has status code equal 200 and we're expecting json, there should be json always.
                Let's ignore other cases as for errors like 50x, when we're getting html page instead.
                That case should be handled in some other place.
                '''
                # Fixed: the original compared request_method_name ('get'/
                # 'post') against 'json', which was always False, so parse
                # failures on expected-JSON responses were silently ignored.
                if self.if_request_success() and self.api_expected_data_type == self.CONST_EXPECTED_DATA_TYPE_JSON:
                    raise Exception('Failed to parse response: \'{}\''.format(self.api_response.text))
                else:
                    self.api_response_json = {}
        return self.api_response_json

    def build_url(self, params):
        """Fill ``@key`` placeholders in endpoint_url from *params*.

        Consumed keys are removed; leftovers become query params (GET) or
        body data (other methods).
        """
        if '@' in self.endpoint_url:
            url_data = params
            url_data_copy = url_data.copy()
            for key, value in url_data.items():
                searched_key = '@' + key
                if searched_key in self.endpoint_url:
                    self.endpoint_url = self.endpoint_url.replace(searched_key, value)
                    del url_data_copy[key]  # Working on copy, since it's not possible to manipulate dict size, during iteration
            if self.request_method_name == self.CONST_REQUEST_METHOD_GET:
                self.params = url_data_copy
            else:
                self.data = url_data_copy

    def get_full_endpoint_url(self):
        """Return server base URL joined with the (resolved) endpoint URL."""
        return self.server + self.endpoint_url
| StarcoderdataPython |
1700698 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
import datetime
import json
import os
from enum import Enum
# TODO: implement
def index_coordinates(pages):
    """Creates an index of coordinates by text offsets.
    Gets called by `self.from_json()`.
    """
    index = []
    for page in pages:
        for token in page['t']:
            entry = {
                'page_id': page['id'],
                'coords': token['c'],
                'start_offset': token['s'],
                'end_offset': token['s'] + token['l'],
            }
            # Carry over hyphenation markers only when present on the token.
            for hyphen_key in ('hy1', 'hy2'):
                if hyphen_key in token:
                    entry[hyphen_key] = token[hyphen_key]
            index.append(entry)
    return index
class ContentItemCase(Enum):
    """Amount of information carried by a `ContentItem` instance."""
    FULL = "FULL"  # all info
    TEXT = "TEXT"  # min info + text
    LIGHT = "LIGHT"  # min info


class ContentItem:
    """
    Class which represents an impresso (rebuilt) content item.
    TODO: complement

    :ivar str id: canonical content item id
    :ivar str lg: language code
    :ivar str type: content item type
    :ivar datetime.date date: issue date, parsed from the canonical id
    :ivar str journal: journal acronym, parsed from the canonical id
    :ivar ContentItemCase case: how much information is loaded (LIGHT/TEXT/FULL)
    :ivar dict text_offsets: pages/regions/paragraphs/lines
    """
    def __init__(self, ci_id, lg, tp):
        """Constructor.

        :param str ci_id: canonical id, e.g. 'GDL-1900-01-02-a-i0001'
        :param str lg: language code
        :param str tp: content item type
        """
        self.id = ci_id
        self.lg = lg
        self.type = tp
        self.date = self.build_date(ci_id)
        self.journal = self.build_journal(ci_id)
        self._text_offsets = {}
        self.__coordinates = []
        # Directly-constructed items carry minimal information; `from_json`
        # overrides this. Previously `case` was set only in `from_json`, so
        # __str__ crashed (AttributeError) on directly-built instances.
        self.case = ContentItemCase.LIGHT
    @staticmethod
    def build_date(ci_id):
        # Canonical id format: JOURNAL-YYYY-MM-DD-... (dash-separated).
        tmp = ci_id.split("-")
        return datetime.date(int(tmp[1]), int(tmp[2]), int(tmp[3]))
    @staticmethod
    def build_journal(ci_id):
        # The journal acronym is the first dash-separated id segment.
        return ci_id.split("-")[0]
    @property
    def title(self):
        return self.__title
    @title.setter
    def title(self, value):
        # Fixed: was `self.title = value`, which re-entered this setter and
        # recursed forever.
        self.__title = value
    @property
    def lines(self):
        return self.__lines
    @lines.setter
    def lines(self, value):
        self.__lines = value
    @property
    def paragraphs(self):
        return self.__paragraphs
    @paragraphs.setter
    def paragraphs(self, value):
        self.__paragraphs = value
    @property
    def pages(self):
        return self.__pages
    @pages.setter
    def pages(self, value):
        self.__pages = value
    @property
    def regions(self):
        return self.__regions
    @regions.setter
    def regions(self, value):
        self.__regions = value
    @property
    def fulltext(self):
        return self.__fulltext
    @fulltext.setter
    def fulltext(self, value):
        # Fixed: was `self.fulltext = value` (infinite recursion, as above).
        self.__fulltext = value
    @staticmethod
    def from_json(path=None, data=None, case=ContentItemCase.LIGHT):
        """Loads an instance of `ContentItem` from a JSON file.
        :param str path: path to a json file
        :param dict data: content item information
        :param enum case: content item configuration via `ContentItemCase`
            (LIGHT/TEXT/FULL)
        """
        assert data is not None or path is not None
        if data is not None:
            doc = ContentItem(data['id'], data['lg'], data['tp'])
            doc.case = case
            if case == ContentItemCase.TEXT or case == ContentItemCase.FULL:
                doc.__title = data['t'] if 't' in data else None
                doc.__fulltext = data['ft'] if 'ft' in data else None
            if case == ContentItemCase.FULL:
                doc.__lines = data['lb'] if 'lb' in data else None
                doc.__paragraphs = data['pb'] if 'pb' in data else None
                # Fixed: the guard previously tested 'pb' while reading 'rb',
                # raising KeyError whenever 'pb' was present without 'rb'.
                doc.__regions = data['rb'] if 'rb' in data else None
                doc.__pages = data['ppreb'] if 'ppreb' in data else None
                doc.__coordinates = index_coordinates(doc.__pages)
            return doc
        elif path is not None:
            # TODO: loading from a file path is not implemented yet.
            return
    def to_json(self, path, case=ContentItemCase.LIGHT):
        """Serialize this item to `<path>/<id>.json`; returns True on success."""
        data = {
            "id": self.id,
            "lg": self.lg,
            "tp": self.type
        }
        if case == ContentItemCase.TEXT or case == ContentItemCase.FULL:
            data['t'] = self.__title
            data['ft'] = self.__fulltext
        if case == ContentItemCase.FULL:
            data['lb'] = self.__lines
            data['pb'] = self.__paragraphs
            data['rb'] = self.__regions
            data['ppreb'] = self.__pages
        json_output_path = os.path.join(path, f'{self.id}.json')
        try:
            with open(json_output_path, 'w') as out_file:
                json.dump(data, out_file)
            return True
        except Exception as e:
            print(e)
            return False
    def get_coordinates(self, start_offset, end_offset):
        """Enables access to image coordinates by means of text offsets.
        returns a list of dictionaries like
        {
            'page_id': '....',
            'coordinates': [x,y,w,h]
        }
        """
        return list(filter(
            lambda c: c['start_offset'] >= start_offset and c['end_offset'] <= end_offset,
            self.__coordinates
        ))
    def __str__(self):
        # Fixed: stray trailing backslashes previously glued the f-string
        # chain onto the following `if` statement, which was a SyntaxError.
        s = (
            f'{self.__class__.__name__}:\n\t'
            f'ci_case={self.case}\n\t'
            f'ci_id={self.id}\n\t'
            f'ci_lg={self.lg}\n\t'
            f'ci_type={self.type}\n\t'
            f'ci_date={self.date}\n\t'
        )
        if self.case == ContentItemCase.TEXT \
                or self.case == ContentItemCase.FULL:
            s = s + (
                f'ci_fulltext={self.fulltext}\n\t'
                f'ci_title={self.title}\n\t'
            )
        return s
| StarcoderdataPython |
1618028 | <gh_stars>1-10
import numpy as np
#########################
### SIMULATION PARAMS ###
#########################
timestep = 1.
runtime = 20000.
num_threads = 9
######################
### NETWORK PARAMS ###
######################
#bg_noise_d1 = 95.
bg_noise_d1 = 80.
bg_noise_d2 = 57.
bg_weight_d1 = 2.5
bg_weight_d2 = 2.5
## NEURONS ##
num_neurons_per_channel = 40
# Channels are now aranged on an imaginary grid. The size of the grid determines the number of channels
grid_size = [[6, 6]] # 0th element = row, 1st element = column
num_channels = grid_size[0][0] * grid_size[0][1]
d1_params = {"V_m": -87.2,
"E_L": -87.2,
# Not specified, but check bottom left figure in Gertler Fig 2. vreset > V_rest
"V_reset": -87.2,
"V_th": -50.,
"g_L": 9.,
"C_m": 195.,
"tau_syn_in": 10.0,
"tau_syn_ex": 5.0
}
d2_params = {"V_m": -85.4,
"E_L": -85.4,
"V_reset": -85.4,
"V_th": -50.,
"g_L": 4.5,
"C_m": 159.,
"tau_syn_in": 10.0,
"tau_syn_ex": 5.0
}
## CONNECTIVITY ##
# Mimicing a distance dependent kernel: TL ---> gen---> TR; TR---> gen; TR-> TL; gen---> TL, TL -> TR ; ---> strong inhibition, -> weak inhibition
# Since now the number of neurons / channel have reduced to scale the connectivity, since now far connectivity is completely ineffective:
scaleConn = 2.0
withinChanConnPropScaling = 0.6 * scaleConn # These scaling numbers for within, far and near channels are calculated from Lope-Huerta 2013, Fig 6, on the basis of number of connected neurons depending on size. Within ~ 5, hence the average of connected D1/D2 neurons within channel is ~ 5, similarly, near channel ~ 15, far channel ~ 1
# withinChanConnPropScaling = 1.7 * scaleConn # These scaling numbers for within, far and near channels are calculated from Lope-Huerta 2013, Fig 6, on the basis of number of connected neurons depending on size. Within ~ 5, hence the average of connected D1/D2 neurons within channel is ~ 5, similarly, near channel ~ 15, far channel ~ 1
betweenNearChanConnPropScaling = 1.7 * scaleConn
#betweenNearChanConnPropScaling = 0.6 * scaleConn
betweenFarChanConnPropScaling = 0.15 * scaleConn
withinChanDelay = 1.0
betweenNearChanDelay = 2.5
betweenFarChanDelay = 4.5
cd1d1 = 0.07 * num_neurons_per_channel # Planert
cd1d2 = 0.05 * num_neurons_per_channel
cd2d1 = 0.13 * num_neurons_per_channel
cd2d2 = 0.23 * num_neurons_per_channel # In planert
cd1fsi = 0.89
cd2fsi = 0.67
weightScale = 0.9 # Re-tuned below according to iaf_cond_alpha and Table 1 in Planert, see script neuorn_model_tuning.py
# Had to be tuned so that mV match the planert data, we did forget that these were tuned for aeif_exp
jd1d1 = -0.75 * weightScale
jd1d2 = -0.85 * weightScale
jd2d1 = -1.7 * weightScale # also depends on neuron properties
jd2d2 = -1.35 * weightScale
c_inter_hemis = 0.1
j_inter_hemis = -0.5
global conn_params_d1d1_within_chan
global conn_params_d1d2_within_chan
global conn_params_d2d1_within_chan
global conn_params_d2d2_within_chan
global conn_params_d1d1_between_near_chan
global conn_params_d1d2_between_near_chan
global conn_params_d2d1_between_near_chan
global conn_params_d2d2_between_near_chan
global conn_params_d1d1_between_far_chan
global conn_params_d1d2_between_far_chan
global conn_params_d2d1_between_far_chan
global conn_params_d2d2_between_far_chan
def update_conn_params():
    """Rebuild the twelve module-level `conn_params_*` connection dicts.

    Reads the current `c*` base connection counts and the within/near/far
    scaling factors from module scope and recomputes each
    'fixed_outdegree' rule dictionary accordingly.
    """
    global conn_params_d1d1_within_chan
    global conn_params_d1d2_within_chan
    global conn_params_d2d1_within_chan
    global conn_params_d2d2_within_chan
    global conn_params_d1d1_between_near_chan
    global conn_params_d1d2_between_near_chan
    global conn_params_d2d1_between_near_chan
    global conn_params_d2d2_between_near_chan
    global conn_params_d1d1_between_far_chan
    global conn_params_d1d2_between_far_chan
    global conn_params_d2d1_between_far_chan
    global conn_params_d2d2_between_far_chan
    print(withinChanConnPropScaling, betweenNearChanConnPropScaling,
          betweenFarChanConnPropScaling)

    def _rule(base, scaling):
        # Connection spec: fixed out-degree, truncated to an integer.
        return {'rule': 'fixed_outdegree', 'outdegree': int(base * scaling)}

    conn_params_d1d1_within_chan = _rule(cd1d1, withinChanConnPropScaling)
    conn_params_d1d2_within_chan = _rule(cd1d2, withinChanConnPropScaling)
    conn_params_d2d1_within_chan = _rule(cd2d1, withinChanConnPropScaling)
    conn_params_d2d2_within_chan = _rule(cd2d2, withinChanConnPropScaling)
    conn_params_d1d1_between_near_chan = _rule(cd1d1, betweenNearChanConnPropScaling)
    conn_params_d1d2_between_near_chan = _rule(cd1d2, betweenNearChanConnPropScaling)
    conn_params_d2d1_between_near_chan = _rule(cd2d1, betweenNearChanConnPropScaling)
    conn_params_d2d2_between_near_chan = _rule(cd2d2, betweenNearChanConnPropScaling)
    conn_params_d1d1_between_far_chan = _rule(cd1d1, betweenFarChanConnPropScaling)
    conn_params_d1d2_between_far_chan = _rule(cd1d2, betweenFarChanConnPropScaling)
    conn_params_d2d1_between_far_chan = _rule(cd2d1, betweenFarChanConnPropScaling)
    conn_params_d2d2_between_far_chan = _rule(cd2d2, betweenFarChanConnPropScaling)
# Populate the conn_params_* globals once at import time.
update_conn_params()
# Synapse specs: inhibitory weight (j*) plus distance-dependent delay.
syn_params_d1d1_within_chan = {"weight": jd1d1, "delay": withinChanDelay}
syn_params_d1d2_within_chan = {"weight": jd1d2, "delay": withinChanDelay}
syn_params_d2d1_within_chan = {"weight": jd2d1, "delay": withinChanDelay}
syn_params_d2d2_within_chan = {"weight": jd2d2, "delay": withinChanDelay}
syn_params_d1d1_between_near_chan = {
    "weight": jd1d1, "delay": betweenNearChanDelay}
syn_params_d1d2_between_near_chan = {
    "weight": jd1d2, "delay": betweenNearChanDelay}
syn_params_d2d1_between_near_chan = {
    "weight": jd2d1, "delay": betweenNearChanDelay}
syn_params_d2d2_between_near_chan = {
    "weight": jd2d2, "delay": betweenNearChanDelay}
syn_params_d1d1_between_far_chan = {
    "weight": jd1d1, "delay": betweenFarChanDelay}
syn_params_d1d2_between_far_chan = {
    "weight": jd1d2, "delay": betweenFarChanDelay}
syn_params_d2d1_between_far_chan = {
    "weight": jd2d1, "delay": betweenFarChanDelay}
syn_params_d2d2_between_far_chan = {
    "weight": jd2d2, "delay": betweenFarChanDelay}
| StarcoderdataPython |
3393473 |
class DbColumn(object):
    """A single database column with its Java type-mapping metadata."""

    def __init__(self, name, javaType, jdbcType, comment, nullable, maxLen):
        self.name = name
        self.javaType = javaType
        self.jdbcType = jdbcType
        self.comment = comment
        self.nullable = nullable
        self.maxLen = maxLen

    def __str__(self):
        # NOTE: jdbcType is deliberately kept out of the string form,
        # mirroring the historical output format.
        parts = [
            "name = " + self.name,
            "javaType = " + self.javaType,
            "comment = " + self.comment,
            "nullable = " + str(self.nullable),
            "maxLen = " + str(self.maxLen),
        ]
        return ", ".join(parts)
class DbEntity(object):
    """Metadata for one DB table plus naming helpers for generated Java artifacts.

    Holds the table/class names, the key column, the column list and the
    package layout, and derives class/property/fully-qualified names for
    POs, mappers and data sources.
    """
    def __init__(self, tableName, clazzName, clazzComment, keyCol, colList):
        self.tableName = tableName
        self.clazzName = clazzName
        self.clazzComment = clazzComment
        self.keyCol = keyCol
        self.colList = colList
        self._packageName = ''
        self._moduleName = ''
        self._modelPackage = ''
        self._dsPackage = ''
        self._splitRead = False
    def splitRead(self, val):
        """Set whether read and write data sources are split."""
        self._splitRead = val
    # The original code declared each accessor below twice (Java-style
    # setter then getter); the second `def` silently replaced the first, so
    # the setters were unreachable, while attribute-style uses such as
    # `self.modelPackage + '.'` concatenated a *bound method* with a string
    # and raised TypeError. They are now real properties, which matches the
    # attribute-style access used throughout the class.
    @property
    def packageName(self):
        return self._packageName
    @packageName.setter
    def packageName(self, value):
        self._packageName = value
    @property
    def moduleName(self):
        return self._moduleName
    @moduleName.setter
    def moduleName(self, value):
        self._moduleName = value
    @property
    def modelPackage(self):
        return self._modelPackage
    @modelPackage.setter
    def modelPackage(self, value):
        self._modelPackage = value
    @property
    def dsPackage(self):
        return self._dsPackage
    @dsPackage.setter
    def dsPackage(self, value):
        self._dsPackage = value
    def poClazz(self):
        """Persistent-object class name, e.g. 'UserPo'."""
        return self.clazzName + 'Po'
    def poProp(self):
        """Persistent-object property name (lower camel case), e.g. 'userPo'."""
        return self.clazzName[0].lower() + self.clazzName[1:] + 'Po'
    def fullPo(self):
        """Fully-qualified PO class name."""
        return self.modelPackage + '.' + self.moduleName + '.po.' + self.clazzName + 'Po'
    def roMapperClazz(self):
        """Read-only mapper class name."""
        return self.clazzName + 'RoMapper'
    def roMapperProp(self):
        """Read-only mapper property name."""
        return self.clazzName[0].lower() + self.clazzName[1:] + 'RoMapper'
    def fullRoMapper(self):
        """Fully-qualified read-only mapper class name."""
        return self.dsPackage + '.ro.' + self.moduleName + '.mapper.' + self.clazzName + 'RoMapper'
    def rwDsClazz(self):
        """Read-write (or unified) data-source class name."""
        if self._splitRead:
            return self.clazzName + 'RwDs'
        else:
            return self.clazzName + 'Ds'
    def rwMapperClazz(self):
        """Read-write (or unified) mapper class name."""
        if self._splitRead:
            return self.clazzName + 'RwMapper'
        else:
            return self.clazzName + 'Mapper'
    def rwMapperProp(self):
        """Read-write (or unified) mapper property name."""
        if self._splitRead:
            return self.clazzName[0].lower() + self.clazzName[1:] + 'RwMapper'
        else:
            return self.clazzName[0].lower() + self.clazzName[1:] + 'Mapper'
    def fullRwMapper(self):
        """Fully-qualified read-write (or unified) mapper class name."""
        if self._splitRead:
            return self.dsPackage + '.rw.' + self.moduleName + '.mapper.' + self.clazzName + 'RwMapper'
        else:
            return self.dsPackage + '.' + self.moduleName + '.mapper.' + self.clazzName + 'Mapper'
    def fullRoDs(self):
        """Fully-qualified read-only data-source class name."""
        return self.packageName + '.ro.' + self.moduleName + '.ds.' + self.clazzName + 'RoDs'
    def fullRwDs(self):
        """Fully-qualified read-write (or unified) data-source class name."""
        if self._splitRead:
            return self.packageName + '.rw.' + self.moduleName + '.ds.' + self.clazzName + 'RwDs'
        else:
            return self.packageName + '.' + self.moduleName + '.ds.' + self.clazzName + 'Ds'
    def getColJavaType(self, colName):
        """Return the Java type of *colName*, or 'TypeNotFound'."""
        for col in self.colList:
            if col.name == colName:
                return col.javaType
        return 'TypeNotFound'
    def getColJdbcType(self, colName):
        """Return the JDBC type of *colName*, or 'TypeNotFound'."""
        for col in self.colList:
            if col.name == colName:
                return col.jdbcType
        return 'TypeNotFound'
    def __str__(self):
        cols = ""
        for col in self.colList:
            cols += col.__str__()
            cols += "\r\n"
        #return "tableName = " + self.tableName
        return "clazzName = " + self.clazzName + ", clazzComment = " + self.clazzComment + " keyCol = " + str(self.keyCol) + ", cols = \r\n" + cols
| StarcoderdataPython |
1788932 | <filename>data_collection/gazette/spiders/sc_balneario_picarras.py
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScBalnearioPicarrasSpider(FecamGazetteSpider):
    # Gazette spider for Balneário Piçarras (SC); all crawling logic lives
    # in the FecamGazetteSpider base class, configured by these attributes.
    name = "sc_balneario_picarras"
    # FECAM search filter selecting this municipality's entity code.
    FECAM_QUERY = "cod_entidade:33"
    # Territory identifier — presumably the IBGE city code; TODO confirm.
    TERRITORY_ID = "4212809"
| StarcoderdataPython |
55410 | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch
class Generator(nn.Module):
    """Conditional GAN generator: maps (noise, label) to a flat Tanh sample."""

    def __init__(self, configs, shape):
        super(Generator, self).__init__()
        # One embedding row per class, used to condition the generator.
        # NOTE: modules are created in the same order as before so that
        # parameter initialization draws from the RNG identically.
        self.label_emb = nn.Embedding(configs.n_classes, configs.n_classes)
        self.shape = shape

        def block(in_feat, out_feat, normalize=True):
            # Linear -> (optional BatchNorm) -> LeakyReLU stage.
            stage = [nn.Linear(in_feat, out_feat)]
            if normalize:
                stage.append(nn.BatchNorm1d(out_feat, 0.8))
            stage.append(nn.LeakyReLU(0.2, inplace=True))
            return stage

        layers = []
        layers += block(configs.latent_dim + configs.n_classes, 128, normalize=False)
        layers += block(128, 256)
        layers += block(256, 512)
        layers += block(512, 1024)
        layers.append(nn.Linear(1024, int(np.prod(shape))))
        layers.append(nn.Tanh())
        self.model = nn.Sequential(*layers)

    def forward(self, noise, labels):
        # Condition by concatenating the label embedding in front of the noise.
        conditioned = torch.cat((self.label_emb(labels), noise), -1)
        generated = self.model(conditioned)
        # Flatten to (batch, -1); the MLP output is already 2-D, so this is
        # effectively a no-op kept for parity with the original behavior.
        return generated.view(generated.size(0), -1)
class Discriminator(nn.Module):
    """Conditional GAN discriminator: scores a (sample, label) pair."""

    def __init__(self, configs, shape):
        super(Discriminator, self).__init__()
        self.label_embedding = nn.Embedding(configs.n_classes, configs.n_classes)
        # MLP over the concatenation of the flattened sample and the label
        # embedding; layers are listed in the original creation order so
        # parameter initialization draws from the RNG identically.
        layers = [
            nn.Linear(configs.n_classes + int(np.prod(shape)), 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, input, labels):
        # Flatten the sample and append the label embedding along the last dim.
        flattened = input.view(input.size(0), -1)
        embedded = self.label_embedding(labels)
        d_in = torch.cat((flattened, embedded), -1)
        return self.model(d_in)
| StarcoderdataPython |
102437 | # Copyright 2016-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the arpignore module
"""
import os
import subprocess
import sys
import unittest
import mock
import moduletests.src.arpignore
try:
# Python 2.x
from cStringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
if sys.hexversion >= 0x3040000:
# contextlib.redirect_stdout was introduced in Python 3.4
import contextlib
else:
# contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
import contextlib2 as contextlib
# builtins was named __builtin__ in Python 2, so pick the right module name
# for the purposes of mocking the open call (mock.patch needs the string).
if sys.version_info >= (3,):
    builtins_name = "builtins"
else:
    builtins_name = "__builtin__"
class Testarpignore(unittest.TestCase):
config_file_path = "/etc/sysctl.d/55-arp-ignore.conf"
    def setUp(self):
        # Fresh StringIO per test to capture the module's printed output.
        self.output = StringIO()
    def tearDown(self):
        # Release the capture buffer created in setUp.
        self.output.close()
@mock.patch("subprocess.check_output")
def test_detect_noproblem(self, check_output_mock):
"""Test that no problem is detected with expected-good output."""
check_output_mock.return_value = "arp_ignore = 0"
self.assertFalse(moduletests.src.arpignore.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
def test_detect_problem(self, check_output_mock):
"""Test that the problem is detected with expected-bad output."""
check_output_mock.return_value = "arp_ignore = 1"
self.assertTrue(moduletests.src.arpignore.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=["net.ipv4.conf.all.arp_ignore = 1",
subprocess.CalledProcessError(1, "test")])
def test_fix_sysctlfail(self, check_output_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(subprocess.CalledProcessError, moduletests.src.arpignore.fix, self.config_file_path)
self.assertTrue(check_output_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] net.ipv4.conf.all.arp_ignore=0 failed for running system\n"))
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpignore.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpignore.open", side_effect=IOError)
def test_fix_write_new_fail(self, open_mock, exists_mock, check_output_mock):
check_output_mock.return_value = "net.ipv4.conf.lo.arp_announce = 0\nnet.ipv4.conf.all.arp_ignore = 1"
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.arpignore.fix, self.config_file_path)
self.assertTrue(open_mock.called)
self.assertTrue(exists_mock.called)
self.assertTrue(check_output_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] Unable to open /etc/sysctl.d/55-arp-ignore.conf and write to it.\n"))
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpignore.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpignore.open", mock.mock_open())
def test_fix_write_new_success(self, exists_mock, check_output_mock):
check_output_mock.return_value = "net.ipv4.conf.lo.arp_announce = 0\nnet.ipv4.conf.all.arp_ignore = 1"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpignore.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith("[FIXED] /etc/sysctl.d/55-arp-ignore.conf written.\n"))
self.assertTrue(exists_mock.called)
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpignore.os.path.exists", side_effect=[True])
def test_fix_success(self, exists_mock, check_output_mock):
check_output_mock.return_value = "net.ipv4.conf.all.arp_ignore = 1\nsome_other = 0"
open_mock = mock.mock_open(read_data="#comment\n"
"net.ipv4.conf.all.arp_ignore = 1\n"
"net.ipv4.conf.lo.arp_ignore = 0\n"
"garbage\n")
# mock_open does not have support for iteration so it must be added manually
# readline() until a blank line is reached (the sentinel)
def iter_func(self):
return iter(self.readline, "")
open_mock.return_value.__iter__ = iter_func
def py3_next_func(self):
return next(iter(self.readline, ""))
if sys.hexversion >= 0x3000000:
open_mock.return_value.__next__ = py3_next_func
with mock.patch("moduletests.src.arpignore.open", open_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpignore.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith("[FIXED] /etc/sysctl.d/55-arp-ignore.conf written.\n"))
self.assertEqual(str(open_mock.mock_calls), "[call('/etc/sysctl.d/55-arp-ignore.conf', 'r'),\n"
" call().__enter__(),\n call().readlines(),\n"
" call().__exit__(None, None, None),\n"
" call('/etc/sysctl.d/55-arp-ignore.conf', 'w'),\n"
" call().__enter__(),\n"
" call().write('#comment\\nnet.ipv4.conf.lo.arp_ignore = 0'),\n"
" call().write('\\n'),\n"
" call().write('net.ipv4.conf.all.arp_ignore = 0'),\n"
" call().write('\\n'),\n"
" call().__exit__(None, None, None)]")
self.assertTrue(exists_mock.called)
self.assertTrue(check_output_mock.called)
@mock.patch("moduletests.src.arpignore.get_config_dict", return_value=dict())
@mock.patch("moduletests.src.arpignore.detect", return_value=False)
def test_run_success(self, detect_mock, config_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpignore.run())
self.assertEqual(self.output.getvalue(), "Determining if any interfaces are set to ignore arp requests\n"
"[SUCCESS] arp ignore is disabled for all interfaces.\n")
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.arpignore.get_config_dict")
    @mock.patch("moduletests.src.arpignore.detect", return_value=True)
    def test_run_no_remediate(self, detect_mock, config_mock):
        # Problem detected but REMEDIATE is False: run() must fail and explain
        # why no fix was attempted (sudo present, --remediate flag missing).
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": False,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.arpignore.run())
        self.assertTrue("[UNFIXED] Remediation impossible without sudo and --remediate.\n"
                        "-- Running as root/sudo: True\n"
                        "-- Required --remediate flag specified: False\n"
                        "[FAILURE] arp ignore is enabled for one or more interfaces. Please see the module log\n"
                        in self.output.getvalue())
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.arpignore.get_config_dict")
    @mock.patch("moduletests.src.arpignore.detect", return_value=True)
    @mock.patch("moduletests.src.arpignore.os.path.isfile", return_value=True)
    @mock.patch("moduletests.src.arpignore.backup", return_value=True)
    @mock.patch("moduletests.src.arpignore.fix", return_value=True)
    @mock.patch("moduletests.src.arpignore.restore", return_value=True)
    def test_run_failure_isfile(self,
                                restore_mock,
                                fix_mock,
                                backup_mock,
                                isfile_mock,
                                detect_mock,
                                config_mock):
        # detect() stays True even after fix(): run() must fail, and since the
        # config file exists (isfile True) and is in BACKED_FILES, the
        # backup/restore path must have been exercised.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": {self.config_file_path: "/some/path"},
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.arpignore.run())
        self.assertTrue("[FAILURE] arp ignore is enabled for one or more interfaces. "
                        "Please see the module log"
                        in self.output.getvalue())
        self.assertTrue(restore_mock.called)
        self.assertTrue(fix_mock.called)
        self.assertTrue(backup_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.arpignore.get_config_dict")
    @mock.patch("moduletests.src.arpignore.detect", return_value=True)
    @mock.patch("moduletests.src.arpignore.os.path.isfile", return_value=False)
    @mock.patch("moduletests.src.arpignore.fix", return_value=True)
    def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
        # Same failure path as above but with no existing config file
        # (isfile False), so no backup/restore is involved.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.arpignore.run())
        self.assertTrue("[FAILURE] arp ignore is enabled for one or more interfaces. "
                        "Please see the module log"
                        in self.output.getvalue())
        self.assertTrue(fix_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.arpignore.get_config_dict")
    @mock.patch("moduletests.src.arpignore.detect", side_effect=(True, False))
    @mock.patch("moduletests.src.arpignore.os.path.isfile", return_value=False)
    @mock.patch("moduletests.src.arpignore.fix", return_value=True)
    def test_run_fix(self, fix_mock, isfile_mock, detect_mock, config_mock):
        # detect() returns True first (problem found) then False (fixed):
        # run() must succeed and report success "after remediation".
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": dict(),
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertTrue(moduletests.src.arpignore.run())
        self.assertEqual(self.output.getvalue(), "Determining if any interfaces are set to ignore arp requests\n"
                                                 "[SUCCESS] arp ignore is disabled for all interfaces "
                                                 "after remediation.\n")
        self.assertTrue(fix_mock.called)
        self.assertTrue(isfile_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.arpignore.get_config_dict")
    @mock.patch("moduletests.src.arpignore.detect", side_effect=Exception)
    @mock.patch("moduletests.src.arpignore.restore", return_value=True)
    def test_run_exception(self, restore_mock, detect_mock, config_mock):
        # If detect() raises, run() must return False and restore any files
        # listed in BACKED_FILES.
        config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
                                    "LOG_DIR": "/var/tmp/ec2rl",
                                    "BACKED_FILES": {self.config_file_path: "/some/path"},
                                    "REMEDIATE": True,
                                    "SUDO": True}
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.arpignore.run())
        self.assertTrue(restore_mock.called)
        self.assertTrue(detect_mock.called)
        self.assertTrue(config_mock.called)
    @mock.patch("moduletests.src.arpignore.get_config_dict", side_effect=IOError)
    def test_run_failure_config_exception(self, config_mock):
        # A failure to load the config itself must also yield False plus the
        # generic "review the logs" trailer.
        with contextlib.redirect_stdout(self.output):
            self.assertFalse(moduletests.src.arpignore.run())
        self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))
        self.assertTrue(config_mock.called)
| StarcoderdataPython |
1651200 | <reponame>Rohitmaan2012/Twitter_Sentiment_Analysis<gh_stars>0
"""
Numpy version of DCNN, used for prediction, instead of training
"""
import numpy as np
from numpy_impl import (conv2d, LogisticRegression)
_MODEL_PATH = "models/filter_widths=10,7,,batch_size=10,,ks=20,5,,fold=1,1,,conv_layer_n=2,,ebd_dm=48,,nkerns=6,12,,dr=0.5,0.5,,l2_regs=1e-06,0.0001,1e-05,1e-06.pkl"
class WordEmbeddingLayer(object):
    """
    Layer that takes input word-index sentences and outputs the stacked
    sentence embedding matrices.
    """
    def __init__(self,
                 embeddings):
        """
        embeddings: numpy.ndarray
            Embedding table, shape (vocab size, embedding dimension)
        """
        assert embeddings.ndim == 2, "Embeddings should have 2 dimensions"
        self.embeddings = embeddings

    def output(self, x):
        """
        x: numpy.ndarray
            the input sentences consisting of word indices,
            shape (number of instances, sentence word number)

        Returns a 4D array of shape
        (number of instances, 1, embedding dimension, sentence word number).
        """
        # Row-index the embedding table per sentence. A list comprehension is
        # used instead of the original ``np.array(map(...))``: on Python 3,
        # ``map`` returns a lazy iterator, which np.array would wrap in a
        # useless 0-d object array instead of stacking the matrices.
        sent_matrices = np.array([self.embeddings[sent] for sent in x])
        # Equivalent to dimshuffle(0, 'x', 2, 1) in Theano: transpose each
        # sentence matrix and insert a singleton "channel" axis.
        return sent_matrices.swapaxes(1, 2)[:, None, :, :]
class ConvFoldingPoolLayer(object):
    """
    Convolution, folding and k-max pooling layer
    """
    def __init__(self,
                 k,
                 fold,
                 W,
                 b):
        """
        k: int
            the k value in the max-pooling layer
        fold: int, 0 or 1
            fold or not
        W: numpy.ndarray,
            the filter weight matrices,
            dimension: (number of filters, num input feature maps, filter height, filter width)
        b: numpy.ndarray,
            the filter bias,
            dimension: (number of filters, )
        """
        self.fold_flag = fold
        self.W = W
        self.b = b
        self.k = k
    def fold(self, x):
        """
        Halve the row dimension (axis 2) by averaging adjacent row pairs.
        Assumes x.shape[2] is even — TODO confirm callers guarantee this.

        x: np.ndarray
            the input, 4d array
        """
        return (x[:, :, np.arange(0, x.shape[2], 2)] +
                x[:, :, np.arange(1, x.shape[2], 2)]) / 2
    def k_max_pool(self, x, k):
        """
        perform k-max pool on the input along the rows

        x: numpy.ndarray
            the input, 4d array
        k: int
            the k parameter

        Returns:
        4D numpy.ndarray
        """
        # Positions (along the last axis) of the k largest values; re-sorting
        # the selected positions keeps the pooled values in original order.
        ind = np.argsort(x, axis = 3)
        sorted_ind = np.sort(ind[:,:,:, -k:], axis = 3)
        dim0, dim1, dim2, dim3 = sorted_ind.shape
        # Flat index arrays pairing every (dim0, dim1, dim2) coordinate with
        # its k selected last-axis positions, for one fancy-indexing gather.
        indices_dim0 = np.arange(dim0).repeat(dim1 * dim2 * dim3)
        indices_dim1 = np.transpose(np.arange(dim1).repeat(dim2 * dim3).reshape((dim1*dim2*dim3, 1)).repeat(dim0, axis=1)).flatten()
        indices_dim2 = np.transpose(np.arange(dim2).repeat(dim3).reshape((dim2*dim3, 1)).repeat(dim0 * dim1, axis = 1)).flatten()
        return x[indices_dim0, indices_dim1, indices_dim2, sorted_ind.flatten()].reshape(sorted_ind.shape)
    def output(self, x):
        """Convolve, optionally fold, k-max pool, add bias and apply tanh."""
        # non-linear transform of the convolution output
        # NOTE(review): conv2d comes from the project-local numpy_impl module;
        # "full" mode presumably pads like scipy's convolve2d — confirm there.
        conv_out = conv2d(x,
                          self.W,
                          mode = "full")
        if self.fold_flag:
            # fold
            fold_out = self.fold(conv_out)
        else:
            fold_out = conv_out
        # k-max pool, then broadcast the per-filter bias over all positions
        pool_out = (self.k_max_pool(fold_out, self.k) +
                    self.b[np.newaxis, :, np.newaxis, np.newaxis])
        return np.tanh(pool_out)
class DCNN(object):
    """
    Numpy-based Dynamic Convolutional Neural Network, used for prediction
    (not training). Wires together an embedding layer, a stack of
    conv/fold/pool layers and a final logistic-regression layer.
    """
    def __init__(self, params):
        """
        params: object holding the trained weights; expected attributes:
            embeddings, conv_layer_n, ks, fold, W, b, logreg_W, logreg_b
        """
        self.e_layer = WordEmbeddingLayer(embeddings = params.embeddings)
        self.c_layers = []
        # ``range`` instead of the Python-2-only ``xrange``: identical
        # behaviour here, and keeps the module importable under Python 3.
        for i in range(params.conv_layer_n):
            self.c_layers.append(ConvFoldingPoolLayer(params.ks[i],
                                                      params.fold[i],
                                                      W = params.W[i],
                                                      b = params.b[i])
            )
        self.l_layer = LogisticRegression(
            params.logreg_W,
            params.logreg_b
        )

    def _p_y_given_x(self, x):
        """Return the class-probability matrix for input word indices x."""
        output = self.e_layer.output(x)
        for l in self.c_layers:
            output = l.output(output)
        assert output.ndim == 4
        # Flatten all feature maps per instance before the softmax layer.
        output = output.reshape(
            (output.shape[0],
             np.prod(output.shape[1:]))
        )
        return self.l_layer._p_y_given_x(output)

    def predict(self, x):
        """Return the most probable class index for each input row."""
        return np.argmax(self._p_y_given_x(x), axis = 1)

    # The following functions are
    # FOR TESTING PURPOSE
    #
    def _nnl(self, x, y):
        """Mean negative log-likelihood of targets y under the model."""
        p_y_given_x = self._p_y_given_x(x)
        return np.mean(
            -np.log(p_y_given_x[np.arange(y.shape[0]), y])
        )

    def _errors(self, x, y):
        """Fraction of mispredicted labels; y must be int32."""
        assert y.dtype == np.int32, "%r != %r" %(y.dtype, np.int32)
        pred_y = self.predict(x)
        return np.sum(pred_y != y) / float(pred_y.shape[0])

    def _c_layer_output(self, x):
        """Output of the last conv/fold/pool layer (before flattening)."""
        output = self.e_layer.output(x)
        for l in self.c_layers:
            output = l.output(output)
        return output
| StarcoderdataPython |
3211532 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
def hash160(data):
    """Return the 20-byte HASH160 digest: RIPEMD160(SHA256(data))."""
    # NOTE(review): 'ripemd160' is supplied by OpenSSL; on builds where the
    # legacy provider is disabled, hashlib.new('ripemd160') raises ValueError.
    ripemd = hashlib.new('ripemd160', hashlib.sha256(data).digest())
    return ripemd.digest()
def sha256(data):
    """Return the 32-byte SHA-256 digest of data."""
    return hashlib.sha256(data).digest()
def double_sha256(data):
    """Return SHA256(SHA256(data)), the 32-byte double-SHA256 digest."""
    inner = hashlib.sha256(data).digest()
    return hashlib.sha256(inner).digest()
def checksum(data):
    """Return the first 4 bytes of SHA256(SHA256(data))."""
    digest = hashlib.sha256(hashlib.sha256(data).digest()).digest()
    return digest[:4]
| StarcoderdataPython |
43835 | from xml.etree import ElementTree as Etree
from xml.dom import minidom
from elavonvtpv.enum import RequestType
from elavonvtpv.Response import Response
import datetime
import hashlib
import requests
class Request:
    """An Elavon virtual-terminal XML request, hashed, serialized and sent over HTTP."""

    def __init__(self, secret, request_type, merchant_id, order_id, currency=None, amount=None, card=None,
                 tss_info=None, settle=True, account=None, channel=None, comment1=None, comment2=None,
                 past_reference=None, authorization_code=None, refund_hash=None, pares=None, mpi=None):
        """
        Defines a Request object
        :param secret: the shared secret between Elavon and your account
        :param request_type: RequestType enum object containing the type of the request to be sent to Elavon
        :param merchant_id: the credentials of the elavon merchant account
        :param order_id: number unique to the request for all accounts associated with the merchant
        :param currency: Currency enum object containing the code of the currency to be use in the transaction
        :param amount: amount of currency to be charged, in the smallest unit of currency possible
        :param card: CreditCard object containing the data pertaining to the customer's credit card
        :param tss_info: TssInfo object containing the data pertaining to the anti-fraud system
        :param settle: flag indicating if the transaction must be settled automatically by Elavon
        :param account: the sub-account to be used for the request
        :param channel: Channel enum object indicating the channel by which the transaction is made
        :param comment1: optional comment to include in the request
        :param comment2: optional comment to include in the request
        :param past_reference: reference of the transaction to which this one refers
        :param authorization_code: authorization code given with the transaction to which this one refers
        :param refund_hash: hash provided by Elavon, needed to make refunds
        :param pares: payer authentication response (PaRes) message, used by the 3ds-verifysig request
        :param mpi: 3D Secure MPI data object, appended to auth requests when present
        """
        # Timestamp is fixed at construction time; it participates in the hash.
        self.timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.secret = secret
        self.request_type = request_type
        self.merchant_id = merchant_id
        self.order_id = order_id
        self.currency = currency
        self.amount = amount
        self.card = card
        self.tss_info = tss_info
        self.settle = settle
        self.account = account
        self.channel = channel
        self.comment1 = comment1
        self.comment2 = comment2
        self.past_reference = past_reference
        self.authorization_code = authorization_code
        self.refund_hash = refund_hash
        self.pares = pares
        self.mpi = mpi

    def __hash(self):
        """
        Builds the request hash from the data contained within
        :return: the hash string that will latter be cyphered
        """
        # Dot-joined fields per Elavon's hash recipe; the card number is only
        # appended when a card is present (the trailing dot remains otherwise).
        res = "%s.%s.%s.%s.%s." % (str(self.timestamp), str(self.merchant_id), str(self.order_id), str(self.amount),
                                   str(self.currency.value))
        if self.card:
            res += "%s" % str(self.card.number)
        return res.encode('utf-8')

    def sha1_hash(self):
        """
        returns a secure hash in SHA-1 for this request
        :return: secure hash in SHA-1
        """
        # Two-pass scheme: hash the request fields, append the shared secret,
        # then hash again.
        sha1_hash = hashlib.sha1(self.__hash()).hexdigest()
        sha1_hash += ".%s" % self.secret
        return hashlib.sha1(sha1_hash.encode('utf-8')).hexdigest()
        # return hashlib.sha1(self.__hash()).hexdigest()

    def md5_hash(self):
        """
        returns a secure hash in MD5 for this request
        :return: secure hash in MD5, as a hex string
        """
        md5_hash = hashlib.md5(self.__hash()).hexdigest()
        md5_hash += ".%s" % self.secret
        # Return the hex digest (a str), mirroring sha1_hash(). The previous
        # ``.digest()`` returned bytes, which cannot be assigned to an XML
        # element's text when serializing the request.
        return hashlib.md5(md5_hash.encode('utf-8')).hexdigest()

    def __basic_to_etree_element(self):
        """
        creates the basic structure of an Elavon request
        :return: the basic root element of the request containing those fields that exist en every request type
        """
        # The two 3DS request types use dashed names that are not valid Python
        # enum member names, hence the explicit mapping.
        if self.request_type == RequestType.verify_enrolled:
            request_type = '3ds-verifyenrolled'
        elif self.request_type == RequestType.verify_sig:
            request_type = '3ds-verifysig'
        else:
            request_type = self.request_type.name
        request = Etree.Element('request')
        request.set('timestamp', self.timestamp)
        request.set('type', request_type)
        merchant_id = Etree.SubElement(request, 'merchantid')
        merchant_id.text = self.merchant_id
        if self.account:
            account = Etree.SubElement(request, 'account')
            account.text = self.account
        order_id = Etree.SubElement(request, 'orderid')
        order_id.text = self.order_id
        return request

    def __channel_to_etree_element(self):
        """Build the <channel> element."""
        channel = Etree.Element('channel')
        channel.text = self.channel.value
        return channel

    def __past_reference_to_etree_element(self):
        """Build the <pasref> element referencing a previous transaction."""
        past_reference = Etree.Element('pasref')
        past_reference.text = self.past_reference
        return past_reference

    def __pares_to_etree_element(self):
        """Build the <pares> element carrying the 3DS PaRes message."""
        pares = Etree.Element('pares')
        pares.text = self.pares
        return pares

    def __authorization_code_to_etree_element(self):
        """Build the <authcode> element."""
        authorization_code = Etree.Element('authcode')
        authorization_code.text = self.authorization_code
        return authorization_code

    def __amount_to_etree_element(self):
        """Build the <amount currency="..."> element."""
        amount = Etree.Element('amount')
        amount.set('currency', self.currency.value)
        amount.text = self.amount
        return amount

    def __auto_settle_to_etree_element(self):
        """Build the <autosettle> element from the settle flag."""
        auto_settle = Etree.Element('autosettle')
        auto_settle.set('flag', '1' if self.settle else '0')
        return auto_settle

    def __comments_to_etree_element(self):
        """Build the <comments> element holding up to two comments."""
        comments = Etree.Element('comments')
        if self.comment1:
            comment1 = Etree.SubElement(comments, 'comment', id='1')
            comment1.text = self.comment1
        if self.comment2:
            comment2 = Etree.SubElement(comments, 'comment', id='2')
            comment2.text = self.comment2
        return comments

    def __refund_hash_to_etree_element(self):
        """Build the <refundhash> element (SHA-1 of the stored refund hash)."""
        refundhash = Etree.Element('refundhash')
        refundhash.text = hashlib.sha1(self.refund_hash.encode('utf-8')).hexdigest()
        return refundhash

    def __sh1_hash_to_etree_element(self):
        """Build the <sha1hash> element."""
        sha1_hash = Etree.Element('sha1hash')
        sha1_hash.text = self.sha1_hash()
        return sha1_hash

    def __md5_hash_to_etree_element(self):
        """Build the <md5hash> element."""
        md5_hash = Etree.Element('md5hash')
        md5_hash.text = self.md5_hash()
        return md5_hash

    def __auth_to_etree(self):
        """Assemble the XML tree for an 'auth' request."""
        request = self.__basic_to_etree_element()
        if not self.mpi:
            request.append(self.__channel_to_etree_element())
        request.append(self.__amount_to_etree_element())
        request.append(self.card.to_etree_element())
        if self.mpi:
            request.append(self.mpi.to_etree_element())
        request.append(self.__auto_settle_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        if self.tss_info:
            request.append(self.tss_info.to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __manual_to_etree(self):
        """Assemble the XML tree for a 'manual' request."""
        request = self.__basic_to_etree_element()
        request.append(self.__channel_to_etree_element())
        request.append(self.__authorization_code_to_etree_element())
        request.append(self.__amount_to_etree_element())
        request.append(self.card.to_etree_element())
        request.append(self.__auto_settle_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        if self.tss_info:
            request.append(self.tss_info.to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __obt_to_etree(self):
        """Assemble the XML tree for an 'obt' request."""
        request = self.__basic_to_etree_element()
        request.append(self.card.to_etree_element())
        request.append(self.__auto_settle_to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __offline_to_etree(self):
        """Assemble the XML tree for an 'offline' request."""
        request = self.__basic_to_etree_element()
        request.append(self.__past_reference_to_etree_element())
        request.append(self.__authorization_code_to_etree_element())
        request.append(self.__amount_to_etree_element())
        request.append(self.card.to_etree_element())
        request.append(self.__auto_settle_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        if self.tss_info:
            request.append(self.tss_info.to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __rebate_to_etree(self):
        """Assemble the XML tree for a 'rebate' (refund) request."""
        request = self.__basic_to_etree_element()
        request.append(self.__past_reference_to_etree_element())
        request.append(self.__authorization_code_to_etree_element())
        request.append(self.__amount_to_etree_element())
        request.append(self.__auto_settle_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        request.append(self.__refund_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __void_to_etree(self):
        """Assemble the XML tree for a 'void' request."""
        request = self.__basic_to_etree_element()
        request.append(self.__past_reference_to_etree_element())
        request.append(self.card.to_etree_element())
        request.append(self.__authorization_code_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __tss_to_etree(self):
        """Assemble the XML tree for a 'TSS' (anti-fraud check) request."""
        request = self.__basic_to_etree_element()
        request.append(self.__amount_to_etree_element())
        request.append(self.card.to_etree_element())
        request.append(self.__auto_settle_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        if self.tss_info:
            request.append(self.tss_info.to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __settle_to_etree(self):
        """Assemble the XML tree for a 'settle' request."""
        request = self.__basic_to_etree_element()
        request.append(self.__past_reference_to_etree_element())
        if self.amount and self.currency:
            request.append(self.__amount_to_etree_element())
        request.append(self.__authorization_code_to_etree_element())
        if self.comment1 or self.comment2:
            request.append(self.__comments_to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        # request.append(self.__md5_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __verify_enrolled_to_etree(self):
        """Assemble the XML tree for a '3ds-verifyenrolled' request."""
        request = self.__basic_to_etree_element()
        if self.amount and self.currency:
            request.append(self.__amount_to_etree_element())
        request.append(self.card.to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __verify_sig_to_etree(self):
        """Assemble the XML tree for a '3ds-verifysig' request."""
        request = self.__basic_to_etree_element()
        if self.amount and self.currency:
            request.append(self.__amount_to_etree_element())
        request.append(self.card.to_etree_element())
        request.append(self.__pares_to_etree_element())
        request.append(self.__sh1_hash_to_etree_element())
        return Etree.ElementTree(request)

    def __to_etree(self):
        """Dispatch to the tree builder matching this request's type."""
        if self.request_type is RequestType.auth:
            return self.__auth_to_etree()
        elif self.request_type is RequestType.manual:
            return self.__manual_to_etree()
        elif self.request_type is RequestType.obt:
            return self.__obt_to_etree()
        elif self.request_type is RequestType.offline:
            return self.__offline_to_etree()
        elif self.request_type is RequestType.rebate:
            return self.__rebate_to_etree()
        elif self.request_type is RequestType.void:
            return self.__void_to_etree()
        elif self.request_type is RequestType.TSS:
            return self.__tss_to_etree()
        elif self.request_type is RequestType.settle:
            return self.__settle_to_etree()
        elif self.request_type is RequestType.verify_enrolled:
            return self.__verify_enrolled_to_etree()
        elif self.request_type is RequestType.verify_sig:
            return self.__verify_sig_to_etree()

    def to_xml_string(self):
        """Serialize the request to a UTF-8 XML string."""
        binary = Etree.tostring(self.__to_etree().getroot(), encoding='utf8', method='xml')
        return binary.decode('utf-8')

    def to_pretty_xml(self):
        """Serialize the request to an indented, human-readable XML string."""
        return minidom.parseString(self.to_xml_string()).toprettyxml()

    def send(self, url):
        """POST the request XML to ``url`` and wrap the reply in a Response."""
        headers = {'Content-Type': 'application/xml'}
        response = requests.post(url=url, data=self.to_pretty_xml(), headers=headers)
        return Response(response.content)
| StarcoderdataPython |
1600155 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.core.cache import caches
from django_redis import get_redis_connection as get_django_cache_redis_connection
from .cache import CacheEnum
def make_redis_key(key: str) -> str:
    """
    Build a collision-free Redis key for raw (non-Django-cache) Redis usage.

    All code that obtains a native Redis client via get_redis_connection must
    namespace its keys so they cannot collide with Django cache keys. This
    helper produces such a key, so raw-Redis callers do not need to worry
    about clashes with other projects or with this project's own cache.
    """
    cache = caches[CacheEnum.REDIS.value]
    # "version" is meaningful for the Django cache but not for raw Redis use;
    # passing a value the cache would never produce (e.g. "raw") keeps raw
    # keys disjoint from real cache keys.
    return cache.make_key(key, version="raw")
def get_redis_connection():
    """
    Reuse the Redis client connection already configured for the Django Redis
    cache, so no separate connection needs to be built from Redis settings.

    Note:
        This returns a *raw* Redis connection: the KEY_PREFIX, TIMEOUT and
        VERSION options configured on the Django cache do not apply. To avoid
        clashing with cache keys, always pair it with make_redis_key.
    """
    return get_django_cache_redis_connection(alias=CacheEnum.REDIS.value)
| StarcoderdataPython |
151672 | <reponame>JohnnyPeng18/coach
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union ,Dict, Any
from enum import Enum, Flag, auto
from copy import deepcopy
import numpy as np
import random
from collections import namedtuple
try:
import robosuite
from robosuite.wrappers import Wrapper, DomainRandomizationWrapper
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("Robosuite")
from rl_coach.base_parameters import Parameters, VisualizationParameters
from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection
from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, StateSpace, PlanarMapsObservationSpace
# Importing our custom Robosuite environments here so that they are properly
# registered in Robosuite, and so recognized by 'robosuite.make()' and included
# in 'robosuite.ALL_ENVIRONMENTS'
import rl_coach.environments.robosuite.cube_exp
robosuite_environments = list(robosuite.ALL_ENVIRONMENTS)
robosuite_robots = list(robosuite.ALL_ROBOTS)
robosuite_controllers = list(robosuite.ALL_CONTROLLERS)
def get_robosuite_env_extra_parameters(env_name: str):
    """Return the env-specific constructor kwargs of a Robosuite environment
    (name -> default value), excluding the shared base parameters."""
    import inspect
    assert env_name in robosuite_environments
    env_ctor = robosuite.environments.REGISTERED_ENVS[env_name]
    signature = inspect.signature(env_ctor)
    excluded = set(RobosuiteBaseParameters().env_kwargs_dict())
    excluded.update(('robots', 'controller_configs'))
    extra = {}
    for name, param in signature.parameters.items():
        if name not in excluded:
            extra[name] = param.default
    return extra
class OptionalObservations(Flag):
    # Bit flags selecting which optional observation groups (beyond the
    # always-returned robot state) an environment should produce.
    NONE = 0
    CAMERA = auto()  # rendered camera image(s)
    OBJECT = auto()  # object (cube/etc.) state vector
class RobosuiteBaseParameters(Parameters):
    # Parameters common to all Robosuite environments. Attribute names must
    # exactly match Robosuite's constructor kwargs, because env_kwargs_dict()
    # forwards vars(self) to robosuite.make() verbatim.
    def __init__(self, optional_observations: OptionalObservations = OptionalObservations.NONE):
        super(RobosuiteBaseParameters, self).__init__()
        # NOTE: Attribute names should exactly match the attribute names in Robosuite
        self.horizon = 1000       # Every episode lasts for exactly horizon timesteps
        self.ignore_done = True   # True if never terminating the environment (ignore horizon)
        self.reward_shaping = True   # if True, use dense rewards.

        # How many control signals to receive in every simulated second. This sets the amount of simulation time
        # that passes between every action input (this is NOT the same as frame_skip)
        self.control_freq = 10

        # Optional observations (robot state is always returned)
        # if True, every observation includes a rendered image
        self.use_camera_obs = bool(optional_observations & OptionalObservations.CAMERA)
        # if True, include object (cube/etc.) information in the observation
        self.use_object_obs = bool(optional_observations & OptionalObservations.OBJECT)

        # Camera parameters
        self.has_renderer = False            # Set to true to use Mujoco native viewer for on-screen rendering
        self.render_camera = 'frontview'     # name of camera to use for on-screen rendering
        self.has_offscreen_renderer = self.use_camera_obs
        self.render_collision_mesh = False   # True if rendering collision meshes in camera. False otherwise
        self.render_visual_mesh = True       # True if rendering visual meshes in camera. False otherwise
        self.camera_names = 'agentview'      # name of camera for rendering camera observations
        self.camera_heights = 84             # height of camera frame.
        self.camera_widths = 84              # width of camera frame.
        self.camera_depths = False           # True if rendering RGB-D, and RGB otherwise.

        # Collision
        self.penalize_reward_on_collision = True
        self.end_episode_on_collision = False

    @property
    def optional_observations(self):
        # Re-derive the OptionalObservations flag from the two boolean fields.
        flag = OptionalObservations.NONE
        if self.use_camera_obs:
            flag = OptionalObservations.CAMERA
            if self.use_object_obs:
                flag |= OptionalObservations.OBJECT
        elif self.use_object_obs:
            flag = OptionalObservations.OBJECT
        return flag

    @optional_observations.setter
    def optional_observations(self, value):
        # Camera observations require the off-screen renderer; note it is
        # only switched on here, never back off.
        self.use_camera_obs = bool(value & OptionalObservations.CAMERA)
        if self.use_camera_obs:
            self.has_offscreen_renderer = True
        self.use_object_obs = bool(value & OptionalObservations.OBJECT)

    def env_kwargs_dict(self):
        # Flatten all instance attributes into Robosuite constructor kwargs,
        # converting Enum values to their underlying value.
        res = {k: (v.value if isinstance(v, Enum) else v) for k, v in vars(self).items()}
        return res
class RobosuiteEnvironmentParameters(EnvironmentParameters):
    """Coach parameter set describing how to construct a RobosuiteEnvironment."""
    def __init__(self, level, robot=None, controller=None, apply_dr: bool = False,
                 dr_every_n_steps_min: int = 10, dr_every_n_steps_max: int = 20,
                 use_joint_vel_obs=False):
        super().__init__(level=level)
        self.base_parameters = RobosuiteBaseParameters()  # common Robosuite kwargs
        self.extra_parameters = {}                        # env-specific kwargs
        self.robot = robot                                # Robosuite robot name
        self.controller = controller                      # Robosuite controller name
        self.apply_dr = apply_dr                          # enable domain randomization
        self.dr_every_n_steps_min = dr_every_n_steps_min
        self.dr_every_n_steps_max = dr_every_n_steps_max
        self.use_joint_vel_obs = use_joint_vel_obs
        self.custom_controller_config_fpath = None        # optional controller JSON path

    @property
    def path(self):
        # Dotted path of the environment class these parameters instantiate.
        return 'rl_coach.environments.robosuite_environment:RobosuiteEnvironment'
DEFAULT_REWARD_SCALES = {
'Lift': 2.25,
'LiftLab': 2.25,
}
RobosuiteStepResult = namedtuple('RobosuiteStepResult', ['observation', 'reward', 'done', 'info'])
# Environment
class RobosuiteEnvironment(Environment):
def __init__(self, level: LevelSelection,
seed: int, frame_skip: int, human_control: bool, custom_reward_threshold: Union[int, float, None],
visualization_parameters: VisualizationParameters,
base_parameters: RobosuiteBaseParameters,
extra_parameters: Dict[str, Any],
robot: str, controller: str,
target_success_rate: float = 1.0, apply_dr: bool = False,
dr_every_n_steps_min: int = 10, dr_every_n_steps_max: int = 20, use_joint_vel_obs=False,
custom_controller_config_fpath=None, **kwargs):
super(RobosuiteEnvironment, self).__init__(level, seed, frame_skip, human_control, custom_reward_threshold,
visualization_parameters, target_success_rate)
# Validate arguments
self.frame_skip = max(1, self.frame_skip)
def validate_input(input, supported, name):
if input not in supported:
raise ValueError("Unknown Robosuite {0} passed: '{1}' ; Supported {0}s are: {2}".format(
name, input, ' | '.join(supported)
))
validate_input(self.env_id, robosuite_environments, 'environment')
validate_input(robot, robosuite_robots, 'robot')
self.robot = robot
if controller is not None:
validate_input(controller, robosuite_controllers, 'controller')
self.controller = controller
self.base_parameters = base_parameters
self.base_parameters.has_renderer = self.is_rendered and self.native_rendering
self.base_parameters.has_offscreen_renderer = self.base_parameters.use_camera_obs or (self.is_rendered and not
self.native_rendering)
# Seed
if self.seed is not None:
np.random.seed(self.seed)
random.seed(self.seed)
# Load and initialize environment
env_args = self.base_parameters.env_kwargs_dict()
env_args.update(extra_parameters)
if 'reward_scale' not in env_args and self.env_id in DEFAULT_REWARD_SCALES:
env_args['reward_scale'] = DEFAULT_REWARD_SCALES[self.env_id]
env_args['robots'] = self.robot
controller_cfg = None
if self.controller is not None:
controller_cfg = robosuite.controllers.load_controller_config(default_controller=self.controller)
elif custom_controller_config_fpath is not None:
controller_cfg = robosuite.controllers.load_controller_config(custom_fpath=custom_controller_config_fpath)
env_args['controller_configs'] = controller_cfg
self.env = robosuite.make(self.env_id, **env_args)
# TODO: Generalize this to filter any observation by name
if not use_joint_vel_obs:
self.env.modify_observable('robot0_joint_vel', 'active', False)
# Wrap with a dummy wrapper so we get a consistent API (there are subtle changes between
# wrappers and actual environments in Robosuite, for example action_spec as property vs. function)
self.env = Wrapper(self.env)
if apply_dr:
self.env = DomainRandomizationWrapper(self.env, seed=self.seed, randomize_every_n_steps_min=dr_every_n_steps_min,
randomize_every_n_steps_max=dr_every_n_steps_max)
# State space
self.state_space = self._setup_state_space()
# Action space
low, high = self.env.unwrapped.action_spec
self.action_space = BoxActionSpace(low.shape, low=low, high=high)
self.reset_internal_state()
if self.is_rendered:
image = self.get_rendered_image()
self.renderer.create_screen(image.shape[1], image.shape[0])
# TODO: Other environments call rendering here, why? reset_internal_state does it
def _setup_state_space(self):
state_space = StateSpace({})
dummy_obs = self._process_observation(self.env.observation_spec())
state_space['measurements'] = VectorObservationSpace(dummy_obs['measurements'].shape[0])
if self.base_parameters.use_camera_obs:
state_space['camera'] = PlanarMapsObservationSpace(dummy_obs['camera'].shape, 0, 255)
return state_space
def _process_observation(self, raw_obs):
new_obs = {}
# TODO: Support multiple cameras, this assumes a single camera
camera_name = self.base_parameters.camera_names
camera_obs = raw_obs.get(camera_name + '_image', None)
if camera_obs is not None:
depth_obs = raw_obs.get(camera_name + '_depth', None)
if depth_obs is not None:
depth_obs = np.expand_dims(depth_obs, axis=2)
camera_obs = np.concatenate([camera_obs, depth_obs], axis=2)
new_obs['camera'] = camera_obs
measurements = raw_obs['robot0_proprio-state']
object_obs = raw_obs.get('object-state', None)
if object_obs is not None:
measurements = np.concatenate([measurements, object_obs])
new_obs['measurements'] = measurements
return new_obs
def _take_action(self, action):
    """Clip `action` to the valid range and apply it `frame_skip` times.

    We mimic the "action_repeat" mechanism of RobosuiteWrapper in Surreal:
    same concept as frame_skip, but the reward reported is the *average*
    across the repeated steps instead of their sum.
    """
    action = self.action_space.clip_action_to_space(action)
    obs = reward = done = info = None
    rewards = []
    for _ in range(self.frame_skip):
        obs, reward, done, info = self.env.step(action)
        rewards.append(reward)
        if done:
            break
    self.last_result = RobosuiteStepResult(obs, np.mean(rewards), done, info)
def _update_state(self):
    """Refresh state/reward/done/info from the most recent step result."""
    processed = self._process_observation(self.last_result.observation)
    self.state = {key: processed[key] for key in self.state_space.sub_spaces}
    # `or 0` guards against a None reward coming back from the wrapper.
    self.reward = self.last_result.reward or 0
    self.done = self.last_result.done
    self.info = self.last_result.info
def _restart_environment_episode(self, force_environment_reset=False):
    """Reset the wrapped env and seed last_result with the reset observation."""
    self.last_result = RobosuiteStepResult(self.env.reset(), 0.0, False, {})
def _render(self):
    # Delegate on-screen rendering to the wrapped Robosuite environment.
    self.env.render()
def get_rendered_image(self):
    """Render an off-screen 512x512 RGB frame and flip it right side up."""
    frame = self.env.sim.render(camera_name=self.base_parameters.render_camera,
                                height=512, width=512, depth=False)
    # MuJoCo renders with the origin at the bottom-left; flip vertically.
    return np.flip(frame, 0)
def close(self):
    # Release renderer/simulation resources held by the wrapped environment.
    self.env.close()
class RobosuiteGoalBasedExpEnvironmentParameters(RobosuiteEnvironmentParameters):
    """Parameters object that points Coach at the goal-based Robosuite environment."""

    @property
    def path(self):
        # Dotted module path + class name, as expected by the environment factory.
        return 'rl_coach.environments.robosuite_environment:RobosuiteGoalBasedExpEnvironment'
class RobosuiteGoalBasedExpEnvironment(RobosuiteEnvironment):
    """Robosuite environment variant that exposes an observation+goal state entry."""

    def _process_observation(self, raw_obs):
        # Goal images are filled in elsewhere; only reserve the slot here.
        processed = super()._process_observation(raw_obs)
        processed['obs-goal'] = None
        return processed

    def _setup_state_space(self):
        space = super()._setup_state_space()
        # The obs-goal entry stacks observation and goal images channel-wise,
        # so it carries twice as many channels as the plain camera observation.
        obs_goal_shape = list(space['camera'].shape)
        obs_goal_shape[2] *= 2
        space['obs-goal'] = PlanarMapsObservationSpace(tuple(obs_goal_shape), 0, 255)
        return space
| StarcoderdataPython |
5470 | <filename>main.py
'''
Created on Jun 17, 2021
@author: Sean
'''
import PDF2CSV_GUI
def main():
    """Launch the PDF-to-CSV converter GUI."""
    # The constructor runs the GUI; the returned object was previously bound
    # to an unused local (`j`), which is dropped here.
    PDF2CSV_GUI.Convert_GUI()
if __name__ == "__main__":
main() | StarcoderdataPython |
1727528 | <gh_stars>0
import unittest
from unittest.mock import patch
from eiffelactory import artifactory
from eiffelactory import config
artifact_filename = 'artifact.txt'
build_path_substring = 'job/TEST/job/BUILD_NAME/255'
query_string =\
'items.find({"artifact.name":"artifact.txt",' \
'"artifact.module.build.url":' \
'{"$match":"*job/TEST/job/BUILD_NAME/255*"}}' \
').include("name","repo","path")'
wrong_query_string = \
'items.find({"artifact.name":"wrong_file.txt",' \
'"artifact.module.build.url":' \
'{"$match":"*job/TEST/job/BUILD_NAME/255*"}}' \
').include("name","repo","path")'
bad_query_string = \
'items.find({"artifact.name":"{"file.txt"}"},' \
'"artifact.module.build.url":' \
'{"$match":"*{"job/TEST/job/BUILD_NAME/255"}*"}}' \
').include("name","repo","path")'
response_dict = \
'{"results":[{"path":"eiffelactory","repo":"repo","name":"artifact.txt"}]}'
empty_dict = '{"results":[]}'
response_dict_binary = str.encode(response_dict)
empty_response_dict = str.encode(empty_dict)
def mocked_requests_post(search_url, auth, data):
    """Stand-in for requests.post returning canned AQL responses keyed on the query body."""
    class MockedPostResponse:
        # Minimal response object: tests only read .status_code and .content.
        def __init__(self, status_code, content):
            self.content = content
            self.status_code = status_code
    if data == query_string:
        # Well-formed query that matches an artifact.
        return MockedPostResponse(status_code=200,
                                  content=response_dict_binary)
    elif data == wrong_query_string:
        # imitating that the item was not found, but not that the query is
        # malformed
        return MockedPostResponse(status_code=200, content=empty_response_dict)
    elif data == bad_query_string:
        # Malformed AQL: Artifactory answers 400 with a plain-text error.
        return MockedPostResponse(status_code=400,
                                  content=b'Failed to parse query')
class MockedConfig:
    """Loads the canned test config file and exposes only its artifactory section."""
    def __init__(self):
        cfg = config.Config('tests/all_options.config')
        self.artifactory = cfg.artifactory
class TestArtifactory(unittest.TestCase):
    """Unit tests for ArtifactoryConnection AQL query building and execution.

    requests.post is replaced with mocked_requests_post, so no network access
    happens; the canned response depends only on the query string sent.
    """
    def setUp(self):
        # Fresh connection per test, configured from the canned test config.
        self.artifactory = artifactory.ArtifactoryConnection(
            MockedConfig().artifactory)
    def test__format_aql_query(self):
        # The generated AQL must match the expected query byte for byte.
        self.assertEqual(self.artifactory._format_aql_query(
            artifact_filename, build_path_substring),
            query_string)
    @patch('eiffelactory.artifactory.requests.post',
           side_effect=mocked_requests_post)
    def test__execute_aql_query(self, mocked_requests_post):
        # Matching query -> decoded response body.
        response_content = self.artifactory._execute_aql_query(query_string)
        self.assertEqual(response_content, response_dict)
        # Valid query with no hits -> empty result set, not an error.
        response_content = self.artifactory._execute_aql_query(wrong_query_string)
        self.assertEqual(response_content, empty_dict)
        # Malformed query (HTTP 400) -> None.
        response_content = self.artifactory._execute_aql_query(bad_query_string)
        self.assertEqual(response_content, None)
    @patch('eiffelactory.artifactory.requests.post',
           side_effect=mocked_requests_post)
    def test_find_artifact_on_artifactory(self, mocked_requests_post):
        # Known artifact is found and carries the requested file name.
        result = self.artifactory.\
            find_artifact_on_artifactory(artifact_filename,
                                         build_path_substring)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['name'], artifact_filename)
        # Unknown artifact yields an empty list.
        artifact_filename2 = 'wrong_file.txt'
        result = self.artifactory.\
            find_artifact_on_artifactory(artifact_filename2,
                                         build_path_substring)
        self.assertEqual(result, [])
    def tearDown(self):
        # Drop the connection so each test starts from a clean slate.
        self.artifactory = None
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3312893 | <filename>scenarios/wifi/connect_test.py<gh_stars>0
import pytest
from conftest import get_remote_hosts_dict, skip_if_not_enough_remote_nodes
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "<EMAIL>"
# skip all tests from this pytest module
# if there is no remote nodes with wifi cards
pytestmark = pytest.mark.skipif(skip_if_not_enough_remote_nodes("wifi", 1),
reason="Need at least 1 remote nodes with WiFi")
# skip test if there is less than two wifi nodes available
needTwoWifi = pytest.mark.skipif(skip_if_not_enough_remote_nodes("wifi", 2),
reason="Need at least 2 remote nodes with WiFi")
@needTwoWifi
def test_sta_connects_ap():
    """Placeholder scenario: STA should connect to an AP on two WiFi nodes."""
    # Currently this only exercises remote-host discovery and prints the plan.
    remoteHosts = get_remote_hosts_dict()
    print(remoteHosts)
    print("Create controller and 2 nodes: AP and STA. Make STA to connect to AP")
    # TODO(review): the actual AP/STA setup and connection assertions are not
    # implemented yet.
    pass
| StarcoderdataPython |
3205721 | <gh_stars>0
from . import db
from contextlib import contextmanager
'''
Column | Type | Collation | Nullable | Default
----------------------+-------------------------+-----------+----------+---------
name | character varying(255) | | |
instance_id | character varying(255) | | |
uuid | character varying(255) | | |
type | character varying(255) | | |
contact | character varying(1024) | | |
status | character varying(255) | | |
state | character varying(255) | | |
max_no_answer | integer | | not null | 0
wrap_up_time | integer | | not null | 0
reject_delay_time | integer | | not null | 0
busy_delay_time | integer | | not null | 0
no_answer_delay_time | integer | | not null | 0
last_bridge_start | integer | | not null | 0
last_bridge_end | integer | | not null | 0
last_offered_call | integer | | not null | 0
last_status_change | integer | | not null | 0
no_answer_count | integer | | not null | 0
calls_answered | integer | | not null | 0
talk_time | integer | | not null | 0
ready_time | integer | | not null | 0
external_calls_count | integer | | not null | 0
'''
class Agents(db.Model):
    """ORM model mirroring the FreeSWITCH mod_callcenter `agents` table."""
    __tablename__ = 'agents'
    # NOTE(review): no column is marked primary_key; SQLAlchemy normally
    # requires one to map a table -- confirm how this model is registered.
    name = db.Column(db.String(255))                 # e.g. 1001@default
    instance_id = db.Column(db.String(255))          # e.g. single_box
    uuid = db.Column(db.String(255))
    type = db.Column(db.String(255))                 # e.g. callback
    contact = db.Column(db.String(1024))             # e.g. user/1001
    status = db.Column(db.String(255))               # e.g. Logged out
    state = db.Column(db.String(255))                # e.g. Waiting
    # Max unanswered calls; after this many the agent is put On Break.
    max_no_answer = db.Column(db.Integer, nullable=False, default=5)
    # Wrap-up time after a call.
    wrap_up_time = db.Column(db.Integer, nullable=False, default=15)
    # Delay applied after the agent rejects a call.
    reject_delay_time = db.Column(db.Integer, nullable=False, default=30)
    busy_delay_time = db.Column(db.Integer, nullable=False, default=0)
    no_answer_delay_time = db.Column(db.Integer, nullable=False, default=0)
    last_bridge_start = db.Column(db.Integer, nullable=False, default=0)
    last_bridge_end = db.Column(db.Integer, nullable=False, default=0)
    last_offered_call = db.Column(db.Integer, nullable=False, default=0)
    last_status_change = db.Column(db.Integer, nullable=False, default=0)
    no_answer_count = db.Column(db.Integer, nullable=False, default=0)
    calls_answered = db.Column(db.Integer, nullable=False, default=0)
    talk_time = db.Column(db.Integer, nullable=False, default=0)
    ready_time = db.Column(db.Integer, nullable=False, default=0)
    external_calls_count = db.Column(db.Integer, nullable=False, default=0)
    @contextmanager
    def auto_commit(self):
        """Context manager: commit the session on success, roll back on error.

        NOTE(review): this reads `self.session`, but flask-sqlalchemy exposes
        the session as `db.session`, not as a model attribute -- confirm.
        """
        try:
            yield
            self.session.commit()  # commit the transaction
        except Exception as e:
            self.session.rollback()  # roll back on any failure
            raise e
1656663 | <gh_stars>10-100
from tinkoff_voicekit_client.Operations.long_running import ClientOperations
from tinkoff_voicekit_client.Operations import aio_long_running as aio
| StarcoderdataPython |
1702705 | import unittest
class SDSPythonSampleTests(unittest.TestCase):
    """Smoke test: importing the sample program must not raise."""
    @classmethod
    def test_main(cls):
        # Importing `program` executes its module-level sample code.
        import program
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1696253 | from ._DatasetViewSet import DatasetViewSet
| StarcoderdataPython |
4837397 | <filename>NEW_PRAC/LeetCode/Top Interview Questions/Top_Interview_Questions_Easy/Array/plusOne.py
#############Method 1#########################
class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Treat `digits` as one big integer, add 1, and split back into digits."""
        number = int("".join(str(d) for d in digits))
        return [int(ch) for ch in str(number + 1)]

# Time Complexity - O(n)
# Space Complexity - O(n)
############Method2#########################
class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Add one to the integer represented by `digits` (most significant first).

        Builds the integer from its decimal string, increments it, and splits
        it back into a list of int digits.
        """
        temp = ""
        for d in digits:
            temp += str(d)
        incremented = str(int(temp) + 1)
        result = []
        for ch in incremented:
            # Bug fix: the original appended the *character* itself, returning
            # a list of one-char strings instead of the promised List[int].
            result.append(int(ch))
        return result

# Time Complexity - O(n)
# Space Complexity - O(n)
| StarcoderdataPython |
3291822 | <filename>10_poisson.py
# Import libraries
###############################
import numpy # numerics
from matplotlib import pyplot # plotting
from matplotlib import cm # colormap
from mpl_toolkits.mplot3d import Axes3D # 3d plot
######################################################################
# FUNCTION DEFINITIONS #
######################################################################
def poisson2d(p, pp, b, dx, dy, steps):
    """Relax the 2-D Poisson equation laplacian(p) = b for `steps` iterations.

    p  : solution array (nx, ny); updated and returned.
    pp : previous-iteration array used on the first sweep.
    b  : source term, same shape as p.
    dx, dy : grid spacings; steps : number of relaxation sweeps.

    NOTE(review): `pp = p` at the end of each sweep rebinds the local name to
    the *same* array, so from the second sweep on the update reads the array
    it is about to overwrite (the RHS is still fully evaluated before the
    assignment). Confirm this was intended rather than `pp = p.copy()`.
    """
    for i in range(steps):
        # Standard 5-point Jacobi-style update on interior nodes.
        p[1:-1,1:-1] = ((pp[2:,1:-1]+pp[0:-2,1:-1])*dy**2+\
                        (pp[1:-1,2:]+pp[1:-1,0:-2])*dx**2-\
                        b[1:-1,1:-1]*dx**2*dy**2)/\
                        (2*(dx**2+dy**2))
        # Dirichlet BCs: p = 0 on all four boundaries.
        p[0,:] = 0   # @ x=0
        p[-1,:] = 0  # @ x=2
        p[:,0] = 0   # @ y=0
        p[:,-1] = 0  # @ y=1
        # Update pp (aliases p; see note in the docstring)
        pp = p
    return p
def plot2d(x, y, p, title):
    """Surface-plot the field p over the (x, y) grid with a fixed camera angle."""
    fig = pyplot.figure(figsize=(8, 5), dpi=100)
    # fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed in
    # 3.6; add_subplot is the supported way to obtain a 3-D axes.
    ax = fig.add_subplot(projection='3d')
    X, Y = numpy.meshgrid(x, y, indexing='ij')
    ax.plot_surface(X, Y, p, rstride=1, cstride=1, cmap=cm.coolwarm,
                    linewidth=0, antialiased=False)
    ax.set_xlim(0, 2)
    ax.set_ylim(0, 1)
    ax.view_init(30, 225)
    pyplot.title(title)
######################################################################
#                            USER INPUT                              #
######################################################################
# Physical parameters
##############################
# Geometry and spatial discretization
##############################
nx = 50  # Remark: Must not be too large --> small dx --> instability?
ny = 50
xmin = 0
xmax = 2
ymin = 0
ymax = 1
dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)
x = numpy.linspace(xmin, xmax, nx)
y = numpy.linspace(ymin, ymax, ny)
# Temporal discretization (unused for the steady Poisson problem)
##############################
#nt = 201 # Remark: nt had to be increased compared to 07 again
# Initial condition
##############################
# Initialize dependent variables
p = numpy.zeros((nx, ny))
pp = p.copy()  # p at previous step
b = numpy.zeros((nx, ny))
# Point sources at 1/4 and 3/4 of the grid. Integer (floor) division is
# required: `nx/4` is a float under Python 3 and raises TypeError when used
# as an array index, and `3/4*nx` truncated to 0 under Python 2, putting the
# sink at the corner instead of at three quarters of the domain.
b[nx // 4, ny // 4] = 100
b[3 * nx // 4, 3 * ny // 4] = -100
######################################################################
#                             SOME FUN                               #
######################################################################
plot2d(x, y, p, 'p initial')
plot2d(x, y, b, 'b initial')
p = poisson2d(p, pp, b, dx, dy, 100)
plot2d(x, y, p, 'after 100')
p = poisson2d(p, pp, b, dx, dy, 900)
plot2d(x, y, p, 'after 1000')
pyplot.show()
| StarcoderdataPython |
68202 | <reponame>surveybott/psiTurk<filename>tests/conftest.py
from __future__ import print_function
# https://docs.pytest.org/en/latest/fixture.html#using-fixtures-from-classes-modules-or-projects
from builtins import object
import pytest
import os
import sys
import pickle
import json
import datetime
import dateutil.parser
import ciso8601
import boto3
from botocore.stub import Stubber
import shutil
from distutils import dir_util, file_util
from faker import Faker
from importlib import reload
@pytest.fixture(autouse=True)
def bork_aws_environ():
    """Force dummy AWS credentials so no test can ever touch a real account."""
    os.environ['AWS_ACCESS_KEY_ID'] = 'foo'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'bar'
    os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'
    # Drop any configured profile so the fake keys above are actually used.
    os.environ.pop('AWS_PROFILE', None)
    yield
@pytest.fixture()
def edit_config_file():
    """Return a helper that does an in-place find/replace in ./config.txt."""
    def do_it(find, replace):
        with open('config.txt', 'r') as file:
            config_file = file.read()
        config_file = config_file.replace(find, replace)
        with open('config.txt', 'w') as file:
            file.write(config_file)
    yield do_it
@pytest.fixture(scope='function', autouse=True)
def experiment_dir(tmpdir, bork_aws_environ, edit_config_file):
    """Create a throwaway psiturk example project in tmpdir and cd into it."""
    # pytest.set_trace()
    os.chdir(tmpdir)
    import psiturk.setup_example as se
    se.setup_example()
    # The tests exercise the non-psiturk ad-server code path.
    edit_config_file('use_psiturk_ad_server = true', 'use_psiturk_ad_server = false')
    # os.chdir('psiturk-example') # the setup script already chdirs into here, although I don't like that it does that
    yield
    # Leave the project dir before deleting it.
    os.chdir('..')
    shutil.rmtree('psiturk-example')
@pytest.fixture(autouse=True)
def db_setup(mocker, experiment_dir, tmpdir, request):
    """Reload the psiturk db/model modules and initialize a fresh database.

    Reloading plus clearing the declarative metadata avoids 'table already
    defined' errors when the modules are re-imported across tests.
    """
    import psiturk.db
    reload(psiturk.db)
    import psiturk.models
    psiturk.models.Base.metadata.clear()
    reload(psiturk.models)
    from psiturk.db import init_db
    init_db()
    yield
#############
# amt-related fixtures
##############
@pytest.fixture(scope='function')
def client():
    """Plain boto3 MTurk client (stubbed by the `stubber` fixture)."""
    client = boto3.client('mturk')
    return client
@pytest.fixture(scope='function')
def stubber(client):
    """Activated botocore Stubber wrapping the MTurk client; deactivated on teardown."""
    stubber = Stubber(client)
    stubber.activate()
    yield stubber
    stubber.deactivate()
@pytest.fixture()
def amt_services_wrapper(patch_aws_services):
    """MTurkServicesWrapper built on the patched (stubbed, offline) AWS services."""
    import psiturk.amt_services_wrapper
    # Reload so the wrapper picks up the patches applied by patch_aws_services.
    reload(psiturk.amt_services_wrapper)
    amt_services_wrapper = psiturk.amt_services_wrapper.MTurkServicesWrapper()
    return amt_services_wrapper
@pytest.fixture(scope='function')
def patch_aws_services(client, mocker):
    """Patch psiturk's MTurk services to use the stubbed client and skip login."""
    import psiturk.amt_services_wrapper
    import psiturk.amt_services
    def setup_mturk_connection(self):
        # Inject the stubbed boto3 client instead of creating a real one.
        self.mtc = client
        return True
    # Credential verification always succeeds offline.
    mocker.patch.object(psiturk.amt_services.MTurkServices,
                        'verify_aws_login', lambda *args, **kwargs: True)
    mocker.patch.object(psiturk.amt_services.MTurkServices,
                        'setup_mturk_connection', setup_mturk_connection)
    my_amt_services = psiturk.amt_services.MTurkServices(
        '', '', is_sandbox=True)
    # Make every wrapper instance share this pre-patched services object.
    mocker.patch.object(
        psiturk.amt_services_wrapper.MTurkServicesWrapper, 'amt_services', my_amt_services)
@pytest.fixture(scope='session')
def faker():
    """Session-wide Faker instance for generating fake worker/hit/assignment ids."""
    faker = Faker()
    return faker
@pytest.fixture()
def stubber_prepare_create_hit(stubber, helpers, faker):
    """Queue the two stubbed AWS responses needed for one create_hit call."""
    def do_it(with_hit_id=None):
        # Default to a random HIT id unless the test pins one.
        if not with_hit_id:
            with_hit_id = faker.md5(raw_output=False)
        stubber.add_response(
            'create_hit_type', helpers.get_boto3_return('create_hit_type.json'))
        boto_return_create_hit_with_hit_type = helpers.get_boto3_return(
            'create_hit_with_hit_type.json')
        boto_return_create_hit_with_hit_type['HIT']['HITId'] = with_hit_id
        # used to always return a hit with id: 3XJOUITW8URHJMX7F00H20LGRIAQTX
        stubber.add_response('create_hit_with_hit_type',
                             boto_return_create_hit_with_hit_type)
    return do_it
@pytest.fixture()
def create_dummy_hit(stubber_prepare_create_hit, amt_services_wrapper):
    """Factory fixture: stub the AWS calls and create a minimal 1-assignment HIT.

    Returns the wrapper's create_hit result so callers can inspect it (the
    original computed the result but silently discarded it).
    """
    def do_it(with_hit_id=None, **kwargs):
        stubber_prepare_create_hit(with_hit_id)
        return amt_services_wrapper.create_hit(1, 0.01, 1, **kwargs)
    return do_it
@pytest.fixture()
def create_dummy_assignment(faker):
    """Factory fixture: insert a Participant row populated with fake identifiers.

    Caller-supplied attributes override the generated defaults; the created
    Participant is committed and returned.
    """
    from psiturk.db import db_session, init_db
    from psiturk.models import Participant

    def do_it(participant_attributes=None):
        # Avoid the mutable-default-argument pitfall of the original signature.
        overrides = participant_attributes or {}
        defaults = {
            'workerid': faker.md5(raw_output=False),
            'hitid': faker.md5(raw_output=False),
            'assignmentid': faker.md5(raw_output=False),
        }
        # Later keys win, so caller overrides replace the fakes (same
        # semantics as the old dict(list(a.items()) + list(b.items()))).
        attributes = {**defaults, **overrides}
        init_db()
        participant = Participant(**attributes)
        db_session.add(participant)
        db_session.commit()
        return participant
    return do_it
@pytest.fixture()
def list_hits(stubber, helpers, amt_services_wrapper):
    '''
    Returns two hit_ids:
        3BFNCI9LYKQ2ENUY4MLKKW0NSU437W
        3XJOUITW8URHJMX7F00H20LGRIAQTX
    '''
    def do_it(hits_json=None, all_studies=False, active=False):
        # Default to the canned list_hits fixture unless the test supplies one.
        if not hits_json:
            hits_json = helpers.get_boto3_return('list_hits.json')
        stubber.add_response('list_hits', hits_json)
        # `active` selects the filtered query; both return the .data payload.
        if active:
            results = (amt_services_wrapper.get_active_hits(
                all_studies=all_studies)).data
        else:
            results = (amt_services_wrapper.get_all_hits(
                all_studies=all_studies)).data
        return results
    return do_it
@pytest.fixture()
def expire_a_hit():
    """Return a helper that back-dates one HIT's Expiration by ten hours."""
    def do_it(hits_json, index_of_hit_to_expire=0):
        ten_hours_ago = datetime.datetime.now(datetime.timezone.utc) \
            - datetime.timedelta(hours=10)
        hits_json['HITs'][index_of_hit_to_expire]['Expiration'] = ten_hours_ago
        return hits_json
    return do_it
@pytest.fixture()
def activate_a_hit():
    """Return a helper that pushes one HIT's Expiration ten hours into the future."""
    def do_it(hits_json, index_of_hit_to_be_active=1):
        ten_hours_ahead = datetime.datetime.now(datetime.timezone.utc) \
            + datetime.timedelta(hours=10)
        hits_json['HITs'][index_of_hit_to_be_active]['Expiration'] = ten_hours_ahead
        return hits_json
    return do_it
class Helpers(object):
    """Shared helpers for loading canned boto3 responses from disk."""

    @staticmethod
    def get_boto3_return(name):
        """Load a pickled or JSON boto3 response fixture by file name.

        For JSON fixtures, every string value that parses as an ISO-8601
        timestamp is converted to a datetime, mirroring what boto3 returns.
        """
        def date_hook(json_dict):
            for key, value in list(json_dict.items()):
                try:
                    json_dict[key] = ciso8601.parse_datetime(value)
                except (TypeError, ValueError):
                    # Narrowed from a bare `except:` (which also swallowed
                    # KeyboardInterrupt/SystemExit). Non-date values are
                    # simply left untouched...
                    if key == 'Expiration':
                        # ...but an Expiration that fails to parse means a
                        # broken fixture, so surface it loudly.
                        raise
            return json_dict

        filepath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'boto3-returns', name)
        with open(filepath, 'rb') as infile:
            if filepath.endswith('.pickle'):
                return pickle.load(infile, encoding='latin1')
            elif filepath.endswith('.json'):
                return json.load(infile, object_hook=date_hook)
@pytest.fixture(scope='session')
def helpers():
    # Expose the Helpers class (static utilities) to tests via a fixture.
    return Helpers
| StarcoderdataPython |
4805212 | <filename>Função def .py
def divisao(n1, n2):
    """Divide n1 by n2; signal an invalid division (n2 == 0) by returning None."""
    if n2 == 0:
        return None
    return n1 / n2
divide = divisao(8, 2)
# Compare against None explicitly: a legitimate quotient of 0 is falsy and
# the original `if divide:` would have misreported it as 'Conta Invalida'.
if divide is not None:
    print(divide)
else:
    print('Conta Invalida')
def divisao(n1, n2):
    """Return n1 / n2, or None when n2 is zero (invalid division)."""
    return None if n2 == 0 else n1 / n2
divide = divisao(60, 4)
# Compare against None explicitly: a legitimate quotient of 0 is falsy and
# the original `if divide:` would have misreported it as 'Conta Invalida'.
if divide is not None:
    print(divide)
else:
    print('Conta Invalida')
4801636 | from setuptools import setup, find_packages
# Package metadata for the flexlmtools distribution.
setup(
    name="flexlmtools",
    version="0.1.0",
    # No runtime dependencies; the "develop" extra adds the test runner.
    install_requires=[],
    extras_require={
        "develop": ["pytest"]
    },
    author="<NAME>",
    author_email="<EMAIL>",
    description="Package for Flexlm License Manager",
    # Pick up every package under the repository root automatically.
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Unlicense",
        "Operating System :: OS Independent"
    ],
    python_requires='>=3.6'
)
def main():
    """Apply the '200 transformation' K times to N and print the result.

    One step: if N is divisible by 200 divide it by 200, otherwise append
    the digits "200" to its decimal representation.
    """
    # input
    N, K = map(int, input().split())

    # compute
    def twoN(a: int) -> int:
        if a % 200 == 0:
            # Bug fix: int(a / 200) goes through float division and loses
            # precision (or raises OverflowError) once `a` exceeds 2**53 --
            # which happens quickly, since the other branch grows the number
            # by 3 digits per step. Floor division stays exact.
            a = a // 200
        else:
            a = int(str(a) + "200")
        return a

    for i in range(K):
        N = twoN(N)

    # output
    print(N)
if __name__ == '__main__':
main()
| StarcoderdataPython |
16737 | <gh_stars>0
# -*- coding: utf-8 -*-
import argparse
import cv2 as cv
import mediapipe as mp
import sys
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_device", type=int, default=0)
parser.add_argument("--video_file", type=str, default="")
args = parser.parse_args()
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
cap = object()
if args.video_file != "":
cap = cv.VideoCapture(args.video_file)
else:
cap = cv.VideoCapture(args.video_device)
if not cap.isOpened():
print("Cannot open camera device-0")
sys.exit(-1)
else:
print("Video <width: {}, height: {}, fps: {}>".format(
cap.get(cv.CAP_PROP_FRAME_WIDTH),
cap.get(cv.CAP_PROP_FRAME_HEIGHT),
cap.get(cv.CAP_PROP_FPS)
))
fps = int(cap.get(cv.CAP_PROP_FPS))
frame_idx = 0
while 1:
ret, frame = cap.read()
if not ret:
print("Cannot receive frame, exiting ...")
break
frame_idx += 1
st = time.time()
# flip the frame horizontally for a later selfie-view display
frame = cv.cvtColor(cv.flip(frame, 1), cv.COLOR_BGR2RGB)
# to improve performance, optionally mark the frame as not writeable to pass by reference
frame.flags.writeable = False
results = pose.process(frame)
frame.flags.writeable = True
frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)
# draw the pose annotation on the frame
mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
ed = time.time()
print("Used {:.3f} secs to process frame-{:05}".format(ed - st, frame_idx))
gap = 1000//fps - int(1000 * (ed - st))
if gap < 5:
gap = 5
cv.imshow("pose_recognition_from_camera_demo", frame)
if cv.waitKey(gap) & 0xFF == 27:
break
cap.release()
cv.destroyAllWindows()
| StarcoderdataPython |
4813604 | <reponame>SuddenDevs/SuddenDev<gh_stars>1-10
import flask
import flask_login
import flask_socketio as fsio
import sqlalchemy
from . import socketio, redis
from .models import db, User
from .game_instance import GameInstance
from .tasks import play_round
from .rooms import (
get_room_of_player,
get_color_of_player,
get_players_in_room,
remove_player_from_room,
get_room_state_json_string,
set_script,
set_player_ready,
all_players_are_ready,
reset_all_players,
get_name_of_player,
remove_room,
get_room_wave,
set_room_wave,
set_player_unready,
)
NAMESPACE = '/game-session'
@socketio.on('joined', namespace=NAMESPACE)
def joined(message):
    """Sent by clients when they enter a room."""
    player_id = flask_login.current_user.id
    game_id = get_room_of_player(player_id)
    if game_id is None:
        flask.flash("sorry something isn't quite right... try joining another game")
        # NOTE(review): returning a redirect from a socket.io handler does not
        # navigate the browser -- confirm this is the intended error path.
        return flask.redirect(flask.url_for('.main.home'))
    # subscribe client to room broadcasts
    fsio.join_room(game_id)
    update_players(game_id)
    player_name = get_name_of_player(player_id)
    if player_name is not None:
        fsio.emit('message_room', player_name + ' has joined!', room=game_id, namespace=NAMESPACE)
@socketio.on('left', namespace=NAMESPACE)
def left(message):
    """Sent by clients when they leave a room."""
    player_id = flask_login.current_user.id
    # Shared teardown path with the ungraceful-disconnect handler.
    manage_player_leaves(player_id)
@socketio.on('disconnect', namespace=NAMESPACE)
def disconnect():
    """Received when a client ungracefully leaves a room."""
    # Treat a dropped connection the same as an explicit leave.
    player_id = flask_login.current_user.id
    manage_player_leaves(player_id)
@socketio.on('cancel', namespace=NAMESPACE)
def cancel(message):
    """Sent by clients to withdraw their 'ready' status before the game starts."""
    player_id = flask_login.current_user.id
    game_id = get_room_of_player(player_id)
    set_player_unready(game_id, player_id)
    # Broadcast the new room state so other clients see the change.
    update_players(game_id)
@socketio.on('submit', namespace=NAMESPACE)
def submit_code(message):
    """Sent by clients when submitting code."""
    # Persist the script on the user record first...
    flask_login.current_user.script = message
    db.session.commit()
    player_id = flask_login.current_user.id
    game_id = get_room_of_player(player_id)
    if game_id is None:
        flask.flash("sorry something isn't quite right... try joining another game")
        # NOTE(review): returning a redirect from a socket.io handler does not
        # navigate the browser -- confirm this is the intended error path.
        return flask.redirect(flask.url_for('.main.home'))
    # ...then mirror it into the room state and notify everyone.
    set_script(game_id, player_id, message)
    update_players(game_id)
    player_name = get_name_of_player(player_id)
    if player_name is not None:
        fsio.emit('message_room', player_name + ' has submitted a new script.', room=game_id, namespace=NAMESPACE)
@socketio.on('test', namespace=NAMESPACE)
def test(message):
    """
    Sent by clients to run a wave that tests their (unsubmitted) code.
    The message contains the player script to use.
    """
    player_id = flask_login.current_user.id
    game_id = get_room_of_player(player_id)
    player_jsons = get_players_in_room(game_id)
    if game_id is None or player_jsons is None:
        flask.flash("sorry something isn't quite right... try joining another game")
        # NOTE(review): a redirect return has no effect in a socket handler.
        return flask.redirect(flask.url_for('.main.home'))
    player_names = []
    player_scripts = []
    player_ids = []
    colors = []
    for player in player_jsons:
        player_names.append(player['name'])
        player_ids.append(player['id'])
        colors.append(player['color'])
        # use the submitted script
        if player['id'] == player_id:
            player_scripts.append(message)
        else:
            player_scripts.append(player['script'])
    wave = get_room_wave(game_id)
    # Only the requesting client (flask.request.sid) sees the test output.
    fsio.emit('message_local', 'Testing against wave ' + str(wave), room=flask.request.sid, namespace=NAMESPACE)
    # Celery task; .get() blocks until the round has been simulated.
    handle = play_round.delay(game_id, player_names, player_scripts, player_ids, colors, wave)
    cleared, game_states = handle.get()
    socketio.emit('result', '{\"result\": [ ' + ','.join(game_states) + ']}', room=flask.request.sid, namespace=NAMESPACE)
@socketio.on('play', namespace=NAMESPACE)
def play(message):
    """Sent by clients to indicate they are ready to play."""
    player_id = flask_login.current_user.id
    game_id = get_room_of_player(player_id)
    set_player_ready(game_id, player_id)
    update_players(game_id)
    # TODO: guard against no player entry
    player_name = get_name_of_player(player_id)
    if player_name is not None:
        fsio.emit('ready', str(player_id), room=game_id, namespace=NAMESPACE)
        fsio.emit('message_room', player_name + ' is ready to go!', room=game_id, namespace=NAMESPACE)
    # Kick off the match once the last player readies up.
    run_game_if_everyone_ready(game_id)
@socketio.on('message_chat', namespace=NAMESPACE)
def chat(message):
    """Broadcast a chat message from the current player to their room."""
    import json  # local import: only this handler needs it

    player_id = flask_login.current_user.id
    game_id = get_room_of_player(player_id)
    player_name = get_name_of_player(player_id)
    color = get_color_of_player(player_id)
    if player_name is not None:
        # Serialize with json.dumps instead of string concatenation: the chat
        # body is untrusted user input, and embedding it raw would let quotes
        # or backslashes break (or inject fields into) the JSON payload.
        payload = json.dumps({
            'name': str(player_name),
            'body': message,
            'color': '#' + color,
        })
        fsio.emit('message_chat', payload, room=game_id, namespace=NAMESPACE)
def manage_player_leaves(player_id):
    """Remove a player from their room and clean up/notify as appropriate."""
    game_id = get_room_of_player(player_id)
    if game_id is None:
        # Player was not in any room; nothing to do.
        return
    fsio.leave_room(game_id)
    # Fetch the name before the room entry is deleted.
    player_name = get_name_of_player(player_id)
    remove_player_from_room(game_id, player_id)
    if get_players_in_room(game_id) == []:
        # Last player gone: tear the room down entirely.
        remove_room(game_id)
    else:
        # notify players that one has left
        update_players(game_id)
        if player_name is not None:
            fsio.emit('message_room', player_name + ' has left.', room=game_id, namespace=NAMESPACE)
    # TODO: we should make the game run if that was the last player not ready
    # but we'd need to guard against running the game whilst it's already running
    # so, for now, we'll do nothing and if a player cancels and re-readies, it
    # should run fine
def run_game_if_everyone_ready(game_id):
    """If every player is ready, run waves until one is failed, then award trophies."""
    if not all_players_are_ready(game_id):
        return
    player_jsons = get_players_in_room(game_id)
    player_names = [p['name'] for p in player_jsons]
    player_ids = [p['id'] for p in player_jsons]
    player_scripts = [p['script'] for p in player_jsons]
    colors = [p['color'] for p in player_jsons]
    fsio.emit('message_room', 'Everyone is ready! Here we go...', room=game_id, namespace=NAMESPACE)
    current_wave = 1
    cleared = True
    while cleared:
        # Celery task; .get() blocks until the wave has been simulated.
        handle = play_round.delay(game_id, player_names, player_scripts, player_ids, colors, current_wave)
        cleared, game_states = handle.get()
        socketio.emit('result', '{\"result\": [ ' + ','.join(game_states) + ']}', room=game_id, namespace=NAMESPACE)
        current_wave += 1
        set_room_wave(game_id, current_wave)
    # The loop exits on the first failed wave, so the last cleared one is:
    highest_wave = current_wave - 1
    # (threshold, User attribute) pairs replace four copy-pasted award blocks.
    trophies = [(5, 'wave5_trophy'), (10, 'wave10_trophy'),
                (15, 'wave15_trophy'), (20, 'wave20_trophy')]
    for player in player_jsons:
        user = User.query.get(player['id'])
        for threshold, attr in trophies:
            if highest_wave >= threshold and not getattr(user, attr):
                setattr(user, attr, True)
                db.session.commit()
                fsio.emit('message_trophy',
                          player['name'] + ' has earned the Wave {} trophy!'.format(threshold),
                          room=game_id, namespace=NAMESPACE)
    reset_all_players(game_id)
def update_players(game_id):
    """Push the room's current JSON state to every client in the room."""
    room_state = get_room_state_json_string(game_id)
    if room_state is None:
        return
    fsio.emit('update', room_state, room=game_id, namespace=NAMESPACE)
| StarcoderdataPython |
3314311 | import numpy as np
import random
def mixup(X_train, Y_train, class_n,
          ratio=0.5,
          alpha=1.0,
          beta=1.0):
    """Augment a dataset with mixup samples (convex blends of example pairs).

    Args:
        X_train: array of inputs, shape (n_samples, ...).
        Y_train: array of (typically one-hot) targets, shape (n_samples, ...).
        class_n: unused; kept for backward compatibility with existing callers.
        ratio: fraction of the original sample count to synthesize.
        alpha, beta: Beta-distribution parameters for the mixing weight.

    Returns:
        (X_train, Y_train) with the synthetic samples appended.
    """
    new_data_n = int(X_train.shape[0] * ratio)
    new_X = []
    new_Y = []
    # Bug fix: random.sample() requires a sequence -- sampling from a set was
    # deprecated in Python 3.9 and raises TypeError from 3.11 on.
    indices = range(X_train.shape[0])
    for _ in range(new_data_n):
        idx1, idx2 = random.sample(indices, 2)
        lam = np.random.beta(alpha, beta)
        new_X.append(lam * X_train[idx1] + (1 - lam) * X_train[idx2])
        new_Y.append(lam * Y_train[idx1] + (1 - lam) * Y_train[idx2])
    if new_X:
        # Guard: concatenating an empty (0,)-shaped array with an N-D array
        # raises, so only append when something was generated.
        X_train = np.concatenate((X_train, np.array(new_X)))
        Y_train = np.concatenate((Y_train, np.array(new_Y)))
    return X_train, Y_train
| StarcoderdataPython |
195748 | import random
from graph import Graph
def create_signal_tree(integers):
    """Build an undirected graph from consecutive pairs and print its signal tree.

    Vertices are the distinct integers in first-occurrence order; every
    consecutive pair (a, b) becomes an edge weighted min(a, b).
    """
    vertices = [integers[0]]
    seen = {integers[0]}  # set mirror of `vertices` for O(1) membership tests
    edges = []
    for prev, curr in zip(integers, integers[1:]):
        if curr not in seen:
            seen.add(curr)
            vertices.append(curr)
        edges.append((prev, curr, min(prev, curr)))
    graph = Graph(vertices, edges, directed=False)
    print(graph.get_signal_tree())
integers = [1, 5, 2, 2, 7, 4, 1, 3]
create_signal_tree(integers) | StarcoderdataPython |
3309671 | <reponame>d39b/DQSnake<filename>qlearner.py
import tensorflow as tf
import numpy as np
from qnetwork import QNetwork
#trains a deep q-network
#uses double q-learning, i.e. separate train and target networks (with output Q_train and Q_target respectively)
#let (s,a,s2,r,t) be a transition, then the train network is trained using the loss function
#L = (T - Q_train(s,a))^2
#where T = r if t = True (i.e. s2 is a terminal state)
# and T = Q_target(s2,a*) with a* = argmax Q_train(s2,a') over all possible a'
class QLearner:
def __init__(self,config,num_actions,width,height,num_channels,memory_size,load_model=None,target_network_update_tau=None):
    """Build the train/target Q-networks and either load or initialize weights.

    config: dict with keys "layers", "clip_max", "learning_rate", "rms_decay",
        "gamma", "loss", "optimizer" and optionally "clip_grad_norm".
    num_actions / width / height / num_channels: action count and input
        image dimensions for the networks.
    load_model: optional checkpoint path; when given, variables are restored
        instead of randomly initialized.
    target_network_update_tau: soft-update coefficient for the target network
        (None presumably means hard copies -- see create_target_update_operations).
    """
    # Dedicated graph/session so multiple learners can coexist in one process.
    self.graph = tf.Graph()
    self.session = tf.Session(graph=self.graph)
    self.num_actions = num_actions
    self.width = width
    self.height = height
    self.num_channels = num_channels
    self.memory_size = memory_size
    self.target_network_update_tau = target_network_update_tau
    layers = config["layers"]
    self.clip_max = config["clip_max"]
    self.clip_grad_norm = None
    if "clip_grad_norm" in config:
        self.clip_grad_norm = config["clip_grad_norm"]
        print("Clipping gradient norm to {}".format(self.clip_grad_norm))
    self.lr = config["learning_rate"]
    self.rms_decay = config["rms_decay"]
    self.gamma = config["gamma"]
    self.loss_type = config["loss"]
    self.optimizer_type = config["optimizer"]
    #placeholders
    with self.graph.as_default():
        self.state_train_placeholder = tf.placeholder(tf.float32,[None,self.width,self.height,self.num_channels])
        self.state_target_placeholder = tf.placeholder(tf.float32,shape=[None,self.width,self.height,self.num_channels])
        self.action_index_placeholder = tf.placeholder(tf.int32,shape=[None])
        self.reward_placeholder = tf.placeholder(tf.float32,shape=[None,1])
        self.terminal_placeholder = tf.placeholder(tf.float32,shape=[None,1])
        # beta/p feed prioritized-replay importance-sampling weights.
        self.beta_placeholder = tf.placeholder(tf.float32,shape=[])
        self.p_placeholder = tf.placeholder(tf.float32,shape=[None,1])
    #create q networks (only the train network's variables are trainable)
    self.train_network = QNetwork(layers, "train", self.graph, self.num_actions,self.state_train_placeholder,trainable=True)
    self.target_network = QNetwork(layers, "target", self.graph, self.num_actions,self.state_target_placeholder,trainable=False)
    self.add_training_ops()
    self.create_target_update_operations(tau=self.target_network_update_tau)
    self.add_saver()
    #load variables from file
    if not load_model is None:
        self.load_model(load_model)
        self.variables_initialized = True
    #initialize variables
    else:
        with self.graph.as_default():
            self.init_op = tf.global_variables_initializer()
        self.run_operations(self.init_op)
        # Start with the target network as an exact copy of the train network.
        self.update_target_network()
        self.variables_initialized = True
#saves loss, q-values of train network and gradient magnitudes of all trained variables to tensorboard
def add_tensorboard_ops(self,path):
with self.graph.as_default():
summary_loss = tf.summary.scalar('loss',self.loss)
summary_q_values = tf.summary.histogram('q_values',self.train_network.q_values)
for g in self.gradients:
tf.summary.scalar(g[1].name,tf.reduce_sum(tf.square(g[0])))
self.summary_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(path,self.graph)
#train network with a batch of transitions
def train_step(self,s,a,r,s2,t,p_values,beta,write_summary=False):
#we need the q-values of the train network for s and s2
#for better efficiency we combine them into a single batch and split the result when necessary
state_concat = np.concatenate((s,s2))
feed_dict = {
self.state_train_placeholder : state_concat,
self.state_target_placeholder : s2,
self.action_index_placeholder : a,
self.reward_placeholder : r,
self.terminal_placeholder : t,
self.beta_placeholder : beta,
self.p_placeholder : p_values
}
train_ops = [self.global_step,self.loss,self.td,self.train_op]
if write_summary:
train_ops.append(self.summary_op)
results = self.run_operations(train_ops,feed_dict=feed_dict)
step = results[0]
loss = results[1]
td = results[2]
if write_summary:
summary = results[-1]
self.summary_writer.add_summary(summary, global_step=step)
return (step,loss,td)
#compute q-values for inference
def compute_q(self,s):
feed_dict = {
self.state_target_placeholder : s
}
q_op = self.target_network.q_values
q_values = self.run_operations(q_op,feed_dict=feed_dict)
return q_values
def compute_action(self,s):
q_values = self.compute_q(s)
q_max = np.argmax(q_values,axis=1)
return q_max
def add_training_ops(self):
with self.graph.as_default():
train_q_values = self.train_network.q_values
target_q_values = self.target_network.q_values
action_one_hot = tf.one_hot(self.action_index_placeholder,self.num_actions,dtype=tf.float32)
non_terminal = tf.subtract(tf.constant(1.0),self.terminal_placeholder)
#split train q-values into values for s and s2
train_q_values_split = tf.split(train_q_values,2)
train_q_values_1 = tf.multiply(train_q_values_split[0],action_one_hot)
train_q_values_2 = train_q_values_split[1]
#compute importance sampling weights (used for prioritized experience replay)
iw = tf.pow(tf.multiply((1.0/self.memory_size),tf.reciprocal(self.p_placeholder)),self.beta_placeholder)
iw_max = tf.reduce_max(iw)
iw = tf.divide(iw,iw_max)
#compute targets
r = tf.multiply(action_one_hot,self.reward_placeholder)
next_action_index = tf.argmax(train_q_values_2,axis=1,output_type=tf.int32)
row_indices = tf.range(tf.shape(next_action_index)[0])
next_action_index = tf.stack([row_indices,next_action_index],axis=1)
next_action_q_value = tf.gather_nd(target_q_values,next_action_index)
next_action_q_value = tf.expand_dims(next_action_q_value,axis=1)
next_action_q_value = tf.multiply(action_one_hot,next_action_q_value)
next_action_q_value = tf.multiply(non_terminal,next_action_q_value)
targets = tf.add(r,tf.multiply(self.gamma,next_action_q_value))
#add loss function
if self.loss_type == "mse":
#compute td values for prioritized experience memory
td = tf.subtract(targets,train_q_values_1)
self.td = tf.reduce_max(tf.abs(td),axis=1)
td = tf.multiply(td,iw)
td_clipped = tf.clip_by_value(td,(-1)*self.clip_max,self.clip_max)
self.loss = tf.nn.l2_loss(td_clipped)
self.global_step = tf.Variable(0,trainable=False,name='global_step')
#add optimizer
if self.optimizer_type == "rms":
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr,decay=self.rms_decay,momentum=0,epsilon=1e-10,centered=True)
elif self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr,epsilon=1.5e-4)
#compute gradients and clip them to fixed norm
self.gradients = self.optimizer.compute_gradients(self.loss)
if not self.clip_grad_norm is None:
tmp_gradients = []
for g in self.gradients:
tmp_gradients.append((tf.clip_by_norm(g[0],self.clip_grad_norm),g[1]))
self.gradients = tmp_gradients
#apply gradients
self.train_op = self.optimizer.apply_gradients(self.gradients,global_step=self.global_step)
#updates the target network
#with mode "hard" : v_target = v_train (i.e. the variables are completely updated at fixed intervals)
#with mode "soft" : v_target = (1-tau)*v_target + tau*v_train (i.e. variables are updated on every step but slowly)
def create_target_update_operations(self,tau=None):
self.target_update_operations = []
with self.graph.as_default():
for i in range(len(self.train_network.variables)):
var1 = self.target_network.variables[i]
var2 = self.train_network.variables[i]
update_op = None
if not tau is None:
update_op = var1.assign(tf.add(tf.multiply(var2,tau),tf.multiply(var1,1-tau)))
else:
update_op = var1.assign(var2)
self.target_update_operations.append(update_op)
def update_target_network(self):
self.run_operations(self.target_update_operations)
def run_operations(self,ops,feed_dict={}):
return self.session.run(ops,feed_dict=feed_dict)
def add_saver(self):
with self.graph.as_default():
self.saver = tf.train.Saver(save_relative_paths=True)
return self.saver
def save_model(self,filename):
if self.variables_initialized:
result = self.saver.save(self.session,filename)
return result
else:
print("Error: can't save model if variables are not initialized")
return None
def load_model(self,filename):
self.saver.restore(self.session,filename)
self.variables_initialized = True
| StarcoderdataPython |
3353648 | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .change_trust_asset import ChangeTrustAsset
from .int64 import Int64
__all__ = ["ChangeTrustOp"]
@type_checked
@type_checked
class ChangeTrustOp:
    """XDR wrapper for the ChangeTrust operation body.

    XDR Source Code::

        struct ChangeTrustOp
        {
            ChangeTrustAsset line;
            // if limit is set to 0, deletes the trust line
            int64 limit;
        };
    """

    def __init__(
        self,
        line: ChangeTrustAsset,
        limit: Int64,
    ) -> None:
        self.line = line
        self.limit = limit

    def pack(self, packer: Packer) -> None:
        """Serialize this struct's fields, in declaration order, into *packer*."""
        self.line.pack(packer)
        self.limit.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "ChangeTrustOp":
        """Deserialize a ChangeTrustOp from *unpacker* (fields in declaration order)."""
        line = ChangeTrustAsset.unpack(unpacker)
        limit = Int64.unpack(unpacker)
        return cls(
            line=line,
            limit=limit,
        )

    def to_xdr_bytes(self) -> bytes:
        """Return the raw XDR encoding of this struct."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "ChangeTrustOp":
        """Parse a ChangeTrustOp from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Return the base64-encoded XDR representation."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "ChangeTrustOp":
        """Parse a ChangeTrustOp from a base64-encoded XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.line == other.line and self.limit == other.limit

    def __str__(self):
        out = [
            f"line={self.line}",
            f"limit={self.limit}",
        ]
        # Bug fix: the brackets previously wrapped the joined string in a Python
        # list *inside* the f-string, rendering "<ChangeTrustOp ['line=..., limit=...']>".
        # The literal brackets belong outside the interpolation.
        return f"<ChangeTrustOp [{', '.join(out)}]>"
| StarcoderdataPython |
3387729 | import lightgbm as lgb
import numpy as np
import sklearn
import pandas as pd
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import mean_squared_error
import riskrewardutil as rru
# Experiment layout: everything is rooted at the shared research filesystem.
basePath = '/research/remote/petabyte/users/robert/Utilities/ltr-baseline/mslr10k/'
expPath = "/research/remote/petabyte/users/robert/LightGBM/Experiments/"
dataPath = basePath + 'dat/MSLR-WEB10K/'    # MSLR-WEB10K fold directories live here
modelPath = basePath + 'model/'             # saved LightGBM models
runPath = basePath + 'run/'                 # generated run files
qrelsFile = dataPath + '../all.test.qrels'  # combined test qrels used for final scoring
def group_counts(arr):
    """Run-length sizes of consecutive equal values in *arr*.

    For a query-id column sorted by query this yields the number of documents
    per query, i.e. the ``group`` argument LightGBM's ranking objective expects.
    """
    change = np.ones(arr.size, dtype=int)
    change[1:] = (arr[:-1] != arr[1:]).astype(int)
    change = np.append(change, 1)  # sentinel closes the final run
    return np.diff(np.flatnonzero(change))
# Hyper-parameter sweep over early-stopping rounds, 5-fold cross validation,
# with a custom TRisk feval from riskrewardutil for validation.
name = "lgbm.2000.63.0.05.0.4.withTrisk"
combineddf = pd.DataFrame()
earlystop = [1000,1500,2000]
for stop in earlystop:
    combineddf = pd.DataFrame()
    # NOTE(review): this appends cumulatively, so the second pass produces
    # "...earlystop1000.earlystop1500" -- presumably a fresh per-stop name was
    # intended; confirm before reusing these artifact names.
    name = name + '.earlystop%d' % (stop)
    for fold in range(1,6):
        suffix = name + ".fold%d" % (fold)
        # Load train/validation/test folds (svmlight format), grouped by query id.
        X, y, qidtrain = load_svmlight_file(dataPath + 'Fold%d/train.txt' % (fold), query_id=True)
        train_data = lgb.Dataset(X, label=y, group=group_counts(qidtrain), free_raw_data=False)
        X_valid, y_valid, qidValid = load_svmlight_file(dataPath + 'Fold%d/vali.txt' % (fold), query_id=True)
        valid_data = lgb.Dataset(X_valid, label=y_valid, group=group_counts(qidValid), free_raw_data=False)
        valid_data.reference = train_data
        X_test, y_test, qid = load_svmlight_file(dataPath + 'Fold%d/test.txt' % (fold), query_id=True)
        test_data = lgb.Dataset(X_test, label=y_test, group=group_counts(qid), free_raw_data=False)
        #Global variables needed for custom metrics, qid and qrels for each valid file
        qidvalid= qidValid
        qrelsvalid = dataPath + 'Fold%d/vali.qrels' % (fold)
        qidtrain = qidtrain
        qrelstrain = dataPath + 'Fold%d/train.qrels' % (fold)
        #Another global variables containing bm25 features for each fold
        baselinename = 'resultsmslr10k/evalMetrics/baselinevalrun%d' % (fold)
        baselineeval = 'resultsmslr10k/evalMetrics/baselinevaleval%d' % (fold)
        baselinetrainname = 'resultsmslr10k/evalMetrics/baselinetrainrun%d' % (fold)
        baselinetraineval = 'resultsmslr10k/evalMetrics/baselinetraineval%d' % (fold)
        temppath = '/research/remote/petabyte/users/robert/LightGBM/Experiments/resultsmslr10k/evalMetrics/'
        metrics = rru.riskrewardUtil(qidvalid, qrelsvalid, baselinename, baselineeval, qidtrain, qrelstrain, baselinetrainname, baselinetraineval, temppath)
        eval_result = {}
        #Setup Param File and generate different models for hyper parameter tuning
        param = {'num_leaves':63, 'num_trees':2000, 'objective':'lambdarank',
                 'learning_rate': 0.05,'feature_fraction': 0.4,
                 'bagging_fraction': 0.8,'bagging_freq': 5,
                 'verbose': 1, 'early_stopping_rounds': stop}
        param['metric'] = 'None'  # rely solely on the custom feval (TRisk) for validation
        #Train Model
        # NOTE(review): the positional num_round=10 caps boosting at 10 iterations even
        # though num_trees=2000 / early_stopping_rounds are configured -- confirm which
        # setting LightGBM actually honors here.
        num_round = 10
        bst = lgb.train(param, train_data, num_round, valid_sets=[valid_data], feval=metrics.trisk1, evals_result=eval_result)
        bst.save_model(modelPath + suffix)
        # Per-fold run output accumulated into one combined run file per sweep setting.
        combineddf = combineddf.append(metrics.predictgenerateRunFile(modelPath + suffix, runPath + suffix, X_test, qid))
        evals = pd.DataFrame.from_dict(eval_result)
        evals.to_csv(basePath + 'evaluation_result/' + suffix)
    combineddf.to_csv(runPath + 'run.all.' + name, index=False, header=None, sep=' ')
    #TO DO
    #Get all the run file as 1 file, then eval them with evalScore
    metrics.evalScore(runPath + 'run.all.' + name, expPath + 'resultsmslr10k/evalPerQueryLGBMTriskes%d' % (stop), qrelsFile)
| StarcoderdataPython |
1764585 | <reponame>mengjian0502/StructuredCG_RRAM<gh_stars>0
"""
Channel Gating Layers
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .qmodules import WQ, AQ
def _gen_mask(mtype, dim):
    """
    Pre-defined computation masks.

    Build a fixed (H, W) binary pattern that selects which spatial positions take
    the conditional compute path. `dim` is the (N, C, H, W) shape of the partial-sum
    tensor; only dim[2]/dim[3] are read. mtype 5 is a checkerboard (densest gating);
    larger mtype zeroes fewer positions; mtype < 5 is folded to 10 - mtype.
    NOTE: allocates on the GPU (.cuda()), so this requires CUDA.
    """
    mask = torch.ones((dim[2], dim[3])).cuda()
    reverse = False
    if mtype < 5:
        mtype = 10 - mtype
        # reverse = True
    if mtype == 5:
        # checkerboard: half of all positions gated
        mask[0::2, 1::2] = 0
        mask[1::2, 0::2] = 0
    elif mtype == 6:
        # 2-in-5 diagonal pattern on a 5x5 tile
        mask[0::5, 2::5] = 0
        mask[0::5, 4::5] = 0
        mask[1::5, 1::5] = 0
        mask[1::5, 3::5] = 0
        mask[2::5, 0::5] = 0
        mask[2::5, 2::5] = 0
        mask[3::5, 1::5] = 0
        mask[3::5, 4::5] = 0
        mask[4::5, 0::5] = 0
        mask[4::5, 3::5] = 0
    elif mtype == 7:
        # 1-in-3 diagonal on a 3x3 tile
        mask[0::3, 2::3] = 0
        mask[1::3, 1::3] = 0
        mask[2::3, 0::3] = 0
    elif mtype == 8:
        mask[1::2, 1::2] = 0
    elif mtype == 9:
        mask[1::3, 1::3] = 0
        mask[2::3, 0::3] = 0
    elif mtype == 10:
        mask[1::3, 1::3] = 0
    else:
        raise NotImplementedError
    # NOTE(review): dead branch -- `reverse` is never set True (the assignment above is
    # commented out), so mtype < 5 yields the same mask as its 10 - mtype counterpart.
    if reverse is True:
        mask = torch.abs(mask - 1)
    return mask
class Greater_Than(torch.autograd.Function):
    """Binary step gate (x > 0 -> 1.0 else 0.0) with a straight-through gradient.

    The true derivative of the step function is zero almost everywhere, so the
    backward pass passes the incoming gradient through unchanged, letting the
    gating threshold remain trainable.
    """

    @staticmethod
    def forward(ctx, input):
        return torch.gt(input, 0).float()

    # Fixes two defects in the original:
    #  * `backward` must be a @staticmethod on an autograd.Function (forward already was);
    #  * it returned (grad_input, None) -- two gradients for a single forward input,
    #    which makes autograd raise "returned an incorrect number of gradients".
    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: identity gradient.
        return grad_output.clone()
class Conv2d_CG(nn.Conv2d):
    """Conv2d with learned channel gating plus 4-bit weight/activation quantization.

    With cg_groups > 1 the weight is split into a block-diagonal "base" part
    (always computed, producing partial sum Yp) and the complementary
    "conditional" part Yc, added only where a learned per-position gate fires.
    Requires CUDA (masks are allocated with .cuda()).
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
                 slice_size=16, cg_groups=1, cg_threshold_init=-6.0, cg_alpha=2.0, hard_sig=False):
        super(Conv2d_CG, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.hard_sig = hard_sig  # hardtanh-based gate instead of sigmoid
        self.iter = 1
        self.cg_in_chunk_size = 16 # fixed input chunk
        self.cg_groups = cg_groups
        # channel gating
        self.cg_alpha = cg_alpha  # gate sharpness
        self.cg_threshold = nn.Parameter(cg_threshold_init * torch.ones(1, out_channels, 1, 1))  # learned per-channel threshold
        # NOTE(review): overwrites the fixed 16 assigned a few lines above.
        self.cg_in_chunk_size = in_channels // self.cg_groups
        self.cg_bn = nn.functional.instance_norm  # normalizes Yp before thresholding
        self.cg_gt = Greater_Than.apply           # straight-through (> 0) comparison
        # if self.cg_in_chunk_size < 128:
        #     self.slice_size = out_channels // self.cg_groups
        # else:
        #     self.slice_size = 128
        self.slice_size = slice_size  # number of output channels sharing one gate decision
        # Block-diagonal mask selecting the always-on "base" weights.
        self.mask_base = torch.zeros_like(self.weight.data).cuda()
        self.cg_out_chunk_size = out_channels // self.cg_groups
        for i in range(self.cg_groups):
            self.mask_base[i*self.cg_out_chunk_size:(i+1)*self.cg_out_chunk_size,i*self.cg_in_chunk_size:(i+1)*self.cg_in_chunk_size,:,:] = 1
        self.mask_cond = 1 - self.mask_base  # complementary, conditionally-computed weights
        # quantization
        self.weight_quant = WQ(wbit=4, num_features=out_channels, channel_wise=0)
        self.input_quant = AQ(abit=4, num_features=out_channels, alpha_init=10.0)

    def forward(self, input):
        # Quantization
        weight_q = self.weight_quant(self.weight)
        input_q = self.input_quant(input)
        if self.cg_groups > 1:
            # partial sum of the base path
            self.Yp = F.conv2d(input_q, weight_q*self.mask_base, None, self.stride, self.padding, self.dilation, self.groups)
            # block gating: average Yp over each slice of channels so that all channels
            # in a slice share a single gate decision per spatial position
            Yp_ = nn.functional.avg_pool3d(self.Yp, kernel_size=(self.slice_size,1,1), stride=(self.slice_size, 1, 1))
            Yp_ = Yp_.repeat_interleave(self.slice_size, dim=1)
            if self.hard_sig:
                k = 0.25
                pre_d = self.cg_alpha*(self.cg_bn(Yp_)-self.cg_threshold)
                self.d = self.cg_gt(torch.nn.functional.hardtanh(k*(pre_d+2), min_val=0., max_val=1.)-0.5)
            else:
                self.d = self.cg_gt(torch.sigmoid(self.cg_alpha*(self.cg_bn(Yp_)-self.cg_threshold))-0.5)
            # report statistics (how many positions took the conditional path)
            self.num_out = self.d.numel()
            self.num_full = self.d[self.d>0].numel()
            # group sparsity
            self.cond_group = nn.functional.avg_pool3d(self.d.data, kernel_size=(self.slice_size,1,1), stride=(self.slice_size, 1, 1))
            self.nonzero_g = self.cond_group[self.cond_group>0].numel()
            self.total_g = self.cond_group.numel()
            self.Yc = F.conv2d(input_q, weight_q * self.mask_cond, None, self.stride, self.padding, self.dilation, self.groups)
            # conditional path only contributes where the gate fired
            out = self.Yp + self.Yc * self.d
        else:
            # import pdb; pdb.set_trace()
            out = F.conv2d(input_q, weight_q, None, self.stride, self.padding, self.dilation, self.groups)
        if not self.bias is None:
            out += self.bias.view(1, -1, 1, 1).expand_as(out)
        return out

    def extra_repr(self):
        return super(Conv2d_CG, self).extra_repr() + ', slice_size={}, cg_groups={}, hard_sig={}'.format(self.slice_size, self.cg_groups, self.hard_sig)
class Linear_CG(nn.Linear):
    """Linear layer with 4-bit weight/activation quantization.

    NOTE(review): gamma, alpha, block_size and count are stored but never read in
    forward -- presumably hooks for a gating variant; confirm before relying on them.
    """
    def __init__(self, in_features, out_features, bias=True, gamma=0.0, alpha=0.0, block_size=16):
        super(Linear_CG, self).__init__(in_features, out_features, bias)
        self.gamma = gamma
        self.alpha = alpha
        self.block_size = block_size
        self.count = -1 # disable in-module mask generation
        # quantization
        self.weight_quant = WQ(wbit=4, num_features=out_features, channel_wise=0)
        self.input_quant = AQ(abit=4, num_features=out_features, alpha_init=10.0)

    def forward(self, input):
        # Quantize both operands, then a plain linear transform.
        weight_q = self.weight_quant(self.weight)
        input_q = self.input_quant(input)
        out = F.linear(input_q, weight_q, self.bias)
        return out

    def extra_repr(self):
        return super(Linear_CG, self).extra_repr() + ', gamma={}, alpha={}, block_size={}'.format(
            self.gamma, self.alpha, self.block_size)
class QConv2d_CG(nn.Conv2d):
    """Quantized Conv2d whose conditional path is gated by a *fixed* spatial pattern
    (built by _gen_mask) instead of a learned threshold. Requires CUDA.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
                 slice_size=16, cg_groups=1, cg_threshold_init=-6.0, cg_alpha=2.0, hard_sig=False, wbit=32, abit=32, mtype=5):
        super(QConv2d_CG, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.hard_sig = hard_sig
        self.iter = 1
        self.cg_groups = cg_groups
        self.mtype = mtype  # which pre-defined spatial mask to use (see _gen_mask)
        # channel gating
        self.cg_alpha = cg_alpha
        # NOTE(review): plain tensor here (not an nn.Parameter as in Conv2d_CG), and it
        # is never read in forward -- presumably vestigial since the gate is a fixed
        # pattern; confirm.
        self.cg_threshold = cg_threshold_init * torch.ones(1, out_channels, 1, 1)
        self.cg_in_chunk_size = in_channels // self.cg_groups
        self.cg_bn = nn.functional.instance_norm
        self.cg_gt = Greater_Than.apply
        self.slice_size = slice_size  # number of output channels sharing one mask pattern
        # Block-diagonal mask selecting the always-on "base" weights.
        self.mask_base = torch.zeros_like(self.weight.data).cuda()
        self.cg_out_chunk_size = out_channels // self.cg_groups
        for i in range(self.cg_groups):
            self.mask_base[i*self.cg_out_chunk_size:(i+1)*self.cg_out_chunk_size,i*self.cg_in_chunk_size:(i+1)*self.cg_in_chunk_size,:,:] = 1
        self.mask_cond = 1 - self.mask_base
        # quantization
        self.wbit = wbit
        self.abit = abit
        self.weight_quant = WQ(wbit=wbit, num_features=out_channels, channel_wise=0)
        self.input_quant = AQ(abit=abit, num_features=out_channels, alpha_init=10.0)

    def update_mask(self, m):
        """Switch to a different pre-defined mask pattern (effective next forward)."""
        self.mtype = m

    def forward(self, input):
        # Quantization
        weight_q = self.weight_quant(self.weight)
        input_q = self.input_quant(input)
        if self.cg_groups > 1:
            # partial sum of the base path
            self.Yp = F.conv2d(input_q, weight_q*self.mask_base, None, self.stride, self.padding, self.dilation, self.groups)
            # block gating with fixed pattern: one (H, W) mask per slice of channels,
            # alternating (inverted) between consecutive slices
            dims = self.Yp.size()
            mask = _gen_mask(mtype=self.mtype, dim=dims)
            mask = mask.unsqueeze(0).unsqueeze(0)
            masks = mask.repeat_interleave(self.slice_size, dim=1)
            num_chunk = self.weight.size(0) // self.slice_size
            self.d = torch.Tensor([]).cuda()
            for ii in range(num_chunk):
                if (ii+1) % 2 == 0:
                    # masks = torch.rot90(masks, 1, dims=[2,3])
                    masks = 1 - masks  # invert pattern for every other chunk
                self.d = torch.cat((self.d, masks), dim=1)
            self.d = torch.cat(input_q.size(0)*[self.d]) # repeat along batch dimension
            # report statistics
            self.num_out = self.d.numel()
            self.num_full = self.d[self.d>0].numel()
            # group sparsity
            self.cond_group = nn.functional.avg_pool3d(self.d.data, kernel_size=(self.slice_size,1,1), stride=(self.slice_size, 1, 1))
            self.nonzero_g = self.cond_group[self.cond_group>0].numel()
            self.total_g = self.cond_group.numel()
            self.Yc = F.conv2d(input_q, weight_q * self.mask_cond, None, self.stride, self.padding, self.dilation, self.groups)
            # NOTE(review): debug leftover -- drops into pdb on any runtime error (e.g.
            # a shape mismatch between Yp and the assembled mask); replace with a real
            # error before production use.
            try:
                out = self.Yp + self.Yc * self.d
            except:
                import pdb;pdb.set_trace()
        else:
            out = F.conv2d(input_q, weight_q, None, self.stride, self.padding, self.dilation, self.groups)
        if not self.bias is None:
            out += self.bias.view(1, -1, 1, 1).expand_as(out)
        return out

    def extra_repr(self):
        return super(QConv2d_CG, self).extra_repr() + ', slice_size={}, cg_groups={}, hard_sig={}, wbit={}, abit={}'.format(self.slice_size, self.cg_groups, self.hard_sig, self.wbit, self.abit)
107588 | <filename>LeetCodeSolutions/LeetCode_0371.py<gh_stars>10-100
class Solution:
    def getSum(self, a: int, b: int) -> int:
        """Add two integers without '+'/'-' by emulating 32-bit two's-complement
        ripple-carry addition: XOR is the partial sum, (AND << 1) is the carry."""
        MASK = 0xFFFFFFFF
        x, y = a & MASK, b & MASK
        while y:
            x, y = x ^ y, ((x & y) << 1) & MASK
        # Re-interpret the unsigned 32-bit result as a signed Python int.
        return x if x < 0x80000000 else ~(x ^ MASK)
| StarcoderdataPython |
1744616 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): <NAME>
# the package
import altar
import altar.cuda
# declaration
class cudaCoolingStep:
    """
    Encapsulation of the state of the calculation at some particular β value
    """

    # public data
    beta = None # the inverse temperature
    theta = None # a (samples x parameters) matrix
    prior = None # a (samples) vector with logs of the sample likelihoods
    data = None # a (samples) vector with the logs of the data likelihoods given the samples
    posterior = None # a (samples) vector with the logs of the posterior likelihood

    # read-only public data
    @property
    def samples(self):
        """
        The number of samples
        """
        # encoded in θ (rows)
        return self.theta.shape[0]

    @property
    def parameters(self):
        """
        The number of model parameters
        """
        # encoded in θ (columns)
        return self.theta.shape[1]

    # factories
    @classmethod
    def start(cls, annealer):
        """
        Build the first cooling step by asking {model} to produce a sample set from its
        initializing prior, compute the likelihood of this sample given the data, and compute a
        (perhaps trivial) posterior
        """
        # get the model
        model = annealer.model
        # one sample per chain, in the GPU precision configured on the job
        samples = model.job.chains
        precision = model.job.gpuprecision
        # build an uninitialized step (beta starts at 0)
        step = cls.alloc(samples=samples, parameters=model.parameters, dtype=precision)
        # run model here is a bad idea, moved to cudaannealing
        # initialize it
        # model.cuInitSample(theta=step.theta)
        # compute the likelihoods
        #model.updateModel(annealer=annealer)
        #model.likelihoods(annealer=annealer, step=step, batch=samples)
        # return the initialized state
        return step

    @classmethod
    def alloc(cls, samples, parameters, dtype):
        """
        Allocate storage for the parts of a cooling step
        """
        # dtype must be given to avoid unmatched precisions
        # allocate the initial sample set
        theta = altar.cuda.matrix(shape=(samples, parameters), dtype=dtype).zero()
        # allocate the likelihood vectors
        prior = altar.cuda.vector(shape=samples, dtype=dtype).zero()
        data = altar.cuda.vector(shape=samples, dtype=dtype).zero()
        posterior = altar.cuda.vector(shape=samples, dtype=dtype).zero()
        # build one of my instances and return it
        return cls(beta=0, theta=theta, likelihoods=(prior, data, posterior))

    # interface
    def clone(self):
        """
        Make a new step with a duplicate of my state
        """
        # make copies of my state (deep copies of the device containers)
        beta = self.beta
        theta = self.theta.clone()
        likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()
        # make one and return it
        return type(self)(beta=beta, theta=theta, likelihoods=likelihoods)

    def computePosterior(self, batch=None):
        """
        (Re-)Compute the posterior from prior, data, and (updated) beta
        """
        # default to the full sample set
        batch = batch if batch is not None else self.samples
        # copy prior to posterior
        self.posterior.copy(self.prior)
        # add beta*dataLikelihood (log-space: log posterior = log prior + β·log data)
        altar.cuda.cublas.axpy(alpha=self.beta, x=self.data, y=self.posterior, batch=batch)
        # all done
        return self

    def copyFromCPU(self, step):
        """
        Copy cpu step to gpu step
        """
        # scalar, then the four device containers
        self.beta = step.beta
        self.theta.copy_from_host(source=step.theta)
        self.prior.copy_from_host(source=step.prior)
        self.data.copy_from_host(source=step.data)
        self.posterior.copy_from_host(source=step.posterior)
        return self

    def copyToCPU(self, step):
        """
        copy gpu step to cpu step
        """
        step.beta = self.beta
        self.theta.copy_to_host(target=step.theta)
        self.prior.copy_to_host(target=step.prior)
        self.data.copy_to_host(target=step.data)
        self.posterior.copy_to_host(target=step.posterior)
        return self

    # meta-methods
    def __init__(self, beta, theta, likelihoods, **kwds):
        # chain up
        super().__init__(**kwds)
        # store the temperature
        self.beta = beta
        # store the sample set
        self.theta = theta
        # store the likelihoods
        self.prior, self.data, self.posterior = likelihoods
        # all done
        return

    # local
    precision = None
# end of file
| StarcoderdataPython |
144545 | from typing import List
class Solution:
    def findUnsortedSubarray(self, nums: List[int]) -> int:
        """Length of the shortest contiguous subarray that, once sorted in place,
        makes the whole array non-decreasing; 0 if it is already sorted."""
        ordered = sorted(nums)
        # Positions where the array disagrees with its sorted self bound the
        # minimal window that must be re-sorted.
        bad = [i for i, (have, want) in enumerate(zip(nums, ordered)) if have != want]
        return bad[-1] - bad[0] + 1 if bad else 0
if __name__ == '__main__':
    # Ad-hoc manual check; alternative sample inputs left commented out.
    # nums = [2, 6, 4, 8, 10, 9, 15]
    # nums = [2, 3, 3, 2, 4]
    # nums = [1, 3, 2, 2, 2]
    nums = [1, 3, 5, 4, 2]
    # nums = [1, 2, 4, 5, 3]
    ret = Solution().findUnsortedSubarray(nums)
    print(ret)  # expected 4 for [1, 3, 5, 4, 2]
| StarcoderdataPython |
1758437 | <filename>tests/test_utils.py
import json
import os
from connect.config import Config
from typing import Dict, Any
class TestUtils:
    """Helpers for loading JSON fixtures (relative to this module) and for
    building fulfillment/tier-config automation objects from config.json."""

    @staticmethod
    def get_request(file, model_class):
        """Deserialize a request fixture file into an instance of *model_class*."""
        with open(os.path.join(os.path.dirname(__file__), file)) as request_file:
            return model_class.deserialize(request_file.read())

    @staticmethod
    def get_response(response, root_folder=None):
        """Load response/<response>.json, optionally under *root_folder*."""
        prefix = root_folder + "/" if root_folder else ""
        return TestUtils.extract_data_file(prefix + "response/" + response + ".json")

    @staticmethod
    def extract_data_file(file):
        """Parse a JSON file located relative to this module's directory."""
        path = os.path.join(os.path.dirname(__file__), file)
        with open(path) as config_file:
            return json.load(config_file)

    @staticmethod
    def get_product_fulfillment(fulfillment):
        """Instantiate *fulfillment* with a Config built from config.json."""
        cfg = TestUtils.get_config_file()
        return fulfillment(Config(
            api_url=cfg['apiEndpoint'],
            api_key=cfg['apiKey'],
            products=cfg['products']
        ))

    @staticmethod
    def get_tier_configuration(tierconfig):
        """Instantiate *tierconfig* with a Config built from config.json."""
        cfg = TestUtils.get_config_file()
        return tierconfig(Config(
            api_url=cfg['apiEndpoint'],
            api_key=cfg['apiKey'],
            products=cfg['products']
        ))

    @staticmethod
    def get_config_file() -> Dict[str, Any]:
        """Read ./config.json from the current working directory."""
        with open('./config.json') as file_handle:
            return json.load(file_handle)
3308891 | #!/usr/bin/python3
__DOC__ = """ talk to EMM server *at all* from ordinary Python
See also https://github.com/google/android-management-api-samples/blob/master/notebooks/quickstart.ipynb
"""
# Copyright 2018 Google LLC.
# © <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
## Setup
######################################################################
# The base resource of your Android Management solution is a Google Cloud Platform project.
# All other resources (`Enterprises`, `Devices`, `Policies`, etc) belong to the project, and
# the project controls access to these resources.
#
# A solution is typically associated with a single project, but
# you can create multiple projects if you want to restrict access to resources.
#
# You can create a project in the Google Cloud Console:
#
# 1. Go to the Cloud Console: https://console.cloud.google.com/cloud-resource-manager
# 2. Click `CREATE PROJECT`.
# 3. Enter your project details, and then click `CREATE`.
# 4. Read and remember the project ID; run ./quickstart.py --project-id=X.
import argparse
import base64
import json
import logging
import os
import pathlib
import subprocess
import urllib.parse
import apiclient.discovery
import google.oauth2.service_account
import google_auth_oauthlib.flow
import googleapiclient
import jsmin # purely so policy file can have comments
import pypass
import requests # purely for get().json() shorthand
# Command-line interface and JSONC config loading.
parser = argparse.ArgumentParser(description=__DOC__)
parser.add_argument(
    'json_config_path',
    nargs='?',
    default=pathlib.Path('frobozz-policies.jsonc'),
    type=pathlib.Path,
    # example='<EMAIL>',
    help="""
    A file containing a JSON object with at least {"policies": {"my-cool-policy": ...}}.
    If it contains "gcloud_project_id" you won't be prompted for one.
    If it contains "enterprise_name" you won't be prompted to create one.
    Note that "enterprise_name" is a token provided by Google, NOT one you make up.
    If it contains "gcloud_service_account" it'll be looked up in ~/.password-store.
    Otherwise, you will have to bounce through a browser every time.
    """)
parser.add_argument(
    '--work-profile-mode', action='store_true',
    help="""
    Emit enrollment URL instead of enrollment QR code.
    QR code is easier for "fully managed mode" (device only has restricted work account).
    URL is easier for "work profile mode" (device has an unrestricted non-work account).
    """)
parser.add_argument(
    '--enrollment-policy-name',
    help="""
    At the end of this garbage script, it generates an enrollment QR code for SOME policy.
    Which one is semi-random. To force a specific one, use this.
    e.g. --enrollment-policy-name=policy1
    """)
parser.add_argument('--hurry-the-fuck-up', action='store_true')
# --debug/--verbose both write logging_level; if both are given, the later one wins.
parser.add_argument('--debug', dest='logging_level', action='store_const', const=logging.DEBUG, default=logging.NOTSET)
parser.add_argument('--verbose', dest='logging_level', action='store_const', const=logging.INFO, default=logging.NOTSET)
parser.add_argument('--google-play-iframe', action='store_true')
parser.add_argument('--delete-some-tablets', nargs='*')
parser.add_argument('--migrate-some-tablets', nargs='*')
args = parser.parse_args()
logging.getLogger().setLevel(args.logging_level)
# jsmin strips // comments, so the config file may be JSONC rather than strict JSON.
with args.json_config_path.open() as f:
    json_config_object = json.loads(jsmin.jsmin(f.read()))
# Sanity check: fail fast if the requested enrollment policy isn't defined.
if args.enrollment_policy_name:
    if args.enrollment_policy_name not in json_config_object['policies']:
        raise RuntimeError('Bogus enrollment policy name',
                           args.enrollment_policy_name,
                           json_config_object['policies'].keys())
# Authenticate: service-account key from pass(1) when configured, otherwise an
# interactive browser-based OAuth flow.
if 'service_account' in json_config_object:
    # first-time setup has already been done, so get an oauth token from the private key.
    service_account_object = json.loads(
        pypass.PasswordStore().get_decrypted_password(
            json_config_object['service_account']).strip())
    # Basic sanity checks
    if service_account_object['type'] != 'service_account':
        raise RuntimeError('wrong json')
    if 'private_key' not in service_account_object:
        raise RuntimeError('wrong json')
    gcloud_project_id = service_account_object['project_id']
    logging.debug('Project ID is: %s', gcloud_project_id)
    androidmanagement = apiclient.discovery.build(
        serviceName='androidmanagement',
        version='v1',
        cache_discovery=False, # disable some stupid warning
        credentials=google.oauth2.service_account.Credentials.from_service_account_info(
            info=service_account_object,
            scopes=['https://www.googleapis.com/auth/androidmanagement']))
    logging.info('Authentication succeeded.')
else:
    # FIXME: CHANGE THESE MAGIC NUMBERS;
    # DO NOT HARD-CODE THEM IN A PUBLIC REPO!
    # This is a public OAuth config, you can use it to run this guide, but
    # please use different credentials when building your own solution.
    service_account_object = {
        'client_id':'882252295571-uvkkfelq073vq73bbq9cmr0rn8bt80ee.apps.googleusercontent.com',
        'client_secret': '<KEY>',
        'auth_uri':'https://accounts.google.com/o/oauth2/auth',
        'token_uri':'https://accounts.google.com/o/oauth2/token'
    }
    gcloud_project_id = input('What is the gcloud project ID (that runs your EMM service?): ')
    # To create and access resources,
    # you must authenticate with an account that has edit rights over your project.
    # To start the authentication flow, run the cell below.
    #
    # When you build a server-based solution, you should create a
    # service account so you don't need to authorize the access every time.
    #
    # https://developers.google.com/android/management/service-account
    # Create the API client.
    androidmanagement = apiclient.discovery.build(
        'androidmanagement', 'v1',
        credentials=google_auth_oauthlib.flow.InstalledAppFlow.from_client_config(
            scopes=['https://www.googleapis.com/auth/androidmanagement'],
            client_config={'installed': service_account_object}
        ).run_console())
    print('\nAuthentication succeeded.')
# Get WPA2-PSK passphrases -- if any -- out of pypass.
# Fills Passphrase fields in-place; redact_some_passphrases below is the inverse.
for policy in json_config_object.get('policies', {}).values():
    for networkConfiguration in policy.get('openNetworkConfiguration', {}).get('NetworkConfigurations', []):
        if 'Passphrase' in networkConfiguration.get('WiFi', {}):
            logging.info('Asking pass(1) for WiFi PSK for %s', networkConfiguration['WiFi']['SSID'])
            networkConfiguration['WiFi']['Passphrase'] = pypass.PasswordStore().get_decrypted_password(
                f"android-wifi-PSK/{networkConfiguration['WiFi']['SSID']}").strip()
def redact_some_passphrases(device_or_policy_or_webapp: dict) -> None:  # DESTRUCTIVE
    """Blank out any WiFi PSK passphrases in-place (mutates the argument).

    Mirrors the injection loop above, so dumped/cached copies of devices,
    policies or webapps never contain real passphrases.
    """
    network_configs = (device_or_policy_or_webapp
                       .get('openNetworkConfiguration', {})
                       .get('NetworkConfigurations', []))
    for config in network_configs:
        wifi = config.get('WiFi', {})
        if 'Passphrase' in wifi:
            wifi['Passphrase'] = None
# One-shot command-line modes: each of these performs a single action and
# exits without running the rest of the provisioning flow.
if args.google_play_iframe:
    # Open the managed-Play app-approval iframe in the default browser.
    IFRAME_URL = "https://storage.googleapis.com/android-management-api-samples/managed_play_iframe.html"
    web_token = androidmanagement.enterprises().webTokens().create(
        parent=json_config_object['enterprise_name'],
        body={"parentFrameUrl": IFRAME_URL}).execute()
    subprocess.check_call(['xdg-open', f'{IFRAME_URL}?mode=SELECT&token={web_token["value"]}'])
    logging.debug('Skipping everything else')
    exit()
if args.delete_some_tablets:
    # Delete the named device resources, then stop.
    for name in args.delete_some_tablets:
        androidmanagement.enterprises().devices().delete(name=name).execute()
    exit()
# Ref. https://stackoverflow.com/questions/52949572/android-management-api-change-policy-for-device#52953195
if args.migrate_some_tablets:
    # Re-point the named devices at a different policy, then stop.
    assert args.enrollment_policy_name, 'Gotta know new policy to move to'
    for name in args.migrate_some_tablets:
        androidmanagement.enterprises().devices().patch(
            name=name,
            updateMask='policyName',
            body={'policyName': args.enrollment_policy_name}).execute()
    exit()
######################################################################
## Create an enterprise
######################################################################
# An `Enterprise` resource binds an organization to your Android Management solution.
# `Devices` and `Policies` both belong to an enterprise.
# Typically, a single enterprise resource is associated with a single organization.
# However, you can create multiple enterprises for the same organization based on their needs.
# For example, an organization may want separate enterprises for its different departments or regions.
#
# To create an enterprise you need a Gmail account.
# It MUST NOT already be associated with an enterprise.
#
# To start the enterprise creation flow, run the cell below.
#
# If you've already created an enterprise for this project,
# you can skip this step and enter your enterprise name in the next cell.
if 'enterprise_name' not in json_config_object:
    # Generate a signup URL where the enterprise admin can signup with a Gmail
    # account.
    signup_url = androidmanagement.signupUrls().create(
        projectId=gcloud_project_id,
        callbackUrl='https://storage.googleapis.com/android-management-quick-start/enterprise_signup_callback.html'
    ).execute()
    print('Please visit this URL to create an enterprise:', signup_url['url'])
    enterprise_token = input('Enter the code: ')
    # Complete the creation of the enterprise and retrieve the enterprise name.
    enterprise = androidmanagement.enterprises().create(
        projectId=gcloud_project_id,
        signupUrlName=signup_url['name'],
        enterpriseToken=enterprise_token,
        body={}
    ).execute()
    # Remember the new enterprise name for the rest of this run.
    json_config_object['enterprise_name'] = enterprise['name']
    print('\nYour enterprise name is', json_config_object['enterprise_name'])
# Take note of the enterprise name so you can reuse it after you close this notebook.
# If you already have an enterprise, you can enter the enterprise name in the cell below and run the cell.
######################################################################
## Create a policy
######################################################################
#
# A `Policy` is a group of settings that determine the behavior of a managed device and apps installed thereon.
# Each Policy resource represents a unique group of device and app settings and can be applied to one or more devices.
# Once a device is linked to a policy, any updates to the policy are automatically applied to the device.
#
# To create a basic policy, run the cell below.
# You'll see how to create more advanced policies later in this guide.
# Some settings have to be sent as JSON *encoded as a string*, e.g.
#
#     "URLBlocklist": "[\"*\", \"chrome://*\"]",
#
# This is FUCKING UNREADABLE, so as a workaround,
# let me write them as normal json,
# then convert it to a string here.
#
# Known keys where this is helpful:
#   "URLBlocklist", "URLAllowlist", "ManagedBookmarks", "ProxySettings"
#
# UPDATE: also do some basic validation, as neither androidmanagement
#         nor com.android.chrome validate managedConfiguration.
#         AND as I've REPEATEDLY made non-obvious errors which were
#         silently ignored, LEAVING THE SYSTEM INSECURE.
# Walk every (packageName, managedConfiguration) pair across all policies.
for packageName, managedConfiguration in (
        (a['packageName'], a['managedConfiguration'])
        for p in json_config_object['policies'].values()
        for a in p.get('applications', [])
        if 'managedConfiguration' in a):
    # FIXME: memoize this "get" call.
    # Fetch the app's managedProperties schema from Google, keyed by property name.
    schema = dict(
        (d['key'], d)
        for d in androidmanagement.enterprises().applications().get(
            name=f'{json_config_object["enterprise_name"]}/applications/{packageName}').execute()['managedProperties'])
schema_type_to_python_type = {
'BOOL': bool,
'STRING': str,
'CHOICE': str,
'MULTISELECT': str}, # FIXME: is this correct???
    # Validate each configured property against the schema; wrap list/dict
    # values as JSON strings where the API expects string-encoded JSON.
    for k, v in managedConfiguration.items():
        if schema[k]['type'] == 'BOOL':
            # EXAMPLE (tested): "SearchSuggestEnabled": false
            if not isinstance(v, bool):
                raise TypeError(packageName, k, v, 'BOOL')
        elif schema[k]['type'] == 'CHOICE':
            # EXAMPLE (tested): "DnsOverHttpsMode": "off"
            # EXAMPLE (tested): "IncognitoModeAvailability": 1  # NOTE: 1 or "1"; both work
            choices = {choice['value'] for choice in schema[k]['entries']}
            assert all(isinstance(choice, str) for choice in choices)
            if isinstance(v, int) and str(v) in choices:
                logging.debug('Ignoring str-as-int (seems to work) %s %s %s %s', packageName, k, v, choices)
            elif not isinstance(v, str):
                raise TypeError(packageName, k, v, 'CHOICE', choices)
        elif schema[k]['type'] == 'MULTISELECT':
            # EXAMPLE (tested): "ExplicitlyAllowedNetworkPorts": "[\"554\", \"10080\"]"
            choices = {choice['value'] for choice in schema[k]['entries']}
            assert all(isinstance(choice, str) for choice in choices)
            if not isinstance(v, list):
                raise TypeError(packageName, k, v, 'MULTISELECT', choices)
            for choice in v:
                if choice not in choices:
                    raise ValueError(packageName, k, v, choice, 'MULTISELECT', choices)
            logging.debug('Double-json-ing %s %s', packageName, k)
            managedConfiguration[k] = json.dumps(v)
        elif schema[k]['type'] == 'STRING':
            # EXAMPLE (tested): "HomepageLocation": "https://LOCAL.PrisonPC.com/"
            # EXAMPLE (tested): "ManagedBookmarks": "[{\"name\": \"MyCoolBookmark\", \"url\": \"https://example.com/\"}]"
            # EXAMPLE (tested): "URLAllowlist": "[\"https://example.com/\", \"https://www.example.com/\", \"https://ang.wikipedia.org/\"]"
            if not isinstance(v, str):
                logging.debug('Double-json-ing %s %s', packageName, k)
                managedConfiguration[k] = json.dumps(v)
        else:
            # Unknown schema type: fail loudly rather than upload something unchecked.
            raise RuntimeError(packageName, k, schema[k]['type'])
# Upload every policy; patch() also creates the policy if it does not exist yet
# (unlike webApps -- see the note in the webapp section below).
for policy_name, policy_body in json_config_object['policies'].items():
    # Example: "frobozz-DEADBE/policies/policy1"
    # FIXME: probably doesn't quote silly enterprise names properly.
    policy_path = f'{json_config_object["enterprise_name"]}/policies/{policy_name}'
    androidmanagement.enterprises().policies().patch(
        name=policy_path,
        body=policy_body).execute()
############################################################
## Create webapp pseudo-apps
############################################################
# Ref. https://colab.research.google.com/github/google/android-management-api-samples/blob/master/notebooks/web_apps.ipynb
# Google requires inline base64 PNG images.
# Let's just use URLs because fuck that.
# UPDATE: austlii.edu.au returns 200 to firefox, but 401 to python requests.
#         Therefore, double fuck it --- I'll commit icons to git.
icon_dir = pathlib.Path('icons')
for webApp in json_config_object.get('webApps', []):
    if 'icons' not in webApp:
        # Look for a committed icon file named after the webapp's title.
        icon_path = (icon_dir / webApp['title']).with_suffix('.png')
        if icon_path.exists():
            logging.debug('Slurping icon from disk: %s', icon_path)
            with icon_path.open(mode='rb') as f:
                webApp['icons'] = [{'imageData': base64.urlsafe_b64encode(f.read()).decode('UTF-8')}]
# Unlike policy, patch() won't implicitly create a webapp.
# Instead we must "PATCH if in LIST else CREATE".
# This mirrors SQL's "UPDATE if SELECT else INSERT".
old_webApps = androidmanagement.enterprises().webApps().list(
    parent=json_config_object['enterprise_name']).execute()['webApps']
for new_webApp in json_config_object['webApps']:
    # We assume the startUrl (not title) is unique.
    # UPDATE: this got to be too annoying when URLs were typo'd, so switch to title.
    if not any(old_webApp['title'] == new_webApp['title']
               for old_webApp in old_webApps):
        logging.debug("Doesn't exist, so call create()")
        androidmanagement.enterprises().webApps().create(
            parent=json_config_object['enterprise_name'],
            body=new_webApp).execute()
        continue
    # Exists: find the matching record and decide whether to patch it.
    for old_webApp in old_webApps:
        if old_webApp['title'] != new_webApp['title']:
            continue
        # UGHHHHH, if we send a noop patch, the webapp version jumps, and play store pushes a "new" 50kB apk to every device.
        # Therefore if old_webApp == new_webApp, do nothing.
        # Except that old_webApp has some auto-populated fields, so
        # only compare startUrl/title/displayMode.
        # UPDATE: When I upload a webApp['icons'], it isn't there when I query it back.
        #         This appears to be by design.  Sigh.
        if all(old_webApp[k] == new_webApp[k]
               for k in new_webApp.keys()
               if k != 'icons'):
            logging.debug('Exists and unchanged, so call nothing')
            continue
        logging.debug('Exists and changed, so call patch()')
        androidmanagement.enterprises().webApps().patch(
            name=old_webApp['name'],
            body=new_webApp).execute()
def pages(
        resource: googleapiclient.discovery.Resource,  # e.g. androidmanagement.enterprises().devices()
        *args,
        **kwargs):
    """Yield each page of ``resource.list(*args, **kwargs)`` responses.

    Uses the google-api-python-client pagination protocol: ``list()`` for
    the first request, then ``list_next()`` until it returns None.
    """
    request = resource.list(*args, **kwargs)
    while request is not None:  # list_next() returns None after the last page
        response = request.execute()
        yield response
        request = resource.list_next(
            previous_request=request,
            previous_response=response)
def merged_pages(
        resource: googleapiclient.discovery.Resource,  # e.g. androidmanagement.enterprises().devices()
        response_key: str,  # e.g. "devices"
        *args,
        **kwargs):
    """Yield every record under *response_key*, flattened across all pages."""
    expected_keys = {response_key, 'nextPageToken'}
    for page in pages(resource, *args, **kwargs):
        # Sanity check: fail loudly if a page carries a key we don't expect.
        for key in page:
            if key not in expected_keys:
                raise RuntimeError('Unexpected key', {key: page[key]})
        yield from page[response_key]
# If a device is re-enrolled, it becomes a new "device" with a new name.
# The old enrollment continues to exist under the old name.
# Delete any old enrollments that haven't already been deleted.
# Use set() to minimize the number of HTTP requests, since they're slow (urllib2 can't HTTP/3).
devices = list(
    merged_pages(
        # our arguments
        resource=androidmanagement.enterprises().devices(),
        response_key='devices',
        # google's arguments
        parent=json_config_object['enterprise_name']))
device_names_to_delete = (
    # All obsolete devices
    set(
        name
        for d in devices
        for name in d.get('previousDeviceNames', {}))
    &  # set intersection -- name must be in both sets
    # All known devices
    set(d['name'] for d in devices))
for name in device_names_to_delete:
    androidmanagement.enterprises().devices().delete(
        name=name).execute()
######################################################################
## Do some queries
######################################################################
# Save to disk some notes about the current state, so
# it can be poked around at later with jq(1).
os.makedirs('cache', exist_ok=True)
with open('cache/API-androidmanagement-v1.json', mode='w') as f:
    resp = requests.get(
        # Either of these URLs works, and returns the same content.
        # This is the URL that apiclient.discovery.build() above talks to.
        'https://www.googleapis.com/discovery/v1/apis/androidmanagement/v1/rest'
        or
        # This is the URL that Google documentation told us to use.
        # NOTE: "A or B" on a non-empty string always evaluates to A, so the
        # second URL here is effectively documentation-only.
        'https://androidmanagement.googleapis.com/$discovery/rest?version=v1')
    resp.raise_for_status()
    json.dump(
        resp.json(),
        f,
        sort_keys=True,
        indent=4)
# Drop the response object so it can't be reused stale below.
del resp
def my_json_dump(obj):
    """Write *obj* as pretty-printed JSON to ``cache/<obj['name']>.json``.

    Google resource names contain slashes, so the parent directory tree is
    created as needed.
    """
    path = pathlib.Path('cache/{}.json'.format(obj['name']))
    os.makedirs(path.parent, exist_ok=True)
    with path.open(mode='w') as f:
        json.dump(obj, f, indent=4, sort_keys=True)
# Dump every enterprise, plus its devices/policies/webApps, to cache/ as JSON.
for enterprise in merged_pages(
        # our arguments
        resource=androidmanagement.enterprises(),
        response_key='enterprises',
        # google's arguments
        projectId=gcloud_project_id):
    my_json_dump(enterprise)
    for response_key, resource in [
            ('devices', androidmanagement.enterprises().devices),
            ('policies', androidmanagement.enterprises().policies),
            ('webApps', androidmanagement.enterprises().webApps)]:
        for obj in merged_pages(
                # our arguments
                resource=resource(),
                response_key=response_key,
                # google's arguments
                parent=enterprise['name']):
            # Strip WiFi PSKs before anything is written to disk.
            redact_some_passphrases(obj)  # DESTRUCTIVE
            my_json_dump(obj)
    # NOTE: because this is essentially EVERY app in Play Store,
    #       there is no list().
    #       Instead we ask for a single application by name.
    #       Get those names from current policies.
    #
    # NOTE: com.android.chrome's managedProperties is equivalent to
    #       https://www.chromium.org/administrators/policy-list-3
    if args.hurry_the_fuck_up:
        logging.debug('Skipping slow download of application stuff')
        continue
    for packageName in sorted(set(
            application['packageName']
            for policy in json_config_object['policies'].values()
            for application in policy.get('applications', [])
            if application.get('installType', 'INSTALL_TYPE_UNSPECIFIED') != 'BLOCKED')):
        try:
            my_json_dump(androidmanagement.enterprises().applications().get(
                name=f'{enterprise["name"]}/applications/{packageName}').execute())
        except googleapiclient.errors.HttpError as e:
            if e.resp.status == 404:
                # Sideloaded apps have no Play Store metadata; that's fine.
                logging.debug('App %s not in Play Store -- probably from F-Droid', packageName)
            else:
                raise
######################################################################
## Provision a device
######################################################################
# Provisioning refers to the process of enrolling a device with an enterprise,
# applying the appropriate policies to the device, and
# guiding the user to complete the set up of their device in accordance with those policies.
# Before attempting to provision a device,
# ensure that the device is running Android 6.0 or above.
#
# You need an enrollment token for each device that you want to provision (you can use the same token for multiple devices);
# when creating a token you can specify a policy that will be applied to the device.
if args.hurry_the_fuck_up:
    logging.debug('Skipping everything else (probably just enrollment QR code)')
    exit()
# FIXME: this does enrollment for whatever the LAST POLICY IN THE LIST loop was.
#        Since "policies" is a dict, the order is random!
#        Move this crap inside the "for ... in policies" loop?
#        (NOTE(review): on Python 3.7+ dicts preserve insertion order, so in
#        practice it's the last policy in the config file.)
# https://developers.google.com/android/management/reference/rest/v1/enterprises.enrollmentTokens#EnrollmentToken
enrollment_token = androidmanagement.enterprises().enrollmentTokens().create(
    parent=json_config_object['enterprise_name'],
    body={"policyName": args.enrollment_policy_name or policy_name,
          'duration': f'{60 * 60 * 24 * 90}s',  # maximum duration (90 days, in seconds)
          }
).execute()
# Embed your enrollment token in either an enrollment link or a QR code, and then follow the provisioning instructions below.
if args.work_profile_mode:
    print('Please open this link on your device:',
          'https://enterprise.google.com/android/enroll?et=' + enrollment_token['value'])
else:
    # url = 'https://chart.googleapis.com/chart?' + urllib.parse.urlencode({
    #     'cht': 'qr',
    #     'chs': '500x500',
    #     'chl': enrollment_token['qrCode']})
    # print('Please visit this URL to scan the QR code:', url)
    # subprocess.check_call(['xdg-open', url])
    # Render the QR code directly in the terminal via qrencode(1) instead.
    subprocess.run(['qrencode', '-tUTF8'],
                   check=True,
                   input=enrollment_token['qrCode'],
                   text=True)
# The method for provisioning a device varies depending on the management mode you want to use.
#
# Fully managed mode
# ------------------------------------------------------------
# In fully managed mode the entire device is managed and the device needs to be factory reset before setup.
# To set up a device in fully managed mode you need to use a QR code.
#
# For devices running Android 7.0 or above:
#
# 1. Turn on a new or factory-reset device.
# 2. Tap the same spot on the welcome screen six times to enter QR code mode.
# 3. Connect to a WiFi network.
# 4. Scan the QR code.
#
# For devices running Android 6.0:
#
# 1. Turn on a new or factory-reset device.
# 2. Follow the setup wizard and enter your Wi-Fi details.
# 3. When prompted to sign in, enter **afw#setup**.
# 4. Tap Next, and then accept the installation of Android Device Policy.
# 5. Scan the QR code.
#
# Work profile mode
# ------------------------------------------------------------
# In work profile mode corporate apps and data are kept secure in a self-contained work profile
# while the user keeps control of the rest of the device.
# To set up a work profile you can either use a QR code or an enrollment link.
#
# Using the enrollment link:
#
# 1. Make the link accessible on the device (send it via email or put it on a website).
# 2. Open the link.
#
# Or using the QR code:
#
# 1. Go to Settings > Google.
# 2. Tap "Set up your work profile".
# 3. Scan the QR code.
######################################################################
## What's next?
######################################################################
# By now you should have a managed device configured with a basic policy, but
# there's much more you can do with the Android Management API.
#
# First, we recommend exploring the range of available policies to build the right policy for your needs:
# https://developers.google.com/android/management/create-policy
#
# Next, explore other features of the Android Management API:
#
# • Learn how to discover apps:
# https://developers.google.com/android/management/apps
#
# • Set up Pub/Sub notifications
# https://developers.google.com/android/management/notifications
#
# Or start developing a server-based solution:
#
# • Download the Android Management API client library for
#
# :Java: https://developers.google.com/api-client-library/java/apis/androidmanagement/v1
# :.NET: https://developers.google.com/api-client-library/dotnet/apis/androidmanagement/v1
# :Python: https://developers.google.com/api-client-library/python/apis/androidmanagement/v1 or
# :Ruby: https://developers.google.com/api-client-library/ruby/apis/androidmanagement/v1
#
# • Create a service account
# https://developers.google.com/android/management/service-account
| StarcoderdataPython |
3212412 | #! python
import time, datetime
# Timing: measure elapsed wall-clock time with time.time().
startTime = time.time()
time.sleep(1)
print(time.time())
time.sleep(1)
endTime = time.time()
# 'Время' is Russian for "time" -- user-facing output, left untranslated.
print('Время %s ' % (endTime - startTime))
# Now: current local date and time, with individual components.
dateNow = datetime.datetime.now()
print(dateNow.year)
print(dateNow.hour)
print(dateNow.second)
# Date to string: ISO calendar year, then a custom strftime format.
print(datetime.datetime.fromtimestamp(time.time()).isocalendar()[0])
print(dateNow.strftime('%d "%B" %Y %H:%M:%S'))
# String to date: parse a fixed dd.mm.yyyy date with strptime.
strDate = '21.01.2018'
dateNow2 = datetime.datetime.strptime(strDate, '%d.%m.%Y')
print(dateNow2.strftime('%d "%B" %Y %H:%M:%S'))
3342754 | <gh_stars>0
import traceback
import bson
import struct
import json
import websockets
import random
from os.path import isfile
from autobahn.twisted.websocket import WebSocketServerProtocol
# In-memory map database, persisted to maplist.bson on disk.
db = {}
fp = None
if isfile("maplist.bson"):
    # NOTE(review): 'a+b' is append mode -- later write()s always land at
    # EOF regardless of seek(); see MapListServer.onMessage below.
    fp = open("maplist.bson", 'a+b')
    fp.seek(0)
    try:
        db = bson.loads(fp.read())
    except struct.error:
        # Corrupt/garbled file: log the traceback and start empty.
        traceback.print_exc()
        db = {}
        # fp.seek(0)
        # print(len(fp.read()) / 1024)
        # fp.seek(0)
        # fp.write(bson.dumps(db))
    finally:
        fp.seek(0)
else:
    # First run: create the file with an empty database.
    fp = open("maplist.bson", 'w+b')
    fp.write(bson.dumps(db))
    fp.seek(0)
class MapListServer(WebSocketServerProtocol):
    """WebSocket protocol serving a tiny map store backed by ``maplist.bson``.

    Wire format is one text frame per request, shaped ``COMMAND:DATA``:

    * ``RETRIEVE:<map-id>`` -> ``SUCCESS:<json>`` or ``ERR:NOMAP``
    * ``SAVE:<json>``       -> ``SUCCESS:<new-map-id>``

    Malformed or unknown requests get an ``ERR:<CODE>`` frame.
    """

    # Alphabet for the random 20-character map ids.
    _ID_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

    def onConnect(self, request):
        print("Client connecting: {}".format(request.peer))

    def onOpen(self):
        print("WebSocket connection open.")

    def onMessage(self, payload, isBinary):
        """Dispatch one ``COMMAND:DATA`` frame; persist the DB after a SAVE."""
        if isBinary:
            self.sendMessage(b"ERR:CANTBINARY", isBinary=False)
            return
        payload = payload.decode('utf-8')
        if ':' not in payload:
            self.sendMessage(b"ERR:BADSYNTAX", isBinary=False)
            return
        # Split on the first colon only: DATA may itself contain colons (JSON).
        command, _, data = payload.partition(':')
        print("Command received: {}".format(command))
        if command == "RETRIEVE":
            if data not in db:
                print("Map not found!")
                self.sendMessage(b"ERR:NOMAP", isBinary=False)
                return
            found = db[data]
            print("Retrieved map with success: {} lines and {} sprites.".format(
                len(found['walls']), len(found['sprites'])))
            self.sendMessage(("SUCCESS:" + json.dumps(found)).encode('utf-8'),
                             isBinary=False)
        elif command == "SAVE":
            # Draw random ids until we find an unused one.
            while True:
                mid = ''.join(random.choice(self._ID_ALPHABET) for _ in range(20))
                if mid not in db:
                    break
            db[mid] = json.loads(data)
            print("Saved map with success: {}".format(mid))
            self._persist()
            self.sendMessage(("SUCCESS:" + mid).encode('utf-8'), isBinary=False)
        else:
            print("Command not understood!")
            self.sendMessage(b"ERR:NOCOMMAND", isBinary=False)

    @staticmethod
    def _persist():
        """Rewrite maplist.bson with the current ``db``.

        BUG FIX: the original appended a fresh BSON dump after *every*
        message.  ``fp`` is opened in append mode, so ``seek(0)`` never
        rewound the write position and the file accumulated stale copies
        of the database -- likely why loading it could raise struct.error.
        Truncate and rewrite instead, and only after a mutating SAVE.
        """
        fp.seek(0)
        fp.truncate()
        fp.write(bson.dumps(db))
        fp.flush()
        fp.seek(0)

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {}".format(reason))
if __name__ == '__main__':
    import sys
    from twisted.python import log
    from twisted.internet import reactor
    # Log to stdout and serve the MapListServer protocol on TCP port 8909.
    log.startLogging(sys.stdout)
    from autobahn.twisted.websocket import WebSocketServerFactory
    factory = WebSocketServerFactory()
    factory.protocol = MapListServer
    reactor.listenTCP(8909, factory)
    reactor.run()
| StarcoderdataPython |
1664161 | <gh_stars>1-10
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions, authentication
# drf-yasg schema view backing the /redoc/ API documentation page below.
schema_view = get_schema_view(
    openapi.Info(
        title="Recipe API",
        default_version='v1',
        description="not description yet",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="<EMAIL>"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    # The docs themselves require a logged-in user with a DRF token.
    permission_classes=(permissions.IsAuthenticated,),
    authentication_classes=(authentication.TokenAuthentication, )
)
urlpatterns = [
    path('admin/', admin.site.urls),
    path('redoc/', schema_view.with_ui('redoc', cache_timeout=0),
         name='schema-redoc'),
    path('api/user/', include('user.urls')),
    path('api/recipe/', include('recipe.urls')),
    # Append media-file routes (settings.MEDIA_URL -> MEDIA_ROOT).
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
58637 | '''Desenvolva um programa que leia o comprimento de três retas e diga ao
usuário se elas podem ou não formar um triângulo.'''
# My solution.
# Triangle-inequality reference:
# https://escolaeducacao.com.br/condicao-da-existencia-de-um-triangulo/
a = float(input('Digite o primeiro comprimento: '))
b = float(input('Digite o segundo comprimento: '))
c = float(input('Digite o terceiro comprimento: '))
# Triangle inequality: each side must be shorter than the sum of the other two.
if b + c > a and a + c > b and a + b > c:
    print('É possível formar um triangulo com essas medidas.')
else:
    print('Não é possível formar um triangulo com essas medidas.')
print('''
*
*
* FIM''')
# The course's reference solution (same check, different variable names).
print('-='*20)
print('Analisador de triângulos')
print('-='*20)
r1 = float(input('Primeiro segmento: '))
r2 = float(input('Segundo segmento: '))
r3 = float(input('Terceiro segmento: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('Os segmentos acima PODEM FORMAR triângulo!')
else:
    # NOTE(review): "acuma" looks like a typo for "acima" in the message below.
    print('Os segmentos acuma NÃO PODEM FORMAR triângulo!')
1635421 | # The MIT License
#
# Copyright (c) 2008 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
authorized.py
Created by <NAME> on 2008-05-04.
Copyright (c) 2008 Publishare LLC. Distributed under MIT License.
"""
__author__ = '<NAME>'
from google.appengine.api import users
import logging
def role(role):
    """Decorator enforcing a user role on a webapp handler method.

    Supported roles are 'user' (any logged-in user) and 'admin'.

    Usage::

        import authorized

        @authorized.role("admin")
        def get(self):
            user = users.GetCurrentUser(self)
            self.response.out.write('Hello, ' + user.nickname())

    On a GET handler an anonymous user is redirected to create_login_url().
    For other HTTP verbs we cannot redirect to the login URL, because the
    return redirect would come back as a GET (not the original verb), so an
    anonymous or unauthorized request gets a 403 instead.
    """
    def wrapper(handler_method):
        def check_login(self, *args, **kwargs):
            user = users.get_current_user()
            # Anonymous: redirect GETs to login, reject everything else.
            if not user:
                if self.request.method == 'GET':
                    logging.debug("User not logged in -- force login")
                    self.redirect(users.create_login_url(self.request.uri))
                else:
                    logging.debug("Not user - aborting")
                    self.error(403)
                return
            # Authorized: any logged-in user for 'user', admins for 'admin'.
            if role == "user" or (role == "admin"
                                  and users.is_current_user_admin()):
                logging.debug("Role is %s so will allow handler", role)
                handler_method(self, *args, **kwargs)
                return
            # Logged in but not authorized (or unknown role).
            if self.request.method == 'GET':
                logging.debug("Unknown role (%s) on GET", role)
                self.redirect("/403.html")
            else:
                logging.debug("Unknown role: %s", role)
                self.error(403)  # User didn't meet role.
                # TODO: Give better feedback/status code.
        return check_login
    return wrapper
| StarcoderdataPython |
90574 | <filename>watcher/tests/datasource/test_gnocchi_helper.py<gh_stars>0
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from watcher.common import clients
from watcher.common import exception
from watcher.datasource import gnocchi as gnocchi_helper
from watcher.tests import base
CONF = cfg.CONF
@mock.patch.object(clients.OpenStackClients, 'gnocchi')
class TestGnocchiHelper(base.BaseTestCase):
    """Unit tests for GnocchiHelper.statistic_aggregation()."""

    def test_gnocchi_statistic_aggregation(self, mock_gnocchi):
        """With mocked measures, the aggregated result is 5.5."""
        measures = [["2017-02-02T09:00:00.000000", 360, 5.5]]
        gnocchi_client = mock.MagicMock()
        gnocchi_client.metric.get_measures.return_value = measures
        mock_gnocchi.return_value = gnocchi_client
        helper = gnocchi_helper.GnocchiHelper()
        aggregated = helper.statistic_aggregation(
            resource_id='16a86790-327a-45f9-bc82-45839f062fdc',
            metric='cpu_util',
            granularity=360,
            start_time=timeutils.parse_isotime("2017-02-02T09:00:00.000000"),
            stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"),
            aggregation='mean',
        )
        self.assertEqual(5.5, aggregated)

    def test_gnocchi_wrong_datetime(self, mock_gnocchi):
        """A plain-string start_time (not a datetime) raises InvalidParameter."""
        gnocchi_client = mock.MagicMock()
        gnocchi_client.metric.get_measures.return_value = [
            ["2017-02-02T09:00:00.000000", 360, 5.5]]
        mock_gnocchi.return_value = gnocchi_client
        helper = gnocchi_helper.GnocchiHelper()
        self.assertRaises(
            exception.InvalidParameter, helper.statistic_aggregation,
            resource_id='16a86790-327a-45f9-bc82-45839f062fdc',
            metric='cpu_util',
            granularity=360,
            start_time="2017-02-02T09:00:00.000000",
            stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"),
            aggregation='mean')
63933 | <reponame>opendatadiscovery/odd-collector-aws
from odd_collector_aws.adapters.sagemaker.domain.artifact import (
create_artifact,
Image,
Model,
Dataset,
)
def test_create_artifact():
    """create_artifact() dispatches on the artifact name and derives names/types."""
    # SageMaker image URIs keep only the trailing "<repo>:<tag>" as the name.
    artifact = create_artifact(
        name="SageMaker.ImageUri",
        uri="1111.dkr.ecr.us-east-1.amazonaws.com/predict-flight-delay/components/preprocessing-serving:debug",
    )
    assert isinstance(artifact, Image)
    assert artifact.name == "preprocessing-serving:debug"
    assert artifact.uri == (
        "1111.dkr.ecr.us-east-1.amazonaws.com/predict-flight-delay"
        "/components/preprocessing-serving:debug")

    # Model artifacts are named after the file, with the extension stripped.
    artifact = create_artifact(
        name="SageMaker.ModelArtifact",
        uri="s3://sagemaker-flight-delay-prediction-demo/pipelines-1111/output/model.tar.gz",
    )
    assert isinstance(artifact, Model)
    assert artifact.name == "model"
    assert artifact.uri == (
        "s3://sagemaker-flight-delay-prediction-demo"
        "/pipelines-1111/output/model.tar.gz")

    # Anything else is treated as a plain Dataset and keeps its given name.
    artifact = create_artifact(
        name="input",
        uri="s3://bucket/input-data/sample/data.csv",
    )
    assert isinstance(artifact, Dataset)
    assert artifact.artifact_type == "Dataset"
    assert artifact.name == "input"
    assert artifact.uri == "s3://bucket/input-data/sample/data.csv"
3265799 | <gh_stars>0
import datetime
import json
import logging
import textwrap
from django.conf import settings
from django.contrib.auth import login as django_login, logout as django_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.db.models import Q
from django.db.models.functions import Lower
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import edit
from django.views.generic import ListView
# Imported as recommended in the low-level API docs:
# https://django-reversion.readthedocs.org/en/latest/api.html?#importing-the-low-level-api
import reversion as revisions
from reversion import create_revision
from reversion.helpers import generate_patch, generate_patch_html
from vr.server import forms, tasks, events, models
from vr.server.utils import yamlize, build_swarm_trace_id
logger = logging.getLogger('velociraptor')
VERSION_DIFFS_LIMIT = 10
def json_response(func):
    """Decorator: serialize a view's return value to JSON.

    If the request carries a ``callback`` parameter (GET or POST), the
    payload is wrapped as JSONP (``text/javascript``) instead.  Values that
    are already an ``HttpResponse`` pass through untouched.

    NOTE(review): ``request.REQUEST`` only exists on older Django versions;
    this file's imports (``django.core.urlresolvers``) match that era.
    """
    import functools

    # wraps() preserves the view's __name__/__doc__ for URL/debug tooling.
    @functools.wraps(func)
    def decorator(request, *args, **kwargs):
        objects = func(request, *args, **kwargs)
        if isinstance(objects, HttpResponse):
            return objects
        try:
            data = json.dumps(objects)
            if 'callback' in request.REQUEST:
                # a jsonp response!
                data = '%s(%s);' % (request.REQUEST['callback'], data)
                return HttpResponse(data, "text/javascript")
        except Exception:
            # Not JSON-serializable: fall back to dumping str(objects)
            # rather than crashing the view, but leave a trace instead of
            # swallowing the error silently.
            logger.exception('json_response: falling back to str() for %r', func)
            data = json.dumps(str(objects))
        return HttpResponse(data, "application/json")
    return decorator
def app_in_default_dashboard(app, user):
    """
    Determines if an app is part of the user's default dashboard.

    Returns False when the user has no profile; otherwise returns a falsy
    value (the dashboard itself, possibly None) or the boolean membership
    test — callers use the result only in boolean context.
    """
    try:
        dashboard = user.userprofile.default_dashboard
    except ObjectDoesNotExist:
        # The user has no profile record at all.
        return False
    # NOTE: when dashboard is None this returns None, not False.
    return dashboard and app.id in dashboard.apps.values_list('id', flat=True)
@login_required
def dash(request):
    """Render the home dashboard showing all active hosts."""
    return render(request, 'dash.html', {
        'hosts': models.Host.objects.filter(active=True),
        'dashboard_id': '',
        'dashboard_name': 'Home',
        'supervisord_web_port': settings.SUPERVISORD_WEB_PORT
    })
@login_required
def default_dash(request):
    """
    Render the user's default dashboard, or send them back to the home
    page when no default dashboard is configured.
    """
    profile = getattr(request.user, 'userprofile', None)
    if profile:
        dashboard = profile.default_dashboard
        if dashboard is not None:
            context = {
                'hosts': models.Host.objects.filter(active=True),
                'dashboard_id': dashboard.id,
                'dashboard_name': 'Default - %s' % dashboard.name,
                'supervisord_web_port': settings.SUPERVISORD_WEB_PORT,
            }
            return render(request, 'dash.html', context)
    # No default dashboard: fall back to the home page.
    return HttpResponseRedirect('/')
@login_required
def custom_dash(request, slug):
    """Render a saved dashboard looked up by its slug (404 if unknown)."""
    dashboard = get_object_or_404(models.Dashboard, slug=slug)
    return render(request, 'dash.html', {
        'hosts': models.Host.objects.filter(active=True),
        'dashboard_id': dashboard.id,
        'dashboard_name': dashboard.name,
        'quick_dashboard': True,
        'supervisord_web_port': settings.SUPERVISORD_WEB_PORT
    })
@login_required
def build_app(request):
    """
    Show/process the form that kicks off a build of an app at a tag.
    """
    form = forms.BuildForm(request.POST or None)
    if not form.is_valid():
        return render(request, 'basic_form.html', {
            'form': form,
            'btn_text': 'Build',
        })
    app = models.App.objects.get(id=form.cleaned_data['app_id'])
    build = models.Build(
        app=app,
        tag=form.cleaned_data['tag'],
        os_image=app.get_os_image(),
    )
    build.save()
    do_build(build, request.user)
    # Land the user on whichever dashboard shows this app.
    if app_in_default_dashboard(app, request.user):
        return redirect('default_dash')
    return redirect('dash')
def do_build(build, user):
    """
    Queue a build job for the worker and announce it on the pubsub.
    """
    tasks.build_app.delay(build_id=build.id)
    uri = '/admin/server/builds/{}/'.format(build.id)
    events.eventify(user, 'build', build, resource_uri=uri)
@login_required
def upload_build(request):
    """
    Accept an uploaded pre-built app tarball and record it as a Build.
    """
    form = forms.BuildUploadForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        events.eventify(request.user, 'upload', form.instance)
        return HttpResponseRedirect(reverse('deploy'))
    return render(request, 'basic_form.html', {
        'form': form,
        'btn_text': 'Upload',
        'instructions': """Use this form to upload a build. A valid build
        should have a Procfile, and have all app-specific dependencies already
        compiled into the env.""",
        'enctype': "multipart/form-data"
    })
@login_required
def release(request):
    """
    Create a release by pairing a successful build with env overrides.
    """
    form = forms.ReleaseForm(request.POST or None)
    if form.is_valid():
        rel = form.instance
        # Merge env vars: start from the build's, let the form override.
        merged = rel.build.env_yaml or {}
        merged.update(rel.env_yaml or {})
        rel.env_yaml = merged
        form.save()
        events.eventify(
            request.user, 'release', rel,
            resource_uri='/admin/server/release/{}/'.format(rel.id))
        return HttpResponseRedirect(reverse('deploy'))
    # Only completed builds are eligible for a release.
    successful = models.Build.objects.filter(status=models.BUILD_SUCCESS)
    form.fields['build'].queryset = successful
    return render(request, 'basic_form.html', {
        'form': form,
        'btn_text': 'Save',
    })
@login_required
def deploy(request):
    """
    Show/process the form that dispatches a single proc deployment.
    """
    # Construct the form for specifying deployments.
    form = forms.DeploymentForm(request.POST or None)
    if form.is_valid():
        # The form fields exactly match the arguments to the celery
        # task, so just use that dict for kwargs.
        data = form.cleaned_data
        release = models.Release.objects.get(id=data.pop('release_id'))
        if 'app' in data:
            data.pop('app')
        do_deploy(release, request.user, **data)
        # If app is part of the user's default dashboard, redirect there.
        if app_in_default_dashboard(release.build.app, request.user):
            return redirect('default_dash')
        return redirect('dash')
    # NOTE: vars() hands every local (request, form, ...) to the template.
    return render(request, 'basic_form.html', vars())
def do_deploy(release, user, config_name, hostname, proc, port):
    """
    Queue a deploy job on the work queue and announce it on the events
    pubsub.
    """
    tasks.deploy.delay(release_id=release.id, config_name=config_name,
                       hostname=hostname, proc=proc, port=port)
    # Human-readable label for the event stream.
    procname = '%s-%s-%s to %s' % (release, proc, port, hostname)
    uri = '/admin/server/release/{}/'.format(release.id)
    events.eventify(user, 'deploy', procname, resource_uri=uri)
@login_required
def proclog(request, hostname, procname):
    """Render the log page for one proc; vars() passes hostname/procname."""
    return render(request, 'proclog.html', vars())
def _get_version_diffs_for_obj(obj, limit):
    """
    Build a django-reversion change history for *obj*.

    Returns (version_diffs, last_edited): version_diffs is a list (newest
    first, at most *limit* entries) of dicts holding per-field
    (old_value, new_value, html_diff) tuples plus the revision's user and
    date; last_edited is the newest revision's timestamp, or None when
    there are fewer than two versions.
    """
    version_list = revisions.get_for_object(obj)
    fields = [field for field in obj._meta.fields]
    version_diffs, last_edited = [], None
    if len(version_list) > 1:
        last_edited = version_list[0].revision.date_created
        # Compare each of up to `limit` older versions with its successor.
        old_versions = version_list[1:limit + 1]
        for iversion, version in enumerate(old_versions):
            # version_list[iversion] is the version right after `version`.
            newer_version = version_list[iversion]
            diff_dict = {}
            for field in fields:
                if generate_patch(version, newer_version, field.name):
                    # If versions differ, generate a pretty html diff
                    diff_html = generate_patch_html(
                        version, newer_version, field.name)
                    diff_dict[field.name] = (
                        version.field_dict[field.name],
                        newer_version.field_dict[field.name],
                        diff_html,
                    )
            version_diffs.append({
                'diff_dict': diff_dict,
                'user': newer_version.revision.user,
                'date': newer_version.revision.date_created,
            })
    return version_diffs, last_edited
@login_required
@revisions.create_revision()
def edit_swarm(request, swarm_id=None):
    """
    Create or edit a swarm.

    With a swarm_id the form is pre-populated from the existing swarm and
    its revision history is shown; without one a blank form is presented.
    On a valid POST the swarm is saved (rejecting duplicates for the same
    app/proc/config/squad), version metadata is recorded, and the swarm
    job is dispatched via do_swarm().
    """
    if swarm_id:
        # Need to populate form from swarm
        swarm = get_object_or_404(models.Swarm, id=swarm_id)
        initial = {
            'app_id': swarm.app.id,
            'squad_id': swarm.squad.id,
            'tag': swarm.release.build.tag,
            'config_name': swarm.config_name,
            'config_yaml': yamlize(swarm.config_yaml),
            'env_yaml': yamlize(swarm.env_yaml),
            'volumes': yamlize(swarm.volumes),
            'run_as': swarm.run_as or 'nobody',
            'mem_limit': swarm.mem_limit,
            'memsw_limit': swarm.memsw_limit,
            'proc_name': swarm.proc_name,
            'size': swarm.size,
            'pool': swarm.pool or '',
            'balancer': swarm.balancer,
            'config_ingredients': [
                ing.pk for ing in swarm.config_ingredients.all()]
        }
        version_diffs, _last_edited = _get_version_diffs_for_obj(
            swarm, VERSION_DIFFS_LIMIT)
        compiled_config = yamlize(swarm.get_config())
        compiled_env = yamlize(swarm.get_env())
    else:
        initial = None
        swarm = models.Swarm()
        version_diffs = []
        compiled_config = yamlize({})
        compiled_env = yamlize({})
    form = forms.SwarmForm(request.POST or None, initial=initial)
    error_msg = None
    if form.is_valid():
        data = form.cleaned_data
        # Check if we already have a swarm with these parameters
        # Note: exclude itself, in case we are editing an existing swarm
        n = models.Swarm.objects.filter(
            app=data['app_id'],
            proc_name=data['proc_name'],
            config_name=data['config_name'],
            squad=data['squad_id'],
        ).exclude(id=swarm_id).count()
        if n > 0:
            error_msg = (
                'Swarm already exists for this app, proc, config and squad!'
            )
        else:
            swarm.app = models.App.objects.get(id=data['app_id'])
            swarm.squad = models.Squad.objects.get(id=data['squad_id'])
            swarm.config_name = data['config_name']
            swarm.config_yaml = data['config_yaml']
            swarm.env_yaml = data['env_yaml']
            swarm.volumes = data['volumes']
            swarm.run_as = data['run_as']
            swarm.mem_limit = data['mem_limit']
            swarm.memsw_limit = data['memsw_limit']
            swarm.proc_name = data['proc_name']
            swarm.size = data['size']
            swarm.pool = data['pool'] or None
            swarm.balancer = data['balancer'] or None
            swarm.release = swarm.get_current_release(data['tag'])
            swarm.save()
            # Replace (not merge) the swarm's config ingredients.
            swarm.config_ingredients.clear()
            for ingredient in data['config_ingredients']:
                swarm.config_ingredients.add(ingredient)
            # Set the version metadata as recommended in the low-level API docs
            # https://django-reversion.readthedocs.org/en/latest/api.html?#version-meta-data
            revisions.set_user(request.user)
            revisions.set_comment("Created from web form.")
            do_swarm(swarm, request.user)
            # If app is part of the user's default dashboard, redirect there.
            if app_in_default_dashboard(swarm.app, request.user):
                return redirect('default_dash')
            return redirect('dash')
    return render(request, 'swarm.html', {
        'swarm': swarm,
        'form': form,
        'error_msg': error_msg,
        'btn_text': 'Swarm',
        'version_diffs': version_diffs,
        'version_diffs_limit': VERSION_DIFFS_LIMIT,
        'compiled_config': compiled_config,
        'compiled_env': compiled_env
    })
@login_required
@json_response
def search_swarm(request):
    """
    Return JSON summaries of swarms matching the 'query' GET parameter,
    or of all swarms when no query is given.
    """
    query = request.GET.get('query', None)
    if not query:
        swarms = models.Swarm.objects.all()
    else:
        # Match against app name, config name, build tag or proc name.
        criteria = (
            Q(app__name__icontains=query) |
            Q(config_name__icontains=query) |
            Q(release__build__tag__icontains=query) |
            Q(proc_name__icontains=query)
        )
        swarms = models.Swarm.objects.filter(criteria)
    summaries = []
    for swarm in swarms:
        summaries.append({
            'shortname': swarm.shortname(),
            'id': swarm.id,
            'app_name': swarm.app.name,
        })
    return summaries
def do_swarm(swarm, user):
    """
    Put a swarming job on the queue, and a notification about it on the pubsub.

    Returns the swarm trace id so callers can correlate follow-up events.
    """
    # Create a swarm trace id that takes our swarm and time
    swarm_trace_id = build_swarm_trace_id(swarm)
    # Values interpolated into the human-readable event detail below.
    values = dict(
        user=user.username,
        shortname=swarm.shortname(),
        app=swarm.app,
        tag=swarm.release.build.tag,
        config_name=swarm.config_name,
        proc_name=swarm.proc_name,
        squad=swarm.squad,
        memory=swarm.get_memory_limit_str(),
        size=swarm.size,
        balancer=swarm.balancer,
        pool=swarm.pool,
        trace_id=swarm_trace_id,
    )
    ev_detail = textwrap.dedent(
        """%(user)s swarmed %(shortname)s
        App: %(app)s
        Version: %(tag)s
        Config Name: %(config_name)s
        Proc Name: %(proc_name)s
        Squad: %(squad)s
        Memory: %(memory)s
        Size: %(size)s
        Balancer: %(balancer)s
        Pool: %(pool)s
        Trace ID: %(trace_id)s
        """) % values
    # Announce first, then queue the actual work.
    events.eventify(
        user, 'swarm', swarm.shortname(),
        detail=ev_detail, swarm_id=swarm_trace_id,
        resource_uri='/swarm/{}/'.format(swarm.id))
    tasks.swarm_start.delay(swarm.id, swarm_trace_id)
    return swarm_trace_id
class ListLogEntry(ListView):
    """Paginated deployment-log browser with GET-parameter filtering."""
    template_name = 'log.html'
    model = models.DeploymentLogEntry
    paginate_by = 50
    query_params = {}

    def get_queryset(self):
        """Filter log entries by the non-blank GET params (except 'page')."""
        params = {}
        for key, value in self.request.GET.items():
            # 'page' belongs to the paginator; blank values mean no filter.
            if key != 'page' and value.strip():
                params[key] = value
        self.query_params = params
        return models.DeploymentLogEntry.objects.filter(**params)

    def get_context_data(self, **kwargs):
        """Add the filter dropdown data and the active filters."""
        context = super(ListLogEntry, self).get_context_data(**kwargs)
        context['apps_list'] = models.App.objects.order_by(Lower('name'))
        context['users_list'] = User.objects.order_by(Lower('username'))
        context['q'] = self.query_params
        return context
class UpdateConfigIngredient(edit.UpdateView):
    """Edit an existing ConfigIngredient, recording a revision."""
    template_name = 'ingredient_form.html'
    model = models.ConfigIngredient
    success_url = reverse_lazy('ingredient_list')
    form_class = forms.ConfigIngredientForm
    def get_context_data(self, **kwargs):
        """
        Augment the data passed to the template with:
        - version_diffs: Version history
        - last_edited: Last time when the ingredient was modified
        - related swarms
        """
        context = super(UpdateConfigIngredient, self).get_context_data(
            **kwargs)
        version_diffs, last_edited = _get_version_diffs_for_obj(
            self.object, VERSION_DIFFS_LIMIT)
        context['version_diffs'] = version_diffs
        context['version_diffs_limit'] = VERSION_DIFFS_LIMIT
        context['last_edited'] = last_edited or 'No data'
        context['related_swarms'] = self.object.swarm_set.all()
        return context
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Updated from web form.")
            return_value = super(UpdateConfigIngredient, self).form_valid(form)
        return return_value
class AddConfigIngredient(edit.CreateView):
    """Create a new ConfigIngredient, recording a revision."""
    template_name = 'ingredient_form.html'
    model = models.ConfigIngredient
    success_url = reverse_lazy('ingredient_list')
    form_class = forms.ConfigIngredientForm
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Added from web form.")
            return_value = super(AddConfigIngredient, self).form_valid(form)
        return return_value
class ListConfigIngredient(ListView):
    """Paginated listing of all ConfigIngredients."""
    template_name = 'ingredient_list.html'
    model = models.ConfigIngredient
    paginate_by = 30
class DeleteConfigIngredient(edit.DeleteView):
    """Confirmation view that deletes a ConfigIngredient, with a revision."""
    model = models.ConfigIngredient
    template_name = 'confirm_delete.html'
    success_url = reverse_lazy('ingredient_list')
    def delete(self, request, *args, **kwargs):
        """Record the deletion in a django-reversion revision."""
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Deleted from web form.")
            # Forward *args/**kwargs so URL-captured parameters reach the
            # parent implementation instead of being silently dropped.
            return super(DeleteConfigIngredient, self).delete(
                request, *args, **kwargs)
class ListHost(ListView):
    """Listing of all hosts."""
    model = models.Host
    template_name = 'host_list.html'
class AddHost(edit.CreateView):
    """Create a new Host, recording a revision."""
    template_name = 'host_form.html'
    model = models.Host
    success_url = reverse_lazy('host_list')
    form_class = forms.HostForm
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Added from web form.")
            return_value = super(AddHost, self).form_valid(form)
        return return_value
class UpdateHost(edit.UpdateView):
    """Edit an existing Host (looked up by name), recording a revision."""
    template_name = 'host_form.html'
    model = models.Host
    success_url = reverse_lazy('host_list')
    form_class = forms.HostForm
    slug_field = 'name'
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Updated from web form.")
            return_value = super(UpdateHost, self).form_valid(form)
        return return_value
class DeleteHost(edit.DeleteView):
    """Confirmation view that deletes a Host, with a revision."""
    model = models.Host
    template_name = 'confirm_delete.html'
    success_url = reverse_lazy('host_list')
    def delete(self, request, *args, **kwargs):
        """Record the deletion in a django-reversion revision."""
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Deleted from web form.")
            # Forward *args/**kwargs so URL-captured parameters reach the
            # parent implementation instead of being silently dropped.
            return super(DeleteHost, self).delete(request, *args, **kwargs)
class ListSquad(ListView):
    """Listing of all squads."""
    model = models.Squad
    template_name = 'squad_list.html'
class AddSquad(edit.CreateView):
    """Create a new Squad, recording a revision."""
    template_name = 'squad_form.html'
    model = models.Squad
    success_url = reverse_lazy('squad_list')
    form_class = forms.SquadForm
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Added from web form.")
            return_value = super(AddSquad, self).form_valid(form)
        return return_value
class UpdateSquad(edit.UpdateView):
    """Edit an existing Squad, recording a revision."""
    template_name = 'squad_form.html'
    model = models.Squad
    success_url = reverse_lazy('squad_list')
    form_class = forms.SquadForm
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Updated from web form.")
            return_value = super(UpdateSquad, self).form_valid(form)
        return return_value
class DeleteSquad(edit.DeleteView):
    """Confirmation view that deletes a Squad, with a revision."""
    model = models.Squad
    template_name = 'confirm_delete.html'
    success_url = reverse_lazy('squad_list')
    def delete(self, request, *args, **kwargs):
        """Record the deletion in a django-reversion revision."""
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Deleted from web form.")
            # Forward *args/**kwargs so URL-captured parameters reach the
            # parent implementation instead of being silently dropped.
            return super(DeleteSquad, self).delete(request, *args, **kwargs)
class ListApp(ListView):
    """Listing of all apps."""
    model = models.App
    template_name = 'app_list.html'
class AddApp(edit.CreateView):
    """Create a new App, recording a revision."""
    template_name = 'app_form.html'
    model = models.App
    success_url = reverse_lazy('app_list')
    # Get rid of the following message:
    # Using ModelFormMixin (base class of AddBuildPack) without the 'fields'
    # attribute is prohibited.
    fields = ['name', 'repo_url', 'repo_type', 'buildpack', 'stack']
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Added from web form.")
            return_value = super(AddApp, self).form_valid(form)
        return return_value
class UpdateApp(edit.UpdateView):
    """Edit an existing App (looked up by name), recording a revision."""
    template_name = 'app_form.html'
    model = models.App
    success_url = reverse_lazy('app_list')
    fields = ['name', 'repo_url', 'repo_type', 'buildpack', 'stack']
    slug_field = 'name'
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Updated from web form.")
            return_value = super(UpdateApp, self).form_valid(form)
        return return_value
class DeleteApp(edit.DeleteView):
    """Confirmation view that deletes an App, with a revision."""
    model = models.App
    template_name = 'confirm_delete.html'
    success_url = reverse_lazy('app_list')
    def delete(self, request, *args, **kwargs):
        """Record the deletion in a django-reversion revision."""
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Deleted from web form.")
            # Forward *args/**kwargs so URL-captured parameters reach the
            # parent implementation instead of being silently dropped.
            return super(DeleteApp, self).delete(request, *args, **kwargs)
# Buildpack views
class ListBuildPack(ListView):
    """Listing of all buildpacks."""
    model = models.BuildPack
    template_name = 'buildpack_list.html'
class AddBuildPack(edit.CreateView):
    """Create a new BuildPack, recording a revision."""
    template_name = 'buildpack_form.html'
    model = models.BuildPack
    success_url = reverse_lazy('buildpack_list')
    # Get rid of the following message:
    # Using ModelFormMixin (base class of AddBuildPack) without the 'fields'
    # attribute is prohibited.
    fields = ['repo_url', 'repo_type', 'desc', 'order']
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Added from web form.")
            return_value = super(AddBuildPack, self).form_valid(form)
        return return_value
class UpdateBuildPack(edit.UpdateView):
    """Edit an existing BuildPack, recording a revision."""
    template_name = 'buildpack_form.html'
    model = models.BuildPack
    success_url = reverse_lazy('buildpack_list')
    fields = ['repo_url', 'repo_type', 'desc', 'order']
    def form_valid(self, form):
        """
        Override so we can setup django-reversion versioning.
        """
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Updated from web form.")
            return_value = super(UpdateBuildPack, self).form_valid(form)
        return return_value
class DeleteBuildPack(edit.DeleteView):
    """Confirmation view that deletes a BuildPack, with a revision."""
    model = models.BuildPack
    template_name = 'confirm_delete.html'
    success_url = reverse_lazy('buildpack_list')
    def delete(self, request, *args, **kwargs):
        """Record the deletion in a django-reversion revision."""
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Deleted from web form.")
            # Forward *args/**kwargs so URL-captured parameters reach the
            # parent implementation instead of being silently dropped.
            return super(DeleteBuildPack, self).delete(
                request, *args, **kwargs)
# Stack views
class ListStack(ListView):
    """Listing of all OS stacks."""
    model = models.OSStack
    template_name = 'stack_list.html'
@login_required
def edit_stack(request, stack_id=None):
    """
    Create or edit an OS stack; optionally kick off an image build.

    When the form's 'build_now' flag is set, a new OSImage named
    <stackname>_<YYYYMMDD>_<counter> is created and a build task queued.
    """
    if stack_id:
        stack = models.OSStack.objects.get(id=stack_id)
    else:
        stack = None
    form = forms.StackForm(request.POST or None, request.FILES or None,
                           instance=stack)
    if form.is_valid():
        form.save()
        stack = form.instance  # In case we just made a new one.
        if form.cleaned_data['build_now']:
            # Image names should look like stackname_date_counter
            name_prefix = '%s_%s_' % (
                form.instance.name,
                datetime.datetime.today().strftime('%Y%m%d'),
            )
            # Count today's builds to pick the next counter value.
            builds_today = models.OSImage.objects.filter(
                name__startswith=name_prefix).count()
            image_name = name_prefix + str(builds_today + 1)
            image = models.OSImage(
                stack=stack,
                name=image_name,
                base_image_url=form.instance.base_image_url,
                base_image_name=form.instance.name + '_base',
                provisioning_script_url=form.instance.provisioning_script.url,
            )
            image.save()
            events.eventify(
                request.user, 'build image', image,
                resource_uri='/stack/{}/'.format(image.id))
            tasks.build_image.delay(image.id)
        return redirect('dash')
    values = dict(
        form=form,
        object=stack,
        enctype='multipart/form-data',
    )
    return render(request, 'stack_form.html', values)
class DeleteStack(edit.DeleteView):
    """Confirmation view that deletes an OSStack, with a revision."""
    model = models.OSStack
    template_name = 'confirm_delete.html'
    success_url = reverse_lazy('stack_list')
    def delete(self, request, *args, **kwargs):
        """Record the deletion in a django-reversion revision."""
        with create_revision():
            revisions.set_user(self.request.user)
            revisions.set_comment("Deleted from web form.")
            # Forward *args/**kwargs so URL-captured parameters reach the
            # parent implementation instead of being silently dropped.
            return super(DeleteStack, self).delete(request, *args, **kwargs)
def login(request):
    """
    Log a user in via the login form, then honor the 'next' parameter.
    """
    form = forms.LoginForm(request.POST or None)
    if not form.is_valid():
        return render(request, 'login.html', {
            'form': form,
            'hide_nav': True
        })
    # The form's validation authenticated the user; attach the session.
    django_login(request, form.user)
    # NOTE(review): 'next' is used unvalidated, which allows open
    # redirects — consider validating the target host before redirecting.
    destination = request.GET.get('next', '/')
    return HttpResponseRedirect(destination)
def logout(request):
    """End the session and bounce back to the home page."""
    django_logout(request)
    return HttpResponseRedirect('/')
| StarcoderdataPython |
# Window-stacking simulation: n axis-aligned windows (bottom to top),
# m clicks.  For each click print the id of the topmost window hit and
# raise it to the top, or IGNORED when no window contains the click.
n, m = map(int, input().split())
windows = []
for idx in range(1, n + 1):
    x1, y1, x2, y2 = map(int, input().split())
    windows.append((x1, y1, x2, y2, idx))
for _ in range(m):
    x, y = map(int, input().split())
    # Scan from the topmost window (end of the list) downward.
    for i in range(len(windows) - 1, -1, -1):
        wx1, wy1, wx2, wy2, wid = windows[i]
        if wx1 <= x <= wx2 and wy1 <= y <= wy2:
            print(wid)
            # Raise the clicked window to the top in one step instead of
            # the original element-by-element shift.
            windows.append(windows.pop(i))
            break
    else:
        # No window contains the click.
        print("IGNORED")
| StarcoderdataPython |
class Solution {
    /// Returns the longest common prefix shared by every string in `strs`.
    /// An empty input yields an empty string.
    func longestCommonPrefix(_ strs: [String]) -> String {
        guard let first = strs.first else {
            return ""
        }
        if strs.count == 1 {
            return first
        }
        // Materialise each string's characters once, instead of rebuilding
        // Array(item) on every index comparison.  Using character counts
        // (not UTF-8 byte lengths, as before) also avoids out-of-range
        // indexing on multi-byte strings.
        let charArrays = strs.map { Array($0) }
        let minLen = charArrays.map { $0.count }.min() ?? 0
        var result = ""
        for i in 0..<minLen {
            let candidate = charArrays[0][i]
            // Stop at the first column where any string disagrees.
            if charArrays.allSatisfy({ $0[i] == candidate }) {
                result.append(candidate)
            } else {
                break
            }
        }
        return result
    }
}
// Quick manual check of the solver.
let solver = Solution()
let commonPrefix = solver.longestCommonPrefix(["flower", "flow", "flight"])
print(commonPrefix)
| StarcoderdataPython |
1691432 | <filename>src/XmlDataReader.py
from Types import DataType
from DataReader import DataReader
import xml.etree.ElementTree as ET
class XmlDataReader(DataReader):
    """Read per-student subject scores from an XML document."""

    def __init__(self) -> None:
        # NOTE(review): results accumulate across read() calls because this
        # dict lives on the instance — confirm that is intended.
        self.students: DataType = {}

    def read(self, path: str) -> DataType:
        """Parse *path* and return {student name: [(subject, score), ...]}."""
        with open(path, encoding="utf-8") as file:
            root = ET.parse(file).getroot()
            for person in root:
                # Element tags encode names with underscores for spaces.
                name = person.tag.replace('_', ' ')
                # Missing text counts as a score of zero.
                self.students[name] = [
                    (subj.tag, int(subj.text or "0")) for subj in person
                ]
        return self.students
| StarcoderdataPython |
1759433 | <filename>packages/pyre/primitives/Path.py
# -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2022 all rights reserved
#
# externals
import collections
import functools
import io
import os
import pwd
import stat
# helpers
def _unaryDispatch(f):
    """
    Build a {Path} method that forwards to the free function {f}.

    NOTE(review): despite forwarding "the string representation", the
    wrapper passes the path object itself straight through to {f} —
    presumably the os-level callees accept it as a path; confirm.
    """
    # declare and wrap my helper
    @functools.wraps(f)
    def dispatch(self, *args, **kwds):
        # forward the path and any extra arguments to the wrapped function
        return f(self, *args, **kwds)
    # return the function to leave behind
    return dispatch
# declaration
class Path(tuple):
    """
    A representation of a path

    Paths are immutable tuples of path components; an absolute path
    stores the separator itself as its first component.
    """
    # types
    from .exceptions import PathError, SymbolicLinkLoopError
    # string constants
    _CWD = '.'    # current working directory marker
    _SEP = '/'    # component separator
    _HOME = '~'   # home directory marker
    # path constants
    # N.B.: this used to be {None}, for unknown reasons; no test fails, and no there is
    # no known case that depends on the old value
    root = (_SEP,)
    # interface
    # methods about me and my parts implemented as properties
    @property
    def parts(self):
        """
        Build an iterator over my components
        """
        # easy enough
        return iter(self)
    @property
    def names(self):
        """
        Build an iterator over the names of my components, skipping the root marker, if present
        """
        # grab my parts
        parts = self.parts
        # if I am an absolute path
        if self.anchor:
            # advance the counter past the root marker
            next(parts)
        # and return the iterator
        return parts
    @property
    def anchor(self):
        """
        Return the representation of the root of the path, if present;
        relative paths yield the empty string
        """
        # if i am empty
        if len(self) == 0:
            # i can't be absolute
            return ''
        # get my first part
        first = self[0]
        # if it is my separator
        if first == self._SEP:
            # i have a root
            return first
        # otherwise, I don't
        return ''
    @property
    def parents(self):
        """
        Generate a sequence of the logical ancestors of the path,
        from my immediate parent toward the root
        """
        # get my type
        cls = type(self)
        # generate a sequence of lengths so i can build subsequences
        for pos in range(1,len(self)):
            # build a path out of a subsequence that doesn't include the last level
            yield super().__new__(cls, self[:-pos])
        # all done
        return
    @property
    def crumbs(self):
        """
        Generate a sequence of paths from here to the root
        """
        # first me
        yield self
        # and now my ancestors
        yield from self.parents
        # all done
        return
    @property
    def parent(self):
        """
        Build a path that is my logical ancestor

        Note that this is purely a lexical operation and is not guaranteed to yield correct
        results unless this path has been fully resolved
        """
        # the root
        if self == self.root:
            # is its own parent
            return self
        # for the rest, generate a sequence of length one shorter than me
        return super().__new__(type(self), self[:-1])
    @property
    def name(self):
        """
        Return the final path component
        """
        # N.B. this now returns a reasonable answer for both the root of the filesystem
        # and the {cwd}; both cases used to return empty strings, for unknown reasons;
        # there is no test that fails after this behavior change, and i haven't been able
        # to track down a case that depended on the old behavior, so here we go...
        # when i am empty
        if len(self) == 0:
            # the last component is the empty string
            return '.'
        # otherwise, get the last part of the path
        return self[-1]
    @property
    def path(self):
        """
        Return a string representing the full path
        """
        # easy enough
        return str(self)
    @property
    def suffix(self):
        """
        The file extension of the final path component
        """
        # grab my name
        name = self.name
        # look for the last '.'
        pos = name.rfind('.')
        # if not there
        if pos == -1:
            # we have nothing
            return ''
        # otherwise
        # NOTE(review): for dotfiles like '.bashrc' this returns the whole
        # name, unlike {pathlib.Path.suffix} — confirm it's intentional
        return name[pos:]
    @property
    def suffixes(self):
        """
        Return an iterable over the extensions in name
        """
        # get my name and skip any leading dots
        name = self.name.lstrip('.')
        # split on the '.', skip the first bit and return the rest with a leading '.'
        return ('.' + suffix for suffix in name.split('.')[1:])
    @property
    def stem(self):
        """
        The final path component without any suffixes
        """
        # grab my name
        name = self.name
        # look for the last '.'
        pos = name.rfind('.')
        # if not there
        if pos == -1:
            # my stem is my name
            return name
        # otherwise, drop the suffix
        return name[:pos]
    @property
    def contents(self):
        """
        Generate a sequence of my contents as paths
        """
        # go through my contents
        for name in os.listdir(self):
            # make a path and hand it to the caller
            yield self / name
        # all done
        return
    # introspection methods
    def as_posix(self):
        """
        Return a POSIX compliant representation
        """
        # i know how to do this
        return str(self)
    def as_uri(self):
        """
        Return a 'file:' URI representation; raises {ValueError} for
        relative paths
        """
        # if i am an absolute path
        if self.anchor:
            # splice my representation into a valid 'file:' uri
            return f"file://{self}"
        # otherwise, build an error message
        msg = f"'{self}' is a relative path and can't be expressed as a URI"
        # and complain
        raise ValueError(msg)
    def isAbsolute(self):
        """
        Check whether the path is absolute or not
        """
        # absolute paths carry an anchor
        return True if self.anchor else False
    def isReserved(self):
        """
        Check whether the path is reserved or not
        """
        # always false
        return False
    # methods about me and others
    def join(self, *others):
        """
        Combine me with {others} and make a new path
        """
        # get my type
        cls = type(self)
        # that's just what my constructor does
        return cls.__new__(cls, self, *others)
    def relativeTo(self, other):
        """
        Find a {path} such that {other} / {path} == {self};
        raises {ValueError} when {other} is not one of my ancestors
        """
        # coerce {other} into a path
        other = self.coerce(other)
        # the {path} exists iff {other} is a subsequence of {self}
        if len(other) > len(self):
            # no way
            raise ValueError(f"'{other}' is not a parent of '{self}'")
        # now check the individual levels
        for mine, hers in zip(self, other):
            # if they are not identical
            if mine != hers:
                # build the message
                error = f"'{self}' does not start with '{other}'"
                location = f"'{mine}' doesn't match '{hers}'"
                # and complain
                raise ValueError(f"{error}: {location}")
        # what's left is the answer
        return super().__new__(type(self), self[len(other):])
    def withName(self, name):
        """
        Build a new path with my name replaced by {name}
        """
        # check that the name has no separators in it
        if self._SEP in name:
            # complain
            raise ValueError(f"invalid name '{name}'")
        # replace my name and build a new path
        return super().__new__(type(self), self[:-1] + (name,))
    def withSuffix(self, suffix=None):
        """
        Build a new path with my suffix replaced by {suffix};
        passing {None} removes any existing suffix
        """
        # check that the suffix is valid
        if suffix and (not suffix.startswith('.') or self._SEP in suffix):
            # complain
            raise ValueError(f"invalid suffix '{suffix}'")
        # get my suffix
        mine = self.suffix
        # and my stem
        stem = self.stem
        # if the suffix is {None}
        if suffix is None:
            # and i have one, remove it; otherwise, clone me
            return self.withName(stem) if mine else self
        # if a suffix were supplied, append it to my stem and build a path
        return self.withName(name=stem+suffix)
    # real path interface
    @classmethod
    def cwd(cls):
        """
        Build a path that points to the current working directory
        """
        # get the directory and turn it into a path
        return cls(os.getcwd())
    @classmethod
    def home(cls, user=''):
        """
        Build a path that points to the {user}'s home directory;
        a blank {user} means the current user
        """
        # if we don't have a user
        if not user:
            # assume the current user
            dir = os.path.expanduser(cls._HOME)
        # otherwise
        else:
            # attempt to
            try:
                # index the {passwd} database using the user
                dir = pwd.getpwnam(user).pw_dir
            # if this fails
            except KeyError:
                # most likely cause is
                msg = f"the user '{user}' is not in the password database"
                # so complain
                raise RuntimeError(msg)
        # if we get this far, we have the name of the path; build a path and return it
        return cls(dir)
    def resolve(self):
        """
        Build an equivalent absolute normalized path that is free of symbolic links
        """
        # if I'm empty
        if len(self) == 0:
            # return the current working directory
            return self.cwd()
        # if I am the root
        if self == self.root:
            # I am already resolved
            return self
        # otherwise, get the guy to do his thing; the cache maps visited
        # links so {_resolve} can detect symbolic link loops
        return self._resolve(resolved={})
def expanduser(self, marker=_HOME):
"""
Build a path with '~' and '~user' patterns expanded
"""
# grab the user spec, which must be the last path component
spec = self[:-1]
# if it doesn't start with the magic character
if spec[0] != marker:
# we are done
return self
# otherwise, use it to look up the user's home directory; the user name is what follows
# the marker, and our implementation of {home} interprets a blank user name as the
# current user
home = self.home(user=spec[1:])
# build the new path and return it
return super().__new__(type(self), home + self[1:])
# real path introspection
def exists(self):
"""
Check whether I exist
"""
# MGA - 20160121
# N.B. do not be tempted to return {self} on success and {None} on failure: our
# representation of the {cwd} is an empty tuple, and that would always fail the
# existence test. at least until we short circuit {__bool__} to always return
# {True}. an idea whose merits were not clear at the time of this note
# attempt to
try:
# get my stat record
self.stat()
# if i don't exist or i am a broken link
except (FileNotFoundError, NotADirectoryError):
# stat is unhappy, so i don't exist
return False
# if i got this far, i exist
return True
def isBlockDevice(self):
"""
Check whether I am a block device
"""
# check with {stat}
return self.mask(stat.S_IFBLK)
def isCharacterDevice(self):
"""
Check whether I am a character device
"""
# check with {stat}
return self.mask(stat.S_IFCHR)
def isDirectory(self):
"""
Check whether I am a directory
"""
# check with {stat}
return self.mask(stat.S_IFDIR)
def isFile(self):
"""
Check whether I am a regular file
"""
# check with {stat}
return self.mask(stat.S_IFREG)
def isNamedPipe(self):
"""
Check whether I am a socket
"""
# check with {stat}
return self.mask(stat.S_IFIFO)
def isSocket(self):
"""
Check whether I am a socket
"""
# check with {stat}
return self.mask(stat.S_IFSOCK)
def isSymlink(self):
"""
Check whether I am a symbolic link
"""
# attempt to
try:
# get my stat record
mode = self.lstat().st_mode
# if anything goes wrong:
except (AttributeError, FileNotFoundError, NotADirectoryError):
# links are probably not supported here, so maybe not...
return False
# otherwise, check with my stat record
return stat.S_ISLNK(mode)
def mask(self, mask):
    """
    Get my stat record and filter me through {mask}
    """
    try:
        # fetch my metadata from the filesystem
        record = self.stat()
    except (FileNotFoundError, NotADirectoryError):
        # i don't exist, or i am a broken link; probably not...
        return False
    # extract the file type portion of the mode and compare it to the requested mask
    return stat.S_IFMT(record.st_mode) == mask
# physical path interface
# forwarding to standard library functions
# each name below is wrapped by {_unaryDispatch}, which adapts the corresponding library
# routine so it can be invoked as a method on path instances; presumably extra arguments
# are forwarded through (e.g. the mode for {chmod}) -- confirm against {_unaryDispatch}
chdir = _unaryDispatch(os.chdir)
chmod = _unaryDispatch(os.chmod)
lstat = _unaryDispatch(os.lstat)
stat = _unaryDispatch(os.stat)
open = _unaryDispatch(io.open)
rmdir = _unaryDispatch(os.rmdir)
unlink = _unaryDispatch(os.unlink)
# local implementations of the physical path interface
def mkdir(self, parents=False, exist_ok=False, **kwds):
    """
    Create a directory at my location.

    If {parents} is {True}, create all necessary intermediate levels; if {exist_ok} is
    {True}, do not raise an exception if the directory exists already
    """
    # if we were not asked to build the intermediate levels
    if not parents:
        # attempt to
        try:
            # create the directory
            return os.mkdir(self, **kwds)
        # if the directory exists already
        except FileExistsError:
            # and we care
            if not exist_ok:
                # complain
                raise
            # otherwise, we are done; previously control fell through to the
            # {os.makedirs} call below, which redundantly re-walked the path and could
            # raise when the existing entry is not a directory
            return None
    # if we are supposed to build the intermediate levels, delegate to the system routine
    return os.makedirs(self, exist_ok=exist_ok, **kwds)
def touch(self, mode=0o666, exist_ok=True):
    """
    Create a file at this path

    Not implemented yet; always raises {NotImplementedError}
    """
    # N.B. the default permission mask used to be the hexadecimal literal {0x666},
    # i.e. decimal 1638, which is not a meaningful permission set; the octal
    # {0o666} (rw-rw-rw-) is what was almost certainly intended
    # all done
    raise NotImplementedError('NYI!')
# constructors
@classmethod
def coerce(cls, *args):
    """
    Build a path out of the given arguments
    """
    # the regular constructor already knows how to digest arbitrary arguments,
    # so simply delegate to it
    return cls(*args)
# meta-methods
def __new__(cls, *args):
    """
    Build a new path out of strings or other paths
    """
    # paths are immutable, so a lone path argument can be passed through untouched
    if len(args) == 1 and isinstance(args[0], cls):
        return args[0]
    # anything else gets distilled into parts by {_parse} and handed to the base
    # class constructor
    return super().__new__(cls, cls._parse(args))
def __str__(self):
    """
    Assemble my parts into a string
    """
    # an empty part tuple stands for the current working directory
    if len(self) == 0:
        return self._CWD
    # grab my separator
    sep = self._SEP
    # set up an iterator over my parts
    parts = iter(self)
    # absolute paths store the separator itself as their first part; skip it from the
    # body but remember it as the prefix
    prefix = ''
    if self[0] == sep:
        next(parts)
        prefix = sep
    # glue the remaining parts together and prepend the prefix
    return prefix + sep.join(parts)
# implement the {os.PathLike} protocol
# reuse {__str__} so {os.fspath} and the standard library file routines can consume
# path instances directly
__fspath__ = __str__
def __bool__(self):
    """
    Test for non null values
    """
    # there are no conditions under which I am false: the empty tuple is a valid path
    # that stands for '.', so truth testing must not be used as an existence check;
    # use {exists} for that
    return True
# arithmetic; pure sugar but slower than other methods of assembling paths
def __truediv__(self, other):
    """
    Syntactic sugar for assembling paths
    """
    # hand both operands to {__new__}, which knows how to splice paths together;
    # {self} is the left operand, so it contributes its parts first
    klass = type(self)
    return klass.__new__(klass, self, other)
def __rtruediv__(self, other):
    """
    Syntactic sugar for assembling paths
    """
    # reflected division: {other} is the left operand, so it contributes its parts
    # first; delegate the splicing to {__new__}
    klass = type(self)
    return klass.__new__(klass, other, self)
# implementation details
@classmethod
def _parse(cls, args, sep=_SEP, home=_HOME, fragments=None):
    """
    Recognize each entry in {args} and distill its contribution to the path under construction
    """
    # N.B.: this implementation depends critically on retaining the value of {fragments}
    # across recursive invocations; therefore, {fragments} MUST NOT be reassigned to a new
    # value, and all modifications to its value must happen in place
    # initialize the pile on the outermost call
    if fragments is None:
        # empty it out
        fragments = []
    # go through the {args}
    for arg in args:
        # if {arg} is another path
        if isinstance(arg, cls):
            # absolute paths reset whatever has accumulated so far
            if len(arg) > 0 and arg[0] == sep:
                # clear out the current pile
                fragments.clear()
            # append its parts to mine
            fragments.extend(arg)
        # if {arg} is a string
        elif isinstance(arg, str):
            # empty strings
            if not arg:
                # are ignored
                continue
            # check whether it starts with my separator
            if arg[0] == sep:
                # in which case, clear out the current pile
                fragments.clear()
                # and start a new absolute path
                fragments.append(sep)
            # check whether it starts with the home marker
            elif arg[0] == home:
                # expand it
                arg = os.path.expanduser(arg)
                # clear out the current pile
                fragments.clear()
                # and start a new absolute path
                fragments.append(sep)
            # split on separator and remove blanks caused by multiple consecutive separators
            fragments.extend(filter(None, arg.split(sep)))
        # more general iterables
        elif isinstance(arg, collections.abc.Iterable):
            # recurse with their contents; BUG FIX: {home} used to be dropped here, so
            # a caller-supplied home marker was silently replaced by the default one
            # level down -- forward it along with {sep}
            cls._parse(args=arg, sep=sep, home=home, fragments=fragments)
        # anything else
        else:
            # is an error
            msg = f"can't parse '{arg}', of type {type(arg)}"
            # so complain
            raise TypeError(msg)
    # all done
    return fragments
def _resolve(self, base=None, resolved=None):
    """
    Workhorse for path resolution

    {base} is a fully resolved path against which my relative parts are interpreted;
    {resolved} is a memo mapping each visited prefix to its resolution, or to {None}
    while that prefix is still being resolved, which is how link loops are detected
    """
    # make sure the memo exists; previously a {None} memo caused a {TypeError} on the
    # first subscript below instead of a clean start
    if resolved is None:
        resolved = {}
    # what's left to resolve; assumes {self.parts} yields an iterator, as evidenced by
    # the {next} call below
    workload = self.parts
    # if i am an absolute path
    if self.anchor:
        # set my starting point
        base = self.root
        # skip the leading root marker
        next(workload)
    # if i am a relative path
    else:
        # my starting point is the current working directory, which is guaranteed to be
        # free of symbolic links
        base = self.cwd() if base is None else base
    # at this point, {base} is known to be a fully resolved path
    # go through my parts
    for part in workload:
        # empty parts, or parts that are '.'
        if not part or part == '.':
            # are skipped
            continue
        # parent directory markers
        if part == '..':
            # back me up by one level
            base = base.parent
            # and carry on
            continue
        # splice the part onto base
        newpath = base / part
        # check
        try:
            # whether we have been here before
            resolution = resolved[newpath]
        # if not
        except KeyError:
            # carry on
            pass
        # if yes
        else:
            # a null resolution means {newpath} is still being resolved further up the
            # recursion, i.e. we have walked into a cycle of links
            if resolution is None:
                # so complain
                raise self.SymbolicLinkLoopError(path=self, loop=newpath)
            # otherwise, replace {base} with its resolution
            base = resolution
            # and carry on
            continue
        # now we need to know whether what we have so far is a symbolic link
        if newpath.isSymlink():
            # add it to the pile, but mark it unresolved so loops can be detected
            resolved[newpath] = None
            # find out what it points to
            link = type(self)(os.readlink(str(newpath)))
            # resolve it in my context
            base = link._resolve(resolved=resolved, base=base)
            # remember this
            resolved[newpath] = base
        # if not
        else:
            # save it and carry on
            base = newpath
    # all done
    return base
# patches
# give {Path} a class level {root} attribute: the path to the filesystem root, built
# out of the class' own separator
Path.root = Path(Path._SEP)

# end of file
| StarcoderdataPython |
3207107 | <gh_stars>1-10
from smsiran.sms_ir import SmsIR
from smsiran.ghasedak import Ghasedak
| StarcoderdataPython |
3207613 | <filename>test/test_fetch.py<gh_stars>0
from pytest import mark
from test.util import postcodes_io_ok
@mark.skipif(postcodes_io_ok() is False, reason="Postcodes IO Down!")
@mark.asyncio
class TestApi:
    """Smoke tests that only run while the Postcodes IO service is reachable."""

    async def test_true(self):
        # the method is named test_true but used to `assert False`, which made the
        # suite fail unconditionally even when the service was up
        assert True
| StarcoderdataPython |
66504 | """Decorate functions with contracts."""
# pylint: disable=invalid-name
# pylint: disable=protected-access
# pylint: disable=wrong-import-position
# We need to explicitly assign the aliases instead of using
# ``from ... import ... as ...`` statements since mypy complains
# that the module icontract lacks these imports.
# See also:
# https://stackoverflow.com/questions/44344327/cant-make-mypy-work-with-init-py-aliases
import icontract._decorators

# the contract decorators
require = icontract._decorators.require
snapshot = icontract._decorators.snapshot
ensure = icontract._decorators.ensure
invariant = icontract._decorators.invariant

import icontract._globals

# shared globals re-exported at package level
aRepr = icontract._globals.aRepr
SLOW = icontract._globals.SLOW

import icontract._metaclass

# design-by-contract metaclass and convenience base class
DBCMeta = icontract._metaclass.DBCMeta
DBC = icontract._metaclass.DBC

import icontract._types

# private aliases for the contract value types
_Contract = icontract._types.Contract
_Snapshot = icontract._types.Snapshot

import icontract.errors

# the exception raised on a contract violation
ViolationError = icontract.errors.ViolationError
4829176 |
from django.conf import settings

# base template for the auth views; fall back to the bundled one when unset
TEMPLATE_BASE = settings.AUTH_TEMPLATE_BASE or "helios_auth/templates/base.html"

# enabled auth systems
from . import auth_systems
# when the setting is empty/falsy, enable every registered auth system
ENABLED_AUTH_SYSTEMS = settings.AUTH_ENABLED_SYSTEMS or list(auth_systems.AUTH_SYSTEMS.keys())
DEFAULT_AUTH_SYSTEM = settings.AUTH_DEFAULT_SYSTEM or None
3254956 | <gh_stars>1-10
import pandas.io.json
import sys
def convert_file(json_file, operation):
    """
    Flatten ``json_file`` (a dict or a list of dicts) into a table and write it as CSV.

    ``operation`` is handed straight to ``DataFrame.to_csv``: a file path or any
    writable buffer.  When the payload cannot be normalized as-is, it is wrapped in a
    single-field dict and the conversion is retried.
    """
    try:
        # pandas.json_normalize is the public spelling of the old
        # pandas.io.json.json_normalize (deprecated in 1.0, removed in 2.0)
        normalized = pandas.json_normalize(json_file)
        normalized.to_csv(operation)
    except AttributeError as err:
        print("Cannot create a .csv file due to nature of the json file ({}) - File's name: {}. Retrying...".format(str(err), operation), file=sys.stderr)
        # retry with the payload wrapped under a synthetic key
        # NOTE(review): a payload that keeps failing after wrapping would recurse
        # indefinitely -- unchanged from the original behavior
        convert_file({
            "field1": json_file
        }, operation)
| StarcoderdataPython |
13880 | """Use TIMESTAMP column for latest submission
Revision ID: eff<PASSWORD>0<PASSWORD>
Revises: <PASSWORD>
Create Date: 2017-01-08 22:20:43.814375
"""
# revision identifiers, used by Alembic.
# NOTE(review): both identifiers appear redacted ('<PASSWORD>' placeholders); the real
# Alembic revision hashes must be restored before this migration can run
revision = 'eff<PASSWORD>'
down_revision = '<PASSWORD>'

from alembic import op  # lgtm[py/unused-import]
import sqlalchemy as sa  # lgtm[py/unused-import]

import libweasyl
from libweasyl.legacy import UNIXTIME_OFFSET
def upgrade():
    """Convert profile.latest_submission_time from an offset unixtime integer to a TIMESTAMP."""
    # keep the legacy integer column around under a temporary name
    op.alter_column(
        'profile',
        'latest_submission_time',
        new_column_name='latest_submission_time_old',
    )
    # add the replacement TIMESTAMP-backed column, defaulting to the epoch
    op.add_column(
        'profile',
        sa.Column('latest_submission_time', libweasyl.models.helpers.ArrowColumn(), nullable=False, server_default='epoch'),
    )
    # backfill: legacy values are seconds shifted by UNIXTIME_OFFSET, so undo the
    # offset and add the result to the epoch as an interval
    op.execute(
        "UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE 'epoch' + "
        "(latest_submission_time_old - %d) * INTERVAL '1 second'" % (UNIXTIME_OFFSET,))
    # the legacy column is no longer needed
    op.drop_column('profile', 'latest_submission_time_old')
def downgrade():
    """Restore profile.latest_submission_time to the offset unixtime integer format."""
    # keep the TIMESTAMP column around under a temporary name
    op.alter_column(
        'profile',
        'latest_submission_time',
        new_column_name='latest_submission_time_new',
    )
    # re-create the legacy integer column
    op.add_column(
        'profile',
        sa.Column('latest_submission_time', libweasyl.models.helpers.WeasylTimestampColumn(), nullable=False, server_default='0'),
    )
    # backfill: seconds since the epoch, re-shifted by UNIXTIME_OFFSET
    op.execute(
        "UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d" % (UNIXTIME_OFFSET,))
    # drop the TIMESTAMP column
    op.drop_column('profile', 'latest_submission_time_new')
| StarcoderdataPython |
1782679 | <gh_stars>0
"""
Simple wrapper that adds some extra encoding capabilities needed for
this project.
"""
import collections
import datetime
import decimal
from json import JSONDecodeError # noqa
import json as json_impl
class JsonExtendedEncoder(json_impl.JSONEncoder):
    """
    JSON encoder that understands the extra types used in this project.

    Decimals become floats; times, dates and datetimes become their ``str()``
    form; ``collections.deque`` instances become the string form of an
    equivalent list.
    """

    def default(self, obj):
        # decimals serialize as plain floats
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        # temporal values serialize as their string representation
        temporal = (
            datetime.time,
            datetime.date,
            datetime.datetime,
        )
        if isinstance(obj, temporal):
            return str(obj)
        # Handle case where zeep returns the undocumented _raw_elements key
        # which is of type `collections.deque`. As the name suggestions this
        # object contains raw elements, which json will be unable to process,
        # therefore we iterate over the object and return a string
        # representation of it.
        if isinstance(obj, collections.deque):
            return str(list(obj))
        # anything else is deferred to the base class, which raises TypeError
        return super().default(obj)
def dump(*args, **kw):
    """Drop-in for :func:`json.dump` that defaults to :class:`JsonExtendedEncoder`."""
    # only reach for the project encoder when the caller did not pick one; the lazy
    # membership test also avoids evaluating the name unless it is actually needed
    if 'cls' not in kw:
        kw['cls'] = JsonExtendedEncoder
    return json_impl.dump(*args, **kw)
def dumps(*args, **kw):
    """Drop-in for :func:`json.dumps` that defaults to :class:`JsonExtendedEncoder`."""
    # only reach for the project encoder when the caller did not pick one; the lazy
    # membership test also avoids evaluating the name unless it is actually needed
    if 'cls' not in kw:
        kw['cls'] = JsonExtendedEncoder
    return json_impl.dumps(*args, **kw)
# plain re-exports: decoding needs no special handling, so the stdlib versions suffice
load = json_impl.load
loads = json_impl.loads
| StarcoderdataPython |
3294230 | <gh_stars>0
__author__ = 'michael'
from pyyelp.pyyelp import Yelp
def search_test():
    """Run a sample search query and print the raw response."""
    # NOTE: performs a live network call through the Yelp client
    client = Yelp()
    print(client.search(term='Starbucks', location='San Francisco'))
def business_test():
    """Look up a single business by id and print the raw response."""
    # NOTE: performs a live network call through the Yelp client
    client = Yelp()
    print(client.get_business_by_id('yelp-san-francisco'))
def phone_test():
    """Look up a business by phone number and print the raw response."""
    # NOTE: performs a live network call through the Yelp client
    client = Yelp()
    print(client.search_by_phone_number('8314399507'))
# execute the smoke checks when the module is run; each one hits the live API
search_test()
business_test()
phone_test() | StarcoderdataPython |
3389044 | <reponame>tradenity/python-sdk<gh_stars>1-10
# coding: utf-8
"""
Tradenity API
Tradenity eCommerce Rest API
Contact: <EMAIL>
"""
from __future__ import absolute_import
import re
import pprint
# python 2 and python 3 compatibility library
import six
from tradenity.api_client import ApiClient
class ReturnLineItem(object):
    """Swagger-generated model for a single line item of a return operation.

    NOTE: auto-generated code; edit with care.
    """

    # attribute name -> swagger type; consumed by to_dict() and the client machinery
    swagger_types = {
        'id': 'str',
        'meta': 'InstanceMeta',
        'unit_price': 'int',
        'quantity': 'int',
        'product': 'Product',
        'taxes': 'list[TaxRate]',
        'promotions': 'list[Promotion]',
        'subtotal': 'int',
        'total': 'int',
        'shipping_amount': 'int',
        'tax_amount': 'int',
        'discount_amount': 'int',
        'return_operation': 'ReturnOperation'
    }

    # pythonic attribute name -> JSON wire name
    attribute_map = {
        'id': 'id',
        'meta': '__meta',
        'unit_price': 'unitPrice',
        'quantity': 'quantity',
        'product': 'product',
        'taxes': 'taxes',
        'promotions': 'promotions',
        'subtotal': 'subtotal',
        'total': 'total',
        'shipping_amount': 'shippingAmount',
        'tax_amount': 'taxAmount',
        'discount_amount': 'discountAmount',
        'return_operation': 'returnOperation'
    }

    # lazily-created shared ApiClient; populated on first use by get_api_client()
    api_client = None
    def __init__(self, id=None, meta=None, unit_price=None, quantity=None, product=None, taxes=None, promotions=None, subtotal=None, total=None, shipping_amount=None, tax_amount=None, discount_amount=None, return_operation=None):
        """ReturnLineItem - a model defined in Swagger"""
        # backing fields for the generated properties below
        self._id = id
        self._meta = None
        self._unit_price = None
        self._quantity = None
        self._product = None
        self._taxes = None
        self._promotions = None
        self._subtotal = None
        self._total = None
        self._shipping_amount = None
        self._tax_amount = None
        self._discount_amount = None
        self._return_operation = None
        self.discriminator = None

        # route supplied values through their property setters; the purely optional
        # attributes are only assigned when a value was actually given
        if meta is not None:
            self.meta = meta
        self.unit_price = unit_price
        self.quantity = quantity
        self.product = product
        if taxes is not None:
            self.taxes = taxes
        if promotions is not None:
            self.promotions = promotions
        if subtotal is not None:
            self.subtotal = subtotal
        if total is not None:
            self.total = total
        if shipping_amount is not None:
            self.shipping_amount = shipping_amount
        if tax_amount is not None:
            self.tax_amount = tax_amount
        if discount_amount is not None:
            self.discount_amount = discount_amount
        self.return_operation = return_operation
    @property
    def id(self):
        """The instance id; lazily derived from the metadata href when not set."""
        # return the cached id when it is known
        if self._id:
            return self._id
        # without metadata there is nothing to derive the id from
        elif self.meta is None:
            return None
        else:
            # derive the id from the last path segment of the href and cache it
            self._id = self.meta.href.split("/")[-1]
            return self._id

    @id.setter
    def id(self, new_id):
        # explicit assignment overrides any href-derived value
        self._id = new_id
    # -- generated accessors -------------------------------------------------------
    # each property below is a plain getter/setter pair over the matching private
    # attribute; none of them performs any validation or conversion

    @property
    def meta(self):
        """Gets the meta of this ReturnLineItem.

        :return: The meta of this ReturnLineItem.
        :rtype: InstanceMeta
        """
        return self._meta

    @meta.setter
    def meta(self, meta):
        """Sets the meta of this ReturnLineItem.

        :param meta: The meta of this ReturnLineItem.
        :type: InstanceMeta
        """
        self._meta = meta

    @property
    def unit_price(self):
        """Gets the unit_price of this ReturnLineItem.

        :return: The unit_price of this ReturnLineItem.
        :rtype: int
        """
        return self._unit_price

    @unit_price.setter
    def unit_price(self, unit_price):
        """Sets the unit_price of this ReturnLineItem.

        :param unit_price: The unit_price of this ReturnLineItem.
        :type: int
        """
        self._unit_price = unit_price

    @property
    def quantity(self):
        """Gets the quantity of this ReturnLineItem.

        :return: The quantity of this ReturnLineItem.
        :rtype: int
        """
        return self._quantity

    @quantity.setter
    def quantity(self, quantity):
        """Sets the quantity of this ReturnLineItem.

        :param quantity: The quantity of this ReturnLineItem.
        :type: int
        """
        self._quantity = quantity

    @property
    def product(self):
        """Gets the product of this ReturnLineItem.

        :return: The product of this ReturnLineItem.
        :rtype: Product
        """
        return self._product

    @product.setter
    def product(self, product):
        """Sets the product of this ReturnLineItem.

        :param product: The product of this ReturnLineItem.
        :type: Product
        """
        self._product = product

    @property
    def taxes(self):
        """Gets the taxes of this ReturnLineItem.

        :return: The taxes of this ReturnLineItem.
        :rtype: list[TaxRate]
        """
        return self._taxes

    @taxes.setter
    def taxes(self, taxes):
        """Sets the taxes of this ReturnLineItem.

        :param taxes: The taxes of this ReturnLineItem.
        :type: list[TaxRate]
        """
        self._taxes = taxes

    @property
    def promotions(self):
        """Gets the promotions of this ReturnLineItem.

        :return: The promotions of this ReturnLineItem.
        :rtype: list[Promotion]
        """
        return self._promotions

    @promotions.setter
    def promotions(self, promotions):
        """Sets the promotions of this ReturnLineItem.

        :param promotions: The promotions of this ReturnLineItem.
        :type: list[Promotion]
        """
        self._promotions = promotions

    @property
    def subtotal(self):
        """Gets the subtotal of this ReturnLineItem.

        :return: The subtotal of this ReturnLineItem.
        :rtype: int
        """
        return self._subtotal

    @subtotal.setter
    def subtotal(self, subtotal):
        """Sets the subtotal of this ReturnLineItem.

        :param subtotal: The subtotal of this ReturnLineItem.
        :type: int
        """
        self._subtotal = subtotal

    @property
    def total(self):
        """Gets the total of this ReturnLineItem.

        :return: The total of this ReturnLineItem.
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this ReturnLineItem.

        :param total: The total of this ReturnLineItem.
        :type: int
        """
        self._total = total

    @property
    def shipping_amount(self):
        """Gets the shipping_amount of this ReturnLineItem.

        :return: The shipping_amount of this ReturnLineItem.
        :rtype: int
        """
        return self._shipping_amount

    @shipping_amount.setter
    def shipping_amount(self, shipping_amount):
        """Sets the shipping_amount of this ReturnLineItem.

        :param shipping_amount: The shipping_amount of this ReturnLineItem.
        :type: int
        """
        self._shipping_amount = shipping_amount

    @property
    def tax_amount(self):
        """Gets the tax_amount of this ReturnLineItem.

        :return: The tax_amount of this ReturnLineItem.
        :rtype: int
        """
        return self._tax_amount

    @tax_amount.setter
    def tax_amount(self, tax_amount):
        """Sets the tax_amount of this ReturnLineItem.

        :param tax_amount: The tax_amount of this ReturnLineItem.
        :type: int
        """
        self._tax_amount = tax_amount

    @property
    def discount_amount(self):
        """Gets the discount_amount of this ReturnLineItem.

        :return: The discount_amount of this ReturnLineItem.
        :rtype: int
        """
        return self._discount_amount

    @discount_amount.setter
    def discount_amount(self, discount_amount):
        """Sets the discount_amount of this ReturnLineItem.

        :param discount_amount: The discount_amount of this ReturnLineItem.
        :type: int
        """
        self._discount_amount = discount_amount

    @property
    def return_operation(self):
        """Gets the return_operation of this ReturnLineItem.

        :return: The return_operation of this ReturnLineItem.
        :rtype: ReturnOperation
        """
        return self._return_operation

    @return_operation.setter
    def return_operation(self, return_operation):
        """Sets the return_operation of this ReturnLineItem.

        :param return_operation: The return_operation of this ReturnLineItem.
        :type: ReturnOperation
        """
        self._return_operation = return_operation
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # walk the declared attributes, recursively converting nested models
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # lists: convert any model elements in place
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            # nested models know how to convert themselves
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            # dicts: convert model values in place
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            # plain values pass through unchanged
            else:
                result[attr] = value
        # dict subclasses also contribute their own items
        if issubclass(ReturnLineItem, dict):
            for key, value in self.items():
                result[key] = value

        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # only instances of the same model compare equal
        # NOTE(review): defining __eq__ without __hash__ makes instances unhashable
        # under Python 3 -- standard for this generated code, but worth knowing
        if not isinstance(other, ReturnLineItem):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
    @classmethod
    def get_api_client(cls):
        """Return the shared ApiClient, creating it on first use."""
        if cls.api_client is None:
            cls.api_client = ApiClient.instance()
        return cls.api_client

    @classmethod
    def find_all(cls, **kwargs):
        """List all instances; thin alias over list_all_return_line_items."""
        return cls.list_all_return_line_items(**kwargs)

    @classmethod
    def find_all_by(cls, **kwargs):
        """List instances matching the given filters (passed through as kwargs)."""
        return cls.list_all_return_line_items(**kwargs)

    @classmethod
    def find_one_by(cls, **kwargs):
        """Return the first matching instance, or implicitly None when nothing matches."""
        results = cls.list_all_return_line_items(**kwargs)
        if len(results) > 0:
            return results[0]

    @classmethod
    def find_by_id(cls, id):
        """Fetch a single instance by its id."""
        return cls.get_return_line_item_by_id(id)

    def create(self):
        """Persist this instance, copy the server-assigned id, and return self."""
        new_instance = self.create_return_line_item(self)
        self.id = new_instance.id
        return self

    def update(self):
        """Persist changes to this instance.

        NOTE(review): delegates to update_return_line_item_by_id, which is not among
        the methods visible here -- presumably defined further down in this generated
        module; confirm
        """
        return self.update_return_line_item_by_id(self.id, self)

    def delete(self):
        """Delete this instance on the server."""
        return self.delete_return_line_item_by_id(self.id)
    # NOTE(review): the `async=...` keyword arguments below (and the 'async' entries in
    # kwargs) follow the old swagger-codegen convention; `async` became a reserved word
    # in Python 3.7, so this module only parses on Python <= 3.6
    @classmethod
    def create_return_line_item(cls, return_line_item, **kwargs):
        """Create ReturnLineItem

        Create a new ReturnLineItem
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.create_return_line_item(return_line_item, async=True)
        >>> result = thread.get()

        :param async bool
        :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required)
        :return: ReturnLineItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
        else:
            (data) = cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
            return data

    @classmethod
    def _create_return_line_item_with_http_info(cls, return_line_item, **kwargs):
        """Create ReturnLineItem

        Create a new ReturnLineItem
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.create_return_line_item_with_http_info(return_line_item, async=True)
        >>> result = thread.get()

        :param async bool
        :param ReturnLineItem return_line_item: Attributes of returnLineItem to create (required)
        :return: ReturnLineItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # the framework parameters recognized alongside the payload
        all_params = ['return_line_item']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        query_params = []

        params = locals()
        # unknown keyword arguments become query parameters
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'return_line_item' is set
        if ('return_line_item' not in params or
                params['return_line_item'] is None):
            raise ValueError("Missing the required parameter `return_line_item` when calling `create_return_line_item`")

        collection_formats = {}

        path_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        # the model instance travels as the request body
        body_params = None
        if 'return_line_item' in params:
            body_params = params['return_line_item']
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])

        # Authentication setting
        auth_settings = []

        return cls.get_api_client().call_api(
            '/returnLineItems', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ReturnLineItem',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def delete_return_line_item_by_id(cls, return_line_item_id, **kwargs):
        """Delete ReturnLineItem

        Delete an instance of ReturnLineItem by its ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_return_line_item_by_id(return_line_item_id, async=True)
        >>> result = thread.get()

        :param async bool
        :param str return_line_item_id: ID of returnLineItem to delete. (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._delete_return_line_item_by_id_with_http_info(return_line_item_id, **kwargs)
        else:
            (data) = cls._delete_return_line_item_by_id_with_http_info(return_line_item_id, **kwargs)
            return data

    @classmethod
    def _delete_return_line_item_by_id_with_http_info(cls, return_line_item_id, **kwargs):
        """Delete ReturnLineItem

        Delete an instance of ReturnLineItem by its ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_return_line_item_by_id_with_http_info(return_line_item_id, async=True)
        >>> result = thread.get()

        :param async bool
        :param str return_line_item_id: ID of returnLineItem to delete. (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # the framework parameters recognized alongside the id
        all_params = ['return_line_item_id']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        query_params = []

        params = locals()
        # unknown keyword arguments become query parameters
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'return_line_item_id' is set
        if ('return_line_item_id' not in params or
                params['return_line_item_id'] is None):
            raise ValueError("Missing the required parameter `return_line_item_id` when calling `delete_return_line_item_by_id`")

        collection_formats = {}

        # the id is interpolated into the resource path
        path_params = {}
        if 'return_line_item_id' in params:
            path_params['returnLineItemId'] = params['return_line_item_id']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])

        # Authentication setting
        auth_settings = []

        return cls.get_api_client().call_api(
            '/returnLineItems/{returnLineItemId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def get_return_line_item_by_id(cls, return_line_item_id, **kwargs):
        """Find ReturnLineItem

        Return single instance of ReturnLineItem by its ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_return_line_item_by_id(return_line_item_id, async=True)
        >>> result = thread.get()

        :param async bool
        :param str return_line_item_id: ID of returnLineItem to return (required)
        :return: ReturnLineItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._get_return_line_item_by_id_with_http_info(return_line_item_id, **kwargs)
        else:
            (data) = cls._get_return_line_item_by_id_with_http_info(return_line_item_id, **kwargs)
            return data

    @classmethod
    def _get_return_line_item_by_id_with_http_info(cls, return_line_item_id, **kwargs):
        """Find ReturnLineItem

        Return single instance of ReturnLineItem by its ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.get_return_line_item_by_id_with_http_info(return_line_item_id, async=True)
        >>> result = thread.get()

        :param async bool
        :param str return_line_item_id: ID of returnLineItem to return (required)
        :return: ReturnLineItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # the framework parameters recognized alongside the id
        all_params = ['return_line_item_id']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        query_params = []

        params = locals()
        # unknown keyword arguments become query parameters
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'return_line_item_id' is set
        if ('return_line_item_id' not in params or
                params['return_line_item_id'] is None):
            raise ValueError("Missing the required parameter `return_line_item_id` when calling `get_return_line_item_by_id`")

        collection_formats = {}

        # the id is interpolated into the resource path
        path_params = {}
        if 'return_line_item_id' in params:
            path_params['returnLineItemId'] = params['return_line_item_id']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])

        # Authentication setting
        auth_settings = []

        return cls.get_api_client().call_api(
            '/returnLineItems/{returnLineItemId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ReturnLineItem',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def list_all_return_line_items(cls, **kwargs):
        """List ReturnLineItems

        Return a list of ReturnLineItems
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.list_all_return_line_items(async=True)
        >>> result = thread.get()

        :param async bool
        :param int page: page number
        :param int size: page size
        :param str sort: page order
        :return: page[ReturnLineItem]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._list_all_return_line_items_with_http_info(**kwargs)
        else:
            (data) = cls._list_all_return_line_items_with_http_info(**kwargs)
            return data

    @classmethod
    def _list_all_return_line_items_with_http_info(cls, **kwargs):
        """List ReturnLineItems

        Return a list of ReturnLineItems
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.list_all_return_line_items_with_http_info(async=True)
        >>> result = thread.get()

        :param async bool
        :param int page: page number
        :param int size: page size
        :param str sort: page order
        :return: page[ReturnLineItem]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # the recognized pagination parameters plus the framework ones
        all_params = ['page', 'size', 'sort']
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        query_params = []

        params = locals()
        # unknown keyword arguments become extra query parameters (filters)
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                query_params.append((key, val))
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        # pagination values travel as query parameters when supplied
        if 'page' in params:
            query_params.append(('page', params['page']))
        if 'size' in params:
            query_params.append(('size', params['size']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = cls.get_api_client().select_header_accept(
            ['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
            ['application/json'])

        # Authentication setting
        auth_settings = []

        return cls.get_api_client().call_api(
            '/returnLineItems', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='page[ReturnLineItem]',
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    @classmethod
    def replace_return_line_item_by_id(cls, return_line_item_id, return_line_item, **kwargs):
        """Replace ReturnLineItem

        Replace all attributes of ReturnLineItem
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.replace_return_line_item_by_id(return_line_item_id, return_line_item, async=True)
        >>> result = thread.get()

        :param async bool
        :param str return_line_item_id: ID of returnLineItem to replace (required)
        :param ReturnLineItem return_line_item: Attributes of returnLineItem to replace (required)
        :return: ReturnLineItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # delegate to the detailed variant; synchronous callers get just the data
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return cls._replace_return_line_item_by_id_with_http_info(return_line_item_id, return_line_item, **kwargs)
        else:
            (data) = cls._replace_return_line_item_by_id_with_http_info(return_line_item_id, return_line_item, **kwargs)
            return data
@classmethod
def _replace_return_line_item_by_id_with_http_info(cls, return_line_item_id, return_line_item, **kwargs):
    """Replace ReturnLineItem

    Replace all attributes of ReturnLineItem.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_return_line_item_by_id_with_http_info(return_line_item_id, return_line_item, async=True)
    >>> result = thread.get()

    :param async bool
    :param str return_line_item_id: ID of returnLineItem to replace (required)
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to replace (required)
    :return: ReturnLineItem
             If the method is called asynchronously,
             returns the request thread.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['return_line_item_id', 'return_line_item']
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    query_params = []
    params = locals()
    # Unrecognized keyword arguments are forwarded verbatim as extra
    # query parameters; recognized ones are copied into `params`.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            query_params.append((key, val))
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'return_line_item_id' is set
    if ('return_line_item_id' not in params or
            params['return_line_item_id'] is None):
        raise ValueError("Missing the required parameter `return_line_item_id` when calling `replace_return_line_item_by_id`")
    # verify the required parameter 'return_line_item' is set
    if ('return_line_item' not in params or
            params['return_line_item'] is None):
        raise ValueError("Missing the required parameter `return_line_item` when calling `replace_return_line_item_by_id`")

    collection_formats = {}
    path_params = {}
    if 'return_line_item_id' in params:
        path_params['returnLineItemId'] = params['return_line_item_id']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'return_line_item' in params:
        body_params = params['return_line_item']
    # HTTP header `Accept`
    header_params['Accept'] = cls.get_api_client().select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
        ['application/json'])
    # Authentication setting
    auth_settings = []
    # NOTE: 'async' became a reserved keyword in Python 3.7, so it cannot be
    # written as a literal keyword argument (SyntaxError). Passing it through
    # a kwargs dict is an equivalent call that parses on Python 2 and 3.
    call_kwargs = {
        'async': params.get('async'),
        '_return_http_data_only': params.get('_return_http_data_only'),
        '_preload_content': params.get('_preload_content', True),
        '_request_timeout': params.get('_request_timeout'),
    }
    return cls.get_api_client().call_api(
        '/returnLineItems/{returnLineItemId}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ReturnLineItem',
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        **call_kwargs)
@classmethod
def update_return_line_item_by_id(cls, return_line_item_id, return_line_item, **kwargs):
    """Update ReturnLineItem

    Update attributes of ReturnLineItem.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_return_line_item_by_id(return_line_item_id, return_line_item, async=True)
    >>> result = thread.get()

    :param async bool
    :param str return_line_item_id: ID of returnLineItem to update. (required)
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to update. (required)
    :return: ReturnLineItem
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the HTTP response to its data payload. Both the sync
    # and async paths simply return whatever the *_with_http_info helper
    # yields (a request thread when async=True, deserialized data otherwise).
    kwargs['_return_http_data_only'] = True
    return cls._update_return_line_item_by_id_with_http_info(
        return_line_item_id, return_line_item, **kwargs)
@classmethod
def _update_return_line_item_by_id_with_http_info(cls, return_line_item_id, return_line_item, **kwargs):
    """Update ReturnLineItem

    Update attributes of ReturnLineItem.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_return_line_item_by_id_with_http_info(return_line_item_id, return_line_item, async=True)
    >>> result = thread.get()

    :param async bool
    :param str return_line_item_id: ID of returnLineItem to update. (required)
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to update. (required)
    :return: ReturnLineItem
             If the method is called asynchronously,
             returns the request thread.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['return_line_item_id', 'return_line_item']
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    query_params = []
    params = locals()
    # Unrecognized keyword arguments are forwarded verbatim as extra
    # query parameters; recognized ones are copied into `params`.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            query_params.append((key, val))
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'return_line_item_id' is set
    if ('return_line_item_id' not in params or
            params['return_line_item_id'] is None):
        raise ValueError("Missing the required parameter `return_line_item_id` when calling `update_return_line_item_by_id`")
    # verify the required parameter 'return_line_item' is set
    if ('return_line_item' not in params or
            params['return_line_item'] is None):
        raise ValueError("Missing the required parameter `return_line_item` when calling `update_return_line_item_by_id`")

    collection_formats = {}
    path_params = {}
    if 'return_line_item_id' in params:
        path_params['returnLineItemId'] = params['return_line_item_id']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'return_line_item' in params:
        body_params = params['return_line_item']
    # HTTP header `Accept`
    header_params['Accept'] = cls.get_api_client().select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
        ['application/json'])
    # Authentication setting
    auth_settings = []
    # NOTE: 'async' became a reserved keyword in Python 3.7, so it cannot be
    # written as a literal keyword argument (SyntaxError). Passing it through
    # a kwargs dict is an equivalent call that parses on Python 2 and 3.
    call_kwargs = {
        'async': params.get('async'),
        '_return_http_data_only': params.get('_return_http_data_only'),
        '_preload_content': params.get('_preload_content', True),
        '_request_timeout': params.get('_request_timeout'),
    }
    return cls.get_api_client().call_api(
        '/returnLineItems/{returnLineItemId}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ReturnLineItem',
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        **call_kwargs)
| StarcoderdataPython |
161332 | <reponame>abingham/ackward<filename>site_scons/bygg/build_products.py
class BuildProducts(object):
    """A class to help keep track of build products in the build.

    Really this is just a thin wrapper around a dict stored under the
    'BUILD_PRODUCTS' key of the 'BUILD_TOOL' dict in an environment.
    This class doesn't care what is stored in that dict, though it's
    generally things like SharedLib configurators and such.
    """

    def __init__(self, env):
        """Look up (creating on demand) the nested dict keys in `env`
        that this class uses.
        """
        self.env = env
        # EAFP: the environment may not support dict.setdefault, so probe
        # each level with __getitem__ and create the dict on KeyError.
        try:
            tool = env['BUILD_TOOL']
        except KeyError:
            env['BUILD_TOOL'] = {}
            tool = env['BUILD_TOOL']
        try:
            self.products = tool['BUILD_PRODUCTS']
        except KeyError:
            tool['BUILD_PRODUCTS'] = {}
            self.products = tool['BUILD_PRODUCTS']

    def __getitem__(self, name):
        """Get the build product stored at `name`."""
        return self.products[name]

    def __setitem__(self, name, product):
        """Store `product` as the build product at `name`."""
        self.products[name] = product
| StarcoderdataPython |
3341673 | <filename>cinder/volume/drivers/netapp/iscsi.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp iSCSI storage systems.
This driver requires NetApp Clustered Data ONTAP or 7-mode
storage systems with installed iSCSI licenses.
"""
import sys
import time
import uuid
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
from cinder.volume.drivers.netapp.api import NaServer
from cinder.volume.drivers.netapp.options import netapp_7mode_opts
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp.utils import provide_ems
from cinder.volume.drivers.netapp.utils import validate_instantiation
from cinder.volume import volume_types
from oslo.config import cfg
# Module-level logger and option registration: every NetApp driver option
# group is registered on the global oslo.config CONF object at import time
# so the options are visible to cinder's configuration machinery.
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(netapp_connection_opts)
CONF.register_opts(netapp_transport_opts)
CONF.register_opts(netapp_basicauth_opts)
CONF.register_opts(netapp_cluster_opts)
CONF.register_opts(netapp_7mode_opts)
CONF.register_opts(netapp_provisioning_opts)
class NetAppLun(object):
    """Represents a LUN on NetApp storage.

    A lightweight value object: a backend handle, the LUN name, its size
    and a free-form metadata dict (never None after construction).
    """

    def __init__(self, handle, name, size, metadata_dict):
        self.handle = handle
        self.name = name
        self.size = size
        # Normalize a missing/None metadata dict to an empty one.
        self.metadata = metadata_dict or {}

    def get_metadata_property(self, prop):
        """Get the metadata property of a LUN.

        Returns None (after a debug log) when the property is absent.
        """
        try:
            return self.metadata[prop]
        except KeyError:
            msg = _("No metadata property %(prop)s defined for the"
                    " LUN %(name)s")
            LOG.debug(msg % {'prop': prop, 'name': self.name})

    def __str__(self, *args, **kwargs):
        """Human-readable summary used in log messages."""
        return 'NetApp Lun[handle:%s, name:%s, size:%s, metadata:%s]'\
            % (self.handle, self.name, self.size, self.metadata)
class NetAppDirectISCSIDriver(driver.ISCSIDriver):
    """NetApp Direct iSCSI volume driver.

    Abstract base for the C-mode and 7-mode variants: holds the ONTAP API
    client, the LUN cache (self.lun_table) and all igroup/LUN-mapping
    logic; filer-specific lookups are left as NotImplementedError hooks.

    NOTE(review): this module is Python 2 only (long literals, the
    three-expression ``raise`` form below); do not expect it to parse
    under Python 3.
    """
    # Prefix for initiator groups created by this driver; also used to
    # recognize "our" igroups when reusing an existing one.
    IGROUP_PREFIX = 'openstack-'
    # Configuration options that must be non-empty for the driver to start.
    required_flags = ['netapp_transport_type', 'netapp_login',
                      'netapp_password', 'netapp_server_hostname',
                      'netapp_server_port']

    def __init__(self, *args, **kwargs):
        super(NetAppDirectISCSIDriver, self).__init__(*args, **kwargs)
        validate_instantiation(**kwargs)
        self.configuration.append_config_values(netapp_connection_opts)
        self.configuration.append_config_values(netapp_basicauth_opts)
        self.configuration.append_config_values(netapp_transport_opts)
        self.configuration.append_config_values(netapp_provisioning_opts)
        # Cache of discovered LUNs, keyed by LUN name -> NetAppLun.
        self.lun_table = {}

    def _create_client(self, **kwargs):
        """Instantiate a client for NetApp server.

        This method creates NetApp server client for api communication.
        """
        host_filer = kwargs['hostname']
        LOG.debug(_('Using NetApp filer: %s') % host_filer)
        self.client = NaServer(host=host_filer,
                               server_type=NaServer.SERVER_TYPE_FILER,
                               transport_type=kwargs['transport_type'],
                               style=NaServer.STYLE_LOGIN_PASSWORD,
                               username=kwargs['login'],
                               password=kwargs['password'])

    def _do_custom_setup(self):
        """Does custom setup depending on the type of filer."""
        raise NotImplementedError()

    def _check_flags(self):
        """Ensure that the flags we care about are set."""
        required_flags = self.required_flags
        for flag in required_flags:
            if not getattr(self.configuration, flag, None):
                msg = _('%s is not set') % flag
                raise exception.InvalidInput(data=msg)

    def do_setup(self, context):
        """Setup the NetApp Volume driver.

        Called one time by the manager after the driver is loaded.
        Validate the flags we care about and setup NetApp
        client.
        """
        self._check_flags()
        # NOTE(review): the password value was redacted to a <PASSWORD>
        # placeholder in this copy; it should read
        # self.configuration.netapp_password — confirm against upstream.
        self._create_client(
            transport_type=self.configuration.netapp_transport_type,
            login=self.configuration.netapp_login,
            password=<PASSWORD>,
            hostname=self.configuration.netapp_server_hostname,
            port=self.configuration.netapp_server_port)
        self._do_custom_setup()

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Discovers the LUNs on the NetApp server.
        """
        self.lun_table = {}
        self._get_lun_list()
        LOG.debug(_("Success getting LUN list from server"))

    def create_volume(self, volume):
        """Driver entry point for creating a new volume."""
        default_size = '104857600'  # 100 MB
        gigabytes = 1073741824L  # 2^30
        name = volume['name']
        # A requested size of 0 falls back to a 100 MB LUN.
        if int(volume['size']) == 0:
            size = default_size
        else:
            size = str(int(volume['size']) * gigabytes)
        metadata = {}
        metadata['OsType'] = 'linux'
        metadata['SpaceReserved'] = 'true'
        self._create_lun_on_eligible_vol(name, size, metadata)
        LOG.debug(_("Created LUN with name %s") % name)
        handle = self._create_lun_handle(metadata)
        self._add_lun_to_table(NetAppLun(handle, name, size, metadata))

    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        name = volume['name']
        metadata = self._get_lun_attr(name, 'metadata')
        # Unknown LUN: warn and treat the delete as a no-op.
        if not metadata:
            msg = _("No entry in LUN table for volume/snapshot %(name)s.")
            msg_fmt = {'name': name}
            LOG.warn(msg % msg_fmt)
            return
        lun_destroy = NaElement.create_node_with_children(
            'lun-destroy',
            **{'path': metadata['Path'],
               'force': 'true'})
        self.client.invoke_successfully(lun_destroy, True)
        LOG.debug(_("Destroyed LUN %s") % name)
        self.lun_table.pop(name)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        handle = self._get_lun_attr(volume['name'], 'handle')
        return {'provider_location': handle}

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        handle = self._get_lun_attr(volume['name'], 'handle')
        return {'provider_location': handle}

    def remove_export(self, context, volume):
        """Driver exntry point to remove an export for a volume.

        Since exporting is idempotent in this driver, we have nothing
        to do for unexporting.
        """
        pass

    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        Do the LUN masking on the storage system so the initiator can access
        the LUN on the target. Also return the iSCSI properties so the
        initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because cannot store the
        LUN number in the database. We only find out what the LUN number will
        be during this method call so we construct the properties dictionary
        ourselves.
        """
        initiator_name = connector['initiator']
        name = volume['name']
        lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
        msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
        msg_fmt = {'name': name, 'initiator_name': initiator_name}
        LOG.debug(msg % msg_fmt)
        iqn = self._get_iscsi_service_details()
        target_details_list = self._get_target_details()
        msg = _("Succesfully fetched target details for LUN %(name)s and "
                "initiator %(initiator_name)s")
        msg_fmt = {'name': name, 'initiator_name': initiator_name}
        LOG.debug(msg % msg_fmt)
        if not target_details_list:
            msg = _('Failed to get LUN target details for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % name)
        # Prefer the first enabled interface; fall back to the first entry.
        target_details = None
        for tgt_detail in target_details_list:
            if tgt_detail.get('interface-enabled', 'true') == 'true':
                target_details = tgt_detail
                break
        if not target_details:
            target_details = target_details_list[0]
        # NOTE(review): this only triggers when address is falsy AND port is
        # truthy; it likely intends ``not (address and port)`` — confirm
        # against upstream before changing.
        if not target_details['address'] and target_details['port']:
            msg = _('Failed to get target portal for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % name)
        if not iqn:
            msg = _('Failed to get target IQN for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % name)
        properties = {}
        properties['target_discovered'] = False
        (address, port) = (target_details['address'], target_details['port'])
        properties['target_portal'] = '%s:%s' % (address, port)
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun_id
        properties['volume_id'] = volume['id']
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret
        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }

    def create_snapshot(self, snapshot):
        """Driver entry point for creating a snapshot.

        This driver implements snapshots by using efficient single-file
        (LUN) cloning.
        """
        vol_name = snapshot['volume_name']
        snapshot_name = snapshot['name']
        lun = self.lun_table[vol_name]
        # 'false' -> snapshot clone is not space-reserved.
        self._clone_lun(lun.name, snapshot_name, 'false')

    def delete_snapshot(self, snapshot):
        """Driver entry point for deleting a snapshot."""
        # Snapshots are plain LUNs, so volume deletion applies directly.
        self.delete_volume(snapshot)
        LOG.debug(_("Snapshot %s deletion successful") % snapshot['name'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Driver entry point for creating a new volume from a snapshot.

        Many would call this "cloning" and in fact we use cloning to implement
        this feature.
        """
        vol_size = volume['size']
        snap_size = snapshot['volume_size']
        # Resizing during clone is not supported; sizes must match exactly.
        if vol_size != snap_size:
            msg = _('Cannot create volume of size %(vol_size)s from '
                    'snapshot of size %(snap_size)s')
            msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size}
            raise exception.VolumeBackendAPIException(data=msg % msg_fmt)
        snapshot_name = snapshot['name']
        new_name = volume['name']
        self._clone_lun(snapshot_name, new_name, 'true')

    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to unattach a volume from an instance.

        Unmask the LUN on the storage system so the given intiator can no
        longer access it.
        """
        initiator_name = connector['initiator']
        name = volume['name']
        metadata = self._get_lun_attr(name, 'metadata')
        path = metadata['Path']
        self._unmap_lun(path, initiator_name)
        msg = _("Unmapped LUN %(name)s from the initiator "
                "%(initiator_name)s")
        msg_fmt = {'name': name, 'initiator_name': initiator_name}
        LOG.debug(msg % msg_fmt)

    def _get_ontapi_version(self):
        """Gets the supported ontapi version.

        Returns a (major, minor) tuple of version strings.
        """
        ontapi_version = NaElement('system-get-ontapi-version')
        res = self.client.invoke_successfully(ontapi_version, False)
        major = res.get_child_content('major-version')
        minor = res.get_child_content('minor-version')
        return (major, minor)

    def _create_lun_on_eligible_vol(self, name, size, metadata):
        """Creates an actual lun on filer.

        Picks a volume with enough free space (size scaled by the
        netapp_size_multiplier option) and records Path/Volume/Qtree
        into `metadata` as a side effect.
        """
        req_size = float(size) *\
            float(self.configuration.netapp_size_multiplier)
        volume = self._get_avl_volume_by_size(req_size)
        if not volume:
            msg = _('Failed to get vol with required size for volume: %s')
            raise exception.VolumeBackendAPIException(data=msg % name)
        path = '/vol/%s/%s' % (volume['name'], name)
        lun_create = NaElement.create_node_with_children(
            'lun-create-by-size',
            **{'path': path, 'size': size,
               'ostype': metadata['OsType'],
               'space-reservation-enabled':
               metadata['SpaceReserved']})
        self.client.invoke_successfully(lun_create, True)
        metadata['Path'] = '/vol/%s/%s' % (volume['name'], name)
        metadata['Volume'] = volume['name']
        metadata['Qtree'] = None

    def _get_avl_volume_by_size(self, size):
        """Get the available volume by size."""
        raise NotImplementedError()

    def _get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        raise NotImplementedError()

    def _get_target_details(self):
        """Gets the target portal details."""
        raise NotImplementedError()

    def _create_lun_handle(self, metadata):
        """Returns lun handle based on filer type."""
        raise NotImplementedError()

    def _get_lun_list(self):
        """Gets the list of luns on filer."""
        raise NotImplementedError()

    def _extract_and_populate_luns(self, api_luns):
        """Extracts the luns from api.

        Populates in the lun table.
        """
        for lun in api_luns:
            meta_dict = self._create_lun_meta(lun)
            path = lun.get_child_content('path')
            # LUN name is the last path component.
            (rest, splitter, name) = path.rpartition('/')
            handle = self._create_lun_handle(meta_dict)
            size = lun.get_child_content('size')
            discovered_lun = NetAppLun(handle, name,
                                       size, meta_dict)
            self._add_lun_to_table(discovered_lun)

    def _is_naelement(self, elem):
        """Checks if element is NetApp element."""
        if not isinstance(elem, NaElement):
            raise ValueError('Expects NaElement')

    def _map_lun(self, name, initiator, initiator_type='iscsi', lun_id=None):
        """Maps lun to the initiator and returns lun id assigned."""
        metadata = self._get_lun_attr(name, 'metadata')
        os = metadata['OsType']
        path = metadata['Path']
        if self._check_allowed_os(os):
            os = os
        else:
            os = 'default'
        igroup_name = self._get_or_create_igroup(initiator,
                                                 initiator_type, os)
        lun_map = NaElement.create_node_with_children(
            'lun-map', **{'path': path,
                          'initiator-group': igroup_name})
        if lun_id:
            lun_map.add_new_child('lun-id', lun_id)
        try:
            result = self.client.invoke_successfully(lun_map, True)
            return result.get_child_content('lun-id-assigned')
        except NaApiError as e:
            code = e.code
            message = e.message
            msg = _('Error mapping lun. Code :%(code)s, Message:%(message)s')
            msg_fmt = {'code': code, 'message': message}
            exc_info = sys.exc_info()
            LOG.warn(msg % msg_fmt)
            # The map call may fail because the LUN is already mapped;
            # in that case recover the existing lun id instead of failing.
            (igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator)
            if lun_id is not None:
                return lun_id
            else:
                # Python 2 three-expression raise: re-raise the original
                # exception with its traceback.
                raise exc_info[0], exc_info[1], exc_info[2]

    def _unmap_lun(self, path, initiator):
        """Unmaps a lun from given initiator."""
        (igroup_name, lun_id) = self._find_mapped_lun_igroup(path, initiator)
        lun_unmap = NaElement.create_node_with_children(
            'lun-unmap',
            **{'path': path,
               'initiator-group': igroup_name})
        try:
            self.client.invoke_successfully(lun_unmap, True)
        except NaApiError as e:
            msg = _("Error unmapping lun. Code :%(code)s,"
                    " Message:%(message)s")
            msg_fmt = {'code': e.code, 'message': e.message}
            exc_info = sys.exc_info()
            LOG.warn(msg % msg_fmt)
            # if the lun is already unmapped
            if e.code == '13115' or e.code == '9016':
                pass
            else:
                # Python 2 three-expression raise (see _map_lun).
                raise exc_info[0], exc_info[1], exc_info[2]

    def _find_mapped_lun_igroup(self, path, initiator, os=None):
        """Find the igroup for mapped lun with initiator."""
        raise NotImplementedError()

    def _get_or_create_igroup(self, initiator, initiator_type='iscsi',
                              os='default'):
        """Checks for an igroup for an initiator.

        Creates igroup if not found.
        """
        igroups = self._get_igroup_by_initiator(initiator=initiator)
        igroup_name = None
        # Reuse an existing driver-owned igroup only when OS type and
        # initiator type both match.
        for igroup in igroups:
            if igroup['initiator-group-os-type'] == os:
                if igroup['initiator-group-type'] == initiator_type or \
                        igroup['initiator-group-type'] == 'mixed':
                    if igroup['initiator-group-name'].startswith(
                            self.IGROUP_PREFIX):
                        igroup_name = igroup['initiator-group-name']
                        break
        if not igroup_name:
            igroup_name = self.IGROUP_PREFIX + str(uuid.uuid4())
            self._create_igroup(igroup_name, initiator_type, os)
            self._add_igroup_initiator(igroup_name, initiator)
        return igroup_name

    def _get_igroup_by_initiator(self, initiator):
        """Get igroups by initiator."""
        raise NotImplementedError()

    def _check_allowed_os(self, os):
        """Checks if the os type supplied is NetApp supported."""
        if os in ['linux', 'aix', 'hpux', 'windows', 'solaris',
                  'netware', 'vmware', 'openvms', 'xen', 'hyper_v']:
            return True
        else:
            return False

    def _create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
        """Creates igoup with specified args."""
        igroup_create = NaElement.create_node_with_children(
            'igroup-create',
            **{'initiator-group-name': igroup,
               'initiator-group-type': igroup_type,
               'os-type': os_type})
        self.client.invoke_successfully(igroup_create, True)

    def _add_igroup_initiator(self, igroup, initiator):
        """Adds initiators to the specified igroup."""
        igroup_add = NaElement.create_node_with_children(
            'igroup-add',
            **{'initiator-group-name': igroup,
               'initiator': initiator})
        self.client.invoke_successfully(igroup_add, True)

    def _get_qos_type(self, volume):
        """Get the storage service type for a volume.

        Returns the volume-type name, or None when the volume has no type.
        """
        type_id = volume['volume_type_id']
        if not type_id:
            return None
        volume_type = volume_types.get_volume_type(None, type_id)
        if not volume_type:
            return None
        return volume_type['name']

    def _add_lun_to_table(self, lun):
        """Adds LUN to cache table."""
        if not isinstance(lun, NetAppLun):
            msg = _("Object is not a NetApp LUN.")
            raise exception.VolumeBackendAPIException(data=msg)
        self.lun_table[lun.name] = lun

    def _clone_lun(self, name, new_name, space_reserved):
        """Clone LUN with the given name to the new name."""
        raise NotImplementedError()

    def _get_lun_by_args(self, **args):
        """Retrives lun with specified args."""
        raise NotImplementedError()

    def _get_lun_attr(self, name, attr):
        """Get the attributes for a LUN from our cache table.

        Returns None (after a warning) when the LUN or attribute is missing.
        """
        if not name in self.lun_table or not hasattr(
                self.lun_table[name], attr):
            LOG.warn(_("Could not find attribute for LUN named %s") % name)
            return None
        return getattr(self.lun_table[name], attr)

    def _create_lun_meta(self, lun):
        # Subclass hook: build the metadata dict for a discovered LUN.
        raise NotImplementedError()

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        vol_size = volume['size']
        src_vol = self.lun_table[src_vref['name']]
        src_vol_size = src_vref['size']
        # Resizing during clone is not supported; sizes must match exactly.
        if vol_size != src_vol_size:
            msg = _('Cannot clone volume of size %(vol_size)s from '
                    'src volume of size %(src_vol_size)s')
            msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size}
            raise exception.VolumeBackendAPIException(data=msg % msg_fmt)
        new_name = volume['name']
        self._clone_lun(src_vol.name, new_name, 'true')

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""
        raise NotImplementedError()
class NetAppDirectCmodeISCSIDriver(NetAppDirectISCSIDriver):
"""NetApp C-mode iSCSI volume driver."""
def __init__(self, *args, **kwargs):
    """Initialize the C-mode driver and register its cluster options."""
    super(NetAppDirectCmodeISCSIDriver, self).__init__(*args, **kwargs)
    # Cluster-specific (vserver) options on top of the base driver options.
    self.configuration.append_config_values(netapp_cluster_opts)
def _do_custom_setup(self):
    """Does custom setup for ontap cluster.

    Order matters here: the vserver must be set on the client before
    any API call, and a provisional API version is needed to issue the
    version-probe call itself.
    """
    self.vserver = self.configuration.netapp_vserver
    # We set vserver in client permanently.
    # To use tunneling enable_tunneling while invoking api
    self.client.set_vserver(self.vserver)
    # Default values to run first api
    self.client.set_api_version(1, 15)
    (major, minor) = self._get_ontapi_version()
    self.client.set_api_version(major, minor)
def _get_avl_volume_by_size(self, size):
    """Get the available volume by size.

    Pages through volume-get-iter results and returns a dict describing
    the first volume whose free space is at least `size`, or None.
    """
    tag = None
    while True:
        request = self._create_avl_vol_request(self.vserver, tag)
        response = self.client.invoke_successfully(request)
        tag = response.get_child_content('next-tag')
        candidates = response.get_child_by_name(
            'attributes-list').get_children()
        for candidate in candidates:
            space = candidate.get_child_by_name('volume-space-attributes')
            free = space.get_child_content('size-available')
            if float(free) >= float(size):
                id_attrs = candidate.get_child_by_name(
                    'volume-id-attributes')
                return {
                    'name': id_attrs.get_child_content('name'),
                    'vserver': id_attrs.get_child_content(
                        'owning-vserver-name'),
                    'size-available': free,
                }
        # No next-tag means the last page has been consumed.
        if tag is None:
            break
    return None
def _create_avl_vol_request(self, vserver, tag=None):
    """Build a volume-get-iter request for online, non-root volumes.

    The query restricts results to `vserver` (when given); the
    desired-attributes section trims the response to name, owner,
    free space and state.
    """
    request = NaElement('volume-get-iter')
    request.add_new_child('max-records', '100')
    if tag:
        # Continuation token from the previous page.
        request.add_new_child('tag', tag, True)

    query = NaElement('query')
    request.add_child_elem(query)
    query_attrs = NaElement('volume-attributes')
    query.add_child_elem(query_attrs)
    if vserver:
        query_attrs.add_node_with_children(
            'volume-id-attributes',
            **{"owning-vserver-name": vserver})
    query_attrs.add_node_with_children(
        'volume-state-attributes',
        **{"is-vserver-root": "false", "state": "online"})

    desired = NaElement('desired-attributes')
    request.add_child_elem(desired)
    desired_attrs = NaElement('volume-attributes')
    desired.add_child_elem(desired_attrs)
    desired_attrs.add_node_with_children(
        'volume-id-attributes',
        **{"name": None, "owning-vserver-name": None})
    desired_attrs.add_node_with_children(
        'volume-space-attributes',
        **{"size-available": None})
    desired_attrs.add_node_with_children(
        'volume-state-attributes',
        **{"is-cluster-volume": None,
           "is-vserver-root": None,
           "state": None})
    return request
def _get_target_details(self):
    """Gets the target portal details.

    Returns a list of dicts, one per iSCSI interface reported by the
    cluster (address, port, tpgroup-tag, interface-enabled).
    """
    result = self.client.invoke_successfully(
        NaElement('iscsi-interface-get-iter'), True)
    targets = []
    num_records = result.get_child_content('num-records')
    if num_records and int(num_records) >= 1:
        interfaces = result.get_child_by_name(
            'attributes-list').get_children()
        for iface in interfaces:
            targets.append({
                'address': iface.get_child_content('ip-address'),
                'port': iface.get_child_content('ip-port'),
                'tpgroup-tag': iface.get_child_content('tpgroup-tag'),
                'interface-enabled': iface.get_child_content(
                    'is-interface-enabled'),
            })
    return targets
def _get_iscsi_service_details(self):
    """Returns iscsi iqn.

    Takes the node-name of the first iscsi-service-info record, or
    None (after a debug log) when no service is configured.
    """
    result = self.client.invoke_successfully(
        NaElement('iscsi-service-get-iter'), True)
    num_records = result.get_child_content('num-records')
    if num_records and int(num_records) >= 1:
        service = result.get_child_by_name(
            'attributes-list').get_child_by_name('iscsi-service-info')
        return service.get_child_content('node-name')
    LOG.debug(_('No iscsi service found for vserver %s') % (self.vserver))
    return None
def _create_lun_handle(self, metadata):
    """Returns lun handle based on filer type.

    For C-mode this is '<vserver>:<lun path>'.
    """
    return '{0}:{1}'.format(self.vserver, metadata['Path'])
def _get_lun_list(self):
    """Gets the list of luns on filer.

    Pages through lun-get-iter for this vserver and populates the
    LUN cache via _extract_and_populate_luns.
    """
    tag = None
    while True:
        request = NaElement('lun-get-iter')
        request.add_new_child('max-records', '100')
        if tag:
            # Continuation token from the previous page.
            request.add_new_child('tag', tag, True)
        query = NaElement('query')
        lun_info = NaElement('lun-info')
        lun_info.add_new_child('vserver', self.vserver)
        query.add_child_elem(lun_info)
        request.add_child_elem(query)
        result = self.client.invoke_successfully(request)
        num_records = result.get_child_by_name('num-records')
        if num_records and \
                int(result.get_child_content('num-records')) >= 1:
            attr_list = result.get_child_by_name('attributes-list')
            self._extract_and_populate_luns(attr_list.get_children())
        tag = result.get_child_content('next-tag')
        if tag is None:
            break
def _find_mapped_lun_igroup(self, path, initiator, os=None):
    """Find the igroup for mapped lun with initiator.

    Returns (igroup_name, lun_id) for the first driver-owned igroup of
    the initiator that appears in the LUN's map list, else (None, None).
    """
    igroups = self._get_igroup_by_initiator(initiator=initiator)
    mappings = self._get_lun_map(path)
    if igroups and mappings:
        for igroup in igroups:
            group_name = igroup['initiator-group-name']
            # Only consider igroups created by this driver.
            if not group_name.startswith(self.IGROUP_PREFIX):
                continue
            for mapping in mappings:
                if mapping['initiator-group'] == group_name:
                    return (group_name, mapping['lun-id'])
    return (None, None)
def _get_lun_map(self, path):
    """Gets the lun map by lun path.

    Pages through lun-map-get-iter and returns a list of dicts, one
    per mapping of the LUN at *path*, each carrying
    'initiator-group', 'lun-id' and 'vserver'.
    """
    tag = None
    map_list = []
    while True:
        lun_map_iter = NaElement('lun-map-get-iter')
        # Page size; 'next-tag' drives the pagination loop below.
        lun_map_iter.add_new_child('max-records', '100')
        if tag:
            lun_map_iter.add_new_child('tag', tag, True)
        # Server-side filter on the LUN path.
        query = NaElement('query')
        lun_map_iter.add_child_elem(query)
        query.add_node_with_children('lun-map-info', **{'path': path})
        result = self.client.invoke_successfully(lun_map_iter, True)
        tag = result.get_child_content('next-tag')
        if result.get_child_content('num-records') and \
                int(result.get_child_content('num-records')) >= 1:
            attr_list = result.get_child_by_name('attributes-list')
            lun_maps = attr_list.get_children()
            for lun_map in lun_maps:
                lun_m = dict()
                lun_m['initiator-group'] = lun_map.get_child_content(
                    'initiator-group')
                lun_m['lun-id'] = lun_map.get_child_content('lun-id')
                lun_m['vserver'] = lun_map.get_child_content('vserver')
                map_list.append(lun_m)
        if tag is None:
            break
    return map_list
def _get_igroup_by_initiator(self, initiator):
    """Get igroups by initiator.

    Pages through igroup-get-iter with a server-side filter on this
    vserver and the given initiator name, returning a list of dicts
    with each igroup's name, type and os-type.
    """
    tag = None
    igroup_list = []
    while True:
        igroup_iter = NaElement('igroup-get-iter')
        # Page size; 'next-tag' drives the pagination loop below.
        igroup_iter.add_new_child('max-records', '100')
        if tag:
            igroup_iter.add_new_child('tag', tag, True)
        # Query: igroups on this vserver containing the initiator.
        query = NaElement('query')
        igroup_iter.add_child_elem(query)
        igroup_info = NaElement('initiator-group-info')
        query.add_child_elem(igroup_info)
        igroup_info.add_new_child('vserver', self.vserver)
        initiators = NaElement('initiators')
        igroup_info.add_child_elem(initiators)
        initiators.add_node_with_children('initiator-info',
                                          **{'initiator-name': initiator})
        # Desired attributes: restrict the reply to the fields that
        # are actually consumed below.
        des_attrs = NaElement('desired-attributes')
        des_ig_info = NaElement('initiator-group-info')
        des_attrs.add_child_elem(des_ig_info)
        des_ig_info.add_node_with_children('initiators',
                                           **{'initiator-info': None})
        des_ig_info.add_new_child('vserver', None)
        des_ig_info.add_new_child('initiator-group-name', None)
        des_ig_info.add_new_child('initiator-group-type', None)
        des_ig_info.add_new_child('initiator-group-os-type', None)
        igroup_iter.add_child_elem(des_attrs)
        # NOTE(review): passes False here while the other cluster-mode
        # calls pass True as the second argument -- confirm this is
        # intentional.
        result = self.client.invoke_successfully(igroup_iter, False)
        tag = result.get_child_content('next-tag')
        if result.get_child_content('num-records') and\
                int(result.get_child_content('num-records')) > 0:
            attr_list = result.get_child_by_name('attributes-list')
            igroups = attr_list.get_children()
            for igroup in igroups:
                ig = dict()
                ig['initiator-group-os-type'] = igroup.get_child_content(
                    'initiator-group-os-type')
                ig['initiator-group-type'] = igroup.get_child_content(
                    'initiator-group-type')
                ig['initiator-group-name'] = igroup.get_child_content(
                    'initiator-group-name')
                igroup_list.append(ig)
        if tag is None:
            break
    return igroup_list
def _clone_lun(self, name, new_name, space_reserved):
    """Clone LUN with the given handle to the new name.

    :param name: name of the source LUN (also its path component)
    :param new_name: name for the clone
    :param space_reserved: space-reservation setting forwarded to the
        clone-create API
    :raises exception.VolumeBackendAPIException: if the clone cannot be
        found on the filer after clone-create succeeds
    """
    metadata = self._get_lun_attr(name, 'metadata')
    volume = metadata['Volume']
    clone_create = NaElement.create_node_with_children(
        'clone-create',
        **{'volume': volume, 'source-path': name,
           'destination-path': new_name,
           'space-reserve': space_reserved})
    self.client.invoke_successfully(clone_create, True)
    LOG.debug(_("Cloned LUN with new name %s") % new_name)
    lun = self._get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s'
                                % (volume, new_name))
    if not lun:
        # Fixed typo in the operator-visible message
        # ("clonned" -> "cloned").
        msg = _("No cloned lun named %s found on the filer")
        raise exception.VolumeBackendAPIException(data=msg % (new_name))
    # Register the clone in the driver's in-memory LUN table so it is
    # immediately usable without rescanning the filer.
    clone_meta = self._create_lun_meta(lun[0])
    self._add_lun_to_table(NetAppLun('%s:%s' % (clone_meta['Vserver'],
                                                clone_meta['Path']),
                                     new_name,
                                     lun[0].get_child_content('size'),
                                     clone_meta))
def _get_lun_by_args(self, **args):
    """Retrieves LUNs matching the given lun-info attributes.

    :param args: lun-info child elements used as the server-side query
    :returns: list of matching lun-info NaElements (possibly empty)
    """
    lun_iter = NaElement('lun-get-iter')
    lun_iter.add_new_child('max-records', '100')
    query = NaElement('query')
    lun_iter.add_child_elem(query)
    query.add_node_with_children('lun-info', **args)
    # NOTE(review): no tunneling flag here, unlike most cluster-mode
    # calls that pass True -- confirm the default is intended.
    luns = self.client.invoke_successfully(lun_iter)
    attr_list = luns.get_child_by_name('attributes-list')
    # When nothing matches, the reply carries no 'attributes-list'
    # element; previously this raised AttributeError on None.  Callers
    # (e.g. _clone_lun) expect an empty list in that case.
    if attr_list is None:
        return []
    return attr_list.get_children()
def _create_lun_meta(self, lun):
"""Creates lun metadata dictionary."""
self._is_naelement(lun)
meta_dict = {}
self._is_naelement(lun)
meta_dict['Vserver'] = lun.get_child_content('vserver')
meta_dict['Volume'] = lun.get_child_content('volume')
meta_dict['Qtree'] = lun.get_child_content('qtree')
meta_dict['Path'] = lun.get_child_content('path')
meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
meta_dict['SpaceReserved'] = \
lun.get_child_content('is-space-reservation-enabled')
return meta_dict
def _configure_tunneling(self, do_tunneling=False):
"""Configures tunneling for ontap cluster."""
if do_tunneling:
self.client.set_vserver(self.vserver)
else:
self.client.set_vserver(None)
def _update_volume_stats(self):
    """Retrieve stats info from volume group."""
    LOG.debug(_("Updating volume stats"))
    netapp_backend = 'NetApp_iSCSI_Cluster_direct'
    backend_name = self.configuration.safe_get('volume_backend_name')
    # Capacity is reported as 'infinite' because usable space is not
    # tracked per backend here.
    data = {
        'volume_backend_name': backend_name or netapp_backend,
        'vendor_name': 'NetApp',
        'driver_version': '1.0',
        'storage_protocol': 'iSCSI',
        'total_capacity_gb': 'infinite',
        'free_capacity_gb': 'infinite',
        'reserved_percentage': 0,
        'QoS_support': False,
    }
    provide_ems(self, self.client, data, netapp_backend)
    self._stats = data
class NetAppDirect7modeISCSIDriver(NetAppDirectISCSIDriver):
    """NetApp 7-mode iSCSI volume driver.

    Specializes the direct iSCSI driver for 7-mode filers, optionally
    tunneling API calls through a configured vfiler.
    """

    def __init__(self, *args, **kwargs):
        super(NetAppDirect7modeISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(netapp_7mode_opts)

    def _do_custom_setup(self):
        """Does custom setup depending on the type of filer."""
        self.vfiler = self.configuration.netapp_vfiler
        self.volume_list = self.configuration.netapp_volume_list
        if self.volume_list:
            # The option is a comma-separated string; normalize it to a
            # list of stripped volume names.
            self.volume_list = self.volume_list.split(',')
            self.volume_list = [el.strip() for el in self.volume_list]
        if self.vfiler:
            # vfiler tunneling requires the ONTAPI version to be pinned
            # on the client before the vfiler is set.
            (major, minor) = self._get_ontapi_version()
            self.client.set_api_version(major, minor)
            self.client.set_vfiler(self.vfiler)

    def _get_avl_volume_by_size(self, size):
        """Get the available volume by size.

        Returns a dict describing the first online volume with at
        least *size* available, restricted to ``self.volume_list``
        when configured and to non-root volumes otherwise.  Returns
        None when no volume qualifies.
        """
        vol_request = NaElement('volume-list-info')
        res = self.client.invoke_successfully(vol_request, True)
        volumes = res.get_child_by_name('volumes')
        vols = volumes.get_children()
        for vol in vols:
            avl_size = vol.get_child_content('size-available')
            state = vol.get_child_content('state')
            if float(avl_size) >= float(size) and state == 'online':
                avl_vol = dict()
                avl_vol['name'] = vol.get_child_content('name')
                avl_vol['block-type'] = vol.get_child_content('block-type')
                avl_vol['type'] = vol.get_child_content('type')
                avl_vol['size-available'] = avl_size
                if self.volume_list:
                    if avl_vol['name'] in self.volume_list:
                        return avl_vol
                else:
                    if self._check_vol_not_root(avl_vol):
                        return avl_vol
        return None

    def _check_vol_not_root(self, vol):
        """Checks if a volume is not root."""
        vol_options = NaElement.create_node_with_children(
            'volume-options-list-info', **{'volume': vol['name']})
        result = self.client.invoke_successfully(vol_options, True)
        options = result.get_child_by_name('options')
        ops = options.get_children()
        for op in ops:
            if op.get_child_content('name') == 'root' and\
                    op.get_child_content('value') == 'true':
                return False
        return True

    def _get_igroup_by_initiator(self, initiator):
        """Get igroups by initiator.

        7-mode has no server-side query support, so every igroup is
        listed and filtered client-side by initiator name.
        """
        igroup_list = NaElement('igroup-list-info')
        result = self.client.invoke_successfully(igroup_list, True)
        igroups = []
        igs = result.get_child_by_name('initiator-groups')
        if igs:
            ig_infos = igs.get_children()
            if ig_infos:
                for info in ig_infos:
                    initiators = info.get_child_by_name('initiators')
                    init_infos = initiators.get_children()
                    if init_infos:
                        for init in init_infos:
                            if init.get_child_content('initiator-name')\
                                    == initiator:
                                d = dict()
                                d['initiator-group-os-type'] = \
                                    info.get_child_content(
                                        'initiator-group-os-type')
                                d['initiator-group-type'] = \
                                    info.get_child_content(
                                        'initiator-group-type')
                                d['initiator-group-name'] = \
                                    info.get_child_content(
                                        'initiator-group-name')
                                igroups.append(d)
        return igroups

    def _get_target_details(self):
        """Gets the target portal details."""
        iscsi_if_iter = NaElement('iscsi-portal-list-info')
        result = self.client.invoke_successfully(iscsi_if_iter, True)
        tgt_list = []
        portal_list_entries = result.get_child_by_name(
            'iscsi-portal-list-entries')
        if portal_list_entries:
            portal_list = portal_list_entries.get_children()
            for iscsi_if in portal_list:
                d = dict()
                d['address'] = iscsi_if.get_child_content('ip-address')
                d['port'] = iscsi_if.get_child_content('ip-port')
                d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag')
                tgt_list.append(d)
        return tgt_list

    def _get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        iscsi_service_iter = NaElement('iscsi-node-get-name')
        result = self.client.invoke_successfully(iscsi_service_iter, True)
        return result.get_child_content('node-name')

    def _create_lun_handle(self, metadata):
        """Returns lun handle based on filer type.

        The handle embeds the owning filer (and vfiler, if any) so
        LUNs can be located again after a driver restart.
        """
        if self.vfiler:
            owner = '%s:%s' % (self.configuration.netapp_server_hostname,
                               self.vfiler)
        else:
            owner = self.configuration.netapp_server_hostname
        return '%s:%s' % (owner, metadata['Path'])

    def _get_lun_list(self):
        """Gets the list of luns on filer."""
        lun_list = []
        if self.volume_list:
            for vol in self.volume_list:
                try:
                    luns = self._get_vol_luns(vol)
                    if luns:
                        lun_list.extend(luns)
                except NaApiError:
                    # A configured volume may no longer exist; warn and
                    # keep scanning the remaining volumes.
                    LOG.warn(_("Error finding luns for volume %s."
                               " Verify volume exists.") % (vol))
        else:
            luns = self._get_vol_luns(None)
            lun_list.extend(luns)
        self._extract_and_populate_luns(lun_list)

    def _get_vol_luns(self, vol_name):
        """Gets the luns for a volume."""
        api = NaElement('lun-list-info')
        if vol_name:
            api.add_new_child('volume-name', vol_name)
        result = self.client.invoke_successfully(api, True)
        luns = result.get_child_by_name('luns')
        return luns.get_children()

    def _find_mapped_lun_igroup(self, path, initiator, os=None):
        """Find the igroup for mapped lun with initiator.

        :returns: (igroup name, lun id) or (None, None) when the LUN
            is not mapped to the initiator.
        """
        lun_map_list = NaElement.create_node_with_children(
            'lun-map-list-info',
            **{'path': path})
        result = self.client.invoke_successfully(lun_map_list, True)
        igroups = result.get_child_by_name('initiator-groups')
        # Initialize before the guard: previously these names were only
        # bound inside the 'if igroups:' branch, so an unmapped LUN
        # raised NameError at the return statement.
        igroup = None
        lun_id = None
        if igroups:
            found = False
            igroup_infs = igroups.get_children()
            for ig in igroup_infs:
                initiators = ig.get_child_by_name('initiators')
                init_infs = initiators.get_children()
                for info in init_infs:
                    if info.get_child_content('initiator-name') == initiator:
                        found = True
                        igroup = ig.get_child_content('initiator-group-name')
                        lun_id = ig.get_child_content('lun-id')
                        break
                if found:
                    break
        return (igroup, lun_id)

    def _clone_lun(self, name, new_name, space_reserved):
        """Clone LUN with the given handle to the new name."""
        metadata = self._get_lun_attr(name, 'metadata')
        path = metadata['Path']
        # Rebind 'name' to the bare LUN name component of the path.
        (parent, splitter, name) = path.rpartition('/')
        clone_path = '%s/%s' % (parent, new_name)
        clone_start = NaElement.create_node_with_children(
            'clone-start',
            **{'source-path': path, 'destination-path': clone_path,
               'no-snap': 'true'})
        result = self.client.invoke_successfully(clone_start, True)
        clone_id_el = result.get_child_by_name('clone-id')
        cl_id_info = clone_id_el.get_child_by_name('clone-id-info')
        vol_uuid = cl_id_info.get_child_content('volume-uuid')
        clone_id = cl_id_info.get_child_content('clone-op-id')
        if vol_uuid:
            # clone-start is asynchronous; poll the job to completion.
            self._check_clone_status(clone_id, vol_uuid, name, new_name)
        cloned_lun = self._get_lun_by_args(path=clone_path)
        if cloned_lun:
            self._set_space_reserve(clone_path, space_reserved)
            clone_meta = self._create_lun_meta(cloned_lun)
            handle = self._create_lun_handle(clone_meta)
            self._add_lun_to_table(
                NetAppLun(handle, new_name,
                          cloned_lun.get_child_content('size'),
                          clone_meta))
        else:
            raise NaApiError('ENOLUNENTRY', 'No Lun entry found on the filer')

    def _set_space_reserve(self, path, enable):
        """Sets the space reserve info."""
        space_res = NaElement.create_node_with_children(
            'lun-set-space-reservation-info',
            **{'path': path, 'enable': enable})
        self.client.invoke_successfully(space_res, True)

    def _check_clone_status(self, clone_id, vol_uuid, name, new_name):
        """Checks for the job till completed.

        Polls clone-list-status once per second until the clone job
        stops running, then logs the outcome and raises NaApiError on
        failure.
        """
        clone_status = NaElement('clone-list-status')
        cl_id = NaElement('clone-id')
        clone_status.add_child_elem(cl_id)
        cl_id.add_node_with_children(
            'clone-id-info',
            **{'clone-op-id': clone_id, 'volume-uuid': vol_uuid})
        running = True
        clone_ops_info = None
        while running:
            result = self.client.invoke_successfully(clone_status, True)
            status = result.get_child_by_name('status')
            ops_info = status.get_children()
            if ops_info:
                # Only the first ops-info entry is examined; both
                # branches break out of the for loop.
                for info in ops_info:
                    if info.get_child_content('clone-state') == 'running':
                        time.sleep(1)
                        break
                    else:
                        running = False
                        clone_ops_info = info
                        break
        else:
            # while/else: runs when the loop exits normally (there is
            # no break at the while level, so this always executes).
            if clone_ops_info:
                fmt = {'name': name, 'new_name': new_name}
                if clone_ops_info.get_child_content('clone-state')\
                        == 'completed':
                    LOG.debug(_("Clone operation with src %(name)s"
                                " and dest %(new_name)s completed") % fmt)
                else:
                    LOG.debug(_("Clone operation with src %(name)s"
                                " and dest %(new_name)s failed") % fmt)
                    raise NaApiError(
                        clone_ops_info.get_child_content('error'),
                        clone_ops_info.get_child_content('reason'))

    def _get_lun_by_args(self, **args):
        """Retrieves the first LUN matching the given args.

        :returns: a lun-info NaElement or None when nothing matches.
        """
        lun_info = NaElement.create_node_with_children('lun-list-info', **args)
        result = self.client.invoke_successfully(lun_info, True)
        luns = result.get_child_by_name('luns')
        if luns:
            infos = luns.get_children()
            if infos:
                return infos[0]
        return None

    def _create_lun_meta(self, lun):
        """Creates lun metadata dictionary."""
        # Validate the argument once (the original called this twice).
        self._is_naelement(lun)
        meta_dict = {}
        meta_dict['Path'] = lun.get_child_content('path')
        meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
        meta_dict['SpaceReserved'] = lun.get_child_content(
            'is-space-reservation-enabled')
        return meta_dict

    def _update_volume_stats(self):
        """Retrieve status info from volume group."""
        LOG.debug(_("Updating volume stats"))
        data = {}
        netapp_backend = 'NetApp_iSCSI_7mode_direct'
        backend_name = self.configuration.safe_get('volume_backend_name')
        # Use the shared variable instead of repeating the literal so
        # the backend name cannot drift from the EMS identifier.
        data["volume_backend_name"] = (
            backend_name or netapp_backend)
        data["vendor_name"] = 'NetApp'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = 'iSCSI'
        data['total_capacity_gb'] = 'infinite'
        data['free_capacity_gb'] = 'infinite'
        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        provide_ems(self, self.client, data, netapp_backend,
                    server_type="7mode")
        self._stats = data
| StarcoderdataPython |
3294338 | <reponame>SamuelHorvath/Variance_Reduced_Optimizers_Pytorch
import argparse
from datetime import datetime
import os
def parse_args(args):
    """Build the experiment CLI parser and return the parsed options.

    NOTE(review): the ``args`` parameter is currently ignored --
    ``parser.parse_args()`` at the bottom always reads ``sys.argv[1:]``.
    Confirm whether callers expect their explicit token list to be
    parsed (``parser.parse_args(args)``) before changing it.
    """
    parser = initialise_arg_parser(args, 'Variance Reduction.')

    # --- Experiment / optimization arguments -------------------------
    parser.add_argument(
        "--total-runs",
        type=int,
        default=3,
        help="Number of times to redo run, we increase seed by 1 if deterministic",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=1,
        help="Number of epochs to run",
    )
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='Initial learning rate (default: .1)'
    )
    parser.add_argument(
        "--tune-lr",
        default=False,
        action='store_true',
        help="Whether to tune step size during optimization procedure, based on single run"
    )
    parser.add_argument(
        "-b", "--batch-size",
        type=int,
        default=32,
        help="Static batch size for computation, for speed select as large as possible"
    )
    parser.add_argument(
        "--method",
        type=str,
        required=True,
        help="Define which method to run"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        choices=[
            "mnist", "cifar10", "cifar100", "mushrooms", "w8a", "ijcnn1", "a9a", "phishing"],
        help="Define which dataset to load"
    )
    parser.add_argument(
        "--metric",
        type=str,
        default='top_1_acc',
        choices=["loss", "top_1_acc"],
        help="Define which metric to optimize."
    )
    parser.add_argument(
        "--train-metric",
        default=False,
        action='store_true',
        help="Whether to tune train or validation metric"
    )
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="Define which model to load"
    )
    # NOTE(review): the help text below looks copy-pasted from --model;
    # it presumably should describe the loss function.
    parser.add_argument(
        "--loss",
        type=str,
        default='CE',
        choices=['CE', 'BCE'],
        help="Define which model to load"
    )
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=0.,
        help='Weight decay (default: 0.)'
    )
    parser.add_argument(
        "--track-grad-norm",
        default=False,
        action='store_true',
        help="Whether to track grad norm on validation set"
    )
    parser.add_argument(
        "--nc-regularizer",
        default=False,
        action='store_true',
        help="Whether to include non-convex regularizer"
    )
    parser.add_argument(
        "--nc-regularizer-value",
        type=float,
        default=1e-3,
        help="Non-convex regularizer coefficient"
    )

    # SETUP ARGUMENTS
    parser.add_argument(
        "--checkpoint-dir",
        type=str,
        default='../check_points',
        help="Directory to persist run meta data_preprocess, e.g. best/last models."
    )
    parser.add_argument(
        "--data-path",
        type=str,
        default="../data/",
        help="Base root directory for the dataset."
    )
    parser.add_argument(
        "--gpu",
        type=str,
        default="0",
        help="Define on which GPU to run the model (comma-separated for multiple). If -1, use CPU."
    )
    parser.add_argument(
        "-n", "--num-workers",
        type=int,
        default=4,
        help="Num workers for dataset loading"
    )
    parser.add_argument(
        "--deterministic",
        action="store_true",
        default=False,
        help="Run deterministically for reproducibility."
    )
    parser.add_argument(
        "--manual-seed",
        type=int,
        default=123,
        help="Random seed to use."
    )
    parser.add_argument(
        "--eval-every",
        type=int,
        default=1,
        help="How often to do validation."
    )
    parser.add_argument(
        "--run-id",
        type=str,
        required=True,
        help="Name of the Experiment (no default)"
    )
    parser.add_argument(
        "--loglevel",
        type=str,
        choices=["DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"],
        default="INFO"
    )

    # Side effects at parse time: the default log file name is stamped
    # with the current time, and the log directory is created eagerly.
    now = datetime.now()
    now = now.strftime("%Y%m%d%H%M%S")
    os.makedirs("../logs/", exist_ok=True)
    parser.add_argument(
        "--logfile",
        type=str,
        default=f"../logs/log_{now}.txt"
    )

    # Evaluation mode, do not run training
    parser.add_argument("--evaluate", action='store_true', default=False, help="Evaluation or Training mode")

    # See docstring: *args* is not forwarded here.
    args = parser.parse_args()
    # Normalize the GPU spec in place ("-1" -> "cpu", single id ->
    # "cuda:<id>", comma list -> sorted list of ints).
    transform_gpu_args(args)

    return args
def initialise_arg_parser(args, description):
    """Create the ArgumentParser used by :func:`parse_args`.

    ``args`` is accepted for backward compatibility but no longer
    forwarded to the parser: it was previously passed as the first
    positional argument of ``argparse.ArgumentParser`` (``prog``),
    which corrupted usage/help output when a list of CLI tokens was
    supplied.
    """
    parser = argparse.ArgumentParser(description=description)
    return parser
def transform_gpu_args(args):
    """Normalize ``args.gpu`` in place.

    "-1" becomes "cpu"; a comma-separated list becomes a sorted list
    of ints; a single id string becomes "cuda:<id>".
    """
    gpu_spec = args.gpu
    if gpu_spec == "-1":
        args.gpu = "cpu"
        return
    cards = gpu_spec.split(',')
    if len(cards) == 1:
        args.gpu = f"cuda:{gpu_spec}"
    else:
        args.gpu = sorted(int(card) for card in cards)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.