id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
105982 | # -*- coding: UTF-8 -*-
import __future__
import os
import sys
import traceback
import site
import tempfile
import Quartz
import time
class StdOutput(object):
    """File-like object that captures writes into a shared list.

    Each write is appended to ``output`` as a ``(text, isError)`` tuple so a
    caller can later replay stdout and stderr interleaved in order, with
    error output distinguishable from regular output.
    """
    def __init__(self, output, isError=False):
        # output: shared list collecting (text, isError) tuples
        self.data = output
        self.isError = isError
    def write(self, data):
        # Normalize byte strings to unicode (Python 2: `str` is bytes here).
        if isinstance(data, str):
            try:
                data = unicode(data, "utf-8", "replace")
            except UnicodeDecodeError:
                # NOTE(review): with errors="replace" this branch should be
                # unreachable; kept as a defensive fallback.
                data = "XXX " + repr(data)
        self.data.append((data, self.isError))
    def flush(self):
        # File-protocol no-op: nothing is buffered.
        pass
    def close(self):
        # File-protocol no-op: the underlying list stays usable.
        pass
def _makeEnviron():
env = dict(os.environ)
kill = ["ARGVZERO", "EXECUTABLEPATH", "PYTHONHOME", "PYTHONPATH", "RESOURCEPATH"]
for key in kill:
if key in env:
del env[key]
return env
def _execute(cmds):
    """Run *cmds* (a list of command-line tokens) in a subshell.

    Returns a ``(stderr, stdout)`` tuple of the captured text.  The child
    runs with the cleaned environment from ``_makeEnviron()`` so the app
    bundle's Python settings do not leak into it.
    """
    import subprocess
    # Capture output through temp files instead of pipes so popen.wait()
    # cannot deadlock on large output.
    stderrPath = tempfile.mkstemp()[1]
    stdoutPath = tempfile.mkstemp()[1]
    stderrFile = open(stderrPath, "w")
    stdoutFile = open(stdoutPath, "w")
    # get the os.environ (minus bundle-specific variables)
    env = _makeEnviron()
    # make a string of escaped commands
    # NOTE(review): list2cmdline uses Windows quoting rules; combined with
    # shell=True on POSIX, arguments containing spaces/quotes may be
    # mis-split — confirm inputs are simple tokens.
    cmds = subprocess.list2cmdline(cmds)
    # go
    popen = subprocess.Popen(cmds, stderr=stderrFile, stdout=stdoutFile, env=env, shell=True)
    popen.wait()
    # get the output: close the write handles, reopen for reading
    stderrFile.close()
    stdoutFile.close()
    stderrFile = open(stderrPath, "r")
    stdoutFile = open(stdoutPath, "r")
    stderr = stderrFile.read()
    stdout = stdoutFile.read()
    stderrFile.close()
    stdoutFile.close()
    # trash the temp files
    os.remove(stderrPath)
    os.remove(stdoutPath)
    # done
    return stderr, stdout
# Python-2 snippet executed with the *system* python to discover its
# site-packages directory (note the py2 print statement).
localSitePackagesCode = u"""
from distutils import sysconfig
_site_packages_path = sysconfig.get_python_lib()
print _site_packages_path
"""
def getLocalCurrentPythonVersionDirName():
    """Return the system python's site-packages path, or False if not found.

    Writes ``localSitePackagesCode`` to a temp file, runs it with the
    ``python`` found on $PATH, and takes the first line of its stdout.
    """
    tempFile = tempfile.mkstemp(".py")[1]
    f = open(tempFile, "w")
    f.write(localSitePackagesCode)
    f.close()
    # _execute returns (stderr, stdout); index 1 is stdout.
    log = _execute(["python", tempFile])[1]
    sitePackages = log.split("\n")[0]
    os.remove(tempFile)
    if os.path.exists(sitePackages):
        return sitePackages
    else:
        return False
# Resolved once at import time; used to extend sys.path for executed scripts.
localSitePackagesPath = getLocalCurrentPythonVersionDirName()
class DrawBotNamespace(dict):
    """Execution namespace that resolves registered names on a context.

    Lookups for names listed in *variables* are redirected to attributes of
    *context*; every other name behaves like a normal dict entry.
    """

    def __init__(self, context, variables):
        self._context = context
        self._variables = variables

    def __getitem__(self, item):
        # Names registered as live variables shadow regular dict entries.
        if item not in self._variables:
            return super(DrawBotNamespace, self).__getitem__(item)
        return getattr(self._context, item)
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
class ScriptRunner(object):
    """Compile and execute a user script in a controlled environment.

    While the script runs, stdout/stderr/argv, the working directory and
    ``sys.path`` are redirected; everything is restored in the ``finally``
    block even if the script raises.  With ``checkSyntaxOnly=True`` the
    source is only compiled, never executed.  (Python 2 code: uses the
    ``exec ... in ...`` statement.)
    """
    def __init__(self, text=None, path=None, stdout=None, stderr=None, namespace=None, checkSyntaxOnly=False):
        # NOTE(review): Thread appears unused in this method.
        from threading import Thread
        if path:
            curDir, fileName = os.path.split(path)
        else:
            curDir = os.getenv("HOME")
            fileName = '<untitled>'
        # save up the important bits
        saveStdout = sys.stdout
        saveStderr = sys.stderr
        saveArgv = sys.argv
        try:
            saveDir = os.getcwd()
        except:
            # cwd may have been deleted out from under us
            saveDir = None
        # set up the name space
        if namespace is None:
            namespace = dict()
        namespace["__file__"] = path
        namespace["__name__"] = "__main__"
        namespace["help"] = _Helper()
        if stdout:
            sys.stdout = stdout
        if stderr:
            sys.stderr = stderr
        sys.argv = [fileName]
        os.chdir(curDir)
        sys.path.insert(0, curDir)
        # make the system python's site-packages importable from scripts
        if localSitePackagesPath and localSitePackagesPath not in sys.path:
            site.addsitedir(localSitePackagesPath)
        # here we go
        if text is None:
            f = open(path, 'rb')
            text = f.read()
            f.close()
        # normalize Windows / old-Mac line endings for compile()
        source = text.replace('\r\n', '\n').replace('\r', '\n')
        # NOTE(review): userCancelID appears unused.
        userCancelID = None
        try:
            try:
                # true division enabled for user scripts via compile flag
                code = compile(source + '\n\n', fileName, "exec", __future__.CO_FUTURE_DIVISION)
            except:
                # syntax error: report only the error itself, no runner frames
                traceback.print_exc(0)
            else:
                if not checkSyntaxOnly:
                    self._scriptDone = False
                    try:
                        exec code in namespace
                    except KeyboardInterrupt:
                        # user cancelled: silently stop
                        pass
                    except:
                        # drop this wrapper's frame from the reported traceback
                        etype, value, tb = sys.exc_info()
                        if tb.tb_next is not None:
                            tb = tb.tb_next
                        traceback.print_exception(etype, value, tb)
                        etype = value = tb = None
        finally:
            # reset the important bits
            self._scriptDone = True
            sys.stdout = saveStdout
            sys.stderr = saveStderr
            sys.argv = saveArgv
            if saveDir:
                os.chdir(saveDir)
            sys.path.remove(curDir)
def CallbackRunner(callback, stdout=None, stderr=None, args=None, kwargs=None, fallbackResult=None):
    """Invoke *callback* with stdout/stderr optionally redirected.

    Returns the callback's result, or *fallbackResult* if the callback
    raised (the traceback is printed, minus this wrapper's own frame).
    stdout/stderr are always restored afterwards.
    """
    # Fix: the original used mutable default arguments (args=[], kwargs={})
    # which are shared across calls and could leak state if a callback
    # mutated them; use None sentinels instead (backward compatible).
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    result = fallbackResult
    saveStdout = sys.stdout
    saveStderr = sys.stderr
    if stdout:
        sys.stdout = stdout
    if stderr:
        sys.stderr = stderr
    try:
        result = callback(*args, **kwargs)
    except Exception:
        # Fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; catch ordinary exceptions only.
        etype, value, tb = sys.exc_info()
        if tb.tb_next is not None:
            tb = tb.tb_next
        traceback.print_exception(etype, value, tb)
        etype = value = tb = None
    finally:
        sys.stdout = saveStdout
        sys.stderr = saveStderr
    return result
| StarcoderdataPython |
1617759 | '''
Author : <NAME>
GitHub : https://github.com/royaleagle73
Email : <EMAIL>
'''
import os
class get_package_list:
    '''
    Collects the names of executables installed under /usr/bin/.

    __init__ scans the directory and prepares bookkeeping state; work()
    filters out directories and shell scripts, writes a CSV report to an
    "output" directory and returns the collected names as a dict.
    '''
    def __init__(self):
        '''
        Scan /usr/bin/ and initialise the state used later by work().
        '''
        self.file_path = "/usr/bin/"  # SETTING UP FILE PATH TO FIND PACKAGES
        self.files_found = os.listdir(self.file_path)  # FINDING FILES AND SAVING THEM IN A LIST
        self.data = "S.No., Package Name\n"  # CSV header; rows are appended by work()
        self.current_path = os.getcwd()  # SAVING THE CURRENT WORKING DIRECTORY FOR LATER USE
        self.count = 0  # TO KEEP COUNT OF NUMBER OF PACKAGES FOUND
    def work(self):
        '''
        Filter the scanned entries, write linux_packages_installed.csv into
        the output directory and return the package names.

        Returns a dict mapping "List of Installed Applications" to a list of
        single-element name lists (header row first).
        '''
        # CHANGING WORKING DIRECTORY so isdir()/endswith() checks resolve
        # relative names inside /usr/bin/
        os.chdir(self.file_path)
        ret_data = {"List of Installed Applications" : [["Applications Name"]]}
        # LISTING ALL FILES AND SERIAL NUMBER EXCLUDING FOLDERS
        for file in self.files_found:  # CHECKING EACH SCANNED FILE ONE BY ONE
            if not os.path.isdir(file):  # CHECKING IF SCANNED FILE IS A FILE OR FOLDER
                if not file.endswith(".sh"):  # REMOVING SCRIPT FILES
                    self.count += 1  # IF IT IS A FILE, COUNT INCREASES BY 1
                    self.data += str(self.count) + "," + file + "\n"  # APPEND CSV ROW
                    ret_data["List of Installed Applications"].append([file])
        if self.current_path.find("output") == -1:  # CHECKING IF CURRENT WORKING DIRECTORY IS OUTPUT FOLDER
            self.current_path += "/output/"
        # NOTE(review): the chdir below raises FileNotFoundError if the
        # "output" directory does not already exist — confirm callers create it.
        os.chdir(self.current_path)  # CHANGING CURRENT WORKING DIRECTORY
        with open("linux_packages_installed.csv", 'w') as pack:  # OPENING NEW FILE TO SAVE DATA
            pack.write(self.data)  # WRITING DATA TO FILE
        return ret_data
3426538 | <gh_stars>0
from typing import Iterator, List, Optional
import astroid
from ._base import NAMES, Extractor, WarningInfo
# Node types that make the remainder of a body conditional/unreachable in a
# straight-line read; extraction stops at the first one encountered.
BRANCHING = (
    astroid.If,
    astroid.With,
    astroid.TryExcept,
    astroid.TryFinally,
    astroid.Return,
    astroid.Raise,
)
class WarningsExtractor(Extractor):
    """Extractor for `warnings.warn()` invocations.
    """
    def extract(self, node: astroid.NodeNG) -> Iterator[WarningInfo]:
        # Only top-level statements of modules and functions are scanned.
        if isinstance(node, (astroid.FunctionDef, astroid.Module)):
            yield from self._extract_from_body(node.body)
    def _extract_from_body(self, body: List[astroid.NodeNG]) -> Iterator[WarningInfo]:
        # Walk statements in order, stopping at the first branching construct
        # (warnings after it are not guaranteed to execute).
        for node in body:
            if isinstance(node, astroid.Expr):
                # unwrap expression statements to reach the Call node
                node = node.value
            if isinstance(node, BRANCHING):
                return
            warning = self._get_warning(node)
            if warning is not None:
                yield warning
    def _get_warning(self, node: astroid.NodeNG) -> Optional[WarningInfo]:
        """Return a WarningInfo for a `warnings.warn(...)` call, else None."""
        # check if it is a call to `warnings.warn`
        if not isinstance(node, astroid.Call):
            return None
        if node.func.as_string() != 'warnings.warn':
            return None
        return WarningInfo(
            message=self._get_message(node),
            # unknown category names fall back to the base Warning class
            category=NAMES.get(self._get_category(node), Warning),
        )
    @staticmethod
    def _get_message(node: astroid.Call) -> str:
        """Extract the warning message from a `warnings.warn` call node."""
        # extract positional message
        if node.args:
            arg_node = node.args[0]
            if isinstance(arg_node, astroid.Const):
                return str(arg_node.value)
        # extract keyword message
        for kwarg in node.keywords:
            if kwarg.arg != 'message':
                continue
            if isinstance(kwarg.value, astroid.Const):
                return str(kwarg.value.value)
        # non-constant message: fall back to the normalized source text
        return ' '.join(node.as_string().split())
    @staticmethod
    def _get_category(node: astroid.Call) -> str:
        """Extract the warning category name; defaults to 'UserWarning'."""
        # extract positional category
        if len(node.args) > 1:
            arg_node = node.args[1]
            if isinstance(arg_node, astroid.Name):
                return arg_node.name
        # extract keyword category
        for kwarg in node.keywords:
            if kwarg.arg != 'category':
                continue
            arg_node = kwarg.value
            if isinstance(arg_node, astroid.Name):
                return arg_node.name
        return 'UserWarning'
| StarcoderdataPython |
328157 | <filename>homeassistant/components/sabnzbd/sab.py
"""Support for the Sabnzbd service."""
from pysabnzbd import SabnzbdApi, SabnzbdApiException
from homeassistant.const import CONF_API_KEY, CONF_PATH, CONF_URL
from homeassistant.core import _LOGGER, HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
async def get_client(hass: HomeAssistant, data):
    """Create a SabnzbdApi client from config *data*.

    Returns the connected SabnzbdApi instance, or False when the SABnzbd
    service cannot be reached (callers check truthiness rather than
    catching exceptions).
    """
    web_root = data.get(CONF_PATH)
    api_key = data[CONF_API_KEY]
    url = data[CONF_URL]
    sab_api = SabnzbdApi(
        url,
        api_key,
        web_root=web_root,
        # False: do not verify SSL on the shared client session
        session=async_get_clientsession(hass, False),
    )
    try:
        await sab_api.check_available()
    except SabnzbdApiException as exception:
        _LOGGER.error("Connection to SABnzbd API failed: %s", exception.message)
        return False
    return sab_api
| StarcoderdataPython |
3320564 | <filename>andabb/PositionListener.py
import abc
from .PoseUpdater import Pose
class IPositionListener(object, metaclass=abc.ABCMeta):
    """
    Interface for position listeners: subclasses receive pose updates via
    newPosition().
    """
    @abc.abstractmethod
    def newPosition(self, pose: Pose):
        """Handle a new pose sample; must be overridden by subclasses."""
        raise NotImplementedError('users must define newPosition to use this base class')
class PrinterPositionListerner(IPositionListener):
    # Debug listener that just prints each pose.
    # NOTE(review): class name misspells "Listener"; kept for API compatibility.
    def newPosition(self, pose: Pose):
        print("pose: {}".format(pose))
| StarcoderdataPython |
9613563 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-05 11:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: make ``madadjou.picture`` an
    optional ImageField (blank/null) uploading to 'madadju'."""
    dependencies = [
        ('childf_app', '0002_auto_20180205_1421'),
    ]
    operations = [
        migrations.AlterField(
            model_name='madadjou',
            name='picture',
            field=models.ImageField(blank=True, null=True, upload_to='madadju'),
        ),
    ]
| StarcoderdataPython |
3571696 |
"""Fill gaps in Python time API-s.
parse_iso_timestamp:
Parse reasonable subset of ISO_8601 timestamp formats.
[ http://en.wikipedia.org/wiki/ISO_8601 ]
datetime_to_timestamp:
Get POSIX timestamp from datetime() object.
"""
import re
import time
from datetime import datetime, timedelta, tzinfo
__all__ = ['parse_iso_timestamp', 'FixedOffsetTimezone', 'datetime_to_timestamp']
class FixedOffsetTimezone(tzinfo):
    """tzinfo with a constant offset of *offset* minutes east of UTC.

    The timezone name is the numeric form ``+HH`` or ``+HH:MM`` (sign
    included), e.g. ``+02`` or ``-05:30``.
    """

    __slots__ = ('__offset', '__name')

    def __init__(self, offset):
        self.__offset = timedelta(minutes=offset)
        # Build the numeric name from the absolute offset, then put the
        # sign back on the hour component.
        hours, minutes = divmod(abs(offset), 60)
        if offset < 0:
            hours = -hours
        if minutes:
            self.__name = "%+03d:%02d" % (hours, minutes)
        else:
            self.__name = "%+03d" % hours

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # A fixed-offset zone has no daylight saving shift.
        return timedelta(0)
ZERO = timedelta(0)  # shared "no DST" delta used by FixedOffsetTimezone.dst()
#
# Parse ISO_8601 timestamps.
#
"""
TODO:
- support more combinations from ISO 8601 (only reasonable ones)
- cache TZ objects
- make it faster?
"""
# Verbose regex for the supported ISO 8601 subset:
# YYYY-MM-DD[ T]HH:MM[:SS[.ss]][+-HH[:MM]]
_iso_regex = r"""
\s*
(?P<year> \d\d\d\d) [-] (?P<month> \d\d) [-] (?P<day> \d\d) [ T]
(?P<hour> \d\d) [:] (?P<min> \d\d)
(?: [:] (?P<sec> \d\d ) (?: [.,] (?P<ss> \d+))? )?
(?: \s* (?P<tzsign> [-+]) (?P<tzhr> \d\d) (?: [:]? (?P<tzmin> \d\d))? )?
\s* $
"""
_iso_rc = None  # compiled lazily on first parse_iso_timestamp() call
def parse_iso_timestamp(s, default_tz = None):
    """Parse ISO timestamp to datetime object.

    YYYY-MM-DD[ T]HH:MM[:SS[.ss]][-+HH[:MM]]

    Assumes that second fractions are zero-trimmed from the end,
    so '.15' means 150000 microseconds.

    If the timezone offset is not present, use default_tz as tzinfo.
    By default its None, meaning the datetime object will be without tz.

    Only fixed offset timezones are supported.

    >>> str(parse_iso_timestamp('2005-06-01 15:00'))
    '2005-06-01 15:00:00'
    >>> str(parse_iso_timestamp(' 2005-06-01T15:00 +02 '))
    '2005-06-01 15:00:00+02:00'
    >>> str(parse_iso_timestamp('2005-06-01 15:00:33+02:00'))
    '2005-06-01 15:00:33+02:00'
    >>> d = parse_iso_timestamp('2005-06-01 15:00:59.33 +02')
    >>> d.strftime("%z %Z")
    '+0200 +02'
    >>> str(parse_iso_timestamp(str(d)))
    '2005-06-01 15:00:59.330000+02:00'
    >>> parse_iso_timestamp('2005-06-01 15:00-0530').strftime('%Y-%m-%d %H:%M %z %Z')
    '2005-06-01 15:00 -0530 -05:30'
    """
    global _iso_rc
    # compile the verbose pattern once, on first use
    if _iso_rc is None:
        _iso_rc = re.compile(_iso_regex, re.X)
    m = _iso_rc.match(s)
    if not m:
        raise ValueError('Date not in ISO format: %s' % repr(s))
    tz = default_tz
    # explicit offset in the string overrides default_tz
    if m.group('tzsign'):
        tzofs = int(m.group('tzhr')) * 60
        if m.group('tzmin'):
            tzofs += int(m.group('tzmin'))
        if m.group('tzsign') == '-':
            tzofs = -tzofs
        tz = FixedOffsetTimezone(tzofs)
    return datetime(int(m.group('year')),
                    int(m.group('month')),
                    int(m.group('day')),
                    int(m.group('hour')),
                    int(m.group('min')),
                    m.group('sec') and int(m.group('sec')) or 0,
                    # right-pad fraction to microseconds ('.15' -> 150000)
                    m.group('ss') and int(m.group('ss').ljust(6, '0')) or 0,
                    tz)
#
# POSIX timestamp from datetime()
#
UTC = FixedOffsetTimezone(0)                     # the UTC timezone object
TZ_EPOCH = datetime.fromtimestamp(0, UTC)        # epoch as tz-aware datetime
UTC_NOTZ_EPOCH = datetime.utcfromtimestamp(0)    # epoch as naive UTC datetime
def datetime_to_timestamp(dt, local_time=True):
    """Get posix timestamp from datetime() object.

    if dt is without timezone, then local_time specifies
    whether it's UTC or local time.

    Returns seconds since epoch as float.

    >>> datetime_to_timestamp(parse_iso_timestamp("2005-06-01 15:00:59.5 +02"))
    1117630859.5
    >>> datetime_to_timestamp(datetime.fromtimestamp(1117630859.5, UTC))
    1117630859.5
    >>> datetime_to_timestamp(datetime.fromtimestamp(1117630859.5))
    1117630859.5
    >>> now = datetime.utcnow()
    >>> now2 = datetime.utcfromtimestamp(datetime_to_timestamp(now, False))
    >>> abs(now2.microsecond - now.microsecond) < 100
    True
    >>> now2 = now2.replace(microsecond = now.microsecond)
    >>> now == now2
    True
    >>> now = datetime.now()
    >>> now2 = datetime.fromtimestamp(datetime_to_timestamp(now))
    >>> abs(now2.microsecond - now.microsecond) < 100
    True
    >>> now2 = now2.replace(microsecond = now.microsecond)
    >>> now == now2
    True
    """
    if dt.tzinfo:
        # tz-aware: subtract the aware epoch directly
        delta = dt - TZ_EPOCH
        return delta.total_seconds()
    elif local_time:
        # naive local time: let mktime apply the local timezone rules,
        # then re-attach sub-second precision (timetuple drops it)
        s = time.mktime(dt.timetuple())
        return s + (dt.microsecond / 1000000.0)
    else:
        # naive UTC: subtract the naive UTC epoch
        delta = dt - UTC_NOTZ_EPOCH
        return delta.total_seconds()
if __name__ == '__main__':
    # run the doctests above when executed as a script
    import doctest
    doctest.testmod()
| StarcoderdataPython |
4983710 | """Testing GraphCircuits
"""
import pytest
import qcdenoise as qcd
@pytest.fixture()
def graph_state():
    """Sample a random 7-qubit graph state from the default GraphDB."""
    graph_db = qcd.GraphDB()
    graph_state = qcd.GraphState(graph_db=graph_db, n_qubits=7)
    return graph_state.sample()
@pytest.mark.dependency()
def test_CXGateCircuit(graph_state):
    """Building a CX-gate circuit from a sampled graph state must succeed."""
    circ_builder = qcd.CXGateCircuit(n_qubits=7)
    circ = circ_builder.build(graph_state)
    # Fix: `if circ: assert True` passed vacuously when build() returned a
    # falsy value; assert directly so an empty/None result fails the test.
    assert circ
@pytest.mark.dependency()
def test_CZGateCircuit(graph_state):
    """Building a CZ-gate circuit from a sampled graph state must succeed."""
    circ_builder = qcd.CZGateCircuit(n_qubits=7)
    circ = circ_builder.build(graph_state)
    # Fix: `if circ: assert True` passed vacuously when build() returned a
    # falsy value; assert directly so an empty/None result fails the test.
    assert circ
@pytest.mark.dependency()
def test_CPhaseGateCircuit(graph_state):
    """Building a CPhase-gate circuit from a sampled graph state must succeed."""
    circ_builder = qcd.CPhaseGateCircuit(n_qubits=7)
    circ = circ_builder.build(graph_state)
    # Fix: `if circ: assert True` passed vacuously when build() returned a
    # falsy value; assert directly so an empty/None result fails the test.
    assert circ
| StarcoderdataPython |
4812723 | <gh_stars>10-100
from typing import List, Dict, Optional, Iterable, Tuple
from itertools import chain, cycle
from functools import partial
from multiprocessing import Manager, Pool, Queue, cpu_count
import numpy as np
import simpy
from . import simulation as cs
from .cache import get_from_cache, save_to_cache
from .early_stop import EarlyStopError, process_early_stop
from .lab import laboratory
from .parameters import Parameters
from .population import Population
from .progress import ProgressBar
from .random import RandomParametersState, RandomParameter
from .stats import Stats
from .simulation_environment import SimulationEnvironment
from .metrics import METRICS
SIMULATION_ENGINE_VERSION = '0.0.3'
MAX_WAIT_UNTIL_D0 = 90
def get_stats_matrix(populations: Dict, duration):
    """Allocate the zeroed stats tensor:
    [population, measurement, age group, day]."""
    num_populations = len(populations)
    num_metrics = len(cs.MEASUREMENTS)
    num_ages = len(cs.age_str)
    stats = np.zeros([num_populations, num_metrics, num_ages, duration])
    return stats
def track_population(senv: SimulationEnvironment):
    """simpy process: once infections reach d0, log stats daily until the
    configured duration has elapsed, reporting progress to the queue."""
    while True:
        yield senv.env.timeout(1.0)
        if senv.d0 is None:
            # d0 = first day the scaled infection threshold is crossed
            if senv.sim_params.d0_infections * senv.scaling < np.array([p.infected for p in senv.people]).sum():
                senv.d0 = int(senv.env.now + 0.01)
            else:
                continue
        if int(senv.env.now + 0.01) - senv.d0 >= senv.duration:
            return
        cs.log_stats(senv)
        try:
            if senv.simulation_queue:
                senv.simulation_queue.put(1)
        except TypeError:  # Communication error with progress bar
            pass
def get_house_size(house_sizes):  # Number of people living in the same house
    return cs.p_choice(house_sizes)
def get_age_group(age_probabilities, age_risk):
    """Draw one age group from *age_risk* using *age_probabilities*."""
    return age_risk[cs.p_choice(age_probabilities)]
def set_initial_infection(sim_params: Parameters, people: Iterable[cs.Person]):
    """Expose one random eligible person; retry until an exposure succeeds.

    People below min_age_group_initially_infected are skipped.
    """
    success = False
    while not success:
        someone = cs.choice(people, 1)[0]
        if someone.age_group.index < sim_params.min_age_group_initially_infected:
            continue
        success = someone.expose_to_virus()
def get_population(senv: SimulationEnvironment, population_params: Population) -> \
        List:
    """Create the scaled population for one segment and seed its infections.

    Generates whole households until the scaled inhabitant count is reached,
    then seeds the configured number of initial infections.
    """
    people = []
    n = int(population_params.inhabitants * senv.scaling)
    initially_infected = population_params.seed_infections
    while len(people) < n:
        people.extend(generate_people_in_new_house(senv, population_params))
    for _ in range(initially_infected):
        set_initial_infection(senv.sim_params, people)
    try:
        if senv.creation_queue:
            senv.creation_queue.put(len(people))
    except TypeError:  # Communication error with progress bar
        pass
    return people
def generate_people_in_new_house(senv: SimulationEnvironment, population_params: Population):
    """Yield the members of one new household.

    A household age group is drawn once; each member follows it with
    probability home_age_cofactor, otherwise gets an independent draw.
    """
    house_size = get_house_size(population_params.home_size_probabilities)
    house = cs.Home(population_params.geosocial_displacement)
    age_probabilities = population_params.age_probabilities
    age_groups = population_params.age_groups
    age_group_house = get_age_group(age_probabilities, age_groups)
    home_age_cofactor = senv.sim_params.home_age_cofactor
    for _ in range(house_size):
        age_group = (age_group_house
                     if np.random.random() < home_age_cofactor
                     else get_age_group(age_probabilities, age_groups)
                     )
        yield cs.Person(senv, age_group, house)
def add_randomness_to_age_group(senv: SimulationEnvironment, age_group, population_params: Population, i):
    """Perturb an age group's severity (log-odds shift with an age bias)
    and isolation adherence in place. *i* is the age-group index."""
    severity = np.array(age_group.severity)
    age_bias = senv.sim_params.severity_bias * (i - 4)
    # shift severity in log-odds space, then map back to probability
    new_odds = np.exp(np.log(severity / (1.0 - severity)
                             ) + senv.sim_params.severity_deviation + age_bias)
    age_group.severity = new_odds / (1.0 + new_odds)
    # NOTE(review): the self-assignment below is a no-op as written —
    # presumably it was meant to materialize a RandomParameter; confirm intent.
    if isinstance(population_params.isolation_propensity_increase, RandomParameter):
        population_params.isolation_propensity_increase = population_params.isolation_propensity_increase
    age_group.isolation_adherence += population_params.isolation_propensity_increase
def create_populations(senv: SimulationEnvironment) -> Dict[str, List[cs.Person]]:
    """Build every population segment, applying per-age-group randomness."""
    populations = {}
    for population_params in senv.sim_params.population_segments:
        for i, age_group in enumerate(population_params.age_groups):
            add_randomness_to_age_group(senv, age_group, population_params, i)
        populations[population_params.name] = get_population(senv, population_params)
    return populations
def simulate(
        sim_number,
        sim_params,
        simulation_size,
        duration,
        simulate_capacity,
        use_cache,
        creation_queue: Optional[Queue] = None,
        simulation_queue: Optional[Queue] = None,
) -> (np.ndarray, RandomParametersState):
    """Run one full simulation and return (stats tensor, materialized
    random-parameter state).

    The population is scaled down to *simulation_size*; results are cached
    keyed on all behavior-affecting arguments plus the engine version.
    """
    if use_cache:
        args = (
            sim_number, sim_params, simulation_size, duration, simulate_capacity, SIMULATION_ENGINE_VERSION)
        results = get_from_cache(args)
        if results:
            # Cache hit: still feed the progress bars their full quota so
            # the totals add up, then return the cached stats.
            try:
                if creation_queue:
                    creation_queue.put(simulation_size)
                if simulation_queue:
                    simulation_queue.put(duration)
            except TypeError:  # Communication error with progress bar
                pass
            return results[1]
    # Deterministic per-run seeding for reproducibility.
    cs.seed(sim_number)
    np.random.seed(sim_number)
    scaling = simulation_size / sim_params.total_inhabitants
    env = simpy.Environment()
    # Clone so materializing random parameters never mutates the caller's copy.
    sim_params = sim_params.clone()
    sim_params.random_parameters_state.materialize_object(sim_params)
    senv = SimulationEnvironment(
        env=env,
        sim_params=sim_params,
        duration=duration,
        sim_number=sim_number,
        scaling=scaling,
        simulate_capacity=simulate_capacity,
        isolation_factor=0.0,
        # Hospital resources are scaled to the simulated population size.
        attention=simpy.resources.resource.PriorityResource(env,
                                                            capacity=int(sim_params.capacity_hospital_max * scaling)),
        hospital_bed=simpy.resources.resource.PriorityResource(env,
                                                               capacity=int(
                                                                   sim_params.capacity_hospital_beds * scaling)),
        ventilator=simpy.resources.resource.PriorityResource(env,
                                                             capacity=int(sim_params.capacity_ventilators * scaling)),
        icu=simpy.resources.resource.PriorityResource(env, capacity=int(sim_params.capacity_icu * scaling)),
        stats=get_stats_matrix(sim_params.population_segments, duration),
        street_expositions_interval=sim_params.street_transmission_scale_days,
        social_group_expositions_interval=(
                sim_params.street_transmission_scale_days
                + sim_params.social_group_transmission_scale_difference
        ),
        creation_queue=creation_queue,
        simulation_queue=simulation_queue,
        lab=laboratory(env, scaling),
    )
    senv.populations = create_populations(senv)
    senv.people = list(chain.from_iterable(senv.populations.values()))
    env.process(track_population(senv))
    for intervention in sim_params.interventions:
        intervention.setup(senv)
    for early_stop in sim_params.early_stops or []:
        env.process(process_early_stop(senv, early_stop))
    # Advance day by day until the outbreak threshold day (d0) is reached,
    # giving up after MAX_WAIT_UNTIL_D0 days.
    while not senv.d0:
        if env.now < MAX_WAIT_UNTIL_D0:
            env.run(until=env.now + 1)
        else:
            senv.d0 = MAX_WAIT_UNTIL_D0
    try:
        env.run(until=duration + senv.d0 + 0.011)
    except EarlyStopError:
        pass
    # Rescale stats back up to the real population size.
    stats = senv.stats / senv.scaling
    if use_cache:
        save_to_cache(args, (stats, sim_params.random_parameters_state))
    return stats, sim_params.random_parameters_state
def simulate_wrapped(i_params, **kwargs):
    """multiprocessing adapter: unpack the (sim_number, params) tuple."""
    return simulate(*i_params, **kwargs)
def get_sim_params_list(sim_params: Parameters, random_states: List[RandomParametersState], n: int) -> List[
        Tuple[int, Parameters]]:
    """Build *n* (sim_number, Parameters) pairs, cycling over *random_states*.

    Each clone carries one random state; sim_number restarts per state so
    runs sharing a state get distinct seeds.
    """
    random_states_iter = cycle(random_states or [RandomParametersState()])
    sim_params_list = []
    for random_state, i in zip(random_states_iter, range(n)):
        sim_params_with_state = sim_params.clone()
        sim_params_with_state.random_parameters_state = random_state
        sim_number = int(i / len(random_states)) if random_states else i
        sim_params_list.append((sim_number, sim_params_with_state))
    return sim_params_list
def run_simulations(
        sim_params: Parameters,
        simulate_capacity=False,
        duration: int = 80,
        number_of_simulations: int = 4,  # For final presentation purposes, a value greater than 10 is recommended
        simulation_size: int = 100000,  # For final presentation purposes, a value greater than 500000 is recommended
        random_states: Optional[List[RandomParametersState]] = None,
        fpath=None,
        use_cache=True,
        tqdm=None,  # Optional tqdm function to display progress
):
    """Run *number_of_simulations* simulations in a process pool and return
    the combined Stats (optionally saved to *fpath*).

    When *tqdm* is given, two progress bars (population creation and
    simulation days) are fed through multiprocessing queues.
    """
    if tqdm:
        manager = Manager()
        creation_queue = manager.Queue()
        simulation_queue = manager.Queue()
    sim_params_list = get_sim_params_list(sim_params, random_states, number_of_simulations)
    simulate_with_params = partial(simulate_wrapped,
                                   simulation_size=simulation_size,
                                   duration=duration,
                                   simulate_capacity=simulate_capacity,
                                   use_cache=use_cache,
                                   creation_queue=creation_queue if tqdm else None,
                                   simulation_queue=simulation_queue if tqdm else None,
                                   )
    try:
        pool = Pool(min(cpu_count(), number_of_simulations))
        all_stats = pool.imap(simulate_with_params, sim_params_list)
        if tqdm:
            creation_bar, simulation_bar = show_progress(tqdm, creation_queue, simulation_queue, simulation_size,
                                                         number_of_simulations, duration)
            creation_bar.start()
            simulation_bar.start()
        # Materializing the imap iterator blocks until all runs finish.
        all_stats = list(all_stats)
    finally:
        pool.close()
        pool.join()
        if tqdm:
            creation_bar.stop()
            creation_bar.join()
            simulation_bar.stop()
            simulation_bar.join()
    stats = combine_stats(all_stats, sim_params)
    if fpath:
        stats.save(fpath)
    return stats
def combine_stats(all_stats: List[Tuple[np.ndarray, RandomParametersState]], sim_params: Parameters):
    """Stack per-run stats tensors into one Stats object."""
    mstats = np.stack([stats[0] for stats in all_stats])
    random_states = [stats[1] for stats in all_stats]
    population_names = tuple(p.name for p in sim_params.population_segments)
    return Stats(mstats, random_states, cs.MEASUREMENTS, METRICS, population_names, cs.age_str,
                 start_date=sim_params.start_date)
def show_progress(tqdm, creation_queue: Queue, simulation_queue: Queue, simulation_size: int,
                  number_of_simulations: int, duration: int):
    """Create (but do not start) the two progress bars fed by the queues."""
    creation_bar = ProgressBar(tqdm, creation_queue, simulation_size * number_of_simulations, 0, 'Population')
    simulation_bar = ProgressBar(tqdm, simulation_queue, duration * number_of_simulations, 1, 'Simulation')
    return creation_bar, simulation_bar
| StarcoderdataPython |
294689 | <filename>github/joeynmt/vizseq/_view/data_filter.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from typing import List
# TODO: scale this part
class VizSeqFilter(object):
    """Selects sentence indices whose text contains a query substring in
    any of the parallel sources."""

    @classmethod
    def filter(cls, data: List[List[str]], query: str) -> List[int]:
        """Return the indices i for which *query* occurs in data[k][i]
        for at least one source k.

        *data* is a list of parallel sentence lists (one per source),
        all of the same length. An empty query matches every index.
        """
        if len(query) == 0:
            return list(range(len(data[0])))
        indices = []
        for i, cur_list in enumerate(zip(*data)):
            # Idiom fix: `query in s` instead of `s.find(query) > -1`
            # (same semantics, clearer and avoids the index scan API).
            if any(query in s for s in cur_list):
                indices.append(i)
        return indices
| StarcoderdataPython |
222538 | from setuptools import find_packages
from setuptools import setup
# Minimal packaging metadata; the package code lives under src/.
setup(
    name="src",
    packages=find_packages(),
    version="0.1.0",
    description="Predicting BMI from facial images",
    author="<NAME>",
    license="MIT",
)
| StarcoderdataPython |
1714288 | <reponame>IMULMUL/etl-parser<filename>etl/parsers/etw/Microsoft_Pef_WFP_MessageProvider.py
# -*- coding: utf-8 -*-
"""
Microsoft-Pef-WFP-MessageProvider
GUID : c22d1b14-c242-49de-9f17-1d76b8b9c458
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=2000, version=0)
class Microsoft_Pef_WFP_MessageProvider_2000_0(Etw):
pattern = Struct(
"FragmentEventId" / Int16ul,
"GroupId" / Int32ul,
"ByteLength" / Int32ul,
"Payload" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=10001, version=0)
class Microsoft_Pef_WFP_MessageProvider_10001_0(Etw):
pattern = Struct(
"DriverName" / WString,
"MajorVersion" / Int16ul,
"MinorVersion" / Int16ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=10002, version=0)
class Microsoft_Pef_WFP_MessageProvider_10002_0(Etw):
pattern = Struct(
"DriverName" / WString,
"MajorVersion" / Int16ul,
"MinorVersion" / Int16ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=10003, version=0)
class Microsoft_Pef_WFP_MessageProvider_10003_0(Etw):
pattern = Struct(
"Callout" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=10004, version=0)
class Microsoft_Pef_WFP_MessageProvider_10004_0(Etw):
pattern = Struct(
"Callout" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=10005, version=0)
class Microsoft_Pef_WFP_MessageProvider_10005_0(Etw):
pattern = Struct(
"FilterId" / Int64ul,
"Callout" / Int32ul,
"FilterWeight" / Int64ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=10006, version=0)
class Microsoft_Pef_WFP_MessageProvider_10006_0(Etw):
pattern = Struct(
"FilterId" / Int64ul,
"Callout" / Int32ul,
"FilterWeight" / Int64ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=20001, version=0)
class Microsoft_Pef_WFP_MessageProvider_20001_0(Etw):
pattern = Struct(
"ErrorMessage" / WString,
"NTSTATUS" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=20002, version=0)
class Microsoft_Pef_WFP_MessageProvider_20002_0(Etw):
pattern = Struct(
"ErrorMessage" / WString,
"NTSTATUS" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=20003, version=0)
class Microsoft_Pef_WFP_MessageProvider_20003_0(Etw):
pattern = Struct(
"Callout" / Int32ul,
"ErrorMessage" / WString,
"NTSTATUS" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=20004, version=0)
class Microsoft_Pef_WFP_MessageProvider_20004_0(Etw):
pattern = Struct(
"Callout" / Int32ul,
"ErrorMessage" / WString,
"NTSTATUS" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=20005, version=0)
class Microsoft_Pef_WFP_MessageProvider_20005_0(Etw):
pattern = Struct(
"Callout" / Int32ul,
"ErrorMessage" / WString,
"NTSTATUS" / Int32ul
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60011, version=0)
class Microsoft_Pef_WFP_MessageProvider_60011_0(Etw):
pattern = Struct(
"SourceAddress" / Int32ul,
"DestinationAddress" / Int32ul,
"Protocol" / Int8ul,
"ByteLength" / Int16ul,
"MessageFrame" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60012, version=0)
class Microsoft_Pef_WFP_MessageProvider_60012_0(Etw):
pattern = Struct(
"SourceAddress" / Int32ul,
"DestinationAddress" / Int32ul,
"Protocol" / Int8ul,
"FlowHandle" / Int64ul,
"ByteLength" / Int16ul,
"MessageFrame" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60021, version=0)
class Microsoft_Pef_WFP_MessageProvider_60021_0(Etw):
pattern = Struct(
"Protocol" / Int8ul,
"ByteLength" / Int16ul,
"MessageFrame" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60022, version=0)
class Microsoft_Pef_WFP_MessageProvider_60022_0(Etw):
pattern = Struct(
"Protocol" / Int8ul,
"FlowHandle" / Int64ul,
"ByteLength" / Int16ul,
"MessageFrame" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60031, version=0)
class Microsoft_Pef_WFP_MessageProvider_60031_0(Etw):
pattern = Struct(
"SourceAddress" / Int32ul,
"DestinationAddress" / Int32ul,
"SourcePort" / Int16ul,
"DestinationPort" / Int16ul,
"Luid" / Int64ul,
"Direction" / Int8ul,
"Protocol" / Int8ul,
"FlowHandle" / Int64ul,
"ProcessId" / Int64ul,
"ByteLength" / Int16ul,
"ProcessPath" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60041, version=0)
class Microsoft_Pef_WFP_MessageProvider_60041_0(Etw):
pattern = Struct(
"SourcePort" / Int16ul,
"DestinationPort" / Int16ul,
"Luid" / Int64ul,
"Direction" / Int8ul,
"Protocol" / Int8ul,
"FlowHandle" / Int64ul,
"ProcessId" / Int64ul,
"ByteLength" / Int16ul,
"ProcessPath" / Bytes(lambda this: this.ByteLength)
)
@declare(guid=guid("c22d1b14-c242-49de-9f17-1d76b8b9c458"), event_id=60050, version=0)
class Microsoft_Pef_WFP_MessageProvider_60050_0(Etw):
pattern = Struct(
"DiscardModule" / Int8ul,
"DiscardReason" / Int32ul,
"DiscardFilterID" / Int64ul
)
| StarcoderdataPython |
9703082 | import asyncio
import os
import dbsync
import pytest
import sqlalchemy
from dbsync import client
from dbsync.client.wsclient import SyncClient
from dbsync.client.tracking import track, start_tracking
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .models_websockets import Base, PORT, SERVER_URL, server_db, client_db, A, B
def register_client_tracking():
    """Register the model classes dbsync should track on the client side."""
    client.start_tracking(A)
    # B is tracked for "push" operations only
    client.start_tracking(B, ("push",))
def create_sync_client(pid: int = 0, reset_db=True) -> SyncClient:
    """Build a SyncClient backed by a fresh per-*pid* sqlite database.

    With reset_db=True the database file is deleted first.  Also switches
    dbsync into client mode, creates the schema, registers tracking and
    ensures the current thread has an asyncio event loop.
    """
    from dbsync import core
    core.mode = "client"
    dbname = client_db(pid)
    if reset_db:
        try:
            print(f"deleting db file {dbname}")
            os.remove(dbname)
        except FileNotFoundError:
            print(f"ignore deleting non existing file {dbname}")
    engine_client = create_engine(f"sqlite:///{dbname}")
    Base.metadata.create_all(engine_client)
    dbsync.set_engine(engine_client)
    dbsync.create_all()
    register_client_tracking()
    # Non-main threads have no default loop; create one if needed.
    try:
        asyncio.get_event_loop()
    except RuntimeError as e:
        asyncio.set_event_loop(asyncio.new_event_loop())
    clientws = SyncClient(port=PORT, path="sync", engine=engine_client, id=pid)
    # client.connect()
    return clientws
@pytest.fixture(scope="function")
def sync_client():
    """Fresh, unregistered SyncClient with id 0 and a reset database."""
    return create_sync_client(0)
def create_sync_client_registered(pid: int = 0, reset_db=True) -> SyncClient:
    """Create a SyncClient and synchronously run its server registration."""
    sync_client = create_sync_client(pid, reset_db=reset_db)
    asyncio.run(sync_client.register())
    return sync_client
@pytest.fixture(scope="function")
def sync_client_registered() -> SyncClient:
    """Registered SyncClient with id 0 and a reset database."""
    return create_sync_client_registered(0)
def create_client_session(pid: int):
    """Open a new SQLAlchemy session on the sqlite database of client *pid*."""
    engine = create_engine(f"sqlite:///{client_db(pid)}")
    session_factory = sessionmaker(engine)
    return session_factory()
@pytest.fixture(scope="function")
def client_session() -> sqlalchemy.orm.session.Session:
    """
    provides a session object to the client (id 0) database for sync checking
    """
    return create_client_session(0)
##########################################
def create_sync_client_():
    # TODO: unimplemented placeholder.
    ...
def create_sync_client_mp():
    # TODO: unimplemented placeholder (multiprocessing variant, judging by the name).
    ...
| StarcoderdataPython |
1629310 | <filename>plotvars-masked.py
# API credentials, blanked ("masked") for publication — fill in before use.
mapbox_access_token = ''
plotly_username=''
plotly_apikey=''
ipinfo_access_token = ''
| StarcoderdataPython |
8180363 | <reponame>megies/SHTOOLS
"""
This script builds the python documentation from the function signature and the
customized markdown files. The processed documentation is saved as ascii text
files which are loaded on runtime and replace the __doc__ string of the f2py
wrapped functions.
"""
import sys
import os
import re
import textwrap
import _SHTOOLS
def main():
    """Build plain-text docstrings for pyshtools from markdown documentation.

    argv[1] is the documentation folder, argv[2] the library folder.  For every
    documented function a ``<name>.doc`` file is written under
    ``<libfolder>/pyshtools/doc``; these files are loaded at runtime to replace
    the ``__doc__`` of the f2py wrapped functions.
    """
    # ---- input/output folders ----
    docfolder = os.path.abspath(sys.argv[1])
    libfolder = os.path.abspath(sys.argv[2])
    mddocfolder = os.path.join(docfolder, 'src', 'pydoc')
    pydocfolder = os.path.join(libfolder, 'pyshtools', 'doc')
    print('-- searching documentation in folder: {} --'.format(mddocfolder))

    # ---- loop through the f2py _SHTOOLS functions and make docstrings ----
    # A missing md file is silently skipped for wrapped functions.
    for name, func in _SHTOOLS.__dict__.items():
        if callable(func):
            _convert_one(name, mddocfolder, pydocfolder, require_md=False)

    # ---- loop through functions that are defined in python ----
    # Here a missing md file surfaces as a printed IOError instead.
    for name in ['PlmIndex', 'YilmIndexVector']:
        _convert_one(name, mddocfolder, pydocfolder, require_md=True)


def _convert_one(name, mddocfolder, pydocfolder, require_md):
    """Process the markdown doc for *name* and write the resulting .doc file."""
    try:
        fname_mddoc = os.path.join(mddocfolder, 'py' + name.lower() + '.md')
        if not require_md and not os.path.isfile(fname_mddoc):
            return
        docstring = process_mddoc(fname_mddoc)
        fname_pydoc = os.path.join(pydocfolder, name.lower() + '.doc')
        with open(fname_pydoc, 'w') as pydocfile:
            pydocfile.write(docstring)
    except IOError as msg:
        print(msg)
# ===== PROCESS MD DOCUMENTATION FILE ====
def process_mddoc(fname_mddoc):
# ---- md file search patterns ----
revalue = re.compile('## Value\n\n', re.DOTALL)
retail = re.compile('# See (.*)', re.DOTALL)
reh2 = re.compile('## (.*?)\n', re.DOTALL)
reh1 = re.compile('\A# (.*?)\n', re.DOTALL)
reh1b = re.compile('\n# (.*?)\n', re.DOTALL)
recode = re.compile('`(.*?)`', re.DOTALL)
restaresc = re.compile(r'(\\\*)', re.DOTALL)
# rebold = re.compile('(?![\])[*](.*?)(?![\])[*]',re.DOTALL)
# ---- open md file and search for patterns ----
with open(fname_mddoc, 'r') as mdfile:
# remove the first two lines
mdstring = mdfile.read().split('\n', 2)[2]
# First, remove '## Value\n\n' from constant documentation
match = revalue.search(mdstring)
if match is not None:
mdstring = re.sub(match.group(0), '', mdstring)
match = retail.search(mdstring)
if match is not None:
mdstring = mdstring.replace(match.group(0), '')
match = reh1.search(mdstring)
while match is not None:
mdstring = re.sub(match.group(0), match.group(1) + '\n' +
len(match.group(1)) * '-', mdstring)
match = reh1.search(mdstring)
match = reh1b.search(mdstring)
while match is not None:
mdstring = re.sub(match.group(0), '\n' + match.group(1) + '\n' +
len(match.group(1)) * '-', mdstring)
match = reh1b.search(mdstring)
match = reh2.search(mdstring)
while match is not None:
mdstring = re.sub(match.group(0), match.group(1) + '\n' +
len(match.group(1)) * '-', mdstring)
match = reh2.search(mdstring)
match = recode.search(mdstring)
while match is not None:
mdstring = mdstring.replace(match.group(0), match.group(1))
match = recode.search(mdstring)
match = restaresc.search(mdstring)
while match is not None:
mdstring = mdstring.replace(match.group(0), '*')
match = recode.search(mdstring)
# ---- combine into docstring ----
docstring = ''
tmp = mdstring.splitlines(True)
# --- remove line breaks between parameters ---
for i in range(0, len(tmp)-3):
if tmp[i][0:4] == ': ' and tmp[i+3][0:4] == ': ':
tmp[i+1] = ''
for i in range(0, len(tmp)):
if tmp[i][0:4] == ': ':
docstring += textwrap.fill(tmp[i][4:], width=80,
replace_whitespace=False,
initial_indent=' ',
subsequent_indent=' ') + '\n'
elif tmp[i] == '':
pass
else:
docstring += textwrap.fill(tmp[i], width=80,
replace_whitespace=False) + '\n'
return docstring
# ===== PROCESS F2PY DOCUMENTATION ====
def process_f2pydoc(f2pydoc):
    """
    this function replace all optional _d0 arguments with their default values
    in the function signature. These arguments are not intended to be used and
    signify merely the array dimensions of the associated argument.

    Regex patterns are raw strings (the original used plain strings, which
    emit invalid-escape-sequence warnings on modern Python); the matching
    behaviour is unchanged.
    """
    # ---- split f2py document in its parts
    # 0=Call Signature
    # 1=Parameters
    # 2=Other (optional) Parameters (only if present)
    # 3=Returns
    docparts = re.split('\n--', f2pydoc)

    if len(docparts) == 4:
        doc_has_optionals = True
    elif len(docparts) == 3:
        doc_has_optionals = False
    else:
        print('-- uninterpretable f2py documentation --')
        return f2pydoc

    # ---- replace arguments with _d suffix with empty string in ----
    # ---- function signature (remove them): ----
    docparts[0] = re.sub(r'[\[(,]\w+_d\d', '', docparts[0])

    # ---- replace _d arguments of the return arrays with their default value:
    if doc_has_optionals:
        returnarray_dims = re.findall(r'[\[(,](\w+_d\d)', docparts[3])
        for arg in returnarray_dims:
            searchpattern = arg + r' : input.*\n.*Default: (.*)\n'
            match = re.search(searchpattern, docparts[2])
            if match:
                default = match.group(1)
                docparts[3] = re.sub(arg, default, docparts[3])
                docparts[2] = re.sub(searchpattern, '', docparts[2])

    # ---- remove all optional _d# from optional argument list:
    if doc_has_optionals:
        searchpattern = r'\w+_d\d : input.*\n.*Default: (.*)\n'
        docparts[2] = re.sub(searchpattern, '', docparts[2])

    # ---- combine doc parts to a single string
    processed_signature = '\n--'.join(docparts)
    return processed_signature
# ==== EXECUTE SCRIPT (only when run directly, not on import) ====
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9776937 | """Library with basic functions for data input and plotting."""
from plotting import PlotAnnotator
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import csv
from contextlib import contextmanager
# NOTE(review): 'valance' looks like a misspelling of 'valence'; it is kept
# because the CSV readers below use it as a column/field name.
BASIC_EMOTIONS = [
    'happiness', 'sadness', 'fear', 'surprise', 'anger', 'disgust']
BASIC_EMOTION_NAMES = BASIC_EMOTIONS  # can be replaced for translations.
"""Six basic emotions."""
BASIC_STIMULI = ['valance', 'arousal']
BASIC_STIMULUS_NAMES = BASIC_STIMULI  # can be replaced for translations.
"""List of stimuli dimensions"""
class Config:
    """Runtime configuration for the clustering algorithm.

    Holds the cluster/iteration counts, their sweep ranges, the number of
    evaluation repetitions and the output directory.
    """

    def __init__(
            self,
            n_clusters=4,
            n_clusters_range=(2, 11),
            n_iterations=100,
            n_iterations_range=(1, 3000, 100),
            n_evaluations=100,
            out_dir='.'):
        self.__dict__.update(
            n_clusters=n_clusters,
            n_clusters_range=n_clusters_range,
            n_iterations=n_iterations,
            n_iterations_range=n_iterations_range,
            n_evaluations=n_evaluations,
            out_dir=out_dir,
        )

    @contextmanager
    def fork(self, **overrides):
        """Temporarily apply *overrides* to this config ('with' context).

        The previous attribute values are restored when the context exits,
        even if the body raises.
        """
        saved = dict(self.__dict__)
        self.__dict__.update(overrides)
        try:
            yield self
        finally:
            self.__dict__.update(saved)
class InputData:
    """Input data structure (dataset): labelled points in a 2-D plane."""

    def __init__(self, label, x, y, label_name=None, x_name=None, y_name=None):
        """Store labels/coordinates; the *_name values are used in output."""
        self.label = np.array(label)
        self.x = np.array(x)
        self.y = np.array(y)
        self.label_name = label_name
        self.x_name = x_name
        self.y_name = y_name
        self._samples = np.column_stack((self.x, self.y))

    @property
    def size(self):
        """Number of samples in the dataset."""
        return self._samples.shape[0]

    @property
    def samples(self):
        """Returns the combined X, Y values in the shape of [N, 2]."""
        return self._samples

    def split_on_filter(self, filt):
        """Splits the data into two new instances as defined by the lambda filter.

        Returns (matching, non_matching, matching_indices, non_matching_indices);
        *filt* receives (index, [x, y]) for every sample.
        """
        result = (self._init_interim(), self._init_interim())
        indices = [[], []]
        for i, row in enumerate(self._samples):
            partition = int(not filt(i, row))  # 0 when filt matches, 1 otherwise
            result[partition]["label"].append(self.label[i])
            result[partition]["x"].append(self.x[i])
            result[partition]["y"].append(self.y[i])
            indices[partition].append(i)
        return (
            InputData(**result[0]),
            InputData(**result[1]),
            np.array(indices[0]),
            np.array(indices[1]))

    def split_on_key(self, key_func):
        """Splits the data into new instances keyed by key_func(index, [x, y])."""
        result = {}
        for i, row in enumerate(self._samples):
            key = key_func(i, row)
            if key not in result:
                result[key] = self._init_interim()
            result[key]["label"].append(self.label[i])
            result[key]["x"].append(self.x[i])
            result[key]["y"].append(self.y[i])
        for key, interim in result.items():
            result[key] = InputData(**interim)
        return result

    def serialize(self, delimiter=';', use_quotes=True, round_decimals=True):
        """Returns the data set serialized as CSV or TSV (header + one row per sample)."""
        def quote(text):
            # Renamed from `str` to avoid shadowing the builtin.
            return '"%s"' % text if use_quotes else text

        def serialize_float(value):
            return "%.2f" % value if round_decimals else str(value)

        out = delimiter.join([
            quote(self.label_name),
            quote(self.x_name),
            quote(self.y_name)
        ]) + "\n"
        for i in range(self.size):
            out += delimiter.join([
                quote(self.label[i]),
                serialize_float(self.x[i]),
                serialize_float(self.y[i])
            ]) + "\n"
        return out

    def reduce_to_samples(self, num_samples):
        """Returns samples from the data that are closest to their centroid."""
        centroid = (np.sum(self.x) / self.size, np.sum(self.y) / self.size)
        centroid_vector = np.column_stack(
            (np.repeat(centroid[0], self.size),
             np.repeat(centroid[1], self.size)))
        distance = np.linalg.norm(
            self.samples - centroid_vector, keepdims=True, axis=1)
        # `object` dtype keeps string labels intact when stacked with floats.
        # (BUG FIX: `np.object` was removed in NumPy 1.24; the builtin is the
        # supported spelling and is what the alias always meant.)
        data = np.column_stack((
            self.label.astype(object),
            self.x,
            self.y,
            distance))
        data = data[data[:, 3].argsort()]
        data = data[0:num_samples]
        result = self._init_interim()
        for i, var in enumerate(["label", "x", "y"]):
            result[var] = list(data[:, i])
        return InputData(**result)

    def _init_interim(self):
        """
        Returns the interim structure to fill in when creating a new data set.
        """
        return {
            "label": [],
            "x": [],
            "y": [],
            "label_name": self.label_name,
            "x_name": self.x_name,
            "y_name": self.y_name
        }
def read_input_data(
        filename,
        label_field="label",
        x_axis="valance",
        y_axis="arousal",
        label_name="label",
        x_name="valance",
        y_name="arousal",
        fields=["label", "valance", "arousal"],
        delimiter=",",
        quotechar='"'):
    """Loads and deserializes the data set into the provided columns.

    Every row of the file is mapped onto *fields* by position; the columns
    selected by label_field/x_axis/y_axis become the dataset.
    """
    labels, xs, ys = [], [], []
    with open(filename, "r") as f:
        for row in csv.reader(f, delimiter=delimiter, quotechar=quotechar):
            by_name = {field: row[i] for i, field in enumerate(fields)}
            labels.append(by_name[label_field])
            xs.append(float(by_name[x_axis]))
            ys.append(float(by_name[y_axis]))
    return InputData(
        label=labels,
        x=xs,
        y=ys,
        label_name=label_name,
        x_name=x_name,
        y_name=y_name)
def read_naps(filename, **kwargs):
    """Returns a loaded NAPS dataset as an InputData.

    Expects comma-separated rows of (label, valance, arousal); extra keyword
    arguments are forwarded to read_input_data.
    """
    return read_input_data(
        filename,
        delimiter=",",
        quotechar='"',
        label_field="label",
        x_axis="valance",
        y_axis="arousal",
        fields=["label", "valance", "arousal"],
        **kwargs)
def read_naps_be(filename, **kwargs):
    """Returns a loaded NAPS BE dataset as an InputData.

    Semicolon-separated rows of (image_name, label, <six basic emotions>,
    arousal, valance); extra keyword arguments are forwarded to read_input_data.
    """
    return read_input_data(
        filename,
        delimiter=';',
        fields=['image_name', 'label'] + BASIC_EMOTIONS + ['arousal', 'valance'],
        **kwargs)
def partition_naps(samples, n_clusters):
    """Computes the K-means partitions on samples of shape [N,2].

    Uses random (non k-means++) centroid initialization and returns the
    fitted sklearn KMeans estimator.
    """
    return KMeans(n_clusters=n_clusters, init='random').fit(samples)
def find_dominant(sample):
    """Finds the first max value in the sample (i.e. argmax).

    Returns a (position, value) pair; ties resolve to the earliest position.
    """
    top = np.argmax(sample)
    return top, sample[top]
def plot_setup(x_label="X", y_label="Y"):
    """Creates a new 16x9-inch, 80-dpi subplot with the given axis labels.

    Returns the (figure, axes) pair.
    """
    fig, ax = plt.subplots()
    fig.set_size_inches(w=16, h=9)
    fig.set_dpi(80)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    return fig, ax
def reindex_partitions(samples, indices):
    """Reindexes partitions based on the centroid positions
    (lexicographical sorting of points for stable coloring).

    Returns an iterator over the relabelled cluster ids, one per sample.
    """
    # Accumulate coordinate sums and counts per cluster id.
    stats = {}
    for position, cluster in enumerate(indices):
        entry = stats.setdefault(cluster, {
            'x': 0.0,
            'y': 0.0,
            'count': 0.0,
            'center': None,
            'cluster': cluster
        })
        entry['x'] += samples[position, 0]
        entry['y'] += samples[position, 1]
        entry['count'] += 1.0

    # Compute centroids and index the partitions by their original id.
    by_cluster = [None] * len(stats)
    for cluster, entry in stats.items():
        entry['center'] = (
            entry['x'] / entry['count'],
            entry['y'] / entry['count'])
        by_cluster[cluster] = entry

    # Sort partitions by centroid; lexicographically smaller centers get
    # smaller new ids, then remap every sample's cluster id.
    remap = [None] * len(stats)
    for new_id, entry in enumerate(sorted(by_cluster, key=lambda e: e['center'])):
        remap[entry['cluster']] = new_id
    return map(lambda c: remap[c], indices)
def plot(input_data, title='', output_action='show', filename=None):
    """Plots the data set with its labels.

    title/output_action/filename are forwarded to PlotAnnotator.output —
    presumably selecting between showing and saving the figure; see
    PlotAnnotator for the exact semantics.
    """
    fig, ax = plot_setup(x_label=input_data.x_name, y_label=input_data.y_name)
    ax.scatter(input_data.x, input_data.y)
    PlotAnnotator(input_data).output(plt, ax, title, output_action, filename)
def partition_for_plotting(indices, input_data, n_clusters):
    """Groups the dataset's points by cluster index for plotting.

    Returns {cluster: {'x': [...], 'y': [...], 'label': last_label}}; note
    that 'label' keeps only the label of the last point put in the cluster.
    """
    buckets = {cluster: {'x': [], 'y': [], 'label': None}
               for cluster in range(n_clusters)}
    for position, cluster in enumerate(indices):
        bucket = buckets[cluster]
        bucket['x'].append(input_data.x[position])
        bucket['y'].append(input_data.y[position])
        bucket['label'] = input_data.label[position]
    return buckets
def plot_clusters(indices, input_data, n_clusters, cluster_names=None,
                  title=None, output_action='show', filename=None,
                  block=True):
    """Plots the clusters with different colors and labels them.

    *indices* assigns a cluster id to each sample; *cluster_names*
    (default "P0", "P1", ...) become the legend entries.
    """
    if cluster_names is None:
        cluster_names = ["P" + str(i) for i in range(n_clusters)]
    fig, ax = plot_setup(x_label=input_data.x_name, y_label=input_data.y_name)
    # One distinct rainbow color per cluster.
    color = plt.cm.rainbow(np.linspace(0, 1, n_clusters))
    partitions = partition_for_plotting(indices, input_data, n_clusters)
    for partition_index, partition in partitions.items():
        ax.scatter(
            partition['x'], partition['y'],
            c=color[partition_index],
            label=cluster_names[partition_index])
    if not block:
        plt.ion()  # interactive mode so the call does not block the caller
    plt.legend()
    PlotAnnotator(input_data).output(plt, ax, title, output_action, filename)
def mix_colors(colors, indices_prob, post_alpha=0.5):
    """Weighted RGBA color mixing: the color ratio depending on indices_prob.

    indices_prob has shape (N, C) (membership weights per sample) and colors
    has shape (C, 4) (one RGBA row per cluster); the result is (N, 4) blended
    colors whose alpha channel is scaled by *post_alpha*.
    """
    blended = indices_prob @ colors  # (N, C) x (C, 4) -> (N, 4)
    alpha_scale = np.array([1.0, 1.0, 1.0, post_alpha])
    return blended * alpha_scale
def plot_clusters_with_probability(
        indices_prob, input_data, cluster_names=None,
        title=None, output_action='show', filename=None,
        block=True, plot_fuzzy_simple=False):
    """
    Plots the clusters with different colors and assigns the labels, for each
    point the degree of membership is defined in an array of size n_clusters.

    indices_prob has shape (N, n_clusters); points whose largest membership is
    exactly 1.0 are drawn in their cluster's color, all other ("fuzzy") points
    get a probability-weighted color mix (or black, if plot_fuzzy_simple).
    """
    n_clusters = indices_prob.shape[1]
    if cluster_names is None:
        cluster_names = ["P" + str(i) for i in range(n_clusters)]
    fig, ax = plot_setup(x_label=input_data.x_name, y_label=input_data.y_name)
    color = plt.cm.rainbow(np.linspace(0, 1, n_clusters))

    # Split the data into to subsets: points that 100% of the time fall into their
    # own cluster, and the ones that don't.
    indices = np.argmax(indices_prob, axis=1)
    maxes = np.max(indices_prob, axis=1)
    exact_data, fuzzy_data, match_idx, non_match_idx = (
        input_data.split_on_filter(lambda i, row: maxes[i] == 1.0))
    exact_indices = np.take(indices, match_idx)
    fuzzy_indices_prob = np.take(indices_prob, non_match_idx, axis=0)
    fuzzy_color = mix_colors(color, fuzzy_indices_prob)

    # Plot points that are 100% of the time in their own cluster.
    partitions = partition_for_plotting(exact_indices, exact_data, n_clusters)
    for partition_index, partition in partitions.items():
        ax.scatter(
            partition['x'], partition['y'],
            c=color[partition_index],
            label=cluster_names[partition_index])

    # Plot the fuzzy points - the edge points between the clusters.
    for i in range(fuzzy_data.size):
        ax.scatter(
            [fuzzy_data.x[i]],
            [fuzzy_data.y[i]],
            color=fuzzy_color[i] if not plot_fuzzy_simple else [0, 0, 0, 1])
    if not block:
        plt.ion()  # interactive mode so the call does not block the caller
    plt.legend()
    PlotAnnotator(input_data).output(plt, ax, title, output_action, filename)
| StarcoderdataPython |
11279298 | #!/usr/bin/env python
# Script takes a Rosecheckers output file and extracts its diagnostic
# information
#
# The only argument indicates the file containing the input.
#
# The script should take the text data via standard input. The data
# should be produced from a build process using make and g++. A
# suitable command to generate the text data is:
#
# make 2>&! > makelog
#
# This script produces only one message per diagnostic
#
# Copyright (c) 2007-2018 Carnegie Mellon University. All Rights Reserved.
# See COPYRIGHT file for details.
import sys
import re
# NOTE(review): this script uses Python 2 print-statement syntax and must be
# run under a Python 2 interpreter.
if len(sys.argv) != 2:
    raise TypeError("Usage: " + sys.argv[0] + " <raw-input> > <org-output>")
input = sys.argv[1]  # NOTE(review): shadows the builtin `input`
# Most recent "In directory:" marker, used to prefix relative file names.
directory = ""
for line in open(input):
    line = line.strip()
    # Directory-change marker emitted in the build log.
    parse = re.match(r"^In directory: *(.*)$", line)
    if (parse != None):
        directory = parse.group(1)
        continue
    # GCC-style diagnostic: file:line: warning|error: rule-id: message
    parse = re.match(
        r"^(.*?):([0-9]*): (warning|error): ([-A-Za-z0-9]*): (.*?) *$", line)
    if (parse == None):
        continue
    line_file = parse.group(1)
    line_line = parse.group(2)
    line_id = parse.group(4)
    # '|' would break the org-mode table emitted below, so replace it.
    line_message = parse.group(5).replace("|", " ")
    print "| " + line_id + " | " + directory + "/" + line_file + " | " + line_line + " | " + line_message + " |"
| StarcoderdataPython |
1926819 | """Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy <EMAIL>
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import json
import os
from types import SimpleNamespace
import pandas as pd
import torch
from torch.utils.data import DataLoader
from preprocess import preprocess
from data import Data
from evaluate import evaluatex
from model import BertYForClassification, RnnForSentencePairClassification
from utils import load_torch_model
# Binary classification labels used by the evaluator.
LABELS = ['0', '1']
# Maps the config's model_type value to the model class to instantiate.
MODEL_MAP = {
    'bert': BertYForClassification,
    'rnn': RnnForSentencePairClassification
}
# Intermediate CSV produced by preprocess() and consumed by the data loader.
TEMPFILE='test.csv'
def main(in_file='/data/',
         out_file='/output/result.txt',
         model_config='config/bert_config.json'):
    """Test model for given test set on 1 GPU or CPU.

    Args:
        in_file: folder with the raw input data (preprocessed into TEMPFILE)
        out_file: path of the JSON result file to write
        model_config: path of the JSON model configuration
    """
    # 0. Load config
    with open(model_config) as fin:
        config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
    device = torch.device('cuda') if torch.cuda.is_available() \
        else torch.device('cpu')
    # 1. Load data
    data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
                max_seq_len=config.max_seq_len,
                model_type=config.model_type, config=config)
    # 1.1 preprocess the raw input folder into the intermediate CSV file.
    preprocess(in_file, TEMPFILE)
    test_set = data.load_file(TEMPFILE, train=False)
    data_loader_test = DataLoader(
        test_set, batch_size=config.batch_size, shuffle=False)
    # 2. Load model
    model = MODEL_MAP[config.model_type](config)
    model = load_torch_model(
        model, model_path=os.path.join(config.model_path, 'model.bin'))
    model.to(device)
    # 3. Evaluate
    answer_list = evaluatex(model, data_loader_test, device)
    # 4. Collect positively-classified candidate rows per question id as
    #    option letters 'A', 'B', ...; counter restarts for every new id.
    id_list = pd.read_csv(TEMPFILE)['id'].tolist()
    result = {}
    for i, j in zip(id_list, answer_list):
        if i not in result:  # idiomatic membership test (was result.keys())
            counter = 0
            result[i] = []
        if j == '1':
            result[i].append(chr(ord('A') + counter))
        counter += 1
    # Write results; the original passed an open() handle straight to
    # json.dump and leaked it — `with` closes (and flushes) it deterministically.
    with open(out_file, "w", encoding="utf8") as fout:
        json.dump(result, fout, indent=2, ensure_ascii=False, sort_keys=True)
if __name__ == '__main__':
    # Command-line entry point: all arguments are optional and default to the
    # competition's docker paths.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_config', '-c', default='config/bert_config.json', help="specific config file", required=False)
    parser.add_argument('--in_file', '-i', default='/data', help="data folder", required=False)
    parser.add_argument('--out_file', '-o', default='/output/result.txt', help="result file path", required=False)
    args = parser.parse_args()
    main(args.in_file, args.out_file, args.model_config)
| StarcoderdataPython |
320067 | <gh_stars>1-10
# coding: utf-8
import os
import logging
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
class Setting(BaseSettings):
    """Application settings loaded from environment variables / a .env file
    (pydantic BaseSettings)."""
    # NOTE(review): "developpment" looks like a typo for "development"; the
    # value may be compared elsewhere, so it is left unchanged.
    ENVIRONMENT: str = "developpment"
    # Required Postgres connection string. NOTE(review): the name reads like
    # a typo for PG_DSN, but the corresponding env var depends on it.
    PG_DNS: PostgresDsn
    # docs (set any of these to None to disable the endpoint)
    OPENAPI_URL: Optional[str] = "/openapi.json"
    DOCS_URL: Optional[str] = "/docs"
    REDOC_URL: Optional[str] = "/redoc"
    # logging
    LOG_TO_STDOUT: bool = True
    LOGGING_LEVEL: int = logging.INFO
    LOGGING_FMT: str = "%(asctime)s | %(name)-20s | %(levelname)-8s | %(message)s"
@lru_cache()
def get_setting() -> Setting:
    """Returns the process-wide Setting instance (cached after first load).

    The env file is taken from the DOTENV_PATH variable, defaulting to ".env".
    """
    return Setting(
        _env_file=os.environ.get("DOTENV_PATH", ".env"),
        _env_file_encoding="utf-8",
    )
| StarcoderdataPython |
12858801 | import sys
import numpy as np
import cv2
from easydict import EasyDict as edict
from base_tracker import BaseTracker
import path_config
sys.path.append("external/SiamDW/lib")
from tracker.siamrpn import SiamRPN
import models.models as models
from utils.utils import load_pretrain
class SiamDW(BaseTracker):
    """SiamRPN-based tracker (SiamDW) wrapped in the BaseTracker interface."""

    def __init__(self):
        super().__init__("SiamDW")
        net_file = path_config.SIAMDW_MODEL
        # Tracker meta-information expected by the SiamDW library.
        info = edict()
        info.arch = "SiamRPNRes22"
        info.dataset = "OTB2015"
        info.epoch_test = False
        info.cls_type = "thinner"
        self.tracker = SiamRPN(info)
        self.net = models.__dict__[info.arch](anchors_nums=5, cls_type=info.cls_type)
        self.net = load_pretrain(self.net, net_file)
        self.net.eval()
        self.net = self.net.cuda()

    @staticmethod
    def _read_frame(image_file):
        """Reads an image file and promotes grayscale frames to 3-channel BGR.

        Extracted helper: this logic was duplicated in initialize() and track().
        """
        image = cv2.imread(image_file)
        if len(image.shape) == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        return image

    def initialize(self, image_file, box):
        """Initializes tracker state from the first frame and an (x, y, w, h) box."""
        image = self._read_frame(image_file)
        center = np.array([box[0] + (box[2] - 1) / 2, box[1] + (box[3] - 1) / 2])
        size = np.array([box[2], box[3]])
        self.state = self.tracker.init(image, center, size, self.net)

    def track(self, image_file):
        """Tracks the target into the next frame; returns an (x, y, w, h) box."""
        image = self._read_frame(image_file)
        self.state = self.tracker.track(self.state, image)
        center = self.state["target_pos"]
        size = self.state["target_sz"]
        bbox = (center[0] - size[0] / 2, center[1] - size[1] / 2, size[0], size[1])
        return bbox
| StarcoderdataPython |
3509503 | import logging
import numpy as np
import xarray as xr
from xclim import run_length as rl
from xclim import utils
from xclim.utils import declare_units
from xclim.utils import units
# logging.basicConfig(level=logging.DEBUG)
# logging.captureWarnings(True)
xr.set_options(enable_cftimeindex=True)  # Set xarray to use cftimeindex

# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases

# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #

# Public API of this module (some entries are defined further down the file).
__all__ = [
    "cold_spell_days",
    "daily_pr_intensity",
    "maximum_consecutive_wet_days",
    "cooling_degree_days",
    "freshet_start",
    "growing_degree_days",
    "growing_season_length",
    "heat_wave_index",
    "heating_degree_days",
    "tn_days_below",
    "tx_days_above",
    "warm_day_frequency",
    "warm_night_frequency",
    "wetdays",
    "maximum_consecutive_dry_days",
    "maximum_consecutive_tx_days",
    "tropical_nights",
]
@declare_units("days", tas="[temperature]", thresh="[temperature]")
def cold_spell_days(tas, thresh="-10 degC", window=5, freq="AS-JUL"):
r"""Cold spell days
The number of days that are part of a cold spell, defined as five or more consecutive days with mean daily
temperature below a threshold in °C.
Parameters
----------
tas : xarrray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature below which a cold spell begins [℃] or [K]. Default : '-10 degC'
window : int
Minimum number of days with temperature below threshold to qualify as a cold spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Cold spell days.
Notes
-----
Let :math:`T_i` be the mean daily temperature on day :math:`i`, the number of cold spell days during
period :math:`\phi` is given by
.. math::
\sum_{i \in \phi} \prod_{j=i}^{i+5} [T_j < thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
"""
t = utils.convert_units_to(thresh, tas)
over = tas < t
group = over.resample(time=freq)
return group.apply(rl.windowed_run_count, window=window, dim="time")
@declare_units("mm/day", pr="[precipitation]", thresh="[precipitation]")
def daily_pr_intensity(pr, thresh="1 mm/day", freq="YS"):
r"""Average daily precipitation intensity
Return the average precipitation over wet days.
Parameters
----------
pr : xarray.DataArray
Daily precipitation [mm/d or kg/m²/s]
thresh : str
precipitation value over which a day is considered wet. Default : '1 mm/day'
freq : str, optional
Resampling frequency defining the periods
defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling. Default : '1 mm/day'
Returns
-------
xarray.DataArray
The average precipitation over wet days for each period
Notes
-----
Let :math:`\mathbf{p} = p_0, p_1, \ldots, p_n` be the daily precipitation and :math:`thresh` be the precipitation
threshold defining wet days. Then the daily precipitation intensity is defined as
.. math::
\frac{\sum_{i=0}^n p_i [p_i \leq thresh]}{\sum_{i=0}^n [p_i \leq thresh]}
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the average
precipitation fallen over days with precipitation >= 5 mm at seasonal
frequency, ie DJF, MAM, JJA, SON, DJF, etc.:
>>> pr = xr.open_dataset('pr.day.nc')
>>> daily_int = daily_pr_intensity(pr, thresh='5 mm/day', freq="QS-DEC")
"""
t = utils.convert_units_to(thresh, pr, "hydro")
# put pr=0 for non wet-days
pr_wd = xr.where(pr >= t, pr, 0)
pr_wd.attrs["units"] = pr.units
# sum over wanted period
s = pr_wd.resample(time=freq).sum(dim="time", keep_attrs=True)
sd = utils.pint_multiply(s, 1 * units.day, "mm")
# get number of wetdays over period
wd = wetdays(pr, thresh=thresh, freq=freq)
return sd / wd
@declare_units("days", pr="[precipitation]", thresh="[precipitation]")
def maximum_consecutive_wet_days(pr, thresh="1 mm/day", freq="YS"):
r"""Consecutive wet days.
Returns the maximum number of consecutive wet days.
Parameters
---------
pr : xarray.DataArray
Mean daily precipitation flux [Kg m-2 s-1] or [mm]
thresh : str
Threshold precipitation on which to base evaluation [Kg m-2 s-1] or [mm]. Default : '1 mm/day'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The maximum number of consecutive wet days.
Notes
-----
Let :math:`\mathbf{x}=x_0, x_1, \ldots, x_n` be a daily precipitation series and
:math:`\mathbf{s}` be the sorted vector of indices :math:`i` where :math:`[p_i > thresh] \neq [p_{i+1} >
thresh]`, that is, the days when the precipitation crosses the *wet day* threshold.
Then the maximum number of consecutive wet days is given by
.. math::
\max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [x_{s_j} > 0\celsius]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
the start and end of the series, but the numerical algorithm does.
"""
thresh = utils.convert_units_to(thresh, pr, "hydro")
group = (pr > thresh).resample(time=freq)
return group.apply(rl.longest_run, dim="time")
@declare_units("C days", tas="[temperature]", thresh="[temperature]")
def cooling_degree_days(tas, thresh="18 degC", freq="YS"):
r"""Cooling degree days
Sum of degree days above the temperature threshold at which spaces are cooled.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Temperature threshold above which air is cooled. Default : '18 degC'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Cooling degree days
Notes
-----
Let :math:`x_i` be the daily mean temperature at day :math:`i`. Then the cooling degree days above
temperature threshold :math:`thresh` over period :math:`\phi` is given by:
.. math::
\sum_{i \in \phi} (x_{i}-{thresh} [x_i > thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
"""
thresh = utils.convert_units_to(thresh, tas)
return (
tas.pipe(lambda x: x - thresh).clip(min=0).resample(time=freq).sum(dim="time")
)
@declare_units("", tas="[temperature]", thresh="[temperature]")
def freshet_start(tas, thresh="0 degC", window=5, freq="YS"):
r"""First day consistently exceeding threshold temperature.
Returns first day of period where a temperature threshold is exceeded
over a given number of days.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default '0 degC'
window : int
Minimum number of days with temperature above threshold needed for evaluation
freq : str, optional
Resampling frequency
Returns
-------
float
Day of the year when temperature exceeds threshold over a given number of days for the first time. If there are
no such day, return np.nan.
Notes
-----
Let :math:`x_i` be the daily mean temperature at day of the year :math:`i` for values of :math:`i` going from 1
to 365 or 366. The start date of the freshet is given by the smallest index :math:`i` for which
.. math::
\prod_{j=i}^{i+w} [x_j > thresh]
is true, where :math:`w` is the number of days the temperature threshold should be exceeded, and :math:`[P]` is
1 if :math:`P` is true, and 0 if false.
"""
thresh = utils.convert_units_to(thresh, tas)
over = tas > thresh
group = over.resample(time=freq)
return group.apply(rl.first_run_ufunc, window=window, index="dayofyear")
@declare_units("C days", tas="[temperature]", thresh="[temperature]")
def growing_degree_days(tas, thresh="4.0 degC", freq="YS"):
r"""Growing degree-days over threshold temperature value [℃].
The sum of degree-days over the threshold temperature.
Parameters
---------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '4.0 degC'.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The sum of growing degree-days above 4℃
Notes
-----
Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`. Then the
growing degree days are:
.. math::
GD4_j = \sum_{i=1}^I (TG_{ij}-{4} | TG_{ij} > {4}℃)
"""
thresh = utils.convert_units_to(thresh, tas)
return (
tas.pipe(lambda x: x - thresh).clip(min=0).resample(time=freq).sum(dim="time")
)
@declare_units("days", tas="[temperature]", thresh="[temperature]")
def growing_season_length(tas, thresh="5.0 degC", window=6, freq="YS"):
    r"""Growing season length.
    The number of days between the first occurrence of at least
    six consecutive days with mean daily temperature over 5℃ and
    the first occurrence of at least six consecutive days with
    mean daily temperature below 5℃ after July 1st in the northern
    hemisphere and January 1st in the southern hemisphere.
    Parameters
    ---------
    tas : xarray.DataArray
        Mean daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default: '5.0 degC'.
    window : int
        Minimum number of days with temperature above threshold to mark the beginning and end of growing season.
    freq : str, optional
        Resampling frequency
    Returns
    -------
    xarray.DataArray
        Growing season length.
    Notes
    -----
    Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then counted is
    the number of days between the first occurrence of at least 6 consecutive days with:
    .. math::
        TG_{ij} > 5 ℃
    and the first occurrence after 1 July of at least 6 consecutive days with:
    .. math::
        TG_{ij} < 5 ℃
    """
    # i = xr.DataArray(np.arange(tas.time.size), dims='time')
    # ind = xr.broadcast(i, tas)[0]
    #
    # c = ((tas > thresh) * 1).rolling(time=window).sum()
    # i1 = ind.where(c == window).resample(time=freq).min(dim='time')
    #
    # # Resample sets the time to T00:00.
    # i11 = i1.reindex_like(c, method='ffill')
    #
    # # TODO: Adjust for southern hemisphere
    #
    # #i2 = ind.where(c == 0).where(tas.time.dt.month >= 7)
    # # add check to make sure indice of end of growing season is after growing season start
    # i2 = ind.where((c==0) & (ind > i11)).where(tas.time.dt.month >= 7)
    #
    # d = i2 - i11
    #
    # # take min value (first occurence after july)
    # gsl = d.resample(time=freq).min(dim='time')
    #
    # # turn nan into 0
    # gsl = xr.where(np.isnan(gsl), 0, gsl)
    # compute growth season length on resampled data
    thresh = utils.convert_units_to(thresh, tas)
    # Rolling count of above-threshold days within the trailing `window` days;
    # c == window marks the end of a qualifying warm spell.
    c = ((tas > thresh) * 1).rolling(time=window).sum().chunk(tas.chunks)
    def compute_gsl(c):
        # Operates on one resampling period (e.g. one year) of `c`.
        nt = c.time.size
        # Integer day index 0..nt-1, broadcast to c's full shape.
        i = xr.DataArray(np.arange(nt), dims="time").chunk({"time": 1})
        ind = xr.broadcast(i, c)[0].chunk(c.chunks)
        # First day index where a full warm spell has completed; nt if none.
        i1 = ind.where(c == window).min(dim="time")
        i1 = xr.where(np.isnan(i1), nt, i1)
        # Broadcast the start index back over the time axis for comparison.
        i11 = i1.reindex_like(c, method="ffill")
        # First day after the start, in July or later, with no warm days in
        # the trailing window (c == 0); nt if the season never ends.
        i2 = ind.where((c == 0) & (ind > i11)).where(c.time.dt.month >= 7)
        i2 = xr.where(np.isnan(i2), nt, i2)
        # NOTE(review): the commented-out prototype above used `i2 - i11`;
        # here `i1` (already reduced over time) is subtracted instead —
        # confirm this is the intended season-length definition.
        d = (i2 - i1).min(dim="time")
        return d
    gsl = c.resample(time=freq).apply(compute_gsl)
    return gsl
@declare_units("days", tasmax="[temperature]", thresh="[temperature]")
def heat_wave_index(tasmax, thresh="25.0 degC", window=5, freq="YS"):
    r"""Heat wave index.

    Number of days that are part of a heatwave, defined as five or more
    consecutive days over 25℃.

    Parameters
    ----------
    tasmax : xarrray.DataArray
        Maximum daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to designate a heatwave [℃] or [K]. Default: '25.0 degC'.
    window : int
        Minimum number of days with temperature above threshold to qualify as a heatwave.
    freq : str, optional
        Resampling frequency

    Returns
    -------
    DataArray
        Heat wave index.
    """
    threshold = utils.convert_units_to(thresh, tasmax)
    hot_days = tasmax > threshold
    # Count days belonging to runs of at least `window` consecutive hot days.
    return hot_days.resample(time=freq).apply(
        rl.windowed_run_count, window=window, dim="time"
    )
@declare_units("C days", tas="[temperature]", thresh="[temperature]")
def heating_degree_days(tas, thresh="17.0 degC", freq="YS"):
    r"""Heating degree days.

    Sum of degree days below the temperature threshold at which spaces are heated.

    Parameters
    ----------
    tas : xarray.DataArray
        Mean daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default: '17.0 degC'.
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Heating degree days index.

    Notes
    -----
    Let :math:`TG_{ij}` be the daily mean temperature at day :math:`i` of period :math:`j`. Then the
    heating degree days are:

    .. math::

        HD17_j = \sum_{i=1}^{I} (17℃ - TG_{ij})
    """
    threshold = utils.convert_units_to(thresh, tas)
    # Daily deficit below the threshold, floored at zero, summed per period.
    deficit = (threshold - tas).clip(0)
    return deficit.resample(time=freq).sum(dim="time")
@declare_units("days", tasmin="[temperature]", thresh="[temperature]")
def tn_days_below(tasmin, thresh="-10.0 degC", freq="YS"):
    r"""Number of days with tmin below a threshold.

    Number of days where daily minimum temperature is below a threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
        Minimum daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default: '-10 degC'.
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Number of days Tmin < threshold.

    Notes
    -----
    Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TN_{ij} < Threshold [℃]
    """
    # Fixes: title was truncated ("… below a threshold in"), and the Notes
    # formula referred to TX (daily max) in a daily-minimum index.
    thresh = utils.convert_units_to(thresh, tasmin)
    return utils.threshold_count(tasmin, "<", thresh, freq)
@declare_units("days", tasmax="[temperature]", thresh="[temperature]")
def tx_days_above(tasmax, thresh="25.0 degC", freq="YS"):
    r"""Number of summer days.

    Number of days where daily maximum temperature exceed a threshold.

    Parameters
    ----------
    tasmax : xarray.DataArray
        Maximum daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default: '25 degC'.
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Number of summer days.

    Notes
    -----
    Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TX_{ij} > Threshold [℃]
    """
    threshold = utils.convert_units_to(thresh, tasmax)
    # 1 where the daily max exceeds the threshold, 0 elsewhere.
    exceeded = (tasmax > threshold) * 1
    return exceeded.resample(time=freq).sum(dim="time")
@declare_units("days", tasmax="[temperature]", thresh="[temperature]")
def warm_day_frequency(tasmax, thresh="30 degC", freq="YS"):
    r"""Frequency of extreme warm days.

    Return the number of days with tasmax > thresh per period.

    Parameters
    ----------
    tasmax : xarray.DataArray
        Maximum daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default : '30 degC'
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Number of days exceeding threshold.

    Notes
    -----
    Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TX_{ij} > Threshold [℃]
    """
    # Fixes: tasmax was documented as "Mean daily temperature"; the Notes
    # header was malformed ("Notes:"), and its formula used TN instead of TX.
    thresh = utils.convert_units_to(thresh, tasmax)
    events = (tasmax > thresh) * 1
    return events.resample(time=freq).sum(dim="time")
@declare_units("days", tasmin="[temperature]", thresh="[temperature]")
def warm_night_frequency(tasmin, thresh="22 degC", freq="YS"):
    r"""Frequency of extreme warm nights.

    Return the number of days with tasmin > thresh per period.

    Parameters
    ----------
    tasmin : xarray.DataArray
        Minimum daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default : '22 degC'
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        The number of days with tasmin > thresh per period
    """
    threshold = utils.convert_units_to(thresh, tasmin)
    # 1 where the nightly minimum exceeds the threshold, summed per period.
    warm_nights = (tasmin > threshold) * 1
    return warm_nights.resample(time=freq).sum(dim="time")
@declare_units("days", pr="[precipitation]", thresh="[precipitation]")
def wetdays(pr, thresh="1.0 mm/day", freq="YS"):
    r"""Wet days.

    Return the total number of days during period with precipitation over threshold.

    Parameters
    ----------
    pr : xarray.DataArray
        Daily precipitation [mm]
    thresh : str
        Precipitation value over which a day is considered wet. Default: '1 mm/day'.
    freq : str, optional
        Resampling frequency defining the periods
        defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.

    Returns
    -------
    xarray.DataArray
        The number of wet days for each period [day]

    Examples
    --------
    The following would compute for each grid cell of file `pr.day.nc` the number days
    with precipitation over 5 mm at the seasonal frequency, ie DJF, MAM, JJA, SON, DJF, etc.:

    >>> pr = xr.open_dataset('pr.day.nc').pr
    >>> wd = wetdays(pr, thresh='5 mm/day', freq="QS-DEC")
    """
    # Fixes: the docstring example called wetdays with a nonexistent `pr_min`
    # argument and passed the whole Dataset instead of the `pr` variable.
    thresh = utils.convert_units_to(thresh, pr, "hydro")
    wd = (pr >= thresh) * 1
    return wd.resample(time=freq).sum(dim="time")
@declare_units("days", pr="[precipitation]", thresh="[precipitation]")
def maximum_consecutive_dry_days(pr, thresh="1 mm/day", freq="YS"):
    r"""Maximum number of consecutive dry days.

    Return the maximum number of consecutive days within the period where
    precipitation is below a certain threshold.

    Parameters
    ----------
    pr : xarray.DataArray
        Mean daily precipitation flux [mm]
    thresh : str
        Threshold precipitation on which to base evaluation [mm]. Default : '1 mm/day'
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        The maximum number of consecutive dry days.

    Notes
    -----
    Let :math:`\mathbf{p}=p_0, p_1, \ldots, p_n` be a daily precipitation series and :math:`thresh` the threshold
    under which a day is considered dry. Then let :math:`\mathbf{s}` be the sorted vector of indices :math:`i` where
    :math:`[p_i < thresh] \neq [p_{i+1} < thresh]`, that is, the days when the precipitation crosses the threshold.
    Then the maximum number of consecutive dry days is given by

    .. math::

        \max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [p_{s_j} > thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
    the start and end of the series, but the numerical algorithm does.
    """
    threshold = utils.convert_units_to(thresh, pr, "hydro")
    dry = pr < threshold
    # Longest run of consecutive True (dry) values within each period.
    return dry.resample(time=freq).apply(rl.longest_run, dim="time")
@declare_units("days", tasmax="[temperature]", thresh="[temperature]")
def maximum_consecutive_tx_days(tasmax, thresh="25 degC", freq="YS"):
    r"""Maximum number of consecutive summer days (Tx > 25℃).

    Return the maximum number of consecutive days within the period where temperature is above a certain threshold.

    Parameters
    ----------
    tasmax : xarray.DataArray
        Max daily temperature [K]
    thresh : str
        Threshold temperature [K].
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        The maximum number of consecutive summer days.

    Notes
    -----
    Let :math:`\mathbf{t}=t_0, t_1, \ldots, t_n` be a daily maximum temperature series and :math:`thresh` the threshold
    above which a day is considered a summer day. Let :math:`\mathbf{s}` be the sorted vector of indices :math:`i`
    where :math:`[t_i < thresh] \neq [t_{i+1} < thresh]`, that is, the days when the temperature crosses the threshold.
    Then the maximum number of consecutive summer days is given by

    .. math::

        \max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [t_{s_j} > thresh]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
    the start and end of the series, but the numerical algorithm does.
    """
    # Fix: the Notes previously said "maximum number of consecutive dry days",
    # copied from the precipitation index above; this is a summer-days index.
    t = utils.convert_units_to(thresh, tasmax)
    group = (tasmax > t).resample(time=freq)
    return group.apply(rl.longest_run, dim="time")
@declare_units("days", tasmin="[temperature]", thresh="[temperature]")
def tropical_nights(tasmin, thresh="20.0 degC", freq="YS"):
    r"""Tropical nights.

    The number of days with minimum daily temperature above threshold.

    Parameters
    ----------
    tasmin : xarray.DataArray
        Minimum daily temperature [℃] or [K]
    thresh : str
        Threshold temperature on which to base evaluation [℃] or [K]. Default: '20 degC'.
    freq : str, optional
        Resampling frequency

    Returns
    -------
    xarray.DataArray
        Number of days with minimum daily temperature above threshold.

    Notes
    -----
    Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
    counted is the number of days where:

    .. math::

        TN_{ij} > Threshold [℃]
    """
    thresh = utils.convert_units_to(thresh, tasmin)
    # Fix: the previous implementation piped through a lambda that ignored its
    # argument and closed over `tasmin` instead; compute the indicator directly.
    warm_nights = (tasmin > thresh) * 1
    return warm_nights.resample(time=freq).sum(dim="time")
| StarcoderdataPython |
5127227 | <gh_stars>10-100
from attrdict import AttrDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
from torch.distributions.kl import kl_divergence
import numpy as np
from genesis.modules.unet import UNet
import genesis.modules.seq_att as seq_att
from genesis.utils.misc import get_kl
from .base import AutoEncoderModule
from absl import logging
from genesis.utils.misc import average_ari
from .monet import MONet
from .eqv_vae import EquivariantVAE
class EquivariantComponentVAE(EquivariantVAE):
    """Component VAE for (equivariant) MONet-style scene decomposition.

    Thin wrapper over :class:`EquivariantVAE` that adds one extra decoder
    output channel (the reconstructed mask channel, hence ``out_channels+1``)
    and a ``forward`` that encodes the input once per attention mask.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 n_channels,
                 img_size,
                 dim_latent,
                 activation=F.relu,
                 readout_fn=None,
                 fiber_group='trivial',
                 n_rot=1,
                 avg_pool_size=1,
                 optim_lr=0.0001,
                 profiler=None):
        # out_channels + 1: the decoder reconstructs the mask channel as well.
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels+1,
                         n_channels=n_channels,
                         img_size=img_size,
                         dim_latent=dim_latent,
                         activation=activation,
                         readout_fn=readout_fn,
                         fiber_group=fiber_group,
                         n_rot=n_rot,
                         avg_pool_size=avg_pool_size,
                         optim_lr=optim_lr,
                         profiler=profiler)

    def forward(self, x, log_mask):
        """Encode/decode ``x`` under one or several log-masks.

        Args:
            x: input image batch.
            log_mask: a log-mask tensor, or a list/tuple of K such tensors
                (one per slot).

        Returns:
            Tuple ``(x_r_k, stats)``: K-tuple of per-slot reconstructions and
            an AttrDict with per-slot posterior statistics (mu_k, sigma_k, z_k).
        """
        K = 1
        if isinstance(log_mask, (list, tuple)):
            K = len(log_mask)
            # Repeat x along batch dimension so each slot gets its own copy.
            x = x.repeat(K, 1, 1, 1)
            # Concat log_m_k along batch dimension.
            log_mask = torch.cat(log_mask, dim=0)
        # -- Encode
        mask = log_mask.exp()
        # Bug fix: use an out-of-place multiply. The previous `x *= mask`
        # mutated the caller's tensor in place whenever K == 1 (no repeat()
        # had created a fresh tensor), corrupting the input and risking
        # autograd errors on leaf/view tensors.
        x = x * mask
        x = torch.cat((x, mask), dim=1)
        mu, log_sigma_sq, crs, z_eqv = self.encode(x)
        sigma = torch.exp(log_sigma_sq / 2.)
        z = self.reparameterize(mu, log_sigma_sq)
        x_r = self.decode(z, crs)
        # -- Track quantities of interest and return
        x_r_k = torch.chunk(x_r, K, dim=0)
        z_k = torch.chunk(z, K, dim=0)
        mu_k = torch.chunk(mu, K, dim=0)
        sigma_k = torch.chunk(sigma, K, dim=0)
        stats = AttrDict(mu_k=mu_k, sigma_k=sigma_k, z_k=z_k)
        return x_r_k, stats
class EquivariantMONet(MONet):
    """MONet variant whose component VAE is an EquivariantComponentVAE."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 n_channels,
                 img_size,
                 dim_latent,
                 activation=torch.nn.ReLU(),
                 K_steps=5,
                 prior_mode='softmax',
                 montecarlo_kl=False,
                 pixel_bound=True,
                 kl_l_beta=0.5,
                 kl_m_beta=0.5,
                 pixel_std_fg=0.1,
                 pixel_std_bg=0.1,
                 optimizer='ADAM',
                 fiber_group='trivial',
                 n_rot=1,
                 avg_pool_size=3):
        # NOTE(review): these attributes are assigned *before* calling
        # super().__init__, presumably because the MONet base constructor
        # invokes _create_networks(), which reads them — confirm against MONet.
        self.ldim = dim_latent
        self.fiber_group = fiber_group
        self.n_rot = n_rot
        self.avg_pool_size = avg_pool_size
        super().__init__(in_channels=in_channels,
                         out_channels=out_channels,
                         n_channels=n_channels,
                         img_size=img_size,
                         dim_latent=dim_latent,
                         activation=activation,
                         K_steps=K_steps,
                         prior_mode=prior_mode,
                         montecarlo_kl=montecarlo_kl,
                         pixel_bound=pixel_bound,
                         kl_l_beta=kl_l_beta,
                         kl_m_beta=kl_m_beta,
                         pixel_std_fg=pixel_std_fg,
                         pixel_std_bg=pixel_std_bg,
                         optimizer=optimizer)

    def _create_networks(self):
        # Attention core: UNet depth derived from the image size
        # (log2(img_size) - 1 levels).
        core = UNet(int(np.log2(self.img_size)-1), 32)
        self.att_process = seq_att.SimpleSBP(core)
        # - Component VAE (input channels +1 for the attention mask channel).
        self.comp_vae = EquivariantComponentVAE(self.in_channels+1,
                                                self.out_channels,
                                                self.n_channels,
                                                self.img_size,
                                                self.dim_latent,
                                                activation=F.relu,
                                                readout_fn=None,
                                                fiber_group=self.fiber_group,
                                                n_rot=self.n_rot,
                                                avg_pool_size=self.avg_pool_size)
        # Raw (unbounded) pixel outputs from the component VAE.
        self.comp_vae.pixel_bound = False
| StarcoderdataPython |
class JobState(basestring):
    """
    Jobs execute as self-contained state machines.  They follow a
    series of careful steps from creation to destruction.  These
    steps are dictated by the states that they find themselves in as
    well as the allowable list of states they may transition into.
    The state can be an indicator of whether a job is executing and
    if not, why that is the case.

    Possible values:
    <ul>
      <li> "initial"    - Initializing,
      <li> "queued"     - Queued,
      <li> "running"    - Running,
      <li> "waiting"    - Waiting For Another Job,
      <li> "pausing"    - Entering Paused State,
      <li> "paused"     - Paused,
      <li> "quitting"   - Entering Quit State,
      <li> "success"    - Succeeded,
      <li> "failure"    - Failed,
      <li> "reschedule" - Forcing Reschedule,
      <li> "error"      - Internal Error,
      <li> "quit"       - Quit,
      <li> "dead"       - Died,
      <li> "unknown"    - Unknown,
      <li> "restart"    - Forcing Restart,
      <li> "dormant"    - Waiting For External Event
    </ul>
    """

    @staticmethod
    def get_api_name():
        # Wire name of this type in the (Python 2 / `basestring`-era) API.
        return "job-state"
1900877 | <filename>xTool/algorithms/collections/attrdict.py
# -*- coding: utf-8 -*-
"""
可以使用第三方库:attrdict
"""
try:
from collections import UserDict
except ImportError:
pass
from typing import Any
try:
import typing # noqa
_ObjectDictBase = typing.Dict[str, typing.Any]
except ImportError:
_ObjectDictBase = dict
class AttrDict(dict):
    """A dict whose items can also be read and written as attributes.

    Dunder attribute names are stored as real attributes; everything else
    goes into the mapping.  ``+`` merges into a new AttrDict, ``+=`` merges
    in place.
    """

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        is_dunder = name.startswith("__") and name.endswith("__")
        if is_dunder:
            super().__setattr__(name, value)
        else:
            self[name] = value

    def __iadd__(self, other):
        self.update(other)
        return self

    def __add__(self, other):
        merged = AttrDict(self)
        merged.update(other)
        return merged
class ObjectDict(_ObjectDictBase):
    """Makes a dictionary behave like an object, with attribute-style access."""

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value
class Row(dict):
    """A dict (e.g. a database row) exposing its keys as attributes."""

    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)
class FancyDict(dict):
    """A dict with attribute-style get, set and delete for its items.

    Dunder attribute names bypass the mapping and are stored as real
    attributes.
    """

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as missing:
            raise AttributeError(missing)

    def __setattr__(self, key, value):
        # Leave built-in/internal (dunder) attributes alone.
        if key.startswith("__") and key.endswith("__"):
            super().__setattr__(key, value)
        else:
            self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as missing:
            raise AttributeError(missing)
class StripDict(UserDict):
    """UserDict that strips whitespace on lookup.

    String keys are stripped before the lookup; non-empty string values are
    stripped before being returned.
    """

    def __getitem__(self, key):
        lookup_key = key.strip() if isinstance(key, str) else key
        value = super().__getitem__(lookup_key)
        if isinstance(value, str) and value:
            return value.strip()
        return value
# based on http://stackoverflow.com/a/2082169/151401
class CaseInsensitiveDict(dict):
    """ A case-insensitive dictionary for header storage.
        All keys are normalized to lowercase on write and on lookup.
        A limitation of this approach is the inability to store
        multiple instances of the same header. If that is changed
        then we suddenly care about the assembly rules in sec 2.3.
    """

    def __init__(self, d=None, **kwargs):
        super().__init__()
        # Bug fix: keyword arguments were previously forwarded straight to
        # dict.__init__, which stored their keys with the original case, so
        # CaseInsensitiveDict(Foo=1)["foo"] raised KeyError.  Lowercase keys
        # from both sources; `d` keeps precedence over keyword arguments.
        if kwargs:
            self.update((k.lower(), v) for k, v in kwargs.items())
        if d:
            self.update((k.lower(), v) for k, v in d.items())

    def __setitem__(self, key, value):
        super().__setitem__(key.lower(), value)

    def __getitem__(self, key):
        return super().__getitem__(key.lower())

    def __contains__(self, key):
        return super().__contains__(key.lower())
class ConstantDict(dict):
    """Constant dictionary: item assignment raises TypeError."""

    def __setitem__(self, key: Any, value: Any):
        message = "modifying %s object values is not allowed" % self.__class__.__name__
        raise TypeError(message)
def get_filter_obj(filter_data, filter_keys):
    """Build a FancyDict holding only *filter_keys*, read from *filter_data*.

    Keys missing from filter_data (or when filter_data is None) map to None.
    """
    source = filter_data or {}
    result = FancyDict()
    for key in filter_keys:
        result[key] = source.get(key)
    return result
| StarcoderdataPython |
11383112 | <gh_stars>0
# Adapted based on SOM benchmark.
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import sys
from benchmark import Benchmark
from som.identity_dictionary import IdentityDictionary
from som.identity_set import IdentitySet
from som.set import Set
from som.vector import Vector
# Havlak needs more stack space in CPython
sys.setrecursionlimit(1500)
class Havlak(Benchmark):
    """Harness for the Havlak loop-recognition benchmark (SOM benchmark port)."""

    def inner_benchmark_loop(self, inner_iterations):
        # Build a CFG scaled by `inner_iterations` dummy find-loop passes and
        # verify the resulting [num_loops, num_nodes] against known values.
        return self._verify_result(
            _LoopTesterApp().main(inner_iterations, 50, 10, 10, 5), inner_iterations
        )

    @staticmethod
    def _verify_result(result, inner_iterations):
        # Expected results exist only for the canonical iteration counts.
        if inner_iterations == 15_000:
            return result[0] == 46_602 and result[1] == 5213
        if inner_iterations == 1_500:
            return result[0] == 6_102 and result[1] == 5213
        if inner_iterations == 150:
            return result[0] == 2_052 and result[1] == 5213
        if inner_iterations == 15:
            return result[0] == 1_647 and result[1] == 5213
        if inner_iterations == 1:
            return result[0] == 1_605 and result[1] == 5213
        print("No verification result for " + str(inner_iterations) + " found")
        print("Result is: " + str(result[0]) + ", " + str(result[1]))
        return False

    def benchmark(self):
        # Unused: this benchmark overrides inner_benchmark_loop instead.
        raise Exception("should not be reached")

    def verify_result(self, result):
        # Unused: verification happens inside inner_benchmark_loop.
        raise Exception("should not be reached")
class _BasicBlock:
    """A control-flow-graph node: a numeric name plus in/out edge lists."""

    def __init__(self, name):
        self._name = name
        self.in_edges = Vector(2)
        self.out_edges = Vector(2)

    def get_num_pred(self):
        # Number of predecessors (incoming edges).
        return self.in_edges.size()

    def add_out_edge(self, to):
        self.out_edges.append(to)

    def add_in_edge(self, from_):
        self.in_edges.append(from_)

    def custom_hash(self):
        # The numeric node name doubles as the hash value.
        return self._name
class _BasicBlockEdge:
    """Directed CFG edge; constructing one wires both endpoints and registers
    the edge with the graph."""

    def __init__(self, cfg, from_name, to_name):
        # create_node returns the existing node if the name is already known.
        self._from = cfg.create_node(from_name)
        self._to = cfg.create_node(to_name)
        self._from.add_out_edge(self._to)
        self._to.add_in_edge(self._from)
        cfg.add_edge(self)
class _ControlFlowGraph:
    """Basic blocks plus edges; the first node created becomes the start block."""

    def __init__(self):
        self.start_basic_block = None
        self.basic_blocks = Vector()  # nodes, indexed by their numeric name
        self._edge_list = Vector()

    def create_node(self, name):
        # Return the node called `name`, creating and indexing it if needed.
        if self.basic_blocks.at(name):
            node = self.basic_blocks.at(name)
        else:
            node = _BasicBlock(name)
            self.basic_blocks.at_put(name, node)
        # The very first node becomes the CFG entry point.
        if self.num_nodes() == 1:
            self.start_basic_block = node
        return node

    def add_edge(self, edge):
        self._edge_list.append(edge)

    def num_nodes(self):
        return self.basic_blocks.size()
class _LoopStructureGraph:
    """Forest of discovered loops, rooted at an artificial top-level loop."""

    def __init__(self):
        self._loop_counter = 0
        self._loops = Vector()
        # Artificial root loop covering the whole procedure.
        self._root = _SimpleLoop(None, True)
        self._root.set_nesting_level(0)
        self._root.counter = self._loop_counter
        self._loop_counter += 1
        self._loops.append(self._root)

    def create_new_loop(self, bb, is_reducible):
        loop = _SimpleLoop(bb, is_reducible)
        loop.counter = self._loop_counter
        self._loop_counter += 1
        self._loops.append(loop)
        return loop

    def calculate_nesting_level(self):
        # Parent any orphan loop to the root, then recompute levels from it.
        def each(liter):
            if not liter.is_root:
                if liter.parent is None:
                    liter.set_parent(self._root)
        self._loops.for_each(each)
        self._calculate_nesting_level_rec(self._root, 0)

    def _calculate_nesting_level_rec(self, loop, depth):
        # depth counts down from the root; nesting_level bubbles up from leaves.
        loop.depth_level = depth
        def each(liter):
            self._calculate_nesting_level_rec(liter, depth + 1)
            loop.set_nesting_level(max(loop.nesting_level, 1 + liter.nesting_level))
        loop.children.for_each(each)

    def num_loops(self):
        return self._loops.size()
class _SimpleLoop:
    """One natural (or irreducible) loop: header, member blocks, child loops."""

    def __init__(self, bb, is_reducible):
        self._is_reducible = is_reducible
        self.parent = None
        self.is_root = False
        self.nesting_level = 0
        self._depth_level = 0
        self._counter = 0
        self._basic_blocks = IdentitySet()
        self.children = IdentitySet()
        # bb is None only for the artificial root loop.
        if bb is not None:
            self._basic_blocks.add(bb)
            self._header = bb

    def add_node(self, bb):
        self._basic_blocks.add(bb)

    def add_child_loop(self, loop):
        self.children.add(loop)

    def set_parent(self, parent):
        # Setting the parent also registers this loop as the parent's child.
        self.parent = parent
        self.parent.add_child_loop(self)

    def set_nesting_level(self, level):
        self.nesting_level = level
        # Nesting level 0 marks the artificial root loop.
        if level == 0:
            self.is_root = True
class _UnionFindNode:
    """Union-find node pairing a basic block with its DFS number and loop."""

    def __init__(self):
        self.parent = None
        self.bb = None
        self.dfs_number = 0
        self.loop = None

    def init_node(self, bb, dfs_number):
        # A node starts as its own set representative.
        self.parent = self
        self.bb = bb
        self.dfs_number = dfs_number
        self.loop = None

    def find_set(self):
        """Union-find `find`, compressing the visited path.

        Nodes along the path are re-pointed at this node's parent (one level
        of compression, matching the SOM original) before the representative
        is returned.
        """
        node_list = Vector()
        node = self
        while node is not node.parent:
            if node.parent is not node.parent.parent:
                node_list.append(node)
            node = node.parent
        node_list.for_each(lambda i: i.union(self.parent))
        return node

    def union(self, basic_block):
        # Point this node's set at the given representative.
        self.parent = basic_block
class _LoopTesterApp:
    """Builds synthetic CFGs of known shape and runs the loop finder on them."""

    def __init__(self):
        self._cfg = _ControlFlowGraph()
        self._lsg = _LoopStructureGraph()
        self._cfg.create_node(0)

    def _build_diamond(self, start):
        # start -> {start+1, start+2} -> start+3; returns the join node.
        bb0 = start
        _BasicBlockEdge(self._cfg, bb0, bb0 + 1)
        _BasicBlockEdge(self._cfg, bb0, bb0 + 2)
        _BasicBlockEdge(self._cfg, bb0 + 1, bb0 + 3)
        _BasicBlockEdge(self._cfg, bb0 + 2, bb0 + 3)
        return bb0 + 3

    def _build_connect(self, start, end_):
        _BasicBlockEdge(self._cfg, start, end_)

    def _build_straight(self, start, n):
        # Chain of n edges: start -> start+1 -> ... -> start+n; returns the end.
        for i in range(n):
            self._build_connect(start + i, start + i + 1)
        return start + n

    def _build_base_loop(self, from_):
        # Canonical loop body: two diamonds with back edges, closing on from_.
        header = self._build_straight(from_, 1)
        diamond1 = self._build_diamond(header)
        d11 = self._build_straight(diamond1, 1)
        diamond2 = self._build_diamond(d11)
        footer = self._build_straight(diamond2, 1)
        self._build_connect(diamond2, d11)
        self._build_connect(diamond1, header)
        self._build_connect(footer, from_)
        footer = self._build_straight(footer, 1)
        return footer

    def main(
        self, num_dummy_loops, find_loop_iterations, par_loops, ppar_loops, pppar_loops
    ):
        # Returns [number of loops found, number of CFG nodes].
        self._construct_simple_cfg()
        self._add_dummy_loops(num_dummy_loops)
        self._construct_cfg(par_loops, ppar_loops, pppar_loops)
        self._find_loops(self._lsg)
        # Repeat the analysis on throwaway graphs; this is the timed work.
        for _ in range(find_loop_iterations):
            self._find_loops(_LoopStructureGraph())
        self._lsg.calculate_nesting_level()
        return [self._lsg.num_loops(), self._cfg.num_nodes()]

    def _construct_cfg(self, par_loops, ppar_loops, pppar_loops):
        n = 2
        # Fan-out of simple edges from node 2.
        for _ in range(par_loops):
            self._cfg.create_node(n + 1)
            self._build_connect(2, n + 1)
            n += 1
        # Nested loops: ppar_loops outer loops, each wrapping pppar_loops
        # base loops.
        for _ in range(ppar_loops):
            top = n
            n = self._build_straight(n, 1)
            for _ in range(pppar_loops):
                n = self._build_base_loop(n)
            bottom = self._build_straight(n, 1)
            self._build_connect(n, top)
            n = bottom
        self._build_connect(n, 1)

    def _add_dummy_loops(self, num_dummy_loops):
        # Extra loop-finding passes used to scale the benchmark's work.
        for _ in range(num_dummy_loops):
            self._find_loops(self._lsg)

    def _find_loops(self, loop_structure):
        finder = _HavlakLoopFinder(self._cfg, loop_structure)
        finder.find_loops()

    def _construct_simple_cfg(self):
        self._cfg.create_node(0)
        self._build_base_loop(0)
        self._cfg.create_node(1)
        _BasicBlockEdge(self._cfg, 0, 2)
# Sentinel DFS number for nodes not yet reached by the depth-first search.
_UNVISITED = 2_147_483_647
# Safety cap on non-back-edge predecessors; exceeding it aborts the analysis.
_MAXNONBACKPREDS = 32 * 1024
class _BasicBlockClass(Enum):
    """Classification of a CFG node during Havlak loop recognition."""

    BB_TOP = 0  # uninitialized
    BB_NONHEADER = 1  # a regular BB
    BB_REDUCIBLE = 2  # reducible loop
    BB_SELF = 3  # single BB loop
    BB_IRREDUCIBLE = 4  # irreducible loop
    BB_DEAD = 5  # a dead BB
    BB_LAST = 6  # Sentinel
class _HavlakLoopFinder:
    """Havlak's loop-nesting-forest algorithm over a CFG.

    Numbers the blocks by DFS pre-order, classifies predecessor edges as back
    or non-back edges, then walks the nodes in reverse DFS order building
    loops with a union-find structure (steps named after Havlak's paper).
    """

    def __init__(self, cfg, lsg):
        self._cfg = cfg
        self._lsg = lsg
        # Per-DFS-number edge sets, rebuilt on every find_loops call.
        self._non_back_preds = Vector()
        self._back_preds = Vector()
        self._number = IdentityDictionary()  # basic block -> DFS number
        # Scratch arrays below are reused across runs and only grown.
        self._max_size = 0
        self._header = None
        self._type = None
        self._last = None
        self._nodes = None

    def _is_ancestor(self, w, v):
        # True iff w is an ancestor of v in the DFS spanning tree:
        # v lies within [w, last-descendant-of-w].
        return w <= v and v <= self._last[w]

    def _do_dfs(self, current_node, current):
        # Recursive pre-order DFS; records each node's number and the highest
        # number among its descendants (self._last).
        self._nodes[current].init_node(current_node, current)
        self._number.at_put(current_node, current)
        last_id = current
        outer_blocks = current_node.out_edges

        def each(target):
            nonlocal last_id
            if self._number.at(target) == _UNVISITED:
                last_id = self._do_dfs(target, last_id + 1)

        outer_blocks.for_each(each)
        self._last[current] = last_id
        return last_id

    def _init_all_nodes(self):
        # Mark everything unvisited, then number reachable nodes from the start.
        self._cfg.basic_blocks.for_each(lambda bb: self._number.at_put(bb, _UNVISITED))
        self._do_dfs(self._cfg.start_basic_block, 0)

    def _identify_edges(self, size):
        # Split each node's predecessors into back edges and non-back edges.
        for w in range(size):
            self._header[w] = 0
            self._type[w] = _BasicBlockClass.BB_NONHEADER
            node_w = self._nodes[w].bb
            if node_w is None:
                # Never reached by the DFS.
                self._type[w] = _BasicBlockClass.BB_DEAD
            else:
                self._process_edges(node_w, w)

    def _process_edges(self, node_w, w):
        if node_w.get_num_pred() > 0:
            def each(node_v):
                v = self._number.at(node_v)
                if v != _UNVISITED:
                    # Edge from a DFS descendant is a back edge into w.
                    if self._is_ancestor(w, v):
                        self._back_preds.at(w).append(v)
                    else:
                        self._non_back_preds.at(w).add(v)
            node_w.in_edges.for_each(each)

    def find_loops(self):
        """Run the analysis, populating the loop-structure graph."""
        if self._cfg.start_basic_block is None:
            return
        size = self._cfg.num_nodes()
        self._non_back_preds.remove_all()
        self._back_preds.remove_all()
        self._number.remove_all()
        # Grow (never shrink) the reusable scratch arrays.
        if size > self._max_size:
            self._header = [0] * size
            self._type = [None] * size
            self._last = [0] * size
            self._nodes = [None] * size
            self._max_size = size
        for i in range(size):
            self._non_back_preds.append(Set())
            self._back_preds.append(Vector())
            self._nodes[i] = _UnionFindNode()
        self._init_all_nodes()
        self._identify_edges(size)
        self._header[0] = 0
        # Visit nodes from innermost (highest DFS number) outwards.
        for w in range(size - 1, -1, -1):
            node_pool = Vector()
            node_w = self._nodes[w].bb
            if node_w is not None:
                self._step_d(w, node_pool)
                work_list = Vector()
                node_pool.for_each(work_list.append)
                if node_pool.size() != 0:
                    self._type[w] = _BasicBlockClass.BB_REDUCIBLE
                # Chase non-back predecessors transitively (step e).
                while not work_list.is_empty():
                    x = work_list.remove_first()
                    non_back_size = self._non_back_preds.at(x.dfs_number).size()
                    # Bail out on pathological graphs.
                    if non_back_size > _MAXNONBACKPREDS:
                        return
                    self._step_e_process_non_back_preds(w, node_pool, work_list, x)
                if node_pool.size() > 0 or self._type[w] == _BasicBlockClass.BB_SELF:
                    loop = self._lsg.create_new_loop(
                        node_w, self._type[w] != _BasicBlockClass.BB_IRREDUCIBLE
                    )
                    self._set_loop_attributes(w, node_pool, loop)

    def _step_e_process_non_back_preds(self, w, node_pool, work_list, x):
        def each(i):
            y = self._nodes[i]
            ydash = y.find_set()
            if not self._is_ancestor(w, ydash.dfs_number):
                # Entry from outside the DFS subtree: loop is irreducible.
                self._type[w] = _BasicBlockClass.BB_IRREDUCIBLE
                self._non_back_preds.at(w).add(ydash.dfs_number)
            else:
                if ydash.dfs_number != w:
                    if not node_pool.has_some(lambda e: e == ydash):
                        work_list.append(ydash)
                        node_pool.append(ydash)
        self._non_back_preds.at(x.dfs_number).for_each(each)

    def _set_loop_attributes(self, w, node_pool, loop):
        # w is the loop header; merge all pooled nodes into its set and
        # parent any inner loops to the new loop.
        self._nodes[w].loop = loop
        def each(node):
            self._header[node.dfs_number] = w
            node.union(self._nodes[w])
            if node.loop is not None:
                node.loop.set_parent(loop)
            else:
                loop.add_node(node.bb)
        node_pool.for_each(each)

    def _step_d(self, w, node_pool):
        # Seed the pool with the sets of all back-edge sources; a back edge
        # from w to itself marks a single-block loop.
        def each(v):
            if v != w:
                node_pool.append(self._nodes[v].find_set())
            else:
                self._type[w] = _BasicBlockClass.BB_SELF
        self._back_preds.at(w).for_each(each)
| StarcoderdataPython |
5167005 | <gh_stars>1-10
"""
Methods for reading and writing FITS files.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from pathlib import Path
from typing import Optional, Tuple, Union, overload
from typing_extensions import Literal
import json
from astropy.io import fits
import numpy as np
# -----------------------------------------------------------------------------
# FUNCTION DEFINITIONS
# -----------------------------------------------------------------------------
# typing.overload stubs: the return type of read_fits depends on the literal
# value of `return_header` (array only vs. an (array, header) tuple).
@overload
def read_fits(
    file_path: Union[Path, str], return_header: Literal[True]
) -> Tuple[np.ndarray, dict]:
    ...  # pragma: no cover
@overload
def read_fits(
    file_path: Union[Path, str], return_header: Literal[False]
) -> np.ndarray:
    ...  # pragma: no cover
def read_fits(
    file_path: Union[Path, str], return_header: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, dict]]:
    """
    Open a FITS file and return its contents as a numpy array.

    Args:
        file_path: Path of the FITS file to be read in.
        return_header: Whether to return the FITS header.

    Returns:
        A numpy array containing the contents of the given FITS file.
        Optionally also a dictionary containing the FITS header.
    """
    # Make sure that file_path is a proper Path
    file_path = Path(file_path)
    # Open the FITS file and read the contents as well as the header.
    # Copying into np.array / dict detaches the data from the HDU, so it
    # stays valid after the file is closed by the context manager.
    with fits.open(file_path.as_posix()) as hdulist:
        array = np.array(hdulist[0].data)
        header = dict(hdulist[0].header)
    # Return either the contents and the header, or just the contents
    if return_header:
        return array, header
    return array
def save_fits(
    array: np.ndarray,
    file_path: Union[Path, str],
    header: Optional[dict] = None,
    overwrite: bool = True,
) -> None:
    """
    Save a numpy array as a FITS file.

    Args:
        array: The numpy array to be saved to a FITS file.
        file_path: The path where to save the FITS file.
        header: A dictionary with additional header information.
        overwrite: Whether to overwrite an existing FITS file.
    """
    # Make sure that file_path is a proper Path
    file_path = Path(file_path)
    # If the array is boolean, convert to integer (FITS does not support bool)
    if array.dtype == 'bool':
        array = array.astype(int)
    # Create a new HDU for the array
    hdu = fits.PrimaryHDU(array)
    # If applicable, add header information
    if header is not None:
        for key, value in header.items():
            # FITS does not support list-type values in the header, which is
            # why these values need to be serialized to strings
            if isinstance(value, (list, tuple)):
                value = json.dumps(value)
            if isinstance(value, np.ndarray):
                value = json.dumps(value.tolist())
            # Take special care of NaN, because FITS can't deal with them
            # NOTE(review): np.isnan raises TypeError for non-numeric values
            # such as None or bytes — assumed not to occur in headers here.
            if not isinstance(value, str) and np.isnan(value):
                value = 'NaN'
            # Save value to HDU header. We cast the key to all-caps because
            # that is the default for FITS; that is, header fields that are
            # automatically, such as NAXIS, are always all-caps.
            hdu.header[key.upper()] = value
    # Save the HDU to the specified FITS file
    fits.HDUList([hdu]).writeto(file_path.as_posix(), overwrite=overwrite)
| StarcoderdataPython |
8149085 | import os
from decisionengine.framework.modules import Source
PRODUCES = ["available_cloud_budget"]
class CloudBudget(Source.Source):
    """Decision-engine source that publishes the available cloud budget.

    The budget is read from a plain-text file whose path is supplied by the
    ``budget_file`` configuration parameter.
    """

    def __init__(self, params_dict):
        self.budget_file = params_dict["budget_file"]

    def produces(self, schema_id_list):
        # This source always produces the same single key.
        return PRODUCES

    # The DataBlock given to the source is t=0
    def acquire(self):
        with open(self.budget_file, "r") as handle:
            raw_value = handle.read()
        return {"available_cloud_budget": float(raw_value.strip())}
| StarcoderdataPython |
12863470 | <reponame>mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep<filename>mapclientplugins/scaffoldfiniteelementmeshfitterstep/model/imageplanemodel.py
from opencmiss.utils.maths.algorithms import calculate_line_plane_intersection
class ImagePlaneModel(object):
    """Holds the state describing an image stack mapped onto a plane."""

    def __init__(self, master_model):
        self._master_model = master_model
        # Handles that are resolved later; None/empty placeholders for now.
        self._region = None
        self._duration_field = None
        self._image_based_material = None
        self._scaled_coordinate_field = None
        # Image metadata, populated via set_image_information().
        self._frames_per_second = -1
        self._image_dimensions = [-1, -1]
        self._images_file_name_listing = []
        self._time_sequence = []

    def set_image_information(self, frames_per_second, image_dimensions):
        """Record the frame rate and the pixel dimensions of the images."""
        self._frames_per_second = frames_per_second
        self._image_dimensions = image_dimensions
| StarcoderdataPython |
4873661 | <gh_stars>0
# pylint: skip-file
"""データセットの読み込みなど。"""
from .ic import *
from .keras import *
from .sklearn import *
from .ss import *
from . import coco
from . import voc
| StarcoderdataPython |
11344666 | # -*- coding: utf-8 -*-
# file: lca_bert.py
# author: yangheng <<EMAIL>>
# Copyright (C) 2020. All Rights Reserved.
import torch
import torch.nn as nn
import numpy as np
import copy
from pytorch_transformers.modeling_bert import BertPooler, BertSelfAttention
class SelfAttention(nn.Module):
    """Runs BertSelfAttention under an all-zero additive attention mask
    (i.e. no position masked out) and squashes the output through tanh."""

    def __init__(self, config, opt):
        super(SelfAttention, self).__init__()
        self.opt = opt
        self.config = config
        self.SA = BertSelfAttention(config)
        self.tanh = torch.nn.Tanh()

    def forward(self, inputs):
        # Build the zero mask on the configured device.
        mask_shape = (inputs.size(0), 1, 1, self.opt.max_seq_len)
        attention_mask = torch.tensor(np.zeros(mask_shape)).float().to(self.opt.device)
        attended = self.SA(inputs, attention_mask)
        return self.tanh(attended[0])
class LCA_BERT(nn.Module):
    """Local Context Attention BERT for aspect-based sentiment analysis.

    Encodes the input twice -- a "global" BERT pass over the full sequence
    and a "local" pass that is weighted by a local-context-focus (LCF)
    matrix -- fuses both views through a linear layer, and emits sentiment
    logits. When ``opt.lcp`` is set, token-level local-context logits and
    their target ids are returned as well.
    """
    def __init__(self, bert, opt):
        super(LCA_BERT, self).__init__()
        self.bert4global = bert
        # Either a fully separate BERT copy for the local view, or the same
        # shared instance, depending on opt.use_dual_bert.
        self.bert4local = copy.deepcopy(bert) if opt.use_dual_bert else self.bert4global
        # Embedding of the local-context ids, used to reweight global features.
        self.lc_embed = nn.Embedding(opt.max_seq_len, opt.embed_dim)
        self.opt = opt
        self.dropout = nn.Dropout(opt.dropout)
        # Separate self-attention heads for the local (L) and global (G) views.
        self.bert_SA_L = SelfAttention(bert.config, opt)
        self.bert_SA_G = SelfAttention(bert.config, opt)
        # Fuses the concatenated local+global features back down to embed_dim.
        self.linear = nn.Linear(opt.embed_dim * 2, opt.embed_dim)
        self.pool = BertPooler(bert.config)
        # These datasets get a 2-class sentiment head; all others get 3 classes.
        if self.opt.dataset in {'camera', 'notebook', 'car', 'phone'}:
            self.dense = nn.Linear(opt.embed_dim, 2)
        else:
            self.dense = nn.Linear(opt.embed_dim, 3)
        # Token-level (binary) classifier for the auxiliary LCA task.
        self.classifier = nn.Linear(opt.embed_dim, 2)
    def forward(self, inputs):
        # inputs: [global_indices, local_indices, segment_ids, lca_ids, lcf_matrix]
        # With BERT-SPC the global view uses the SPC-formatted input (index 0),
        # otherwise it falls back to the plain text indices (index 1).
        if self.opt.use_bert_spc:
            text_global_indices = inputs[0]
        else:
            text_global_indices = inputs[1]
        text_local_indices = inputs[1]
        bert_segments_ids = inputs[2]
        lca_ids = inputs[3]
        lcf_matrix = inputs[4]
        bert_global_out, _ = self.bert4global(text_global_indices, token_type_ids=bert_segments_ids)
        bert_local_out, _ = self.bert4local(text_local_indices)
        # LCA layer: reweight the global features by the embedded local-context ids.
        if self.opt.lca and 'lca' in self.opt.model_name:
            lc_embedding = self.lc_embed(lca_ids)
            bert_global_out = torch.mul(bert_global_out, lc_embedding)
        # # LCF-layer
        bert_local_out = torch.mul(bert_local_out, lcf_matrix)
        bert_local_out = self.bert_SA_L(bert_local_out)
        cat_features = torch.cat((bert_local_out, bert_global_out), dim=-1)
        cat_features = self.linear(cat_features)
        # Auxiliary token-level logits are computed BEFORE dropout/pooling.
        lca_logits = self.classifier(cat_features)
        lca_logits = lca_logits.view(-1, 2)
        lca_ids = lca_ids.view(-1)
        cat_features = self.dropout(cat_features)
        pooled_out = self.pool(cat_features)
        dense_out = self.dense(pooled_out)
        if self.opt.lcp:
            return dense_out, lca_logits, lca_ids
        else:
            return dense_out
| StarcoderdataPython |
9638871 | """Chase program: run the lights in sequence, one light on at a time.
"""
from collections import deque
from functools import partial
from itertools import cycle
import time
import sys
def chase(n_lights, until=sys.maxsize, forward=True):
    """Yield the light intensities of a chase pattern, one step at a time.

    A single lit position (1.0) travels around ``n_lights`` slots, moving
    one slot roughly every 0.15 seconds, until the UNIX timestamp ``until``
    has passed.
    """
    step = 1 if forward else -1
    # One light on, the rest off.
    lights = deque([1.0] + [0.0] * (n_lights - 1))
    while time.time() < until:
        yield lights
        time.sleep(0.15)
        lights.rotate(step)
forward = chase
reverse = partial(chase, forward=False)
| StarcoderdataPython |
194806 | <filename>platformio/package/pack.py
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import shutil
import tarfile
import tempfile
from platformio import fs
from platformio.compat import IS_WINDOWS
from platformio.package.exception import PackageException, UserSideException
from platformio.package.manifest.parser import ManifestFileType, ManifestParserFactory
from platformio.package.manifest.schema import ManifestSchema
from platformio.package.meta import PackageItem
from platformio.package.unpack import FileUnpacker
class PackagePacker(object):
    """Packs a PlatformIO package directory (or archive) into a .tar.gz.

    The packer locates the package manifest, derives the archive file name
    from it, and selects which files end up in the tarball using the
    manifest's ``export.include`` / ``export.exclude`` lists combined with
    the default filter lists defined below.
    """
    # Manifest file names that are always (re-)included in the archive.
    INCLUDE_DEFAULT = ManifestFileType.items().values()
    # Patterns excluded from every package.
    EXCLUDE_DEFAULT = [
        # PlatformIO internal files
        PackageItem.METAFILE_NAME,
        ".pio/",
        "**/.pio/",
        # Hidden files
        "._*",
        "__*",
        ".DS_Store",
        ".vscode",
        ".cache",
        "**/.cache",
        # VCS
        ".git/",
        ".hg/",
        ".svn/",
    ]
    # Extra excludes, applied only when the manifest declares no custom
    # "export" include/exclude lists (see compute_src_filters).
    EXCLUDE_EXTRA = [
        # Tests
        "tests?",
        # Docs
        "doc",
        "docs",
        "mkdocs",
        "**/*.[pP][dD][fF]",
        "**/*.[dD][oO][cC]?",
        "**/*.[pP][pP][tT]?",
        "**/*.[dD][oO][xX]",
        "**/*.[hH][tT][mM]?",
        "**/*.[tT][eE][xX]",
        "**/*.[jJ][sS]",
        "**/*.[cC][sS][sS]",
        # Binary files
        "**/*.[jJ][pP][gG]",
        "**/*.[jJ][pP][eE][gG]",
        "**/*.[pP][nN][gG]",
        "**/*.[gG][iI][fF]",
        "**/*.[zZ][iI][pP]",
        "**/*.[gG][zZ]",
        "**/*.3[gG][pP]",
        "**/*.[mM][oO][vV]",
        "**/*.[mM][pP][34]",
        "**/*.[pP][sS][dD]",
        "**/*.[wW][aA][wW]",
    ]
    # Additional excludes for library packages (detected by the presence of
    # a library manifest file in compute_src_filters).
    EXCLUDE_LIBRARY_EXTRA = [
        "assets",
        "extra",
        "resources",
        "html",
        "media",
        "doxygen",
        "**/build/",
        "**/*.flat",
        "**/*.[jJ][aA][rR]",
        "**/*.[eE][xX][eE]",
        "**/*.[bB][iI][nN]",
        "**/*.[hH][eE][xX]",
        "**/*.[dD][bB]",
        "**/*.[dD][aA][tT]",
        "**/*.[dD][lL][lL]",
    ]
    def __init__(self, package, manifest_uri=None):
        """
        :param package: path to a package directory or a zip/tar.gz archive.
        :param manifest_uri: optional ``file:<path>`` or URL of an external
            manifest used to resolve the package's source root.
        """
        self.package = package
        self.manifest_uri = manifest_uri
    @staticmethod
    def get_archive_name(name, version, system=None):
        """Build a sanitized ``<name>[-<system>]-<version>.tar.gz`` file name.

        Characters outside [0-9a-zA-Z-._+] are stripped.
        """
        return re.sub(
            r"[^\da-zA-Z\-\._\+]+",
            "",
            "{name}{system}-{version}.tar.gz".format(
                name=name,
                system=("-" + system) if system else "",
                version=version,
            ),
        )
    def pack(self, dst=None):
        """Create the package tarball and return its final path.

        :param dst: target file or directory; defaults to the derived
            archive name inside the current working directory.
        :raises UserSideException: when packing from an archive on Windows.
        """
        tmp_dir = tempfile.mkdtemp()
        try:
            src = self.package
            # if zip/tar.gz -> unpack to tmp dir
            if not os.path.isdir(src):
                if IS_WINDOWS:
                    raise UserSideException(
                        "Packaging from an archive does not work on Windows OS. Please "
                        "extract data from `%s` manually and pack a folder instead"
                        % src
                    )
                with FileUnpacker(src) as fu:
                    assert fu.unpack(tmp_dir, silent=True)
                src = tmp_dir
            src = self.find_source_root(src)
            manifest = self.load_manifest(src)
            filename = self.get_archive_name(
                manifest["name"],
                manifest["version"],
                manifest["system"][0] if "system" in manifest else None,
            )
            if not dst:
                dst = os.path.join(os.getcwd(), filename)
            elif os.path.isdir(dst):
                dst = os.path.join(dst, filename)
            return self._create_tarball(src, dst, manifest)
        finally:
            # Always clean up the extraction directory, even on failure.
            shutil.rmtree(tmp_dir)
    @staticmethod
    def load_manifest(src):
        """Parse and schema-validate the manifest found in directory ``src``."""
        mp = ManifestParserFactory.new_from_dir(src)
        return ManifestSchema().load_manifest(mp.as_dict())
    def find_source_root(self, src):
        """Resolve the directory that actually contains the package.

        When ``manifest_uri`` was given and its manifest exports exactly one
        ``include`` directory, that directory is used; otherwise ``src`` is
        walked for the first directory containing a recognizable manifest.
        """
        if self.manifest_uri:
            mp = (
                ManifestParserFactory.new_from_file(self.manifest_uri[5:])
                if self.manifest_uri.startswith("file:")
                else ManifestParserFactory.new_from_url(self.manifest_uri)
            )
            manifest = ManifestSchema().load_manifest(mp.as_dict())
            include = manifest.get("export", {}).get("include", [])
            if len(include) == 1:
                if not os.path.isdir(os.path.join(src, include[0])):
                    raise PackageException(
                        "Non existing `include` directory `%s` in a package"
                        % include[0]
                    )
                return os.path.join(src, include[0])
        for root, _, __ in os.walk(src):
            if ManifestFileType.from_dir(root):
                return root
        return src
    def _create_tarball(self, src, dst, manifest):
        """Write the filtered package tree at ``src`` into gzip tarball ``dst``."""
        include = manifest.get("export", {}).get("include")
        exclude = manifest.get("export", {}).get("exclude")
        # remap root
        if (
            include
            and len(include) == 1
            and os.path.isdir(os.path.join(src, include[0]))
        ):
            src = os.path.join(src, include[0])
            # Persist an updated library.json inside the remapped root so the
            # archive remains self-describing without the `include` remap.
            with open(
                os.path.join(src, "library.json"), mode="w", encoding="utf8"
            ) as fp:
                # NOTE(review): dict.copy() is shallow, so the `del` below also
                # removes "include" from the caller's manifest["export"] --
                # harmless for the current single caller (pack), but confirm
                # before reusing this method elsewhere.
                manifest_updated = manifest.copy()
                del manifest_updated["export"]["include"]
                json.dump(manifest_updated, fp, indent=2, ensure_ascii=False)
            include = None
        src_filters = self.compute_src_filters(src, include, exclude)
        with tarfile.open(dst, "w:gz") as tar:
            for f in fs.match_src_files(src, src_filters, followlinks=False):
                tar.add(os.path.join(src, f), f)
        return dst
    def compute_src_filters(self, src, include, exclude):
        """Build the ordered ``+<...>`` / ``-<...>`` filter list for
        ``fs.match_src_files``."""
        exclude_extra = self.EXCLUDE_EXTRA[:]
        # extend with library extra filters
        if any(
            os.path.isfile(os.path.join(src, name))
            for name in (
                ManifestFileType.LIBRARY_JSON,
                ManifestFileType.LIBRARY_PROPERTIES,
                ManifestFileType.MODULE_JSON,
            )
        ):
            exclude_extra.extend(self.EXCLUDE_LIBRARY_EXTRA)
        result = ["+<%s>" % p for p in include or ["*", ".*"]]
        result += ["-<%s>" % p for p in self.EXCLUDE_DEFAULT]
        # exclude items declared in manifest
        result += ["-<%s>" % p for p in exclude or []]
        # apply extra excludes if no custom "export" field in manifest
        if not include and not exclude:
            result += ["-<%s>" % p for p in exclude_extra]
        # automatically include manifests
        result += ["+<%s>" % p for p in self.INCLUDE_DEFAULT]
        return result
| StarcoderdataPython |
5190495 | <gh_stars>0
from setuptools import setup

# Distribution metadata for the whopy package (Whois data for domains).
setup(name='whopy',
      version='1.0.2',
      description='Provides Whois data for domains.',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/amshamah419/whopy',
      packages=['whopy'],
      package_dir={"whopy": "whopy"},
      # Ship the bundled data tables alongside the code.
      package_data={"whopy": ["*.dat", "*.json"]},
      # NOTE: dropped install_requires=['argparse'] -- argparse has been
      # part of the standard library since Python 2.7/3.2, and depending on
      # the obsolete PyPI backport can shadow the stdlib module.
      provides=['whopy'],
      license="MIT"
      )
| StarcoderdataPython |
4985454 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = '__Jack__'
from django.urls import path
from wenhu.users import views
# URL namespace, e.g. reverse("users:detail", ...).
app_name = "users"
urlpatterns = [
    # NOTE: keep "update/" before the catch-all "<str:username>/" route;
    # Django tries patterns in order, so a user literally named "update"
    # would otherwise shadow this page.
    path("update/", views.UserUpdateView.as_view(), name="update"),
    # Public profile page keyed by username.
    path("<str:username>/", views.UserDetailView.as_view(), name="detail"),
]
| StarcoderdataPython |
1758458 | from UdonPie import UnityEngine
from UdonPie.Undefined import *
class RigidbodyInterpolation:
    """Generated stub mirroring UnityEngine.RigidbodyInterpolation.

    Note: ``__new__`` yields no instance -- calling the class returns
    ``None`` at runtime.
    """

    def __new__(cls, arg1=None):
        """
        :returns: RigidbodyInterpolation
        :rtype: UnityEngine.RigidbodyInterpolation
        """
        return None
| StarcoderdataPython |
3510845 | <gh_stars>10-100
# -*- coding: utf-8 -*-
## @package color_histogram.core.hist_1d
#
# Implementation of 1D color histograms.
# @author tody
# @date 2015/08/29
import numpy as np
from color_histogram.core.color_pixels import ColorPixels
from color_histogram.core.hist_common import colorCoordinates, colorDensities, rgbColors, clipLowDensity, range2ticks
## Implementation of 1D color histograms.
class Hist1D:
    """1D color histogram of an image over one channel of a color space.

    Bins the image's pixels along a single channel, accumulates the mean
    RGB color per bin (for plotting), and clips low-density bins.
    """

    ## Constructor
    # @param image input image.
    # @param num_bins target number of histogram bins.
    # @param alpha low density clip.
    # @param color_space target color space. 'rgb' or 'Lab' or 'hsv'.
    # @param channel target color channel. 0 with 'Lab' = L channel.
    def __init__(self, image, num_bins=16, alpha=0.1, color_space='Lab', channel=0):
        self._computeTargetPixels(image, color_space, channel)
        self._num_bins = num_bins
        self._alpha = alpha
        self._color_space = color_space
        self._channel = channel
        self._computeColorRange()
        self._computeHistogram()
        self._plotter = Hist1DPlot(self)

    ## Plot histogram.
    def plot(self, ax):
        self._plotter.plot(ax)

    def numBins(self):
        return self._num_bins

    def colorSpace(self):
        return self._color_space

    def channel(self):
        return self._channel

    def colorIDs(self):
        # Indices of the bins that survived the low-density clip.
        color_ids = np.where(self._histPositive())
        return color_ids

    def colorCoordinates(self):
        color_ids = self.colorIDs()
        num_bins = self._num_bins
        color_range = self._color_range
        return colorCoordinates(color_ids, num_bins, color_range)

    def colorDensities(self):
        return colorDensities(self._hist_bins)

    def rgbColors(self):
        return rgbColors(self._hist_bins, self._color_bins)

    def colorRange(self):
        return self._color_range

    def _computeTargetPixels(self, image, color_space, channel):
        color_pixels = ColorPixels(image)
        self._pixels = color_pixels.pixels(color_space)[:, channel]
        self._rgb_pixels = color_pixels.rgb()

    def _computeColorRange(self):
        pixels = self._pixels
        c_min = np.min(pixels)
        c_max = np.max(pixels)
        self._color_range = [c_min, c_max]

    def _computeHistogram(self):
        pixels = self._pixels
        num_bins = self._num_bins
        c_min, c_max = self._color_range
        hist_bins = np.zeros((num_bins), dtype=np.float32)
        color_bins = np.zeros((num_bins, 3), dtype=np.float32)
        # Map each pixel value to a bin index in [0, num_bins - 1].
        # NOTE(review): assumes c_max > c_min; a constant-color image would
        # divide by zero here -- confirm against callers.
        color_ids = (num_bins - 1) * (pixels - c_min) / (c_max - c_min)
        color_ids = np.int32(color_ids)
        for pi, color_id in enumerate(color_ids):
            hist_bins[color_id] += 1
            color_bins[color_id] += self._rgb_pixels[pi]
        self._hist_bins = hist_bins
        hist_positive = self._hist_bins > 0.0
        # Average the accumulated RGB sums per non-empty bin.
        # FIX: use range() -- xrange() exists only on Python 2 and raises
        # NameError under Python 3.
        for ci in range(3):
            color_bins[hist_positive, ci] /= self._hist_bins[hist_positive]
        self._color_bins = color_bins
        self._clipLowDensity()

    def _clipLowDensity(self):
        clipLowDensity(self._hist_bins, self._color_bins, self._alpha)

    def _histPositive(self):
        return self._hist_bins > 0.0
## 1D color histogram plotter.
class Hist1DPlot:
    ## Constructor.
    # @param hist1D histogram for plotting.
    def __init__(self, hist1D):
        self._hist1D = hist1D

    ## Draw the histogram as a bar chart on the given matplotlib axes.
    def plot(self, ax):
        hist = self._hist1D
        positions = hist.colorCoordinates()
        densities = hist.colorDensities()
        bar_colors = hist.rgbColors()
        lo, hi = hist.colorRange()
        bar_width = (hi - lo) / float(hist.numBins())
        ax.bar(positions, densities, width=bar_width, color=bar_colors)
        self._axisSetting(ax)

    ## Convert a tick range into axis limits padded by 10% of the span.
    def _range2lims(self, tick_range):
        pad = 0.1 * (tick_range[:, 1] - tick_range[:, 0])
        lim = np.array(tick_range)
        lim[0, 0] -= pad[0]
        lim[0, 1] += pad[0]
        lim[1, 1] += pad[1]
        return lim[0], lim[1]

    ## Apply labels, ticks and limits to the axes.
    def _axisSetting(self, ax):
        hist = self._hist1D
        ax.set_xlabel(hist.colorSpace()[hist.channel()])
        ax.set_ylabel("Density")
        tick_range = np.array([hist.colorRange(), [0.0, 1.0]])
        xticks, yticks = range2ticks(tick_range)
        ax.set_xticks(xticks)
        ax.set_yticks(yticks)
        xlim, ylim = self._range2lims(tick_range)
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
1747832 | import pytest
import itertools
from signs import named_chars, build_index
@pytest.fixture
def first_5():
    """The first five (character, name) pairs produced by named_chars()."""
    chars = ' !"#$'
    names = ('SPACE', 'EXCLAMATION MARK', 'QUOTATION MARK',
             'NUMBER SIGN', 'DOLLAR SIGN')
    return list(zip(chars, names))
def test_first_5_named(first_5):
    """named_chars() starts with the five expected (char, name) pairs."""
    head = list(itertools.islice(named_chars(), 5))
    assert head == first_5
def test_build_index(first_5):
    """build_index() maps each name word to the characters that carry it."""
    index = build_index(first_5)
    assert len(index) == 7
    for word, chars in [('SPACE', [' ']),
                        ('EXCLAMATION', ['!']),
                        ('SIGN', ['#', '$'])]:
        assert index[word] == chars
| StarcoderdataPython |
4950275 | <filename>examples/lambda.py
def apply(f, x):
    """Return the result of calling ``f`` with the single argument ``x``."""
    return f(x)


# Demo: increment 5 with an inline lambda.
apply(lambda n: n + 1, 5)
3377308 | <filename>com/vmware/content/library_client.py
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.content.library.
#---------------------------------------------------------------------------
"""
The Content Library module provides classes and classes for defining and
managing the library's items, subscription, publication, and storage.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
# NOTE: auto-generated vAPI binding class (see the "AUTO GENERATED FILE --
# DO NOT MODIFY!" banner in the file header); regenerate instead of editing.
class ItemModel(VapiStruct):
    """
    The ``ItemModel`` class represents a library item that has been stored in a
    library.
    A ``ItemModel`` represents a single logical unit to be managed within a
    :class:`com.vmware.content_client.LibraryModel`. Items contain the actual
    content of a library, and their placement within a library determines
    policies that affect that content such as publishing.
    A library item can have a specified type, indicated with the
    :attr:`ItemModel.type` attribute. This property is associated with a
    Content Library Service plugin that supports specific types and provides
    additional services. The types available in a specific Content Library
    Service can be queried using the :class:`com.vmware.content_client.Type`
    class. Items of an unknown or unspecified type are treated generically.
    Because subscribed library catalogs are synchronized as is, subscribing to
    a remote Content Library Service effectively gives you a library with the
    functionality of the remote service's type adapter plugins, even if they
    are not installed locally.
    Items can be managed using the :class:`Item` class and, for items in
    subscribed libraries, the :class:`SubscribedItem` class.
    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """
    def __init__(self,
                 id=None,
                 library_id=None,
                 content_version=None,
                 creation_time=None,
                 description=None,
                 last_modified_time=None,
                 last_sync_time=None,
                 metadata_version=None,
                 name=None,
                 cached=None,
                 size=None,
                 type=None,
                 version=None,
                 source_id=None,
                ):
        """
        :type id: :class:`str`
        :param id: A unique identifier for this library item.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.content.library.Item``. When methods return a value of
            this class as a return value, the attribute will be an identifier
            for the resource type: ``com.vmware.content.library.Item``.
            This attribute is not used for the ``create`` method. It will not
            be present in the return value of the ``get`` or ``list`` methods.
            It is not used for the ``update`` method.
        :type library_id: :class:`str`
        :param library_id: The identifier of the
            :class:`com.vmware.content_client.LibraryModel` to which this item
            belongs.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.content.Library``. When methods return a value of this
            class as a return value, the attribute will be an identifier for
            the resource type: ``com.vmware.content.Library``.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type content_version: :class:`str`
        :param content_version: The latest version of the file content list of this library item.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.content.library.item.Version``. When methods return a
            value of this class as a return value, the attribute will be an
            identifier for the resource type:
            ``com.vmware.content.library.item.Version``.
            This attribute is not used for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type creation_time: :class:`datetime.datetime`
        :param creation_time: The date and time when this library item was created.
            This attribute is not used for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type description: :class:`str`
        :param description: A human-readable description for this library item.
            This attribute is optional for the ``create`` method. Leaving it
            None during creation will result in an empty string value. It will
            always be present in the result of a ``get`` or ``list`` method. It
            is optional for the ``update`` method. Leaving it None during
            update indicates that the description remains unchanged.
        :type last_modified_time: :class:`datetime.datetime`
        :param last_modified_time: The date and time when the metadata for this library item was last
            changed.
            This attribute is affected by changes to the properties or file
            content of this item. It is not modified by changes to the tags of
            the item, or by changes to the library which owns this item.
            This attribute is not used for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type last_sync_time: :class:`datetime.datetime`
        :param last_sync_time: The date and time when this library item was last synchronized.
            This attribute is updated every time a synchronization is triggered
            on the library item, including when a synchronization is triggered
            on the library to which this item belongs. The value is None for a
            library item that belongs to a local library.
            This attribute is not used for the ``create`` method. It is
            optional in the return value of the ``get`` or ``list`` methods. It
            is not used for the ``update`` method.
        :type metadata_version: :class:`str`
        :param metadata_version: A version number for the metadata of this library item.
            This value is incremented with each change to the metadata of this
            item. Changes to name, description, and so on will increment this
            value. The value is not incremented by changes to the content or
            tags of the item or the library which owns it.
            This attribute is not used for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type name: :class:`str`
        :param name: A human-readable name for this library item.
            The name may not be None or an empty string. The name does not have
            to be unique, even within the same library.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        :type cached: :class:`bool`
        :param cached: The status that indicates whether the library item is on disk or
            not. The library item is cached when all its files are on disk.
            This attribute is not used for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type size: :class:`long`
        :param size: The library item size, in bytes. The size is the sum of the size
            used on the storage backing for all the files in the item. When the
            library item is not cached, the size is 0.
            This attribute is not used for the ``create`` method. It is
            optional in the return value of the ``get`` or ``list`` methods. It
            is not used for the ``update`` method.
        :type type: :class:`str`
        :param type: An optional type identifier which indicates the type adapter plugin
            to use.
            This attribute may be set to a non-empty string value that
            corresponds to an identifier supported by a type adapter plugin
            present in the Content Library Service. A type adapter plugin, if
            present for the specified type, can provide additional information
            and services around the item content. A type adapter can guide the
            upload process by creating file entries that are in need of being
            uploaded to complete an item.
            The types and plugins supported by the Content Library Service can
            be queried using the :class:`com.vmware.content_client.Type` class.
            This attribute is optional for the ``create`` and ``update``
            methods. During creation, if the type is left unspecified, or if
            the type is specified but does not have a corresponding type
            support plugin, then the type of the library item is considered to
            be generic and all data is treated as generic files. During update,
            if the type is not specified, then it is not updated.
        :type version: :class:`str`
        :param version: A version number that is updated on metadata changes. This value is
            used to validate update requests to provide optimistic concurrency
            of changes.
            This value represents a number that is incremented every time
            library item properties, such as name or description, are changed.
            It is not incremented by changes to the file content of the library
            item, including adding or removing files. It is also not affected
            by tagging the library item.
            This attribute is not used for the ``create`` method. It will
            always be present in the result of a ``get`` or ``list`` method. It
            is optional for the ``update`` method. Leaving it None during
            update indicates that you do not need to detect concurrent updates.
        :type source_id: :class:`str`
        :param source_id: The identifier of the :class:`ItemModel` to which this item is
            synchronized to if the item belongs to a subscribed library. The
            value is None for a library item that belongs to a local library.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.content.library.Item``. When methods return a value of
            this class as a return value, the attribute will be an identifier
            for the resource type: ``com.vmware.content.library.Item``.
            This attribute is not used for the ``create`` method. It is
            optional in the return value of the ``get`` or ``list`` methods. It
            is not used for the ``update`` method.
        """
        # Store every field as a plain attribute; field types are declared by
        # the StructType binding registered below the class body.
        self.id = id
        self.library_id = library_id
        self.content_version = content_version
        self.creation_time = creation_time
        self.description = description
        self.last_modified_time = last_modified_time
        self.last_sync_time = last_sync_time
        self.metadata_version = metadata_version
        self.name = name
        self.cached = cached
        self.size = size
        self.type = type
        self.version = version
        self.source_id = source_id
        VapiStruct.__init__(self)
# Register the vAPI struct type binding for ItemModel (field name -> type).
ItemModel._set_binding_type(type.StructType(
    'com.vmware.content.library.item_model', {
        'id': type.OptionalType(type.IdType()),
        'library_id': type.OptionalType(type.IdType()),
        'content_version': type.OptionalType(type.IdType()),
        'creation_time': type.OptionalType(type.DateTimeType()),
        'description': type.OptionalType(type.StringType()),
        'last_modified_time': type.OptionalType(type.DateTimeType()),
        'last_sync_time': type.OptionalType(type.DateTimeType()),
        'metadata_version': type.OptionalType(type.StringType()),
        'name': type.OptionalType(type.StringType()),
        'cached': type.OptionalType(type.BooleanType()),
        'size': type.OptionalType(type.IntegerType()),
        'type': type.OptionalType(type.StringType()),
        'version': type.OptionalType(type.StringType()),
        'source_id': type.OptionalType(type.IdType()),
    },
    ItemModel,
    True,
    ["id"]))
# NOTE: auto-generated vAPI binding class (see the "AUTO GENERATED FILE --
# DO NOT MODIFY!" banner in the file header); regenerate instead of editing.
class OptimizationInfo(VapiStruct):
    """
    The ``OptimizationInfo`` class defines different optimizations and
    optimization parameters applied to particular library.
    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """
    def __init__(self,
                 optimize_remote_publishing=None,
                ):
        """
        :type optimize_remote_publishing: :class:`bool`
        :param optimize_remote_publishing: If set to ``true`` then library would be optimized for remote
            publishing.
            Turn it on if remote publishing is dominant use case for this
            library. Remote publishing means here that publisher and
            subscribers are not the part of the same ``Vcenter`` SSO domain.
            Any optimizations could be done as result of turning on this
            optimization during library creation. For example, library content
            could be stored in different format but optimizations are not
            limited to just storage format.
            Note, that value of this toggle could be set only during creation
            of the library and you would need to migrate your library in case
            you need to change this value (optimize the library for different
            use case).
            This attribute is optional for the ``create`` method. If not
            specified for the ``create``, the default is for the library to not
            be optmized for specific use case. It is not used for the
            ``update`` method.
        """
        self.optimize_remote_publishing = optimize_remote_publishing
        VapiStruct.__init__(self)
# Register the vAPI struct type binding for OptimizationInfo.
OptimizationInfo._set_binding_type(type.StructType(
    'com.vmware.content.library.optimization_info', {
        'optimize_remote_publishing': type.OptionalType(type.BooleanType()),
    },
    OptimizationInfo,
    False,
    None))
class PublishInfo(VapiStruct):
"""
The ``PublishInfo`` class defines how a local library is published publicly
for synchronization to other libraries.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
authentication_method=None,
published=None,
publish_url=None,
user_name=None,
password=<PASSWORD>,
current_password=<PASSWORD>,
persist_json_enabled=None,
):
"""
:type authentication_method: :class:`PublishInfo.AuthenticationMethod`
:param authentication_method: Indicates how a subscribed library should authenticate (BASIC,
NONE) to the published library endpoint.
This attribute is required for the
:func:`com.vmware.content_client.LocalLibrary.create` method. It is
optional for the
:func:`com.vmware.content_client.LocalLibrary.update` operation,
and if None the value will not be changed. When the existing
authentication method is
:attr:`PublishInfo.AuthenticationMethod.BASIC` and authentication
is being turned off by updating this attribute to
:attr:`PublishInfo.AuthenticationMethod.NONE`, then the
:attr:`PublishInfo.current_password` attribute is required. This
attribute will always be present in the results of the
:func:`com.vmware.content_client.LocalLibrary.get` method.
:type published: :class:`bool`
:param published: Whether the local library is published.
This attribute is required for the
:func:`com.vmware.content_client.LocalLibrary.create` method. It is
optional for the
:func:`com.vmware.content_client.LocalLibrary.update` operation,
and if None the value will not be changed. When the existing
authentication method is
:attr:`PublishInfo.AuthenticationMethod.BASIC` and the local
library is published, the :attr:`PublishInfo.current_password`
attribute is required before turning off publishing. This attribute
will always be present in the results of the
:func:`com.vmware.content_client.LocalLibrary.get` method.
:type publish_url: :class:`str`
:param publish_url: The URL to which the library metadata is published by the Content
Library Service.
This value can be used to set the
:attr:`SubscriptionInfo.subscription_url` property when creating a
subscribed library.
This attribute is not used for the ``create`` method. It will
always be present in the return value of the ``get`` or ``list``
methods. It is not used for the ``update`` method.
:type user_name: :class:`str`
:param user_name: The username to require for authentication.
This attribute is optional for the
:func:`com.vmware.content_client.LocalLibrary.create` and
:func:`com.vmware.content_client.LocalLibrary.update` methods. When
the authentication method is
:attr:`PublishInfo.AuthenticationMethod.NONE`, the username can be
left None. When the authentication method is
:attr:`PublishInfo.AuthenticationMethod.BASIC`, the username is
ignored in the current release. It defaults to "vcsp". It is
preferable to leave this None. If specified, it must be set to
"vcsp".
:type password: :class:`str`
:param password: The new password to require for authentication.
This attribute is optional for the
:func:`com.vmware.content_client.LocalLibrary.create` method. When
the authentication method is
:attr:`PublishInfo.AuthenticationMethod.NONE`, the password can be
left None. When the authentication method is
:attr:`PublishInfo.AuthenticationMethod.BASIC`, the password should
be a non-empty string. This attribute is optional for the
:func:`com.vmware.content_client.LocalLibrary.update` method.
Leaving it None during update indicates that the password is not
changed. When the password is changed, the
:attr:`PublishInfo.current_password` attribute is required. This
attribute is not used for the
:func:`com.vmware.content_client.LocalLibrary.get` method.
:type current_password: :class:`str`
:param current_password: The <PASSWORD>. This attribute is available
starting in vSphere 6.7.
This attribute is unused for the
:func:`com.vmware.content_client.LocalLibrary.create` method. This
attribute is optional for the
:func:`com.vmware.content_client.LocalLibrary.update` method. When
the existing authentication method is
:attr:`PublishInfo.AuthenticationMethod.NONE`, the current password
can be left None. When the existing authentication method is
:attr:`PublishInfo.AuthenticationMethod.BASIC`, the current
password is verified before applying the new
:attr:`PublishInfo.password`, turning off authentication, or
unpublishing the library. This attribute is not used for the
:func:`com.vmware.content_client.LocalLibrary.get` method.
:type persist_json_enabled: :class:`bool`
:param persist_json_enabled: Whether library and library item metadata are persisted in the
storage backing as JSON files. This flag only applies if the local
library is published.
Enabling JSON persistence allows you to synchronize a subscribed
library manually instead of over HTTP. You copy the local library
content and metadata to another storage backing manually and then
create a subscribed library referencing the location of the library
JSON file in the :attr:`SubscriptionInfo.subscription_url`. When
the subscribed library's storage backing matches the subscription
URL, files do not need to be copied to the subscribed library.
For a library backed by a datastore, the library JSON file will be
stored at the path contentlib-{library_id}/lib.json on the
datastore.
For a library backed by a remote file system, the library JSON file
will be stored at {library_id}/lib.json in the remote file system
path.
This attribute is optional for the ``create`` method. It will
always be present in the return value of the ``get`` or ``list``
methods. It is optional for the ``update`` method.
"""
self.authentication_method = authentication_method
self.published = published
self.publish_url = publish_url
self.user_name = user_name
self.password = password
self.current_password = <PASSWORD>
self.persist_json_enabled = persist_json_enabled
VapiStruct.__init__(self)
class AuthenticationMethod(Enum):
    """
    The ``PublishInfo.AuthenticationMethod`` class indicates how a subscribed
    library should authenticate to the published library endpoint.

    .. note::
        This class represents an enumerated type in the interface language
        definition. The class contains class attributes which represent the
        values in the current version of the enumerated type. Newer versions of
        the enumerated type may contain new values. To use new values of the
        enumerated type in communication with a server that supports the newer
        version of the API, you instantiate this class. See :ref:`enumerated
        type description page <enumeration_description>`.
    """
    # Placeholders only; the real enum instances are installed below by
    # the _set_values() call.
    BASIC = None
    """
    Require HTTP Basic authentication matching a specified username and
    password.
    """
    NONE = None
    """
    Require no authentication.
    """

    def __init__(self, string):
        """
        :type  string: :class:`str`
        :param string: String value for the :class:`AuthenticationMethod` instance.
        """
        # Fix: the generated code called ``Enum.__init__(string)`` without
        # ``self``, which bound the string argument as ``self`` and skipped
        # base-class initialization of this instance.
        Enum.__init__(self, string)

# Install the real enum values and register the enum's wire-format binding
# with the vAPI runtime.
AuthenticationMethod._set_values([
    AuthenticationMethod('BASIC'),
    AuthenticationMethod('NONE'),
])
AuthenticationMethod._set_binding_type(type.EnumType(
    'com.vmware.content.library.publish_info.authentication_method',
    AuthenticationMethod))
# Register the wire-format metadata for PublishInfo with the vAPI binding
# layer. Every attribute is optional on the wire; ``password`` and
# ``current_password`` use SecretType rather than StringType.
PublishInfo._set_binding_type(type.StructType(
    'com.vmware.content.library.publish_info', {
        'authentication_method': type.OptionalType(type.ReferenceType(__name__, 'PublishInfo.AuthenticationMethod')),
        'published': type.OptionalType(type.BooleanType()),
        'publish_url': type.OptionalType(type.URIType()),
        'user_name': type.OptionalType(type.StringType()),
        'password': type.OptionalType(type.SecretType()),
        'current_password': type.OptionalType(type.SecretType()),
        'persist_json_enabled': type.OptionalType(type.BooleanType()),
    },
    PublishInfo,
    False,
    None))
class SourceInfo(VapiStruct):
    """
    Describes the source published library that a subscribed library is
    associated with. This class was added in vSphere API 6.7.2.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 source_library=None,
                 subscription=None,
                ):
        """
        :type  source_library: :class:`str`
        :param source_library: Identifier of the published library. This attribute was added in
            vSphere API 6.7.2.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.content.Library``. When methods return a value of this
            class as a return value, the attribute will be an identifier for
            the resource type: ``com.vmware.content.Library``.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        :type  subscription: :class:`str`
        :param subscription: Identifier of the subscription associated with the subscribed
            library. This attribute was added in vSphere API 6.7.2.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.content.library.Subscriptions``. When methods return a
            value of this class as a return value, the attribute will be an
            identifier for the resource type:
            ``com.vmware.content.library.Subscriptions``.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        """
        # Plain attribute copies; validation happens in the vAPI runtime.
        self.subscription = subscription
        self.source_library = source_library
        VapiStruct.__init__(self)
# Register the wire-format metadata for SourceInfo with the vAPI binding
# layer; both identifier attributes are optional on the wire.
SourceInfo._set_binding_type(type.StructType(
    'com.vmware.content.library.source_info', {
        'source_library': type.OptionalType(type.IdType()),
        'subscription': type.OptionalType(type.IdType()),
    },
    SourceInfo,
    False,
    None))
class StorageBacking(VapiStruct):
    """
    The ``StorageBacking`` class defines a storage location where content in a
    library will be stored. The storage location can either be a Datastore or
    Other type.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    # Union discriminated by ``type``: DATASTORE uses ``datastore_id``,
    # OTHER uses ``storage_uri``.
    _validator_list = [
        UnionValidator(
            'type',
            {
                'DATASTORE' : [('datastore_id', False)],
                'OTHER' : [('storage_uri', False)],
            }
        ),
    ]

    def __init__(self,
                 type=None,
                 datastore_id=None,
                 storage_uri=None,
                ):
        """
        :type  type: :class:`StorageBacking.Type`
        :param type: Type (DATASTORE, OTHER) of :class:`StorageBacking`.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is not used for the ``update`` method.
        :type  datastore_id: :class:`str`
        :param datastore_id: Identifier of the datastore used to store the content in the
            library.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``Datastore``. When methods return a value of this class as a
            return value, the attribute will be an identifier for the resource
            type: ``Datastore``.
            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`StorageBacking.Type.DATASTORE`.
        :type  storage_uri: :class:`str`
        :param storage_uri: URI identifying the location used to store the content in the
            library.
            The following URI formats are supported:

            vSphere 6.5

            * nfs://server/path?version=4 (for vCenter Server Appliance only) -
              Specifies an NFS Version 4 server.
            * nfs://server/path (for vCenter Server Appliance only) - Specifies
              an NFS Version 3 server. The nfs://server:/path format is also
              supported.
            * smb://server/path - Specifies an SMB server or Windows share.

            vSphere 6.0 Update 1

            * nfs://server:/path (for vCenter Server Appliance only)
            * file://unc-server/path (for vCenter Server for Windows only)
            * file:///mount/point (for vCenter Server Appliance only) - Local
              file URIs are supported only when the path is a local mount point
              for an NFS file system. Use of file URIs is strongly discouraged.
              Instead, use an NFS URI to specify the remote file system.

            vSphere 6.0

            * nfs://server:/path (for vCenter Server Appliance only)
            * file://unc-server/path (for vCenter Server for Windows only)
            * file:///path - Local file URIs are supported but strongly
              discouraged because it may interfere with the performance of
              vCenter Server.

            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`StorageBacking.Type.OTHER`.
        """
        # NOTE: the parameter name ``type`` shadows the vAPI ``type`` module
        # inside this method only (generated-code convention kept for
        # interface compatibility); the module is still visible at class scope.
        self.type = type
        self.datastore_id = datastore_id
        self.storage_uri = storage_uri
        VapiStruct.__init__(self)

    class Type(Enum):
        """
        The ``StorageBacking.Type`` class specifies the type of the
        :class:`StorageBacking`.

        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        # Placeholders only; the real enum instances are installed below by
        # the _set_values() call.
        DATASTORE = None
        """
        The content of the library will be stored on a datastore.

        These are vCenter Server managed datastores, and are logical containers
        that hide specifics of each storage device. Depending on the type of
        storage you use, datastores can be backed by the following file system
        formats:

        * Virtual Machine File System (VMFS)
        * Network File System (NFS)
        """
        OTHER = None
        """
        The content of the library will be stored on a remote file system.

        Supports the following remote file systems:

        * NFS (on vCenter Server Appliance)
        * SMB (on vCenter Server Appliance and vCenter Server for Windows)
        """

        def __init__(self, string):
            """
            :type  string: :class:`str`
            :param string: String value for the :class:`Type` instance.
            """
            # Fix: the generated code called ``Enum.__init__(string)`` without
            # ``self``, which bound the string argument as ``self`` and skipped
            # base-class initialization of this instance.
            Enum.__init__(self, string)

    # Install the real enum values and register the enum's wire-format
    # binding with the vAPI runtime.
    Type._set_values([
        Type('DATASTORE'),
        Type('OTHER'),
    ])
    Type._set_binding_type(type.EnumType(
        'com.vmware.content.library.storage_backing.type',
        Type))
# Register the wire-format metadata for StorageBacking with the vAPI binding
# layer. All attributes are optional on the wire; which of datastore_id /
# storage_uri applies is governed by the class's UnionValidator on ``type``.
StorageBacking._set_binding_type(type.StructType(
    'com.vmware.content.library.storage_backing', {
        'type': type.OptionalType(type.ReferenceType(__name__, 'StorageBacking.Type')),
        'datastore_id': type.OptionalType(type.IdType()),
        'storage_uri': type.OptionalType(type.URIType()),
    },
    StorageBacking,
    False,
    None))
class SubscriptionInfo(VapiStruct):
    """
    The ``SubscriptionInfo`` class defines the subscription behavior for a
    subscribed library.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 authentication_method=None,
                 automatic_sync_enabled=None,
                 on_demand=None,
                 password=None,
                 ssl_thumbprint=None,
                 subscription_url=None,
                 user_name=None,
                 source_info=None,
                ):
        """
        :type  authentication_method: :class:`SubscriptionInfo.AuthenticationMethod`
        :param authentication_method: Indicate how the subscribed library should authenticate (BASIC,
            NONE) with the published library endpoint.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        :type  automatic_sync_enabled: :class:`bool`
        :param automatic_sync_enabled: Whether the library should participate in automatic library
            synchronization. In order for automatic synchronization to happen,
            the global
            :attr:`com.vmware.content_client.ConfigurationModel.automatic_sync_enabled`
            option must also be true. The subscription is still active even
            when automatic synchronization is turned off, but synchronization
            is only activated with an explicit call to
            :func:`com.vmware.content_client.SubscribedLibrary.sync` or
            :func:`SubscribedItem.sync`. In other words, manual synchronization
            is still available even when automatic synchronization is disabled.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        :type  on_demand: :class:`bool`
        :param on_demand: Indicates whether a library item's content will be synchronized
            only on demand.
            If this is set to ``true``, then the library item's metadata will
            be synchronized but the item's content (its files) will not be
            synchronized. The Content Library Service will synchronize the
            content upon request only. This can cause the first use of the
            content to have a noticeable delay.
            Items without synchronized content can be forcefully synchronized
            in advance using the :func:`SubscribedItem.sync` call with
            ``forceSyncContent`` set to true. Once content has been
            synchronized, the content can removed with the
            :func:`SubscribedItem.evict` call.
            If this value is set to ``false``, all content will be synchronized
            in advance.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        :type  password: :class:`str`
        :param password: The password to use when authenticating.
            The password must be set when using a password-based authentication
            method; empty strings are not allowed.
            This attribute is optional for the ``create`` method. It will not
            be present in the return value of the ``get`` or ``list`` methods.
            It is optional for the ``update`` method.
        :type  ssl_thumbprint: :class:`str`
        :param ssl_thumbprint: An optional SHA-1 hash of the SSL certificate for the remote
            endpoint.
            If this value is defined the SSL certificate will be verified by
            comparing it to the SSL thumbprint. The SSL certificate must verify
            against the thumbprint. When specified, the standard certificate
            chain validation behavior is not used. The certificate chain is
            validated normally if this value is None.
            This attribute is optional for the ``create`` method. It will not
            be present in the return value of the ``get`` or ``list`` methods.
            It is optional for the ``update`` method.
        :type  subscription_url: :class:`str`
        :param subscription_url: The URL of the endpoint where the metadata for the remotely
            published library is being served.
            This URL can be the :attr:`PublishInfo.publish_url` of the
            published library (for example, https://server/path/lib.json).
            If the source content comes from a published library with
            :attr:`PublishInfo.persist_json_enabled`, the subscription URL can
            be a URL pointing to the library JSON file on a datastore or remote
            file system. The supported formats are:

            vSphere 6.5

            * ds:///vmfs/volumes/{uuid}/mylibrary/lib.json (for datastore)
            * nfs://server/path/mylibrary/lib.json (for NFSv3 server on vCenter
              Server Appliance)
            * nfs://server/path/mylibrary/lib.json?version=4 (for NFSv4 server
              on vCenter Server Appliance)
            * smb://server/path/mylibrary/lib.json (for SMB server)

            vSphere 6.0

            * file://server/mylibrary/lib.json (for UNC server on vCenter
              Server for Windows)
            * file:///path/mylibrary/lib.json (for local file system)

            When you specify a DS subscription URL, the datastore must be on
            the same vCenter Server as the subscribed library. When you specify
            an NFS or SMB subscription URL, the
            :attr:`StorageBacking.storage_uri` of the subscribed library must
            be on the same remote file server and should share a common parent
            path with the subscription URL.
            This attribute must be provided for the ``create`` method. It will
            always be present in the return value of the ``get`` or ``list``
            methods. It is optional for the ``update`` method.
        :type  user_name: :class:`str`
        :param user_name: The username to use when authenticating.
            The username must be set when using a password-based authentication
            method. Empty strings are allowed for usernames.
            This attribute is optional for the ``create`` method. It is
            optional in the return value of the ``get`` or ``list`` methods. It
            is optional for the ``update`` method.
        :type  source_info: :class:`SourceInfo`
        :param source_info: Information about the source published library. This attribute will
            be set for a subscribed library which is associated with a
            subscription of the published library. This attribute was added in
            vSphere API 6.7.2.
            This attribute is optional for the ``create`` method. It is
            optional in the return value of the ``get`` or ``list`` methods. It
            is optional for the ``update`` method.
        """
        # Fix: the ``password`` default had been replaced by an invalid
        # placeholder token; the attribute is optional (see the binding type
        # below: OptionalType(SecretType)), so it defaults to None like every
        # other attribute of this struct.
        self.authentication_method = authentication_method
        self.automatic_sync_enabled = automatic_sync_enabled
        self.on_demand = on_demand
        self.password = password
        self.ssl_thumbprint = ssl_thumbprint
        self.subscription_url = subscription_url
        self.user_name = user_name
        self.source_info = source_info
        VapiStruct.__init__(self)

    class AuthenticationMethod(Enum):
        """
        Indicate how the subscribed library should authenticate with the published
        library endpoint.

        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        # Placeholders only; the real enum instances are installed below by
        # the _set_values() call.
        BASIC = None
        """
        Require HTTP Basic authentication matching a specified username and
        password.
        """
        NONE = None
        """
        Require no authentication.
        """

        def __init__(self, string):
            """
            :type  string: :class:`str`
            :param string: String value for the :class:`AuthenticationMethod` instance.
            """
            # Fix: the generated code called ``Enum.__init__(string)`` without
            # ``self``, which bound the string argument as ``self`` and skipped
            # base-class initialization of this instance.
            Enum.__init__(self, string)

    # Install the real enum values and register the enum's wire-format
    # binding with the vAPI runtime.
    AuthenticationMethod._set_values([
        AuthenticationMethod('BASIC'),
        AuthenticationMethod('NONE'),
    ])
    AuthenticationMethod._set_binding_type(type.EnumType(
        'com.vmware.content.library.subscription_info.authentication_method',
        AuthenticationMethod))
# Register the wire-format metadata for SubscriptionInfo with the vAPI
# binding layer. Every attribute is optional on the wire; ``password`` uses
# SecretType rather than StringType.
SubscriptionInfo._set_binding_type(type.StructType(
    'com.vmware.content.library.subscription_info', {
        'authentication_method': type.OptionalType(type.ReferenceType(__name__, 'SubscriptionInfo.AuthenticationMethod')),
        'automatic_sync_enabled': type.OptionalType(type.BooleanType()),
        'on_demand': type.OptionalType(type.BooleanType()),
        'password': type.OptionalType(type.SecretType()),
        'ssl_thumbprint': type.OptionalType(type.StringType()),
        'subscription_url': type.OptionalType(type.URIType()),
        'user_name': type.OptionalType(type.StringType()),
        'source_info': type.OptionalType(type.ReferenceType(__name__, 'SourceInfo')),
    },
    SubscriptionInfo,
    False,
    None))
class Item(VapiInterface):
"""
The ``Item`` class provides methods for managing library items.
"""
# Resource type name used for library items in identifier parameters and
# in the privilege requirements documented on this service's methods.
RESOURCE_TYPE = "com.vmware.content.library.Item"
"""
Resource type for item.
"""
# Canonical vAPI identifier of this service.
_VAPI_SERVICE_ID = 'com.vmware.content.library.item'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
    """
    :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
    :param config: Configuration to be used for creating the stub.
    """
    # Wire this interface to its generated stub class.
    VapiInterface.__init__(self, config, _ItemStub)
    # This service defines no per-operation identifier overrides.
    self._VAPI_OPERATION_IDS = {}
class FindSpec(VapiStruct):
    """
    Filter criteria used to locate library items. Every attribute that is
    set must match an item for that item to be returned; attributes left as
    ``None`` match everything.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 name=None,
                 library_id=None,
                 source_id=None,
                 type=None,
                 cached=None,
                ):
        """
        :type  name: :class:`str` or ``None``
        :param name: Case-insensitive name of the library item (see
            :attr:`ItemModel.name`). If None, all library item names are
            searched.
        :type  library_id: :class:`str` or ``None``
        :param library_id: Identifier of the library containing the item (see
            :attr:`ItemModel.library_id`). Clients must pass, and methods
            return, an identifier for the resource type
            ``com.vmware.content.Library``. If None, all libraries are
            searched.
        :type  source_id: :class:`str` or ``None``
        :param source_id: Identifier of the library item as reported by the publisher (see
            :attr:`ItemModel.source_id`). Clients must pass, and methods
            return, an identifier for the resource type
            ``com.vmware.content.library.Item``. If None, all library items
            are searched.
        :type  type: :class:`str` or ``None``
        :param type: Case-insensitive type of the library item (see
            :attr:`ItemModel.type`). If None, all types are searched.
        :type  cached: :class:`bool` or ``None``
        :param cached: Whether the item is cached, 'true' or 'false' (see
            :attr:`ItemModel.cached`). If None, all library items are
            searched.
        """
        # Plain attribute copies; validation happens in the vAPI runtime.
        self.cached = cached
        self.type = type
        self.source_id = source_id
        self.library_id = library_id
        self.name = name
        VapiStruct.__init__(self)

# Register the wire-format metadata for FindSpec; every filter attribute is
# optional on the wire.
FindSpec._set_binding_type(type.StructType(
    'com.vmware.content.library.item.find_spec', {
        'name': type.OptionalType(type.StringType()),
        'library_id': type.OptionalType(type.IdType()),
        'source_id': type.OptionalType(type.IdType()),
        'type': type.OptionalType(type.StringType()),
        'cached': type.OptionalType(type.BooleanType()),
    },
    FindSpec,
    False,
    None))
class DestinationSpec(VapiStruct):
    """
    Identifies one subscription of a published library to which a library
    item is to be published. This class was added in vSphere API 6.7.2.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 subscription=None,
                ):
        """
        :type  subscription: :class:`str`
        :param subscription: Identifier of the subscription associated with the subscribed
            library. This attribute was added in vSphere API 6.7.2.
            Clients must pass, and methods return, an identifier for the
            resource type ``com.vmware.content.library.Subscriptions``.
        """
        self.subscription = subscription
        VapiStruct.__init__(self)

# Register the wire-format metadata for DestinationSpec; unlike the other
# structs here, ``subscription`` is a required identifier on the wire.
DestinationSpec._set_binding_type(type.StructType(
    'com.vmware.content.library.item.destination_spec', {
        'subscription': type.IdType(resource_types='com.vmware.content.library.Subscriptions'),
    },
    DestinationSpec,
    False,
    None))
def copy(self,
         source_library_item_id,
         destination_create_spec,
         client_token=None,
         ):
    """
    Copies a library item.

    Copying a library item allows a duplicate to be made within the same or
    different library. The copy occurs by first creating a new library
    item, whose identifier is returned. The content of the library item is
    then copied asynchronously. This copy can be tracked as a task.

    If the copy fails, Content Library Service will roll back the copy by
    deleting any content that was already copied, and removing the new
    library item. A failure during rollback may require manual cleanup by
    an administrator.

    A library item cannot be copied into a subscribed library.

    :type  client_token: :class:`str` or ``None``
    :param client_token: A unique token generated on the client for each copy request. The
        token should be a universally unique identifier (UUID), for
        example: ``b8a2a2e3-2314-43cd-a871-6ede0f429751``. This token can
        be used to guarantee idempotent copy.
        If not specified copy is not idempotent.
    :type  source_library_item_id: :class:`str`
    :param source_library_item_id: Identifier of the existing library item from which the content will
        be copied.
        The parameter must be an identifier for the resource type:
        ``com.vmware.content.library.Item``.
    :type  destination_create_spec: :class:`ItemModel`
    :param destination_create_spec: Specification for the new library item to be created.
    :rtype: :class:`str`
    :return: The identifier of the new library item into which the content is
        being copied.
        The return value will be an identifier for the resource type:
        ``com.vmware.content.library.Item``.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the library item with ``source_library_item_id`` does not exist,
        or if the library referenced by the :attr:`ItemModel.library_id`
        property of ``destination_create_spec`` does not exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if one of the following is true for the new library item:

        * name is empty
        * name exceeds 80 characters
        * description exceeds 2000 characters
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if the ``client_token`` does not conform to the UUID format.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        if the :attr:`ItemModel.library_id` property of
        ``destination_create_spec`` refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
        if the copy operation failed because the source or destination
        library item is not accessible.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the content of the source library item specified by
        ``source_library_item_id``, or the content of the target library
        specified by the library ID (see :attr:`ItemModel.library_id`)
        property of ``destination_create_spec`` has been deleted from the
        storage backings (see null) associated with it.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * The resource ``com.vmware.content.library.Item`` referenced by
          the parameter ``source_library_item_id`` requires ``System.Read``.
        * The resource ``com.vmware.content.Library`` referenced by the
          attribute :attr:`ItemModel.library_id` requires
          ``ContentLibrary.AddLibraryItem``.
    """
    # Delegate to the vAPI stub; the server performs the asynchronous copy.
    return self._invoke('copy',
                        {
                        'client_token': client_token,
                        'source_library_item_id': source_library_item_id,
                        'destination_create_spec': destination_create_spec,
                        })
def create(self,
           create_spec,
           client_token=None,
           ):
    """
    Creates a new library item.

    A new library item is created without any content. After creation,
    content can be added through the
    :class:`com.vmware.content.library.item_client.UpdateSession` and
    :class:`com.vmware.content.library.item.updatesession_client.File`
    classes.

    A library item cannot be created in a subscribed library.

    :type  client_token: :class:`str` or ``None``
    :param client_token: A unique token generated on the client for each creation request.
        The token should be a universally unique identifier (UUID), for
        example: ``b8a2a2e3-2314-43cd-a871-6ede0f429751``. This token can
        be used to guarantee idempotent creation.
        If not specified creation is not idempotent.
    :type  create_spec: :class:`ItemModel`
    :param create_spec: Specification that defines the properties of the new library item.
    :rtype: :class:`str`
    :return: Identifier of the new library item.
        The return value will be an identifier for the resource type:
        ``com.vmware.content.library.Item``.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the :attr:`ItemModel.library_id` property of ``create_spec``
        refers to a library that does not exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if one of the following is true for the new library item:

        * name is empty
        * name exceeds 80 characters
        * description exceeds 2000 characters
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if the ``client_token`` does not conform to the UUID format.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        if the :attr:`ItemModel.library_id` property of ``create_spec``
        refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the content of the library specified by the library ID (see
        :attr:`ItemModel.library_id`) property of ``create_spec`` has been
        deleted from the storage backings (see null) associated with it.
    :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
        if there is already a library item with same name in the library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * The resource ``com.vmware.content.Library`` referenced by the
          attribute :attr:`ItemModel.library_id` requires
          ``ContentLibrary.AddLibraryItem``.
    """
    # Delegate to the vAPI stub; the item is created empty (no content).
    return self._invoke('create',
                        {
                        'client_token': client_token,
                        'create_spec': create_spec,
                        })
def delete(self,
           library_item_id,
           ):
    """
    Deletes a library item.

    The item is removed from its owning library immediately; its content is
    removed from the storage backings asynchronously, and that deletion can
    be tracked with a task. Should the task fail, an administrator may need
    to remove the files from the storage backing manually.

    Items that belong to a subscribed library cannot be deleted with this
    method; instead, delete the item from the original published local
    library and synchronize the subscribed library.

    :type  library_item_id: :class:`str`
    :param library_item_id: Identifier of the library item to delete.
        The parameter must be an identifier for the resource type:
        ``com.vmware.content.library.Item``.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        if the library item with the given ``library_item_id`` is a member
        of a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the library item with the specified ``library_item_id`` does not
        exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the library item contains a virtual machine template and a
        virtual machine is checked out of the library item.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * The resource ``com.vmware.content.library.Item`` referenced by
          the parameter ``library_item_id`` requires
          ``ContentLibrary.DeleteLibraryItem``.
    """
    call_args = {
        'library_item_id': library_item_id,
    }
    return self._invoke('delete', call_args)
def get(self,
        library_item_id,
        ):
    """
    Returns the :class:`ItemModel` with the given identifier.

    :type  library_item_id: :class:`str`
    :param library_item_id: Identifier of the library item to return.
        The parameter must be an identifier for the resource type:
        ``com.vmware.content.library.Item``.
    :rtype: :class:`ItemModel`
    :return: The :class:`ItemModel` instance with the given
        ``library_item_id``.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if no item with the given ``library_item_id`` exists.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * The resource ``com.vmware.content.library.Item`` referenced by
          the parameter ``library_item_id`` requires ``System.Read``.
    """
    call_args = {
        'library_item_id': library_item_id,
    }
    return self._invoke('get', call_args)
def list(self,
         library_id,
         ):
    """
    Returns the identifiers of all items in the given library.

    :type  library_id: :class:`str`
    :param library_id: Identifier of the library whose items should be returned.
        The parameter must be an identifier for the resource type:
        ``com.vmware.content.Library``.
    :rtype: :class:`list` of :class:`str`
    :return: The :class:`list` of identifiers of the items in the library
        specified by ``library_id``. The return value will contain
        identifiers for the resource type
        ``com.vmware.content.library.Item``.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the library associated with ``library_id`` does not exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * The resource ``com.vmware.content.Library`` referenced by the
          parameter ``library_id`` requires ``System.Read``.
    """
    call_args = {
        'library_id': library_id,
    }
    return self._invoke('list', call_args)
def find(self,
         spec,
         ):
    """
    Returns identifiers of all the visible (as determined by authorization
    policy) library items matching the requested :class:`Item.FindSpec`.

    :type  spec: :class:`Item.FindSpec`
    :param spec: Specification describing what properties to filter on.
    :rtype: :class:`list` of :class:`str`
    :return: The :class:`list` of identifiers of all the visible library items
        matching the given ``spec``. The return value will contain
        identifiers for the resource type
        ``com.vmware.content.library.Item``.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if no properties are specified in the ``spec``.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * Method execution requires ``System.Read``.
        * The resource ``com.vmware.content.Library`` referenced by the
          attribute :attr:`Item.FindSpec.library_id` requires
          ``System.Read``.
    """
    call_args = {
        'spec': spec,
    }
    return self._invoke('find', call_args)
def update(self,
           library_item_id,
           update_spec,
           ):
    """
    Updates the specified properties of a library item.

    This is an incremental update: attributes that are None in the update
    specification are left unchanged.

    Items that belong to a subscribed library cannot be updated with this
    method; instead, update the item in the source published library and
    synchronize it to the subscribed library.

    :type  library_item_id: :class:`str`
    :param library_item_id: Identifier of the library item to update.
        The parameter must be an identifier for the resource type:
        ``com.vmware.content.library.Item``.
    :type  update_spec: :class:`ItemModel`
    :param update_spec: Specification of the properties to set.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the library item specified by ``library_item_id`` does not
        exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        if the library item corresponding to ``library_item_id`` is a
        member of a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if one of the following is true for the ``update_spec``:

        * name is empty
        * name exceeds 80 characters
        * description exceeds 2000 characters
        * version is not equal to the current version of the library item
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the library item belongs to a published library with JSON
        persistence enabled (see :attr:`PublishInfo.persist_json_enabled`)
        and the content of the library item specified by
        ``library_item_id`` has been deleted from the storage backings (see
        null) associated with it.
    :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
        if there is already a library item with same name in the library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have all of the privileges described as follows:

        * The resource ``com.vmware.content.library.Item`` referenced by
          the parameter ``library_item_id`` requires
          ``ContentLibrary.UpdateLibraryItem``.
    """
    call_args = {
        'library_item_id': library_item_id,
        'update_spec': update_spec,
    }
    return self._invoke('update', call_args)
def publish(self,
library_item_id,
force_sync_content,
subscriptions=None,
):
"""
Publishes the library item to specified subscriptions of the library.
If no subscriptions are specified, then publishes the library item to
all subscriptions of the library. This method was added in vSphere API
6.7.2.
:type library_item_id: :class:`str`
:param library_item_id: Library item identifier.
The parameter must be an identifier for the resource type:
``com.vmware.content.library.Item``.
:type force_sync_content: :class:`bool`
:param force_sync_content: Whether to synchronize file content as well as metadata. This
parameter applies only if the subscription is on-demand.
:type subscriptions: :class:`list` of :class:`Item.DestinationSpec` or ``None``
:param subscriptions: The list of subscriptions to publish this library item to.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
If the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
If the library item specified by ``library_item_id`` does not
exist.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
If one or more arguments in ``subscriptions`` is not valid.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
If the library item specified by ``library_item_id`` is a member of
a subscribed library.
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
If the library item specified by ``library_item_id`` does not
belong to a published library.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
If the user that requested the method cannot be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
If the user that requested the method is not authorized to perform
the method.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* The resource ``com.vmware.content.library.Item`` referenced by
the parameter ``library_item_id`` requires
``ContentLibrary.PublishLibraryItem``.
"""
return self._invoke('publish',
{
'library_item_id': library_item_id,
'force_sync_content': force_sync_content,
'subscriptions': subscriptions,
})
class SubscribedItem(VapiInterface):
    """
    The ``SubscribedItem`` class manages the unique features of library items
    that are members of a subscribed library.
    """
    _VAPI_SERVICE_ID = 'com.vmware.content.library.subscribed_item'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _SubscribedItemStub)
        self._VAPI_OPERATION_IDS = dict()
    def evict(self,
              library_item_id,
              ):
        """
        Evict the cached content of a library item in a subscribed library,
        freeing up storage capacity. This only works when the library item is
        synchronized on-demand; a library that is not synchronized on-demand
        always attempts to keep its cache up-to-date with the published
        source. Evicting the library item sets :attr:`ItemModel.cached` to
        false.

        :type  library_item_id: :class:`str`
        :param library_item_id: Identifier of the library item whose content
            should be evicted. The parameter must be an identifier for the
            resource type: ``com.vmware.content.library.Item``.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the library item specified by ``library_item_id`` does not
            exist.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
            if the library item specified by ``library_item_id`` is not a
            member of a subscribed library.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementConfiguration`
            if the library item specified by ``library_item_id`` is a member
            of a subscribed library that does not synchronize on-demand.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
            if the content of the library item specified by
            ``library_item_id`` has been deleted from the storage backings
            associated with it. For instance, this error is reported on
            evicting a library item in an on-demand subscribed library that
            was restored from backup when the item was deleted after the
            backup: the item's metadata is present on restore while its
            content has been deleted.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you lack the ``ContentLibrary.EvictLibraryItem`` privilege on
            the ``com.vmware.content.library.Item`` resource referenced by
            ``library_item_id``.
        """
        # Single-argument operation; delegate to the generated stub.
        return self._invoke('evict', {'library_item_id': library_item_id})
    def sync(self,
             library_item_id,
             force_sync_content,
             ):
        """
        Force synchronization of an individual library item in a subscribed
        library. The item's metadata is updated from the remote source; if
        the source library item on the remote library has been deleted, the
        item is deleted from the subscribed library as well.

        The default behavior is determined by the :class:`SubscriptionInfo`
        of the library that owns the item:

        * If :attr:`SubscriptionInfo.on_demand` is true, only the item
          metadata is synchronized by default; file content may still be
          forcefully synchronized by passing true for ``force_sync_content``.
        * If :attr:`SubscriptionInfo.on_demand` is false, file content is
          always synchronized and ``force_sync_content`` is ignored.

        When the file content has been synchronized, the
        :attr:`ItemModel.cached` attribute will be true. This method returns
        immediately and creates an asynchronous task to perform the
        synchronization.

        :type  library_item_id: :class:`str`
        :param library_item_id: Identifier of the library item to
            synchronize. The parameter must be an identifier for the resource
            type: ``com.vmware.content.library.Item``.
        :type  force_sync_content: :class:`bool`
        :param force_sync_content: Whether to synchronize file content as
            well as metadata. This parameter applies only if the subscription
            is on-demand.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the library item specified by ``library_item_id`` could not be
            found.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
            if the library item specified by ``library_item_id`` is not a
            member of a subscribed library.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
            if the content of the library item specified by
            ``library_item_id`` has been deleted from the storage backings
            associated with it. For instance, this error is reported on
            synchronizing a library item in a subscribed library that was
            restored from backup when the item was deleted after the backup:
            the item's metadata is present on restore while its content has
            been deleted.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you lack the ``ContentLibrary.SyncLibraryItem`` privilege on
            the ``com.vmware.content.library.Item`` resource referenced by
            ``library_item_id``.
        """
        # Bundle the operation arguments and delegate to the generated stub.
        call_args = {
            'library_item_id': library_item_id,
            'force_sync_content': force_sync_content,
        }
        return self._invoke('sync', call_args)
class Subscriptions(VapiInterface):
"""
The ``Subscriptions`` class provides methods for managing the subscription
information of the subscribers of a published library. This class was added
in vSphere API 6.7.2.
"""
RESOURCE_TYPE = "com.vmware.content.library.Subscriptions"
"""
Resource type for Subscription resource. This class attribute was added in
vSphere API 6.7.2.
"""
_VAPI_SERVICE_ID = 'com.vmware.content.library.subscriptions'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _SubscriptionsStub)
self._VAPI_OPERATION_IDS = {}
    class Location(Enum):
        """
        The ``Subscriptions.Location`` class defines the location of subscribed
        library relative to the published library. This enumeration was added in
        vSphere API 6.7.2.
        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        LOCAL = None
        """
        The subscribed library belongs to the same vCenter instance as the
        published library. This class attribute was added in vSphere API 6.7.2.
        """
        REMOTE = None
        """
        The subscribed library belongs to a different vCenter instance than the
        published library. This class attribute was added in vSphere API 6.7.2.
        """
        def __init__(self, string):
            """
            :type string: :class:`str`
            :param string: String value for the :class:`Location` instance.
            """
            # NOTE(review): the generated bindings call Enum.__init__ with only
            # the string value (no explicit self); instance construction is
            # presumably handled by the vAPI Enum base's __new__ -- confirm
            # against the vAPI runtime before changing this pattern.
            Enum.__init__(string)
    # Replace the placeholder None class attributes above with real Location
    # instances, then register the enum with the vAPI binding type system
    # under its canonical service name.
    Location._set_values([
        Location('LOCAL'),
        Location('REMOTE'),
    ])
    Location._set_binding_type(type.EnumType(
        'com.vmware.content.library.subscriptions.location',
        Location))
class CreateSpecNewSubscribedLibrary(VapiStruct):
"""
The ``Subscriptions.CreateSpecNewSubscribedLibrary`` class defines the
information required to create a new subscribed library. This class was
added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
name=None,
description=None,
storage_backings=None,
automatic_sync_enabled=None,
on_demand=None,
):
"""
:type name: :class:`str`
:param name: Name of the subscribed library. This attribute was added in vSphere
API 6.7.2.
:type description: :class:`str` or ``None``
:param description: Description of the subscribed library. This attribute was added in
vSphere API 6.7.2.
If None, the description will be an empty string.
:type storage_backings: :class:`list` of :class:`StorageBacking`
:param storage_backings: The list of default storage backings for this library.
The list must contain exactly one storage backing. Multiple default
storage locations are not currently supported but may become
supported in future releases.. This attribute was added in vSphere
API 6.7.2.
:type automatic_sync_enabled: :class:`bool`
:param automatic_sync_enabled: Specifies whether the library should participate in automatic
library synchronization. This attribute was added in vSphere API
6.7.2.
:type on_demand: :class:`bool`
:param on_demand: Specifies whether a library item's content will be synchronized
only on demand. This attribute was added in vSphere API 6.7.2.
"""
self.name = name
self.description = description
self.storage_backings = storage_backings
self.automatic_sync_enabled = automatic_sync_enabled
self.on_demand = on_demand
VapiStruct.__init__(self)
CreateSpecNewSubscribedLibrary._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.create_spec_new_subscribed_library', {
'name': type.StringType(),
'description': type.OptionalType(type.StringType()),
'storage_backings': type.ListType(type.ReferenceType(__name__, 'StorageBacking')),
'automatic_sync_enabled': type.BooleanType(),
'on_demand': type.BooleanType(),
},
CreateSpecNewSubscribedLibrary,
False,
None))
class CreateSpecVcenter(VapiStruct):
"""
The ``Subscriptions.CreateSpecVcenter`` class defines information about the
vCenter Server instance where the subscribed library associated with the
subscription exists or will be created. This class was added in vSphere API
6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
hostname=None,
https_port=None,
):
"""
:type hostname: :class:`str`
:param hostname: The hostname of the subscribed library's vCenter Server. This
attribute was added in vSphere API 6.7.2.
:type https_port: :class:`long` or ``None``
:param https_port: The HTTPS port of the vCenter Server instance where the subscribed
library exists. This attribute was added in vSphere API 6.7.2.
If None, port 443 will be used.
"""
self.hostname = hostname
self.https_port = https_port
VapiStruct.__init__(self)
CreateSpecVcenter._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.create_spec_vcenter', {
'hostname': type.StringType(),
'https_port': type.OptionalType(type.IntegerType()),
},
CreateSpecVcenter,
False,
None))
class CreateSpecPlacement(VapiStruct):
"""
The ``Subscriptions.CreateSpecPlacement`` class defines the placement
information for the subscribed library's virtual machine template library
items. Storage location of the virtual machine template items is defined by
the subscribed library's storage backing. This placement information needs
to be compatible with the subscribed library's storage backing. The
``Subscriptions.CreateSpecPlacement`` class is only applicable for the
virtual machine template library items of the subscribed library. This
class was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
folder=None,
cluster=None,
resource_pool=None,
host=None,
network=None,
):
"""
:type folder: :class:`str` or ``None``
:param folder: Virtual machine folder into which the virtual machine template
should be placed. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``Folder:VCenter``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``Folder:VCenter``.
This attribute is currently required. In future, if this is None,
the system will attempt to choose a suitable folder for the virtual
machine template; if a folder cannot be chosen, publishing a
virtual machine template item will fail.
:type cluster: :class:`str` or ``None``
:param cluster: Cluster onto which the virtual machine template should be placed.
If ``cluster`` and ``resourcePool`` are both specified,
``resourcePool`` must belong to ``cluster``. If ``cluster`` and
``host`` are both specified, ``host`` must be a member of
``cluster``. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``ClusterComputeResource:VCenter``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``ClusterComputeResource:VCenter``.
If ``resourcePool`` or ``host`` is specified, it is recommended
that this attribute be None.
:type resource_pool: :class:`str` or ``None``
:param resource_pool: Resource pool into which the virtual machine template should be
placed. If ``host`` and ``resourcePool`` are both specified,
``resourcePool`` must belong to ``host``. If ``cluster`` and
``resourcePool`` are both specified, ``resourcePool`` must belong
to ``cluster``. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``ResourcePool:VCenter``. When methods return a value of this class
as a return value, the attribute will be an identifier for the
resource type: ``ResourcePool:VCenter``.
This attribute is currently required. In future, if this is None,
the system will attempt to choose a suitable resource pool for the
virtual machine template; if a resource pool cannot be chosen,
publish of virtual machine template item will fail.
:type host: :class:`str` or ``None``
:param host: Host onto which the virtual machine template should be placed. If
``host`` and ``resourcePool`` are both specified, ``resourcePool``
must belong to ``host``. If ``host`` and ``cluster`` are both
specified, ``host`` must be a member of ``cluster``. This attribute
was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``HostSystem:VCenter``. When methods return a value of this class
as a return value, the attribute will be an identifier for the
resource type: ``HostSystem:VCenter``.
If this is None, the system will attempt to choose a suitable host
for the virtual machine template; if a host cannot be chosen,
publishing the virtual machine template item will fail.
:type network: :class:`str` or ``None``
:param network: Network that backs the virtual Ethernet adapters in the virtual
machine template. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``Network:VCenter``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``Network:VCenter``.
If None, the virtual Ethernet adapters will not be backed by a
network.
"""
self.folder = folder
self.cluster = cluster
self.resource_pool = resource_pool
self.host = host
self.network = network
VapiStruct.__init__(self)
CreateSpecPlacement._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.create_spec_placement', {
'folder': type.OptionalType(type.IdType()),
'cluster': type.OptionalType(type.IdType()),
'resource_pool': type.OptionalType(type.IdType()),
'host': type.OptionalType(type.IdType()),
'network': type.OptionalType(type.IdType()),
},
CreateSpecPlacement,
False,
None))
class CreateSpecSubscribedLibrary(VapiStruct):
"""
The ``Subscriptions.CreateSpecSubscribedLibrary`` class defines the
subscribed library information used to create the subscription. This class
was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'target',
{
'CREATE_NEW' : [('new_subscribed_library', True)],
'USE_EXISTING' : [('subscribed_library', True)],
}
),
UnionValidator(
'location',
{
'REMOTE' : [('vcenter', True)],
'LOCAL' : [],
}
),
]
def __init__(self,
target=None,
new_subscribed_library=None,
subscribed_library=None,
location=None,
vcenter=None,
placement=None,
):
"""
:type target: :class:`Subscriptions.CreateSpecSubscribedLibrary.Target`
:param target: Specifies whether the target subscribed library should be newly
created or an existing subscribed library should be used. This
attribute was added in vSphere API 6.7.2.
:type new_subscribed_library: :class:`Subscriptions.CreateSpecNewSubscribedLibrary`
:param new_subscribed_library: Specification for creating a new subscribed library associated with
the subscription. This attribute was added in vSphere API 6.7.2.
This attribute is optional and it is only relevant when the value
of ``target`` is
:attr:`Subscriptions.CreateSpecSubscribedLibrary.Target.CREATE_NEW`.
:type subscribed_library: :class:`str`
:param subscribed_library: Identifier of the existing subscribed library to associate with the
subscription. Only the subscribed libraries for which
:attr:`SubscriptionInfo.subscription_url` property is set to the
:attr:`PublishInfo.publish_url` of the published library can be
associated with the subscription. This attribute was added in
vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.content.Library``. When methods return a value of this
class as a return value, the attribute will be an identifier for
the resource type: ``com.vmware.content.Library``.
This attribute is optional and it is only relevant when the value
of ``target`` is
:attr:`Subscriptions.CreateSpecSubscribedLibrary.Target.USE_EXISTING`.
:type location: :class:`Subscriptions.Location`
:param location: Location of the subscribed library relative to the published
library. This attribute was added in vSphere API 6.7.2.
:type vcenter: :class:`Subscriptions.CreateSpecVcenter`
:param vcenter: Specification for the subscribed library's vCenter Server instance.
This attribute was added in vSphere API 6.7.2.
This attribute is optional and it is only relevant when the value
of ``location`` is :attr:`Subscriptions.Location.REMOTE`.
:type placement: :class:`Subscriptions.CreateSpecPlacement` or ``None``
:param placement: Placement specification for the virtual machine template library
items on the subscribed library. This attribute was added in
vSphere API 6.7.2.
This attribute is currently required. In future, if this is None,
the system will attempt to choose a suitable placement
specification for the virtual machine template items; if a
placement specification cannot be chosen, publish of virtual
machine template items will fail.
"""
self.target = target
self.new_subscribed_library = new_subscribed_library
self.subscribed_library = subscribed_library
self.location = location
self.vcenter = vcenter
self.placement = placement
VapiStruct.__init__(self)
class Target(Enum):
"""
The ``Subscriptions.CreateSpecSubscribedLibrary.Target`` class defines the
options related to the target subscribed library which will be associated
with the subscription. This enumeration was added in vSphere API 6.7.2.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
CREATE_NEW = None
"""
Create a new subscribed library. This class attribute was added in vSphere
API 6.7.2.
"""
USE_EXISTING = None
"""
Use the specified existing subscribed library. This class attribute was
added in vSphere API 6.7.2.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Target` instance.
"""
Enum.__init__(string)
Target._set_values([
Target('CREATE_NEW'),
Target('USE_EXISTING'),
])
Target._set_binding_type(type.EnumType(
'com.vmware.content.library.subscriptions.create_spec_subscribed_library.target',
Target))
CreateSpecSubscribedLibrary._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.create_spec_subscribed_library', {
'target': type.ReferenceType(__name__, 'Subscriptions.CreateSpecSubscribedLibrary.Target'),
'new_subscribed_library': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.CreateSpecNewSubscribedLibrary')),
'subscribed_library': type.OptionalType(type.IdType()),
'location': type.ReferenceType(__name__, 'Subscriptions.Location'),
'vcenter': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.CreateSpecVcenter')),
'placement': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.CreateSpecPlacement')),
},
CreateSpecSubscribedLibrary,
False,
None))
class CreateSpec(VapiStruct):
"""
The ``Subscriptions.CreateSpec`` class defines the information required to
create a new subscription of the published library. This class was added in
vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
subscribed_library=None,
):
"""
:type subscribed_library: :class:`Subscriptions.CreateSpecSubscribedLibrary`
:param subscribed_library: Specification for the subscribed library to be associated with the
subscription. This attribute was added in vSphere API 6.7.2.
"""
self.subscribed_library = subscribed_library
VapiStruct.__init__(self)
CreateSpec._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.create_spec', {
'subscribed_library': type.ReferenceType(__name__, 'Subscriptions.CreateSpecSubscribedLibrary'),
},
CreateSpec,
False,
None))
class Summary(VapiStruct):
"""
The ``Subscriptions.Summary`` class contains commonly used information
about the subscription. This class was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
subscription=None,
subscribed_library=None,
subscribed_library_name=None,
subscribed_library_vcenter_hostname=None,
):
"""
:type subscription: :class:`str`
:param subscription: Identifier of the subscription. This attribute was added in vSphere
API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.content.library.Subscriptions``. When methods return a
value of this class as a return value, the attribute will be an
identifier for the resource type:
``com.vmware.content.library.Subscriptions``.
:type subscribed_library: :class:`str`
:param subscribed_library: Identifier of the subscribed library. This attribute was added in
vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.content.Library``. When methods return a value of this
class as a return value, the attribute will be an identifier for
the resource type: ``com.vmware.content.Library``.
:type subscribed_library_name: :class:`str`
:param subscribed_library_name: Name of the subscribed library. This attribute was added in vSphere
API 6.7.2.
:type subscribed_library_vcenter_hostname: :class:`str` or ``None``
:param subscribed_library_vcenter_hostname: Hostname of the vCenter instance where the subscribed library
exists. This attribute was added in vSphere API 6.7.2.
This attribute is unset if the subscribed library is on the same
vCenter Server instance as the published library.
"""
self.subscription = subscription
self.subscribed_library = subscribed_library
self.subscribed_library_name = subscribed_library_name
self.subscribed_library_vcenter_hostname = subscribed_library_vcenter_hostname
VapiStruct.__init__(self)
Summary._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.summary', {
'subscription': type.IdType(resource_types='com.vmware.content.library.Subscriptions'),
'subscribed_library': type.IdType(resource_types='com.vmware.content.Library'),
'subscribed_library_name': type.StringType(),
'subscribed_library_vcenter_hostname': type.OptionalType(type.StringType()),
},
Summary,
False,
None))
class UpdateSpecVcenter(VapiStruct):
"""
The ``Subscriptions.UpdateSpecVcenter`` class defines information about the
vCenter Server instance where the subscribed library associated with the
subscription exists. The ``Subscriptions.UpdateSpecVcenter`` class is only
applicable to subscribed library which exists on remote vCenter Server
instance. This class was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
hostname=None,
https_port=None,
):
"""
:type hostname: :class:`str` or ``None``
:param hostname: The hostname of the subscribed library's vCenter Server. This
attribute was added in vSphere API 6.7.2.
If None, the value is unchanged.
:type https_port: :class:`long` or ``None``
:param https_port: The HTTPS port of the vCenter Server instance where the subscribed
library exists. This attribute was added in vSphere API 6.7.2.
If None, the value is unchanged.
"""
self.hostname = hostname
self.https_port = https_port
VapiStruct.__init__(self)
UpdateSpecVcenter._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.update_spec_vcenter', {
'hostname': type.OptionalType(type.StringType()),
'https_port': type.OptionalType(type.IntegerType()),
},
UpdateSpecVcenter,
False,
None))
class UpdateSpecPlacement(VapiStruct):
"""
The ``Subscriptions.UpdateSpecPlacement`` class defines the placement
information for the subscribed library's virtual machine template library
items. Storage location of the virtual machine template items is defined by
the subscribed library's storage backing. This placement information needs
to be compatible with the subscribed library's storage backing. The
``Subscriptions.UpdateSpecPlacement`` class is only applicable for the
newly published virtual machine template library items of the subscribed
library. Existing items will not be moved. This class was added in vSphere
API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
folder=None,
cluster=None,
resource_pool=None,
host=None,
network=None,
):
"""
:type folder: :class:`str` or ``None``
:param folder: Virtual machine folder into which the virtual machine template
should be placed. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``Folder:VCenter``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``Folder:VCenter``.
This attribute is currently required. In future, if this is None,
the system will attempt to choose a suitable folder for the virtual
machine template; if a folder cannot be chosen, publishing a
virtual machine template item will fail.
:type cluster: :class:`str` or ``None``
:param cluster: Cluster onto which the virtual machine template should be placed.
If ``cluster`` and ``resourcePool`` are both specified,
``resourcePool`` must belong to ``cluster``. If ``cluster`` and
``host`` are both specified, ``host`` must be a member of
``cluster``. If ``resourcePool`` or ``host`` is specified, it is
recommended that this attribute be None. This attribute was added
in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``ClusterComputeResource:VCenter``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``ClusterComputeResource:VCenter``.
If ``resourcePool`` or ``host`` is specified, it is recommended
that this attribute be None.
:type resource_pool: :class:`str` or ``None``
:param resource_pool: Resource pool into which the virtual machine template should be
placed. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``ResourcePool:VCenter``. When methods return a value of this class
as a return value, the attribute will be an identifier for the
resource type: ``ResourcePool:VCenter``.
This attribute is currently required. In future, if this is None,
the system will attempt to choose a suitable resource pool for the
virtual machine template; if a resource pool cannot be chosen,
publish of virtual machine template item will fail.
:type host: :class:`str` or ``None``
:param host: Host onto which the virtual machine template should be placed. If
``host`` and ``resourcePool`` are both specified, ``resourcePool``
must belong to ``host``. If ``host`` and ``cluster`` are both
specified, ``host`` must be a member of ``cluster``. This attribute
was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``HostSystem:VCenter``. When methods return a value of this class
as a return value, the attribute will be an identifier for the
resource type: ``HostSystem:VCenter``.
If this is None, the system will attempt to choose a suitable host
for the virtual machine template; if a host cannot be chosen,
publishing the virtual machine template item will fail.
:type network: :class:`str` or ``None``
:param network: Network that backs the virtual Ethernet adapters in the virtual
machine template. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``Network:VCenter``. When methods return a value of this class as a
return value, the attribute will be an identifier for the resource
type: ``Network:VCenter``.
If None, newly published virtual machine template library items
will not be backed by a network.
"""
self.folder = folder
self.cluster = cluster
self.resource_pool = resource_pool
self.host = host
self.network = network
VapiStruct.__init__(self)
UpdateSpecPlacement._set_binding_type(type.StructType(
'com.vmware.content.library.subscriptions.update_spec_placement', {
'folder': type.OptionalType(type.IdType()),
'cluster': type.OptionalType(type.IdType()),
'resource_pool': type.OptionalType(type.IdType()),
'host': type.OptionalType(type.IdType()),
'network': type.OptionalType(type.IdType()),
},
UpdateSpecPlacement,
False,
None))
class UpdateSpec(VapiStruct):
    """
    Incremental update specification for a subscription of a published
    library. This class was added in vSphere API 6.7.2.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 subscribed_library_vcenter=None,
                 subscribed_library_placement=None,
                 ):
        """
        :type  subscribed_library_vcenter: :class:`Subscriptions.UpdateSpecVcenter` or ``None``
        :param subscribed_library_vcenter: Specification for the subscribed library's vCenter Server
            instance (vSphere API 6.7.2+). Left unchanged when None.
        :type  subscribed_library_placement: :class:`Subscriptions.UpdateSpecPlacement` or ``None``
        :param subscribed_library_placement: Placement specification for the subscribed library's virtual
            machine template items (vSphere API 6.7.2+). Only new or
            updated items are affected; existing items are not moved, but
            the entire current placement configuration is replaced by this
            specification. Left unchanged when None.
        """
        # Plain attribute assignment; the binding type registered below
        # drives (de)serialization through VapiStruct.
        self.subscribed_library_placement = subscribed_library_placement
        self.subscribed_library_vcenter = subscribed_library_vcenter
        VapiStruct.__init__(self)

UpdateSpec._set_binding_type(type.StructType(
    'com.vmware.content.library.subscriptions.update_spec', {
        'subscribed_library_vcenter': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.UpdateSpecVcenter')),
        'subscribed_library_placement': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.UpdateSpecPlacement')),
    },
    UpdateSpec,
    False,
    None))
class VcenterInfo(VapiStruct):
    """
    Information about the vCenter Server instance where the subscribed
    library associated with the subscription exists. This class was added
    in vSphere API 6.7.2.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 hostname=None,
                 https_port=None,
                 server_guid=None,
                 ):
        """
        :type  hostname: :class:`str`
        :param hostname: Hostname of the vCenter Server instance hosting the subscribed
            library (vSphere API 6.7.2+).
        :type  https_port: :class:`long` or ``None``
        :param https_port: HTTPS port of that vCenter Server instance (vSphere API
            6.7.2+). Port 443 is used when None.
        :type  server_guid: :class:`str`
        :param server_guid: Unique identifier of the vCenter Server where the subscribed
            library exists (vSphere API 6.7.2+).
        """
        # Straight field assignments; serialization is handled by the
        # struct binding registered below.
        self.server_guid = server_guid
        self.https_port = https_port
        self.hostname = hostname
        VapiStruct.__init__(self)

VcenterInfo._set_binding_type(type.StructType(
    'com.vmware.content.library.subscriptions.vcenter_info', {
        'hostname': type.StringType(),
        'https_port': type.OptionalType(type.IntegerType()),
        'server_guid': type.StringType(),
    },
    VcenterInfo,
    False,
    None))
class PlacementInfo(VapiStruct):
    """
    Placement information for the subscribed library's virtual machine
    template library items. Only applicable to the virtual machine
    template items of the subscribed library. This class was added in
    vSphere API 6.7.2.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    def __init__(self,
                 folder=None,
                 cluster=None,
                 resource_pool=None,
                 host=None,
                 network=None,
                 ):
        """
        :type  folder: :class:`str` or ``None``
        :param folder: Virtual machine folder into which the virtual machine template
            is placed (vSphere API 6.7.2+). Identifier for resource type
            ``Folder:VCenter``. None when the subscribed library associated
            with the subscription has no virtual machine folder.
        :type  cluster: :class:`str` or ``None``
        :param cluster: Cluster onto which the virtual machine template is placed
            (vSphere API 6.7.2+). Identifier for resource type
            ``ClusterComputeResource:VCenter``. None when the subscribed
            library has no cluster.
        :type  resource_pool: :class:`str` or ``None``
        :param resource_pool: Resource pool into which the virtual machine template is placed
            (vSphere API 6.7.2+). Identifier for resource type
            ``ResourcePool:VCenter``. None when the subscribed library has
            no resource pool.
        :type  host: :class:`str` or ``None``
        :param host: Host onto which the virtual machine template is placed (vSphere
            API 6.7.2+). If both ``host`` and ``resourcePool`` are set,
            ``resourcePool`` must belong to ``host``; if both ``host`` and
            ``cluster`` are set, ``host`` must be a member of ``cluster``.
            Identifier for resource type ``HostSystem:VCenter``. None when
            the subscribed library has no host.
        :type  network: :class:`str` or ``None``
        :param network: Network backing the virtual Ethernet adapters in the virtual
            machine template (vSphere API 6.7.2+). Identifier for resource
            type ``Network:VCenter``. None when the subscribed library has
            no network.
        """
        # Field assignments only; the struct binding registered below
        # supplies the wire-format metadata.
        self.network = network
        self.host = host
        self.resource_pool = resource_pool
        self.cluster = cluster
        self.folder = folder
        VapiStruct.__init__(self)

PlacementInfo._set_binding_type(type.StructType(
    'com.vmware.content.library.subscriptions.placement_info', {
        'folder': type.OptionalType(type.IdType()),
        'cluster': type.OptionalType(type.IdType()),
        'resource_pool': type.OptionalType(type.IdType()),
        'host': type.OptionalType(type.IdType()),
        'network': type.OptionalType(type.IdType()),
    },
    PlacementInfo,
    False,
    None))
class Info(VapiStruct):
    """
    Information about a subscription of a published library. This class
    was added in vSphere API 6.7.2.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    # subscribed_library_vcenter is mandatory only for REMOTE subscribed
    # libraries; the union validator enforces that relationship.
    _validator_list = [
        UnionValidator(
            'subscribed_library_location',
            {
                'REMOTE': [('subscribed_library_vcenter', True)],
                'LOCAL': [],
            }
        ),
    ]

    def __init__(self,
                 subscribed_library=None,
                 subscribed_library_name=None,
                 subscribed_library_location=None,
                 subscribed_library_vcenter=None,
                 subscribed_library_placement=None,
                 ):
        """
        :type  subscribed_library: :class:`str`
        :param subscribed_library: Identifier of the subscribed library associated with the
            subscription (vSphere API 6.7.2+). Identifier for resource
            type ``com.vmware.content.Library``.
        :type  subscribed_library_name: :class:`str`
        :param subscribed_library_name: Name of the subscribed library associated with the subscription
            (vSphere API 6.7.2+).
        :type  subscribed_library_location: :class:`Subscriptions.Location`
        :param subscribed_library_location: Location of the subscribed library relative to the published
            library (vSphere API 6.7.2+).
        :type  subscribed_library_vcenter: :class:`Subscriptions.VcenterInfo`
        :param subscribed_library_vcenter: Information about the vCenter Server instance where the
            subscribed library exists (vSphere API 6.7.2+). Optional; only
            relevant when ``subscribedLibraryLocation`` is
            :attr:`Subscriptions.Location.REMOTE`.
        :type  subscribed_library_placement: :class:`Subscriptions.PlacementInfo`
        :param subscribed_library_placement: Placement information about the subscribed library's virtual
            machine template items (vSphere API 6.7.2+).
        """
        self.subscribed_library_placement = subscribed_library_placement
        self.subscribed_library_vcenter = subscribed_library_vcenter
        self.subscribed_library_location = subscribed_library_location
        self.subscribed_library_name = subscribed_library_name
        self.subscribed_library = subscribed_library
        VapiStruct.__init__(self)

Info._set_binding_type(type.StructType(
    'com.vmware.content.library.subscriptions.info', {
        'subscribed_library': type.IdType(resource_types='com.vmware.content.Library'),
        'subscribed_library_name': type.StringType(),
        'subscribed_library_location': type.ReferenceType(__name__, 'Subscriptions.Location'),
        'subscribed_library_vcenter': type.OptionalType(type.ReferenceType(__name__, 'Subscriptions.VcenterInfo')),
        'subscribed_library_placement': type.ReferenceType(__name__, 'Subscriptions.PlacementInfo'),
    },
    Info,
    False,
    None))
def create(self,
           library,
           spec,
           client_token=None,
           ):
    """
    Create a subscription of the published library. This method was added
    in vSphere API 6.7.2.

    :type  client_token: :class:`str` or ``None``
    :param client_token: A unique client-generated token (UUID, e.g.
        ``b8a2a2e3-2314-43cd-a871-6ede0f429751``) used to guarantee
        idempotent creation. Creation is not idempotent when unset.
    :type  library: :class:`str`
    :param library: Identifier of the published library (resource type
        ``com.vmware.content.Library``).
    :type  spec: :class:`Subscriptions.CreateSpec`
    :param spec: Specification for the subscription.
    :rtype: :class:`str`
    :return: Subscription identifier (resource type
        ``com.vmware.content.library.Subscriptions``).
    :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
        If a subscription of the published library to the specified
        subscribed library already exists (only when
        ``subscribedLibrary#subscribedLibrary`` is specified).
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        If the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        If ``library`` does not exist, or the subscribed library specified
        by ``subscribedLibrary#subscribedLibrary`` does not exist at the
        vCenter instance specified by ``subscribedLibrary#vcenter``.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
        If the vCenter instance specified by ``subscribedLibrary#vcenter``
        cannot be contacted or found.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        If :class:`Subscriptions.CreateSpec` contains invalid arguments.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        If ``library`` refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        If ``library`` is not a published library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        If the user cannot be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        If the user is not authorized, or lacks the
        ``ContentLibrary.AddSubscription`` privilege on the resource
        ``com.vmware.content.Library`` referenced by ``library``.
    """
    # Marshal the arguments and delegate to the vAPI invocation layer.
    operation_args = {
        'client_token': client_token,
        'library': library,
        'spec': spec,
    }
    return self._invoke('create', operation_args)
def delete(self,
           library,
           subscription,
           ):
    """
    Delete the specified subscription of the published library. The
    subscribed library associated with the subscription is not deleted.
    This method was added in vSphere API 6.7.2.

    :type  library: :class:`str`
    :param library: Identifier of the published library (resource type
        ``com.vmware.content.Library``).
    :type  subscription: :class:`str`
    :param subscription: Subscription identifier (resource type
        ``com.vmware.content.library.Subscriptions``).
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        If the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        If ``library`` refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        If ``library`` is not a published library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        If ``library`` does not exist, or ``subscription`` does not exist
        for the library specified by ``library``.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        If the user cannot be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        If the user is not authorized, or lacks the
        ``ContentLibrary.DeleteSubscription`` privilege on the resource
        ``com.vmware.content.Library`` referenced by ``library``.
    """
    # Marshal the arguments and delegate to the vAPI invocation layer.
    operation_args = {
        'library': library,
        'subscription': subscription,
    }
    return self._invoke('delete', operation_args)
def list(self,
         library,
         ):
    """
    List the subscriptions of the published library. This method was
    added in vSphere API 6.7.2.

    :type  library: :class:`str`
    :param library: Identifier of the published library (resource type
        ``com.vmware.content.Library``).
    :rtype: :class:`list` of :class:`Subscriptions.Summary`
    :return: List of commonly used information about subscriptions of the
        published library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        If the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        If ``library`` refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        If ``library`` does not exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        If ``library`` is not a published library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        If the user cannot be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        If the user is not authorized, or lacks the ``System.Read``
        privilege on the resource ``com.vmware.content.Library``
        referenced by ``library``.
    """
    # Marshal the argument and delegate to the vAPI invocation layer.
    operation_args = {
        'library': library,
    }
    return self._invoke('list', operation_args)
def update(self,
           library,
           subscription,
           spec,
           ):
    """
    Update the specified subscription of the published library.

    This is an incremental update: except for the
    :class:`Subscriptions.UpdateSpecPlacement` class, attributes that are
    None in the update specification are left unchanged. If
    ``spec#subscribedLibraryPlacement`` is specified, all attributes of
    the current subscribed library placement are replaced by that
    placement. This method was added in vSphere API 6.7.2.

    :type  library: :class:`str`
    :param library: Identifier of the published library (resource type
        ``com.vmware.content.Library``).
    :type  subscription: :class:`str`
    :param subscription: Subscription identifier (resource type
        ``com.vmware.content.library.Subscriptions``).
    :type  spec: :class:`Subscriptions.UpdateSpec`
    :param spec: Specification of the new property values to set on the
        subscription.
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        If the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        If ``library`` does not exist, or ``subscription`` does not exist
        for the library specified by ``library``.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
        If the subscribed library cannot be contacted or found.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        If :class:`Subscriptions.UpdateSpec` contains invalid arguments.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        If ``library`` refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        If ``library`` is not a published library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        If the user cannot be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        If the user is not authorized, or lacks the
        ``ContentLibrary.UpdateSubscription`` privilege on the resource
        ``com.vmware.content.Library`` referenced by ``library``.
    """
    # Marshal the arguments and delegate to the vAPI invocation layer.
    operation_args = {
        'library': library,
        'subscription': subscription,
        'spec': spec,
    }
    return self._invoke('update', operation_args)
def get(self,
        library,
        subscription,
        ):
    """
    Return information about the specified subscription of the published
    library. This method was added in vSphere API 6.7.2.

    :type  library: :class:`str`
    :param library: Identifier of the published library (resource type
        ``com.vmware.content.Library``).
    :type  subscription: :class:`str`
    :param subscription: Identifier of the subscription (resource type
        ``com.vmware.content.library.Subscriptions``).
    :rtype: :class:`Subscriptions.Info`
    :return: Information about the subscription.
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        If the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        If ``library`` does not exist.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        If ``subscription`` is not valid.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidElementType`
        If ``library`` refers to a subscribed library.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        If ``library`` is not a published library.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        If the user cannot be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        If the user is not authorized, or lacks the ``System.Read``
        privilege on the resource ``com.vmware.content.Library``
        referenced by ``library``.
    """
    # Marshal the arguments and delegate to the vAPI invocation layer.
    operation_args = {
        'library': library,
        'subscription': subscription,
    }
    return self._invoke('get', operation_args)
class _ItemStub(ApiInterfaceStub):
    """Wire stub for the ``com.vmware.content.library.item`` interface."""

    def __init__(self, config):
        # Every operation reports errors via classes in the shared vAPI
        # standard-errors module; a tiny factory keeps the tables terse.
        _ERRORS = 'com.vmware.vapi.std.errors_client'

        def _err(name):
            return type.ReferenceType(_ERRORS, name)

        # --- copy operation ---
        copy_input_type = type.StructType('operation-input', {
            'client_token': type.OptionalType(type.StringType()),
            'source_library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
            'destination_create_spec': type.ReferenceType(__name__, 'ItemModel'),
        })
        copy_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.invalid_argument': _err('InvalidArgument'),
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.resource_inaccessible': _err('ResourceInaccessible'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
        }
        copy_input_value_validator_list = []
        copy_output_validator_list = []
        copy_rest_metadata = None

        # --- create operation ---
        create_input_type = type.StructType('operation-input', {
            'client_token': type.OptionalType(type.StringType()),
            'create_spec': type.ReferenceType(__name__, 'ItemModel'),
        })
        create_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.invalid_argument': _err('InvalidArgument'),
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.already_exists': _err('AlreadyExists'),
        }
        create_input_value_validator_list = []
        create_output_validator_list = []
        create_rest_metadata = None

        # --- delete operation ---
        delete_input_type = type.StructType('operation-input', {
            'library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
        }
        delete_input_value_validator_list = []
        delete_output_validator_list = []
        delete_rest_metadata = None

        # --- get operation ---
        get_input_type = type.StructType('operation-input', {
            'library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
        }
        get_input_value_validator_list = []
        get_output_validator_list = []
        get_rest_metadata = None

        # --- list operation ---
        list_input_type = type.StructType('operation-input', {
            'library_id': type.IdType(resource_types='com.vmware.content.Library'),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
        }
        list_input_value_validator_list = []
        list_output_validator_list = []
        list_rest_metadata = None

        # --- find operation ---
        find_input_type = type.StructType('operation-input', {
            'spec': type.ReferenceType(__name__, 'Item.FindSpec'),
        })
        find_error_dict = {
            'com.vmware.vapi.std.errors.invalid_argument': _err('InvalidArgument'),
        }
        find_input_value_validator_list = []
        find_output_validator_list = []
        find_rest_metadata = None

        # --- update operation ---
        update_input_type = type.StructType('operation-input', {
            'library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
            'update_spec': type.ReferenceType(__name__, 'ItemModel'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.invalid_argument': _err('InvalidArgument'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.already_exists': _err('AlreadyExists'),
        }
        update_input_value_validator_list = []
        update_output_validator_list = []
        update_rest_metadata = None

        # --- publish operation ---
        publish_input_type = type.StructType('operation-input', {
            'library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
            'force_sync_content': type.BooleanType(),
            'subscriptions': type.OptionalType(type.ListType(type.ReferenceType(__name__, 'Item.DestinationSpec'))),
        })
        publish_error_dict = {
            'com.vmware.vapi.std.errors.error': _err('Error'),
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.invalid_argument': _err('InvalidArgument'),
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.unauthenticated': _err('Unauthenticated'),
            'com.vmware.vapi.std.errors.unauthorized': _err('Unauthorized'),
        }
        publish_input_value_validator_list = []
        publish_output_validator_list = []
        publish_rest_metadata = None

        # Operation table consumed by the vAPI runtime dispatcher.
        operations = {
            'copy': {
                'input_type': copy_input_type,
                'output_type': type.IdType(resource_types='com.vmware.content.library.Item'),
                'errors': copy_error_dict,
                'input_value_validator_list': copy_input_value_validator_list,
                'output_validator_list': copy_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'create': {
                'input_type': create_input_type,
                'output_type': type.IdType(resource_types='com.vmware.content.library.Item'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType(__name__, 'ItemModel'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ListType(type.IdType()),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'find': {
                'input_type': find_input_type,
                'output_type': type.ListType(type.IdType()),
                'errors': find_error_dict,
                'input_value_validator_list': find_input_value_validator_list,
                'output_validator_list': find_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.VoidType(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'publish': {
                'input_type': publish_input_type,
                'output_type': type.VoidType(),
                'errors': publish_error_dict,
                'input_value_validator_list': publish_input_value_validator_list,
                'output_validator_list': publish_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'copy': copy_rest_metadata,
            'create': create_rest_metadata,
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'find': find_rest_metadata,
            'update': update_rest_metadata,
            'publish': publish_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.content.library.item',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class _SubscribedItemStub(ApiInterfaceStub):
    """Wire stub for the ``com.vmware.content.library.subscribed_item`` interface."""

    def __init__(self, config):
        # All errors reference classes in the shared vAPI standard-errors
        # module; a local factory avoids repeating the module path.
        _ERRORS = 'com.vmware.vapi.std.errors_client'

        def _err(name):
            return type.ReferenceType(_ERRORS, name)

        # --- evict operation ---
        evict_input_type = type.StructType('operation-input', {
            'library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
        })
        evict_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.invalid_element_configuration': _err('InvalidElementConfiguration'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
        }
        evict_input_value_validator_list = []
        evict_output_validator_list = []
        evict_rest_metadata = None

        # --- sync operation ---
        sync_input_type = type.StructType('operation-input', {
            'library_item_id': type.IdType(resource_types='com.vmware.content.library.Item'),
            'force_sync_content': type.BooleanType(),
        })
        sync_error_dict = {
            'com.vmware.vapi.std.errors.not_found': _err('NotFound'),
            'com.vmware.vapi.std.errors.invalid_element_type': _err('InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state': _err('NotAllowedInCurrentState'),
        }
        sync_input_value_validator_list = []
        sync_output_validator_list = []
        sync_rest_metadata = None

        # Operation table consumed by the vAPI runtime dispatcher.
        operations = {
            'evict': {
                'input_type': evict_input_type,
                'output_type': type.VoidType(),
                'errors': evict_error_dict,
                'input_value_validator_list': evict_input_value_validator_list,
                'output_validator_list': evict_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'sync': {
                'input_type': sync_input_type,
                'output_type': type.VoidType(),
                'errors': sync_error_dict,
                'input_value_validator_list': sync_input_value_validator_list,
                'output_validator_list': sync_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'evict': evict_rest_metadata,
            'sync': sync_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.content.library.subscribed_item',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class _SubscriptionsStub(ApiInterfaceStub):
    # Generated-style vAPI stub for 'com.vmware.content.library.subscriptions':
    # builds per-operation input types, error maps and validator lists, then
    # registers them with the ApiInterfaceStub base class.
    def __init__(self, config):
        # properties for create operation
        create_input_type = type.StructType('operation-input', {
            'client_token': type.OptionalType(type.StringType()),
            'library': type.IdType(resource_types='com.vmware.content.Library'),
            'spec': type.ReferenceType(__name__, 'Subscriptions.CreateSpec'),
        })
        # Maps wire-level error ids to their client-side error types.
        create_error_dict = {
            'com.vmware.vapi.std.errors.already_exists':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyExists'),
            'com.vmware.vapi.std.errors.error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.resource_inaccessible':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
            'com.vmware.vapi.std.errors.invalid_argument':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
            'com.vmware.vapi.std.errors.invalid_element_type':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.unauthenticated':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        create_input_value_validator_list = [
        ]
        create_output_validator_list = [
        ]
        create_rest_metadata = None
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'library': type.IdType(resource_types='com.vmware.content.Library'),
            'subscription': type.IdType(resource_types='com.vmware.content.library.Subscriptions'),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
            'com.vmware.vapi.std.errors.invalid_element_type':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthenticated':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = None
        # properties for list operation
        list_input_type = type.StructType('operation-input', {
            'library': type.IdType(resource_types='com.vmware.content.Library'),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
            'com.vmware.vapi.std.errors.invalid_element_type':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidElementType'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.unauthenticated':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = None
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'library': type.IdType(resource_types='com.vmware.content.Library'),
            'subscription': type.IdType(resource_types='com.vmware.content.library.Subscriptions'),
            'spec': type.ReferenceType(__name__, 'Subscriptions.UpdateSpec'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.resource_inaccessible':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
            'com.vmware.vapi.std.errors.invalid_argument':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
            'com.vmware.vapi.std.errors.invalid_element_type':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.unauthenticated':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = None
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'library': type.IdType(resource_types='com.vmware.content.Library'),
            'subscription': type.IdType(resource_types='com.vmware.content.library.Subscriptions'),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.invalid_argument':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
            'com.vmware.vapi.std.errors.invalid_element_type':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidElementType'),
            'com.vmware.vapi.std.errors.not_allowed_in_current_state':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
            'com.vmware.vapi.std.errors.unauthenticated':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = None
        # Aggregate the per-operation metadata for the base-class constructor.
        operations = {
            'create': {
                'input_type': create_input_type,
                'output_type': type.IdType(resource_types='com.vmware.content.library.Subscriptions'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ListType(type.ReferenceType(__name__, 'Subscriptions.Summary')),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.VoidType(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType(__name__, 'Subscriptions.Info'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'create': create_rest_metadata,
            'delete': delete_rest_metadata,
            'list': list_rest_metadata,
            'update': update_rest_metadata,
            'get': get_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.content.library.subscriptions',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class StubFactory(StubFactoryBase):
    # Maps attribute names to the service stub classes of this client module;
    # the 'item' entry lazily references a nested sub-package factory by
    # dotted path.
    _attrs = {
        'Item': Item,
        'SubscribedItem': SubscribedItem,
        'Subscriptions': Subscriptions,
        'item': 'com.vmware.content.library.item_client.StubFactory',
    }
| StarcoderdataPython |
11244861 | <reponame>Nikandros1997/Sessions-2.0<gh_stars>0
from .Entity import Entity
import os
from ..Helpers import DEV_MODE, running_from, running_apps
class Software(Entity):
    """Session entity that snapshots the currently running macOS apps so a
    named session can later be restored (re-opened) or torn down.

    Running apps are persisted to ``<session>-software.ses``; apps the user
    chose to ignore live in the shared ``.ignore`` file.
    """

    def __init__(self, session_name):
        super().__init__(session_name)
        self.ignoreFile = self.format_file_name('.ignore')
        if not session_name:
            # No session selected: only ignore-list operations are usable.
            return
        self.file = self.format_file_name(f'{self.session_name}-software.ses')

    def ignore(self, to_be_ignored_apps):
        """Toggle each given app on the ignore list.

        Apps already ignored are un-ignored; new ones are added. Aborts
        without changing anything if any requested app is not running.
        """
        # TODO: Create an error message for when an app does not exist in the installed apps
        currently_running = running_apps()  # hoisted: one system query instead of one per app
        for ignored_app in to_be_ignored_apps:
            if ignored_app not in currently_running:
                print('Error: This app is not running at the moment.')
                self.list_running_apps()
                print('Choose an app of the above to ignore')
                return
        ignored_apps = self.retrieve_ignored_apps()
        for ignored_app in to_be_ignored_apps:
            if ignored_app in ignored_apps:
                ignored_apps.remove(ignored_app)
            else:
                ignored_apps.append(ignored_app)
        with open(self.ignoreFile, 'w') as text_file:
            for app in ignored_apps:
                text_file.write(f'{app}\n')

    def store(self):
        """Persist every running, non-ignored app to the session file, then
        close the stored apps."""
        ignored_apps = self.retrieve_ignored_apps()
        # store all running apps that are not ignored
        with open(self.file, 'w') as text_file:
            for app in running_apps():
                if app not in ignored_apps:
                    text_file.write(f'{app}\n')
        self.close_apps()

    def restore(self):
        """Re-open every app recorded in the session file."""
        apps_to_be_loaded = []
        with open(self.file, 'r') as text_file:
            for application in text_file:
                apps_to_be_loaded.append(application.split('\n')[0] + '.app')
        for app in apps_to_be_loaded:
            # Escape spaces so the shell treats the app name as one argument.
            os.system('open -a ' + app.replace(' ', '\\ '))

    def list_running_apps(self):
        """Print running apps; ignored ones are printed last and marked."""
        ignored_apps = self.retrieve_ignored_apps()
        for app in self.show_only_apps_not_ignored():
            print(app)
        for app in running_apps():
            if app in ignored_apps:
                print(app, '(Ignored)')

    def list_active_sessions(self):
        """Print the names of all sessions that have stored *.ses files."""
        active_sessions = self.active_sessions()
        if len(active_sessions) == 0:
            print('Active Sessions: (no active sessions)')
            return
        print('Active Sessions:')
        for session in active_sessions:
            print(session)

    def active_sessions(self):
        """Return the distinct session names derived from stored *.ses files."""
        application_folder = os.listdir(self.file_storage)
        return list(set([f.split('-')[0] for f in application_folder if f.endswith('.ses')]))

    def retrieve_ignored_apps(self):
        """Return the ignore list, or an empty list when no ignore file exists."""
        ignored_apps = []
        if os.path.isfile(self.ignoreFile):
            with open(self.ignoreFile, 'r') as text_file:
                for app in text_file:
                    ignored_apps.append(app.replace('\n', ''))
        return ignored_apps

    def show_only_apps_not_ignored(self):
        """Return the running apps that are not on the ignore list."""
        apps_to_be_ignored = self.retrieve_ignored_apps()
        not_ignored = []
        for app in running_apps():
            if app not in apps_to_be_ignored:
                not_ignored.append(app)
        return not_ignored

    def close_apps(self):
        """Quit every app stored in the session file via AppleScript, asking
        for confirmation before quitting the app this tool was launched from."""
        print('Closing all apps')
        last_app = running_from()
        with open(self.file, "r") as text_file:
            for application in text_file:
                application = application.split('\n')[0]
                if application != last_app:
                    if not DEV_MODE:
                        os.system('osascript -e \'quit app "{0}"\''.format(application + '.app'))
                    else:
                        # Dry run: echo the command instead of executing it.
                        print('osascript -e \'quit app "{0}"\''.format(application + '.app'))
        print(f'Are you sure you want to terminate: {last_app}? [Y/n]')
        user_input = input()
        if user_input.lower() == 'y':
            if not DEV_MODE:
                os.system('osascript -e \'quit app "{0}"\''.format(last_app + '.app'))
            else:
                print('osascript -e \'quit app "{0}"\''.format(last_app + '.app'))
| StarcoderdataPython |
9684123 | from camera_calibrator import CameraCalibrator
import numpy as np
import cv2
import pickle
import copy
from glob import glob
def pmat(mat):
    """Pretty-print *mat* with entries rounded to three decimal places."""
    rounded = np.round(mat, 3)
    print(rounded)
if __name__ == "__main__":
    # Demo entry point: build a calibrator for a 6x7 board with 10 cm tiles,
    # then locate the AprilTag centre in a sample amplitude image.
    calib = CameraCalibrator(board_shape=(6, 7), tile_side=0.10, apriltag_families="tag36h10")
    image = cv2.imread("data/amplitude.png", cv2.IMREAD_GRAYSCALE)
    a = calib.get_apriltag_center(image)
    print(a)
1895909 | #!/usr/bin/env python
__author__ = '<NAME>, <NAME>'
import unittest
from mock import Mock, patch
from pyon.util.int_test import IonIntegrationTestCase
from nose.plugins.attrib import attr
from pyon.core.exception import BadRequest, Conflict, Inconsistent, NotFound
from pyon.public import PRED, RT, IonObject, OT
from interface.objects import PolicyTypeEnum
from ion.service.policy_management_service import PolicyManagementService
from interface.services.core.ipolicy_management_service import PolicyManagementServiceClient
@attr('INT', group='coi')
@patch.dict('pyon.core.governance.governance_controller.CFG', IonIntegrationTestCase._get_alt_cfg({'interceptor': {'interceptors': {'governance': {'config': {'enabled': False}}}}}))
class TestPolicyManagementServiceInt(IonIntegrationTestCase):
    """Integration tests for PolicyManagementService CRUD and the specialised
    policy-creation helpers, run in a container with the governance
    interceptor disabled (see the @patch.dict above)."""

    def setUp(self):
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/basic.yml')
        self.container.governance_controller.policy_event_callback = Mock()
        self.policy_management_service = PolicyManagementServiceClient()

    def test_policy(self):
        self._do_test_policy_crud()

    def _do_test_policy_crud(self):
        """Exercise create/read/update/enable/delete plus the argument
        validation of the specialised policy factory methods."""
        policy_rule = '<Rule id="{rule_id}"> <description>{description}</description></Rule>'
        policy_obj = IonObject(RT.Policy, name='Test_Policy', description='This is a test policy',
                               policy_type=PolicyTypeEnum.RESOURCE_ACCESS,
                               definition=policy_rule)
        # A whitespace-only name must be rejected on create.
        policy_obj.name = ' '
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_policy(policy_obj)
        policy_obj.name = 'Test_Policy'
        policy_id = self.policy_management_service.create_policy(policy_obj)
        self.assertNotEqual(policy_id, None)
        # read_policy requires an id argument.
        with self.assertRaises(BadRequest):
            self.policy_management_service.read_policy()
        policy = None
        policy = self.policy_management_service.read_policy(policy_id)
        self.assertNotEqual(policy, None)
        # A whitespace-only name must also be rejected on update.
        policy.name = ' '
        with self.assertRaises(BadRequest):
            self.policy_management_service.update_policy(policy)
        policy.name = 'Updated_Test_Policy'
        self.policy_management_service.update_policy(policy)
        policy = None
        policy = self.policy_management_service.read_policy(policy_id)
        self.assertNotEqual(policy, None)
        self.assertEqual(policy.name, 'Updated_Test_Policy')
        # Each specialised helper rejects calls with missing arguments.
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_resource_access_policy()
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_resource_access_policy(policy_id)
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_resource_access_policy(policy_id, policy.name)
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_resource_access_policy(policy_id, policy.name, "description")
        #p_id = self.policy_management_service.create_resource_access_policy(policy_id, "Resource_access_name", "Policy Description", "Test_Rule")
        #self.assertNotEqual(p_id, None)
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_service_access_policy()
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_service_access_policy(service_name="service_name")
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_service_access_policy(service_name="service_name", policy_name="policy_name")
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_service_access_policy(service_name="service_name", policy_name="policy_name", description="description")
        #p_obj = self.policy_management_service.create_service_access_policy("service_name", "policy_name", "description", "policy_rule")
        #self.assertNotEqual(p_obj, None)
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_common_service_access_policy()
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_common_service_access_policy(policy_name="policy_name")
        with self.assertRaises(BadRequest):
            self.policy_management_service.create_common_service_access_policy(policy_name="policy_name",description="description")
        #p_id = self.policy_management_service.create_common_service_access_policy(policy_name="policy_name",description="description", policy_rule="test_rule")
        #self.assertNotEqual(p_id, None)
        with self.assertRaises(BadRequest):
            self.policy_management_service.add_process_operation_precondition_policy()
        with self.assertRaises(BadRequest):
            self.policy_management_service.add_process_operation_precondition_policy(process_id="process_id")
        with self.assertRaises(BadRequest):
            self.policy_management_service.add_process_operation_precondition_policy(process_id="process_id", op="op")
        # Enabling twice must be accepted (idempotent).
        self.policy_management_service.enable_policy(policy_id)
        self.policy_management_service.enable_policy(policy_id)
        with self.assertRaises(BadRequest):
            self.policy_management_service.delete_policy()
        self.policy_management_service.delete_policy(policy_id)
        # After deletion, read and delete must both raise NotFound.
        with self.assertRaises(NotFound) as cm:
            self.policy_management_service.read_policy(policy_id)
        with self.assertRaises(NotFound) as cm:
            self.policy_management_service.delete_policy(policy_id)

    def _do_test_policy_finds(self):
        pass
9773717 | <filename>Puzzles/checkio/Home/The-Most-Wanted-Letter/mySolution.py
def extractChars(text):
    """Return the distinct alphabetic characters of *text*, lower-cased,
    in order of first appearance.

    Fix: the original called ``unicode.isalpha(char)`` — ``unicode`` does not
    exist on Python 3, so the function raised NameError for any input.
    """
    charsList = []
    for char in text.lower():
        if char.isalpha() and char not in charsList:
            charsList.append(char)
    return charsList
def checkio(text):
    """Return the most frequent letter of *text*, case-insensitively.

    Ties are broken alphabetically (smallest letter wins), matching the
    original scan. Rewritten with Counter: the original recounted the whole
    string for every candidate letter (O(n*k)) and relied on the broken
    Python-2-only extractChars helper.
    """
    from collections import Counter  # local import keeps this file dependency-free at top level
    counts = Counter(c for c in text.lower() if c.isalpha())
    best_freq = max(counts.values())
    return min(c for c, n in counts.items() if n == best_freq)
| StarcoderdataPython |
4960512 | <reponame>khaosans/parse_anchorusd_cointracker
import maya
import csv
def toUTCtimestamp(timestamp):
    """Parse *timestamp* with maya and format it as 'YYYY-MM-DD HH:MM:SS'."""
    parsed = maya.parse(timestamp).datetime()
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
# Known trade directions. NOTE(review): appears unused — the parameter of
# translate() below shadows it; confirm before removing.
transaction = ['Sell', 'Buy']


def translate(Datestamp, transaction, Token, Amount, TPXLM):
    """Convert one StellarX trade row into a CoinTracker CSV row.

    Returns [date, received qty, received currency, sent qty, sent currency,
    fee amount, fee currency, tag] depending on the trade direction.

    Fix: the original annotated the local dict as ``Dict[str, str]`` without
    importing ``Dict`` (and the values are lists, not strings); the bogus
    annotation is removed.
    """
    switcher = {
        'Buy': [str(Amount), Token, str(TPXLM), 'XLM', '', '', ''],
        'Sell': [str(TPXLM), 'XLM', str(Amount), Token, '', '', ''],
    }
    return [toUTCtimestamp(Datestamp)] + switcher[transaction]
# Convert the StellarX trade-history export into CoinTracker's CSV format.
with open('stellarx-trade-history-2020-12-31T18_31_27-08_00.csv', "r", newline='') as stellarFile:
    csvreader = csv.reader(stellarFile, delimiter=',')
    next(csvreader)  # skip the export's header row
    rowHeader = ['Date','Received Quantity','Received Currency','Sent Quantity','Sent Currency','Fee Amount','Fee Currency','Tag']
    with open('output.csv', 'w', newline='') as csvWritefile:
        csvwriter = csv.writer(csvWritefile, delimiter=',')
        csvwriter.writerow(rowHeader)
        for row in csvreader:
            # Presumably: col 0=date, 1=direction, 2=token, 4=amount,
            # 7=total price in XLM — verify against the export format.
            output=translate(row[0], row[1], row[2], row[4], row[7])
            csvwriter.writerow(output)
3393949 | <filename>terra_sdk/util/converter.py
from datetime import datetime
def to_isoformat(dt: datetime) -> str:
    """Render *dt* as an ISO-8601 string with millisecond precision, using
    'Z' for UTC and dropping an all-zero millisecond component entirely.

    Fix: the final replacement was ``"000Z" -> "Z"``, which turned
    '…T00:00:00.000Z' into the malformed '…T00:00:00.Z'; it must strip the
    dot too: ``".000Z" -> "Z"``.
    """
    return (
        dt.isoformat(timespec="milliseconds")
        .replace("+00:00", "Z")
        .replace(".000Z", "Z")
    )
| StarcoderdataPython |
4956969 | """
Indent each enum item in the enum block.
== Violation ==
enum A {
A_A, <== Violation
A_B <== Violation
}
== Good ==
enum A {
A_A, <== Good
A_B
}
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, typeName, typeFullName, decl, contextStack, typeContext):
    """Report enum members that are not indented past the 'enum' keyword.

    For each enum definition (not a mere declaration), walk the tokens of the
    enum body and flag any token whose column is not to the right of the
    enum keyword's indentation.
    """
    if not decl and typeName == "ENUM" and typeContext is not None:
        column = GetIndentation(lexer.GetCurToken())
        lexer._MoveToToken(typeContext.startToken)
        end_token = typeContext.endToken
        while True:
            t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
            if t is None or t == end_token:
                break
            if GetRealColumn(t) <= (column + 1):
                nsiqcppstyle_reporter.Error(t, __name__, "Enum block should be indented. But the token(%s) seems to be unindented" % t.value)
ruleManager.AddTypeNameRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
    """Unit tests for the enum-indentation rule defined above.

    NOTE(review): the C fixtures below are reproduced exactly as found; their
    internal indentation looks lost (tests expecting no violation show
    unindented members) — confirm against the upstream nsiqcppstyle source.
    """
    def setUpRule(self):
        ruleManager.AddTypeNameRule(RunRule)

    def test1(self):
        # An empty enum body reports nothing.
        self.Analyze("test/thisFile.c",
"""
enum A {
}
""")
        assert not CheckErrorContent(__name__)

    def test2(self):
        self.Analyze("test/thisFile.c",
"""
enum C {
AA, BB
}
""")
        assert not CheckErrorContent(__name__)

    def test3(self):
        self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4,
BB
}
""")
        assert CheckErrorContent(__name__)

    def test4(self):
        self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
,BB
}
""")
        assert CheckErrorContent(__name__)

    def test5(self):
        self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
/** HELLO */
,BB
}
""")
        assert not CheckErrorContent(__name__)

    def test6(self):
        # Typedef'd anonymous enums are checked the same way.
        self.Analyze("test/thisFile.c",
"""
typedef enum {
AA = 4
/** HELLO */
,BB
} DD
""")
        assert not CheckErrorContent(__name__)

    def test7(self):
        self.Analyze("test/thisFile.c",
"""
typedef enum
{
SERVICE,
SERVER,
BROKER,
MANAGER,
REPL_SERVER,
REPL_AGENT,
UTIL_HELP,
UTIL_VERSION,
ADMIN
} UTIL_SERVICE_INDEX_E;
""")
        assert not CheckErrorContent(__name__)

    def test8(self):
        self.Analyze("test/thisFile.c",
"""
enum COLOR
{
COLOR_TRANSPARENT = RGB(0, 0, 255),
COLOR_ROOM_IN_OUT = 0xffff00,
COLOR_CHAT_ITEM = 0xff9419,
COLOR_CHAT_MY = 0x00b4ff,
COLOR_CHAT_YOUR = 0xa3d5ff,
COLOR_ROOM_INFO = 0x00ffff,
COLOR_RESULT_SCORE = 0xffcc00,
COLOR_RESULT_RATING = 0x00fcff,
COLOR_RESULT_POINT = 0x33ff00
}; """)
        assert not CheckErrorContent(__name__)
1653286 | import networkx as nx
from spira.gdsii.elemental.label import Label
from spira.param.field.typed_graph import PathList
from spira import param
from spira.core.initializer import ElementalInitializer
import spira
from spira import log as LOG
from spira.core.mixin.gdsii_output import OutputMixin
def _loops(g):
    """Propagate device labels around simple cycles of *g*.

    A cycle of length > 2 that carries fewer than two device-labelled nodes
    gets every node relabelled with that device's pin; the pass repeats
    recursively until no cycle changes. Graphs with fewer than three simple
    cycles are returned unchanged.
    """
    def _is_valid_cycle(g, cycle, devices):
        # Collect device labels found on the cycle into *devices* (out-param)
        # and accept the cycle when fewer than two devices sit on it.
        if len(cycle) > 2:
            for n in cycle:
                if 'pin' in g.node[n]:
                    lbl = g.node[n]['pin']
                    # NOTE(review): _is_device is commented out below in this
                    # module — this raises NameError if reached; confirm.
                    if _is_device(lbl):
                        devices.append(lbl)
            if len(devices) < 2:
                return True
        return False

    H = g.to_directed()
    cycles = list(nx.simple_cycles(H))
    if len(cycles) < 3:
        return g
    valid_cycle_count = 0
    for cycle in cycles:
        devices = []
        if _is_valid_cycle(g, cycle, devices):
            for n in cycle:
                if len(devices) > 0:
                    g.node[n]['pin'] = devices[0]
            valid_cycle_count += 1
    if valid_cycle_count != 0:
        # Something changed: run another pass.
        g = _loops(g)
    else:
        return g
    return g
# def _is_master(g, n):
# lbl = g.node[n]['pin']
# if lbl.text.startswith('via'):
# if len([i for i in g[n]]) > 2:
# return True
# masternodes = ['C', 'P', 'ntron', 'user', 'jj', 'gnd', 'shunt']
# for key in masternodes:
# if lbl.text.startswith(key):
# return True
# return False
# def _is_device(lbl):
# devicenodes = ['jj', 'ntron']
# for key in devicenodes:
# if lbl.text.startswith(key): return True
# return False
# def _make_usernode(lbl):
# usernodes = ['via', 'C', 'P', 'ntron', 'user', 'jj', 'gnd', 'shunt']
# for key in usernodes:
# if lbl.text.startswith(key): return True
# return False
def _valid_path(g, path, master_nodes):
    """
    Return True when *path* starts and ends on master nodes and carries no
    via pin on any interior node.
    """
    valid = True
    if path[0] not in master_nodes: valid = False
    if path[-1] not in master_nodes: valid = False
    for n in path[1:-1]:
        if 'pin' in g.node[n]:
            # NOTE(review): BaseVia is not defined/imported in this module —
            # confirm where this name comes from.
            if isinstance(g.node[n]['pin'], BaseVia):
                valid = False
    return valid
def store_master_nodes(g):
    """Collect the nodes of *g* whose 'pin' attribute is a BaseVia instance."""
    return [n for n in g.nodes()
            if 'pin' in g.node[n] and isinstance(g.node[n]['pin'], BaseVia)]
def subgraphs(lgraph):
    """Keep only the connected components of *lgraph.g* that contain at least
    one Terminal pin, and rebuild the graph as their disjoint union."""
    # logger = logging.getLogger(__name__)
    # logger.info('Merging subgraphs')
    graphs = list(nx.connected_component_subgraphs(lgraph.g))
    gg = list()
    for graph in graphs:
        save = False
        for n in graph.nodes():
            if 'pin' in graph.node[n]:
                label = graph.node[n]['pin']
                # NOTE(review): Terminal is not defined/imported here — confirm.
                if isinstance(label, Terminal):
                    save = True
        if save is True:
            gg.append(graph)
    lgraph.g = nx.disjoint_union_all(gg)
class __Graph__(ElementalInitializer):
    """Base wrapper around a networkx Graph built from a set of subgraphs."""

    __mixins__ = [OutputMixin]

    def __init__(self, subgraphs, data=None, val=None, **kwargs):
        ElementalInitializer.__init__(self, **kwargs)
        self.g = nx.Graph()
        self.subgraphs = subgraphs
        # Bare attribute access — presumably a param.DataField descriptor
        # whose read triggers create_union_subgraphs(); confirm.
        self.union_subgraphs
        # self.combine_nodes
        # self.connect_subgraphs
        self.usernodes = []
        self.seriesnodes = []
        self.master_nodes = []

    def __repr__(self):
        return ("[SPiRA: Graph] ({} nodes, {} edges)").format(self.g.number_of_nodes(),
                                                              self.g.number_of_edges())

    def __str__(self):
        return self.__repr__()
class GraphAbstract(__Graph__):
    """Adds union/connect/combine operations over the wrapped graph."""

    union_subgraphs = param.DataField(fdef_name='create_union_subgraphs')
    connect_subgraphs = param.DataField(fdef_name='create_connect_subgraphs')
    combine_nodes = param.DataField(fdef_name='create_combine_nodes')

    def __init__(self, subgraphs, data=None, val=None, **kwargs):
        super().__init__(subgraphs, data=None, val=None, **kwargs)

    def create_union_subgraphs(self):
        # Merge the provided subgraphs into one disjoint graph.
        self.g = nx.disjoint_union_all(self.subgraphs.values())

    def create_connect_subgraphs(self):
        # Rebuild the graph as the disjoint union of its connected components.
        graphs = list(nx.connected_component_subgraphs(self.g))
        self.g = nx.disjoint_union_all(graphs)

    def create_combine_nodes(self):
        """
        Combine all nodes of the same type into one node.
        """
        def partition_nodes(u, v):
            # Two nodes merge when they share the same surface (and neither
            # has a pin) or carry equal pins.
            if ('surface' in self.g.node[u]) and ('surface' in self.g.node[v]):
                if ('pin' not in self.g.node[u]) and ('pin' not in self.g.node[v]):
                    if self.g.node[u]['surface'] == self.g.node[v]['surface']:
                        return True
            if ('pin' in self.g.node[u]) and ('pin' in self.g.node[v]):
                if self.g.node[u]['pin'] == self.g.node[v]['pin']:
                    return True

        def sub_nodes(b):
            # Build the merged node's attributes from one block of the
            # partition; 'pos' keeps the last member's coordinates.
            S = self.g.subgraph(b)
            pin = nx.get_node_attributes(S, 'pin')
            surface = nx.get_node_attributes(S, 'surface')
            center = nx.get_node_attributes(S, 'pos')
            sub_pos = list()
            for key, value in center.items():
                sub_pos = [value[0], value[1]]
            return dict(pin=pin, surface=surface, pos=sub_pos)

        Q = nx.quotient_graph(self.g, partition_nodes, node_data=sub_nodes)
        Pos = nx.get_node_attributes(Q, 'pos')
        Label = nx.get_node_attributes(Q, 'pin')
        Polygon = nx.get_node_attributes(Q, 'surface')
        Edges = nx.get_edge_attributes(Q, 'weight')
        # Flatten the quotient graph (frozenset nodes) back into a plain
        # graph keyed by each block's representative node.
        g1 = nx.Graph()
        for key, value in Edges.items():
            n1, n2 = list(key[0]), list(key[1])
            g1.add_edge(n1[0], n2[0])
        for n in g1.nodes():
            for key, value in Pos.items():
                if n == list(key)[0]:
                    g1.node[n]['pos'] = [value[0], value[1]]
            for key, value in Label.items():
                if n == list(key)[0]:
                    if n in value:
                        g1.node[n]['pin'] = value[n]
            for key, value in Polygon.items():
                if n == list(key)[0]:
                    g1.node[n]['surface'] = value[n]
        self.g = g1

    def flat_copy(self, level=-1, commit_to_gdspy=False):
        # Graphs are already flat: return self unchanged.
        return self

    def flatten(self):
        return [self]

    def commit_to_gdspy(self, cell):
        # No gdspy representation for a graph element.
        pass

    def transform(self, transform):
        pass
class UserGraph(GraphAbstract):
    """Labels unlabelled branch nodes (degree > 2) as user nodes, then
    converts them to the label of a neighbouring junction device."""

    user_nodes = param.DataField(fdef_name='create_label_user_nodes')
    convert_nodes = param.DataField(fdef_name='create_convert_user_nodes')

    def __init__(self, subgraphs, data=None, val=None, **kwargs):
        super().__init__(subgraphs, data=None, val=None, **kwargs)
        # self.user_nodes
        # self.convert_nodes

    def create_label_user_nodes(self):
        def _usernode_label(position, id0=None):
            # Build a UserNode device carrying a 'user' label at *position*.
            params = {}
            params['id0'] = id0
            params['text'] = 'user'
            params['color'] = '#CC99CC'
            label = Label(position, **params)
            D = spira.UserNode()
            D.color = '#1ea8df'
            D += label
            return D

        for n in self.g.nodes():
            # Only branch points (more than two neighbours) become user nodes.
            if len([i for i in self.g[n]]) > 2:
                if 'pin' not in self.g.node[n]:
                    self.g.node[n]['pin'] = _usernode_label(
                        position=self.g.node[n]['pos'],
                        id0=self.g.node[n]['surface'].id
                    )
                    self.usernodes.append(n)
                else:
                    # Replace non-Cell pins with a proper user node label.
                    if not issubclass(type(self.g.node[n]['pin']), spira.Cell):
                        self.g.node[n]['pin'] = _usernode_label(
                            position=self.g.node[n]['pos'],
                            id0=self.g.node[n]['surface'].id
                        )
                        self.usernodes.append(n)
        # self.create_combine_nodes()

    def create_convert_user_nodes(self):
        """Relabel each user node with the pin of an adjacent junction
        device, then combine nodes and refresh the master-node list."""
        LOG.header('Converting usernodes')
        if len(self.usernodes) == 0:
            raise ValueError('please run label_user_nodes first')
        changed = dict()
        for n in self.usernodes:
            neighbor_nodes = [i for i in self.g[n]]
            for nn in neighbor_nodes:
                if 'pin' in self.g.node[nn]:
                    if issubclass(type(self.g.node[nn]['pin']), spira.JunctionDevice):
                        changed[n] = self.g.node[nn]['pin']
        for n, usernode in changed.items():
            self.g.node[n]['pin'] = usernode
        self.create_combine_nodes()
        self.master_nodes = store_master_nodes(self.g)
class SeriesGraph(UserGraph):
    """Marks nodes on redundant series paths for removal and prunes them."""

    series_nodes = param.DataField(fdef_name='create_label_series_nodes')
    remove_lonely = param.DataField(fdef_name='create_remove_lonely_nodes')
    remove_series = param.DataField(fdef_name='create_remove_series_nodes')

    def __init__(self, subgraphs, data=None, val=None, **kwargs):
        super().__init__(subgraphs, data=None, val=None, **kwargs)
        # self.series_nodes
        # self.remove_lonely
        # self.remove_series

    def create_label_series_nodes(self, algo=None):
        """Find simple paths between master nodes and label the interior of
        selected paths with RemoveNode pins for later pruning."""
        print('running series graph node filtering')
        sub_graphs = nx.connected_component_subgraphs(self.g, copy=True)
        self.master_nodes = store_master_nodes(self.g)

        def _remove_label(lbl, id0=None):
            # RemoveNode marker (white) derived from an existing label.
            params = {}
            params['id0'] = id0
            params['text'] = 'remove'
            params['gdslayer'] = lbl.gdslayer
            params['color'] = '#FFFFFF'
            label = Label(lbl.position, **params)
            D = spira.RemoveNode()
            D.color = '#FFFFFF'
            return D

        def _none_label(lbl, id0=None):
            # Same marker with a distinct display colour.
            params = {}
            params['id0'] = id0
            params['text'] = 'remove'
            params['gdslayer'] = lbl.gdslayer
            params['color'] = '#FFFFFF'
            label = Label(lbl.position, **params)
            D = spira.RemoveNode()
            D.color = '#FFF000'
            return D

        def _update_paths(g, paths, s, t):
            # Append every valid simple s->t path (see _valid_path) to *paths*.
            if nx.has_path(g, s, t):
                for p in nx.all_simple_paths(g, source=s, target=t):
                    if _valid_path(g, p, self.master_nodes):
                        paths.append(p)

        for sg in sub_graphs:
            paths = PathList()
            for s in self.master_nodes:
                targets = filter(lambda x: x not in [s], self.master_nodes)
                for t in targets:
                    _update_paths(self.g, paths, s, t)
            # print(paths)
            for i, path in enumerate(paths):
                # NOTE(review): only the path at index 2 is labelled —
                # looks like a hard-coded selection; confirm intent.
                if i == 2:
                    for n in path[1:-1]:
                        lbl = self.g.node[n]['surface']
                        if not issubclass(type(lbl), spira.RemoveNode):
                            self.g.node[n]['pin'] = _none_label(lbl, id0=i)

    def create_remove_lonely_nodes(self):
        # Drop unlabelled dangling nodes (exactly one neighbour).
        remove = list()
        for n in self.g.nodes():
            if len([i for i in self.g[n]]) == 1:
                if 'pin' not in self.g.node[n]:
                    remove.append(n)
        self.g.remove_nodes_from(remove)

    def create_remove_series_nodes(self):
        """Splice out nodes pinned as RemoveNode, reconnecting their
        neighbours directly."""
        self.create_combine_nodes()
        # self.g = _loops(self.g)
        # self.combine_nodes()
        remove = list()
        for n in self.g.nodes():
            if 'pin' in self.g.node[n]:
                lbl = self.g.node[n]['pin']
                # if lbl.text.startswith('remove'):
                if issubclass(type(lbl), spira.RemoveNode):
                    # Connect the node's neighbours to each other, then drop it.
                    e = tuple([i for i in self.g[n]])
                    self.g.add_edge(*e, label=None)
                    remove.append(n)
        self.g.remove_nodes_from(remove)
class Graph(SeriesGraph):
    """Public entry point combining the full pipeline: subgraph union,
    user-node labelling and series-node pruning."""
    pass
| StarcoderdataPython |
11259830 | <reponame>kkingstoun/post_mumax_v4
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# from matplotlib import rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
from hslcolormap import Hslcolormap
import cmocean
import random
import seaborn as sns
import matplotlib.style as style
from arrow import Arrow
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
# matplotlib.rcParams['mathtext.fontset'] = 'stix'
# matplotlib.rcParams['text.latex.unicode'] = True
# matplotlib.rcParams['font.family'] = 'sans-serif'
# matplotlib.rcParams['font.sans-serif'] = ['Tahoma']
class SubPlot():
    """One panel of a spin-wave analysis figure: an FMR spectrum line plot,
    an amplitude map, a phase map, or an RGB magnetization snapshot."""

    def __init__(self, position, ptype, data, desc, selected_freq=None, title=None):
        """
        position      -- matplotlib Axes to draw into
        ptype         -- one of "plot", "amplitude", "phase", "imshow"
        data          -- (frequency_axis, spectrum/mode_data) for the spectral
                         plot types; a raw magnetization array for "imshow"
                         (NOTE(review): exact shapes inferred from usage — confirm)
        desc          -- header dict with xnodes/ynodes/xstepsize/ystepsize
        selected_freq -- index of the frequency to highlight/slice
        """
        self.title = title
        self.colorbar = None
        self.ax = position
        self._data = data
        self._freq = selected_freq
        self._ptype = ptype
        self._desc = desc
        if self._desc is not None:
            # Physical extent of the sample in nanometres.
            # Bug fix: the original indexed `desc` before its None check, so a
            # None desc crashed instead of being skipped.
            self.shapex = desc['xnodes'] * desc['xstepsize'] / 1e-9
            self.shapey = desc['ynodes'] * desc['ystepsize'] / 1e-9
            self.make_it_beuty(self.ax)
        self.select_plot(self.ax)

    def select_plot(self, ax):
        """Dispatch to the renderer selected by self._ptype.

        Bug fix: compare strings with `==`; the original used `is`, which only
        works by accident of CPython interning (SyntaxWarning on Python 3.8+).
        """
        if self._ptype == "plot":
            self.prepare_plot(ax)
        elif self._ptype == "amplitude":
            self.prepare_imshow_amplitude(ax)
        elif self._ptype == "phase":
            self.prepare_imshow_phase(ax)
        elif self._ptype == "imshow":
            self.prepare_imshow(ax)
        else:
            pass  # unknown ptype: leave the axes empty

    def add_peaks(self, peaks):
        """Mark each peak index with a faint vertical line on the spectrum."""
        for peak in peaks:
            self.ax.axvline(self._data[0][peak], color="gray", alpha=0.2)

    def prepare_plot(self, ax):
        """Line plot of the FMR amplitude spectrum, selected frequency in red."""
        self.plot = ax.plot(self._data[0], np.abs(self._data[1]))
        ax.axvline(self._data[0][self._freq], color="red")
        ax.set_ylabel("Amplitude (a.u.)")
        ax.set_xlabel("Frequency (GHz)")
        return self.plot

    def prepare_imshow_amplitude(self, ax):
        """2D map of the mode amplitude at the selected frequency."""
        im = ax.imshow(np.abs(self._data[1][self._freq, :, :]),
                       origin="lower",
                       extent=[0, self.shapex, 0, self.shapey],
                       cmap=cmocean.cm.haline,
                       interpolation='none',
                       aspect="equal")
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        self.colorbar = plt.colorbar(im, cax=cax)
        self.colorbar.set_label("Amplitude (a.u.)")
        self.colorbar.outline.set_linewidth(0.5)
        ax.title.set_text("Amplitude")
        ax.title.set_size(10)
        ax.title.set_y(1.1)
        return im

    def prepare_imshow(self, ax):
        """HSL-coded snapshot of the static magnetization with direction
        arrows overlaid (debug per-arrow prints removed)."""
        im = ax.imshow(Hslcolormap.TransformToColor(self._data[0, :, :, :]),
                       origin="lower",
                       interpolation='none',
                       aspect="equal")
        ar = Arrow()
        arrows = ar.drawArrows(self._data[:, :, :, :], 10)
        patches = []
        for arrow in arrows:
            if arrow is not None:
                # NOTE(review): Polygon(arrow, 1) passes 1 as `closed`; the
                # commented-out np.flip variant in the original suggests the
                # vertex order may need flipping — confirm against Arrow output.
                patches.append(Polygon(arrow, 1))
        ax.add_collection(PatchCollection(patches, alpha=0.4))
        ax.title.set_text("Stable magnetic configuration")
        ax.title.set_size(10)
        ax.title.set_y(1.1)
        return im

    def prepare_imshow_phase(self, ax):
        """2D map of the mode phase (degrees) at the selected frequency."""
        im = ax.imshow(np.angle(self._data[1][self._freq, :, :], deg=True),
                       origin="lower",
                       extent=[0, self.shapex, 0, self.shapey],
                       cmap="hsv",
                       interpolation='none',
                       aspect="equal")
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        ax.title.set_text("Phase")
        ax.title.set_size(10)
        ax.title.set_y(1.1)
        self.colorbar = plt.colorbar(im, cax=cax, ticks=[-180, 0, 180])
        im.set_clim(-180, 180)
        self.colorbar.set_clim(-180, 180)
        self.colorbar.set_label("Phase (degree)")
        self.colorbar.outline.set_linewidth(0.5)
        return im

    def make_it_beuty(self, ax):
        """Label the spatial axes in nanometres."""
        ax.set_ylabel("y (nm)")
        ax.set_xlabel("x (nm)")
class PreparePlot():
    """Figure builder that lays out lpx*lpy panels on a GridSpec and applies
    shared cosmetic tweaks (thin spines, thin colorbar outlines)."""

    def __init__(self, lpx, lpy):
        self.fig, self.axes = plt.subplots(lpx * lpy)
        self.colorbars = []
        self.ai = 0  # index of the next free axes slot
        self.grid = plt.GridSpec(lpx, lpy, wspace=0.4, hspace=0.3)
        self.grid.update(hspace=0.5)
        style.use("seaborn-white")

    def add(self, grid, par1=None):
        """Create a new subplot on *grid*, register it, and return it."""
        ax = plt.subplot(grid)
        self.axes[self.ai] = ax
        self.ai += 1
        return ax

    def place(self, obj, grid):
        """Render *obj* (anything exposing get(ax) and .colorbar) on *grid*."""
        self.axes[self.ai] = plt.subplot(grid)
        obj.get(self.axes[self.ai])
        # Bug fix: compare against None with `is not`, not `!=`.
        if obj.colorbar is not None:
            self.colorbars.append(obj.colorbar)
        self.ai += 1

    def save(self, path=None):
        """Write the figure to *path* at 600 dpi."""
        plt.savefig(path, dpi=600)

    def adjust(self):
        """Thin out all axes spines and colorbar outlines."""
        for ax in self.axes:
            for side in ('top', 'right', 'bottom', 'left'):
                ax.spines[side].set_linewidth(0.5)
            ax.patch.set_linewidth(0.1)
        for cb in self.colorbars:
            cb.outline.set_linewidth(0.5)

    def show(self):
        """Display the figure window."""
        plt.show()
# def plot_mods(self, path):
# for x in self.peaks:
# print(str(np.round(self.mtzyxc.fmrfreq[x]/1e-9, 3)) + " GHz")
# style.use("seaborn-white")
# fig, ax = plt.subplots()
# grid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3)
# ax1 = plt.subplot(grid[0, :2])
# ax1.set_title("Frequency " +
# str(np.round(self.mtzyxc.fmrfreq[x]/1e9, 3)) + " GHz")
# ax1.axvline(self.mtzyxc.fmrfreq[x]/1e9, color="red")
# for y in self.peaks:
# ax1.axvline(
# self.mtzyxc.fmrfreq[y]/1e9, color="gray", alpha=0.2)
# fmr = np.abs(self.mtzyxc.fmrspectrum) / \
# np.amax(np.abs(self.mtzyxc.fmrspectrum))
# ax1.plot(self.mtzyxc.fmrfreq/1e9, fmr)
# ax1.set_ylabel("Amplitude (a.u.)")
# ax1.set_xlabel("Frequency (GHz)")
# mods = self.mtzyxc.calculatemods(
# eachZ=False, comp=0, window="hanning")
# ax2 = fig.add_subplot(grid[1, 0])
# # ax2.set_title("Amplitude (a.u)")
# ax2.set_ylabel("y (nm)")
# ax2.set_xlabel("x (nm)")
# mod = np.abs(mods[x, 0, :, :, 0]) / \
# np.amax(np.abs(mods[x, 0, :, :, 0]))
# amp = ax2.imshow(mod,
# origin="lower",
# cmap=cmocean.cm.haline,
# extent=[0, mods.self.shape[2]*self.mtzyxc._headers["xstepsize"]/1e-9,
# 0, mods.self.shape[3]*self.mtzyxc._headers["ystepsize"]/1e-9],
# aspect="equal")
# cba = plt.colorbar(amp, ax=ax2)
# cba.set_label("Amplitude (a.u)")
# ax3 = fig.add_subplot(grid[1, 1])
# # ax3.set_title("Phase (degree)")
# phase = ax3.imshow(np.angle(mods[x, 0, :, :, 0], deg=1),
# origin="lower",
# cmap="hsv",
# extent=[0, mods.self.shape[2]*self.mtzyxc._headers["xstepsize"]/1e-9,
# 0, mods.self.shape[3]*self.mtzyxc._headers["ystepsize"]/1e-9],
# aspect="equal")
# ax3.set_ylabel("y (nm)")
# ax3.set_xlabel("x (nm)")
# phase.set_clim(-180, 180)
# cbp = plt.colorbar(phase, ax=ax3, ticks=[-180, 0, 180])
# cbp.set_clim(-180, 180)
# cbp.set_label("Phase (degree)")
# ticks = [-1, 0, 1]
# cba.outline.set_linewidth(0.5)
# cbp.outline.set_linewidth(0.5)
# # cb.set_clim(-180, 180)
# for ax in [ax1, ax2, ax3]:
# ax.spines['top'].set_linewidth(0.5)
# ax.spines['right'].set_linewidth(0.5)
# ax.spines['bottom'].set_linewidth(0.5)
# ax.spines['left'].set_linewidth(0.5)
# ax.patch.set_linewidth(0.1)
# # plt.tight_layout()
# plt.savefig(path +
# str(np.round(self.mtzyxc.fmrfreq[x]/1e9, 3)) + "GHz.pdf", dpi=600)
# def quivier():
# def plot_colormap():
# pass
# M_szer_0 = Hslcolormap.TransformToColor(mtzyxc.array[100, 1, :, :, :])
# ax2 = plt.subplot(3, 1, 2)
# ax2.imshow(M_szer_0,
# origin="lower",
# aspect="equal")
# X, Y = np.meshgrid(np.arange(0, mtzyxc.array.shape[2],1),
# np.arange(0, mtzyxc.array.shape[3], 1))
# U = mtzyxc.array[100, 1, X, Y, 0]
# V = mtzyxc.array[100, 1, X, Y, 1]
# C = mtzyxc.array[100, 1, X, Y, 2]
# Q = plt.quiver( X[::2, ::2],
# Y[::2, ::2],
# U[::2, ::2],
# V[::2, ::2],
# # C[::2, ::2],
# # units='x',
# # pivot='mid',
# color="black",
# # minlength = 2,
# # pivot='tip',
# # width=0.022,
# # scale=1/10
# )
| StarcoderdataPython |
3596619 | """
Prepare data for running benchmark on sparse linear regression model
"""
from __future__ import print_function
import argparse
import keras_sparse_model
import mxnet as mx
from scipy import sparse
from keras import backend as K
from keras.utils.data_utils import prepare_sliced_sparse_data
def invoke_benchmark(batch_size, epochs, num_gpu, mode):
    """Build a synthetic sparse linear-regression dataset and run the Keras
    benchmark on it.

    A 100000 x 10000 CSR matrix (1% density) is generated, labels are a
    known linear combination, and train == eval data.
    """
    feature_dim = 10000
    train_data = mx.test_utils.rand_ndarray((100000, feature_dim), 'csr', 0.01)
    weight = mx.nd.arange(1, feature_dim + 1).reshape((feature_dim, 1))
    train_label = mx.nd.dot(train_data, weight)
    eval_data = train_data
    eval_label = mx.nd.dot(eval_data, weight)

    # presumably slices the arrays to a multiple of batch_size — confirm
    # against prepare_sliced_sparse_data's implementation.
    train_data = prepare_sliced_sparse_data(train_data, batch_size)
    train_label = prepare_sliced_sparse_data(train_label, batch_size)
    eval_data = prepare_sliced_sparse_data(eval_data, batch_size)
    eval_label = prepare_sliced_sparse_data(eval_label, batch_size)

    print("Running Keras benchmark script on sparse data")
    keras_sparse_model.run_benchmark(
        train_data=sparse.csr_matrix(train_data.asnumpy()),
        train_label=train_label.asnumpy(),
        eval_data=sparse.csr_matrix(eval_data.asnumpy()),
        eval_label=eval_label.asnumpy(),
        batch_size=batch_size,
        epochs=epochs,
        num_gpu=num_gpu,
        mode=mode)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Fix: parse the numeric flags with type=int so CLI values match the int
    # defaults (previously defaults were int but CLI values arrived as str and
    # bad input only failed later at the int() casts).
    parser.add_argument("--batch", type=int, default=512,
                        help="Batch of data to be processed for training")
    parser.add_argument("--epochs", type=int, default=10,
                        help="Number of epochs to train the model on. Set epochs>=10 for the best results")
    parser.add_argument("--gpus", type=int, default=0,
                        help="Benchmark scripts run by default on CPU. Set gpus>=1 for running model on single or "
                             "multi-GPU")
    parser.add_argument("--mode", default='training',
                        help="Benchmark scripts run by default for training the model. Set mode=inference for running "
                             "benchmark on inference")
    args = parser.parse_args()
    invoke_benchmark(args.batch, args.epochs, args.gpus, str(args.mode))
| StarcoderdataPython |
3315939 | """
This module defines a datastructure for creating
kmers.
"""
from dataclasses import dataclass
import pickle
from typing import Dict, List, Optional, Union
from tqdm import tqdm
import os
from os.path import exists
# in-house packages
from src.debruijnextend.utils import hamming_dist
@dataclass
class KmerCluster:
    """Greedy Hamming-distance clustering of protein k-mers.

    `clusters` maps each centroid k-mer to the list of member k-mers whose
    Hamming distance to the centroid is within the clustering threshold.
    """
    clusters: Dict[str, List[str]]

    def get_close_kmers_clusters(self, hash_table, kmer, centroid_diff_threshold, top_N=1):
        """Return merged secondary structures of the top_N k-mers closest to
        *kmer*, searching only clusters whose centroid is within
        *centroid_diff_threshold* (avoids an all-vs-all scan).
        """
        # Gather candidate k-mers from all sufficiently close clusters.
        candidates = []
        for centroid, members in tqdm(self.clusters.items()):
            if hamming_dist(centroid, kmer) < centroid_diff_threshold:
                candidates.extend(members)
        # Keep the top_N candidates with the smallest Hamming distance.
        priority_queue = []
        worst_kept = float("inf")
        for cand in tqdm(candidates):
            score = hamming_dist(cand, kmer)
            if score < worst_kept:
                priority_queue.append((score, hash_table[cand]))
                priority_queue.sort(key=lambda entry: entry[0])
                if len(priority_queue) > top_N:
                    priority_queue.pop(-1)
                # Bug fix: only tighten the cutoff once the queue holds top_N
                # entries; the original updated it immediately, wrongly
                # rejecting later candidates while slots were still free.
                if len(priority_queue) >= top_N:
                    worst_kept = priority_queue[-1][0]
        # Merge the structure dictionaries of the kept candidates.
        output_dict = {}
        for _, structs in priority_queue:
            output_dict.update(structs)
        return output_dict

    @classmethod
    def init_struct(cls, outputfile, cluster_file, threshold=6):
        """Load a cached KmerCluster from *cluster_file*, or build one by
        greedily clustering the k-mers stored in *outputfile* and cache it."""
        if exists(cluster_file):
            with open(cluster_file, 'rb') as cls_file:
                return pickle.load(cls_file)
        clusters = {}
        # Bug fix: close the hash-table file instead of leaking the handle.
        with open(outputfile, "rb") as table_file:
            hash_table = pickle.load(table_file)
        seqs = hash_table.keys()
        # Greedy clustering: attach each k-mer to the first centroid within
        # `threshold`, otherwise promote it to a new centroid.
        counter = 0
        for prot_kmer in tqdm(seqs):
            for centroid_kmer in clusters:
                if hamming_dist(prot_kmer, centroid_kmer) <= threshold:
                    clusters[centroid_kmer].append(prot_kmer)
                    break
            else:
                clusters[prot_kmer] = [prot_kmer]
            counter += 1
            if (counter % 10000) == 0:
                print(f"number of centroids: {len(clusters.keys())}")
                print(f"number of total seqs: {counter}")
        print(len(clusters))
        # Cache the result for the next run.
        with open(cluster_file, "wb") as outfile:
            pickle.dump(KmerCluster(clusters), outfile, protocol=pickle.HIGHEST_PROTOCOL)
        return KmerCluster(clusters)
| StarcoderdataPython |
12822866 | <filename>annotation_utils/rgb_to_gs_segmentation_mask.py
#!/usr/bin/env python
# coding: utf-8
import os
from glob import glob
from PIL import Image
import numpy as np
from tqdm import tqdm
import sys
# DATA_FOLDER =
# '/mnt/storage/data/object_detection/helmet_100/annotated/segmentation_masks/validation' # noqa
def create_label_map(label_map_file: str, background_class='background'):
    """Parse a CVAT-style labelmap file into {label: (rgb_array, index)}.

    The first line (header) is skipped; parsing stops at the first empty
    line. The background class is excluded and does not consume an index,
    so foreground labels are numbered 1, 2, ...
    """
    label_rgb_map = dict()
    label_idx = 1
    with open(label_map_file, 'r') as lf:
        lf.readline()  # skip the header line
        while True:
            line = lf.readline().strip()
            if not line:
                break  # empty line or EOF terminates the map
            parts = line.split(':')
            if parts[0] == background_class:
                continue  # background gets no index
            color = np.array([int(c) for c in parts[1].split(',')])
            label_rgb_map[parts[0]] = (color, label_idx)
            label_idx += 1
    return label_rgb_map
def rgb_to_gs_mask(rgb_im_array: np.array, label_rgb_map: dict) -> np.array:
    """Collapse an RGB segmentation mask into a single-channel index mask.

    Each pixel whose color equals a label's reference RGB receives that
    label's integer index; unmatched (background) pixels stay 0.
    """
    gs_mask = np.zeros(rgb_im_array.shape[:2], dtype='uint8')
    for ref_color, class_idx in label_rgb_map.values():
        hits = (rgb_im_array == ref_color).all(axis=-1)
        gs_mask += hits.astype('uint8') * class_idx
    return gs_mask
def main():
    """CLI entry point: convert every RGB mask in <data_folder>/SegmentationClass
    into a grayscale index mask in <data_folder>/SegmentationClassRaw,
    using <data_folder>/labelmap.txt for the color-to-index mapping."""
    data_folder = sys.argv[1]
    label_map_file = os.path.join(data_folder, 'labelmap.txt')
    rgb_masks_folder = os.path.join(data_folder, 'SegmentationClass')
    gs_masks_folder = os.path.join(data_folder, 'SegmentationClassRaw')

    label_rgb_map = create_label_map(label_map_file)
    for rgb_file in tqdm(glob(rgb_masks_folder + "/*")):
        rgb_array = np.array(Image.open(rgb_file))
        gs_array = rgb_to_gs_mask(rgb_array, label_rgb_map)
        # Destination keeps the original basename so datasets line up.
        destination = os.path.join(gs_masks_folder, os.path.basename(rgb_file))
        Image.fromarray(gs_array).save(destination, 'PNG')


if __name__ == '__main__':
    main()
52164 | <gh_stars>1-10
# This file is not empty !
| StarcoderdataPython |
337983 | <reponame>wannabethere/Spark-delta-jupyter-experiments
import json
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql import *
import json
import global_vals
# Decide whether the topic schema must be (re)inferred from Kafka or can be
# loaded from the cached schema file at `schema_location`.
infer_schema = update_kafka_schema
if not infer_schema:
    try:
        topic_schema_txt = dbutils.fs.head(schema_location)
    # Fix: narrow the bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt) to Exception — a missing/unreadable schema file
    # simply forces re-inference.
    except Exception:
        infer_schema = True

if infer_schema:
    topic_schema_txt = infer_topic_schema_json(topic)
    # Refresh the cached copy.
    dbutils.fs.rm(schema_location)
    dbutils.fs.put(schema_location, topic_schema_txt)
def getSparkSchemaFromJson(schema_json):
    """Deserialize a JSON schema string back into a Spark StructType."""
    parsed = json.loads(schema_json)
    return StructType.fromJson(parsed)
def get_merged_schema(delta_table_schema, json_data_schema):
    """Merge a Delta table schema with a freshly inferred JSON schema.

    Fields present in both keep the Delta table's definition; fields found
    only in the JSON schema are appended. Returns the merged StructType with
    columns sorted alphabetically, or StructType(None) when the schemas share
    nothing and the JSON schema adds nothing.
    """
    print('str(len(delta_table_schema.fields)) -> ' + str(len(delta_table_schema.fields)))
    print('str(len(json_data_schema.fields)) -> '+ str(len(json_data_schema.fields)))
    no_commom_elements=False
    no_new_elements=False
    import numpy as np
    struct_field_array=[]
    # Columns common to both schemas: keep the Delta table's field definition
    # (dataType/nullable) so existing data stays compatible.
    if len(set(delta_table_schema.names).intersection(set(json_data_schema.names))) > 0:
        common_col=set(delta_table_schema.names).intersection(set(json_data_schema.names))
        print('common_col len: -> '+ str(len(common_col)))
        for name in common_col:
            for f in delta_table_schema.fields:
                if(f.name == name):
                    struct_field_array.append(StructField(f.name, f.dataType, f.nullable))
    else:
        no_commom_elements=True
        print("no common elements")
    # Columns that exist only in the incoming JSON schema: append as new fields.
    if len(np.setdiff1d(json_data_schema.names,delta_table_schema.names)) > 0:
        diff_list = np.setdiff1d(json_data_schema.names,delta_table_schema.names)
        print('diff_list len: -> '+ str(len(diff_list)))
        for name in diff_list:
            for f in json_data_schema.fields:
                if(f.name == name):
                    struct_field_array.append(StructField(f.name, f.dataType, f.nullable))
    else:
        no_new_elements=True
        print("no new elements")
    print('len(StructType(struct_field_array)) -> '+str(len(StructType(struct_field_array))))
    # An empty DataFrame is built solely to sort the merged columns by name.
    # NOTE(review): relies on a global `spark` session being in scope.
    df=spark.createDataFrame(spark.sparkContext.emptyRDD(),StructType(struct_field_array))
    if no_commom_elements and no_new_elements:
        return StructType(None)
    else:
        return df.select(sorted(df.columns)).schema
def str_to_bool(value):
    """Parse a boolean from common string spellings (case-insensitive).

    Accepts 'true'/'yes'/'1' and 'false'/'no'/'0'; any input is first passed
    through str(). Raises ValueError for anything else.
    """
    FALSE_VALUES = ['false', 'no', '0']
    TRUE_VALUES = ['true', 'yes', '1']
    lvalue = str(value).lower()
    if lvalue in FALSE_VALUES:
        return False
    if lvalue in TRUE_VALUES:
        return True
    # Fix: raise ValueError (still a subclass of the originally raised
    # Exception, so existing callers keep working) — the conventional type
    # for an unparseable value.
    raise ValueError("String value should be one of {}, but got '{}'.".format(FALSE_VALUES + TRUE_VALUES, value))
def validate_required_argument_and_return_value(name):
    """Fetch notebook argument *name*; abort the notebook when it is empty."""
    value = getArgument(name)
    if len(value) == 0:
        dbutils.notebook.exit("'{}' argument value is required.".format(name))
    return value
def infer_topic_schema_json(topic):
    """Infer the Spark schema of a Kafka topic's JSON payloads.

    Reads the whole topic in batch mode, keeps only the newest record per
    key, parses the values as JSON, and returns the inferred schema
    serialized via schema.json().
    """
    # Batch-read the entire topic and reduce to the latest value per key.
    df_json = (spark.read
               .format("kafka") \
               .option("kafka.bootstrap.servers", kafka_broker) \
               .option("subscribe", topic) \
               .option("startingOffsets", "earliest") \
               .option("endingOffsets", "latest") \
               .option("failOnDataLoss", "false") \
               .load() \
               # filter out empty values
               .withColumn("value", expr("string(value)")) \
               .filter(col("value").isNotNull()) \
               # get latest version of each record
               .select("key", expr("struct(offset, value) r")) \
               .groupBy("key").agg(expr("max(r) r")) \
               .select("r.value"))

    # decode the json values
    df_read = spark.read.json(
        df_json.rdd.map(lambda x: x.value), multiLine=True)

    # drop corrupt records
    # NOTE(review): isNotNull() KEEPS the rows whose _corrupt_record is set —
    # the comment's intent ("drop corrupt records") suggests isNull(); confirm.
    if "_corrupt_record" in df_read.columns:
        df_read = (df_read.filter(col("_corrupt_record").isNotNull()).drop("_corrupt_record"))

    return df_read.schema.json()
4965715 | <gh_stars>1-10
#!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
try:
from .generator import Generator, ucfirst
from .models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
except ValueError:
from generator import Generator, ucfirst
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
# Module-wide logger shared by the ObjC generator helpers.
log = logging.getLogger('global')
def join_type_and_name(type_str, name_str):
    """Concatenate a C-style type and identifier, omitting the space after a
    pointer type (e.g. 'NSString *' + 'name' -> 'NSString *name')."""
    separator = '' if type_str.endswith('*') else ' '
    return '%s%s%s' % (type_str, separator, name_str)
def strip_block_comment_markers(str):
    """Remove C block-comment delimiters ('/*' and '*/') from *str*."""
    for marker in ('/*', '*/'):
        str = str.replace(marker, '')
    return str
def remove_duplicate_from_str(str, possible_duplicate):
    """Collapse an immediately doubled occurrence of *possible_duplicate*
    (e.g. 'DOMDOMStorage' -> 'DOMStorage')."""
    doubled = possible_duplicate * 2
    return str.replace(doubled, possible_duplicate)
# Protocol identifiers that collide with Objective-C keywords or conventions,
# mapped to safe replacements; the inverse map restores the protocol name.
_OBJC_IDENTIFIER_RENAME_MAP = {
    'this': 'thisObject',  # Debugger.CallFrame.this
    'description': 'stringRepresentation',  # Runtime.RemoteObject.description
    'id': 'identifier',  # Page.Frame.id, Runtime.ExecutionContextDescription.id, Debugger.BreakpointAction.id
}

_OBJC_IDENTIFIER_REVERSE_RENAME_MAP = dict((v, k) for k, v in _OBJC_IDENTIFIER_RENAME_MAP.items())
class ObjCTypeCategory:
    """Coarse classification of protocol model types used when choosing
    ObjC conversion expressions."""
    Simple = 0
    String = 1
    Object = 2
    Array = 3

    @staticmethod
    def category_for_type(_type):
        """Map a protocol model type to its ObjCTypeCategory (None if unknown)."""
        if isinstance(_type, PrimitiveType):
            raw = _type.raw_name()
            if raw == 'string':
                return ObjCTypeCategory.String
            if raw in ('object', 'any'):
                return ObjCTypeCategory.Object
            if raw == 'array':
                return ObjCTypeCategory.Array
            return ObjCTypeCategory.Simple
        if isinstance(_type, ObjectType):
            return ObjCTypeCategory.Object
        if isinstance(_type, ArrayType):
            return ObjCTypeCategory.Array
        # Aliases and enums categorize as their underlying type.
        if isinstance(_type, AliasedType):
            return ObjCTypeCategory.category_for_type(_type.aliased_type)
        if isinstance(_type, EnumType):
            return ObjCTypeCategory.category_for_type(_type.primitive_type)
        return None
# Almost all Objective-C class names require the use of a prefix that depends on the
# target framework's 'objc_prefix' setting. So, most helpers are instance methods.

class ObjCGenerator(Generator):
    # Do not use a dynamic prefix for RWIProtocolJSONObject since it's used as a common
    # base class and needs a consistent Objective-C prefix to be in a reusable framework.
    OBJC_HELPER_PREFIX = 'RWI'
    OBJC_SHARED_PREFIX = 'Protocol'
    OBJC_STATIC_PREFIX = '%s%s' % (OBJC_HELPER_PREFIX, OBJC_SHARED_PREFIX)  # i.e. 'RWIProtocol'
    def __init__(self, *args, **kwargs):
        """Forward all arguments to the base Generator."""
        Generator.__init__(self, *args, **kwargs)
# The 'protocol name' is used to prefix filenames for a protocol group (a set of domains generated together).
def protocol_name(self):
protocol_group = self.model().framework.setting('objc_protocol_group', '')
return '%s%s' % (protocol_group, ObjCGenerator.OBJC_SHARED_PREFIX)
# The 'ObjC prefix' is used to prefix Objective-C class names and enums with a
# framework-specific prefix. It is separate from filename prefixes.
def objc_prefix(self):
framework_prefix = self.model().framework.setting('objc_prefix', None)
if not framework_prefix:
return ''
else:
return '%s%s' % (framework_prefix, ObjCGenerator.OBJC_SHARED_PREFIX)
    # Adjust identifier names that collide with ObjC keywords.
    @staticmethod
    def identifier_to_objc_identifier(name):
        """Rename a protocol identifier that collides with an ObjC keyword;
        unmapped names pass through unchanged."""
        return _OBJC_IDENTIFIER_RENAME_MAP.get(name, name)
    @staticmethod
    def objc_identifier_to_identifier(name):
        """Inverse of identifier_to_objc_identifier: restore the protocol name."""
        return _OBJC_IDENTIFIER_REVERSE_RENAME_MAP.get(name, name)
# Generate ObjC types, command handlers, and event dispatchers for a subset of domains.
DOMAINS_TO_GENERATE = ['CSS', 'DOM', 'DOMStorage', 'Network', 'Security', 'Page', 'Automation', 'GenericTypes']
def should_generate_types_for_domain(self, domain):
if not len(self.type_declarations_for_domain(domain)):
return False
if self.model().framework is Frameworks.Test:
return True
allowlist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
allowlist.update(set(['Console', 'Debugger', 'Runtime']))
return domain.domain_name in allowlist
def should_generate_commands_for_domain(self, domain):
if not len(self.commands_for_domain(domain)):
return False
if self.model().framework is Frameworks.Test:
return True
allowlist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
return domain.domain_name in allowlist
def should_generate_events_for_domain(self, domain):
if not len(self.events_for_domain(domain)):
return False
if self.model().framework is Frameworks.Test:
return True
allowlist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
allowlist.add('Console')
return domain.domain_name in allowlist
# ObjC enum and type names.
def objc_name_for_type(self, type):
name = type.qualified_name().replace('.', '')
name = remove_duplicate_from_str(name, type.type_domain().domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_anonymous_enum_declaration(self, declaration):
domain_name = declaration.type.type_domain().domain_name
name = '%s%s' % (domain_name, declaration.type.raw_name())
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_anonymous_enum_member(self, declaration, member):
domain_name = member.type.type_domain().domain_name
name = '%s%s%s' % (domain_name, declaration.type.raw_name(), ucfirst(member.member_name))
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_anonymous_enum_parameter(self, domain, event_or_command_name, parameter):
domain_name = domain.domain_name
name = '%s%s%s' % (domain_name, ucfirst(event_or_command_name), ucfirst(parameter.parameter_name))
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
def objc_enum_name_for_non_anonymous_enum(self, _type):
domain_name = _type.type_domain().domain_name
name = _type.qualified_name().replace('.', '')
name = remove_duplicate_from_str(name, domain_name)
return '%s%s' % (self.objc_prefix(), name)
# Miscellaneous name handling.
@staticmethod
def variable_name_prefix_for_domain(domain):
domain_name = domain.domain_name
if domain_name.startswith('DOM'):
return 'dom' + domain_name[3:]
if domain_name.startswith('CSS'):
return 'css' + domain_name[3:]
return domain_name[:1].lower() + domain_name[1:]
# Type basics.
@staticmethod
def objc_accessor_type_for_raw_name(raw_name):
if raw_name in ['string', 'array']:
return 'copy'
if raw_name in ['integer', 'number', 'boolean']:
return 'assign'
if raw_name in ['any', 'object']:
return 'retain'
return None
@staticmethod
def objc_type_for_raw_name(raw_name):
if raw_name == 'string':
return 'NSString *'
if raw_name == 'array':
return 'NSArray *'
if raw_name == 'integer':
return 'int'
if raw_name == 'number':
return 'double'
if raw_name == 'boolean':
return 'BOOL'
if raw_name in ['any', 'object']:
return '%sJSONObject *' % ObjCGenerator.OBJC_STATIC_PREFIX
return None
@staticmethod
def objc_class_for_raw_name(raw_name):
if raw_name == 'string':
return 'NSString'
if raw_name == 'array':
return 'NSArray'
if raw_name in ['integer', 'number', 'boolean']:
return 'NSNumber'
if raw_name in ['any', 'object']:
return '%sJSONObject' % ObjCGenerator.OBJC_STATIC_PREFIX
return None
# FIXME: Can these protocol_type functions be removed in favor of C++ generators functions?
@staticmethod
def protocol_type_for_raw_name(raw_name):
if raw_name == 'string':
return 'String'
if raw_name == 'integer':
return 'int'
if raw_name == 'number':
return 'double'
if raw_name == 'boolean':
return 'bool'
if raw_name in ['any', 'object']:
return 'JSON::Object'
return None
@staticmethod
def protocol_type_for_type(_type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return ObjCGenerator.protocol_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
return ObjCGenerator.protocol_type_for_type(_type.primitive_type)
if (isinstance(_type, ObjectType)):
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if (isinstance(_type, ArrayType)):
sub_type = ObjCGenerator.protocol_type_for_type(_type.element_type)
return 'JSON::ArrayOf<%s>' % sub_type
return None
@staticmethod
def is_type_objc_pointer_type(_type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return _type.raw_name() in ['string', 'array', 'any', 'object']
if (isinstance(_type, EnumType)):
return False
if (isinstance(_type, ObjectType)):
return True
if (isinstance(_type, ArrayType)):
return True
return None
def objc_class_for_type(self, _type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return ObjCGenerator.objc_class_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
return ObjCGenerator.objc_class_for_raw_name(_type.primitive_type.raw_name())
if (isinstance(_type, ObjectType)):
return self.objc_name_for_type(_type)
if (isinstance(_type, ArrayType)):
sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
return 'NSArray/*<%s>*/' % sub_type
return None
def objc_class_for_array_type(self, _type):
if isinstance(_type, AliasedType):
_type = _type.aliased_type
if isinstance(_type, ArrayType):
return self.objc_class_for_type(_type.element_type)
return None
    def objc_accessor_type_for_member(self, member):
        """@property ownership attribute ('copy'/'assign'/'retain') for a member's type."""
        return self.objc_accessor_type_for_member_internal(member.type)
def objc_accessor_type_for_member_internal(self, _type):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return self.objc_accessor_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
return 'assign'
if (isinstance(_type, ObjectType)):
return 'retain'
if (isinstance(_type, ArrayType)):
return 'copy'
return None
    def objc_type_for_member(self, declaration, member):
        """ObjC declaration type string for *member* of *declaration*."""
        return self.objc_type_for_member_internal(member.type, declaration, member)
def objc_type_for_member_internal(self, _type, declaration, member):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return self.objc_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
if (_type.is_anonymous):
return self.objc_enum_name_for_anonymous_enum_member(declaration, member)
return self.objc_enum_name_for_non_anonymous_enum(_type)
if (isinstance(_type, ObjectType)):
return self.objc_name_for_type(_type) + ' *'
if (isinstance(_type, ArrayType)):
sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
return 'NSArray/*<%s>*/ *' % sub_type
return None
def objc_type_for_param(self, domain, event_or_command_name, parameter, respect_optional=True):
objc_type = self.objc_type_for_param_internal(parameter.type, domain, event_or_command_name, parameter)
if respect_optional and parameter.is_optional:
if objc_type.endswith('*'):
return objc_type + '*'
return objc_type + ' *'
return objc_type
def objc_type_for_param_internal(self, _type, domain, event_or_command_name, parameter):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
return self.objc_type_for_raw_name(_type.raw_name())
if (isinstance(_type, EnumType)):
if _type.is_anonymous:
return self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter)
return self.objc_enum_name_for_non_anonymous_enum(_type)
if (isinstance(_type, ObjectType)):
return self.objc_name_for_type(_type) + ' *'
if (isinstance(_type, ArrayType)):
sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
return 'NSArray/*<%s>*/ *' % sub_type
return None
# ObjC <-> Protocol conversion for commands and events.
# - convert a command call parameter received from Protocol to ObjC for handler
# - convert a command return parameter in callback block from ObjC to Protocol to send
# - convert an event parameter from ObjC API to Protocol to send
def objc_protocol_export_expression_for_variable(self, var_type, var_name):
category = ObjCTypeCategory.category_for_type(var_type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
if isinstance(var_type, EnumType):
return 'toProtocolString(%s)' % var_name
return var_name
if category == ObjCTypeCategory.Object:
return '[%s toJSONObject]' % var_name
if category == ObjCTypeCategory.Array:
protocol_type = ObjCGenerator.protocol_type_for_type(var_type.element_type)
objc_class = self.objc_class_for_type(var_type.element_type)
if protocol_type == 'JSON::ArrayOf<String>':
return 'toJSONStringArrayArray(%s)' % var_name
if protocol_type == 'String' and objc_class == 'NSString':
return 'toJSONStringArray(%s)' % var_name
if protocol_type == 'int' and objc_class == 'NSNumber':
return 'toJSONIntegerArray(%s)' % var_name
if protocol_type == 'double' and objc_class == 'NSNumber':
return 'toJSONDoubleArray(%s)' % var_name
return 'toJSONObjectArray(%s)' % var_name
def objc_protocol_import_expression_for_member(self, name, declaration, member):
if isinstance(member.type, EnumType):
if member.type.is_anonymous:
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), name)
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(member.type), name)
return self.objc_protocol_import_expression_for_variable(member.type, name)
def objc_protocol_import_expression_for_parameter(self, name, domain, event_or_command_name, parameter):
if isinstance(parameter.type, EnumType):
if parameter.type.is_anonymous:
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter), name)
return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(parameter.type), name)
return self.objc_protocol_import_expression_for_variable(parameter.type, name)
def objc_protocol_import_expression_for_variable(self, var_type, var_name):
category = ObjCTypeCategory.category_for_type(var_type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
return var_name
if category == ObjCTypeCategory.Object:
objc_class = self.objc_class_for_type(var_type)
return '[[[%s alloc] initWithJSONObject:%s] autorelease]' % (objc_class, var_name)
if category == ObjCTypeCategory.Array:
objc_class = self.objc_class_for_type(var_type.element_type)
if objc_class == 'NSString':
return 'toObjCStringArray(%s)' % var_name
if objc_class == 'NSNumber': # FIXME: Integer or Double?
return 'toObjCIntegerArray(%s)' % var_name
return 'toObjCArray<%s>(%s)' % (objc_class, var_name)
# ObjC <-> JSON object conversion for types getters/setters.
# - convert a member setter from ObjC API to JSON object setter
# - convert a member getter from JSON object to ObjC API
def objc_to_protocol_expression_for_member(self, declaration, member, sub_expression):
category = ObjCTypeCategory.category_for_type(member.type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
if isinstance(member.type, EnumType):
return 'toProtocolString(%s)' % sub_expression
return sub_expression
if category == ObjCTypeCategory.Object:
return sub_expression
if category == ObjCTypeCategory.Array:
objc_class = self.objc_class_for_type(member.type.element_type)
if objc_class == 'NSString':
return 'toJSONStringArray(%s)' % sub_expression
if objc_class == 'NSNumber':
protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
if protocol_type == 'double':
return 'toJSONDoubleArray(%s)' % sub_expression
return 'toJSONIntegerArray(%s)' % sub_expression
return 'toJSONObjectArray(%s)' % sub_expression
def protocol_to_objc_expression_for_member(self, declaration, member, sub_expression):
category = ObjCTypeCategory.category_for_type(member.type)
if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
if isinstance(member.type, EnumType):
if member.type.is_anonymous:
return 'fromProtocolString<%s>(%s).value()' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), sub_expression)
return 'fromProtocolString<%s>(%s).value()' % (self.objc_enum_name_for_non_anonymous_enum(member.type), sub_expression)
return sub_expression
if category == ObjCTypeCategory.Object:
raise Exception("protocol_to_objc_expression_for_member does not support an Object type. See: protocol_to_objc_code_block_for_object_member")
if category == ObjCTypeCategory.Array:
protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
objc_class = self.objc_class_for_type(member.type.element_type)
if objc_class == 'NSString':
return 'toObjCStringArray(%s)' % sub_expression
if objc_class == 'NSNumber':
protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
if protocol_type == 'double':
return 'toObjCDoubleArray(%s)' % sub_expression
return 'toObjCIntegerArray(%s)' % sub_expression
return 'toObjCArray<%s>(%s)' % (objc_class, sub_expression)
    def protocol_to_objc_code_block_for_object_member(self, declaration, member, sub_expression):
        """Emit a multi-line ObjC code block converting a protocol Object member
        into an instance of its generated ObjC wrapper class."""
        objc_class = self.objc_class_for_type(member.type)
        lines = []
        lines.append(' %sJSONObject *object = %s;' % (ObjCGenerator.OBJC_STATIC_PREFIX, sub_expression))
        lines.append(' if (!object)')
        lines.append(' return nil;')
        # NOTE(review): the return statement re-evaluates `sub_expression` instead of
        # reusing the local `object` declared above — confirm this double evaluation
        # is intentional (it is harmless only if sub_expression has no side effects).
        lines.append(' return [[%s alloc] initWithJSONObject:[%s toJSONObject].get()];' % (objc_class, sub_expression))
        return '\n'.join(lines)
    def payload_to_objc_expression_for_member(self, declaration, member):
        """Return the expression extracting `member` from an NSDictionary payload
        and converting it to the member's ObjC representation.

        Returns None for primitive raw names with no known conversion, which the
        caller is expected to handle.
        """
        _type = member.type
        if isinstance(_type, AliasedType):
            # Unwrap type aliases before dispatching on the concrete type.
            _type = _type.aliased_type
        if isinstance(_type, PrimitiveType):
            sub_expression = 'payload[@"%s"]' % member.member_name
            raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return '[%s boolValue]' % sub_expression
            if raw_name == 'integer':
                return '[%s integerValue]' % sub_expression
            if raw_name == 'number':
                return '[%s doubleValue]' % sub_expression
            if raw_name in ['any', 'object', 'array', 'string']:
                return sub_expression  # The setter will check the incoming value.
            return None
        # NOTE(review): this checks the *unaliased* member.type, while the branch
        # above used the alias-resolved `_type`; an AliasedType wrapping an enum
        # would therefore skip this branch — confirm that is intended.
        if isinstance(member.type, EnumType):
            sub_expression = 'payload[@"%s"]' % member.member_name
            if member.type.is_anonymous:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), sub_expression)
            else:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(member.type), sub_expression)
        if isinstance(_type, ObjectType):
            objc_class = self.objc_class_for_type(member.type)
            return '[[%s alloc] initWithPayload:payload[@"%s"]]' % (objc_class, member.member_name)
        if isinstance(_type, ArrayType):
            element_type = member.type.element_type
            if isinstance(element_type, EnumType):
                element_type = element_type.primitive_type
            # In this case, there is no conversion that needs to be done, the array already contains an ObjC type.
            if isinstance(element_type, PrimitiveType):
                return 'payload[@"%s"]' % member.member_name
            else:
                objc_class = self.objc_class_for_type(element_type)
                return 'objcArrayFromPayload<%s>(payload[@"%s"])' % (objc_class, member.member_name)
# JSON object setter/getter selectors for types.
    @staticmethod
    def objc_setter_method_for_member(declaration, member):
        """Return the JSON-object setter selector for `member` (dispatches on the member's type)."""
        return ObjCGenerator.objc_setter_method_for_member_internal(member.type, declaration, member)
@staticmethod
def objc_setter_method_for_member_internal(_type, declaration, member):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
raw_name = _type.raw_name()
if raw_name == 'boolean':
return 'setBool'
if raw_name == 'integer':
return 'setInteger'
if raw_name == 'number':
return 'setDouble'
if raw_name == 'string':
return 'setString'
if raw_name in ['any', 'object']:
return 'setObject'
if raw_name == 'array':
return 'setJSONArray'
return None
if (isinstance(_type, EnumType)):
return 'setString'
if (isinstance(_type, ObjectType)):
return 'setObject'
if (isinstance(_type, ArrayType)):
return 'setJSONArray'
return None
    @staticmethod
    def objc_getter_method_for_member(declaration, member):
        """Return the JSON-object getter selector for `member` (dispatches on the member's type)."""
        return ObjCGenerator.objc_getter_method_for_member_internal(member.type, declaration, member)
@staticmethod
def objc_getter_method_for_member_internal(_type, declaration, member):
if (isinstance(_type, AliasedType)):
_type = _type.aliased_type
if (isinstance(_type, PrimitiveType)):
raw_name = _type.raw_name()
if raw_name == 'boolean':
return 'boolForKey'
if raw_name == 'integer':
return 'integerForKey'
if raw_name == 'number':
return 'doubleForKey'
if raw_name == 'string':
return 'stringForKey'
if raw_name in ['any', 'object']:
return 'objectForKey'
if raw_name == 'array':
return 'JSONArrayForKey'
return None
if (isinstance(_type, EnumType)):
return 'stringForKey'
if (isinstance(_type, ObjectType)):
return 'objectForKey'
if (isinstance(_type, ArrayType)):
return 'JSONArrayForKey'
return None
| StarcoderdataPython |
8152523 | import discord
from discord.ext import commands
from discord.utils import get
class c90(commands.Cog, name="c90"):
    """Cog that posts the card embed for "A Term for Peace"."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(name='A_Term_for_Peace', aliases=['c90'])
    async def example_embed(self, ctx):
        """Build and send the card's embed to the invoking channel."""
        embed = discord.Embed(title='A Term for Peace', color=0xBC5A84)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2321533.jpg')
        card_fields = (
            ('Status (Archetype)', 'Casual:3/Tournament:3', True),
            ('Type', 'Trap/Normal', False),
            ('Card Effect', 'If each player has 2 or less cards in their hand: Pay LP in multiples of 1000 (max. 3000); each player draws 1 card for each 1000 LP paid, then each player discards cards from their hand equal to the number of cards drawn -1. You can banish this card from your GY, except the turn it was sent there; draw 1 card. You can only use this effect of "A Term for Peace" once per turn. You can only activate 1 "A Term for Peace" per turn.', False),
        )
        for field_name, field_value, field_inline in card_fields:
            embed.add_field(name=field_name, value=field_value, inline=field_inline)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    # discord.py extension entry point: called on load to register this cog.
    bot.add_cog(c90(bot))
123897 | <filename>shape.py<gh_stars>1-10
import turtle
import time
import random
# Read shape parameters. input() returns str in Python 3, so convert to int
# explicitly — the original fed strings straight into the arithmetic below,
# which raises a TypeError.
squares = int(input("Enter the number of sides of the shape: "))
numshapes = int(input("Enter the number of shapes to draw: "))

# Exterior turn angle for a regular polygon with `squares` sides.
angle = 180 - 180 * (squares - 2) / squares

turtle.up()  # FIX: original wrote `turtle.up` without parentheses, a no-op
x = 0
y = 0
turtle.setpos(x, y)

for x in range(numshapes):
    turtle.color(random.random(), random.random(), random.random())
    turtle.left(y)
    # Draw one filled polygon. (FIX: the original wrapped this in an extra
    # `for i in range(squares)` loop that redrew each shape `squares` times
    # with no visible effect.)
    turtle.begin_fill()
    turtle.down()
    for i in range(squares):
        turtle.forward(100)
        turtle.left(angle)
    turtle.up()
    turtle.end_fill()
    y = 40  # every shape after the first is rotated a further 40 degrees

time.sleep(11)
turtle.bye()
| StarcoderdataPython |
def functionCalculator(first_num, operator, second_num):
    """Apply a basic arithmetic operator, print the operands and result, and return the result.

    Args:
        first_num: left operand.
        operator: one of '+', '-', '/', '*'.
        second_num: right operand.

    Returns:
        The computed result (also printed). The original returned None;
        returning the value is backward-compatible and makes the function testable.

    Raises:
        ValueError: for an unsupported operator. (The original crashed with
            NameError on an unbound `result` instead.)
        ZeroDivisionError: when dividing by zero.
    """
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '/': lambda a, b: a / b,
        '*': lambda a, b: a * b,
    }
    if operator not in operations:
        raise ValueError("unsupported operator: %r" % operator)
    result = operations[operator](first_num, second_num)
    print('first_num: ', first_num, 'operator: ', operator, 'second_num: ', second_num, 'result: ', result)
    return result


# Call the function by name, passing the numbers in parentheses (demo calls).
functionCalculator(2, '+', 3)
functionCalculator(6, '-', 5)
functionCalculator(6, '/', 3)
functionCalculator(6, '*', 3)
8180101 | <reponame>caiyongji/py36-tf2.0rc<gh_stars>0
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.random namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow._api.v2.compat.v2.random import experimental
from tensorflow.python.framework.random_seed import set_seed
from tensorflow.python.ops.candidate_sampling_ops import all_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import fixed_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import learned_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import log_uniform_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import uniform_candidate_sampler
from tensorflow.python.ops.random_ops import categorical
from tensorflow.python.ops.random_ops import random_gamma as gamma
from tensorflow.python.ops.random_ops import random_normal as normal
from tensorflow.python.ops.random_ops import random_poisson_v2 as poisson
from tensorflow.python.ops.random_ops import random_shuffle as shuffle
from tensorflow.python.ops.random_ops import random_uniform as uniform
from tensorflow.python.ops.random_ops import truncated_normal
from tensorflow.python.ops.stateless_random_ops import stateless_categorical
from tensorflow.python.ops.stateless_random_ops import stateless_random_normal as stateless_normal
from tensorflow.python.ops.stateless_random_ops import stateless_random_uniform as stateless_uniform
from tensorflow.python.ops.stateless_random_ops import stateless_truncated_normal
del _print_function

from tensorflow.python.util import module_wrapper as _module_wrapper

# Replace this generated module object with a TFModuleWrapper so attribute
# access can emit deprecation warnings; the isinstance guard prevents
# double-wrapping if the module is re-imported.
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
  _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
      _sys.modules[__name__], "compat.v2.random", public_apis=None, deprecation=False,
      has_lite=False)
| StarcoderdataPython |
1859595 | <gh_stars>1-10
#!/usr/bin/env python3
# vim: set fileencoding=utf-8
from PIL import Image
from time import sleep
class Sprite:
    """A spritesheet: a grid of equally sized tiles cut from one source image.

    Use define() to attach named animation-cycle methods to this object; each
    attached method keeps its own internal frame counter.
    """

    def __init__(self, filename, grid_size=7, offset=(0, 0)):
        self.offset = offset
        self.src = Image.open(filename)
        self.gs = grid_size
        self.grid = (
            int(self.src.width / self.gs),
            int(self.src.height / self.gs)
        )

    def getTile(self, idx):
        """Crop and return tile number `idx`; tiles are numbered left-to-right, row by row."""
        col = idx % self.grid[0]
        row = int(idx / self.grid[0])
        off_x, off_y = self.offset
        left = col * self.gs + off_x
        top = row * self.gs + off_y
        return self.src.crop((left, top, left + self.gs, top + self.gs))

    def define(self, label, frame_idxs):
        """Attach a method named `label` that yields the next frame of the cycle on each call.

        The cycle wraps around after the last index in `frame_idxs`.
        """
        position = 0

        def advance():
            nonlocal position
            frame = self.getTile(frame_idxs[position])
            position = (position + 1) % len(frame_idxs)
            return frame

        setattr(self, label, advance)
if __name__ == '__main__':
    # Demo: walk the "mervin" sprite around a 7x7 LED matrix screen.
    # Depends on project-local hardware bindings (lib.SevenBySeven) and the
    # third-party `keyboard` package; this only runs on the target device.
    from lib import SevenBySeven
    import keyboard
    # 8-pixel tiles with a 1-pixel border offset in the spritesheet.
    mervin = Sprite('images/mervin.png', 8, (1, 1) )
    screen = SevenBySeven()
    # Frame indices 0-15 hold four 4-frame walk cycles, one per direction.
    mervin.define('walk_right', range(4) )
    mervin.define('walk_left', range(4, 8) )
    mervin.define('walk_down', range(8, 12) )
    mervin.define('walk_up', range(12, 16) )
    # NOTE(review): the hotkey callback takes `key` but keyboard.add_hotkey
    # invokes callbacks with no arguments — this handler would raise; confirm.
    keyboard.add_hotkey('space', lambda key: print('asdf' + key))
    # Endless demo loop: 10 frames per direction at 10 fps.
    while True:
        for i in range(10):
            screen.show( mervin.walk_up() )
            sleep(0.1)
        for i in range(10):
            screen.show( mervin.walk_right() )
            sleep(0.1)
        for i in range(10):
            screen.show( mervin.walk_down() )
            sleep(0.1)
        for i in range(10):
            screen.show( mervin.walk_left() )
            sleep(0.1)
| StarcoderdataPython |
6567405 | <filename>Dataset/Leetcode/train/70/81.py
class Solution:
    def XXX(self, n: int) -> int:
        """Count distinct ways to climb `n` stairs taking 1 or 2 steps at a time (LeetCode 70).

        The answer is the (n+1)-th Fibonacci number. The original summed
        binomial coefficients using a recursive factorial lambda and float
        division, which is O(n^2) and loses precision for large n; this
        iterative pair update is O(n) and exact integer arithmetic.
        """
        a, b = 1, 1
        for _ in range(n - 1):
            a, b = b, a + b
        return b
| StarcoderdataPython |
6413974 | <reponame>iwoithe/Gimel-Studio
# THIS FILE IS A PART OF GIMEL STUDIO AND IS LICENSED UNDER THE SAME TERMS:
# ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2021 by <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import cv2
from PIL import ImageFilter
from GimelStudio import api
from GimelStudio.utils.image import ArrayFromImage, ArrayToImage
class EdgeDetectNode(api.NodeBase):
    """Gimel Studio node that produces an edge-detection mask from its input image.

    Offers two methods: Pillow's FIND_EDGES filter and OpenCV's Canny detector
    (with configurable thresholds).
    """

    def __init__(self, _id):
        api.NodeBase.__init__(self, _id)

    @property
    def NodeMeta(self):
        # Static metadata shown in the node registry / UI.
        meta_info = {
            "label": "Edge Detect",
            "author": "iwoithe",
            "version": (0, 0, 1),
            "supported_app_version": (0, 5, 0),
            "category": "MASK",
            "description": "Detects the edges",
        }
        return meta_info

    def NodeInitProps(self):
        """Declare the node's editable properties: method choice and Canny thresholds."""
        self.method = api.ChoiceProp(
            idname="Method",
            default="Canny",
            choices=["Find Edges", "Canny"],
            label="Method:"
        )
        # Thresholds are only meaningful for the Canny method; they are
        # hidden/shown dynamically in WidgetEventHook.
        self.lower_threshold = api.PositiveIntegerProp(
            idname="Lower Threshold",
            default=30,
            min_val=0,
            max_val=100,
            widget=api.SLIDER_WIDGET,
            label="Lower Threshold:"
        )
        self.higher_threshold = api.PositiveIntegerProp(
            idname="Higher Threshold",
            default=100,
            min_val=0,
            max_val=100,
            widget=api.SLIDER_WIDGET,
            label="Higher Threshold:"
        )
        self.NodeAddProp(self.method)
        self.NodeAddProp(self.lower_threshold)
        self.NodeAddProp(self.higher_threshold)

    def NodeInitParams(self):
        """Declare the node's single input parameter: the image to process."""
        image = api.RenderImageParam("Image")
        self.NodeAddParam(image)

    def WidgetEventHook(self, idname, value):
        """Show the threshold sliders only while the Canny method is selected."""
        if idname == "Method" and value == "Find Edges":
            self.lower_threshold.SetIsVisible(False)
            self.higher_threshold.SetIsVisible(False)
            self.RefreshPropertyPanel()
        elif idname == "Method" and value == "Canny":
            self.lower_threshold.SetIsVisible(True)
            self.higher_threshold.SetIsVisible(True)
            self.RefreshPropertyPanel()

    def NodeEvaluation(self, eval_info):
        """Run the selected edge detector on the input image and return the result.

        Falls back to passing the input image through unchanged if the method
        value is unrecognized.
        """
        input_image = eval_info.EvaluateParameter("Image")
        method = eval_info.EvaluateProperty("Method")
        lower_threshold = eval_info.EvaluateProperty("Lower Threshold")
        higher_threshold = eval_info.EvaluateProperty("Higher Threshold")

        image = api.RenderImage()
        # Consider removing the Pillow method?
        if method == "Find Edges":
            # Pillow path: grayscale, then the built-in FIND_EDGES convolution.
            img = input_image.GetImage().convert("L").filter(ImageFilter.FIND_EDGES)
            image.SetAsImage(img.convert("RGBA"))
        elif method == "Canny":
            # OpenCV path: convert PIL image <-> numpy array around cv2.Canny.
            input_image_array = ArrayFromImage(input_image.GetImage())
            output_image_array = cv2.Canny(input_image_array, lower_threshold, higher_threshold)
            image.SetAsImage(ArrayToImage(output_image_array).convert("RGBA"))
        else:
            image.SetAsImage(input_image.GetImage())
        self.NodeSetThumb(image.GetImage())
        return image
# Make the node available to the application under its internal id.
api.RegisterNode(EdgeDetectNode, "corenode_edgedetect")
| StarcoderdataPython |
5152077 | #!/usr/bin/env python
# coding: utf-8
# author: <NAME> <EMAIL>
# platform: python 2.6-2.7, 3.5-3.8+
# demos are provided in test_json_compare.py
from __future__ import print_function
import json
import re
import traceback
import six
import codecs
_NUMBER_TYPES = list(six.integer_types) + [float]
class _Compare(object):
    """Recursive, option-driven comparison of two JSON-like structures.

    Supports order-insensitive list comparison, fuzzy float equality, regex
    value matching, ignorable/omittable paths, and human-readable diff output
    via a caller-supplied callback. Use the public compare() entry point; all
    other members are implementation details.
    """

    def __init__(self):
        # All option fields are populated by compare(); placeholders until then.
        self._float_fuzzy_digits = None
        self._strict_number_type = None
        self._res = None               # overall result flag, flipped False on first difference
        self._ignore_list_seq = None
        self._re_compare = True
        self._ignore_path = None
        self._omit_path = None
        self._handle = None            # diff-output callback (default: print)

    @staticmethod
    def _tuple_append(t, i):
        """Return tuple `t` with `i` appended as text (paths are tuples of strings)."""
        return tuple(list(t) + [six.text_type(i)])

    @staticmethod
    def _to_unicode_if_string(strlike):
        """Decode byte strings to unicode (utf-8); pass everything else through."""
        if type(strlike) == six.binary_type:
            try:
                return strlike.decode('utf-8')
            except UnicodeDecodeError:
                raise ValueError("decoding string {} failed, may be local encoded".format(repr(strlike)))
        else:
            return strlike

    @staticmethod
    def _to_list_if_tuple(listlike):
        """Normalize tuples to lists so both compare identically."""
        if type(listlike) == tuple:
            return list(listlike)
        else:
            return listlike

    def _common_warp(self, anylike):
        # Normalize both string encodings and tuple/list representation.
        return self._to_list_if_tuple(self._to_unicode_if_string(anylike))

    def _fuzzy_float_equal(self, a, b):
        """Compare floats within 10^-digits tolerance when fuzzy digits are set."""
        if self._float_fuzzy_digits:
            return abs(a - b) < 10 ** (-self._float_fuzzy_digits)
        else:
            return a == b

    @staticmethod
    def _modify_a_key(dic, from_key, to_key):
        """Rename a dict key in place, asserting the new key does not collide."""
        assert not any([type(to_key) == type(exist_key) and to_key == exist_key for exist_key in
                        dic.keys()]), 'cannot change the key due to key conflicts'
        # cannot use IN here `to_key in dic.keys()`, because u"a" in ["a"] == True
        dic[to_key] = dic.pop(from_key)

    def _fuzzy_number_type(self, value):
        """Return the value's type with int/float collapsed per the strictness option."""
        if not self._strict_number_type:
            type_dict = {x: float for x in six.integer_types}
        else:
            type_dict = {x: int for x in six.integer_types}
        res = type(value)
        return type_dict.get(res, res)

    def _turn_dict_keys_to_unicode(self, dic):
        """In-place convert all byte-string dict keys to unicode; other key types are rejected."""
        keys = dic.keys()
        modifiers = []
        for key in keys:  # a.keys() returns a constant, so it is safe because ak won't change
            if type(key) == six.binary_type:
                modifiers.append((key, self._to_unicode_if_string(key)))
            else:
                assert type(key) == six.text_type, 'key {} must be string or unicode in dict {}'.format(key, dic)
        for from_key, to_key in modifiers:
            self._modify_a_key(dic, from_key, to_key)

    def _set_false(self):
        # Record that at least one difference was found.
        self._res = False

    @staticmethod
    def _escape(s):
        """Best-effort unescape of \\x / \\u escape sequences for readable logging.

        :param s: binary if py2 else unicode
        :return: unicode text
        """
        if r'\x' in s:
            s = s.decode('string-escape') if six.PY2 else codecs.escape_decode(s)[0].decode('utf-8')  # no string-escape
        if r'\u' in s:
            s = s.decode('unicode-escape') if six.PY2 else s.encode().decode('unicode-escape')
        if type(s) == six.binary_type:
            s = s.decode('utf-8')  # This often comes from unix servers
        return s

    # difference_print methods — each marks the result False and reports via the callback.
    def _different_type(self, a, b, root):
        self._set_false()
        self._handle("different type at /{}".format("/".join(root)))
        self._handle("a {}: ".format(type(a)) + repr(a))
        self._handle("b {}: ".format(type(b)) + repr(b))

    def _different_value(self, a, b, root):
        self._set_false()
        self._handle("different value at /{}".format("/".join(root)))
        self._handle("a: " + repr(a))
        self._handle("b: " + repr(b))

    def _different_length(self, a, b, root):
        self._set_false()
        self._handle("different length of list at /{}".format("/".join(root)))
        self._handle("len(a)={} : ".format(len(a)) + repr(a))
        self._handle("len(b)={} : ".format(len(b)) + repr(b))

    def _list_item_not_found(self, ele, which, root):
        self._set_false()
        self._handle("list {} at /{}".format(which, "/".join(root)))
        self._handle("has element that another list hasn't :")
        self._handle(repr(ele))

    def _list_freq_not_match(self, root, aplace, bplace, ele, counta, countb):
        self._set_false()
        self._handle(
            "list at /{}, index {}, has different frequency from b at index {}:".format("/".join(root), aplace, bplace))
        self._handle("element is {}".format(ele))
        self._handle("count of list a: {}".format(counta))
        self._handle("count of list b: {}".format(countb))

    def _dict_key_not_found(self, keys, which, root):
        self._set_false()
        self._handle("dict {} at /{}".format(which, "/".join(root)))
        self._handle("has key(s) that another dict hasn't :")
        self._handle(keys)

    # internal compare methods
    def _list_comp(self, a, b, root, printdiff):
        """Compare two lists; order-sensitive or order-insensitive per the option.

        With printdiff=False returns a bool as soon as a difference is proven;
        with printdiff=True reports every difference via the callback.
        """
        if len(a) != len(b):
            if not printdiff:
                return False
            self._different_length(a, b, root)
            # Report, per side, the elements that have no match in the other list.
            found_b = [False] * len(b)
            for i, a_i in enumerate(a):
                found = False
                for j, b_j in enumerate(b):
                    if self._common_comp(a_i, b_j, printdiff=False):
                        found_b[j] = True
                        found = True
                        break
                if not found:
                    buff = self._tuple_append(root, i)
                    self._list_item_not_found(a_i, "a", buff)
            found_a = [False] * len(a)
            for j, b_j in enumerate(b):
                found = False
                for i, a_i in enumerate(a):
                    if self._common_comp(a_i, b_j, printdiff=False):
                        found_a[i] = True
                        found = True
                        break
                if not found:
                    buff = self._tuple_append(root, j)
                    self._list_item_not_found(b_j, "b", buff)
            return
        if not self._ignore_list_seq:
            # Order-sensitive: compare element-wise.
            for i in range(min(len(a), len(b))):
                buff = self._tuple_append(root, i)
                if not self._common_comp(a[i], b[i], buff, printdiff):
                    if not printdiff:
                        return False
        else:
            # Order-insensitive: count, for each element, how many matches it has
            # in the other list, remembering one matching index per element.
            counts_a = [[0, None] for _ in range(len(a))]
            counts_b = [[0, None] for _ in range(len(a))]
            need_to_compare_number = True
            for i in range(len(a)):
                for j in range(len(a)):
                    buff = self._tuple_append(root, len(a) * 10)
                    if self._common_comp(a[i], b[j], buff, printdiff=False):
                        counts_a[i][1] = j
                        counts_a[i][0] += 1
                    if self._common_comp(b[i], a[j], buff, printdiff=False):
                        counts_b[i][1] = j
                        counts_b[i][0] += 1
                if not counts_a[i][0]:
                    if not printdiff:
                        return False
                    need_to_compare_number = False
                    buff = self._tuple_append(root, i)
                    self._list_item_not_found(a[i], "a", buff)
                if not counts_b[i][0]:
                    if not printdiff:
                        return False
                    need_to_compare_number = False
                    buff = self._tuple_append(root, i)
                    self._list_item_not_found(b[i], "b", buff)
            if need_to_compare_number:
                # All elements matched somewhere; now verify match frequencies agree.
                for i in range(len(counts_a)):
                    counta, place = counts_a[i]
                    countb = counts_b[place][0]
                    if countb != counta and counts_b[place][1] == i:  # to prevent printing twice
                        if not printdiff:
                            return False
                        self._list_freq_not_match(root, i, place, a[i], countb, counta)  # need to swap counter here:)
        if not printdiff:
            return True

    def _dict_comp(self, a, b, root, printdiff):
        """Compare two dicts: key sets first, then shared keys recursively.

        Honors omit_path by dropping the configured keys from both sides at
        the matching path ("*" path segments match any segment).
        """
        self._turn_dict_keys_to_unicode(a)
        self._turn_dict_keys_to_unicode(b)
        if self._omit_path:
            omit_dict = {}
            for x in self._omit_path:
                # Split "/a/b/key" into path prefix ("a","b") and final key "key".
                pre, tat = x.split(u"/")[1:-1], x.split(u"/")[-1]
                for i, v in enumerate(pre):
                    if v == u"*" and i < len(root):
                        pre[i] = root[i]
                pre = tuple(pre)
                if pre not in omit_dict:
                    omit_dict[pre] = [tat]
                else:
                    omit_dict[pre].append(tat)
            if root in omit_dict:
                a = {k: v for k, v in a.items() if k not in omit_dict[root]}
                b = {k: v for k, v in b.items() if k not in omit_dict[root]}
        ak = a.keys()  # refresh again to make sure it's unicode now
        bk = b.keys()
        diffak = [x for x in ak if x not in bk]
        diffbk = [x for x in bk if x not in ak]
        if diffak:
            if not printdiff:
                return False
            self._dict_key_not_found(diffak, "a", root)
        if diffbk:
            if not printdiff:
                return False
            self._dict_key_not_found(diffbk, "b", root)
        samekeys = [x for x in ak if x in bk]
        for key in samekeys:
            buff = self._tuple_append(root, key)
            if not self._common_comp(a[key], b[key], buff, printdiff):
                if not printdiff:
                    return False
        if not printdiff:
            return True

    def _common_comp(self, a, b, root=(), printdiff=True):
        """Dispatch comparison of two values at path `root` by their (normalized) type."""
        if self._ignore_path:
            current_path = u"/{}".format(u"/".join(root))
            for ignore_item in self._ignore_path:
                # Ignore entries may be literal paths or anchored regexes.
                if ignore_item[0] == u"^" or ignore_item[-1] == u"$":
                    find = re.findall(ignore_item, current_path)
                    assert len(find) < 2, "shouldn't be this"
                    if find and find[0] == current_path:
                        return True
                else:
                    if u"/{}".format(u"/".join(root)) == ignore_item:
                        return True
        a = self._common_warp(a)
        b = self._common_warp(b)
        if self._fuzzy_number_type(a) != self._fuzzy_number_type(b):
            if not printdiff:
                return False
            self._different_type(a, b, root)
            return
        if type(a) not in [dict, list]:
            # Scalar leaf: direct value comparison.
            if not self._value_comp(a, b, printdiff):
                if not printdiff:
                    return False
                self._different_value(a, b, root)
            elif not printdiff:
                return True
            return
        if type(a) == list:
            return self._list_comp(a, b, root, printdiff)
        if type(a) == dict:
            return self._dict_comp(a, b, root, printdiff)
        raise TypeError("shouldn't be here")

    def _value_comp(self, a, b, printdiff=True):  # the most base comparison
        """Compare two scalars: fuzzy float equality, plain equality, or regex match.

        A string anchored with "^" or "$" on either side is treated as a regex
        pattern against the other side (only one side may be a pattern).
        """
        if not self._re_compare or type(a) != six.text_type or type(b) != six.text_type:
            if (type(a) == float and type(b) in _NUMBER_TYPES) or (type(b) == float and type(a) in _NUMBER_TYPES):
                return self._fuzzy_float_equal(a, b)
            else:
                return a == b
        else:
            a_is_re = len(a) > 0 and (a[0] == u"^" or a[-1] == u"$")
            b_is_re = len(b) > 0 and (b[0] == u"^" or b[-1] == u"$")  # lazy eval prevents index out of range error
            if not a_is_re and not b_is_re:
                return a == b
            assert not (a_is_re and b_is_re), "can't compare two regular expressions"
            if b_is_re:  # let a be re
                a, b = b, a
            find = re.findall(a, b)
            assert len(find) < 2, "shouldn't be this"
            if not find:
                if printdiff:
                    self._handle("re compare failed, empty match, see next line")
                return False
            if not find[0] == b:
                if printdiff:
                    self._handle("re compare failed, found {}, expect {}, see next line".format(find[0], b))
                return False
            return True

    def compare(self, a, b, ignore_list_seq=True, re_compare=True, ignore_path=None, callback=print, strict_json=False,
                float_fuzzy_digits=0, strict_number_type=False, omit_path=None):
        """Compare two JSON-like values (or JSON strings) and return True if equal.

        Strings/bytes inputs are json.loads-ed first. Differences are reported
        via `callback`. Options: ignore_list_seq (order-insensitive lists),
        re_compare (regex string values), ignore_path / omit_path (skip or drop
        paths), strict_json (pre-validate serializability), float_fuzzy_digits
        and strict_number_type (numeric tolerance and int/float distinction).
        """
        self._handle = callback
        flag = False  # transferred str to object, need recursion
        if type(a) in [six.text_type, six.binary_type]:
            json_loaded_a = json.loads(a)  # json only, should use eval when using python dict/list-like strings instead
            flag = True
        else:
            json_loaded_a = a
        if type(b) in [six.text_type, six.binary_type]:
            json_loaded_b = json.loads(b)
            flag = True
        else:
            json_loaded_b = b
        if flag:
            return self.compare(json_loaded_a, json_loaded_b, ignore_list_seq, re_compare, ignore_path, callback,
                                strict_json, float_fuzzy_digits, strict_number_type, omit_path)
        if strict_json:
            try:
                json.dumps(a, ensure_ascii=False)
                json.dumps(b, ensure_ascii=False)
            except TypeError:
                self._handle(traceback.format_exc())
                raise TypeError("unsupported type found during strict json check")
        self._res = True
        self._ignore_list_seq = ignore_list_seq
        self._re_compare = re_compare
        self._float_fuzzy_digits = float_fuzzy_digits
        self._strict_number_type = strict_number_type
        self._ignore_path = None if ignore_path is None else [self._to_unicode_if_string(path) for path in ignore_path]
        self._omit_path = None if omit_path is None else [self._to_unicode_if_string(path) for path in omit_path]
        if self._ignore_path:
            assert all([path[0] == u"/" or u"(/" in path for path in self._ignore_path]), "invalid ignore path"
        if self._omit_path:
            assert all([path[0] == u"/" and path.split(u"/")[-1] not in (u"", u"*") and not path.split(u"/")[-1].
                       isdigit() for path in self._omit_path]), "invalid omit path"
        self._handle(self._escape("a is {}".format(a)))
        self._handle(self._escape("b is {}".format(b)))
        self._handle("ignore_list_seq = {}, re_compare = {}, ignore_path = {}, omit_path = {}, float_fuzzy_digits = {}"
                     .format(ignore_list_seq, re_compare, ignore_path, omit_path, self._float_fuzzy_digits))
        self._common_comp(a, b)
        return self._res
def compare(a, b, *args, **kwargs):
    """Module-level convenience wrapper: run a one-shot comparison on a fresh _Compare."""
    return _Compare().compare(a, b, *args, **kwargs)
def check(a, b, *args, **kwargs):
    """Assert-style variant of compare(); raises AssertionError when the values differ."""
    assert _Compare().compare(a, b, *args, **kwargs)
| StarcoderdataPython |
from flask import Flask
app = Flask(__name__)

# Imported after `app` is created because lachesis.views imports `app` back
# from this module (the usual deliberate circular-import pattern in Flask apps).
import lachesis.views
# NOTE(review): init_db/clear_db are imported but not called here — presumably
# re-exported for callers; confirm before removing.
from lachesis.models.database import init_db, clear_db

if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
6557644 | # -*- coding: utf-8 -*-
"""Contains app-specific admin classes."""
# Django imports
from django.conf import settings
from django.contrib.admin import ModelAdmin, register
# app imports
from auth_enhanced.models import UserEnhancement
def register_only_debug(*models, **kwargs):
    """Register the given model class(es) with the admin site only when settings.DEBUG is true.

    When DEBUG is enabled this simply re-uses Django's own @register
    decorator; otherwise it returns a decorator that silently discards the
    admin class.

    See https://github.com/django/django/blob/master/django/contrib/admin/decorators.py
    for the original Django implementation.

    TODO: Using '**kwargs' doesn't mimic Django2.0 codebase, but Django1.11!"""
    if settings.DEBUG:
        # hand off to Django's real register-decorator
        return register(*models, **kwargs)

    def _wrapper_noop(admin_class):
        # deliberate no-op: accept the class without registering anything
        pass

    return _wrapper_noop
@register_only_debug(UserEnhancement)
class UserEnhancementAdmin(ModelAdmin):
    """Integrates UserEnhancement into Django's admin menu (DEBUG builds only).

    This ModelAdmin is just used for development and should not be registered
    in real production versions.

    UserEnhancements will be integrated into the respective admin class for
    the User-objects."""
    pass
| StarcoderdataPython |
6511816 | """
海龟交易策略
此示例策略适用于OKEX币本位合约,
可根据自己需求自行修改
Author: <NAME>
Date: 2020/09/17
email: <EMAIL>
"""
from purequant.trade import OKEXFUTURES
from purequant.indicators import INDICATORS
from purequant.market import MARKET
from purequant.position import POSITION
from purequant.logger import logger
from purequant.time import *
from purequant.config import config
from purequant.push import push
from purequant.storage import storage
import pandas as pd
class Strategy:
    def __init__(self, instrument_id, time_frame, start_asset):  # init takes contract id, k-line period and starting capital
        """Wire up exchange/market/position/indicator helpers and the turtle parameters.

        NOTE(review): this constructor talks to external services (OKEX API,
        MySQL storage) — it cannot run without valid credentials and a
        reachable database.
        """
        print("{} {} 海龟交易策略已启动!".format(get_localtime(), instrument_id))  # announce start-up
        config.loads("config.json")  # load the configuration file
        self.instrument_id = instrument_id  # contract id
        self.time_frame = time_frame  # k-line period
        self.exchange = OKEXFUTURES(config.access_key, config.secret_key, config.passphrase, self.instrument_id, leverage=20)  # initialize the exchange client
        self.market = MARKET(self.exchange, self.instrument_id, self.time_frame)  # initialize market helper
        self.position = POSITION(self.exchange, self.instrument_id, self.time_frame)  # initialize position helper
        self.indicators = INDICATORS(self.exchange, self.instrument_id, self.time_frame)  # initialize indicators helper
        self.database = "回测"  # must be "回测" ("backtest") when replaying purequant's historical k-line database
        self.datasheet = self.instrument_id.split("-")[0].lower() + "_" + time_frame  # data sheet name
        if config.first_run:  # persist bootstrap state on first run; set first_run to "false" in config to resume from saved data after a restart
            storage.mysql_save_strategy_run_info(self.database, self.datasheet, get_localtime(),
                                                 "none", 0, 0, 0, 0, "none", 0, 0, 0, start_asset)
        # read back the saved total capital and total profit/loss from the database
        self.total_asset = storage.read_mysql_datas(0, self.database, self.datasheet, "总资金", ">")[-1][-1]
        self.total_profit = storage.read_mysql_datas(0, self.database, self.datasheet, "总资金", ">")[-1][-2]  # total strategy P&L
        # strategy parameters
        self.contract_value = self.market.contract_value()  # contract face value
        self.ATRLength = 20  # average true range period
        self.boLength = 20  # short-period BreakOut Length
        self.fsLength = 55  # long-period FailSafe Length
        self.teLength = 10  # Trailing Exit Length
        self.LastProfitableTradeFilter = 1  # use the entry filter
        self.PreBreakoutFailure = False  # whether the previous breakout failed
        self.CurrentEntries = 0  # number of entries in the current position
        self.counter = 0  # counter limiting the number of trades per bar
def begin_trade(self, kline=None): # 实盘时从交易所实时获取k线数据,回测时传入自定义的kline
try:
# 如果k线数据不够长就返回
if self.indicators.CurrentBar(kline=kline) < self.fsLength:
return
# 非回测模式下时间戳就是当前本地时间
timestamp = ts_to_datetime_str(utctime_str_to_ts(kline[-1][0])) if kline else get_localtime()
# k线更新时计数器归零
if self.indicators.BarUpdate(kline=kline):
self.counter = 0
AvgTR = self.indicators.ATR(self.ATRLength, kline=kline) # 计算真实波幅
N = float(AvgTR[-2]) # N值为前一根bar上的ATR值,需将numpy.float64数据类型转换为float类型,下面的转换同理
Units = int(self.total_asset / self.contract_value / 5) # 每一份头寸大小为总资金的20%
"""计算短周期唐奇安通道"""
# 唐奇安通道上轨,延后1个Bar
DonchianHi = float(self.indicators.HIGHEST(self.boLength, kline=kline)[-2])
# 唐奇安通道下轨,延后1个Bar
DonchianLo = float(self.indicators.LOWEST(self.boLength, kline=kline)[-2])
"""计算长周期唐奇安通道"""
# 唐奇安通道上轨,延后1个Bar,长周期
fsDonchianHi = float(self.indicators.HIGHEST(self.fsLength, kline=kline)[-2])
# 唐奇安通道下轨,延后1个Bar,长周期
fsDonchianLo = float(self.indicators.LOWEST(self.fsLength, kline=kline)[-2])
"""计算止盈唐奇安通道"""
# 离市时判断需要的N周期最低价
ExitLowestPrice = float(self.indicators.LOWEST(self.teLength, kline=kline)[-2])
# 离市时判断需要的N周期最高价
ExitHighestPrice = float(self.indicators.HIGHEST(self.teLength, kline=kline)[-2])
# 当不使用过滤条件,或者使用过滤条件且条件PreBreakoutFailure为True时,短周期开仓
if self.indicators.CurrentBar(kline=kline) >= self.boLength and self.position.amount() == 0 and (self.LastProfitableTradeFilter != 1 or self.PreBreakoutFailure == False) and self.counter < 1:
if self.market.high(-1, kline=kline) >= DonchianHi: # 突破了短周期唐奇安通道上轨
price = DonchianHi # 开多价格为短周期唐奇安通道上轨
amount = Units # 开多数量为Units
receipt = self.exchange.buy(price, amount) # 开多
push(receipt) # 推送下单结果
self.CurrentEntries += 1 # 记录一次开仓次数
self.PreBreakoutFailure = False # 将标识重置为默认值,根据离场时的盈亏情况再修改
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "买入开多",
price, amount, amount * self.contract_value, price,
"long", amount, 0, self.total_profit,
self.total_asset) # 将信息保存至数据库
self.counter += 1 # 计数器加1
if self.market.low(-1, kline=kline) <= DonchianLo: # 突破了短周期唐奇安通道下轨
price = DonchianLo # 开空价格为DonchianLo
amount = Units # 开空数量为Units
receipt = self.exchange.sellshort(price, amount) # 开空
push(receipt) # 推送下单结果
self.CurrentEntries += 1 # 记录一次开仓次数
self.PreBreakoutFailure = False # 将标识重置为默认值,根据离场时的盈亏情况再修改
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "卖出开空",
price, amount, amount * self.contract_value, price,
"short", amount, 0, self.total_profit, self.total_asset) # 保存信息至数据库
self.counter += 1 # 计数器加1
# 长周期突破开仓,其他逻辑和短周期突破开仓一样。
if self.indicators.CurrentBar(kline=kline) >= self.fsLength and self.position.amount() == 0 and self.counter < 1:
if self.market.high(-1, kline=kline) >= fsDonchianHi: # 突破了长周期唐奇安通道上轨
price = fsDonchianHi # 开多价格为长周期唐奇安通道上轨值
amount = Units # 数量为Units
receipt = self.exchange.buy(price, amount) # 下单并返回下单结果
push(receipt) # 推送下单结果
self.CurrentEntries += 1 # 记录一次开仓次数
self.PreBreakoutFailure = False # 将标识重置为默认值
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "买入开多",
price, amount, amount * self.contract_value, price,
"long", amount, 0, self.total_profit,
self.total_asset) # 将信息保存至数据库
self.counter += 1 # 计数器加1
if self.market.low(-1, kline=kline) <= fsDonchianLo: # 突破长周期唐奇安通道下轨
price = fsDonchianLo # 开空价格为长周期唐奇安通道下轨值
amount = Units # 开空数量为Units
receipt = self.exchange.sellshort(price, amount) # 下单并返回下单结果
push(receipt) # 推送下单结果
self.CurrentEntries += 1 # 记录一次开仓次数
self.PreBreakoutFailure = False # 将标识重置为默认值
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "卖出开空",
price, amount, amount * self.contract_value, price,
"short", amount, 0, self.total_profit, self.total_asset)
self.counter += 1 # 计数器加1
# 止盈、加仓和止损
if self.position.direction() == "long" and self.counter < 1: # 持多仓的情况。回测时是一根k线上整个策略从上至下运行一次,所以在此处设置计数器过滤
if self.market.low(-1, kline=kline) <= ExitLowestPrice: # 跌破止盈价
profit = self.position.coverlong_profit(last=ExitLowestPrice, market_type="usd_contract") # 平仓前计算利润,传入最新价以及计算盈利的合约类型
self.total_profit += profit # 计算经过本次盈亏后的总利润
self.total_asset += profit # 计算经过本次盈亏后的总资金
price = ExitLowestPrice # 平多价格为ExitLowestPrice
amount = self.position.amount() # 数量为当前持仓数量
receipt = self.exchange.sell(price, amount) # 平所有多单仓位
push(receipt) # 推送下单结果
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "卖出平多",
price, amount, amount * self.contract_value,
0, "none", 0, profit, self.total_profit, self.total_asset)
self.counter += 1 # 计数器加1
self.CurrentEntries = 0 # 平仓后将开仓次数还原为0
else:
# 加仓指令
'''以最高价为标准,判断是否能加仓,并限制最大加仓次数
如果价格过前次开仓价格1/2N,则直接加仓
'''
while self.market.high(-1, kline=kline) >= (self.position.price() + 0.5 * N) and (self.CurrentEntries <= 4):
price = self.position.price() + 0.5 * N # 加仓的开仓价格为持仓价格+0.5 * N
amount = Units # 数量为Units
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "多头加仓",
price, amount, amount * self.contract_value,
(self.position.price() + price) / 2,
"long", self.position.amount() + amount,
0, self.total_profit, self.total_asset)
receipt = self.exchange.buy(price, amount)
push(receipt)
self.CurrentEntries += 1
# 止损指令
if self.market.low(-1, kline=kline) <= (self.position.price() - 2 * N): # 如果回落大于最后下单价格-2n,就止损
profit = self.position.coverlong_profit(last=self.position.price() - 2 * N, market_type="usd_contract")
self.total_profit += profit # 计算经过本次盈亏后的总利润
self.total_asset += profit # 计算经过本次盈亏后的总资金
price = self.position.price() - 2 * N
amount = self.position.amount()
receipt = self.exchange.sell(price, amount) # 全部止损平仓
push(receipt)
self.PreBreakoutFailure = True # 记录为突破失败,下次交易将使用长周期开仓
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "卖出止损",
price, amount, amount * self.contract_value,
0, "none", 0, profit, self.total_profit, self.total_asset)
self.counter += 1
self.CurrentEntries = 0 # 平仓后将开仓次数还原为0
elif self.position.direction() == "short" and self.counter < 1: # 持空头的情况,除方向以外,其他逻辑和上面持多仓的一致
if self.market.high(-1, kline=kline) >= ExitHighestPrice:
profit = self.position.covershort_profit(last=ExitHighestPrice, market_type="usd_contract")
self.total_profit += profit
self.total_asset += profit
price = ExitHighestPrice
amount = self.position.amount()
receipt = self.exchange.buytocover(price, amount)
push(receipt)
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp,
"买入平空", price, amount, amount * self.contract_value,
0, "none", 0, profit, self.total_profit, self.total_asset)
self.counter += 1
self.CurrentEntries = 0 # 平仓后将开仓次数还原为0
else:
while self.market.low(-1, kline=kline) <= (self.position.price() - 0.5 * N) and (self.CurrentEntries <= 4):
price = self.position.price() - 0.5 * N
amount = Units
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, "空头加仓",
price, amount, amount * self.contract_value,
(self.position.price() + price) / 2,
"short", self.position.amount() + amount,
0, self.total_profit, self.total_asset)
receipt = self.exchange.sellshort(self.position.price() - 0.5 * N, Units)
push(receipt)
self.CurrentEntries += 1
if self.market.high(-1, kline=kline) >= (self.position.price() + 2 * N):
profit = self.position.covershort_profit(last=self.position.price() + 2 * N, market_type="usd_contract")
self.total_profit += profit
self.total_asset += profit
price = self.position.price() + 2 * N
amount = self.position.amount()
receipt = self.exchange.buytocover(price, amount)
push(receipt)
self.PreBreakoutFailure = True
storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp,
"买入止损", price, amount, amount * self.contract_value,
0, "none", 0, profit, self.total_profit, self.total_asset)
self.counter += 1
self.CurrentEntries = 0 # 平仓后将开仓次数还原为0
except:
logger.error()
if __name__ == "__main__":
    instrument_id = "BTC-USD-201225"  # contract to trade
    time_frame = "1d"  # k-line period
    strategy = Strategy(instrument_id, time_frame, start_asset=1000)
    if config.backtest:  # backtest mode
        # pre-process the csv history first
        """先处理csv数据"""
        df = pd.read_csv('BTCUSD_bmx_1d_20170505-20200420.csv')  # load the csv file
        order = ['candle_begin_time', 'open', 'high', 'low', 'close', 'volume']  # column order
        df = df[order]
        df.columns = ['timestamp', 'open', 'high', 'low', 'close', 'volume']  # rename the columns
        for i in df['timestamp']:  # convert the timestamps to UTC strings
            j = ts_to_utc_str(datetime_str_to_ts(i))
            # NOTE(review): whole-frame replace inside a loop is O(n^2);
            # a vectorized map on the timestamp column would presumably be
            # equivalent — confirm before changing.
            df.replace(i, j, inplace=True)
        data = df.values.tolist()  # dataframe -> list of rows
        print("正在回测,可能需要一段时间,请稍后...")
        start_time = get_cur_timestamp()
        records = []
        for k in data:  # replay the history bar by bar
            records.append(k)
            strategy.begin_trade(kline=records)
        cost_time = get_cur_timestamp() - start_time
        print("回测用时{}秒,结果已保存至mysql数据库!".format(cost_time))
    else:  # live mode
        while True:  # run begin_trade in a loop
            strategy.begin_trade()
            sleep(3)  # sleep 3 seconds to stay under the exchange's rate limit
5077864 |
from django.apps import AppConfig
import logging
from nalkinscloud_django.settings import PROJECT_NAME
# Define logger
logger = logging.getLogger(PROJECT_NAME)
class NalkinsCloudAPIConfig(AppConfig):
    """Django application configuration for the ``nalkinscloud_api`` app."""

    name = 'nalkinscloud_api'
| StarcoderdataPython |
11398925 | from pyrad import dictionary, packet, server
import logging
from loguru import logger
import traceback
import platform
from util import *
from config import config
from users import *
logging.basicConfig(filename="pyrad.log", level="DEBUG",
format="%(asctime)s [%(levelname)-8s] %(message)s")
class RadiusAuthServer(server.Server):
    """RADIUS server answering Access-Request packets from a static user list."""

    def HandleAuthPacket(self, pkt):
        """Authenticate *pkt* and reply Access-Accept or Access-Reject.

        Lookup order: exact user match, then the configured default policy.
        Packets failing Message-Authenticator verification are dropped with
        no reply.  Any processing error results in a reject.
        """
        logger.info("Received an authentication request ID={}", pkt.id)
        try:
            if not pkt.verify_message_authenticator():
                # bad authenticator: silently drop, per RADIUS practice
                logger.warning("Authentication request ID={} verify failed, ignore packet.", pkt.id)
                return
            logger.debug("ID={} {}", pkt.id, pkt_to_str(pkt))
            auth_username = pkt["User-Name"][0]
            auth_password = pkt.PwDecrypt(pkt["User-Password"][0])
            # NOTE(review): this logs plaintext passwords — confirm acceptable
            # outside a development environment.
            logger.info("ID={} username={} password={}", pkt.id, auth_username, auth_password)
            auth_ok, user = find_user(auth_username, auth_password)
            if auth_ok:
                # known user: accept with the user's reply attributes plus configured defaults
                reply_attr = fill_with_default_attr(user["reply_attr"])
                reply = self.CreateReplyPacket(pkt, **reply_attr)
                reply.code = packet.AccessAccept
                self.SendReplyPacket(pkt.fd, reply)
                logger.info("ID={} auth_ok, description={}\n{}", pkt.id, user["description"], reply_attr)
            elif config["default"]["accept"]:
                # no user matched, but the default policy is "accept"
                reply_attr = fill_with_default_attr(config["default"]["accept-attr"])
                reply = self.CreateReplyPacket(pkt, **reply_attr)
                reply.code = packet.AccessAccept
                self.SendReplyPacket(pkt.fd, reply)
                logger.info("ID={} auth_default_accept\n{}", pkt.id, reply_attr)
            else:
                # no user matched and the default policy is "reject"
                reply = self.CreateReplyPacket(pkt)
                reply.code = packet.AccessReject
                self.SendReplyPacket(pkt.fd, reply)
                logger.info("ID={} auth_default_reject", pkt.id)
        except Exception as e:
            # on any error log the full traceback and reject so the client is not left hanging
            logger.error("Got an error during HandleAuthPacket, pkt.id={}, reply reject.\n{}\n{}", pkt.id, pkt_to_str(pkt), traceback.format_exc())
            reply = self.CreateReplyPacket(pkt)
            reply.code = packet.AccessReject
            self.SendReplyPacket(pkt.fd, reply)
def pkt_to_str(pkt):
    """Render a packet's attributes as a multi-line "name: value" string.

    Attributes whose first value cannot be read (e.g. empty value lists)
    are rendered with the exception message instead.
    """
    lines = ["Attributes: "]
    for attr in pkt.keys():
        try:
            lines.append("%s: %s" % (attr, pkt[attr][0]))
        except Exception as exc:
            lines.append("%s: %s" % (attr, exc))
    return "\n".join(lines)
def fill_with_default_attr(attr):
    """Merge the configured default RADIUS attributes into *attr*.

    Mutates and returns *attr*; keys already present in *attr* win over
    the defaults in ``config["attr"]``.
    """
    defaults = config["attr"]
    for key in defaults:
        attr.setdefault(key, defaults[key])
    return attr
if __name__ == '__main__':
    # create server and read dictionary.txt
    if not platform.system().lower() == 'linux':
        # pyrad's server uses select.poll, which is unavailable on some
        # platforms (e.g. Windows); install the bundled shim instead
        logger.warning("System platform is not Linux, install custom poll")
        import poll
        poll.install()
    authport = config["port"]
    logger.info("Initializing RadiusAuthServer, authport={}", authport)
    srv = RadiusAuthServer(dict=dictionary.Dictionary(get_script_path_file("dict.txt")),
                           authport=authport,
                           auth_enabled=True, acct_enabled=False, coa_enabled=False)
    # add clients (address, secret, name)
    for client in config["clients"]:
        logger.info("Add client {}, secret={}", client["client"], client["secret"])
        srv.hosts[client["client"]] = server.RemoteHost("", client["secret"].encode(), "")
    # srv.hosts["10.99.0.233"] = server.RemoteHost("", b"secret12345", "")
    for addr in config["bind"]:
        logger.info("Bind to address {}", addr)
        srv.BindToAddress(addr)
    # start server (blocks forever)
    logger.info("Start RadiusAuthServer")
    srv.Run()
| StarcoderdataPython |
9674974 | <filename>simple_ml/knn.py
# -*- coding:utf-8 -*-
from __future__ import division, absolute_import
from simple_ml.base.base_enum import *
from simple_ml.base.base_error import *
from simple_ml.evaluation import *
from simple_ml.base.base_model import *
import numpy as np
from collections import Counter
__all__ = ['KNN', 'DisType']
class KNN(BaseClassifier):

    __doc__ = "K Nearest Neighbor(s)"

    def __init__(self, k=1, distance_type=DisType.Eculidean):
        # k: number of neighbours that vote; distance_type: a DisType metric
        super(KNN, self).__init__()
        self.k = k
        self.dist_type = distance_type
        # training data; presumably populated by BaseClassifier.fit — TODO confirm
        self.x = None
        self.y = None

    def fit(self, x, y):
        """Store the training data (x: 2-D feature matrix, y: labels)."""
        super(KNN, self).fit(x, y)
        self._fit(x, y)

    def _fit(self, x, y):
        """
        Lazy learner: nothing needs to happen at fit time.
        """
        pass

    def predict(self, x):
        """Predict a label for each row of *x* by majority vote of the k nearest training samples."""
        if self.x is None:
            raise ModelNotFittedError
        super(KNN, self).predict(x)
        dist_func = self._get_dist_func(self.dist_type)
        return np.array(list(map(lambda i: self._predict_single_sample(i, self.k, dist_func), x)))

    def _predict_single_sample(self, x, k, dist_func):
        # distance from x to every training sample, then vote among the k closest
        sim_list = list(map(lambda i: dist_func(x, i), self.x))
        sim_y_list = zip(sim_list, self.y)
        selected_sim_y = sorted(sim_y_list, key=lambda i: i[0], reverse=False)[:k]
        return self._vote([i[1] for i in selected_sim_y])

    @staticmethod
    def _vote(y_list):
        # most frequent label wins (ties resolved by dict iteration order)
        count_dict = dict(Counter(y_list))
        return max(count_dict, key=count_dict.get)

    @staticmethod
    def _get_dist_func(dist_type):
        # map a DisType to a callable (x1, x2) -> distance, smaller = closer
        if dist_type == DisType.Eculidean:
            # 2-norm of the difference vector (Euclidean distance)
            return lambda x1, x2: np.linalg.norm(x1-x2, 2)
        elif dist_type == DisType.Manhattan:
            return lambda x1, x2: np.linalg.norm(x1-x2, 1)
        elif dist_type == DisType.Chebyshev:
            return lambda x1, x2: np.linalg.norm(x1-x2, np.inf)
        elif dist_type == DisType.CosSim:
            # negated cosine similarity, so that "smaller is closer" still holds
            return lambda x1, x2: -np.dot(x1, x2) / (np.linalg.norm(x1, 2) * np.linalg.norm(x2, 2))
        else:
            raise DistanceTypeError

    def score(self, x, y):
        """F1 score on (x, y): plain F1 for binary labels, macro F1 for multi-class."""
        super(KNN, self).score(x, y)
        y_predict = self.predict(x)
        if self.label_type == LabelType.binary:
            return classify_f1(y_predict, y)
        elif self.label_type == LabelType.multi_class:
            return classify_f1_macro(y_predict, y)
        else:
            raise LabelTypeError

    def classify_plot(self, x, y, title=""):
        # train a fresh clone on the stored data and plot the decision regions
        classify_plot(self.new(), self.x, self.y, x, y, title=self.__doc__+title)

    def new(self):
        """Return an unfitted clone with the same hyper-parameters."""
        return KNN(self.k, self.dist_type)
class Node:
    """A single node of a k-d tree.

    Attributes:
        left, right: child nodes (or None).
        parent:      parent node (or None for the root).
        value:       split value (the median on the split dimension).
        dimension:   index of the feature this node splits on.
        sample_ids:  indices of the training samples covered by this node.
    """

    # Fixed: the original wrote ``__slot__`` (misspelled, so it silently did
    # nothing) and listed 'sampleIds' instead of the real attribute name.
    __slots__ = ['left', 'right', 'parent', 'value', 'dimension', 'sample_ids']

    def __init__(self, left, right, parent, value, dimension, sample_ids):
        self.left = left
        self.right = right
        self.value = value
        self.parent = parent
        self.dimension = dimension
        self.sample_ids = sample_ids
class KDTree(KNN):
    """KNN variant backed by a k-d tree (tree construction implemented; the
    nearest-neighbour search methods are still stubs)."""

    def __init__(self, k=5, dist_type=DisType.Eculidean):
        super(KDTree, self).__init__(k, dist_type)

    @staticmethod
    def _choose_split_feature(x, ids):
        # split on the feature with the largest variance among the selected
        # samples, at that feature's median
        x = x[ids]
        variance_list = list(map(np.var, x.T))
        split_feature_id = variance_list.index(max(variance_list))
        median = np.median(x.T[split_feature_id])
        # split_node = node(None, None, median, split_feature_id, ids)
        return split_feature_id, median

    def _fit(self, x, y):
        """
        Build the k-d tree here.
        """
        self.root_node = Node(None, None, None, None, None, np.arange(self.x.shape[0]))
        self._build_kd_tree(self.root_node, 0)

    def _build_kd_tree(self, input_node, depth):
        # NOTE(review): samples exactly equal to the median fall into neither
        # child, so they are dropped from the tree below this node — confirm
        # this is intended.
        if len(input_node.sample_ids) == 0:
            return None
        sample_ids = input_node.sample_ids
        feat_id, median = self._choose_split_feature(self.x, input_node.sample_ids)
        input_node.value = median  # sample_ids[self.x[sample_ids, feat_id]==median]
        input_node.dimension = feat_id
        left_ids = sample_ids[self.x[sample_ids, feat_id] < median]
        left_node = Node(None, None, input_node, None, None, left_ids)
        right_ids = sample_ids[self.x[sample_ids, feat_id] > median]
        right_node = Node(None, None, input_node, None, None, right_ids)
        input_node.left = self._build_kd_tree(left_node, depth+1)
        input_node.right = self._build_kd_tree(right_node, depth+1)
        return input_node

    def _predict_single_sample(self, x, k, dist_func):
        """
        1. DFS from the root to a leaf, pushing each visited node on a stack.
        2. The reached leaf becomes the provisional nearest neighbour.
        3. Backtrack through the stack:
        4. If the current node is closer than the provisional best, update it.
        5. Check whether the sphere with the best distance as radius crosses
           the parent's splitting hyperplane.
        6. If it crosses, search the parent's other subtree with the same DFS.
        7. If not, keep backtracking; the other subtree is pruned.
        8. The search finishes when it backtracks past the root.

        NOTE(review): still unimplemented — returns None, so KDTree.predict
        is not usable yet.
        """
        pass

    def _search_kd_tree(self, x, the_node, dist_func):
        # descend to the subtree that would contain x
        if x[the_node.dimension] == the_node.value:
            return the_node
        if the_node.left is None and the_node.right is None:
            return the_node
        if x[the_node.dimension] < the_node.value:
            return self._search_kd_tree(x, the_node.left, dist_func)
        else:
            return self._search_kd_tree(x, the_node.right, dist_func)

    def _back_trace(self, x, the_node: Node, best_node: Node):
        """
        If x's best-distance sphere does not cross the node's parent
        hyperplane, keep backtracking towards the nearest-neighbour node;
        if it does cross, the parent's other branch must be revisited.
        # TODO
        """
        pass
| StarcoderdataPython |
def mean(mylist):
    """Return the arithmetic mean of the numbers in *mylist*."""
    return sum(mylist) / len(mylist)
# Demo call for mean()
print(mean([1,4,5]))
def meanListDict(value):
    """Return the mean of the numbers in *value*.

    Accepts either a dict (averages its values) or any sized collection of
    numbers (list, tuple, ...).  Raises ZeroDivisionError on empty input,
    as before.
    """
    # isinstance instead of ``type(value) == dict`` so that dict subclasses
    # (OrderedDict, defaultdict, Counter, ...) are averaged by value too.
    if isinstance(value, dict):
        the_mean = sum(value.values()) / len(value)
    else:
        the_mean = sum(value) / len(value)
    return the_mean
# Demo data: temperatures and per-student grades for meanListDict()
monday_temperatures = [8.8, 9.1, 9.8]
student_grades = {'marry':9.1,'sim':8.8,'john':7.5}
print(meanListDict(student_grades))
print(meanListDict(monday_temperatures))
| StarcoderdataPython |
8091863 | <filename>backend/src/gql/mutation/import_milestone.py
from containers import SDContainer
from models import Milestone, OnPathway
from .mutation_type import mutation
from datacreators import ImportMilestone
from authentication.authentication import needsAuthorization
from graphql.type import GraphQLResolveInfo
from dependency_injector.wiring import Provide, inject
from SdTypes import Permissions
@mutation.field("importMilestone")
@needsAuthorization([Permissions.MILESTONE_CREATE])
@inject
async def resolver(
obj=None,
info: GraphQLResolveInfo = None,
input: dict = None,
pub=Provide[SDContainer.pubsub_service]
) -> Milestone:
milestone = await ImportMilestone(
context=info.context,
on_pathway_id=input["onPathwayId"],
milestone_type_id=input["milestoneTypeId"],
description=input["description"],
current_state=input["currentState"],
)
await pub.publish(
'on-pathway-updated',
await OnPathway.get(milestone.on_pathway_id)
)
return milestone
| StarcoderdataPython |
142833 | <reponame>cron-ooo/django-compressor
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_str
from django.utils.functional import cached_property
from compressor.exceptions import ParserError
from compressor.parser import ParserBase
class LxmlParser(ParserBase):
    """
    Compressor parser backed by ``lxml.html``.

    Parses the rendered contents of a {% compress %} tag into an element
    tree and exposes the stylesheet/script elements found within it.
    """

    def __init__(self, content):
        try:
            from lxml.etree import tostring
            from lxml.html import fromstring
        except ImportError as err:
            raise ImproperlyConfigured("Error while importing lxml: %s" % err)
        except Exception as err:
            raise ParserError("Error while initializing parser: %s" % err)
        # keep the lxml callables on the instance so lxml stays an optional import
        self.fromstring = fromstring
        self.tostring = tostring
        super().__init__(content)

    @cached_property
    def tree(self):
        """
        Document tree (the content wrapped in a synthetic <root> element).
        """
        wrapped = '<root>%s</root>' % self.content
        parsed = self.fromstring(wrapped)
        # round-trip once so serialization errors surface eagerly
        self.tostring(parsed, encoding=str)
        return parsed

    def css_elems(self):
        """Return <link rel="stylesheet"> (case-insensitive) and <style> elements."""
        return self.tree.xpath('//link[re:test(@rel, "^stylesheet$", "i")]|style',
                               namespaces={"re": "http://exslt.org/regular-expressions"})

    def js_elems(self):
        """Return all <script> elements."""
        return self.tree.findall('script')

    def elem_attribs(self, elem):
        """Attribute mapping of *elem*."""
        return elem.attrib

    def elem_content(self, elem):
        """Text content of *elem* as a string."""
        return smart_str(elem.text)

    def elem_name(self, elem):
        """Tag name of *elem*."""
        return elem.tag

    def elem_str(self, elem):
        """Serialized HTML for *elem*."""
        return smart_str(self.tostring(elem, method='html', encoding=str))
| StarcoderdataPython |
1666494 | #coding:utf-8
#
# id: bugs.core_1787
# title: Consistency check when subquery is ordered by aggregate function from other context
# decription:
# tracker_id: CORE-1787
# min_versions: []
# versions: 2.5.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.0
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE TEST_TABLE1
(ID BIGINT,
FK_ID INTEGER,
REG_DATE TIMESTAMP NOT NULL);
COMMIT;
insert into test_table1 values (1,5,'01.01.2000');
insert into test_table1 values (2,5,'01.01.2001');
insert into test_table1 values (3,7,'01.01.2002');
insert into test_table1 values (4,8,'01.01.2003');
insert into test_table1 values (5,8,'01.01.2004');
insert into test_table1 values (6,8,'01.01.2005');
insert into test_table1 values (7,8,'01.01.2007');
COMMIT;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """select t.fk_id,(select first 1 t1.reg_date from test_table1 t1 where t1.fk_id = t.fk_id
order by min(t.fk_id))
from test_table1 t
group by t.fk_id;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
FK_ID REG_DATE
============ =========================
5 2000-01-01 00:00:00.0000
7 2002-01-01 00:00:00.0000
8 2003-01-01 00:00:00.0000
"""
@pytest.mark.version('>=2.5.0')
def test_1(act_1: Action):
    """Run the ISQL test script and compare its cleaned output to the expected stdout."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| StarcoderdataPython |
9711680 | import os
from museum_site.core.detail_identifiers import *
from museum_site.models.detail import Detail
class File_Extension_Info(object):
    """Metadata describing a known file extension.

    Attributes:
        extension:          canonical extension (e.g. ".ZZT"), used for display.
        name:               human-readable file-type name.
        associated_details: list of Detail pks suggested for files of this type.
        file_viewer_type / file_viewer_func: file-viewer dispatch info.
        ambiguous:          True when the extension alone cannot determine the type.
    """

    def __init__(self, extension, name="", associated_details=None, file_viewer_type="", file_viewer_func="", ambiguous=False):
        self.extension = extension
        self.name = name
        # Fixed mutable-default-argument bug: the original default ``[]`` was a
        # single list shared by every instance created without the argument.
        self.associated_details = [] if associated_details is None else associated_details
        self.file_viewer_type = file_viewer_type
        self.file_viewer_func = file_viewer_func
        self.ambiguous = ambiguous
# Known file extensions.  Keys are upper-cased extensions, or whole upper-cased
# filenames for extensionless files such as "README".
EXTENSIONS = {
    # ZZT
    ".BRD": File_Extension_Info(".BRD", "Board File", [DETAIL_ZZT_BOARD], ambiguous=True),
    ".ZZT": File_Extension_Info(".ZZT", "ZZT World", [DETAIL_ZZT]),
    ".Z_T": File_Extension_Info(".ZZT", "ZZT World", [DETAIL_ZZT]),
    ".HI": File_Extension_Info(".HI", "High Score File", [DETAIL_ZZT_SCORE], ambiguous=True),
    ".MH": File_Extension_Info(".MH", "Mystical Winds ZZT High Score File", [DETAIL_ZZT_SCORE]),
    ".MWZ": File_Extension_Info(".MWZ", "Mystical Winds ZZT World", [DETAIL_ZZT]),
    # Fixed: the .SAV entry was a copy/paste of the .MWZ one (extension and
    # name said "Mystical Winds ZZT World" while the detail was ZZT_SAVE).
    ".SAV": File_Extension_Info(".SAV", "ZZT Save File", [DETAIL_ZZT_SAVE], ambiguous=True),
    # Super ZZT
    ".SZT": File_Extension_Info(".SZT", "Super ZZT World", [DETAIL_SZZT]),
    ".HGS": File_Extension_Info(".HGS", "Super ZZT High Score File", [DETAIL_SZZT_SCORE]),
    # Charsets
    ".CHR": File_Extension_Info(".CHR", "Charset", [DETAIL_GFX]),
    ".COM": File_Extension_Info(".COM", "Charset", [DETAIL_GFX], ambiguous=True),
    ".FNT": File_Extension_Info(".FNT", "Charset", [DETAIL_GFX]),  # fixed: extension said ".COM"
    # Palettes
    ".PAL": File_Extension_Info(".PAL", "Palette", [DETAIL_GFX]),
    ".PLD": File_Extension_Info(".PLD", "Palette", [DETAIL_GFX]),
    # ZIG
    ".INF": File_Extension_Info(".INF", "ZIG Information File", ambiguous=True),
    ".ZIG": File_Extension_Info(".ZIG", "ZIG World", [DETAIL_ZIG]),
    ".ZBR": File_Extension_Info(".ZBR", "ZIG Board"),
    ".ZCH": File_Extension_Info(".ZCH", "ZIG Charset"),
    ".ZPL": File_Extension_Info(".ZPL", "ZIG Palette"),
    ".OLF": File_Extension_Info(".OLF", "ZIG Object Library File"),
    # ZZM
    ".ZZM": File_Extension_Info(".ZZM", "ZZM Audio"),
    # ZZT Clone Worlds
    ".ZZ3": File_Extension_Info(".ZZ3", "ZZ3 World", [DETAIL_CLONE_WORLD]),
    ".SWW": File_Extension_Info(".SWW", "SuperWAD World", [DETAIL_CLONE_WORLD]),
    ".PGF": File_Extension_Info(".PGF", "Platic Game File", [DETAIL_CLONE_WORLD]),
    ".PWORLD": File_Extension_Info(".PWORLD", "Plastic Game File", [DETAIL_CLONE_WORLD]),
    # Source Code
    ".ASM": File_Extension_Info(".ASM", "Assembly Source Code", [DETAIL_SOURCE_CODE]),
    ".BAS": File_Extension_Info(".BAS", "BASIC Source Code", [DETAIL_SOURCE_CODE]),
    ".BI": File_Extension_Info(".BI", "Source Code", [DETAIL_SOURCE_CODE]),
    ".C": File_Extension_Info(".C", "C Source Code", [DETAIL_SOURCE_CODE]),
    ".CC": File_Extension_Info(".CC", "Source Code", [DETAIL_SOURCE_CODE]),
    ".CPP": File_Extension_Info(".CPP", "C++ Source Code", [DETAIL_SOURCE_CODE]),
    ".E": File_Extension_Info(".E", "Euphoria Source Code", [DETAIL_SOURCE_CODE]),
    ".EX": File_Extension_Info(".EX", "Source Code", [DETAIL_SOURCE_CODE]),
    ".H": File_Extension_Info(".H", "Source Code", [DETAIL_SOURCE_CODE]),
    ".JAVA": File_Extension_Info(".JAVA", "Java Source Code", [DETAIL_SOURCE_CODE]),
    ".INC": File_Extension_Info(".INC", "Source Code", [DETAIL_SOURCE_CODE]),
    ".LUA": File_Extension_Info(".LUA", "Lua Source Code", [DETAIL_SOURCE_CODE]),
    ".PAS": File_Extension_Info(".PAS", "Pascal Source Code", [DETAIL_SOURCE_CODE]),
    ".PY": File_Extension_Info(".PY", "Python Source Code", [DETAIL_SOURCE_CODE]),
    # Plaintext
    ".135": File_Extension_Info(".135", "Text File", [DETAIL_TEXT]),
    ".ASC": File_Extension_Info(".ASC", "Text File", [DETAIL_TEXT]),
    ".1ST": File_Extension_Info(".1ST", "Text File", [DETAIL_TEXT]),
    ".ANS": File_Extension_Info(".ANS", "Text File", [DETAIL_TEXT]),
    ".BAT": File_Extension_Info(".BAT", "Text File", [DETAIL_TEXT]),
    ".BB": File_Extension_Info(".BB", "Text File", [DETAIL_TEXT]),
    ".CFG": File_Extension_Info(".CFG", "Text File", [DETAIL_TEXT]),
    "COPYING": File_Extension_Info("COPYING", "Text File", [DETAIL_TEXT]),
    ".CRD": File_Extension_Info(".CRD", "Text File", [DETAIL_TEXT]),
    ".DAT": File_Extension_Info(".DAT", "Text File", [DETAIL_TEXT], ambiguous=True),
    "DESC": File_Extension_Info("DESC", "Text File", [DETAIL_TEXT]),
    ".DEF": File_Extension_Info(".DEF", "Text File", [DETAIL_TEXT]),
    ".DEU": File_Extension_Info(".DEU", "Text File", [DETAIL_TEXT]),
    ".DIZ": File_Extension_Info(".DIZ", "Text File", [DETAIL_TEXT]),
    ".DOC": File_Extension_Info(".DOC", "Text File", [DETAIL_TEXT]),
    ".EED": File_Extension_Info(".EED", "Text File", [DETAIL_TEXT]),
    ".ENG": File_Extension_Info(".ENG", "Text File", [DETAIL_TEXT]),
    ".ERR": File_Extension_Info(".ERR", "Text File", [DETAIL_TEXT]),
    "EXCLUDE": File_Extension_Info("EXCLUDE", "Text File", [DETAIL_TEXT]),
    ".FAQ": File_Extension_Info(".FAQ", "Text File", [DETAIL_TEXT]),
    ".FLG": File_Extension_Info(".FLG", "Text File", [DETAIL_TEXT]),
    ".FRM": File_Extension_Info(".FRM", "Text File", [DETAIL_TEXT]),
    ".FYI": File_Extension_Info(".FYI", "Text File", [DETAIL_TEXT]),
    ".GITIGNORE": File_Extension_Info(".GITIGNORE", "Text File", [DETAIL_TEXT]),
    ".GUD": File_Extension_Info(".GUD", "Text File", [DETAIL_TEXT]),
    ".HINTS": File_Extension_Info(".HINTS", "Text File", [DETAIL_TEXT]),
    ".HLP": File_Extension_Info(".HLP", "Text File", [DETAIL_TEXT]),
    ".INI": File_Extension_Info(".INI", "Text File", [DETAIL_TEXT]),
    ".JSON": File_Extension_Info(".JSON", "Text File", [DETAIL_TEXT]),
    ".KB": File_Extension_Info(".KB", "Text File", [DETAIL_TEXT]),
    "LASTSG": File_Extension_Info("LASTSG", "Text File", [DETAIL_TEXT]),  # fixed: extension said ".LASTSG"
    "LICENSE": File_Extension_Info("LICENSE", "Text File", [DETAIL_TEXT]),
    "LPT1": File_Extension_Info("LPT1", "Text File", [DETAIL_TEXT]),
    ".LOG": File_Extension_Info(".LOG", "Text File", [DETAIL_TEXT]),
    ".LST": File_Extension_Info(".LST", "Text File", [DETAIL_TEXT]),
    ".MAC": File_Extension_Info(".MAC", "Text File", [DETAIL_TEXT]),
    ".MAP": File_Extension_Info(".MAP", "Text File", [DETAIL_TEXT]),
    ".MD": File_Extension_Info(".MD", "Text File", [DETAIL_TEXT]),
    ".ME": File_Extension_Info(".ME", "Text File", [DETAIL_TEXT]),
    ".MSG": File_Extension_Info(".MSG", "Text File", [DETAIL_TEXT]),
    ".MUZ": File_Extension_Info(".MUZ", "Text File", [DETAIL_TEXT]),
    ".NEW": File_Extension_Info(".NEW", "Text File", [DETAIL_TEXT]),
    "NEWS": File_Extension_Info("NEWS", "Text File", [DETAIL_TEXT]),
    ".NFO": File_Extension_Info(".NFO", "Text File", [DETAIL_TEXT]),
    ".NOW": File_Extension_Info(".NOW", "Text File", [DETAIL_TEXT]),
    "ORDER": File_Extension_Info("ORDER", "Text File", [DETAIL_TEXT]),
    ".OOP": File_Extension_Info(".OOP", "Text File", [DETAIL_TEXT]),
    ".PAR": File_Extension_Info(".PAR", "Text File", [DETAIL_TEXT]),
    ".PDF": File_Extension_Info(".PDF", "Text File", [DETAIL_TEXT]),
    "README": File_Extension_Info("README", "Text File", [DETAIL_TEXT]),
    ".REG": File_Extension_Info(".REG", "Text File", [DETAIL_TEXT]),
    "REGISTER": File_Extension_Info("REGISTER", "Text File", [DETAIL_TEXT]),  # fixed: extension said ".REGISTER"
    ".RTF": File_Extension_Info(".RTF", "Text File", [DETAIL_TEXT]),
    "SAVES": File_Extension_Info("SAVES", "Text File", [DETAIL_TEXT]),
    ".SDI": File_Extension_Info(".SDI", "Text File", [DETAIL_TEXT]),
    ".SH": File_Extension_Info(".SH", "Text File", [DETAIL_TEXT]),
    ".SOL": File_Extension_Info(".SOL", "Text File", [DETAIL_TEXT]),
    ".SLV": File_Extension_Info(".SLV", "Text File", [DETAIL_TEXT]),
    ".ST": File_Extension_Info(".ST", "Text File", [DETAIL_TEXT]),
    ".THEME": File_Extension_Info(".THEME", "Text File", [DETAIL_TEXT]),
    ".TXT": File_Extension_Info(".TXT", "Text File", [DETAIL_TEXT]),
    "WORLDS": File_Extension_Info("WORLDS", "Text File", [DETAIL_TEXT]),
    ".WPS": File_Extension_Info(".WPS", "Text File", [DETAIL_TEXT]),
    ".WRI": File_Extension_Info(".WRI", "Text File", [DETAIL_TEXT]),
    ".ZLN": File_Extension_Info(".ZLN", "Text File", [DETAIL_TEXT]),
    ".ZML": File_Extension_Info(".ZML", "Text File", [DETAIL_TEXT]),
    ".ZZL": File_Extension_Info(".ZZL", "Text File", [DETAIL_TEXT]),
    # HTML
    ".HTM": File_Extension_Info(".HTM", "HTML File", [DETAIL_HTML]),
    ".HTML": File_Extension_Info(".HTML", "HTML File", [DETAIL_HTML]),
    # Audio
    ".IT": File_Extension_Info(".IT", "Audio File", [DETAIL_AUDIO]),
    ".MID": File_Extension_Info(".MID", "Audio File", [DETAIL_AUDIO]),
    ".MIDI": File_Extension_Info(".MIDI", "Audio File", [DETAIL_AUDIO]),
    ".MOD": File_Extension_Info(".MOD", "Audio File", [DETAIL_AUDIO]),
    ".MP3": File_Extension_Info(".MP3", "Audio File", [DETAIL_AUDIO]),
    ".WAV": File_Extension_Info(".WAV", "Audio File", [DETAIL_AUDIO]),
    ".XM": File_Extension_Info(".XM", "Audio File", [DETAIL_AUDIO]),
    ".PTF": File_Extension_Info(".PTF", "Audio File", [DETAIL_AUDIO]),
    # Image
    ".BMP": File_Extension_Info(".BMP", "Image File", [DETAIL_IMAGE]),
    ".GIF": File_Extension_Info(".GIF", "Image File", [DETAIL_IMAGE]),
    ".ICO": File_Extension_Info(".ICO", "Image File", [DETAIL_IMAGE]),
    ".JPG": File_Extension_Info(".JPG", "Image File", [DETAIL_IMAGE]),
    ".JPEG": File_Extension_Info(".JPEG", "Image File", [DETAIL_IMAGE]),
    ".PCX": File_Extension_Info(".PCX", "Image File", [DETAIL_IMAGE]),
    ".PNG": File_Extension_Info(".PNG", "Image File", [DETAIL_IMAGE]),
    # Video
    # Fixed: the key said ".ACI" while the info said ".AVI".  The intended
    # ".AVI" key is added; ".ACI" is kept as an alias in case existing
    # uploads rely on it.
    ".AVI": File_Extension_Info(".AVI", "Video File", [DETAIL_VIDEO]),
    ".ACI": File_Extension_Info(".AVI", "Video File", [DETAIL_VIDEO]),
    # Programs
    # .COM is assumed font over program
    ".EXE": File_Extension_Info(".EXE", "Executable", [DETAIL_PROGRAM]),
    ".JAR": File_Extension_Info(".JAR", "Java Jar", [DETAIL_PROGRAM]),
    # Compression Formats
    ".ZIP": File_Extension_Info(".ZIP", "Compressed File", [DETAIL_COMPRESSED]),
    # ROMs
    ".GBA": File_Extension_Info(".GBA", "GBA Rom", [DETAIL_ROM]),
    ".NES": File_Extension_Info(".NES", "NES Rom", [DETAIL_ROM]),
    ".PRG": File_Extension_Info(".PRG", "C64 Rom", [DETAIL_ROM]),
    # Etc.
    "/": File_Extension_Info("/", "", []),
    ".---": File_Extension_Info(".---", "", []),
    ".~~~": File_Extension_Info(".~~~", "", []),
    "._3DSKULL": File_Extension_Info("._3DSKULL", "", []),
    ".ANI": File_Extension_Info(".ANI", "", []),
    ".BIN": File_Extension_Info(".BIN", "", [],),
    ".BSV": File_Extension_Info(".BSV", "", []),
    ".CER": File_Extension_Info(".CER", "", []),
    ".CORRUPT": File_Extension_Info(".CORRUPT", "", []),
    ".CUR": File_Extension_Info(".CUR", "", []),
    ".DB": File_Extension_Info(".DB", "", []),
    ".DLL": File_Extension_Info(".DLL", "", []),
    ".DLM": File_Extension_Info(".DLM", "", []),
    ".DS_STORE": File_Extension_Info(".DS_STORE", "", []),
    ".LNK": File_Extension_Info(".LNK", "", []),
    ".MS": File_Extension_Info(".MS", "", []),  # Weird file in Trash Fleet 3.0
    # NOTE: a duplicate ".OBJ" "Text File" entry was removed from the
    # Plaintext section above; this later entry was the one that took effect.
    ".OBJ": File_Extension_Info(".OBJ", "", []),  # pazzt
    ".OZ": File_Extension_Info(".OZ", "", []),
    ".PIF": File_Extension_Info(".PIF", "", []),
    ".SCR": File_Extension_Info(".SCR", "", []),  # BSV2BRD
    ".TRS": File_Extension_Info(".TRS", "", []),
    ".VSP": File_Extension_Info(".VSP", "", []),
    ".WAR": File_Extension_Info(".WAR", "", []),
    ".ZR": File_Extension_Info(".ZR", "", []),
}
def get_detail_suggestions(file_list):
    """Suggest Detail tags for a list of file names, keyed by extension.

    :param file_list: iterable of file names/paths.
    :return: dict with per-file 'hints', the deduplicated set of suggested
        detail 'hint_ids', and the set of 'unknown_extensions'.
    """
    suggestions = {
        "hints": [],
        "hint_ids": [],
        "unknown_extensions": [],
    }
    for name in file_list:
        # Upper-case extension; fall back to the whole basename when the
        # name has no extension (covers entries like "/").
        ext = os.path.splitext(os.path.basename(name).upper())
        ext = ext[0] if ext[1] == "" else ext[1]
        extension = EXTENSIONS.get(ext)
        if extension:
            suggest = extension.associated_details
            suggestions["hints"].append(
                {"name": name, "type": extension.name, "suggested": suggest, "role": "ambiguous-ext" if extension.ambiguous else "known-ext"}
            )
            suggestions["hint_ids"] += suggest
        else:
            suggestions["hints"].append(
                {"name": name, "type": "Unknown Extension", "role":"unknown-ext"}
            )
            suggestions["unknown_extensions"].append(ext)
    # Get detail names
    # NOTE(review): ``detail_mapping`` is built but never used or returned —
    # confirm whether it was meant to be part of the result.
    qs = Detail.objects.all().values("pk", "title")
    detail_mapping = {}
    for d in qs:
        detail_mapping[d["pk"]] = d["title"]
    # Deduplicate the accumulated ids/extensions before returning.
    suggestions["hint_ids"] = set(suggestions["hint_ids"])
    suggestions["unknown_extensions"] = set(suggestions["unknown_extensions"])
    return suggestions
| StarcoderdataPython |
385259 | from PIL import Image, ImageOps
import numpy as np
import matplotlib.pyplot as plt
import os
def colorize(path):
    """Colorize a grayscale image by offsetting its equalized intensities.

    The histogram-equalized image becomes the blue channel; red and green
    are the same data shifted by 85 and 170 (mod 256).

    NOTE: this function is shadowed by the two-argument ``colorize``
    defined below; rename one of them if both variants are needed.

    :param path: path to the source image.
    :return: a PIL RGB Image.
    """
    img = Image.open(path)
    arr = np.array(ImageOps.equalize(img), np.uint8)
    # uint8 arithmetic wraps, so the % 256 keeps each offset channel in range.
    # (The original also wrapped each channel in a redundant np.abs and
    # created an Image.new that was immediately discarded — both removed.)
    r = np.array((arr + 85) % 256, dtype=np.uint8)
    g = np.array((arr + 170) % 256, dtype=np.uint8)
    b = arr
    full_array = np.stack([r, g, b], axis=2)
    return Image.fromarray(full_array)
def colorize(path, map):
    """Map an equalized grayscale image through a matplotlib colormap.

    NOTE(review): this redefinition shadows the one-argument ``colorize``
    above, so only this variant is reachable at module level.  The ``map``
    parameter also shadows the builtin of the same name.

    :param path: path to the source image.
    :param map: matplotlib colormap name (e.g. 'PiYG').
    :return: a PIL Image of the colormapped data.
    """
    # NOTE(review): plt.cm.get_cmap is deprecated in newer matplotlib —
    # confirm the pinned version still provides it.
    cm_hot = plt.cm.get_cmap(map)
    img_src = ImageOps.equalize(Image.open(path)).convert('L')
    # img_src.thumbnail((512,512))
    im = np.array(img_src)
    im = cm_hot(im)  # colormap lookup yields float RGBA values in [0, 1]
    im = np.uint8(im * 255)
    return Image.fromarray(im)
# Source folders for the SEM dataset; only rat6 is currently processed
# (the full list is kept for reference).
dirs = ['sub-rat1/microscopy/', 'sub-rat2/microscopy/', 'sub-rat3/microscopy/', 'sub-rat4/microscopy/',
        'sub-rat5/microscopy/', 'sub-rat6/microscopy/', 'sub-rat7/microscopy/', 'sub-rat8/microscopy/']
dirs = ['sub-rat6/microscopy/']
for dir in dirs:  # unused enumerate index removed
    source_dir = './dataset/data_axondeepseg_sem/' + dir
    # Fixed typo: was "targer_dir".
    target_dir = './dataset/data_axondeepseg_sem_colored/' + dir
    os.makedirs(target_dir, exist_ok=True)
    for file in os.listdir(source_dir):
        if file.endswith(".png"):
            colored = colorize(source_dir + file, 'PiYG')
            colored.save(target_dir + file)
| StarcoderdataPython |
46534 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from sphinx.application import Sphinx
sys.path.append(os.path.join(os.getcwd(), ".."))
# -- Project information -----------------------------------------------------
project = "GHAS Compliance"
copyright = "2021, GeekMasher"
author = "GeekMasher"
# The full version, including alpha/beta/rc tags
release = "v1.5"
# -- General configuration ---------------------------------------------------
extensions = [
"myst_parser",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx.ext.autosectionlabel",
]
master_doc = "index"
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
source_suffix = {
".rst": "restructuredtext",
".txt": "markdown",
".md": "markdown",
}
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
html_theme = "alabaster"
html_static_path = ["_static"]
html_logo = "_static/SecurityPolicy.png"
htmlhelp_basename = "GHASComplianceDoc"
# -- Options for Napoleon output ------------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# -- Options for manual page output ------------------------------------------
man_pages = [
(master_doc, "ghascompliance", "GHASCompliance Documentation", [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
texinfo_documents = [
(
master_doc,
"GHASCompliance",
"GHASCompliance Documentation",
author,
"GHASCompliance",
"One line description of project.",
"Miscellaneous",
),
]
# unwrap decorators
def unwrap_decorators():
    """Monkeypatch Sphinx/functools so autodoc reports the signature of the
    original (pre-decoration) function instead of its wrapper."""
    import sphinx.util.inspect as inspect
    import functools

    # NOTE(review): sphinx.util.inspect.getargspec was removed in newer
    # Sphinx releases — confirm the pinned Sphinx version still exposes it.
    old_getargspec = inspect.getargspec

    def getargspec(x):
        # Prefer the undecorated function stashed by update_wrapper below.
        return old_getargspec(getattr(x, "_original_function", x))

    inspect.getargspec = getargspec

    old_update_wrapper = functools.update_wrapper

    def update_wrapper(wrapper, wrapped, *a, **kw):
        rv = old_update_wrapper(wrapper, wrapped, *a, **kw)
        rv._original_function = wrapped  # remember the wrapped original
        return rv

    functools.update_wrapper = update_wrapper


unwrap_decorators()
del unwrap_decorators  # run once at import time, then drop the helper
def setup(app: Sphinx):
    """Sphinx extension hook: strip module metadata fields from autodoc output."""

    def cut_module_meta(app, what, name, obj, options, lines):
        """Drop :copyright:/:license: lines from module docstrings."""
        if what != "module":
            return
        kept = []
        for line in lines:
            if line.startswith(":copyright:") or line.startswith(":license:"):
                continue
            kept.append(line)
        # Mutate in place — Sphinx reads back the same list object.
        lines[:] = kept

    app.connect("autodoc-process-docstring", cut_module_meta)
| StarcoderdataPython |
9790795 | <reponame>CodingGorit/Coding-with-Python<filename>Python Web/Flask/Flask WeChat Official Accounts/service/__init__.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#@File: __init__.py.py
#@author: Gorit
#@contact: <EMAIL>
#@time: 2020/3/4 0:13 | StarcoderdataPython |
1659918 | <reponame>aaronkurz/hitl-ab-bpm<gh_stars>1-10
""" Area for user to upload process versions and metadata, which will start a new experiment """
import streamlit as st
import requests
from resources import user_assistance
from config import BACKEND_URI
def upload_files():
    """Area for user to upload process versions and metadata, which will start a new experiment."""
    st.title("Upload Metadata")
    st.write("... and start experiment!")
    with st.expander("Upload Process Versions", expanded=True):
        with st.form(key="Upload Files"):
            process_name = st.text_input("Process name")
            f_a = st.file_uploader("Upload process variant A", type=['bpmn'])
            f_b = st.file_uploader("Upload process variant B", type=['bpmn'])
            customer_categories = st.text_input("Customer categories (separate with dash -)",
                                                help=user_assistance.CUSTOMER_CATEGORIES_INPUT)
            default = st.radio("Default version", ('a', 'b'),
                               help=user_assistance.DEFAULT_VERSION_INPUT)
            default_history = st.file_uploader("Upload data about default version", type=['json'],
                                               help=user_assistance.HISTORY_UPLOAD_DEFAULT)
            if st.form_submit_button("Submit"):
                # All fields are mandatory; text fields must not be blank.
                if (f_a is not None and f_b is not None
                        and process_name.replace(" ", "") != ""
                        and customer_categories.replace(" ", "") != ""
                        and default_history is not None):
                    files_in = {
                        "variantA": f_a,
                        "variantB": f_b,
                        "defaultHistory": default_history
                    }
                    params = {
                        'customer-categories': customer_categories,
                        'default-version': default,
                    }
                    response = requests.post(BACKEND_URI + "process/" + process_name, files=files_in, params=params)
                    if response.status_code == requests.codes.ok:  # pylint: disable=no-member
                        st.success("Files uploaded, continue below.")
                    else:
                        # Fixed: st.exception expects an Exception instance;
                        # st.error is the correct call for a plain failure message.
                        st.error("File upload unsuccessful! Try again.")
                else:
                    st.warning("All fields have to be supplied.")
| StarcoderdataPython |
1721282 | import torch
import torch.nn as nn
from utils import DynamicRNN
class HierarchicalRecurrentEncoder(nn.Module):
    """Hierarchical recurrent encoder for visual dialog: fuses image features
    with each question's words, encodes per-round history, then runs a
    dialog-level LSTM over the accumulated round embeddings."""

    def add_cmdline_args(parser):
        # NOTE(review): defined without ``@staticmethod``/``self`` — call it
        # on the class (HierarchicalRecurrentEncoder.add_cmdline_args(parser)),
        # not on an instance.
        parser.add_argument_group('HRE specific arguments')
        parser.add_argument('-img_feature_size', default=4096,
                            help='Channel size of image feature')
        parser.add_argument('-embed_size', default=300,
                            help='Size of the input word embedding')
        parser.add_argument('-rnn_hidden_size', default=512,
                            help='Size of the multimodal embedding')
        parser.add_argument('-num_layers', default=2,
                            help='Number of layers in LSTM')
        parser.add_argument('-max_history_len', default=60,
                            help='Size of the multimodal embedding')
        parser.add_argument('-dropout', default=0.5, help='Dropout')
        parser.add_argument('-attend_hist', action='store_true',
                            help='Attention-over-history mechanism')

    def __init__(self, args):
        """Build the word embedding and three DynamicRNN-wrapped LSTMs:
        question+image, history, and dialog level."""
        super().__init__()
        self.args = args
        print(args.dropout)
        self.word_embed = nn.Embedding(args.vocab_size, args.embed_size, padding_idx=0)
        # Question words are concatenated with the image feature at every step.
        self.ques_img_rnn = nn.LSTM(args.embed_size + args.img_feature_size,
                                    args.rnn_hidden_size, args.num_layers,
                                    batch_first=True, dropout=args.dropout)
        self.hist_rnn = nn.LSTM(args.embed_size, args.rnn_hidden_size, args.num_layers,
                                batch_first=True, dropout=args.dropout)
        # Dialog LSTM consumes [question+image embedding ; history embedding].
        self.dialog_rnn = nn.LSTM(args.rnn_hidden_size*2, args.rnn_hidden_size,
                                  args.num_layers, batch_first=True, dropout=args.dropout)
        self.ques_img_rnn = DynamicRNN(self.ques_img_rnn)
        self.hist_rnn = DynamicRNN(self.hist_rnn)
        self.dialog_rnn = DynamicRNN(self.dialog_rnn)

    def forward(self, batch):
        """Encode a batch of dialogs.

        :param batch: dict with 'img_feat', 'ques', 'hist', 'ques_len',
            'hist_len' tensors (shapes per the inline comments below).
        :return: dialog-level embeddings from the dialog LSTM.
        """
        # extract data
        img = batch['img_feat']  # batch x feat_size
        ques = batch['ques']  # batch x num_rounds x max_q_len
        hist = batch['hist']  # batch x num_rounds x max_h_len
        batch_size, num_rounds, max_q_len = ques.shape
        # each round can be treated as an independent sample
        ques = ques.view(-1, ques.size(2))  # batch * num_rounds x max_q_len
        ques_embed = self.word_embed(ques)  # batch * num_rounds x max_q_len x embed_size
        ques_len = batch['ques_len'].view(-1)
        # can I just repeat the image vectors? depends if DynamicRNN will caught of the sections that should be padded
        # can I use expand instead of repeat? effect on gradient?
        # concatenate image to each word embedding in each question
        # (image feature is only written up to each question's true length,
        # leaving the padded tail at zero)
        expand_img = torch.zeros(batch_size*num_rounds, max_q_len, img.size(-1)).cuda()
        for sample in range(expand_img.size(0)):
            expand_img[sample, :ques_len[sample]] = img[sample // num_rounds]
        ques_img = torch.cat([ques_embed, expand_img], dim=-1)
        ques_img_embed = self.ques_img_rnn(ques_img, batch['ques_len'])  # batch * num_rounds x hidden_state_size
        # LSTM embedding of every previous round
        hist = hist.view(-1, hist.size(2))  # batch * num_rounds x max_h_len
        hist_embed = self.word_embed(hist)  # batch * num_rounds x max_h_len x embed_size
        hist_embed = self.hist_rnn(hist_embed, batch['hist_len'])  # batch * num_rounds x hidden_state_size
        # concatenate image + question embedding with history embedding for every previous round
        ques_img_hist = torch.cat([ques_img_embed, hist_embed], -1)
        ques_img_hist = ques_img_hist.view(-1, num_rounds, 1, ques_img_hist.size(-1))  # batch x num_rounds x hidden_state_size * 2
        # initialize input to the dialog LSTM
        dialog = torch.zeros(batch_size, num_rounds, num_rounds,
                             hist_embed.size(-1) + ques_img_embed.size(-1)).cuda()
        # dialog LSTM input sequence length size for each sample
        # (round r sees r+1 previous rounds)
        dialog_len = torch.arange(1, num_rounds + 1).unsqueeze(0).cuda()
        dialog_len = dialog_len.repeat(batch_size, 1)  # batch_size x num_rounds
        # define the inputs to the dialog LSTM
        for round in range(num_rounds):
            dialog[:, round:, round, :] = ques_img_hist[:, round]
        # batch * num_rounds x num_rounds x hidden_state_size
        dialog = dialog.reshape(-1, num_rounds, dialog.size(-1))
        dialog_embed = self.dialog_rnn(dialog, dialog_len)
        return dialog_embed
| StarcoderdataPython |
12860059 | <reponame>thusoy/grunt-pylint
""" This module is used for integration testing. """
# pylint: disable=locally-disabled,unused-import
import venv_exclusive
| StarcoderdataPython |
8021155 | #!/usr/bin/env python
import ecto
from ecto.tutorial import Increment, Add
from ecto import BlackBoxCellInfo as CellInfo, BlackBoxForward as Forward
class MyBlackBox(ecto.BlackBox):
    """
    We encapsulate the plasm from the hello_tutorial by exposing the
    start value of 'i2' as a parameter to the BlackBox and forwarding
    the output of the 'add' cell
    """
    @staticmethod
    def declare_cells(_p):
        """
        Implement the virtual function from the base class
        Only cells from which something is forwarded have to be declared
        """
        cells = {}
        # 'i2' has its start value exposed to the user so only a type is given
        cells['i2'] = CellInfo(Increment, name='Inc 2')
        # 'add' is always the same so we could define with a CellInfo(Add, name='Add') or
        # just with an instance
        cells['add'] = Add('Add')
        return cells

    @staticmethod
    def declare_forwards(_p):
        """
        Implement the virtual function from the base class

        Returns the (parameters, inputs, outputs) forwarding declarations.
        """
        # we forward the start parameter of the cells 'i2' but the user will
        # see it as 'start2'
        p = {'i2': [Forward('start',new_key='start2',new_default=20)]}
        # there are no inputs to expose to the user
        i={}
        # we forward all the outputs from add to the user
        o = {'add': 'all'}
        return (p, i, o)

    def configure(self, _p,_i,_o):
        """Create the one cell not declared in declare_cells."""
        # implicitly, 'add' and 'i2' will be created as they have been declared
        # only 'i1' needs to be defined
        self.i1 = Increment('Inc 1', start=18)

    def connections(self, _p):
        """Wire the two increment cells into the adder."""
        # define the connections like you would for the plasm
        return [ self.i1['output'] >> self.add['a'],
                 self.i2['output'] >> self.add['b'] ]
# create an instance of my BlackBox
my_blackbox = MyBlackBox(start2=18)
# create a plasm that only contains the BlackBox
plasm = ecto.Plasm()
plasm.insert(my_blackbox)
# execute the plasm
plasm.execute(niter=2)
# display the output named 'output' in the outputs of cell 'add'
# (parenthesized print works under both Python 2 and Python 3;
# the original bare print statement is Python-2-only)
print(my_blackbox.outputs.output)
| StarcoderdataPython |
1792212 | import numpy as np
import matplotlib.pyplot as plt
def train(X, T):
    """Fit a linear least-squares model on standardized inputs.

    :param X: (n_samples, n_features) input matrix.
    :param T: (n_samples, n_outputs) target matrix.
    :return: model dict with 'means', 'stds' (per-column statistics of X)
        and 'w', the weight matrix whose first row is the bias.
    """
    # Standardize input data (X).
    means = X.mean(axis=0)
    stds = X.std(axis=0)
    Xs = (X - means) / stds
    # Prepend a column of 1s for the bias term.
    Xs = np.insert(Xs, 0, 1, 1)
    # Solve the normal equations for the weights.
    w = np.linalg.lstsq(Xs.T @ Xs, Xs.T @ T, rcond=None)[0]
    # Renamed from ``dict`` to avoid shadowing the builtin.
    model = {'means': means, 'stds': stds, 'w': w}
    return model
def use(model, X):
    """Predict targets for X with a model produced by ``train``/``trainSGD``.

    :param model: dict with 'means', 'stds' and weights 'w'.
    :param X: (n_samples, n_features) input matrix.
    :return: (n_samples, n_outputs) predictions.
    """
    # Standardize with the training statistics, add the bias column, predict.
    standardized = (X - model['means']) / model['stds']
    design = np.insert(standardized, 0, 1, 1)
    return design @ model['w']
def rmse(predict, T):
    """Root-mean-square error between predictions and targets."""
    squared_errors = np.square(T - predict)
    return np.sqrt(np.mean(squared_errors))
def trainSGD(X, T, learningRate, numberOfIterations):
    """Fit a linear model with per-sample (stochastic) gradient descent.

    :param X: (n_samples, n_features) input matrix.
    :param T: (n_samples, n_outputs) target matrix.
    :param learningRate: step size applied to each sample's update.
    :param numberOfIterations: full passes over the data.
    :return: model dict with 'means', 'stds' and 'w', compatible with ``use``.
    """
    # Standardize inputs X.
    means = X.mean(axis=0)
    stds = X.std(axis=0)
    Xs = np.insert((X - means) / stds, 0, 1, 1)  # bias column of 1s
    n_samples = Xs.shape[0]
    # Initialize weights to zero.
    w = np.zeros((Xs.shape[1], T.shape[1]))
    for _ in range(numberOfIterations):
        for n in range(n_samples):
            predicted = Xs[n:n + 1, :] @ w
            # Gradient step on this sample's squared error.
            w += learningRate * Xs[n:n + 1, :].T * (T[n:n + 1, :] - predicted)
    # Renamed from ``dict`` to avoid shadowing the builtin.
    return {'means': means, 'stds': stds, 'w': w}
if __name__ == '__main__':
    # Tiny smoke test of the linear-regression helpers above.
    X = np.array([[0,1,2],[3,4,5], [5,6,7]])
    T = np.array([[1,2,3]])
    T = np.transpose(T)  # targets as a column vector
    model = train(X, T)
    predict = use(model, X)
    # NOTE(review): ``r`` is computed but never printed or used.
    r = rmse(predict, T)
    model2 = trainSGD(X, T, 0.1, 100)
    print(model2)
| StarcoderdataPython |
3303646 | """
Export Directory constants
"""
from setuptools import setup, find_packages
setup(
name='directory_constants',
version='21.1.0',
url='https://github.com/uktrade/directory-constants',
license='MIT',
author='Department for International Trade',
description='Constant values shared between Directory apps.',
packages=find_packages(exclude=["tests.*", "tests"]),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
package_data={
'directory_constants': [
'fixtures/*',
'locale/*',
]
},
include_package_data=True,
install_requires=[
'django>=2.2.24,<=3.2.5',
],
extras_require={
'test': [
'pytest==3.6.0',
'pytest-cov==2.7.1',
'pytest-django==3.3.0',
'flake8==3.0.4',
'twine>=1.11.0,<2.0.0',
'wheel>=0.31.0,<1.0.0',
'freezegun==0.3.8',
'setuptools>=38.6.0,<39.0.0',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| StarcoderdataPython |
284915 | import requests
from bs4 import BeautifulSoup
def parse_url(url):
    """Fetch *url* and return the text of the first link inside every
    ``div.item-title`` element.

    :param url: page URL to scrape.
    :return: list of task titles, one per item-title block.
    :raises requests.HTTPError: if the server returns an error status
        (previously the function silently parsed the error page).
    """
    list_of_tasks = []
    r = requests.get(url)
    r.raise_for_status()  # fail loudly instead of scraping an error page
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'html.parser')
    for tag in soup.find_all("div", {"class": "item-title"}):
        links = tag.find_all("a")
        list_of_tasks.append(links[0].get_text())
    return list_of_tasks
| StarcoderdataPython |
9660384 | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import os
import os.path
import time
from openpyxl import load_workbook
import csv
class Command(BaseCommand):
    help = 'Converts an XLS spreadsheet into a CSV format.'

    def add_arguments(self, parser):
        """Register the command-line options for this management command."""
        parser.add_argument("--xlsfile", required=True, help="Input XLS file with the russian spreadsheet data.", default="NewVisualizingRussian.xlsx")
        parser.add_argument("--csvfile", required=False, help="Output CSV file.", default="russian.csv")
        parser.add_argument("--verbose", help="Increase output verbosity.", action="store_true")

    def handle(self, *args, **options):
        """Convert the active sheet of the XLSX workbook to a CSV file."""
        xlsfile = options['xlsfile']
        csvfile = options['csvfile']
        # NOTE(review): ``verbose`` is parsed but currently unused.
        verbose = options['verbose']
        if not os.path.exists(xlsfile):
            raise CommandError("Input XLS file %s does not exist!" % xlsfile)
        self.stdout.write(f"=> Reading excel file...\n")
        start = time.time()
        workbook = load_workbook(filename=xlsfile)
        worksheet = workbook.active
        with open(csvfile, 'w', newline="") as f:
            csvwriter = csv.writer(f, quoting=csv.QUOTE_ALL)  # commas may appear inside fields
            for row in worksheet.values:
                csvwriter.writerow(row)
        end = time.time()
        self.stdout.write("=> Completed. Execution time: %f seconds\n" % (end - start))
        self.stdout.write("=> CSV file saved to %s\n" % csvfile)
| StarcoderdataPython |
278804 | #!/usr/bin/python
class TaggedAttribute(object):
    """Simple container object to tag values with attributes.

    Feel free to initialize any node with a TA instead of its actual
    value only and it will then have the desired metadata.  For example:

    from pylink import TaggedAttribute as TA
    tx_power = TA(2,
                  part_number='234x',
                  test_report='http://reports.co/234x')
    m = DAGModel([pylink.Transmitter(tx_power_at_pa_dbw=tx_power)])
    """

    def __init__(self, value, **kwargs):
        """Store the tagged value and its metadata.

        :param value: the wrapped value.
        :param kwargs: arbitrary metadata, kept as-is in ``self.meta``.
        """
        self.meta = kwargs
        self.value = value

    def __repr__(self):
        # Added for debuggability: show the wrapped value and its tags.
        return '%s(%r, **%r)' % (type(self).__name__, self.value, self.meta)
| StarcoderdataPython |
3533865 | <reponame>Cheezegami/HKU-ECTTP
from Controls import Controls
from GenericObject import GenericObject
from ObjectRunner import ObjectRunner
class ScoreBoard():
    """Processing.py sketch widget: tracks a click-based score and draws it.

    Relies on Processing sketch globals (mousePressed, mouseX, width,
    textSize, text) being available at runtime.
    """

    def __init__(self,int_score) : # Constructor
        self.int_score=int_score          # current score
        self.bool_mayClickMouse = True    # debounce flag for mouse clicks
        self.int_clickDelay = 20          # frames until the next click counts
        self.int_clickTimer = self.int_clickDelay

    def update(self):
        """Advance score logic and redraw — call once per frame."""
        self.countScoreBoard()
        self.drawScoreBoard()

    def countScoreBoard(self):
        """Award points on mouse clicks, rate-limited by a frame-based delay."""
        global objectList
        if(mousePressed):
            if(self.bool_mayClickMouse == True):
                self.bool_mayClickMouse = False
                self.int_clickDelay = self.int_clickTimer
                for i in range(0, len(ObjectRunner.objectList)):
                    # NOTE(review): ``objectList(i)`` *calls* the list — this
                    # looks like it should be ``objectList[i]``; confirm.
                    if(mouseX < (ObjectRunner.objectList(i).int_oX)):
                        self.int_score += 20
                    else:
                        self.int_score += 10
        # Count the delay down every frame until clicking is re-enabled.
        if(self.bool_mayClickMouse == False):
            self.int_clickDelay -= 1
            print(self.int_clickDelay)
            if(self.int_clickDelay <= 0):
                self.bool_mayClickMouse = True

    def drawScoreBoard(self):
        """Render the score near the top-right of the sketch window."""
        textSize(30)
        text(self.int_score,width-200,50)
1647862 | <reponame>hodakamori/deepmd-starter-kit<filename>gen_cp2k_inputs.py
import MDAnalysis as mda
import os
import shutil
import subprocess as sub
def write_trj_as_cp2k_input(system_name, cell, types, type_map, positions):
    """Write one frame as a CP2K &CELL/&COORD system block.

    :param system_name: output file path.
    :param cell: sequence of 6 values — a, b, c, alpha, beta, gamma.
    :param types: per-atom type codes, mapped to element names via type_map.
    :param type_map: dict from type code to element symbol.
    :param positions: (n_atoms, 3) numpy array of coordinates.
    """
    element_names = [type_map[t] for t in types]
    coords = positions.astype('str')
    lines = [
        '&CELL\n',
        f' ABC {cell[0]} {cell[1]} {cell[2]}\n',
        f' ALPHA_BETA_GAMMA {cell[3]} {cell[4]} {cell[5]}\n',
        '&END CELL\n',
        '&COORD\n',
    ]
    for element, (x, y, z) in zip(element_names, coords):
        lines.append(' '.join([' ', element, x, y, z, '\n']))
    lines.append('&END COORD\n')
    with open(system_name, 'w') as fo:
        fo.writelines(lines)
def modify_input(cp2k_input, prj_name):
    """Rewrite the project-name placeholder in a CP2K input file, in place.

    :param cp2k_input: path to the input file to modify.
    :param prj_name: project name substituted for the 'train' placeholder.
    """
    with open(cp2k_input, 'r') as fi:
        contents = fi.read()
    updated = contents.replace('@SET PRJ train', f'@SET PRJ {prj_name}')
    with open(cp2k_input, 'w') as fo:
        fo.write(updated)
# Input trajectory and CP2K template locations.
lammps_trj = './example/lammps/water.lammpstrj'
cp2k_format_dir = './cp2k_format'
u = mda.Universe(lammps_trj, format="LAMMPSDUMP")
# Map LAMMPS numeric atom types to element symbols.
type_map = {"1":'O', "2":'H'}
# Generate one CP2K input directory per trajectory frame.
for i, t in enumerate(u.trajectory):
    # NOTE(review): ``t`` is not used directly — presumably iterating the
    # trajectory updates ``u.atoms.positions`` in place (MDAnalysis); confirm.
    os.makedirs(f'train_data/train_{i}', exist_ok=True)
    shutil.copy("./cp2k_format/train.inp", f'train_data/train_{i}/water_{i}.inp')
    for item in ['cp2k.dft', 'cp2k.dump', 'cp2k.potentials', 'cp2k.kinds']:
        shutil.copy(os.path.join(cp2k_format_dir, item), f'train_data/train_{i}/')
    write_trj_as_cp2k_input(f'train_data/train_{i}/water_{i}.system', u.dimensions, u.atoms.types, type_map, u.atoms.positions)
    modify_input(f'train_data/train_{i}/water_{i}.inp', f'water_{i}')
| StarcoderdataPython |
6551572 | import json
import urllib.parse
from typing import Callable, Dict, Tuple, List
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
LOGGING_INTEGRATION_NAME = "[Atlassian Confluence Cloud]"
HTTP_ERROR = {
401: "An error occurred while validating the credentials, please check the username or password.",
404: "The resource cannot be found.",
500: "The server encountered an internal error for Atlassian Confluence Cloud "
"and was unable to complete your request."
}
URL_SUFFIX = {
"CONTENT_SEARCH": "/wiki/rest/api/content/search",
"GROUP": "/wiki/rest/api/group",
"CONTENT": "/wiki/rest/api/content",
"USER": "/wiki/rest/api/search/user?cql=type=user",
"SPACE": "/wiki/rest/api/space",
"PRIVATE_SPACE": "/wiki/rest/api/space/_private"
}
MESSAGES = {
"REQUIRED_URL_FIELD": "Site Name can not be empty.",
"NO_RECORDS_FOUND": "No {} were found for the given argument(s).",
"LIMIT": "{} is an invalid value for limit. Limit must be between 0 and int32.",
"START": "{} is an invalid value for start. Start must be between 0 and int32.",
"INVALID_ACCESS_TYPE": "Invalid value for access type. Access type parameter must be one of 'user', 'admin', "
"or 'site-admin' ",
"REQUIRED_ARGUMENT": "Invalid argument value. {} is a required argument.",
"INVALID_CONTENT_TYPE": "Invalid value for content type. Content type parameter can be 'page' or 'blogpost' ",
"HR_DELETE_CONTENT": "Content with Id {} is deleted successfully.",
"INVALID_STATUS": "Invalid value for status. Status must be one of 'current', 'draft' or 'trashed'.",
"BAD_REQUEST": "Bad request: An error occurred while fetching the data.",
"REQUIRED_SORT_KEY": "If 'sort_order' is specified, 'sort_key' is required.",
"INVALID_STATUS_SEARCH": "Invalid value for status. Status must be one of 'current', 'any', 'archived', 'draft' "
"or 'trashed'.",
"INVALID_PERMISSION": "If the 'permission_account_id' or 'permission_group_name' arguments are given, "
"the 'permission_operations' argument must also be given.",
"INVALID_PERMISSIONS_OPERATION": "If the 'permission_operations' argument is given, "
"'permission_account_id' or 'permission_group_name' argument must also be given.",
"PERMISSION_FORMAT": "Please provide the permission in the valid JSON format. "
"Format accepted - 'operation1:targetType1,operation2:targetType2'",
"ADVANCE_PERMISSION_FORMAT": "Please provide the 'advanced_permissions' in the valid JSON format. ",
"INVALID_SPACE_STATUS": "Invalid value for status. Status must be one of 'current' or 'archived'.",
"INVALID_CONTENT_TYPE_UPDATE_CONTENT": "Invalid value for content type. Content type parameter can be 'page', "
"'blogpost', 'comment' or 'attachment'.",
"INVALID_BODY_REPRESENTATION": "Invalid value for body_representation. Body representation must be one of "
"'editor', 'editor2' or 'storage'.",
"INVALID_DELETION_TYPE": "Invalid value for deletion_type. Deletion type must be one of 'move to trash', "
"'permanent delete' or 'permanent delete draft'.",
"INVALID_TITLE_LENGTH": "Title cannot be longer than 255 characters.",
"INVALID_SPACE_NAME_LENGTH": "Space name cannot be longer than 200 characters.",
"INVALID_SPACE_KEY": "Space Key cannot be longer than 255 characters and should contain alphanumeric characters "
"only.",
"PRIVATE_SPACE_PERMISSION": "Permission can not be granted for a private space."
}
OUTPUT_PREFIX = {
"GROUP": "ConfluenceCloud.Group",
"USER": "ConfluenceCloud.User",
"CONTENT": "ConfluenceCloud.Content",
"COMMENT": "ConfluenceCloud.Comment",
"SPACE": "ConfluenceCloud.Space",
"PAGETOKEN": "ConfluenceCloud.PageToken.Content"
}
DEFAULT_LIMIT = "50"
DEFAULT_START = "0"
LEGAL_ACCESS_TYPES = ["user", "site-admin", "admin"]
LEGAL_CONTENT_STATUS = ['current', 'trashed', 'draft', 'archived', 'any']
LEGAL_CONTENT_TYPES = ["page", "blogpost"]
LEGAL_CONTENT_TYPE_UPDATE_COMMAND = ["page", "blogpost", "comment", "attachment"]
DEFAULT_EXPANDED_FIELD_CONTENT = "childTypes.all,space,version,history,ancestors,container,body"
DEFAULT_EXPANDED_FIELD_SPACE = "history"
LEGAL_SPACE_STATUS = ['current', 'archived']
LEGAL_BODY_REPRESENTATION = ['editor', 'editor2', 'storage']
LEGAL_DELETION_TYPES = {
"move to trash": "current",
"permanent delete": "trashed",
"permanent delete draft": "draft"
}
''' CLIENT CLASS '''
class Client(BaseClient):
    """Client class to interact with the service API

    This Client implements API calls, and does not contain any XSOAR logic.
    Should only do requests and return data.
    It inherits from BaseClient defined in CommonServer Python.
    Most calls use _http_request() that handles proxy, SSL verification, etc.
    For this implementation, no special attributes defined
    """

    def http_request(self, *args, **kwargs) -> requests.Response:
        """
        Function to make http requests using inbuilt _http_request() method.

        Treats 200/201/204 as success; every other status code is routed
        through ``exception_handler``.  Returns the raw response object.
        """
        kwargs['ok_codes'] = (200, 201, 204)
        kwargs['error_handler'] = self.exception_handler
        kwargs['resp_type'] = 'response'
        return super()._http_request(*args, **kwargs)

    @staticmethod
    def exception_handler(response: requests.models.Response):
        """
        Handle error in the response and display error message based on status code.

        :type response: ``requests.models.Response``
        :param response: response from API.

        :raises: raise DemistoException based on status code of response.
        """
        err_msg = ""
        if response.status_code in HTTP_ERROR:
            # 401/404/500 have canned messages in HTTP_ERROR.
            err_msg = HTTP_ERROR[response.status_code]
        elif response.status_code > 500:
            # Other 5xx codes reuse the generic server-error message.
            err_msg = HTTP_ERROR[500]
        elif response.status_code not in HTTP_ERROR:
            try:
                # Try to parse json error response
                error_entry = response.json()
                demisto.error(f"{LOGGING_INTEGRATION_NAME} {error_entry}")
                errors = error_entry.get('data', {}).get('errors', [])
                if errors:
                    err_msg = get_error_message(errors)
                elif response.status_code == 400:
                    err_msg = MESSAGES['BAD_REQUEST']
                else:
                    err_msg = error_entry.get('message', '')
            except ValueError:
                # Body was not JSON; fall back to the raw response text.
                err_msg = '{}'.format(response.text)
        raise DemistoException(err_msg)
''' HELPER FUNCTIONS '''
def get_error_message(errors) -> str:
    """Flatten API error objects into a printable message.

    :param errors: list of error dicts, each optionally carrying a
        'message' object with 'key' and/or 'translation' fields.
    :return: concatenated message text; each present field is emitted as
        "<text> \\n" (trailing space preserved for backwards compatibility).
    """
    err_msg = ""
    for error in errors:
        # Guard against errors without a 'message' object at all —
        # the original raised AttributeError on ``None.get``.
        message = error.get('message') or {}
        if message.get('key'):
            err_msg += f"{message.get('key')} \n"
        if message.get('translation'):
            err_msg += f"{message.get('translation')} \n"
    return err_msg
def strip_args(args: dict):
    """
    Strip whitespace from every string value of the argument dictionary, in place.

    :type args: dict
    :param args: argument dictionary
    """
    for key in args:
        current = args[key]
        if isinstance(current, str):
            args[key] = current.strip()
def validate_url(url: str):
    """
    Validate the url parameter; raise ValueError when it is empty.

    :type url: str
    :param url: url to validate.
    """
    if url:
        return
    raise ValueError(MESSAGES["REQUIRED_URL_FIELD"])
def remove_empty_elements_for_context(src):
    """
    Recursively remove empty lists, empty dicts, empty string or None elements from a dictionary.

    :type src: ``dict``
    :param src: Input dictionary.

    :return: Dictionary with all empty lists, empty strings and empty dictionaries removed.
    :rtype: ``dict``
    """

    def _is_empty(candidate):
        # 0 and False are deliberately kept; only None/''/{}/[] count as empty.
        return candidate is None or candidate == '' or candidate == {} or candidate == []

    if isinstance(src, list):
        cleaned = (remove_empty_elements_for_context(item) for item in src)
        return [item for item in cleaned if not _is_empty(item)]
    if isinstance(src, dict):
        cleaned_pairs = ((key, remove_empty_elements_for_context(value)) for key, value in src.items())
        return {key: value for key, value in cleaned_pairs if not _is_empty(value)}
    return src
def validated_required_args_for_permission(permission_account_id, permission_group_name, permission_operations):
    """
    Raise ValueError when the permission arguments are supplied in an invalid combination.

    :type permission_account_id: ``str``
    :param permission_account_id: Account ID

    :type permission_group_name: ``str``
    :param permission_group_name: Name of the group

    :type permission_operations: ``str``
    :param permission_operations: Permissions to be granted

    :return: None
    """
    has_subject = bool(permission_account_id or permission_group_name)
    # A subject (account or group) without operations is invalid, and vice versa.
    if has_subject and not permission_operations:
        raise ValueError(MESSAGES["INVALID_PERMISSION"])
    if permission_operations and not has_subject:
        raise ValueError(MESSAGES["INVALID_PERMISSIONS_OPERATION"])
def prepare_permission_object(permission_account_id: str, permission_group_name: str, attr: List) -> Dict:
    """
    Prepare a permission object from the user-provided values.

    :type permission_account_id: ``str``
    :param permission_account_id: Account ID of the user to whom permission should be granted.

    :type permission_group_name: ``str``
    :param permission_group_name: Group name to whom permission should be granted.

    :type attr: ``List``
    :param attr: Two-element list: [operation, targetType].

    :rtype: ``Dict``
    :return: Returns the permission object.
    """
    operation, target_type = attr[0], attr[1]
    return {
        "subjects": {
            "user": {"results": [{"accountId": permission_account_id}]},
            "group": {"results": [{"name": permission_group_name}]},
        },
        "operation": {
            "operation": operation,
            "targetType": target_type,
        },
        "anonymousAccess": False,
        "unlicensedAccess": False,
    }
def validate_permissions(args: Dict[str, Any]) -> List:
    """
    Validate the permission arguments provided by the user and build the
    corresponding permission objects.

    :type args: ``dict``
    :param args: Input dictionary.

    :return: Permission objects.
    :rtype: ``List``
    """
    permission_account_id = args.get('permission_account_id', '')
    permission_group_name = args.get('permission_group_name', '')
    permission_operations = args.get('permission_operations', '')
    validated_required_args_for_permission(permission_account_id, permission_group_name, permission_operations)

    space_permission: List = []
    if not permission_operations:
        return space_permission

    # Each comma-separated entry must look like "operation:targetType".
    for permission in (entry.strip() for entry in permission_operations.split(",")):
        if not permission:
            continue
        attr = [part.strip() for part in permission.split(":") if part.strip()]
        # Missing operation or target type is a format error.
        if len(attr) != 2:
            raise ValueError(MESSAGES["PERMISSION_FORMAT"])
        space_permission.append(
            prepare_permission_object(permission_account_id, permission_group_name, attr))
    return space_permission
def validate_list_command_args(args: Dict[str, str]) -> Tuple[Optional[int], Optional[int]]:
    """
    Validate arguments for all list commands, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Parameters to send in request
    :rtype: ``Tuple``
    """
    # NOTE(review): arg_to_number (CommonServerPython) may return None for
    # non-numeric input, which would make the comparisons below raise
    # TypeError — confirm upstream guarantees numeric values.
    limit = arg_to_number(args.get('limit', DEFAULT_LIMIT))
    # Upper bound is INT32_MAX, per the API's paging limits.
    if limit < 0 or limit > 2147483647:  # type:ignore
        raise ValueError(MESSAGES["LIMIT"].format(limit))
    offset = arg_to_number(args.get('offset', DEFAULT_START))
    if offset < 0 or offset > 2147483647:  # type:ignore
        raise ValueError(MESSAGES["START"].format(offset))
    return limit, offset
def validate_list_group_args(args: Dict[str, str]):
    """
    Validate and normalize the access_type argument of confluence-cloud-group-list.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: The lower-cased access type (may be an empty string).
    """
    access_type = args.get("access_type", "").lower()
    # An empty value is allowed and means "no filter".
    if access_type not in LEGAL_ACCESS_TYPES and access_type:
        raise ValueError(MESSAGES["INVALID_ACCESS_TYPE"])
    return access_type
def prepare_group_args(args: Dict[str, str]) -> Dict[str, str]:
    """
    Build the query parameters for the list-group request.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.
    """
    max_results, start_at = validate_list_command_args(args)
    return assign_params(
        limit=max_results,
        start=start_at,
        accessType=validate_list_group_args(args),
    )
def prepare_hr_for_groups(groups: List[Dict[str, Any]]) -> str:
    """
    Build the human readable table for the list-groups command.

    :type groups: ``List[Dict[str, Any]]``
    :param groups: The group data.

    :rtype: ``str``
    :return: Human readable.
    """
    rows = [{'ID': group.get('id', ''), 'Name': group.get('name', '')} for group in groups]
    return tableToMarkdown('Group(s)', rows, ['ID', 'Name'], removeNull=True)
def prepare_content_create_params(args) -> Dict[str, Any]:
    """
    Build the JSON body for the content-create request.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Body parameters to send in request
    :rtype: ``Dict[str, Any]``
    """
    representation = args.get('body_representation', '')
    body = {
        representation: {
            "value": args.get('body_value', ''),
            "representation": representation
        }
    }
    payload = {
        "title": args['title'],
        "type": args['type'].lower(),
        "space": {"key": args.get('space_key', '')},
        "status": args.get('status', 'current'),
        "body": body,
        "ancestors": [{"id": args.get('ancestor_id', '')}]
    }
    # Drop the keys the user did not provide so the API applies its defaults.
    return remove_empty_elements_for_context(payload)
def validate_create_content_args(args: Dict[str, str], is_update: bool = False):
    """
    Validate arguments for confluence-cloud-content-create command, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :type is_update: ``bool``
    :param is_update: Whether command is update content or not.

    :return: None
    :rtype: ``None``
    """
    # Use .get() so a missing key produces the friendly "required argument"
    # error below instead of an unhandled KeyError.
    title = args.get('title', '')
    if not title:
        raise ValueError(MESSAGES['REQUIRED_ARGUMENT'].format("title"))
    if len(title) > 255:
        raise ValueError(MESSAGES["INVALID_TITLE_LENGTH"])
    content_type = args.get('type', '').lower()
    if not content_type:
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("type"))
    if not is_update and content_type not in LEGAL_CONTENT_TYPES:
        raise ValueError(MESSAGES["INVALID_CONTENT_TYPE"])
    if is_update and content_type not in LEGAL_CONTENT_TYPE_UPDATE_COMMAND:
        raise ValueError(MESSAGES["INVALID_CONTENT_TYPE_UPDATE_CONTENT"])
    space_key = args.get('space_key', '')
    if not is_update and not space_key:
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("space_key"))
    body_value = args.get('body_value', '')
    body_representation = args.get('body_representation', '')
    if content_type == "comment":
        # A comment must carry both a body and its representation.
        if body_value and body_representation:
            if body_representation not in LEGAL_BODY_REPRESENTATION:
                raise ValueError(MESSAGES["INVALID_BODY_REPRESENTATION"])
        else:
            raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("'body_value' and 'body_representation'"))
def prepare_hr_for_content_create(content: Dict[str, Any], content_type: str) -> str:
    """
    Build the human readable table for content create, comment create and content update commands.

    :type content: ``Dict[str, Any]``
    :param content: The content data.

    :type content_type: ``str``
    :param content_type: Type of the content.

    :rtype: ``str``
    :return: Human readable.
    """
    links = content.get('_links', {})
    history = content.get('history', {})
    row = {
        'ID': content.get('id', ''),
        'Title': f"[{content.get('title', '')}]({links.get('base', '')}{links.get('webui', '')})",
        'Type': content.get('type', ''),
        'Status': content.get('status', ''),
        'Space Name': content.get('space', {}).get('name', ''),
        'Created By': history.get('createdBy', {}).get('displayName', ''),
        'Created At': history.get('createdDate', '')
    }
    return tableToMarkdown(f'{content_type}', row,
                           ['ID', 'Title', 'Type', 'Status', 'Space Name', 'Created By', 'Created At'],
                           removeNull=True)
def prepare_hr_for_content_search(contents: list, url_prefix: str) -> str:
    """
    Build the human readable table for the content search and content list commands.

    :type contents: ``list``
    :param contents: List of content.

    :type url_prefix: ``str``
    :param url_prefix: Url prefix the content.

    :rtype: ``str``
    :return: Human readable.
    """
    rows = []
    for item in contents:
        history = item.get('history', {})
        rows.append({
            'ID': item.get('id', ''),
            'Title': f"[{item.get('title', '')}]({url_prefix}{item.get('_links', {}).get('webui', '')})",
            'Type': item.get('type', ''),
            'Status': item.get('status', ''),
            'Space Name': item.get('space', {}).get('name', ''),
            'Created By': history.get('createdBy', {}).get('displayName', ''),
            'Created At': history.get('createdDate', ''),
            'Version': item.get('version', {}).get('number', '')
        })
    return tableToMarkdown('Content(s)', rows,
                           ['ID', 'Title', 'Type', 'Status', 'Space Name', 'Created By', 'Created At', 'Version'],
                           removeNull=True)
def validate_delete_content_args(args: Dict[str, str]):
    """
    Validate arguments for confluence-cloud-content-delete command, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    # .get() keeps a missing content_id on the friendly error path
    # instead of raising an unhandled KeyError.
    if not args.get("content_id"):
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("content_id"))
    status = args.get("deletion_type", "").lower()
    # Membership test works directly on the dict; .keys() was redundant.
    if status and status not in LEGAL_DELETION_TYPES:
        raise ValueError(MESSAGES["INVALID_DELETION_TYPE"])
def prepare_comment_create_params(args) -> Dict[str, Any]:
    """
    Prepare json object for comment create command.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Body parameters to send in request
    :rtype: ``Dict[str, Any]``
    """
    body_representation = args['body_representation']
    container_type = args.get('container_type', '')
    params = {
        "type": "comment",
        "status": args.get('status', 'current'),
        "container": {
            "id": args['container_id'],
            "type": container_type
        },
        "body": {
            # The representation name doubles as the key of the body object.
            body_representation: {
                "value": args['body_value'],
                "representation": body_representation
            }
        },
        "ancestors": [
            {
                # Optional parent comment; pruned below when not provided.
                "id": args.get('ancestor_id', '')
            }
        ]
    }
    # Strip the keys the user left empty so the API applies its defaults.
    params = remove_empty_elements_for_context(params)
    # Re-insert the container type after pruning so the key is always present
    # in the request — presumably required by the API even when empty; verify.
    params["container"]["type"] = container_type
    return params
def validate_comment_args(args: Dict[str, str]):
    """
    Validate arguments for confluence-cloud-comment-create command, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    # .get() so missing keys raise the friendly required-argument error
    # rather than an unhandled KeyError.
    if not args.get('body_value'):
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("Comment body_value"))
    body_representation = args.get('body_representation', '')
    if not body_representation:
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("body_representation"))
    if body_representation not in LEGAL_BODY_REPRESENTATION:
        raise ValueError(MESSAGES["INVALID_BODY_REPRESENTATION"])
    if not args.get('container_id'):
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("container_id"))
def prepare_hr_for_users(users: List[Dict[str, Any]]) -> str:
    """
    Prepare human readable for list users command.

    :type users: ``List[Dict[str, Any]]``
    :param users: The user data.

    :rtype: ``str``
    :return: Human readable.
    """
    hr_list = []
    for user in users:
        # Guard against a result entry without a nested 'user' object so a
        # partial API response cannot raise a KeyError.
        user_details = user.get('user', {})
        hr_list.append({
            'Account ID': user_details.get('accountId', ''),
            'Name': user_details.get('displayName', ''),
            'User Type': user_details.get('type', '')
        })
    return tableToMarkdown('User(s)', hr_list, ['Account ID', 'Name', 'User Type'], removeNull=True)
def prepare_expand_argument(expand: str, default_fields: str) -> str:
    """
    Combine the default expand fields with the expand fields specified by the user.

    The 'expand' command argument specifies which properties should be expanded.
    In this integration, several of the most significant characteristics are expanded
    by default; other attributes that users want to expand can still be provided.

    :type expand: ``str``
    :param expand: The expand argument passed by the user.

    :type default_fields: ``str``
    :param default_fields: The default fields.

    :return: expand argument value to send in request
    :rtype: ``str``
    """
    # Strip the defaults too so "history, version" matches a user-supplied "version".
    default_expand_fields = [field.strip() for field in default_fields.split(",")]
    extra_fields: list = []
    # Preserve the user's order and skip duplicates. The previous set-based
    # approach produced a non-deterministic field order across runs.
    for field in expand.split(","):
        field = field.strip()
        if field and field not in default_expand_fields and field not in extra_fields:
            extra_fields.append(field)
    if not extra_fields:
        return default_fields
    return default_fields + ',' + ','.join(extra_fields)
def validate_query_argument(args: Dict[str, str]):
    """
    Validate query argument of content search command.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    # .get() ensures a missing key yields the friendly required-argument
    # error instead of an unhandled KeyError.
    if not args.get('query'):
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("query"))
def prepare_search_content_argument(args: Dict[str, str]) -> Dict[str, Any]:
    """
    Build the query parameters for confluence-cloud-content-search.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Parameters to send in request
    :rtype: ``Dict[str, Any]``
    """
    max_results, _ = validate_list_command_args(args)
    validate_query_argument(args)
    # Start from the default expand fields and extend them with the user's.
    expand_fields = DEFAULT_EXPANDED_FIELD_CONTENT
    user_expand = args.get('expand', '')
    if user_expand:
        expand_fields = prepare_expand_argument(user_expand, DEFAULT_EXPANDED_FIELD_CONTENT)
    statuses = argToList(args.get('content_status', ''))
    request_params = {
        'cql': args['query'],
        'cursor': args.get('next_page_token'),
        'expand': expand_fields,
        'limit': max_results,
        'cqlcontext': json.dumps({"contentStatuses": statuses}),
    }
    return assign_params(**request_params)
def prepare_cursor_for_content(response_json: Dict[str, str]) -> str:
    """
    Extract the value of the 'cursor' query-string parameter from the 'next' link.

    :type response_json: ``Dict[str, str]``
    :param response_json: API response.

    :return: Next Page Token(Cursor).
    :rtype: ``str``
    """
    next_record = response_json.get('_links', {}).get('next', '')  # type:ignore
    # Guard against a missing link or a link without a query string
    # (the original split()[1] raised IndexError in that case).
    if not next_record or '?' not in next_record:
        return ""
    query_string = next_record.split('?', 1)[1]
    # parse_qs maps each parameter to a list of values; guard against a
    # 'next' link that carries no 'cursor' parameter.
    cursor_values = urllib.parse.parse_qs(query_string).get('cursor', [])
    return cursor_values[0] if cursor_values else ""
def validate_list_content_args(args):
    """
    Validate arguments for confluence_cloud_content_list command, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    # A sort order is meaningless without a key to sort on.
    if args.get('sort_order', '').lower() and not args.get('sort_key', ''):
        raise ValueError(MESSAGES['REQUIRED_SORT_KEY'])
    if args.get('type', 'page').lower() not in LEGAL_CONTENT_TYPES:
        raise ValueError(MESSAGES['INVALID_CONTENT_TYPE'])
    status = args.get('status', '').lower()
    if status and status not in LEGAL_CONTENT_STATUS:
        raise ValueError(MESSAGES['INVALID_STATUS_SEARCH'])
def prepare_list_content_argument(args: Dict[str, str]) -> Dict[str, Any]:
    """
    Build the query parameters for confluence_cloud_content_list.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Parameters to send in request
    :rtype: ``Dict[str, Any]``
    """
    validate_list_content_args(args)
    max_results, start_at = validate_list_command_args(args)
    request_params: Dict[str, Any] = {
        'limit': max_results,
        'start': start_at,
        'spaceKey': args.get('space_key', ''),
        'type': args.get('type', 'page').lower(),
        'status': args.get('status', '').lower(),
        'expand': DEFAULT_EXPANDED_FIELD_CONTENT,
    }
    sort_key = args.get('sort_key', '')
    sort_order = args.get('sort_order', '').lower()
    if sort_key:
        # Append the order only when one was supplied.
        request_params['orderby'] = f'{sort_key} {sort_order}' if sort_order else f'{sort_key}'
    creation_date = arg_to_datetime(args.get('creation_date'))
    if creation_date:
        request_params['postingDay'] = creation_date.date()  # type: ignore
    user_expand = args.get('expand', '')
    if user_expand:
        request_params['expand'] = prepare_expand_argument(user_expand, DEFAULT_EXPANDED_FIELD_CONTENT)
    return assign_params(**request_params)
def validate_create_space_args(args: Dict[str, str]):
    """
    Validate arguments for confluence-cloud-space-create command, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    unique_key = args.get('unique_key')
    if not unique_key:
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("unique_key"))
    # Space keys must be alphanumeric and at most 255 characters.
    if len(unique_key) > 255 or not unique_key.isalnum():
        raise ValueError(MESSAGES["INVALID_SPACE_KEY"])
    name = args.get('name')
    if not name:
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("name"))
    if len(name) > 200:
        raise ValueError(MESSAGES["INVALID_SPACE_NAME_LENGTH"])
    is_private_space = argToBoolean(args.get('is_private_space', False))
    if is_private_space:
        # Private spaces cannot be created with explicit permissions.
        if args.get('advanced_permissions') or args.get('permission_operations'):
            raise ValueError(MESSAGES["PRIVATE_SPACE_PERMISSION"])
    if args.get('advanced_permissions'):
        try:
            json.loads(args['advanced_permissions'])
        # json.decoder.JSONDecodeError is the same class as json.JSONDecodeError,
        # so listing it once suffices; AttributeError covers non-string input.
        except (json.JSONDecodeError, AttributeError):
            raise ValueError(MESSAGES["ADVANCE_PERMISSION_FORMAT"])
def prepare_create_space_args(args: Dict[str, str]) -> Tuple[dict, Union[bool, str]]:
    """
    Prepare json object for confluence-cloud-space-create command.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Tuple of the request body and whether the space is private.
    :rtype: ``Tuple[dict, Union[bool, str]]``
    """
    is_private_space = argToBoolean(args.get('is_private_space', False))
    if args.get('advanced_permissions'):
        # Already validated as parseable JSON in validate_create_space_args.
        permissions = json.loads(args['advanced_permissions'])
    else:
        permissions = validate_permissions(args)
    params = {
        "key": args['unique_key'],
        "name": args['name'],
        "description": {
            "plain": {
                "value": args.get('description', ''),
                "representation": "plain"
            }
        },
        "permissions": permissions
    }
    # Drop empty values so the API applies its defaults.
    params = remove_empty_elements_for_context(params)
    return params, is_private_space
def prepare_hr_for_space_create(space: Dict[str, Any]) -> str:
    """
    Build the human readable table for the create-space command.

    :type space: ``Dict[str, Any]``
    :param space: The space data.

    :rtype: ``str``
    :return: Human readable.
    """
    links = space.get('_links', {})
    row = {
        'ID': space.get('id', ''),
        'Name': f"[{space.get('name', '')}]({links.get('base', '')}{links.get('webui', '')})",
        'Type': space.get('type', ''),
        'Status': space.get('status', ''),
    }
    return tableToMarkdown('Space', row, ['ID', 'Name', 'Type', 'Status'], removeNull=True)
def validate_status_argument(args: Dict[str, str]):
    """
    Validate the status argument of confluence-cloud-space-list; raise ValueError when invalid.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    status = args.get('status', '')
    # An empty status means "no filter" and is always accepted.
    if status and status.lower() not in LEGAL_SPACE_STATUS:
        raise ValueError(MESSAGES["INVALID_SPACE_STATUS"])
def prepare_list_space_args(args: Dict[str, str]) -> Dict[str, Any]:
    """
    Build the query parameters for confluence-cloud-space-list.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Parameters to send in request
    :rtype: ``Dict[str, Any]``
    """
    validate_status_argument(args)
    max_results, start_at = validate_list_command_args(args)
    request_params: Dict[str, Any] = {
        'limit': max_results,
        'start': start_at,
        'spaceKey': argToList(args.get('space_key')),
        'spaceId': argToList(args.get('space_id')),
        'type': args.get('type'),
        'status': args.get('status'),
        'expand': DEFAULT_EXPANDED_FIELD_SPACE,
    }
    favourite = args.get('favourite', '')
    if favourite:
        # The API expects a lowercase string boolean.
        request_params['favourite'] = "true" if argToBoolean(favourite) else "false"
    user_expand = args.get('expand', '')
    if user_expand:
        request_params['expand'] = prepare_expand_argument(user_expand, DEFAULT_EXPANDED_FIELD_SPACE)
    return assign_params(**request_params)
def prepare_hr_for_space_list(spaces: List[Dict[str, Any]], url_prefix: str) -> str:
    """
    Build the human readable table for the list-space command.

    :type spaces: ``List[Dict[str, Any]]``
    :param spaces: The space data.

    :type url_prefix: ``str``
    :param url_prefix: Base URL used to build the link to each space.

    :rtype: ``str``
    :return: Human readable.
    """
    rows = []
    for space in spaces:
        history = space.get('history', {})
        rows.append({
            'ID': space.get('id', ''),
            'Space Key': space.get('key', ''),
            'Name': f"[{space.get('name', '')}]({url_prefix}{space.get('_links', {}).get('webui', '')})",
            'Type': space.get('type', ''),
            'Status': space.get('status', ''),
            'Created By': history.get('createdBy', {}).get('displayName', ''),
            'Created At': history.get('createdDate', '')
        })
    return tableToMarkdown('Space(s)', rows,
                           ['ID', 'Space Key', 'Name', 'Type', 'Status', 'Created By', 'Created At'],
                           removeNull=True)
def validate_update_content_args(args: Dict[str, str]):
    """
    Validate arguments for confluence-cloud-content-update command, raise ValueError on invalid arguments.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: None
    """
    # Re-use the create-content validation with the update-specific rules.
    validate_create_content_args(args, is_update=True)
    # .get() keeps missing keys on the friendly required-argument error path
    # instead of raising an unhandled KeyError.
    if not args.get("content_id"):
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("content_id"))
    if not args.get("version"):
        raise ValueError(MESSAGES["REQUIRED_ARGUMENT"].format("version"))
''' COMMAND FUNCTIONS '''
def test_module(client: Client) -> str:
    """
    Test API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to
    and the connection to the service is successful; exceptions are raised otherwise.

    :type client: ``Client``
    :param client: client to use

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    # A minimal CQL search is enough to confirm connectivity and credentials.
    client.http_request(
        method='GET',
        url_suffix=URL_SUFFIX["CONTENT_SEARCH"],
        params={"cql": "type=page", "limit": 1},
    )
    return 'ok'
def confluence_cloud_user_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Return a list of users.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    max_results, start_at = validate_list_command_args(args)
    response = client.http_request(
        method="GET",
        url_suffix=URL_SUFFIX["USER"],
        params=assign_params(limit=max_results, start=start_at),
    )
    response_json = response.json()
    records = response_json.get('results', [])
    if not records:
        return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('user(s)'))
    # Only the nested 'user' object of each result goes into the context.
    context = [remove_empty_elements_for_context(record.get('user', {})) for record in records]
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['USER'],
        outputs_key_field='accountId',
        outputs=context,
        readable_output=prepare_hr_for_users(records),
        raw_response=response_json)
def confluence_cloud_content_search_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Returns the list of content that matches a Confluence Query Language (CQL) query.
    The type of content can be a page, blogpost, or comment.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    params = prepare_search_content_argument(args)
    response = client.http_request(method="GET", url_suffix=URL_SUFFIX["CONTENT_SEARCH"], params=params)
    response_json = response.json()
    total_records = response_json.get('results', [])
    if not total_records:
        return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('content(s)'))
    # Creating Context data
    context = remove_empty_elements_for_context(total_records)
    # This endpoint paginates with an opaque cursor taken from the 'next' link.
    next_cursor = prepare_cursor_for_content(response_json)
    next_page_context = {
        "next_page_token": next_cursor,
        "name": "confluence-cloud-content-search"
    }
    next_page_context = remove_empty_elements_for_context(next_page_context)
    # Two context paths are written: the content list (deduplicated by id) and
    # the page token for this command (deduplicated by command name).
    outputs = {
        f"{OUTPUT_PREFIX['CONTENT']}(val.id == obj.id)": context,
        f"{OUTPUT_PREFIX['PAGETOKEN']}(val.name == obj.name)": next_page_context
    }
    # Creating Human Readable
    url_prefix = response_json.get('_links', {}).get('base', '')
    readable_hr = prepare_hr_for_content_search(total_records, url_prefix)
    if next_cursor:
        readable_hr += f'Run the command with argument next_page_token={next_cursor} to see the next set of contents.\n'
    return CommandResults(
        outputs=outputs,
        readable_output=readable_hr,
        raw_response=response_json)
def confluence_cloud_content_update_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Update the existing content with new content.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    validate_update_content_args(args)
    payload = prepare_content_create_params(args)
    # The API requires the next version number on every update.
    payload["version"] = {
        "number": args["version"]
    }
    response = client.http_request(
        method="PUT",
        url_suffix=URL_SUFFIX["CONTENT"] + "/{}".format(args["content_id"]),
        json_data=payload,
    )
    response_json = response.json()
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['CONTENT'],
        outputs_key_field='id',
        outputs=remove_empty_elements_for_context(response_json),
        readable_output=prepare_hr_for_content_create(response_json, "Content"),
        raw_response=response_json)
def confluence_cloud_content_delete_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Move a piece of content to the space's trash, or purge it from the trash,
    depending on the content's type and status.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    validate_delete_content_args(args)
    content_id = args["content_id"]
    # Map the user-facing deletion type onto the API status value.
    deletion_type = args.get("deletion_type", "").lower()
    request_params = assign_params(status=LEGAL_DELETION_TYPES.get(deletion_type))
    client.http_request(
        method="DELETE",
        url_suffix=URL_SUFFIX["CONTENT"] + "/{}".format(content_id),
        params=request_params,
    )
    return CommandResults(readable_output=MESSAGES["HR_DELETE_CONTENT"].format(content_id))
def confluence_cloud_content_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Return the list of contents of confluence.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    request_params = prepare_list_content_argument(args)
    response = client.http_request(method="GET", url_suffix=URL_SUFFIX["CONTENT"], params=request_params)
    response_json = response.json()
    records = response_json.get('results', [])
    if not records:
        return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('content(s)'))
    # The site base URL is needed to build clickable links in the table.
    base_url = response_json.get('_links', {}).get('base', '')
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['CONTENT'],
        outputs_key_field="id",
        outputs=remove_empty_elements_for_context(records),
        readable_output=prepare_hr_for_content_search(records, base_url),
        raw_response=response_json)
def confluence_cloud_space_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Return a list of all Confluence spaces.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    request_params = prepare_list_space_args(args)
    response = client.http_request(method="GET", url_suffix=URL_SUFFIX["SPACE"], params=request_params)
    response_json = response.json()
    records = response_json.get('results', [])
    if not records:
        return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('space(s)'))
    base_url = response_json.get('_links', {}).get('base', '')
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['SPACE'],
        outputs_key_field='id',
        outputs=remove_empty_elements_for_context(records),
        readable_output=prepare_hr_for_space_list(records, base_url),
        raw_response=response_json)
def confluence_cloud_comment_create_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Create a comment for a given content.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    validate_comment_args(args)
    payload = prepare_comment_create_params(args)
    # Comments are created through the generic content endpoint.
    response = client.http_request(method="POST", url_suffix=URL_SUFFIX["CONTENT"], json_data=payload)
    response_json = response.json()
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['COMMENT'],
        outputs_key_field='id',
        outputs=remove_empty_elements_for_context(response_json),
        readable_output=prepare_hr_for_content_create(response_json, "Comment"),
        raw_response=response_json)
def confluence_cloud_content_create_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Create a page or blogpost for a specified space.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    validate_create_content_args(args)
    payload = prepare_content_create_params(args)
    response = client.http_request(method="POST", url_suffix=URL_SUFFIX["CONTENT"], json_data=payload)
    response_json = response.json()
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['CONTENT'],
        outputs_key_field='id',
        outputs=remove_empty_elements_for_context(response_json),
        readable_output=prepare_hr_for_content_create(response_json, "Content"),
        raw_response=response_json)
def confluence_cloud_space_create_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Creates a new space in confluence cloud.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    validate_create_space_args(args)
    params, is_private_space = prepare_create_space_args(args)
    url_suffix = URL_SUFFIX["SPACE"]
    if is_private_space:
        # Private spaces use a dedicated endpoint; any permissions are
        # stripped from the body — presumably that endpoint rejects an
        # explicit permissions list (validation above already forbids them).
        url_suffix = URL_SUFFIX["PRIVATE_SPACE"]
        if 'permissions' in params.keys():
            del params['permissions']
    response = client.http_request(method="POST", url_suffix=url_suffix, json_data=params)
    response_json = response.json()
    # Creating the Context data
    context = remove_empty_elements_for_context(response_json)
    # Creating the Human Readable
    readable_hr = prepare_hr_for_space_create(response_json)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['SPACE'],
        outputs_key_field='id',
        outputs=context,
        readable_output=readable_hr,
        raw_response=response_json
    )
def confluence_cloud_group_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """
    Retrieves the list of groups.

    :type client: ``Client``
    :param client: Client object to be used.

    :type args: ``Dict[str, str]``
    :param args: The command arguments provided by the user.

    :return: Standard command result or no records found message.
    :rtype: ``CommandResults``
    """
    params = prepare_group_args(args)
    response = client.http_request(method="GET", url_suffix=URL_SUFFIX["GROUP"], params=params)
    response_json = response.json()
    total_records = response_json.get('results', [])
    if not total_records:
        return CommandResults(readable_output=MESSAGES['NO_RECORDS_FOUND'].format('group(s)'))
    # Use remove_empty_elements_for_context for consistency with every other
    # command in this integration (the generic remove_empty_elements was an outlier).
    context = remove_empty_elements_for_context(total_records)
    readable_hr = prepare_hr_for_groups(total_records)
    return CommandResults(
        outputs_prefix=OUTPUT_PREFIX['GROUP'],
        outputs_key_field='id',
        outputs=context,
        readable_output=readable_hr,
        raw_response=response_json)
''' MAIN FUNCTION '''
def main() -> None:
    """
    main function, parses params and runs command functions
    """
    params = demisto.params()
    # get the service API url (site name on atlassian.net)
    url = params['url'].strip()
    base_url = "https://{}.atlassian.net".format(url)
    verify_certificate = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    credentials = params.get("username", {})
    # Guard against a missing identifier so a clear error is returned
    # instead of an AttributeError on None.
    username = (credentials.get('identifier') or '').strip()
    password = credentials.get('password')
    demisto.debug(f'{LOGGING_INTEGRATION_NAME} Command being called is {demisto.command()}')
    try:
        validate_url(url)
        headers: Dict = {
            "Accept": "application/json"
        }
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            proxy=proxy,
            headers=headers,
            auth=(username, password)
        )
        # Mapping of command names to their implementations.
        commands: Dict[str, Callable] = {
            'confluence-cloud-group-list': confluence_cloud_group_list_command,
            'confluence-cloud-user-list': confluence_cloud_user_list_command,
            'confluence-cloud-content-search': confluence_cloud_content_search_command,
            'confluence-cloud-content-update': confluence_cloud_content_update_command,
            'confluence-cloud-content-delete': confluence_cloud_content_delete_command,
            'confluence-cloud-content-list': confluence_cloud_content_list_command,
            'confluence-cloud-space-list': confluence_cloud_space_list_command,
            'confluence-cloud-comment-create': confluence_cloud_comment_create_command,
            'confluence-cloud-content-create': confluence_cloud_content_create_command,
            'confluence-cloud-space-create': confluence_cloud_space_create_command
        }
        command = demisto.command()
        args = demisto.args()
        # Normalize arguments before dispatching.
        strip_args(args)
        remove_nulls_from_dictionary(args)
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            return_results(test_module(client))
        elif command in commands:
            return_results(commands[command](client, args))
    # Log exceptions and return errors
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# '__builtin__' (Py2) and 'builtins' (Py3) cover execution inside the
# XSOAR script runner, where __name__ is not '__main__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| StarcoderdataPython |
3444058 | import subprocess
import sys
import unittest
import erwin.__main__
class TestMain(unittest.TestCase):
    """Smoke-test the ``erwin`` command-line entry point via subprocess."""

    def test_help(self):
        # --help must exit 0; check_output raises CalledProcessError otherwise.
        subprocess.check_output([sys.executable, "-m", "erwin", "--help"])

    def test_list(self):
        # --list must exit 0 as well.
        subprocess.check_output([sys.executable, "-m", "erwin", "--list"])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
11310644 | <filename>test/hlt/pytest/python/com/huawei/iotplatform/client/dto/QueryTaskDetailsInDTO.py
class QueryTaskDetailsInDTO(object):
    """Input DTO holding the filter and paging fields for a task-details query."""

    def __init__(self):
        # All fields default to None, meaning "not set".
        self.appId = None
        self.taskId = None
        self.status = None
        self.index = None
        self.nodeId = None
        self.deviceId = None
        self.commandId = None
        self.pageNo = None
        self.pageSize = None

    # Java-style accessors kept one-per-line for API compatibility.
    def getAppId(self): return self.appId

    def setAppId(self, appId): self.appId = appId

    def getTaskId(self): return self.taskId

    def setTaskId(self, taskId): self.taskId = taskId

    def getStatus(self): return self.status

    def setStatus(self, status): self.status = status

    def getIndex(self): return self.index

    def setIndex(self, index): self.index = index

    def getNodeId(self): return self.nodeId

    def setNodeId(self, nodeId): self.nodeId = nodeId

    def getDeviceId(self): return self.deviceId

    def setDeviceId(self, deviceId): self.deviceId = deviceId

    def getCommandId(self): return self.commandId

    def setCommandId(self, commandId): self.commandId = commandId

    def getPageNo(self): return self.pageNo

    def setPageNo(self, pageNo): self.pageNo = pageNo

    def getPageSize(self): return self.pageSize

    def setPageSize(self, pageSize): self.pageSize = pageSize
self.pageSize = pageSize | StarcoderdataPython |
8019363 | import tensorflow as tf
import numpy as np
"""
Implementation of Deep Deterministic Policy Gradients (A2C):
Deep Deterministic Policy Gradient (DDPG) is an algorithm which concurrently learns a Q-function and a policy. It
uses off-policy data and the Bellman equation to learn the Q-function, and uses the Q-function to learn the policy.
----------------------------------------------
Created:
15.01.2021, <NAME> <<EMAIL>>
Paper:
Continuous control with deep reinforcement learning (https://arxiv.org/abs/1509.02971)
Code-Sources:
https://github.com/slowbull/DDPG
https://github.com/hill-a/stable-baselines
https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html
https://github.com/openai/baselines
"""
class NumpyBuffer:
    """Fixed-capacity ring buffer backed by a float32 numpy array.

    Once full, appending overwrites the oldest element. Index 0 always
    refers to the oldest stored element.
    """

    def __init__(self, size, shape=(1,)):
        # size: maximum number of elements; shape: per-element shape.
        self.size = size
        self.items = np.zeros((size,) + shape).astype('float32')
        self.head = 0   # index of the oldest element
        self.count = 0  # number of elements currently stored

    def __len__(self):
        # Bug fix: DataStorage calls len() on this buffer, which previously
        # raised TypeError because __len__ was missing.
        return self.count

    def __getitem__(self, item):
        # `item` may be an int or an integer ndarray (vectorised gather).
        return self.items[(self.head + item) % self.size]

    def get_batch(self, indexes):
        """Gather several stored elements at once.

        Bug fix: DataStorage.get_batch() calls this method, which did not
        exist and raised AttributeError.
        """
        return self.__getitem__(indexes)

    def append(self, v):
        if self.count < self.size:
            self.count += 1
        elif self.count == self.size:
            # Buffer full: advance head so the oldest element is overwritten.
            self.head = (self.head + 1) % self.size
        self.items[(self.head + self.count - 1) % self.size] = v
class DataStorage:
    """Experience-replay storage for (s, a, r, s') transitions.

    All four ring buffers are appended in lock-step, so index i refers to
    the same transition in each of them.
    """

    def __init__(self, size):
        # size: replay capacity shared by all four buffers.
        self.size = size
        self.observations = NumpyBuffer(size)
        self.actions = NumpyBuffer(size)
        self.rewards = NumpyBuffer(size)
        self.observations_next = NumpyBuffer(size)

    def add_data(self, observation, action, reward, next_observation):
        """Append one transition to the storage."""
        self.observations.append(observation)
        self.actions.append(action)
        self.rewards.append(reward)
        self.observations_next.append(next_observation)

    def get_batch(self, batch_size):
        """Sample a random batch (with replacement) of stored transitions.

        Bug fix: the original called len() and .get_batch() on NumpyBuffer,
        neither of which the buffer implemented. Sample over the number of
        elements actually stored (`count`) and gather via the buffer's
        __getitem__, which accepts integer arrays.
        Raises ValueError if the storage is still empty.
        """
        batch_indexes = np.random.randint(self.observations.count, size=batch_size)
        observation_batch = self.observations[batch_indexes]
        action_batch = self.actions[batch_indexes]
        reward_batch = self.rewards[batch_indexes]
        observation_next_batch = self.observations_next[batch_indexes]
        return observation_batch, action_batch, reward_batch, observation_next_batch
def create_network(input_shape):
    """Build the shared two-layer fully connected backbone.

    Returns a Keras Model mapping an input of `input_shape` to a
    64-dimensional ReLU feature vector.
    """
    # TODO: try different parameters
    inputs = tf.keras.Input(shape=input_shape)
    hidden = inputs
    for _ in range(2):
        hidden = tf.keras.layers.Dense(
            units=64, activation=tf.keras.activations.relu)(hidden)
    return tf.keras.Model(inputs=[inputs], outputs=[hidden])
class Actor(tf.keras.Model):
    """Deterministic policy network: maps observations to actions."""

    def __init__(self, observation_shape):
        # Bug fix: tf.keras.Model requires super().__init__() before any
        # attribute assignment; without it, construction raises.
        super(Actor, self).__init__()
        number_of_actions = 2
        self.network = create_network(observation_shape)
        # Bug fix: keep the head as a *layer object*. Previously this was
        # the result of calling Dense on network.outputs[0] (a tensor), so
        # call() crashed trying to invoke a tensor.
        self.output_layer = tf.keras.layers.Dense(
            units=number_of_actions, activation=tf.keras.activations.relu)

    @tf.function
    def call(self, obs):
        """Forward pass: backbone features -> action head."""
        return self.output_layer(self.network(obs))
class Critic(tf.keras.Model):
    """Q-network: estimates action values for an (observation, action) pair."""

    def __init__(self, observation_shape):
        # Bug fix: tf.keras.Model requires super().__init__() before any
        # attribute assignment; without it, construction raises.
        super(Critic, self).__init__()
        # Bug fix: the original read `self.number_of_actions` before it was
        # ever assigned (AttributeError); store it on the instance.
        self.number_of_actions = 2
        # The critic consumes the concatenation of observation and action.
        self.network = create_network((observation_shape[0] + self.number_of_actions,))
        # Bug fix: keep the head as a *layer object* instead of the result
        # of calling Dense on a tensor (which made call() crash).
        self.output_layer = tf.keras.layers.Dense(
            units=self.number_of_actions, activation=tf.keras.activations.relu)

    @tf.function
    def call(self, observations, actions):
        """Forward pass: concat(observation, action) -> backbone -> head."""
        observations_actions = tf.concat([observations, actions], axis=-1)
        # Bug fix: the backbone attribute is `network`; the original
        # referenced an undefined `self.network_builder`.
        features = self.network(observations_actions)
        return self.output_layer(features)
class DDPGModel(tf.Module):
    """Container for the DDPG actor/critic pair, their target networks and
    the two Adam optimisers. `step` and `train` are not yet implemented."""

    def __init__(self, actor, critic, observation_shape):
        # Online networks supplied by the caller.
        self.actor = actor
        self.critic = critic
        # Learning rates for the two optimisers below.
        self.actor_lr = 5e-4
        self.critic_lr = 5e-4
        # NOTE(review): the target networks are freshly initialised here;
        # DDPG normally copies the online weights into them first — confirm.
        self.target_actor = Actor(observation_shape=observation_shape)
        self.target_critic = Critic(observation_shape=observation_shape)
        self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=self.actor_lr)
        self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=self.critic_lr)

    @tf.function
    def step(self, obs):
        # Not implemented yet: should return the actor's action for `obs`.
        pass

    def train(self):
        # Not implemented yet: should perform one actor/critic update.
        pass
def train(env, conf):
    """Set up the DDPG components for the given gym-style environment.

    NOTE(review): the constructed model is not returned or trained yet.
    """
    obs_shape = env.observation_space.shape
    critic = Critic(observation_shape=obs_shape)
    actor = Actor(observation_shape=obs_shape)
    ddpg = DDPGModel(actor, critic, obs_shape)
def val(env, conf):
    """Validation/evaluation entry point (not yet implemented)."""
    # TODO
    pass
| StarcoderdataPython |
3439835 | <reponame>Phenylalaninquelle/python-snippets<gh_stars>0
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Example usage of plot_radar_chart function
"""
import numpy as np
from pysnips.plotting import plot_radar_chart
def main():
    """Demonstrate plot_radar_chart on randomly generated scores."""
    # Fake data: four observations scored on five variables, values in 1..4.
    scores = np.random.randint(1, 5, (4, 5))
    # The variables in our fake data.
    axis_labels = ['Magnificance', 'Foolishness', 'Intelligence', 'Brilliance', 'Thrapp']
    chart_title = 'This is the title'
    # Labels of the different lines for the legend.
    line_labels = ['A', 'B', 'C', 'D']
    # Radial grid lines at 1..5, but tick labels only for 1..4.
    radial_ticks = [1, 2, 3, 4, 5]
    radial_tick_labels = [1, 2, 3, 4]
    fig, ax = plot_radar_chart(scores, line_labels, axis_labels,
                               title=chart_title, r_ticks=radial_ticks,
                               r_tick_labels=radial_tick_labels)
    # Plot adjustment: if the theta labels overlap with the figure, pad the
    # ticks, e.g.:
    #   ax.tick_params(pad=20)
if __name__ == '__main__':
    # Run the demo when executed as a script.
    main()
| StarcoderdataPython |
1618734 | """Platform for climate integration."""
import logging
from typing import List, Optional
from homeassistant.components.climate import (
ATTR_TEMPERATURE,
HVAC_MODE_HEAT,
SUPPORT_TARGET_TEMPERATURE,
TEMP_CELSIUS,
ClimateEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PRECISION_HALVES
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
from .devolo_multi_level_switch import DevoloMultiLevelSwitchDeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
    """Get all cover devices and setup them via config entry."""
    homecontrol = hass.data[DOMAIN]["homecontrol"]
    # Only these device models are thermostats.
    climate_models = (
        "devolo.model.Thermostat:Valve",
        "devolo.model.Room:Thermostat",
    )
    entities = []
    for device in homecontrol.multi_level_switch_devices:
        # The model check does not depend on the individual switch property,
        # so skip non-thermostat devices up front.
        if device.device_model_uid not in climate_models:
            continue
        for element_uid in device.multi_level_switch_property:
            entities.append(
                DevoloClimateDeviceEntity(
                    homecontrol=homecontrol,
                    device_instance=device,
                    element_uid=element_uid,
                )
            )
    async_add_entities(entities, False)
class DevoloClimateDeviceEntity(DevoloMultiLevelSwitchDeviceEntity, ClimateEntity):
    """Representation of a climate/thermostat device within devolo Home Control."""

    @property
    def current_temperature(self) -> Optional[float]:
        """Return the current temperature."""
        # `_value` is maintained by the multi-level-switch base entity.
        return self._value

    @property
    def hvac_mode(self) -> str:
        """Return the supported HVAC mode."""
        # These thermostats only heat; there is no cooling or off control.
        return HVAC_MODE_HEAT

    @property
    def hvac_modes(self) -> List[str]:
        """Return the list of available hvac operation modes."""
        return [HVAC_MODE_HEAT]

    @property
    def min_temp(self) -> float:
        """Return the minimum set temperature value."""
        return self._multi_level_switch_property.min

    @property
    def max_temp(self) -> float:
        """Return the maximum set temperature value."""
        return self._multi_level_switch_property.max

    @property
    def precision(self) -> float:
        """Return the precision of the set temperature."""
        return PRECISION_HALVES

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        # Only target-temperature adjustment is supported.
        return SUPPORT_TARGET_TEMPERATURE

    @property
    def temperature_unit(self) -> str:
        """Return the supported unit of temperature."""
        return TEMP_CELSIUS

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        # Assumes ATTR_TEMPERATURE is always present in kwargs for entities
        # declaring SUPPORT_TARGET_TEMPERATURE — TODO confirm.
        self._multi_level_switch_property.set(kwargs[ATTR_TEMPERATURE])
| StarcoderdataPython |
1634358 | <reponame>AllMyChanges/allmychanges.com<filename>allmychanges/tests/api2/user_tags.py
# coding: utf-8
from nose.tools import eq_
from operator import itemgetter
from django.test import Client
from ..utils import create_user, get_json, post_json
from allmychanges.models import Changelog, Tag
from unittest import TestCase
from hamcrest import (
assert_that,
contains,
has_properties,
equal_to,
)
def attr_or_item_getter(name, default=None):
    """Build a getter that reads `name` from an object attribute or, failing
    that, from a mapping via .get() with `default`."""
    def _fetch(obj):
        return getattr(obj, name) if hasattr(obj, name) else obj.get(name, default)
    return _fetch
# Projection helpers used in the assertions below; `get_names` works on
# both API result dicts and Tag model objects.
get_names = attr_or_item_getter('name')
get_versions = itemgetter('version_number')
class TestUserTags(TestCase):
    """API tests for per-user version tags: listing via /v1/tags/ and the
    changelog tag/untag endpoints."""

    def setUp(self):
        # Authenticated client plus a changelog with ten versions 0.1.0-0.1.9.
        self.cl = Client()
        self.user = create_user('art')
        self.cl.login(username='art', password='<PASSWORD>')
        self.changelog = Changelog.objects.create(namespace='python',
                                                  name='pip',
                                                  source='https://github.com/some/url')
        self.versions = []
        for i in range(10):
            v = self.changelog.versions.create(number='0.1.{0}'.format(i))
            self.versions.append(v)

    def test_if_no_tags_return_empty_list(self):
        data = get_json(self.cl, '/v1/tags/')
        eq_([], data['results'])

    def test_if_there_are_some_tags(self):
        self.versions[3].set_tag(self.user, 'amch')
        self.versions[3].set_tag(self.user, 'foo')
        self.versions[5].set_tag(self.user, 'bar')
        data = get_json(self.cl, '/v1/tags/')
        eq_(['amch', 'foo', 'bar'],
            map(get_names, data['results']))

    def test_that_other_user_does_not_see_mine(self):
        # Tags are private per user.
        ivan = create_user('ivan')
        self.versions[0].set_tag(ivan, 'foo')
        # now I create a tag myself
        self.versions[0].set_tag(self.user, 'bar')
        # and should see only the 'bar' tag
        data = get_json(self.cl, '/v1/tags/')
        eq_(['bar'], map(get_names, data['results']))

    def test_that_i_can_filter_tags_by_project_id(self):
        clint = Changelog.objects.create(
            namespace='python',
            name='clint',
            source='https://github.com/some/clint')
        first_version = clint.versions.create(number='0.1.0')
        first_version.set_tag(self.user, 'blah')
        self.versions[0].set_tag(self.user, 'minor')
        data = get_json(self.cl, '/v1/tags/?project_id={0}'.format(
            clint.id))
        eq_(['blah'], map(get_names, data['results']))

    def test_when_other_version_tagged_with_same_tag_it_is_moved(self):
        # A tag name is unique per user, so retagging moves it.
        self.versions[0].set_tag(self.user, 'blah')
        self.versions[5].set_tag(self.user, 'blah')
        data = get_json(self.cl, '/v1/tags/')
        eq_(['blah'], map(get_names, data['results']))
        eq_(['0.1.5'], map(get_versions, data['results']))

    def test_same_version_can_have_different_tags(self):
        self.versions[0].set_tag(self.user, 'blah')
        self.versions[0].set_tag(self.user, 'minor')
        self.versions[0].set_tag(self.user, 'again')
        data = get_json(self.cl, '/v1/tags/')
        eq_(['blah', 'minor', 'again'],
            map(get_names, data['results']))

    def test_special_handle_can_tag_a_version(self):
        version = self.versions[3]
        post_json(
            self.cl,
            '/v1/changelogs/{}/tag/'.format(self.changelog.pk),
            expected_code=201,
            name='blah',
            version=version.number)
        eq_(['blah'],
            map(get_names, version.tags.all()))

    def test_special_handle_can_untag_a_version(self):
        version = self.versions[3]
        version.set_tag(self.user, 'foo')
        post_json(
            self.cl,
            '/v1/changelogs/{}/untag/'.format(self.changelog.pk),
            expected_code=204,
            name='foo')
        # check if tag was removed
        eq_([],
            map(get_names, version.tags.all()))

    def test_untag_of_unknown_tag_is_ok(self):
        # Untagging a name that was never set is a no-op (still 204).
        version = self.versions[3]
        version.set_tag(self.user, 'foo')  # here we have 'foo' tag
        # but remove 'bar' tag
        post_json(
            self.cl,
            '/v1/changelogs/{}/untag/'.format(self.changelog.pk),
            expected_code=204,
            name='bar')
        # and foo tag is still there
        eq_(['foo'],
            map(get_names, version.tags.all()))

    def test_unknown_version_can_be_tagged_and_assigned_to_version_later(self):
        # checking if we can create a tag with version which is not known
        self.changelog.set_tag(self.user, 'blah', '0.2.0')
        # this tag should be set on changelog
        # but have no version assigned
        all_tags = list(self.changelog.tags.all())
        assert_that(
            all_tags,
            contains(
                has_properties(
                    name='blah',
                    version=None,
                )
            )
        )
        # after we create the version with this number
        version = self.changelog.versions.create(
            number='0.2.0'
        )
        # this version should have this tag
        tag = Tag.objects.get(pk=all_tags[0].pk)
        assert_that(
            tag.version,
            equal_to(version))
| StarcoderdataPython |
8039856 | import enum
from typing import Dict, List
import re
import pandas as pd
from pandas import DataFrame
class Sentiments(enum.Enum):
    """Binary sentiment labels; the string values double as dict keys in
    create_classes()."""
    POS = 'POS'
    NEG = 'NEG'
def read_sample() -> DataFrame:
    """Load the raw reviews dataset and coerce the rating column to int64."""
    reviews = pd.read_csv('data/raw/reviews.csv')
    reviews['rating'] = reviews['rating'].astype(dtype='int64')
    return reviews
def create_classes(df: DataFrame) -> Dict[str, List[str]]:
    """Split reviews into sentiment classes; rating >= 40 counts as positive.

    Side effect: adds a 'sentiment' column to `df`.
    Returns a mapping of sentiment value ('POS'/'NEG') to review texts.
    """
    df['sentiment'] = df['rating'].apply(
        lambda rating: Sentiments.POS if rating >= 40 else Sentiments.NEG)
    review_classes = {}
    for sentiment in Sentiments:
        mask = df['sentiment'] == sentiment
        review_classes[sentiment.value] = df[mask]['review'].values.tolist()
    return review_classes
def clean_sentences(df):
    """Return the review texts with e-mail addresses removed, whitespace
    collapsed to single spaces and apostrophes stripped."""
    # Compile each pattern once; every pattern is applied to every review.
    email_re = re.compile(r'\S*@\S*\s?')
    spaces_re = re.compile(r'\s+')
    quote_re = re.compile(r"\'")
    cleaned = []
    for sentence in df.review.values.tolist():
        sentence = email_re.sub('', sentence)
        sentence = spaces_re.sub(' ', sentence)
        sentence = quote_re.sub('', sentence)
        cleaned.append(sentence)
    return cleaned
| StarcoderdataPython |
6672329 | <filename>Engine/Render/rpcore/gui/loading_screen.py
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from Engine.Render.rplibs.six.moves import range # pylint: disable=import-error
from Engine.Render.rpcore.gui.sprite import Sprite
from Engine.Render.rpcore.rpobject import RPObject
from Engine.Render.rpcore.globals import Globals
class LoadingScreen(RPObject):
    """ This is the default loading screen used by the pipeline. It provides
    the ability to display a simple image during loading. The image should be
    in the format 16:9 and not too small, to avoid being blurred out. """

    def __init__(self, pipeline, image_source="/$$rp/data/gui/loading_screen_bg.txo"):
        """ Inits the loading screen with a given image source. By default,
        this is the pipeline loading screen, but it can be overridden. """
        RPObject.__init__(self)
        self.pipeline = pipeline
        self.image_source = image_source

    def create(self):
        """ Creates the gui components """
        res_x, res_y = Globals.native_resolution.x, Globals.native_resolution.y
        self.fullscreen_node = Globals.base.pixel2dp.attach_new_node("LoadingScreen")
        self.fullscreen_node.set_bin("fixed", 10)
        self.fullscreen_node.set_depth_test(False)
        # Scale a 1920x1080 image uniformly so it covers the whole screen.
        factor = max(res_x / 1920.0, res_y / 1080.0)
        self.fullscreen_bg = Sprite(
            image=self.image_source,
            x=(res_x - 1920.0 * factor) // 2,
            y=(res_y - 1080.0 * factor) // 2,
            w=int(1920 * factor),
            h=int(1080 * factor),
            parent=self.fullscreen_node,
            near_filter=False)
        # Render two frames so the image is guaranteed to reach the screen.
        for _ in range(2):
            Globals.base.graphicsEngine.render_frame()

    def remove(self):
        """ Removes the loading screen """
        self.fullscreen_bg.node["image"].get_texture().release_all()
        self.fullscreen_node.remove_node()
| StarcoderdataPython |
1834875 | <reponame>spencerkent/pyPyrTools<gh_stars>10-100
import numpy
def sp3Filters():
    ''' Steerable pyramid filters. Transform described in:

    @INPROCEEDINGS{Simoncelli95b,
        TITLE = "The Steerable Pyramid: A Flexible Architecture for
                 Multi-Scale Derivative Computation",
        AUTHOR = "<NAME> and <NAME>",
        BOOKTITLE = "Second Int'l Conf on Image Processing",
        ADDRESS = "Washington, DC", MONTH = "October", YEAR = 1995 }

    Filter kernel design described in:

    @INPROCEEDINGS{Karasaridis96,
        TITLE = "A Filter Design Technique for
                 Steerable Pyramid Image Transforms",
        AUTHOR = "<NAME> and <NAME>",
        BOOKTITLE = "ICASSP", ADDRESS = "Atlanta, GA",
        MONTH = "May", YEAR = 1996 }

    Returns a dict with keys:
      'harmonics' - angular harmonics present in the oriented filters
      'mtx'       - 4x4 steering matrix
      'hi0filt'   - 15x15 initial high-pass kernel
      'lo0filt'   - 9x9 initial low-pass kernel
      'lofilt'    - 17x17 low-pass kernel used between pyramid levels
      'bfilts'    - four oriented band-pass kernels, one per column,
                    each stored flattened (81 taps = 9x9)
    All coefficient values are fixed design constants from the papers above.
    '''
    filters = {}
    # Angular harmonics contained in the four oriented filters.
    filters['harmonics'] = numpy.array([1, 3])
    # Steering matrix mapping the 4 band responses to steered orientations.
    filters['mtx'] = (
        numpy.array([[0.5000, 0.3536, 0, -0.3536],
                     [-0.0000, 0.3536, 0.5000, 0.3536],
                     [0.5000, -0.3536, 0, 0.3536],
                     [-0.0000, 0.3536, -0.5000, 0.3536]]))
    # Initial high-pass residual kernel (15x15, symmetric).
    filters['hi0filt'] = (
        numpy.array([[-4.0483998600E-4, -6.2596000498E-4, -3.7829999201E-5,
                      8.8387000142E-4, 1.5450799838E-3, 1.9235999789E-3,
                      2.0687500946E-3, 2.0898699295E-3, 2.0687500946E-3,
                      1.9235999789E-3, 1.5450799838E-3, 8.8387000142E-4,
                      -3.7829999201E-5, -6.2596000498E-4, -4.0483998600E-4],
                     [-6.2596000498E-4, -3.2734998967E-4, 7.7435001731E-4,
                      1.5874400269E-3, 2.1750701126E-3, 2.5626500137E-3,
                      2.2892199922E-3, 1.9755100366E-3, 2.2892199922E-3,
                      2.5626500137E-3, 2.1750701126E-3, 1.5874400269E-3,
                      7.7435001731E-4, -3.2734998967E-4, -6.2596000498E-4],
                     [-3.7829999201E-5, 7.7435001731E-4, 1.1793200392E-3,
                      1.4050999889E-3, 2.2253401112E-3, 2.1145299543E-3,
                      3.3578000148E-4, -8.3368999185E-4, 3.3578000148E-4,
                      2.1145299543E-3, 2.2253401112E-3, 1.4050999889E-3,
                      1.1793200392E-3, 7.7435001731E-4, -3.7829999201E-5],
                     [8.8387000142E-4, 1.5874400269E-3, 1.4050999889E-3,
                      1.2960999738E-3, -4.9274001503E-4, -3.1295299996E-3,
                      -4.5751798898E-3, -5.1014497876E-3, -4.5751798898E-3,
                      -3.1295299996E-3, -4.9274001503E-4, 1.2960999738E-3,
                      1.4050999889E-3, 1.5874400269E-3, 8.8387000142E-4],
                     [1.5450799838E-3, 2.1750701126E-3, 2.2253401112E-3,
                      -4.9274001503E-4, -6.3222697936E-3, -2.7556000277E-3,
                      5.3632198833E-3, 7.3032598011E-3, 5.3632198833E-3,
                      -2.7556000277E-3, -6.3222697936E-3, -4.9274001503E-4,
                      2.2253401112E-3, 2.1750701126E-3, 1.5450799838E-3],
                     [1.9235999789E-3, 2.5626500137E-3, 2.1145299543E-3,
                      -3.1295299996E-3, -2.7556000277E-3, 1.3962360099E-2,
                      7.8046298586E-3, -9.3812197447E-3, 7.8046298586E-3,
                      1.3962360099E-2, -2.7556000277E-3, -3.1295299996E-3,
                      2.1145299543E-3, 2.5626500137E-3, 1.9235999789E-3],
                     [2.0687500946E-3, 2.2892199922E-3, 3.3578000148E-4,
                      -4.5751798898E-3, 5.3632198833E-3, 7.8046298586E-3,
                      -7.9501636326E-2, -0.1554141641, -7.9501636326E-2,
                      7.8046298586E-3, 5.3632198833E-3, -4.5751798898E-3,
                      3.3578000148E-4, 2.2892199922E-3, 2.0687500946E-3],
                     [2.0898699295E-3, 1.9755100366E-3, -8.3368999185E-4,
                      -5.1014497876E-3, 7.3032598011E-3, -9.3812197447E-3,
                      -0.1554141641, 0.7303866148, -0.1554141641,
                      -9.3812197447E-3, 7.3032598011E-3, -5.1014497876E-3,
                      -8.3368999185E-4, 1.9755100366E-3, 2.0898699295E-3],
                     [2.0687500946E-3, 2.2892199922E-3, 3.3578000148E-4,
                      -4.5751798898E-3, 5.3632198833E-3, 7.8046298586E-3,
                      -7.9501636326E-2, -0.1554141641, -7.9501636326E-2,
                      7.8046298586E-3, 5.3632198833E-3, -4.5751798898E-3,
                      3.3578000148E-4, 2.2892199922E-3, 2.0687500946E-3],
                     [1.9235999789E-3, 2.5626500137E-3, 2.1145299543E-3,
                      -3.1295299996E-3, -2.7556000277E-3, 1.3962360099E-2,
                      7.8046298586E-3, -9.3812197447E-3, 7.8046298586E-3,
                      1.3962360099E-2, -2.7556000277E-3, -3.1295299996E-3,
                      2.1145299543E-3, 2.5626500137E-3, 1.9235999789E-3],
                     [1.5450799838E-3, 2.1750701126E-3, 2.2253401112E-3,
                      -4.9274001503E-4, -6.3222697936E-3, -2.7556000277E-3,
                      5.3632198833E-3, 7.3032598011E-3, 5.3632198833E-3,
                      -2.7556000277E-3, -6.3222697936E-3, -4.9274001503E-4,
                      2.2253401112E-3, 2.1750701126E-3, 1.5450799838E-3],
                     [8.8387000142E-4, 1.5874400269E-3, 1.4050999889E-3,
                      1.2960999738E-3, -4.9274001503E-4, -3.1295299996E-3,
                      -4.5751798898E-3, -5.1014497876E-3, -4.5751798898E-3,
                      -3.1295299996E-3, -4.9274001503E-4, 1.2960999738E-3,
                      1.4050999889E-3, 1.5874400269E-3, 8.8387000142E-4],
                     [-3.7829999201E-5, 7.7435001731E-4, 1.1793200392E-3,
                      1.4050999889E-3, 2.2253401112E-3, 2.1145299543E-3,
                      3.3578000148E-4, -8.3368999185E-4, 3.3578000148E-4,
                      2.1145299543E-3, 2.2253401112E-3, 1.4050999889E-3,
                      1.1793200392E-3, 7.7435001731E-4, -3.7829999201E-5],
                     [-6.2596000498E-4, -3.2734998967E-4, 7.7435001731E-4,
                      1.5874400269E-3, 2.1750701126E-3, 2.5626500137E-3,
                      2.2892199922E-3, 1.9755100366E-3, 2.2892199922E-3,
                      2.5626500137E-3, 2.1750701126E-3, 1.5874400269E-3,
                      7.7435001731E-4, -3.2734998967E-4, -6.2596000498E-4],
                     [-4.0483998600E-4, -6.2596000498E-4, -3.7829999201E-5,
                      8.8387000142E-4, 1.5450799838E-3, 1.9235999789E-3,
                      2.0687500946E-3, 2.0898699295E-3, 2.0687500946E-3,
                      1.9235999789E-3, 1.5450799838E-3, 8.8387000142E-4,
                      -3.7829999201E-5, -6.2596000498E-4, -4.0483998600E-4]]))
    # Initial low-pass kernel (9x9, symmetric).
    filters['lo0filt'] = (
        numpy.array([[-8.7009997515E-5, -1.3542800443E-3, -1.6012600390E-3,
                      -5.0337001448E-4, 2.5240099058E-3, -5.0337001448E-4,
                      -1.6012600390E-3, -1.3542800443E-3, -8.7009997515E-5],
                     [-1.3542800443E-3, 2.9215801042E-3, 7.5227199122E-3,
                      8.2244202495E-3, 1.1076199589E-3, 8.2244202495E-3,
                      7.5227199122E-3, 2.9215801042E-3, -1.3542800443E-3],
                     [-1.6012600390E-3, 7.5227199122E-3, -7.0612900890E-3,
                      -3.7694871426E-2, -3.2971370965E-2, -3.7694871426E-2,
                      -7.0612900890E-3, 7.5227199122E-3, -1.6012600390E-3],
                     [-5.0337001448E-4, 8.2244202495E-3, -3.7694871426E-2,
                      4.3813198805E-2, 0.1811603010, 4.3813198805E-2,
                      -3.7694871426E-2, 8.2244202495E-3, -5.0337001448E-4],
                     [2.5240099058E-3, 1.1076199589E-3, -3.2971370965E-2,
                      0.1811603010, 0.4376249909, 0.1811603010,
                      -3.2971370965E-2, 1.1076199589E-3, 2.5240099058E-3],
                     [-5.0337001448E-4, 8.2244202495E-3, -3.7694871426E-2,
                      4.3813198805E-2, 0.1811603010, 4.3813198805E-2,
                      -3.7694871426E-2, 8.2244202495E-3, -5.0337001448E-4],
                     [-1.6012600390E-3, 7.5227199122E-3, -7.0612900890E-3,
                      -3.7694871426E-2, -3.2971370965E-2, -3.7694871426E-2,
                      -7.0612900890E-3, 7.5227199122E-3, -1.6012600390E-3],
                     [-1.3542800443E-3, 2.9215801042E-3, 7.5227199122E-3,
                      8.2244202495E-3, 1.1076199589E-3, 8.2244202495E-3,
                      7.5227199122E-3, 2.9215801042E-3, -1.3542800443E-3],
                     [-8.7009997515E-5, -1.3542800443E-3, -1.6012600390E-3,
                      -5.0337001448E-4, 2.5240099058E-3, -5.0337001448E-4,
                      -1.6012600390E-3, -1.3542800443E-3, -8.7009997515E-5]]))
    # Low-pass kernel applied between successive pyramid levels (17x17).
    filters['lofilt'] = (
        numpy.array([[-4.3500000174E-5, 1.2078000145E-4, -6.7714002216E-4,
                      -1.2434000382E-4, -8.0063997302E-4, -1.5970399836E-3,
                      -2.5168000138E-4, -4.2019999819E-4, 1.2619999470E-3,
                      -4.2019999819E-4, -2.5168000138E-4, -1.5970399836E-3,
                      -8.0063997302E-4, -1.2434000382E-4, -6.7714002216E-4,
                      1.2078000145E-4, -4.3500000174E-5],
                     [1.2078000145E-4, 4.4606000301E-4, -5.8146001538E-4,
                      5.6215998484E-4, -1.3688000035E-4, 2.3255399428E-3,
                      2.8898599558E-3, 4.2872801423E-3, 5.5893999524E-3,
                      4.2872801423E-3, 2.8898599558E-3, 2.3255399428E-3,
                      -1.3688000035E-4, 5.6215998484E-4, -5.8146001538E-4,
                      4.4606000301E-4, 1.2078000145E-4],
                     [-6.7714002216E-4, -5.8146001538E-4, 1.4607800404E-3,
                      2.1605400834E-3, 3.7613599561E-3, 3.0809799209E-3,
                      4.1121998802E-3, 2.2212199401E-3, 5.5381999118E-4,
                      2.2212199401E-3, 4.1121998802E-3, 3.0809799209E-3,
                      3.7613599561E-3, 2.1605400834E-3, 1.4607800404E-3,
                      -5.8146001538E-4, -6.7714002216E-4],
                     [-1.2434000382E-4, 5.6215998484E-4, 2.1605400834E-3,
                      3.1757799443E-3, 3.1846798956E-3, -1.7774800071E-3,
                      -7.4316998944E-3, -9.0569201857E-3, -9.6372198313E-3,
                      -9.0569201857E-3, -7.4316998944E-3, -1.7774800071E-3,
                      3.1846798956E-3, 3.1757799443E-3, 2.1605400834E-3,
                      5.6215998484E-4, -1.2434000382E-4],
                     [-8.0063997302E-4, -1.3688000035E-4, 3.7613599561E-3,
                      3.1846798956E-3, -3.5306399222E-3, -1.2604200281E-2,
                      -1.8847439438E-2, -1.7508180812E-2, -1.6485679895E-2,
                      -1.7508180812E-2, -1.8847439438E-2, -1.2604200281E-2,
                      -3.5306399222E-3, 3.1846798956E-3, 3.7613599561E-3,
                      -1.3688000035E-4, -8.0063997302E-4],
                     [-1.5970399836E-3, 2.3255399428E-3, 3.0809799209E-3,
                      -1.7774800071E-3, -1.2604200281E-2, -2.0229380578E-2,
                      -1.1091699824E-2, 3.9556599222E-3, 1.4385120012E-2,
                      3.9556599222E-3, -1.1091699824E-2, -2.0229380578E-2,
                      -1.2604200281E-2, -1.7774800071E-3, 3.0809799209E-3,
                      2.3255399428E-3, -1.5970399836E-3],
                     [-2.5168000138E-4, 2.8898599558E-3, 4.1121998802E-3,
                      -7.4316998944E-3, -1.8847439438E-2, -1.1091699824E-2,
                      2.1906599402E-2, 6.8065837026E-2, 9.0580143034E-2,
                      6.8065837026E-2, 2.1906599402E-2, -1.1091699824E-2,
                      -1.8847439438E-2, -7.4316998944E-3, 4.1121998802E-3,
                      2.8898599558E-3, -2.5168000138E-4],
                     [-4.2019999819E-4, 4.2872801423E-3, 2.2212199401E-3,
                      -9.0569201857E-3, -1.7508180812E-2, 3.9556599222E-3,
                      6.8065837026E-2, 0.1445499808, 0.1773651242,
                      0.1445499808, 6.8065837026E-2, 3.9556599222E-3,
                      -1.7508180812E-2, -9.0569201857E-3, 2.2212199401E-3,
                      4.2872801423E-3, -4.2019999819E-4],
                     [1.2619999470E-3, 5.5893999524E-3, 5.5381999118E-4,
                      -9.6372198313E-3, -1.6485679895E-2, 1.4385120012E-2,
                      9.0580143034E-2, 0.1773651242, 0.2120374441,
                      0.1773651242, 9.0580143034E-2, 1.4385120012E-2,
                      -1.6485679895E-2, -9.6372198313E-3, 5.5381999118E-4,
                      5.5893999524E-3, 1.2619999470E-3],
                     [-4.2019999819E-4, 4.2872801423E-3, 2.2212199401E-3,
                      -9.0569201857E-3, -1.7508180812E-2, 3.9556599222E-3,
                      6.8065837026E-2, 0.1445499808, 0.1773651242,
                      0.1445499808, 6.8065837026E-2, 3.9556599222E-3,
                      -1.7508180812E-2, -9.0569201857E-3, 2.2212199401E-3,
                      4.2872801423E-3, -4.2019999819E-4],
                     [-2.5168000138E-4, 2.8898599558E-3, 4.1121998802E-3,
                      -7.4316998944E-3, -1.8847439438E-2, -1.1091699824E-2,
                      2.1906599402E-2, 6.8065837026E-2, 9.0580143034E-2,
                      6.8065837026E-2, 2.1906599402E-2, -1.1091699824E-2,
                      -1.8847439438E-2, -7.4316998944E-3, 4.1121998802E-3,
                      2.8898599558E-3, -2.5168000138E-4],
                     [-1.5970399836E-3, 2.3255399428E-3, 3.0809799209E-3,
                      -1.7774800071E-3, -1.2604200281E-2, -2.0229380578E-2,
                      -1.1091699824E-2, 3.9556599222E-3, 1.4385120012E-2,
                      3.9556599222E-3, -1.1091699824E-2, -2.0229380578E-2,
                      -1.2604200281E-2, -1.7774800071E-3, 3.0809799209E-3,
                      2.3255399428E-3, -1.5970399836E-3],
                     [-8.0063997302E-4, -1.3688000035E-4, 3.7613599561E-3,
                      3.1846798956E-3, -3.5306399222E-3, -1.2604200281E-2,
                      -1.8847439438E-2, -1.7508180812E-2, -1.6485679895E-2,
                      -1.7508180812E-2, -1.8847439438E-2, -1.2604200281E-2,
                      -3.5306399222E-3, 3.1846798956E-3, 3.7613599561E-3,
                      -1.3688000035E-4, -8.0063997302E-4],
                     [-1.2434000382E-4, 5.6215998484E-4, 2.1605400834E-3,
                      3.1757799443E-3, 3.1846798956E-3, -1.7774800071E-3,
                      -7.4316998944E-3, -9.0569201857E-3, -9.6372198313E-3,
                      -9.0569201857E-3, -7.4316998944E-3, -1.7774800071E-3,
                      3.1846798956E-3, 3.1757799443E-3, 2.1605400834E-3,
                      5.6215998484E-4, -1.2434000382E-4],
                     [-6.7714002216E-4, -5.8146001538E-4, 1.4607800404E-3,
                      2.1605400834E-3, 3.7613599561E-3, 3.0809799209E-3,
                      4.1121998802E-3, 2.2212199401E-3, 5.5381999118E-4,
                      2.2212199401E-3, 4.1121998802E-3, 3.0809799209E-3,
                      3.7613599561E-3, 2.1605400834E-3, 1.4607800404E-3,
                      -5.8146001538E-4, -6.7714002216E-4],
                     [1.2078000145E-4, 4.4606000301E-4, -5.8146001538E-4,
                      5.6215998484E-4, -1.3688000035E-4, 2.3255399428E-3,
                      2.8898599558E-3, 4.2872801423E-3, 5.5893999524E-3,
                      4.2872801423E-3, 2.8898599558E-3, 2.3255399428E-3,
                      -1.3688000035E-4, 5.6215998484E-4, -5.8146001538E-4,
                      4.4606000301E-4, 1.2078000145E-4],
                     [-4.3500000174E-5, 1.2078000145E-4, -6.7714002216E-4,
                      -1.2434000382E-4, -8.0063997302E-4, -1.5970399836E-3,
                      -2.5168000138E-4, -4.2019999819E-4, 1.2619999470E-3,
                      -4.2019999819E-4, -2.5168000138E-4, -1.5970399836E-3,
                      -8.0063997302E-4, -1.2434000382E-4, -6.7714002216E-4,
                      1.2078000145E-4, -4.3500000174E-5]]))
    # Oriented band-pass kernels; the trailing .T puts one flattened 9x9
    # kernel per column (4 orientations -> 4 columns).
    filters['bfilts'] = (
        numpy.array([[-8.1125000725E-4, 4.4451598078E-3, 1.2316980399E-2,
                      1.3955879956E-2, 1.4179450460E-2, 1.3955879956E-2,
                      1.2316980399E-2, 4.4451598078E-3, -8.1125000725E-4,
                      3.9103501476E-3, 4.4565401040E-3, -5.8724298142E-3,
                      -2.8760801069E-3, 8.5267601535E-3, -2.8760801069E-3,
                      -5.8724298142E-3, 4.4565401040E-3, 3.9103501476E-3,
                      1.3462699717E-3, -3.7740699481E-3, 8.2581602037E-3,
                      3.9442278445E-2, 5.3605638444E-2, 3.9442278445E-2,
                      8.2581602037E-3, -3.7740699481E-3, 1.3462699717E-3,
                      7.4700999539E-4, -3.6522001028E-4, -2.2522680461E-2,
                      -0.1105690673, -0.1768419296, -0.1105690673,
                      -2.2522680461E-2, -3.6522001028E-4, 7.4700999539E-4,
                      0.0000000000, 0.0000000000, 0.0000000000,
                      0.0000000000, 0.0000000000, 0.0000000000,
                      0.0000000000, 0.0000000000, 0.0000000000,
                      -7.4700999539E-4, 3.6522001028E-4, 2.2522680461E-2,
                      0.1105690673, 0.1768419296, 0.1105690673,
                      2.2522680461E-2, 3.6522001028E-4, -7.4700999539E-4,
                      -1.3462699717E-3, 3.7740699481E-3, -8.2581602037E-3,
                      -3.9442278445E-2, -5.3605638444E-2, -3.9442278445E-2,
                      -8.2581602037E-3, 3.7740699481E-3, -1.3462699717E-3,
                      -3.9103501476E-3, -4.4565401040E-3, 5.8724298142E-3,
                      2.8760801069E-3, -8.5267601535E-3, 2.8760801069E-3,
                      5.8724298142E-3, -4.4565401040E-3, -3.9103501476E-3,
                      8.1125000725E-4, -4.4451598078E-3, -1.2316980399E-2,
                      -1.3955879956E-2, -1.4179450460E-2, -1.3955879956E-2,
                      -1.2316980399E-2, -4.4451598078E-3, 8.1125000725E-4],
                     [0.0000000000, -8.2846998703E-4, -5.7109999034E-5,
                      4.0110000555E-5, 4.6670897864E-3, 8.0871898681E-3,
                      1.4807609841E-2, 8.6204400286E-3, -3.1221499667E-3,
                      8.2846998703E-4, 0.0000000000, -9.7479997203E-4,
                      -6.9718998857E-3, -2.0865600090E-3, 2.3298799060E-3,
                      -4.4814897701E-3, 1.4917500317E-2, 8.6204400286E-3,
                      5.7109999034E-5, 9.7479997203E-4, 0.0000000000,
                      -1.2145539746E-2, -2.4427289143E-2, 5.0797060132E-2,
                      3.2785870135E-2, -4.4814897701E-3, 1.4807609841E-2,
                      -4.0110000555E-5, 6.9718998857E-3, 1.2145539746E-2,
                      0.0000000000, -0.1510555595, -8.2495503128E-2,
                      5.0797060132E-2, 2.3298799060E-3, 8.0871898681E-3,
                      -4.6670897864E-3, 2.0865600090E-3, 2.4427289143E-2,
                      0.1510555595, 0.0000000000, -0.1510555595,
                      -2.4427289143E-2, -2.0865600090E-3, 4.6670897864E-3,
                      -8.0871898681E-3, -2.3298799060E-3, -5.0797060132E-2,
                      8.2495503128E-2, 0.1510555595, 0.0000000000,
                      -1.2145539746E-2, -6.9718998857E-3, 4.0110000555E-5,
                      -1.4807609841E-2, 4.4814897701E-3, -3.2785870135E-2,
                      -5.0797060132E-2, 2.4427289143E-2, 1.2145539746E-2,
                      0.0000000000, -9.7479997203E-4, -5.7109999034E-5,
                      -8.6204400286E-3, -1.4917500317E-2, 4.4814897701E-3,
                      -2.3298799060E-3, 2.0865600090E-3, 6.9718998857E-3,
                      9.7479997203E-4, 0.0000000000, -8.2846998703E-4,
                      3.1221499667E-3, -8.6204400286E-3, -1.4807609841E-2,
                      -8.0871898681E-3, -4.6670897864E-3, -4.0110000555E-5,
                      5.7109999034E-5, 8.2846998703E-4, 0.0000000000],
                     [8.1125000725E-4, -3.9103501476E-3, -1.3462699717E-3,
                      -7.4700999539E-4, 0.0000000000, 7.4700999539E-4,
                      1.3462699717E-3, 3.9103501476E-3, -8.1125000725E-4,
                      -4.4451598078E-3, -4.4565401040E-3, 3.7740699481E-3,
                      3.6522001028E-4, 0.0000000000, -3.6522001028E-4,
                      -3.7740699481E-3, 4.4565401040E-3, 4.4451598078E-3,
                      -1.2316980399E-2, 5.8724298142E-3, -8.2581602037E-3,
                      2.2522680461E-2, 0.0000000000, -2.2522680461E-2,
                      8.2581602037E-3, -5.8724298142E-3, 1.2316980399E-2,
                      -1.3955879956E-2, 2.8760801069E-3, -3.9442278445E-2,
                      0.1105690673, 0.0000000000, -0.1105690673,
                      3.9442278445E-2, -2.8760801069E-3, 1.3955879956E-2,
                      -1.4179450460E-2, -8.5267601535E-3, -5.3605638444E-2,
                      0.1768419296, 0.0000000000, -0.1768419296,
                      5.3605638444E-2, 8.5267601535E-3, 1.4179450460E-2,
                      -1.3955879956E-2, 2.8760801069E-3, -3.9442278445E-2,
                      0.1105690673, 0.0000000000, -0.1105690673,
                      3.9442278445E-2, -2.8760801069E-3, 1.3955879956E-2,
                      -1.2316980399E-2, 5.8724298142E-3, -8.2581602037E-3,
                      2.2522680461E-2, 0.0000000000, -2.2522680461E-2,
                      8.2581602037E-3, -5.8724298142E-3, 1.2316980399E-2,
                      -4.4451598078E-3, -4.4565401040E-3, 3.7740699481E-3,
                      3.6522001028E-4, 0.0000000000, -3.6522001028E-4,
                      -3.7740699481E-3, 4.4565401040E-3, 4.4451598078E-3,
                      8.1125000725E-4, -3.9103501476E-3, -1.3462699717E-3,
                      -7.4700999539E-4, 0.0000000000, 7.4700999539E-4,
                      1.3462699717E-3, 3.9103501476E-3, -8.1125000725E-4],
                     [3.1221499667E-3, -8.6204400286E-3, -1.4807609841E-2,
                      -8.0871898681E-3, -4.6670897864E-3, -4.0110000555E-5,
                      5.7109999034E-5, 8.2846998703E-4, 0.0000000000,
                      -8.6204400286E-3, -1.4917500317E-2, 4.4814897701E-3,
                      -2.3298799060E-3, 2.0865600090E-3, 6.9718998857E-3,
                      9.7479997203E-4, -0.0000000000, -8.2846998703E-4,
                      -1.4807609841E-2, 4.4814897701E-3, -3.2785870135E-2,
                      -5.0797060132E-2, 2.4427289143E-2, 1.2145539746E-2,
                      0.0000000000, -9.7479997203E-4, -5.7109999034E-5,
                      -8.0871898681E-3, -2.3298799060E-3, -5.0797060132E-2,
                      8.2495503128E-2, 0.1510555595, -0.0000000000,
                      -1.2145539746E-2, -6.9718998857E-3, 4.0110000555E-5,
                      -4.6670897864E-3, 2.0865600090E-3, 2.4427289143E-2,
                      0.1510555595, 0.0000000000, -0.1510555595,
                      -2.4427289143E-2, -2.0865600090E-3, 4.6670897864E-3,
                      -4.0110000555E-5, 6.9718998857E-3, 1.2145539746E-2,
                      0.0000000000, -0.1510555595, -8.2495503128E-2,
                      5.0797060132E-2, 2.3298799060E-3, 8.0871898681E-3,
                      5.7109999034E-5, 9.7479997203E-4, -0.0000000000,
                      -1.2145539746E-2, -2.4427289143E-2, 5.0797060132E-2,
                      3.2785870135E-2, -4.4814897701E-3, 1.4807609841E-2,
                      8.2846998703E-4, -0.0000000000, -9.7479997203E-4,
                      -6.9718998857E-3, -2.0865600090E-3, 2.3298799060E-3,
                      -4.4814897701E-3, 1.4917500317E-2, 8.6204400286E-3,
                      0.0000000000, -8.2846998703E-4, -5.7109999034E-5,
                      4.0110000555E-5, 4.6670897864E-3, 8.0871898681E-3,
                      1.4807609841E-2, 8.6204400286E-3, -3.1221499667E-3]]).T)
    return filters
| StarcoderdataPython |
6691274 | import os
import bcrypt
import pymongo
from flask import flash, Flask, render_template, request, redirect
from flask.views import MethodView
from flask_admin import Admin, AdminIndexView
from flask_admin.contrib import rediscli
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin.consts import ICON_TYPE_GLYPH
from flask_login import LoginManager, UserMixin, login_user, current_user, logout_user
from redis import Redis
from exceptions import LoginRequiredException
from views import UserView
# --- Application bootstrap ----------------------------------------------
# Working directory: the FileAdmin view below serves files from <cwd>/files.
cwd = os.getcwd()
login_manager = LoginManager()
app = Flask(__name__)
login_manager.init_app(app)
# NOTE(review): placeholder secret left by key redaction -- must be replaced
# with a real random value before deployment (sessions and flash messages
# are signed with it).
app.config["SECRET_KEY"] = b"\<KEY>"
# Cap request bodies (file uploads) at 30 MiB.
app.config["MAX_CONTENT_LENGTH"] = 30 * 1024 * 1024
app.config["title"] = "MicroManager"
# MongoDB client with default host/port; the "test" database backs user
# storage (db.user collection is queried throughout this module).
conn = pymongo.MongoClient()
db = conn.test
"""
Login Block
主要构建用户以及陌生用户等。
"""
# (Block string above: "Login Block -- builds users and anonymous users.")
class User(UserMixin):
    """
    Flask-Login user object backed by a MongoDB ``user`` document.

    Registered globally via the ``login_manager`` user loader.  An instance
    with ``uid is None`` represents an anonymous visitor (``User.get``
    returns such an instance for unknown ids).
    """
    def __init__(
        self,
        name=None,
        uid=None,
        hpwd=None,
        is_admin=None,
        ctime=None,
        utime=None,
        **kwargs
    ):
        # The signature mirrors the Mongo document fields so a full document
        # can be splatted in (cls(**user)); only name/uid/is_admin are kept.
        self.name = name
        self.uid = uid
        self._is_admin = is_admin

    @property
    def is_authenticated(self):
        # BUG FIX: this property was misspelled "is_authenicated", so it
        # never overrode UserMixin.is_authenticated (which is always True)
        # and anonymous User instances looked authenticated.
        return self.uid is not None

    # Backward-compat alias for any caller that used the old misspelling.
    is_authenicated = is_authenticated

    @property
    def is_active(self):
        return self.uid is not None

    @property
    def is_anonymous(self):
        return self.uid is None

    @property
    def is_admin(self):
        return self._is_admin

    def get_id(self):
        """Return the id Flask-Login stores in the session (None when anonymous)."""
        return self.uid or None

    @classmethod
    def get(cls, user_id):
        """Load a user by uid from MongoDB; unknown ids yield an anonymous User."""
        user = db.user.find_one({"uid": user_id})
        if user is None:
            return cls()
        else:
            return cls(**user)
@login_manager.user_loader
def load_user(user_id):
    """
    Flask-Login user loader: rebuild the User object for the uid stored in
    the session cookie.  Unknown ids yield an anonymous User instance.
    """
    return User.get(user_id)
class LoginView(MethodView):
    """Login form: GET renders the page, POST verifies the credentials."""

    def get(self):
        # Already signed in -> go straight to the admin dashboard.
        if current_user.is_authenticated:
            return redirect("/admin")
        return render_template("login.html", title=app.config["title"])

    def post(self):
        username = request.form.get("username", "")
        password = request.form.get("password", "")
        user = db.user.find_one({"name": username})
        if user is None:
            flash("用户名不可用", "danger")
            return redirect("/login")
        # BUG FIX: bcrypt.checkpw() raises ValueError on an empty or
        # malformed hash, so a user document without "hpwd" used to crash
        # this view.  Treat an unusable stored hash like a wrong password.
        # NOTE(review): assumes "hpwd" is persisted as a utf-8 str --
        # confirm it is not stored as raw bytes.
        hashed = user.get("hpwd", "")
        if hashed and bcrypt.checkpw(
            password.encode("utf8"),
            hashed.encode("utf8")
        ):
            _user = User(**user)
            login_user(_user)
            # 鉴权通过可以登陆用户 (credentials verified -- start the session)
            return redirect("/admin")
        else:
            # 鉴权失败 (authentication failed)
            flash("密码错误", "danger")
            return redirect("/login")
@app.route("/logout", methods=["GET", "POST"])
def logout():
    # End the Flask-Login session, then send the visitor back to the login
    # page with a "logged out successfully" notice.
    logout_user()
    flash("登出成功", "info")
    return redirect("/login")
# Register the class-based login view under the "login" endpoint.
app.add_url_rule("/login/", view_func=LoginView.as_view("login"))
"""
Admin Block
管理系统模块
主要涵盖:
- 数据库管理
- 文件夹管理
- Redis管理等
"""
# (Block string above: "Admin module -- covers database, folder and Redis
# management".)
# Flask-Admin site using a Bootstrap 4 theme and a custom master template.
admin = Admin(
    app,
    name="MicroManager",
    base_template="admin/ex_master.html",
    template_mode="bootstrap4",
    index_view=AdminIndexView(
        name="主页",
        menu_icon_type=ICON_TYPE_GLYPH,
        menu_icon_value="bi-house"
    ))
# File manager rooted at <cwd>/files.
admin.add_view(FileAdmin(
    os.path.join(cwd, "files"),
    name="文件管理",
    menu_icon_type=ICON_TYPE_GLYPH,
    menu_icon_value="bi-files"
))
# User management view backed by the Mongo "user" collection.
admin.add_view(
    UserView(
        db.user,
        "用户管理",
        menu_icon_type=ICON_TYPE_GLYPH,
        menu_icon_value="bi-people"
    ))
# Redis console; connects to localhost with default client settings.
admin.add_view(rediscli.RedisCli(
    Redis(),
    "Redis管理"
))
# check login
@app.before_request
def login_required():
    """
    Global guard: require an authenticated user for every endpoint except
    the login view and Flask's built-in static file endpoint.
    """
    endpoint = request.endpoint
    # BUG FIX: "static" must be exempt, otherwise anonymous visitors cannot
    # even load the login page's own CSS/JS assets.  endpoint is None for
    # unmatched URLs (404) -- let Flask handle those itself.
    if endpoint not in ("login", "static") and endpoint is not None:
        if current_user.is_anonymous:
            raise LoginRequiredException
@app.errorhandler(LoginRequiredException)
def handle_login_required_exception(e):
    # Translate the exception raised by login_required() into a flash
    # message ("please log in first") plus a redirect to the login form.
    flash("请先登陆系统", "danger")
    return redirect("/login")
| StarcoderdataPython |
5078149 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .coord_predict import RelativePolarCoordPredictor as CoordPredictor
class SCLModule(nn.Module):
    """
    Structure learning module.

    Projects a backbone feature map into a structure embedding with a 1x1
    convolution (sandwiched between ReLUs) and scores it with a
    relative-polar-coordinate predictor, returning the resulting loss.
    """

    def __init__(self, size, feature_dim, structure_dim):
        super().__init__()
        self.size = size
        self.feature_dim = feature_dim
        self.structure_dim = structure_dim
        # 1x1 conv: backbone channels -> structure channels.
        self.get_structure_feat = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(feature_dim, structure_dim, 1, 1),
            nn.ReLU(),
        )
        self.coord_predictor = CoordPredictor(in_dim=structure_dim, size=size)

    def forward(self, feature):
        """Return the coordinate-prediction loss for ``feature``."""
        return self.coord_predictor(self.get_structure_feat(feature))
| StarcoderdataPython |
366619 | <gh_stars>0
""" This holds all implementations of the SingleValueField parent class """
import numpy as np
from .containers import PLUMED_Group
from .specifications import SingleValueField
class PLUMED_Distance(SingleValueField):
    """
    Geometric distance between two PLUMED_Group objects, expressible as a
    PLUMED input string.  Implements the SingleValueField interface.
    """

    def __init__(self, group_1: PLUMED_Group, group_2: PLUMED_Group, label: str):
        """Initialise with the two groups whose distance is tracked.

        :param group_1: First group of two defining the distance.
        :type group_1: PLUMED_Group
        :param group_2: Second group of two defining the distance.
        :type group_2: PLUMED_Group
        :param label: Label for this PLUMED_Distance.
        :type label: str
        """
        self.group_1 = group_1
        self.group_2 = group_2
        super().__init__(label)

    def get_plumed_str(self):
        """Return this distance as a PLUMED input-script line.

        :rtype: str
        """
        return f"{self.label}: DISTANCE ATOMS={self.group_1.label},{self.group_2.label}"

    def get_value(self):
        """Return the current-frame distance between the two group centres, in nm.

        :rtype: float
        """
        delta = (self.group_1.atom_group.center_of_geometry()
                 - self.group_2.atom_group.center_of_geometry())
        # Coordinates are in Angstrom; divide by 10 for nm.
        return np.sqrt(np.sum(delta ** 2)) / 10
class PLUMED_Combine(SingleValueField):
    """Linear combination of SingleValueFields, expressible as a PLUMED COMBINE line."""

    def __init__(self, arguments: list, coefficients: list, label: str):
        """Initialise with argument SingleValueFields and their coefficients.

        :param arguments: SingleValueFields entering the linear combination.
        :type arguments: List<SingleValueField>
        :param coefficients: One coefficient per argument.
        :type coefficients: List<float>
        :param label: Label for this PLUMED_Combine.
        :type label: str
        :raises ValueError: if the two lists differ in length.
        """
        super().__init__(label)
        if len(arguments) != len(coefficients):
            raise ValueError("Arguments and Coefficients need to have the same length.")
        self.arguments = arguments
        self.coefficients = coefficients

    def get_plumed_str(self):
        """Return this combination as a PLUMED input-script line.

        :rtype: str
        """
        arg_labels = ",".join(field.label for field in self.arguments)
        coeff_strs = ",".join(str(coeff) for coeff in self.coefficients)
        return f"{self.label}: COMBINE ARG={arg_labels} COEFFICIENTS={coeff_strs} PERIODIC=NO"

    def get_value(self):
        """Return the instantaneous value of the linear combination.

        :rtype: float
        """
        values = np.array([field.get_value() for field in self.arguments])
        return np.sum(values.dot(self.coefficients))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.