id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1860298 | <filename>rlpy/domains/domain.py
"""Domain base class"""
from abc import ABC, abstractmethod
from copy import deepcopy
import logging
import numpy as np
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
]
__license__ = "BSD 3-Clause"
class Domain(ABC):
    """
    The Domain controls the environment in which the
    :py:class:`~rlpy.agents.agent.Agent` resides as well as the reward function the
    Agent is subject to.

    The Agent interacts with the Domain in discrete timesteps called
    *episodes* (see :py:meth:`~rlpy.domains.domain.Domain.step`).
    At each step, the Agent informs the Domain what indexed action it wants to
    perform. The Domain then calculates the effects this action has on the
    environment and updates its internal state accordingly.
    It also returns the new state to the agent, along with a reward/penalty,
    and whether or not the episode is over (thus resetting the agent to its
    initial state).

    This process repeats until the Domain determines that the Agent has either
    completed its goal or failed.
    The :py:class:`~rlpy.experiments.experiment.Experiment` controls this cycle.

    Because agents are designed to be agnostic to the Domain that they are
    acting within and the problem they are trying to solve, the Domain needs
    to completely describe everything related to the task. Therefore, the
    Domain must not only define the observations that the Agent receives,
    but also the states it can be in, the actions that it can perform, and the
    relationships between the three.

    The Domain class is a base class that provides the basic framework for all
    domains. It provides the methods and attributes that allow child classes
    to interact with the Agent and Experiment classes within the RLPy library.
    domains should also provide methods that provide visualization of the
    Domain itself and of the Agent's learning
    (:py:meth:`~rlpy.domains.domain.Domain.show_domain` and
    :py:meth:`~rlpy.domains.domain.Domain.show_learning` respectively)

    All new domain implementations should inherit from
    :py:class:`~rlpy.domains.domain.Domain`.

    .. note::
        Though the state *s* can take on almost any value, if a dimension is not
        marked as 'continuous' then it is assumed to be integer.
    """

    def __init__(
        self,
        num_actions,
        statespace_limits,
        discount_factor=0.9,
        continuous_dims=None,
        episode_cap=None,
    ):
        """
        :param num_actions: The number of Actions the agent can perform
        :param statespace_limits: Limits of each dimension of the state space.
            Each row corresponds to one dimension and has two elements [min, max]
        :param discount_factor: The discount factor by which rewards are reduced
        :param continuous_dims: List of the continuous dimensions of the domain
        :param episode_cap: The cap used to bound each episode (return to state 0 after)
        """
        self.num_actions = num_actions
        # Untouched copy of the caller-supplied limits; self.statespace_limits
        # is widened by _extendDiscreteDimensions() further down.
        self.raw_statespace_limits = statespace_limits.copy()
        self.statespace_limits = statespace_limits
        self.discount_factor = float(discount_factor)
        if continuous_dims is None:
            # Fully discrete state space: the state count is the product of the
            # inclusive value counts per dimension.
            self.num_states = int(
                np.prod(self.statespace_limits[:, 1] - self.statespace_limits[:, 0] + 1)
            )
            self.continuous_dims = []
        else:
            # At least one continuous dimension -> uncountable state space.
            self.num_states = np.inf
            self.continuous_dims = continuous_dims
        self.episode_cap = episode_cap
        self.random_state = np.random.RandomState()
        self.state_space_dims = self.statespace_limits.shape[0]
        # For discrete domains, limits should be extended by half on each side so that
        # the mapping becomes identical with continuous states.
        # The original limits will be saved in self.discrete_statespace_limits.
        self._extendDiscreteDimensions()
        self.logger = logging.getLogger("rlpy.domains." + self.__class__.__name__)
        self.seed = None
        self.performance = False

    def set_seed(self, seed):
        """
        Set random seed used by this domain's private RandomState.

        :param seed: Seed value forwarded to numpy's RandomState.seed().
        """
        self.seed = seed
        self.random_state.seed(seed)

    def __str__(self):
        # Human-readable summary of the domain's key parameters.
        res = """{self.__class__}:
------------
Dimensions: {self.state_space_dims}
|S|: {self.num_states}
|A|: {self.num_actions}
Episode Cap:{self.episode_cap}
Gamma: {self.discount_factor}
""".format(
            self=self
        )
        return res

    def show(self, a=None, representation=None):
        """
        Shows a visualization of the current state of the domain and that of
        learning.
        See :py:meth:`~rlpy.domains.domain.Domain.show_domain()` and
        :py:meth:`~rlpy.domains.domain.Domain.show_learning()`,
        both called by this method.

        .. note::
            Some domains override this function to allow an optional *s*
            parameter to be passed, which overrides the *self.state* internal
            to the domain; however, not all have this capability.

        :param a: The action being performed
        :param representation: The learned value function
            :py:class:`~rlpy.Representation.Representation.Representation`.
        """
        # Snapshot the RNG so that any randomness used purely for rendering
        # does not perturb the simulation's random sequence.
        self.saveRandomState()
        self.show_domain(a=a)
        self.show_learning(representation=representation)
        self.loadRandomState()

    def show_domain(self, a=0):
        """
        *Abstract Method:*

        Shows a visualization of the current state of the domain.

        :param a: The action being performed.
        """
        pass

    def show_learning(self, representation):
        """
        *Abstract Method:*

        Shows a visualization of the current learning,
        usually in the form of a gridded value function and policy.
        It is thus really only possible for 1 or 2-state domains.

        :param representation: the learned value function
            :py:class:`~rlpy.Representation.Representation.Representation`
            to generate the value function / policy plots.
        """
        pass

    def close_visualizations(self):
        """Close matplotlib windows."""
        pass

    @abstractmethod
    def s0(self):
        """
        Begins a new episode and returns the initial observed state of the Domain.
        Sets self.state accordingly.

        :return: A numpy array that defines the initial domain state.
        """
        pass

    def possible_actions(self, s=None):
        """
        The default version returns an enumeration of all actions [0, 1, 2...].
        We suggest overriding this method in your domain, especially if not all
        actions are available from all states.

        :param s: The state to query for possible actions
            (overrides self.state if ``s != None``)
        :return: A numpy array containing every possible action in the domain.

        .. note::
            *These actions must be integers*; internally they may be handled
            using other datatypes. See :py:meth:`~rlpy.tools.general_tools.vec2id`
            and :py:meth:`~rlpy.tools.general_tools.id2vec` for converting between
            integers and multidimensional quantities.
        """
        return np.arange(self.num_actions)

    # TODO: change 'a' to be 'aID' to make it clearer when we refer to
    # actions vs. integer IDs of actions? They aren't always interchangeable.
    @abstractmethod
    def step(self, a):
        """
        *Abstract Method:*

        Performs the action *a* and updates the Domain
        state accordingly.
        Returns the reward/penalty the agent obtains for
        the state/action pair determined by *Domain.state* and the parameter
        *a*, the next state into which the agent has transitioned, and a
        boolean determining whether a goal or fail state has been reached.

        .. note::
            domains often specify stochastic internal state transitions, such
            that the result of a (state,action) pair might vary on different
            calls (see also the :py:meth:`~rlpy.domains.domain.Domain.sample_step`
            method).
            Be sure to look at unique noise parameters of each domain if you
            require deterministic transitions.

        :param a: The action to perform.

        .. warning::
            The action *a* **must** be an integer >= 0, and might better be
            called the "actionID". See the class description
            :py:class:`~rlpy.domains.domain.Domain` above.

        :return: The tuple (r, ns, t) =
            (Reward [value], next observed state, is_terminal [boolean]).
            NOTE(review): the original docstring also mentioned a p_actions
            element, but sample_step() in this class unpacks three values —
            confirm against concrete subclasses.
        """
        pass

    def saveRandomState(self):
        """
        Stores the state of the the random generator.
        Using loadRandomState this state can be loaded.
        """
        self.random_state_backup = self.random_state.get_state()

    def loadRandomState(self):
        """
        Loads the random state stored in the self.random_state_backup
        """
        self.random_state.set_state(self.random_state_backup)

    def is_terminal(self):
        """
        Returns ``True`` if the current Domain.state is a terminal one, ie,
        one that ends the episode. This often results from either a failure
        or goal state being achieved.

        The default definition does not terminate.

        :return: ``True`` if the state is a terminal state, ``False`` otherwise.
        """
        return False

    def _extendDiscreteDimensions(self):
        """
        Offsets discrete dimensions by 0.5 so that binning works properly.

        .. warning::
            This code is used internally by the Domain base class.
            **It should only be called once**
        """
        # Store the original limits for other types of calculations
        self.discrete_statespace_limits = self.statespace_limits
        # astype returns a copy, so discrete_statespace_limits keeps the
        # original (integer) values while the float copy is widened below.
        self.statespace_limits = self.statespace_limits.astype("float")
        for d in range(self.state_space_dims):
            if d not in self.continuous_dims:
                self.statespace_limits[d, 0] += -0.5
                self.statespace_limits[d, 1] += +0.5

    @property
    def statespace_width(self):
        # Per-dimension width of the (possibly half-extended) state space.
        return self.statespace_limits[:, 1] - self.statespace_limits[:, 0]

    @property
    def discrete_statespace_width(self):
        # Per-dimension width using the original, unextended limits.
        return (
            self.discrete_statespace_limits[:, 1]
            - self.discrete_statespace_limits[:, 0]
        )

    def all_states(self):
        """Returns an iterator of all states"""
        raise NotImplementedError(f"All states is not implemented for {type(self)}")

    def sample_step(self, a, num_samples):
        """
        Sample a set number of next states and rewards from the domain.
        This function is used when state transitions are stochastic;
        deterministic transitions will yield an identical result regardless
        of *num_samples*, since repeatedly sampling a (state,action) pair
        will always yield the same tuple (r,ns,terminal).
        See :py:meth:`~rlpy.domains.domain.Domain.step`.

        :param a: The action to attempt
        :param num_samples: The number of next states and rewards to be sampled.
        :return: A tuple of arrays ( S[], A[] ) where
            *S* is an array of next states,
            *A* is an array of rewards for those states.
        """
        next_states = []
        rewards = []
        # Remember the current state so each sample starts from the same spot.
        s = self.state.copy()
        for i in range(num_samples):
            r, ns, terminal = self.step(a)
            # Restore the pre-step state; only the sampled (r, ns) are kept.
            self.state = s.copy()
            next_states.append(ns)
            rewards.append(r)
        return np.array(next_states), np.array(rewards)

    def __copy__(self):
        # Shallow copy: a new instance sharing all attribute references.
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result

    def __deepcopy__(self, memo):
        # Deep copy that skips the logger (not meaningfully copyable) and
        # degrades gracefully on attributes deepcopy cannot handle.
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in list(self.__dict__.items()):
            if k == "logger":
                continue
            # This block handles matplotlib transformNode objects,
            # which cannot be copied
            try:
                setattr(result, k, deepcopy(v, memo))
            except Exception:
                if hasattr(v, "frozen"):
                    # matplotlib transforms expose frozen() as an immutable copy.
                    setattr(result, k, v.frozen())
                else:
                    import warnings

                    warnings.warn("Skip {} when copying".format(k))
        return result
| StarcoderdataPython |
from math import ceil


def solve(n, m, a):
    """Compute the answer for one test case.

    Sentinels 0 and n+1 are added around the m given positions; the list is
    sorted and the m+1 gaps between consecutive positions are measured.
    With ``md`` the smallest non-zero gap (falling back to n when every gap
    is zero), the result is the sum of ceil(gap / md) over all gaps.

    :param n: upper bound of the range (positions live in 1..n)
    :param m: number of given positions
    :param a: list of the m positions
    :return: the computed count (int)
    """
    positions = sorted(a + [0, n + 1])
    gaps = [positions[i + 1] - positions[i] - 1 for i in range(m + 1)]
    # Smallest positive gap; if all gaps are zero, md is never used as a
    # divisor of a non-zero value, so the fallback n is safe.
    md = min((g for g in gaps if g > 0), default=n)
    return sum(ceil(g / md) for g in gaps)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer blocks on stdin.
    n, m = map(int, input().split())
    a = list(map(int, input().split()))
    print(solve(n, m, a))
| StarcoderdataPython |
6433715 | <filename>cafeteria/patterns/borg.py<gh_stars>1-10
class BorgStateManager(object):
    """
    State registry backing Borg classes and their subclasses.

    Each class is mapped (by the class object itself) to its own state dict,
    which is what lets a child class keep state distinct from its parent,
    siblings and children. The manager is itself a Borg: every instance
    aliases the one shared registry.
    """

    __shared_state = {}

    def __init__(self):
        # Alias this instance's attribute dict to the shared registry.
        self.__dict__ = self.__shared_state

    @classmethod
    def get_state(cls, clz):
        """
        Return the state dict registered for *clz*, creating it on first use.

        If *clz* defines an ``init_state`` callable, its result seeds the
        state; otherwise an empty dict is used.

        :param clz: types.ClassType
        :return: Class state.
        :rtype: dict
        """
        registry = cls.__shared_state
        if clz not in registry:
            if hasattr(clz, "init_state"):
                seeded = clz.init_state()
            else:
                seeded = {}
            registry[clz] = seeded
        return registry[clz]
class Borg(object):
    """
    Base class implementing the Borg pattern. Usable directly or through
    inheritance; per-class state separation for children and grandchildren
    is delegated to `cafeteria.patterns.borg.BorgStateManager`.

    See http://code.activestate.com/recipes/66531-singleton-we-dont-need-no-stinkin-singleton-the-bo/ for more # noqa
    information regarding the Borg Pattern.
    """

    def __init__(self):
        # Every instance of a given class aliases that class's shared state.
        self.__dict__ = self._shared_state

    @classmethod
    def init_state(cls):
        """Initial per-class state; subclasses may override to seed values."""
        return {}

    @property
    def _shared_state(self):
        """The state dict shared by this concrete class, via the manager."""
        return BorgStateManager.get_state(type(self))
| StarcoderdataPython |
8182392 | """
iTunes App Store Scraper
"""
import requests
import json
import time
import re
from datetime import datetime
from urllib.parse import quote_plus
from itunes_app_scraper.util import AppStoreException, AppStoreCollections, AppStoreCategories, AppStoreMarkets
class Regex:
    """Pre-compiled regular expressions used by the scraper."""

    # Matches one rating-count span in the reviews HTML. Raw string so the
    # \s escapes reach the regex engine explicitly instead of relying on
    # Python preserving unknown string escapes (which raises a
    # DeprecationWarning on modern interpreters). The compiled pattern is
    # byte-identical to the previous non-raw form.
    STARS = re.compile(r'<span class="total">[\s\S]*?</span>')
class AppStoreScraper:
    """
    iTunes App Store scraper

    This class implements methods to retrieve information about iTunes App
    Store apps in various ways. The methods are fairly straightforward. Much
    has been adapted from the javascript-based app-store-scraper package, which
    can be found at https://github.com/facundoolano/app-store-scraper.
    """

    def get_app_ids_for_query(self, term, num=50, page=1, country="nl", lang="nl"):
        """
        Retrieve suggested app IDs for search query

        :param str term: Search query
        :param int num: Amount of items to return per page, default 50
        :param int page: Amount of pages to return
        :param str country: Two-letter country code of store to search in,
            default 'nl'
        :param str lang: Language code to search with, default 'nl'
        :return list: List of App IDs returned for search query
        :raises AppStoreException: On empty term, connection failure, or an
            unparseable store response.
        """
        if term is None or term == "":
            raise AppStoreException("No term was given")
        url = "https://search.itunes.apple.com/WebObjects/MZStore.woa/wa/search?clientApplication=Software&media=software&term="
        url += quote_plus(term)
        # The store paginates internally; request page*num and slice below.
        amount = int(num) * int(page)
        country = self.get_store_id_for_country(country)
        headers = {
            "X-Apple-Store-Front": "%s,24 t:native" % country,
            "Accept-Language": lang
        }
        try:
            result = requests.get(url, headers=headers).json()
        # NOTE(review): this catches the *builtin* ConnectionError;
        # requests.exceptions.ConnectionError does not subclass it, so
        # network failures may propagate uncaught — confirm intent.
        except ConnectionError as ce:
            raise AppStoreException("Cannot connect to store: {0}".format(str(ce)))
        except json.JSONDecodeError:
            raise AppStoreException("Could not parse app store response")
        return [app["id"] for app in result["bubbles"][0]["results"][:amount]]

    def get_app_ids_for_collection(self, collection="", category="", num=50, country="nl", lang=""):
        """
        Retrieve app IDs in given App Store collection

        Collections are e.g. 'top free iOS apps'.

        :param str collection: Collection ID. One of the values in
            `AppStoreCollections`.
        :param int category: Category ID. One of the values in
            AppStoreCategories. Can be left empty.
        :param int num: Amount of results to return. Defaults to 50.
        :param str country: Two-letter country code for the store to search in.
            Defaults to 'nl'.
        :param str lang: Dummy argument for compatibility. Unused.
        :return: List of App IDs in collection.
        """
        if not collection:
            collection = AppStoreCollections.TOP_FREE_IOS
        country = self.get_store_id_for_country(country)
        params = (collection, category, num, country)
        url = "http://ax.itunes.apple.com/WebObjects/MZStoreServices.woa/ws/RSS/%s/%s/limit=%s/json?s=%s" % params
        try:
            result = requests.get(url).json()
        except json.JSONDecodeError:
            raise AppStoreException("Could not parse app store response")
        return [entry["id"]["attributes"]["im:id"] for entry in result["feed"]["entry"]]

    def get_app_ids_for_developer(self, developer_id, country="nl", lang=""):
        """
        Retrieve App IDs linked to given developer

        :param int developer_id: Developer ID
        :param str country: Two-letter country code for the store to search in.
            Defaults to 'nl'.
        :param str lang: Dummy argument for compatibility. Unused.
        :return list: List of App IDs linked to developer
        """
        url = "https://itunes.apple.com/lookup?id=%s&country=%s&entity=software" % (developer_id, country)
        try:
            result = requests.get(url).json()
        except json.JSONDecodeError:
            raise AppStoreException("Could not parse app store response")
        if "results" in result:
            # Lookup results can mix entity types; keep software entries only.
            return [app["trackId"] for app in result["results"] if app["wrapperType"] == "software"]
        else:
            # probably an invalid developer ID
            return []

    def get_similar_app_ids_for_app(self, app_id, country="nl", lang="nl"):
        """
        Retrieve list of App IDs of apps similar to given app

        This one is a bit special because the response is not JSON, but HTML.
        We extract a JSON blob from the HTML which contains the relevant App
        IDs.

        :param app_id: App ID to find similar apps for
        :param str country: Two-letter country code for the store to search in.
            Defaults to 'nl'.
        :param str lang: Language code to search with, default 'nl'
        :return list: List of similar app IDs
        """
        url = "https://itunes.apple.com/us/app/app/id%s" % app_id
        country = self.get_store_id_for_country(country)
        headers = {
            "X-Apple-Store-Front": "%s,32" % country,
            "Accept-Language": lang
        }
        result = requests.get(url, headers=headers).text
        # Cheap substring check before paying for the regex scan.
        if "customersAlsoBoughtApps" not in result:
            return []
        blob = re.search(r"customersAlsoBoughtApps\":\s*(\[[^\]]+\])", result)
        if not blob:
            return []
        try:
            # blob[1] is the captured JSON array of IDs.
            ids = json.loads(blob[1])
        except (json.JSONDecodeError, IndexError):
            return []
        return ids

    def get_app_details(self, app_id, country="nl", lang="", flatten=True, sleep=None):
        """
        Get app details for given app ID

        :param app_id: App ID to retrieve details for. Can be either the
            numerical trackID or the textual BundleID.
        :param str country: Two-letter country code for the store to search in.
            Defaults to 'nl'.
        :param str lang: Dummy argument for compatibility. Unused.
        :param bool flatten: The App Store response may by multi-dimensional.
            This makes it hard to transform into e.g. a CSV,
            so if this parameter is True (its default) the
            response is flattened and any non-scalar values
            are removed from the response.
        :param int sleep: Seconds to sleep before request to prevent being
            temporary blocked if there are many requests in a
            short time. Defaults to None.
        :return dict: App details, as returned by the app store. The result is
            not processed any further, unless `flatten` is True
        :raises AppStoreException: If the response cannot be parsed or no app
            matches the given ID.
        """
        try:
            # Numeric IDs are trackIDs; anything non-numeric is a bundle ID.
            app_id = int(app_id)
            id_field = "id"
        except ValueError:
            id_field = "bundleId"
        url = "https://itunes.apple.com/lookup?%s=%s&country=%s&entity=software" % (id_field, app_id, country)
        try:
            if sleep is not None:
                time.sleep(sleep)
            result = requests.get(url).json()
        except Exception:
            try:
                # handle the retry here.
                # Take an extra sleep as back off and then retry the URL once.
                time.sleep(2)
                result = requests.get(url).json()
            except Exception:
                raise AppStoreException("Could not parse app store response for ID %s" % app_id)
        try:
            app = result["results"][0]
        except IndexError:
            raise AppStoreException("No app found with ID %s" % app_id)
        # 'flatten' app response
        # responses are at most two-dimensional (array within array), so simply
        # join any such values
        if flatten:
            for field in app:
                if isinstance(app[field], list):
                    app[field] = ",".join(app[field])
        return app

    def get_multiple_app_details(self, app_ids, country="nl", lang="", sleep=1):
        """
        Get app details for a list of app IDs

        :param list app_ids: App IDs to retrieve details for
        :param str country: Two-letter country code for the store to search in.
            Defaults to 'nl'.
        :param str lang: Dummy argument for compatibility. Unused.
        :param int sleep: Seconds to sleep before request to prevent being
            temporary blocked if there are many requests in a
            short time. Defaults to 1.
        :return generator: A list (via a generator) of app details
        """
        for app_id in app_ids:
            try:
                yield self.get_app_details(app_id, country=country, lang=lang, sleep=sleep)
            except AppStoreException as ase:
                # Log and skip failing IDs instead of aborting the batch.
                self._log_error(country, str(ase))
                continue

    def get_store_id_for_country(self, country):
        """
        Get store ID for country code

        :param str country: Two-letter country code for the store to search in.
        :return: Apple's numeric store-front ID for the country.
        :raises AppStoreException: If the country code is unknown.
        """
        country = country.upper()
        if hasattr(AppStoreMarkets, country):
            return getattr(AppStoreMarkets, country)
        else:
            raise AppStoreException("Country code not found for {0}".format(country))

    def get_app_ratings(self, app_id, countries=None, sleep=1):
        """
        Get app ratings for given app ID

        :param app_id: App ID to retrieve details for. Can be either the
            numerical trackID or the textual BundleID.
        :param countries: List of countries (lowercase, 2 letter code) or single
            country (e.g. 'de') to generate the rating for.
            If left empty, it defaults to mostly european countries (see below).
        :param int sleep: Seconds to sleep before request to prevent being
            temporary blocked if there are many requests in a
            short time. Defaults to 1.
        :return dict: App ratings summed over all requested countries, keyed
            by star value 1-5.
        """
        dataset = { 1: 0, 2: 0, 3: 0, 4: 0, 5: 0 }
        if countries is None:
            countries = ['au', 'at', 'be', 'ch', 'cy', 'cz', 'de', 'dk', 'es', 'fr', 'gb', 'gr', 'ie', 'it', 'hr', 'hu', 'nl', 'lu', 'lt', 'pl', 'ro', 'se', 'sk', 'si', 'sr', 'tr', 'ua', 'us']
        elif isinstance(countries, str): # only a string provided
            countries = [countries]
        else:
            countries = countries
        for country in countries:
            url = "https://itunes.apple.com/%s/customer-reviews/id%s?displayable-kind=11" % (country, app_id)
            store_id = self.get_store_id_for_country(country)
            headers = { 'X-Apple-Store-Front': '%s,12 t:native' % store_id }
            try:
                if sleep is not None:
                    time.sleep(sleep)
                result = requests.get(url, headers=headers).text
            except Exception:
                try:
                    # handle the retry here.
                    # Take an extra sleep as back off and then retry the URL once.
                    time.sleep(2)
                    result = requests.get(url, headers=headers).text
                except Exception:
                    raise AppStoreException("Could not parse app store rating response for ID %s" % app_id)
            ratings = self._parse_rating(result)
            if ratings is not None:
                # Accumulate per-star counts across countries.
                dataset[1] = dataset[1] + ratings[1]
                dataset[2] = dataset[2] + ratings[2]
                dataset[3] = dataset[3] + ratings[3]
                dataset[4] = dataset[4] + ratings[4]
                dataset[5] = dataset[5] + ratings[5]
            # debug
            # print("-----------------------")
            # print('%d ratings' % (dataset[1] + dataset[2] + dataset[3] + dataset[4] + dataset[5]))
            # print(dataset)
        return dataset

    def _parse_rating(self, text):
        """
        Extract per-star rating counts from a customer-reviews HTML page.

        :param str text: Raw HTML of the reviews page.
        :return: Dict mapping star value (5 down to 1) to count, or None when
            the page does not contain exactly five rating spans.
        """
        matches = Regex.STARS.findall(text)
        if len(matches) != 5:
            # raise AppStoreException("Cant get stars - expected 5 - but got %d" % len(matches))
            return None
        ratings = {}
        # The page lists counts from 5 stars down to 1 star, in order.
        star = 5
        for match in matches:
            value = match
            value = value.replace("<span class=\"total\">", "")
            value = value.replace("</span>", "")
            ratings[star] = int(value)
            star = star - 1
        return ratings

    def _log_error(self, app_store_country, message):
        """
        Write the error to a local file to capture the error.

        :param str app_store_country: the country for the app store
        :param str message: the error message to log
        """
        app_log = "{0}_log.txt".format(app_store_country)
        errortime = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
        # NOTE(review): consider a with-block here so the handle is closed
        # even if write() raises.
        fh = open(app_log, "a")
        fh.write("%s %s \n" % (errortime,message))
        fh.close()
| StarcoderdataPython |
11321828 | <reponame>bjk7119/fosslight_binary_scanner
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 LG Electronics Inc.
# SPDX-License-Identifier: Apache-2.0
from fosslight_util.help import PrintHelpMsg
_HELP_MESSAGE_BINARY = """
Usage: fosslight_bin [option1] <arg1> [option2] <arg2>...
After extracting the binaries, the open source and license information of the saved binaries are retrieved by comparing the similarity
with the binaries stored in the Binary DB (FOSSLight > Binary DB) with the Binary's TLSH (Trend micro Locality Sensitive Hash).
Mandatory:
-p <binary_path>\t\t Path to analyze binaries
Options:
-h\t\t\t\t Print help message
-v\t\t\t\t Print FOSSLight Binary Scanner version
-o <output_path>\t\t Output path
\t\t\t\t (If you want to generate the specific file name, add the output path with file name.)
-f <format>\t\t\t Output file format (excel, csv, opossum)
-d <db_url>\t\t\t DB Connection(format :'postgresql://username:password@host:port/database_name')"""
def print_help_msg():
    """Render the FOSSLight Binary Scanner usage text via PrintHelpMsg.

    The True argument is forwarded to PrintHelpMsg.print_help_msg unchanged;
    presumably it makes the helper exit after printing — confirm upstream.
    """
    PrintHelpMsg(_HELP_MESSAGE_BINARY).print_help_msg(True)
| StarcoderdataPython |
6615183 | #!/usr/bin/env python3
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from config import CLOUD_PROJECT, DOWNSTREAM_PROJECTS
from steps import runner_step, create_docker_step, create_step
from update_last_green_commit import get_last_green_commit
from utils import fetch_bazelcipy_command
from utils import python_binary
def is_pull_request():
    """Return True when Buildkite set a PR source repo for this job.

    BUILDKITE_PULL_REQUEST_REPO is only non-empty for jobs triggered by a
    GitHub pull request, so a truthy value identifies PR builds.
    """
    return bool(os.getenv("BUILDKITE_PULL_REQUEST_REPO", ""))
def main(
    configs,
    project_name,
    http_config,
    file_config,
    git_repository,
    monitor_flaky_tests,
    use_but,
    incompatible_flags,
):
    """Assemble a Buildkite pipeline for the project and print it as YAML.

    :param configs: parsed pipeline configuration; must contain a "platforms"
        mapping and may contain a "buildifier" entry
    :param project_name: name of the project (key into DOWNSTREAM_PROJECTS)
    :param http_config: config location forwarded to each runner step
    :param file_config: config file forwarded to each runner step
    :param git_repository: repository URL of the project under test
    :param monitor_flaky_tests: forwarded flag for flaky-test monitoring
    :param use_but: True when testing with a custom-built Bazel binary
    :param incompatible_flags: incompatible-flag set under test, if any
    :raises Exception: when the configuration has no platforms
    """
    platform_configs = configs.get("platforms", None)
    if not platform_configs:
        raise Exception("{0} pipeline configuration is empty.".format(project_name))
    pipeline_steps = []
    if configs.get("buildifier"):
        pipeline_steps.append(
            create_docker_step("Buildifier", image=f"gcr.io/{CLOUD_PROJECT}/buildifier")
        )
    # In Bazel Downstream Project pipelines, git_repository and project_name must be specified,
    # and we should test the project at the last green commit.
    git_commit = None
    if (use_but or incompatible_flags) and git_repository and project_name:
        git_commit = get_last_green_commit(
            git_repository, DOWNSTREAM_PROJECTS[project_name]["pipeline_slug"]
        )
    # One runner step per configured platform.
    for platform in platform_configs:
        step = runner_step(
            platform,
            project_name,
            http_config,
            file_config,
            git_repository,
            git_commit,
            monitor_flaky_tests,
            use_but,
            incompatible_flags,
        )
        pipeline_steps.append(step)
    pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
    all_downstream_pipeline_slugs = []
    for _, config in DOWNSTREAM_PROJECTS.items():
        all_downstream_pipeline_slugs.append(config["pipeline_slug"])
    # We don't need to update last green commit in the following cases:
    # 1. This job is a github pull request
    # 2. This job uses a custom built Bazel binary (In Bazel Downstream Projects pipeline)
    # 3. This job doesn't run on master branch (Could be a custom build launched manually)
    # 4. We don't intend to run the same job in downstream with Bazel@HEAD (eg. google-bazel-presubmit)
    # 5. We are testing incompatible flags
    if not (
        is_pull_request()
        or use_but
        or os.getenv("BUILDKITE_BRANCH") != "master"
        or pipeline_slug not in all_downstream_pipeline_slugs
        or incompatible_flags
    ):
        # "wait" blocks until all previous steps succeed.
        pipeline_steps.append("wait")
        # If all builds succeed, update the last green commit of this project
        pipeline_steps.append(
            create_step(
                label="Try Update Last Green Commit",
                commands=[
                    fetch_bazelcipy_command(),
                    python_binary() + " bazelci.py try_update_last_green_commit",
                ],
            )
        )
    # Buildkite consumes the pipeline definition from stdout.
    print(yaml.dump({"steps": pipeline_steps}))
| StarcoderdataPython |
9626523 | from pynput import keyboard as kb
import string as s
from PIL import Image, ImageGrab
import random
import os
def rgb2hex(r, g, b):
    """Format an RGB triple (0-255 per channel) as an upper-case '#RRGGBB' string."""
    return f"#{r:02X}{g:02X}{b:02X}"
def clip2png():
    """Print the hex colour of the centre pixel of the clipboard image.

    Prints a notice and returns when the clipboard holds no image.
    """
    grabbed = ImageGrab.grabclipboard()
    # grabclipboard() may return None (or a non-image payload such as a file
    # list); probing .size and catching AttributeError covers both cases.
    try:
        width, height = grabbed.size
    except AttributeError:
        print("The clipboard does not contain an image.")
        return
    r, g, b = grabbed.convert('RGB').getpixel(((width/2), (height/2)))  # Get middle
    print(rgb2hex(r, g, b))
def on_press(key):
    """Keyboard hook: F2 samples the clipboard image's centre colour, F12 quits."""
    if key == kb.Key.f12:
        # F12 terminates the program.
        exit()
    if key == kb.Key.f2:
        clip2png()
# Block the main thread on a global keyboard listener; on_press handles keys
# until exit() is triggered from its F12 branch.
with kb.Listener(
    on_press=on_press) as listener:
    listener.join()
| StarcoderdataPython |
5180154 | <filename>ghub/services/tests/test_drf_views.py
import pytest
from django.test import RequestFactory
from ghub.services.api.views import ServiceViewSet
from ghub.services.models import Service
pytestmark = pytest.mark.django_db
class TestServiceViewSet:
    """Unit tests for the Service DRF viewset."""

    def test_get_queryset(self, service: Service, rf: RequestFactory):
        """The viewset's queryset must contain the fixture service."""
        viewset = ServiceViewSet()
        viewset.request = rf.get("/fake-url/")
        assert service in viewset.get_queryset()
| StarcoderdataPython |
11257145 | <reponame>faweigend/pypermod
from pypermod.agents.cp_agent_basis import CpAgentBasis
class CpODEAgentBasisLinear(CpAgentBasis):
    """
    This class defines the structure for all differential agents.
    Provided are functions for dynamic power outputs and commands for simulations.
    Differential agents allow real time estimations.
    The most basic virtual agent model employing the 2 parameter CP model.
    Characteristics:
    * performance above CP drains W' in a linear fashion
    * performance below CP allows W' to recover in a linear fashion.
    * depleted W' results in exhaustion
    """

    def __init__(self, w_p: float, cp: float, hz: int = 1):
        """
        constructor with basic constants

        :param w_p: anaerobic work capacity W' (Joules)
        :param cp: critical power CP (Watts)
        :param hz: simulation steps per second
        """
        super().__init__(hz=hz, w_p=w_p, cp=cp)
        # fully rested, balance equals w_p
        self._w_bal = w_p
        # simulation management parameters
        self._step = 0      # number of steps performed so far
        self._hz_t = 0.0    # elapsed time in seconds (_step / hz)
        self._pow = 0.0     # current power output in Watts

    def is_equilibrium(self) -> bool:
        """
        Checks if W'bal is at a steady state.
        In case of a CP agent this is either:
        1) when the athlete works exactly at CP
        2) works below CP and W'bal is full

        NOTE(review): the threshold ``self._w_p * 0.01`` (1% of W') does not
        match the "W'bal is full" description — possibly intended as
        ``self._w_p - 0.01``; confirm against the model's reference
        implementation.

        :return: boolean
        """
        return self._pow == self._cp or (self._pow < self._cp and self._w_bal >= (self._w_p * 0.01))

    def is_exhausted(self) -> bool:
        """
        simple exhaustion check using W' balance

        :return: boolean
        """
        return self._w_bal == 0

    def is_recovered(self) -> bool:
        """
        simple recovery check using W' balance

        NOTE(review): same 1%-of-W' threshold caveat as in is_equilibrium().

        :return: boolean
        """
        return self._w_bal >= (self._w_p * 0.01)

    def get_time(self) -> float:
        """
        :return: time in seconds considering the agent's hz setting
        """
        return self._hz_t

    def perform_one_step(self) -> float:
        """
        Updates power output and internal W' balance parameters.

        :return: expended power
        """
        # increase time counter
        self._step = self._step + 1
        self._hz_t = float(self._step / self._hz)
        # use updated instantaneous power to update internal capacities
        self._pow = self._estimate_possible_power_output()
        # final instantaneous power output
        return self._pow

    def reset(self):
        """
        reset internal values to default
        """
        self._step = 0
        self._hz_t = 0.0
        self._pow = 0.0
        self._w_bal = self._w_p

    def set_power(self, power: float):
        """
        set power output directly to skip acceleration phases

        :param power: power in Watts
        """
        self._pow = power

    def get_power(self) -> float:
        """
        :return: power in Watts
        """
        return self._pow

    def get_w_p_balance(self) -> float:
        """
        :return: current W'bal
        """
        return self._w_bal

    def _estimate_possible_power_output(self) -> float:
        """
        Update internal capacity estimations by one step.

        :return: the amount of power that the athlete was able to put out
        """
        p = self._pow
        # update W exp and power output
        if p < self._cp:
            # below CP: W' recovers, requested power is always achievable
            self._recover(p)
        else:
            # at or above CP: W' is drained and may cap the output
            p = self._spend_capacity(p)
        # return possible power output
        return p

    def _spend_capacity(self, p: float) -> float:
        """
        Capacity is spent for p > cp. It updates W'bal and returns the
        achieved power output

        :param p: power demand in watts
        :return: possible power in watts
        """
        # anaerobic (above-CP) energy drained this step, scaled by step length
        # NOTE(review): uses self._pow rather than the p argument; equivalent
        # on the current call path (perform_one_step sets self._pow before
        # this runs) — confirm before reusing elsewhere.
        # _delta_t presumably comes from the CpAgentBasis base class.
        anaer_p = (self._pow - self._cp) * self._delta_t
        if self._w_bal < anaer_p:
            # not enough balance to perform on requested power
            p = self._w_bal + self._cp
            self._w_bal = 0.0
        else:
            # drain W' balance by the above-CP energy
            self._w_bal -= anaer_p
        # Update balance
        self._w_bal = min(self._w_p, self._w_bal)
        self._w_bal = max(0.0, self._w_bal)
        return p

    def _recover(self, p: float):
        """
        linear recovery happens for p < cp. It increases W' balance

        NOTE(review): the branch condition ``self._w_bal < (self._w_p * 0.01)``
        only recovers gradually below 1% of W' and otherwise snaps the balance
        to full — the comment suggests the intent was "recover unless already
        full", i.e. possibly ``self._w_p - 0.01``. Confirm before changing.
        """
        # nothing to do if fully recovered
        if self._w_bal < (self._w_p * 0.01):
            diff = (self._cp - p) * self._delta_t
            self._w_bal += diff
            # cannot be more recovered than w'
            self._w_bal = min(self._w_p, self._w_bal)
        else:
            self._w_bal = self._w_p
| StarcoderdataPython |
59675 | <reponame>sourcery-ai-bot/factom-core<filename>factom_core/blocks/admin_block.py
import hashlib
from dataclasses import dataclass, field
from typing import List
from factom_core.block_elements.admin_messages import *
from factom_core.utils import varint
from .directory_block import DirectoryBlock
@dataclass
class AdminBlockHeader:
    """Header of a Factom Admin Block.

    Marshalled wire layout (big-endian):
    chain id (32 bytes) | previous back-reference hash (32 bytes) |
    block height (4 bytes) | varint expansion size | expansion area |
    message count (4 bytes) | body size (4 bytes).
    """

    # Reserved chain id of the admin chain (constant, not a dataclass field).
    CHAIN_ID = bytes.fromhex("000000000000000000000000000000000000000000000000000000000000000a")

    prev_back_reference_hash: bytes  # back-reference hash of the previous admin block
    height: int  # directory block height this admin block belongs to
    expansion_area: bytes  # forward-compatible header extension (currently not parsed)
    message_count: int  # number of admin messages in the block body
    body_size: int  # marshalled body length in bytes

    def __post_init__(self):
        # TODO: value assertions
        pass

    def marshal(self) -> bytes:
        """Serialize the header in canonical wire order (see class docstring)."""
        buf = bytearray()
        buf.extend(AdminBlockHeader.CHAIN_ID)
        buf.extend(self.prev_back_reference_hash)
        buf.extend(struct.pack(">I", self.height))
        # expansion area is length-prefixed with a varint
        buf.extend(varint.encode(len(self.expansion_area)))
        buf.extend(self.expansion_area)
        buf.extend(struct.pack(">I", self.message_count))
        buf.extend(struct.pack(">I", self.body_size))
        return bytes(buf)

    @classmethod
    def unmarshal(cls, raw: bytes):
        """Deserialize a header, requiring that every byte of `raw` is consumed."""
        h, data = AdminBlockHeader.unmarshal_with_remainder(raw)
        assert len(data) == 0, "Extra bytes remaining!"
        return h

    @classmethod
    def unmarshal_with_remainder(cls, raw: bytes):
        """Deserialize a header and return it together with the unread bytes.

        :param raw: bytes beginning at the header's chain id
        :return: (AdminBlockHeader, remaining bytes)
        """
        chain_id, data = raw[:32], raw[32:]
        assert chain_id == AdminBlockHeader.CHAIN_ID
        prev_back_reference_hash, data = data[:32], data[32:]
        height, data = struct.unpack(">I", data[:4])[0], data[4:]
        expansion_size, data = varint.decode(data)
        expansion_area, data = data[:expansion_size], data[expansion_size:]
        # TODO: unmarshal header expansion area
        message_count, data = struct.unpack(">I", data[:4])[0], data[4:]
        body_size, data = struct.unpack(">I", data[:4])[0], data[4:]
        return (
            AdminBlockHeader(
                prev_back_reference_hash=prev_back_reference_hash,
                height=height,
                expansion_area=expansion_area,
                message_count=message_count,
                body_size=body_size,
            ),
            data,
        )
@dataclass
class AdminBlockBody:
    """
    Body of an Admin Block: the ordered list of admin messages.

    Recognized-but-unsupported message types (admin ids up to 0x0E without a
    parser here) are kept in ``messages`` as their raw integer admin id so
    the message count still matches the header.
    """

    messages: List[AdminMessage] = field(default_factory=list)

    def __post_init__(self):
        # TODO: value assertions
        pass

    def marshal(self) -> bytes:
        """Serialize all messages, each prefixed by its one-byte admin id."""
        buf = bytearray()
        for message in self.messages:
            if type(message) is int:
                # unsupported message: only its raw admin id byte is known
                buf.append(message)
                continue
            buf.append(message.__class__.ADMIN_ID)
            buf.extend(message.marshal())
        return bytes(buf)

    @classmethod
    def unmarshal(cls, raw: bytes, message_count: int):
        """Deserialize a body, requiring that every byte of `raw` is consumed."""
        body, data = cls.unmarshal_with_remainder(raw, message_count)
        assert len(data) == 0, "Extra bytes remaining!"
        return body

    @classmethod
    def unmarshal_with_remainder(cls, raw: bytes, message_count: int):
        """
        Deserialize `message_count` admin messages and return the leftovers.

        :param raw: bytes beginning at the first admin message
        :param message_count: number of messages to parse (from the header)
        :return: (AdminBlockBody, remaining bytes)
        """
        data = raw
        messages = []
        for _ in range(message_count):
            admin_id, data = data[0], data[1:]
            msg = None
            if admin_id == MinuteNumber.ADMIN_ID:  # Deprecated in M2
                size = MinuteNumber.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = MinuteNumber.unmarshal(msg_data)
            elif admin_id == DirectoryBlockSignature.ADMIN_ID:
                size = DirectoryBlockSignature.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = DirectoryBlockSignature.unmarshal(msg_data)
            elif admin_id == MatryoshkaHashReveal.ADMIN_ID:
                size = MatryoshkaHashReveal.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = MatryoshkaHashReveal.unmarshal(msg_data)
            elif admin_id == MatryoshkaHashAddOrReplace.ADMIN_ID:
                size = MatryoshkaHashAddOrReplace.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = MatryoshkaHashAddOrReplace.unmarshal(msg_data)
            elif admin_id == ServerCountIncrease.ADMIN_ID:
                size = ServerCountIncrease.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = ServerCountIncrease.unmarshal(msg_data)
            elif admin_id == AddFederatedServer.ADMIN_ID:
                size = AddFederatedServer.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = AddFederatedServer.unmarshal(msg_data)
            elif admin_id == AddAuditServer.ADMIN_ID:
                size = AddAuditServer.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = AddAuditServer.unmarshal(msg_data)
            elif admin_id == RemoveFederatedServer.ADMIN_ID:
                size = RemoveFederatedServer.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = RemoveFederatedServer.unmarshal(msg_data)
            elif admin_id == AddFederatedServerSigningKey.ADMIN_ID:
                size = AddFederatedServerSigningKey.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = AddFederatedServerSigningKey.unmarshal(msg_data)
            elif admin_id == AddFederatedServerBitcoinAnchorKey.ADMIN_ID:
                size = AddFederatedServerBitcoinAnchorKey.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = AddFederatedServerBitcoinAnchorKey.unmarshal(msg_data)
            elif admin_id == ServerFaultHandoff.ADMIN_ID:
                msg = ServerFaultHandoff()  # No data on chain for message
            elif admin_id == CoinbaseDescriptor.ADMIN_ID:
                msg, data = CoinbaseDescriptor.unmarshal_with_remainder(data)
            elif admin_id == CoinbaseDescriptorCancel.ADMIN_ID:
                msg, data = CoinbaseDescriptorCancel.unmarshal_with_remainder(data)
            elif admin_id == AddAuthorityFactoidAddress.ADMIN_ID:
                size = AddAuthorityFactoidAddress.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                msg = AddAuthorityFactoidAddress.unmarshal(msg_data)
            elif admin_id == AddAuthorityEfficiency.ADMIN_ID:
                size = AddAuthorityEfficiency.MESSAGE_SIZE
                msg_data, data = data[:size], data[size:]
                # BUG FIX: this branch previously called
                # AddAuthorityFactoidAddress.unmarshal (copy-paste error),
                # mis-parsing efficiency messages.
                msg = AddAuthorityEfficiency.unmarshal(msg_data)
            elif admin_id <= 0x0E:
                # Keep the raw id so message counts stay consistent.
                # NOTE(review): the payload of an unsupported message is not
                # skipped here, so any messages following it would be
                # misparsed — confirm these ids never occur with a payload.
                msg = admin_id
                print(f'Unsupported admin message type {msg} found')
            if msg is not None:
                messages.append(msg)
        assert len(messages) == message_count, "Unexpected message count"
        return AdminBlockBody(messages=messages), data

    def construct_header(self, prev_back_reference_hash: bytes, height: int) -> AdminBlockHeader:
        """
        Seals the admin block by constructing and returning its header
        """
        return AdminBlockHeader(
            prev_back_reference_hash=prev_back_reference_hash,
            height=height,
            expansion_area=b"",
            message_count=len(self.messages),
            body_size=len(self.marshal()),
        )
@dataclass
class AdminBlock:
    """
    A Factom Admin Block: a header plus the list of admin messages.

    https://github.com/FactomProject/FactomDocs/blob/master/factomDataStructureDetails.md#administrative-block
    """

    header: AdminBlockHeader
    body: AdminBlockBody
    _cached_lookup_hash: bytes = None  # lazily computed by `lookup_hash`
    _cached_back_reference_hash: bytes = None  # lazily computed by `back_reference_hash`

    def __post_init__(self):
        # TODO: value assertions
        pass

    @property
    def lookup_hash(self):
        """SHA-256 digest of the marshalled block (computed once, then cached)."""
        if self._cached_lookup_hash is None:
            self._cached_lookup_hash = hashlib.sha256(self.marshal()).digest()
        return self._cached_lookup_hash

    @property
    def back_reference_hash(self):
        """First 32 bytes of the SHA-512 digest of the marshalled block (cached)."""
        if self._cached_back_reference_hash is None:
            self._cached_back_reference_hash = hashlib.sha512(self.marshal()).digest()[:32]
        return self._cached_back_reference_hash

    def marshal(self) -> bytes:
        """Serialize the block: header bytes followed by body bytes."""
        buf = bytearray()
        buf.extend(self.header.marshal())
        buf.extend(self.body.marshal())
        return bytes(buf)

    @classmethod
    def unmarshal(cls, raw: bytes):
        """Returns a new AdminBlock object, unmarshalling given bytes according to:
        https://github.com/FactomProject/FactomDocs/blob/master/factomDataStructureDetails.md#administrative-block

        Useful for working with a single ablock out of context, pulled directly from a factomd database for instance.

        AdminBlock created will not include contextual metadata, such as timestamp
        """
        block, data = cls.unmarshal_with_remainder(raw)
        assert len(data) == 0, "Extra bytes remaining!"
        return block

    @classmethod
    def unmarshal_with_remainder(cls, raw: bytes):
        """Deserialize a block and return it together with the unread bytes.

        Message parsing is delegated to AdminBlockBody.unmarshal_with_remainder
        instead of duplicating the per-message-type dispatch here. The previous
        inline copy had drifted: it parsed AddAuthorityEfficiency messages with
        AddAuthorityFactoidAddress.unmarshal (copy-paste bug), and any future
        message type would have had to be added in two places.
        """
        header, data = AdminBlockHeader.unmarshal_with_remainder(raw)
        body, data = AdminBlockBody.unmarshal_with_remainder(data, header.message_count)
        return AdminBlock(header=header, body=body), data

    def add_context(self, directory_block: DirectoryBlock):
        # No contextual metadata is attached yet; kept for API symmetry with
        # the other block types.
        pass

    def to_dict(self):
        """JSON-friendly dict representation of the block."""
        return {
            "lookup_hash": self.lookup_hash.hex(),
            "back_reference_hash": self.header.prev_back_reference_hash.hex(),
            "height": self.header.height,
            "expansion_area": self.header.expansion_area.hex(),
            "message_count": self.header.message_count,
            "body_size": self.header.body_size,
            # Unsupported messages are stored as their raw int admin id and
            # have no to_dict(); emit the id itself in that case.
            "messages": [m if isinstance(m, int) else m.to_dict() for m in self.body.messages],
        }

    def __str__(self):
        return "{}(height={}, hash={})".format(self.__class__.__name__, self.header.height, self.lookup_hash.hex())
| StarcoderdataPython |
6425235 | <reponame>PacktPublishing/Designing-Models-for-Responsible-AI<gh_stars>0
import numpy as np
from ..core import BaseServer
class FedAvgServer(BaseServer):
    """Federated Averaging (FedAvg) server: aggregates client models by a
    (weighted) average of their state dicts and redistributes the result."""

    def __init__(self, clients, global_model, server_id=0):
        """
        :param clients: participating client objects (must support upload()/download())
        :param global_model: the shared server-side model
        :param server_id: numeric identifier of this server
        """
        super().__init__(clients, global_model, server_id=server_id)

    def update(self, weight=None):
        """Aggregate the uploaded client parameters into the server model.

        :param weight: per-client aggregation weights; defaults to a uniform
            1/num_clients for every client.
        """
        if weight is None:
            weight = np.ones(self.num_clients) / self.num_clients
        uploaded_parameters = [c.upload() for c in self.clients]
        averaged_params = uploaded_parameters[0]
        for k in averaged_params.keys():
            # weighted sum over all clients, parameter tensor by tensor
            averaged_params[k] = uploaded_parameters[0][k] * weight[0]
            for i in range(1, len(uploaded_parameters)):
                averaged_params[k] += uploaded_parameters[i][k] * weight[i]
        self.server_model.load_state_dict(averaged_params)

    def distribute(self):
        """Send the updated server model to every client."""
        for client in self.clients:
            client.download(self.server_model.state_dict())

    # Backward-compatible alias: the method was originally published under
    # this misspelled name, so existing callers keep working.
    distribtue = distribute
| StarcoderdataPython |
11321217 | import torch
import torch.nn as nn
from tools import get_kappa
def train_model(model, device, lr, epochs, train_dataloader, valid_dataloader=None, logger=None):
    """Train `model` with Adam on MSE loss, optionally validating each epoch.

    :param model: a module called as model(x, lengths, feat) -> predicted scores
    :param device: torch device (or device string) to train on
    :param lr: Adam learning rate
    :param epochs: number of passes over `train_dataloader`
    :param train_dataloader: yields (x, lengths, scores, feat) batches
    :param valid_dataloader: optional validation loader; enables kappa metrics
        and best-weight checkpointing
    :param logger: optional logger with log(**metrics) and checkpoint_weights(model)
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    best_loss = float('inf')
    for ep in range(epochs):
        # train
        model.train()
        total = 0
        sum_loss = 0.0
        for x, lengths, scores, feat in train_dataloader:
            x = x.to(device)
            lengths = lengths.to(device)
            scores = scores.to(device)
            feat = feat.to(device)
            optimizer.zero_grad()
            pred_scores = model(x, lengths, feat)
            loss = criterion(scores, pred_scores)
            loss.backward()
            optimizer.step()
            total += scores.shape[0]
            # weight the batch loss by batch size for a correct epoch average
            sum_loss += loss.item() * scores.shape[0]
        if logger is not None:
            logger.log(train_loss=sum_loss / total)

        # validate (everything below is guarded: the original code referenced
        # valid_loss and called logger methods even when valid_dataloader or
        # logger was None, crashing with NameError/AttributeError)
        valid_string = ""
        if valid_dataloader is not None:
            valid_loss, valid_w_kappa, valid_g_kappa, valid_i_kappa = evaluate_model(
                model, device, valid_dataloader, criterion)
            if logger is not None:
                logger.log(valid_loss=valid_loss, valid_weighted_kappa=valid_w_kappa,
                           valid_global_kappa=valid_g_kappa, valid_individual_kappa=valid_i_kappa)
            # checkpoint best weights on validation loss improvement
            if valid_loss < best_loss:
                best_loss = valid_loss
                if logger is not None:
                    logger.checkpoint_weights(model)
            valid_string = f", (valid) loss {valid_loss: .3f}, weighted kappa {valid_w_kappa: .3f}, global kappa {valid_g_kappa: .3f}, individual kappa {list(valid_i_kappa.values())}"

        # display progress every 5 epochs
        if (ep + 1) % 5 == 0:
            print(f"Ep[{ep + 1}/{epochs}] (train) loss {sum_loss / total: .3f}{valid_string}")
def evaluate_model(model, device, dataloader, criterion=nn.MSELoss()):
    """Evaluate `model` on `dataloader` without gradient tracking.

    Returns (mean loss, weighted kappa, global kappa, per-set kappa dict),
    with kappas computed on recovered (de-normalized) scores.
    """
    model.eval()
    seen = 0
    loss_sum = 0.0
    preds = torch.zeros(len(dataloader.dataset))
    with torch.no_grad():
        for x, lengths, scores, feat in dataloader:
            x = x.to(device)
            lengths = lengths.to(device)
            scores = scores.to(device)
            feat = feat.to(device)
            batch_pred = model(x, lengths, feat)
            batch_size = scores.shape[0]
            preds[seen:seen + batch_size] = batch_pred.cpu()
            loss_sum += criterion(scores, batch_pred).item() * batch_size
            seen += batch_size
    # map normalized scores back to the original scale before computing kappas
    true_scores = dataloader.dataset.recover(dataloader.dataset.get_scores())
    pred_scores = dataloader.dataset.recover(preds.numpy(), round_to_known=True)
    w_kappa, g_kappa, i_kappa = get_kappa(true_scores, pred_scores, dataloader.dataset.get_sets())
    return loss_sum / seen, w_kappa, g_kappa, i_kappa
def predict(model, device, dataloader, round_to_known=False):
    """Run inference over `dataloader` and return recovered predictions.

    :param round_to_known: forwarded to dataset.recover to snap predictions
        onto known score values.
    """
    model.eval()
    preds = torch.zeros(len(dataloader.dataset))
    filled = 0
    with torch.no_grad():
        for x, lengths, scores, feat in dataloader:
            x = x.to(device)
            lengths = lengths.to(device)
            scores = scores.to(device)
            feat = feat.to(device)
            batch_pred = model(x, lengths, feat)
            batch_size = scores.shape[0]
            preds[filled:filled + batch_size] = batch_pred.cpu()
            filled += batch_size
    # translate normalized model outputs back to the original score scale
    return dataloader.dataset.recover(preds.numpy(), round_to_known)
| StarcoderdataPython |
3562916 | # AUTOGENERATED! DO NOT EDIT! File to edit: annotation_viewer.ipynb (unless otherwise specified).
__all__ = ['WINDOW_NAME', 'ANNOTATION_COLOR', 'ImageLoader', 'show_annotated_images', 'configure_logging']
# Cell
import sys
import argparse
import logging
import cv2
import math
import numpy as np
import aiforce.image.opencv_tools as opencv_tools
import aiforce.image.pillow_tools as pillow_tools
from enum import Enum
from os.path import basename
from aiforce import annotation as annotation_package
from .core import list_subclasses, parse_known_args_with_help
from .annotation.core import AnnotationAdapter, annotation_filter, SubsetType, RegionShape
# Cell
# the name of the opencv window
WINDOW_NAME = 'Annotation'
# the color of the annotations
ANNOTATION_COLOR = (0, 255, 255)
# Cell
class ImageLoader(Enum):
    """Supported image-loading backends."""

    OPEN_CV = 'open_cv'
    PILLOW = 'pillow'

    def __str__(self) -> str:
        # Render as the raw backend name so argparse choices display cleanly.
        return str(self.value)
# Cell
def show_annotated_images(annotation_adapter, subset_type, image_loader, max_width=0, max_height=0, filter_names=None):
    """
    Show images with corresponding annotations, one at a time.

    Navigation (see key handler below): 'n' = next image, 'b' = previous
    image, 'q' = quit.
    `annotation_adapter`: The annotation adapter to use
    `subset_type`: The subset to load
    `image_loader`: The image loader library to use
    `max_width`: The maximum width to scale the image for visibility.
    `max_height`: The maximum height to scale the image for visibility.
    `filter_names`: Optional collection of file base names; only matching
        annotations are shown.
    """
    # NOTE(review): `categories` is currently unused — consider removing the
    # call or using the categories for label display.
    categories = annotation_adapter.read_categories()
    annotations = annotation_adapter.read_annotations(subset_type)
    if filter_names:
        # keep only annotations whose file base name was explicitly requested
        annotations = annotation_filter(annotations, lambda _, anno: basename(anno.file_path) in filter_names)
    len_annotations = len(annotations)
    if len_annotations == 0:
        logging.error("No Annotations found")
        return
    logging.info("Load images with {}".format(image_loader))
    index = 0
    annotation_keys = list(annotations.keys())
    logging.info("Keys to use:")
    logging.info("n = Next Image")
    logging.info("b = Previous Image")
    logging.info("q = Quit")
    logging.info("Annotations to view: {}".format(len_annotations))
    while True:
        annotation_id = annotation_keys[index]
        annotation = annotations[annotation_id]
        logging.info("View Image {}/{}: {}".format(index + 1, len_annotations, annotation.file_path))
        # Load the image with the selected backend; both paths end up with an
        # OpenCV-compatible image plus its dimensions.
        if image_loader == ImageLoader.PILLOW:
            img, width, height = pillow_tools.get_image_size(annotation.file_path)
            img = opencv_tools.from_pillow_image(img)
        elif image_loader == ImageLoader.OPEN_CV:
            img, width, height = opencv_tools.get_image_size(annotation.file_path)
        else:
            logging.error("Unsupported image loader")
            img = None
            width = 0
            height = 0
        if img is None:
            logging.info("Image not found at {}".format(annotation.file_path))
            # 1x1 black placeholder so the viewer window still renders
            img = np.zeros(shape=(1, 1, 3))
        else:
            logging.info("Image size (WIDTH x HEIGHT): ({} x {})".format(width, height))
        if annotation.regions:
            logging.info("Found {} regions".format(len(annotation.regions)))
            # draw every annotated region on top of the image
            for region_index, region in enumerate(annotation.regions):
                points = list(zip(region.points_x, region.points_y))
                logging.info("Found {} of category {} with {} points: {}".format(region.shape,
                                                                                 ','.join(region.labels),
                                                                                 len(points), points))
                if region.shape == RegionShape.CIRCLE:
                    img = cv2.circle(img, points[0], int(region.radius_x), ANNOTATION_COLOR, 2)
                elif region.shape == RegionShape.ELLIPSE:
                    # cv2.ellipse expects the rotation angle in degrees
                    angle = region.rotation * 180 // math.pi
                    img = cv2.ellipse(img, points[0], (int(region.radius_x), int(region.radius_y)), angle, 0, 360,
                                      ANNOTATION_COLOR, 2)
                elif region.shape == RegionShape.POINT:
                    img = cv2.circle(img, points[0], 1, ANNOTATION_COLOR, 2)
                elif region.shape == RegionShape.POLYGON or region.shape == RegionShape.POLYLINE:
                    pts = np.array(points, np.int32)
                    pts = pts.reshape((-1, 1, 2))
                    # polygons are drawn closed, polylines stay open
                    img = cv2.polylines(img, [pts], region.shape == RegionShape.POLYGON, ANNOTATION_COLOR, 2)
                elif region.shape == RegionShape.RECTANGLE:
                    img = cv2.rectangle(img, points[0], points[1], ANNOTATION_COLOR, 2)
        if max_width and max_height:
            # clamp to the configured maximum display size
            img = opencv_tools.fit_to_max_size(img, max_width, max_height)
        cv2.imshow(WINDOW_NAME, img)
        cv2.setWindowTitle(WINDOW_NAME, "Image {}/{}".format(index + 1, len_annotations))
        # block until a key press; navigation clamps to the valid index range
        k = cv2.waitKey(0)
        if k == ord('q'):  # 'q' key to stop
            break
        elif k == ord('b'):
            index = max(0, index - 1)
        elif k == ord('n'):
            index = min(len_annotations - 1, index + 1)
    cv2.destroyAllWindows()
# Cell
def configure_logging(logging_level=logging.INFO):
    """
    Configures logging for the system.

    :param logging_level: The logging level to use.
    """
    # delegate to the root-logger basic configuration
    logging.basicConfig(level=logging_level)
# Cell
if __name__ == '__main__' and '__file__' in globals():
    # for direct shell execution
    configure_logging()
    # read annotation adapters to use
    # (discovers every AnnotationAdapter subclass shipped with the package)
    adapters = list_subclasses(annotation_package, AnnotationAdapter)

    parser = argparse.ArgumentParser()
    parser.add_argument("-a",
                        "--annotation",
                        help="The annotation adapter to read the annotations.",
                        type=str,
                        choices=adapters.keys(),
                        required=True)
    parser.add_argument("--image_loader",
                        help="The image library for reading the image.",
                        choices=list(ImageLoader),
                        type=ImageLoader,
                        default=ImageLoader.PILLOW)
    parser.add_argument("--subset",
                        help="The image subset to read.",
                        choices=list(SubsetType),
                        type=SubsetType,
                        default=SubsetType.TRAINVAL)
    parser.add_argument("--max-width",
                        help="The maximum width to scale the image for visibility.",
                        type=int,
                        default=0)
    parser.add_argument("--max-height",
                        help="The maximum height to scale the image for visibility.",
                        type=int,
                        default=0)
    parser.add_argument("--filter",
                        help="Filter file names to view.",
                        nargs="*",
                        default=[])

    argv = sys.argv
    # first pass: parse the viewer's own options, keeping unknown arguments
    # for the adapter-specific parser below
    args, argv = parse_known_args_with_help(parser, argv)
    adapter_class = adapters[args.annotation]

    # parse the annotation arguments
    # (each adapter class contributes its own argparse parser via `argparse()`)
    annotation_parser = getattr(adapter_class, 'argparse')()
    annotation_args, argv = parse_known_args_with_help(annotation_parser, argv)

    show_annotated_images(adapter_class(**vars(annotation_args)), args.subset, args.image_loader, args.max_width,
                          args.max_height, args.filter)
1712046 | <gh_stars>1-10
"""
A catch-all file that imports the whole framework.
:file: __init__.py
:date: 27/08/2015
:authors:
- <NAME> <<EMAIL>>
"""
# Core classes.
from .base import *
from .struct import *
# Serializers
from .scalars import *
from .enum import *
# Misc.
from .validators import *
| StarcoderdataPython |
def tflm_kernel_friends():
    """Visibility friend list for TFLM kernel targets (empty by default)."""
    return []
def tflm_audio_frontend_friends():
    """Visibility friend list for the TFLM audio frontend (empty by default)."""
    return []
def xtensa_fusion_f1_cpu():
    """Toolchain CPU identifier for the Xtensa Fusion F1 core."""
    return "F1_190305_swupgrade"
def xtensa_hifi_3z_cpu():
    """Toolchain CPU identifier for the Xtensa HiFi 3z core."""
    return "HIFI_190304_swupgrade"
def xtensa_hifi_5_cpu():
    """Toolchain CPU identifier for the Xtensa HiFi 5 core."""
    return "AE_HiFi5_LE5_AO_FP_XC"
| StarcoderdataPython |
11236765 | <filename>svm/dnase/IMR90/svm_predictions_svmtrainset_genometestset/make_bed_from_hdf.py
"""Convert per-fold SVM prediction labels from an HDF5 store into a BED file."""
import sys

import pandas as pd

# fold index is the single command-line argument
fold = sys.argv[1]
labels = pd.read_hdf(
    f"IMR90.{fold}.classification.withgc.dl.pred.svmtrainset.genometestset.hdf5.labels.0"
)
# tab-separated, keep the index, no header row (BED convention)
labels.to_csv(f"labels.{fold}.bed", sep="\t", index=True, header=False)
| StarcoderdataPython |
6602746 | <gh_stars>0
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    Abstract interface every search problem must implement (an abstract class
    in object-oriented terms: none of the methods are implemented here).

    Concrete problems describe the state space, the goal test, the successor
    function and action costs; the search algorithms in this module only ever
    talk to this interface. You do not need to change anything in this class,
    ever.
    """

    def getStartState(self):
        """Return the start state for the search problem."""
        util.raiseNotDefined()

    def isGoalState(self, state):
        """Return True if and only if `state` is a valid goal state."""
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        Return a list of (successor, action, stepCost) triples for `state`,
        where 'successor' is a state reachable from `state`, 'action' is the
        action required to get there, and 'stepCost' is the incremental cost
        of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        Return the total cost of a particular sequence of actions. The
        sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Hard-coded move sequence that solves the tinyMaze layout only; the moves
    are incorrect for any other maze.
    """
    from game import Directions
    s, w = Directions.SOUTH, Directions.WEST
    return [s, s, w, s, w, w, s, w]
def genericSearch(problem, fringe, heuristic=None):
    """
    A generic graph-search algorithm parameterized by the fringe container.

    :param problem: The problem
    :param fringe: The fringe, one of:
        - util.Stack (LIFO) for DFS
        - util.Queue (FIFO) for BFS
        - util.PriorityQueue for UCS and A*; items are ordered by the
          cost-so-far plus the heuristic estimate
    :param heuristic: heuristic function (state, problem) -> estimated cost;
        required when fringe is a PriorityQueue, ignored otherwise
    :return actions: the list of actions leading to a goal state
    :return []: empty list when the fringe is exhausted without reaching a goal
    """
    visited = []   # nodes already expanded (list: states may be unhashable)
    actions = []   # actions taken to reach the node currently being expanded
    initial = problem.getStartState()
    is_priority = isinstance(fringe, util.PriorityQueue)

    # Seed the fringe with the start state and an empty action list; a
    # PriorityQueue additionally needs an initial priority.
    if is_priority:
        fringe.push((initial, actions), heuristic(initial, problem))
    else:
        fringe.push((initial, actions))

    # BUG FIX: the original used `while fringe:` — these container objects
    # define neither __bool__ nor __len__, so the condition was always true
    # and an exhausted fringe crashed in pop() instead of returning [].
    # (The duplicated, byte-identical pop() branches per fringe type were
    # also collapsed: pop() has the same signature on all three containers.)
    while not fringe.isEmpty():
        node, actions = fringe.pop()
        if node in visited:
            continue
        visited.append(node)
        # goal reached: the recorded actions are the solution path
        if problem.isGoalState(node):
            return actions
        # expand: push every successor with its extended action list
        for coordinate, direction, cost in problem.getSuccessors(node):
            new_actions = actions + [direction]
            if is_priority:
                # priority = path cost so far + heuristic estimate
                # (heuristic contributes 0 for UCS via nullHeuristic)
                new_cost = problem.getCostOfActions(new_actions) + heuristic(coordinate, problem)
                fringe.push((coordinate, new_actions), new_cost)
            else:
                fringe.push((coordinate, new_actions))

    return []  # fringe exhausted: no goal reachable
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.

    Runs genericSearch with a LIFO util.Stack as the fringe.
    """
    return genericSearch(problem, util.Stack())
def breadthFirstSearch(problem):
    """
    Search the shallowest nodes in the search tree first.

    Runs genericSearch with a FIFO util.Queue as the fringe.
    """
    return genericSearch(problem, util.Queue())
def uniformCostSearch(problem):
    """
    Search the node of least total cost first.

    UCS is exactly A* with a zero heuristic, so this delegates to aStarSearch
    with its default nullHeuristic.
    """
    return aStarSearch(problem)
def nullHeuristic(state, problem=None):
    """
    Trivial heuristic: estimates the remaining cost to the nearest goal of
    the provided SearchProblem as zero for every state.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """
    Search the node with the lowest combined path cost and heuristic first.

    Runs genericSearch with a util.PriorityQueue fringe: each item's priority
    is its cost-so-far plus the heuristic estimate, so items leave the fringe
    in best-first order. With the default nullHeuristic this behaves exactly
    like uniform cost search.
    """
    return genericSearch(problem, util.PriorityQueue(), heuristic)
# Abbreviations — short aliases so callers (and the course autograder) can
# select a search strategy by its conventional short name.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| StarcoderdataPython |
11284542 | <gh_stars>1-10
import sys
import asyncio
import logging
import disnake
from disnake.ext import commands
from . import checks, interactions, error_handler, types, utils
from .config import Config
# Fail fast on unsupported interpreters (syntax/libraries below need 3.9+).
assert sys.version_info[:2] >= (3, 9)

# required for aiodns on windows
if sys.platform == 'win32':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

logging.basicConfig(format='%(asctime)s: [%(levelname)s] (%(threadName)s) %(name)s: %(message)s', level=logging.INFO)
logger = logging.getLogger('guardianbot')
# package logger is more verbose than the root logger when debugging
logger.setLevel(logging.DEBUG if Config.debug else logging.INFO)

# enable the privileged members intent on top of the defaults
# (presumably needed so the bot can inspect guild members — confirm)
intents = disnake.Intents.default()
intents.members = True

bot = interactions.CustomSyncBot(
    command_prefix=commands.when_mentioned_or(Config.prefix),
    activity=disnake.Activity(type=disnake.ActivityType.watching, name='Link(s)'),
    intents=intents,
    test_guilds=[Config.guild_id],
    sync_commands_debug=Config.debug,
    sync_permissions=True,
    # hot-reload extensions only while a debugger is attached
    reload=utils.debugger_active(),
    # never ping anyone from bot output by default
    allowed_mentions=disnake.AllowedMentions.none(),
)
def load_ext(name: str) -> None:
    """Load a bot extension (cog) by its package-relative module name."""
    bot.load_extension(name, package=__package__)


load_ext('.cogs.core')
load_ext('.cogs.filter')
@bot.event
async def on_ready():
    """Log the connected guild and gateway latency once the session is ready."""
    guild = disnake.utils.get(bot.guilds, id=Config.guild_id)
    # Fail fast if the bot is not in its configured guild. This was an
    # `assert`, which is stripped when Python runs with -O; an explicit
    # raise keeps the check in optimized mode too.
    if guild is None:
        raise RuntimeError(f'couldn\'t find guild with ID {Config.guild_id}')
    logger.info(
        f'{bot.user} is connected to the following guild:\n'
        f'{guild.name} (id: {guild.id})'
    )
    logger.info(f'Latency: {int(bot.latency * 1000)}ms')
@bot.event
async def on_command(ctx: types.Context) -> None:
    """Audit-log every prefix-command invocation."""
    # lazy %-style args: formatting only happens if DEBUG is enabled
    logger.debug(
        "%s/%s invoked command '%s' in '%s'",
        ctx.author,
        ctx.author.id,
        ctx.message.content,
        ctx.channel,
    )
async def _on_app_cmd(ctx: types.AppCI, type: str) -> None:
    """Shared audit logger for application-command invocations."""
    # lazy %-style args: formatting only happens if DEBUG is enabled
    logger.debug(
        "%s/%s invoked %s command '%s %s' in '%s'",
        ctx.author,
        ctx.author.id,
        type,
        ctx.application_command.qualified_name,
        ctx.filled_options,
        ctx.channel,
    )
@bot.event
async def on_slash_command(ctx: types.AppCI) -> None:
    """Forward slash-command invocations to the shared audit logger."""
    await _on_app_cmd(ctx, 'slash')
@bot.event
async def on_user_command(ctx: types.AppCI) -> None:
    """Forward user-command invocations to the shared audit logger."""
    await _on_app_cmd(ctx, 'user')
@bot.event
async def on_message_command(ctx: types.AppCI) -> None:
    """Forward message-command invocations to the shared audit logger."""
    await _on_app_cmd(ctx, 'message')
# add global command checks
# The same filter instance gates prefix commands and every kind of
# application command, so permission logic lives in exactly one place.
cmd_filter = checks.command_filter()
bot.check(cmd_filter)
bot.application_command_check(slash_commands=True, user_commands=True, message_commands=True)(cmd_filter)

# initialize global error handler
error_handler.init(bot)
error_handler.init_warnings_handler(bot)

# connect
# Blocking call: runs the gateway event loop until the process is stopped.
bot.run(Config.token)
| StarcoderdataPython |
9797236 | <gh_stars>0
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from src.models.vanilla_classifier import VanillaClassifier
class Bagging(VanillaClassifier):
    """
    Bagging Classifier
    ==================
    Child class implementing Bagging classifying model.

    Attributes
    ==========
    _estimators - List of classifiers to use in the bagging
    _voting - Soft: Predicts the class label based on the argmax of the sums
              of the predicted probabilities
    data_process - Type of processed data to use in the training and testing process
    """

    def __init__(self, _estimators=None, _voting='soft', data_process=None):
        """
        :param _estimators: (name, estimator) pairs for the voting ensemble;
            ``None`` (the default) builds SVM + gradient boosting + a small
            MLP. BUG FIX: the defaults were previously a mutable default
            argument whose estimator *instances* were created once at class
            definition time, so every Bagging object silently shared (and
            refit) the same SVC/GBoost/MLP objects. Fresh instances are now
            created per Bagging object.
        :param _voting: voting strategy forwarded to VotingClassifier
        :param data_process: type of processed data used for train/test
        """
        if _estimators is None:
            _estimators = [
                ('SVM', SVC(probability=True)),
                ('GBoost', GradientBoostingClassifier()),
                ('fc_relu', MLPClassifier(hidden_layer_sizes=(128, 64), activation='relu')),
            ]
        super().__init__(VotingClassifier(estimators=_estimators, voting=_voting), data_process=data_process)
        self.parameters = {'estimators': _estimators, 'voting': _voting}
        self.param_grid = self.get_param_grid()

    def get_param_grid(self):
        """Return the hyper-parameter grid searched for this ensemble."""
        return {'weights': [[1, 1, 1], [1, 0, 1]]}
| StarcoderdataPython |
3558786 | <gh_stars>1-10
#!/usr/bin/env python3
# # -*- coding: utf-8 -*-
"""
The launcher command available when running the CLI.
Responsible for creating launcher commands.
________________________________________________________________________________
Created by brightSPARK Labs
www.brightsparklabs.com
"""
# standard library
import datetime
import importlib.resources as pkg_resources
import os
# vendor libraries
import click
from jinja2 import StrictUndefined, Template
# local libraries
from appcli import templates
from appcli.commands.appcli_command import AppcliCommand
from appcli.functions import error_and_exit
from appcli.logger import logger
from appcli.models.cli_context import CliContext
from appcli.models.configuration import Configuration
# ------------------------------------------------------------------------------
# CONSTANTS
# ------------------------------------------------------------------------------
LAUNCHER_TEMPLATE_FILENAME = "launcher.j2"
""" The filename of the launcher template """
# ------------------------------------------------------------------------------
# CLASSES
# ------------------------------------------------------------------------------
class LauncherCli:
    """Exposes the ``launcher`` CLI command, which renders the launcher bash
    script from the bundled Jinja2 template and prints it to stdout."""

    # ------------------------------------------------------------------------------
    # CONSTRUCTOR
    # ------------------------------------------------------------------------------

    def __init__(self, configuration: Configuration):
        """Create the command group.

        Args:
            configuration: application configuration used to render the template.
        """
        self.configuration: Configuration = configuration

        @click.command(help="Outputs an appropriate launcher bash script to stdout.")
        @click.pass_context
        def launcher(ctx):
            cli_context: CliContext = ctx.obj
            # Refuse to run unless the configuration directory state allows it.
            cli_context.get_configuration_dir_state().verify_command_allowed(
                AppcliCommand.LAUNCHER
            )

            logger.info("Generating launcher script ...")

            # Get the template from the appcli package
            launcher_template = pkg_resources.read_text(
                templates, LAUNCHER_TEMPLATE_FILENAME
            )
            logger.debug(f"Read template file [{LAUNCHER_TEMPLATE_FILENAME}]")

            render_variables = {
                "app_version": os.environ.get("APP_VERSION", "latest"),
                # CONSISTENCY: go through self.configuration / cli_context
                # everywhere rather than mixing in the closed-over
                # constructor parameter and a second ctx.obj lookup.
                "app_name": self.configuration.app_name.upper(),
                "cli_context": cli_context,
                "configuration": self.configuration,
                "current_datetime": f"{datetime.datetime.utcnow().isoformat()}+00:00",  # Since we're using utcnow(), we specify the offset manually
            }

            logger.debug(
                f"Rendering template with render variables: [{render_variables}]"
            )

            # StrictUndefined makes a missing setting raise instead of
            # silently rendering an empty string.
            template = Template(
                launcher_template,
                undefined=StrictUndefined,
                trim_blocks=True,
                lstrip_blocks=True,
            )

            try:
                output_text = template.render(render_variables)
                print(output_text)
            except Exception as e:
                error_and_exit(
                    f"Could not generate file from template. The configuration file is likely missing a setting: {e}"
                )

        # expose the CLI command
        self.commands = {"launcher": launcher}
| StarcoderdataPython |
237683 | <reponame>yangruihan/excel_parser
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import sys
import xlrd
# Header qualifiers (row 0) that mark an array column group.
LOOP_TYPE_KEYS = ['repeated']
# Header qualifiers (row 0) that mark a struct column group.
STRUCT_TYPE_KEYS = ['required_struct', 'optional_struct']
class ExcelParser:
    """Parser that turns a typed Excel sheet layout into a structure description.

    Each column group is described by header rows:
        row 0: qualifier ('required' / 'optional' / 'repeated' /
               'required_struct' / 'optional_struct'; '*' or empty marks a
               skipped column)
        row 1: declared type, or a numeric element/member count for
               multi-column arrays and structs
        row 2: field name
        row 4: comment

    Each parsed member is a dict with keys:
        name: field name
        type: type name; arrays are prefixed with '[]', structs are
              'struct', struct arrays are '[]struct'
        col: first column of the member
        comment: member comment (prefixed with ' @' when non-empty)
        struct_type (structs only): list of sub-member dicts of this
            same shape
    """

    def parse_all_sheet(self, excel_path: str) -> list:
        """Parse every sheet of the workbook at *excel_path*.

        Returns:
            A list with one structure description (see parse_with_sheet)
            per sheet.
        """
        descriptions = []
        book = xlrd.open_workbook(excel_path)
        for sheet in book.sheets():
            descriptions.append(self.parse_with_sheet(sheet, 0))
        return descriptions

    def parse(self, excel_path: str, sheet_idx: str or int, start_col: int, end_col: int = -1, max_element: int = sys.maxsize) -> list:
        """Parse one sheet of the workbook, selected by index or by name.

        Args:
            excel_path: path to the Excel file.
            sheet_idx: sheet index (int) or sheet name (str).
            start_col: first column to parse.
            end_col: last column to parse; -1 means the sheet's last column.
            max_element: maximum number of top-level members to parse.

        Returns:
            The structure description (see parse_with_sheet), or None when
            the sheet cannot be found (a message is printed in that case,
            preserving the original behaviour).
        """
        book = xlrd.open_workbook(excel_path)
        sheet = None
        if isinstance(sheet_idx, int):
            sheet = book.get_sheet(sheet_idx)
        elif isinstance(sheet_idx, str):
            for candidate in book.sheets():
                if candidate.name.strip() == sheet_idx.strip():
                    sheet = candidate
                    break
        if sheet is None:
            print('Sheet not found')
            return
        return self.parse_with_sheet(sheet, start_col, end_col, max_element)

    def parse_with_sheet(self, sheet: xlrd.sheet.Sheet, start_col: int, end_col: int = -1, max_element: int = sys.maxsize) -> list:
        """Parse *sheet* and return the list of member dicts described in the
        class docstring.

        Args:
            sheet: an xlrd sheet object.
            start_col: first column to parse.
            end_col: last column to parse; -1 means the sheet's last column.
            max_element: maximum number of top-level members to parse.
        """
        if end_col == -1:
            end_col = sheet.ncols - 1
        class_define = []
        i = start_col
        solved_element = 0
        while i <= end_col and solved_element < max_element:
            define, i = self._parse_col(sheet, i)
            if define is not None:  # skipped columns yield no member
                class_define.append(define)
                # BUGFIX: the counter was never incremented before, so the
                # max_element limit had no effect.
                solved_element += 1
        return class_define

    def _parse_col(self, sheet: xlrd.sheet.Sheet, col: int) -> (dict, int):
        """Parse one member starting at *col*.

        Returns:
            A (member_dict, next_col) tuple. member_dict is None for a
            skipped column (BUGFIX: this branch used to return a bare None,
            which raised TypeError in every caller that unpacks the result).
        """
        # Qualifiers are proto-style (required, optional, repeated, ...)
        proto_type, define_type, name, comment = self._get_sheet_data(
            sheet, col)
        if proto_type in LOOP_TYPE_KEYS:
            # Array member. A numeric "type" cell is the repeat count of a
            # multi-column array; otherwise this is a single-column array.
            if isinstance(define_type, (int, float)):
                next_col = self._get_next(sheet, col)
                next_proto_type, next_define_type, next_name, next_comment = self._get_sheet_data(
                    sheet, next_col)
                arr_count = int(define_type)
                if next_proto_type in STRUCT_TYPE_KEYS:
                    # Array of structs: parse one struct's members, then skip
                    # over the remaining (arr_count - 1) repeated copies.
                    struct_element_count = int(next_define_type)
                    ret = {
                        'name': next_name,
                        'type': '[]struct',
                        'col': next_col,
                        'struct_type': [],
                        'comment': next_comment
                    }
                    temp_col = next_col + 1
                    handle_count = 0
                    while handle_count < struct_element_count:
                        sub_type, temp_col = self._parse_col(sheet, temp_col)
                        if sub_type is not None:
                            ret['struct_type'].append(sub_type)
                            handle_count += 1
                    item_cnt = temp_col - col - 2  # columns used by one struct instance
                    return ret, self._get_next(sheet, col + 1 + item_cnt * arr_count)
                else:
                    ret = {
                        'name': next_name,
                        'type': f'[]{next_define_type}',
                        'col': next_col,
                        'comment': next_comment
                    }
                    return ret, self._get_next(sheet, col + arr_count)
            else:
                # Single-column array
                return {
                    'name': name,
                    'type': f'[]{define_type}',
                    'col': col,
                    'comment': comment
                }, self._get_next(sheet, col)
        elif proto_type in STRUCT_TYPE_KEYS:
            struct_element_count = int(define_type)
            ret = {
                'name': name,
                'type': 'struct',
                'struct_type': [],
                'col': col,
                'comment': comment
            }
            temp_col = col + 1
            handle_count = 0
            # BUGFIX: the loop condition was `<=`, which parsed one member
            # more than declared. The array-of-structs branch above uses `<`
            # for the same count, and its column-skipping arithmetic relies
            # on exactly struct_element_count members being consumed.
            while handle_count < struct_element_count:
                sub_type, temp_col = self._parse_col(sheet, temp_col)
                if sub_type is not None:
                    ret['struct_type'].append(sub_type)
                    handle_count += 1
            return ret, temp_col
        elif self._is_skip_col(proto_type):
            # Skipped column: no member; advance to the next parseable column.
            return None, self._get_next(sheet, col)
        else:
            # Plain scalar member
            return {
                'name': name,
                'type': define_type,
                'col': col,
                'comment': comment
            }, self._get_next(sheet, col)

    def _get_sheet_data(self, sheet: xlrd.sheet.Sheet, col: int) -> (str, str, str, str):
        """Read the header cells of *col*: qualifier (row 0), declared type
        (row 1), field name (row 2) and comment (row 4). Newlines are
        stripped from the comment and ' @' is prepended when non-empty."""
        proto_type = sheet.cell_value(0, col)
        define_type = sheet.cell_value(1, col)  # declared type (or count)
        name = sheet.cell_value(2, col)  # field name
        comment = str(sheet.cell_value(4, col)).replace(
            '\n', '').replace('\r', '')  # comment
        if comment != '':
            comment = f' @{comment}'
        return proto_type, define_type, name, comment

    def _get_next(self, sheet: xlrd.sheet.Sheet, col: int, max_col: int = -1) -> int:
        """Return the next non-skipped column after *col*, clamped to
        *max_col* (the sheet's column count by default).

        The parameter was renamed from ``max`` to avoid shadowing the
        builtin; all internal calls pass it positionally.
        """
        col += 1
        if max_col == -1:
            max_col = sheet.ncols
        while col < max_col and self._is_skip_col(sheet.cell_value(0, col)):
            col += 1
        return min(col, max_col)

    def _is_skip_col(self, proto_type: str) -> bool:
        """A column is skipped when its qualifier cell is '*' or empty."""
        return proto_type.strip() in ('*', '')
| StarcoderdataPython |
4941668 | <reponame>terrorizer1980/Dashboard-api
from tests import tools
from tests.tools import CLIENT_URL
from Utils.Responses import bad_oauth_response
# Shared HTTP test client carrying no auth credentials, used by all tests below.
noauth_client = tools.generate_noauth_client()
def test_oauth_redir():
    """The login endpoint must redirect to Discord with a matching state key."""
    response = noauth_client.request("GET", "/api/discord/login", allow_redirects=False)
    # A temporary redirect is expected
    assert response.status_code == 307
    # The state key cookie must be set and echoed inside the redirect URL
    assert "state_key" in response.cookies
    redirect_params = str(response.headers["location"]).split("&")
    assert response.cookies["state_key"] in redirect_params[1]
def test_oauth_callback():
    """Verify the callback endpoint's protections (not the full OAuth flow)."""
    # A request without any state key must be rejected
    response = noauth_client.request("GET", "/api/discord/callback", allow_redirects=False)
    tools.assure_identical_response(response, bad_oauth_response)
    # A request with a state key but no OAuth code must also be rejected
    state_key = "spooky"
    response = noauth_client.request(
        "GET",
        f"/api/discord/callback?state={state_key}",
        allow_redirects=False,
        cookies={"state_key": state_key},
    )
    tools.assure_identical_response(response, bad_oauth_response)
    # A user-denied OAuth attempt should bounce back to the client URL
    response = noauth_client.request("GET", "/api/discord/callback?error=denied", allow_redirects=False)
    assert response.status_code == 302
    assert response.headers["location"] == CLIENT_URL
| StarcoderdataPython |
1862882 | <gh_stars>0
from .config import get_config
from .libs.auth import jwt, oauth, register_providers
from .libs.cache import cache
from .models import models # noqa: F401
from .models.common.db import db
from .resources import (
AuditAPI,
HealthCheckAPI,
LoginAPI,
RegisterAPI,
UserAPI,
OAuthLoginAPI,
OAuthAuthorizeAPI,
YelpAPI,
BestTimeAPI
)
from flask import Flask
from flask_restful import Api
from flask_migrate import Migrate as DBMigrate
from flask_cors import CORS
# Initialize the Flask application
app = Flask(__name__)
# Configure it from the environment-selected config object
app.config.from_object(get_config())
# Initialize the REST API; all resources are served under /api
api = Api(app, prefix='/api')
# Initialize the SQLAlchemy extension
db.init_app(app)
# Initialize the Alembic migration extension
db_migrate = DBMigrate(app, db)
# Initialize the JWT login-manager extension
jwt.init_app(app)
# Initialize the OAuth extension and register the configured providers
oauth.init_app(app)
register_providers()
# Initialize the Redis cache extension
cache.init_app(app)
# Initialize CORS (credentials allowed for cookie-based auth)
CORS(app, supports_credentials=True)
# Attach API resources to routes
api.add_resource(AuditAPI, '/auditlog')
api.add_resource(HealthCheckAPI, '/healthcheck')
api.add_resource(LoginAPI, '/auth/login')
api.add_resource(RegisterAPI, '/auth/register')
api.add_resource(UserAPI, "/auth/user")
api.add_resource(OAuthLoginAPI, '/auth/oauth/login')
api.add_resource(OAuthAuthorizeAPI, '/auth/oauth/authorize')
api.add_resource(YelpAPI, '/yelp')
api.add_resource(BestTimeAPI, '/besttime')
| StarcoderdataPython |
13541 | __author__ = "<NAME>"
__copyright__ = "Copyright 2015-2019, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
import os
import shutil
import signal
import marshal
import pickle
import json
import time
from base64 import urlsafe_b64encode, b64encode
from functools import lru_cache, partial
from itertools import filterfalse, count
from pathlib import Path
from snakemake.logging import logger
from snakemake.jobs import jobfiles
from snakemake.utils import listfiles
class Persistence:
    """Manages the on-disk ``.snakemake`` state directory: working-directory
    locks, per-output-file metadata records, "incomplete" markers, and the
    conda / container / shadow / auxiliary sub-directories."""

    def __init__(
        self,
        nolock=False,
        dag=None,
        conda_prefix=None,
        singularity_prefix=None,
        shadow_prefix=None,
        warn_only=False,
    ):
        """Create the ``.snakemake`` directory tree in the current working
        directory and configure locking behaviour.

        Args:
            nolock: if True, lock()/unlock() become no-ops.
            dag: the workflow DAG whose jobs and files are tracked.
            conda_prefix: optional custom location for conda environments.
            singularity_prefix: optional custom location for container images.
            shadow_prefix: optional parent directory for the shadow directory.
            warn_only: if True, lock() only logs instead of raising.
        """
        self.path = os.path.abspath(".snakemake")
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        self._lockdir = os.path.join(self.path, "locks")
        if not os.path.exists(self._lockdir):
            os.mkdir(self._lockdir)
        self.dag = dag
        self._lockfile = dict()
        self._metadata_path = os.path.join(self.path, "metadata")
        self._incomplete_path = os.path.join(self.path, "incomplete")
        self.conda_env_archive_path = os.path.join(self.path, "conda-archive")
        self.benchmark_path = os.path.join(self.path, "benchmarks")
        if conda_prefix is None:
            self.conda_env_path = os.path.join(self.path, "conda")
        else:
            self.conda_env_path = os.path.abspath(os.path.expanduser(conda_prefix))
        if singularity_prefix is None:
            self.container_img_path = os.path.join(self.path, "singularity")
        else:
            self.container_img_path = os.path.abspath(
                os.path.expanduser(singularity_prefix)
            )
        if shadow_prefix is None:
            self.shadow_path = os.path.join(self.path, "shadow")
        else:
            self.shadow_path = os.path.join(shadow_prefix, "shadow")
        # place to store any auxiliary information needed during a run (e.g. source tarballs)
        self.aux_path = os.path.join(self.path, "auxiliary")
        # migration of .snakemake folder structure; the indicator file makes
        # an interrupted migration resume on the next start
        migration_indicator = Path(
            os.path.join(self._incomplete_path, "migration_underway")
        )
        if (
            os.path.exists(self._metadata_path)
            and not os.path.exists(self._incomplete_path)
        ) or migration_indicator.exists():
            os.makedirs(self._incomplete_path, exist_ok=True)
            migration_indicator.touch()
            self.migrate_v1_to_v2()
            migration_indicator.unlink()
        self._incomplete_cache = None
        for d in (
            self._metadata_path,
            self._incomplete_path,
            self.shadow_path,
            self.conda_env_archive_path,
            self.conda_env_path,
            self.container_img_path,
            self.aux_path,
        ):
            os.makedirs(d, exist_ok=True)
        if nolock:
            self.lock = self.noop
            self.unlock = self.noop
        if warn_only:
            self.lock = self.lock_warn_only
            self.unlock = self.noop
        self._read_record = self._read_record_cached

    def migrate_v1_to_v2(self):
        """Copy records flagged ``incomplete`` from the metadata folder into
        the separate incomplete folder (v1 -> v2 layout migration)."""
        logger.info("Migrating .snakemake folder to new format...")
        i = 0
        for path, _, filenames in os.walk(self._metadata_path):
            path = Path(path)
            for filename in filenames:
                with open(path / filename, "r") as f:
                    try:
                        record = json.load(f)
                    except json.JSONDecodeError:
                        continue  # not a properly formatted JSON file
                if record.get("incomplete", False):
                    target_path = Path(self._incomplete_path) / path.relative_to(
                        self._metadata_path
                    )
                    os.makedirs(target_path, exist_ok=True)
                    shutil.copyfile(
                        path / filename,
                        target_path / filename,
                    )
                    i += 1
                    # this can take a while for large folders...
                    if (i % 10000) == 0 and i > 0:
                        logger.info("{} files migrated".format(i))
        logger.info("Migration complete")

    @property
    def files(self):
        """Lazily computed set of all DAG output files.

        NOTE(review): ``self._files`` is never initialized in ``__init__``
        as shown here, so the first access looks like it would raise
        AttributeError — confirm against the full file.
        """
        if self._files is None:
            self._files = set(self.dag.output_files)
        return self._files

    @property
    def locked(self):
        """Whether any existing lock file references this DAG's input/output
        files (input locks conflict with our outputs; output locks conflict
        with both our inputs and outputs)."""
        inputfiles = set(self.all_inputfiles())
        outputfiles = set(self.all_outputfiles())
        if os.path.exists(self._lockdir):
            for lockfile in self._locks("input"):
                with open(lockfile) as lock:
                    for f in lock:
                        f = f.strip()
                        if f in outputfiles:
                            return True
            for lockfile in self._locks("output"):
                with open(lockfile) as lock:
                    for f in lock:
                        f = f.strip()
                        if f in outputfiles or f in inputfiles:
                            return True
        return False

    def lock_warn_only(self):
        """Variant of lock() that only logs when the directory is locked."""
        if self.locked:
            logger.info(
                "Error: Directory cannot be locked. This usually "
                "means that another Snakemake instance is running on this directory. "
                "Another possibility is that a previous run exited unexpectedly."
            )

    def lock(self):
        """Lock the working directory; raises IOError if already locked."""
        if self.locked:
            raise IOError("Another snakemake process " "has locked this directory.")
        self._lock(self.all_inputfiles(), "input")
        self._lock(self.all_outputfiles(), "output")

    def unlock(self, *args):
        """Remove the lock files created by this instance."""
        logger.debug("unlocking")
        for lockfile in self._lockfile.values():
            try:
                logger.debug("removing lock")
                os.remove(lockfile)
            except OSError as e:
                if e.errno != 2:  # missing file
                    raise e
        logger.debug("removed all locks")

    def cleanup_locks(self):
        """Delete the whole lock directory.

        NOTE(review): the directory is not recreated afterwards — presumably
        only used when no further locking will happen; confirm before reuse.
        """
        shutil.rmtree(self._lockdir)

    def cleanup_metadata(self, path):
        """Drop the metadata record for a single output file *path*."""
        self._delete_record(self._metadata_path, path)

    def cleanup_shadow(self):
        """Delete and recreate the (empty) shadow directory."""
        if os.path.exists(self.shadow_path):
            shutil.rmtree(self.shadow_path)
            os.mkdir(self.shadow_path)

    def conda_cleanup_envs(self):
        """Remove conda environments and archives not used by the current DAG."""
        # cleanup envs
        in_use = set(env.hash[:8] for env in self.dag.conda_envs.values())
        for d in os.listdir(self.conda_env_path):
            if len(d) >= 8 and d[:8] not in in_use:
                if os.path.isdir(os.path.join(self.conda_env_path, d)):
                    shutil.rmtree(os.path.join(self.conda_env_path, d))
                else:
                    os.remove(os.path.join(self.conda_env_path, d))
        # cleanup env archives
        in_use = set(env.content_hash for env in self.dag.conda_envs.values())
        for d in os.listdir(self.conda_env_archive_path):
            if d not in in_use:
                shutil.rmtree(os.path.join(self.conda_env_archive_path, d))

    def started(self, job, external_jobid=None):
        """Mark each output file of *job* as in-progress (incomplete)."""
        for f in job.output:
            self._record(
                self._incomplete_path,
                {"external_jobid": external_jobid},
                f,
            )

    def finished(self, job, keep_metadata=True):
        """Write the metadata record for each output of a finished *job* and
        clear its incomplete markers. With keep_metadata=False only the
        incomplete markers are removed."""
        if not keep_metadata:
            for f in job.expanded_output:
                self._delete_record(self._incomplete_path, f)
            return

        version = str(job.rule.version) if job.rule.version is not None else None
        code = self._code(job.rule)
        input = self._input(job)
        log = self._log(job)
        params = self._params(job)
        shellcmd = job.shellcmd
        conda_env = self._conda_env(job)
        fallback_time = time.time()
        for f in job.expanded_output:
            rec_path = self._record_path(self._incomplete_path, f)
            # the incomplete marker's mtime doubles as the job start time
            starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
            # Sometimes finished is called twice, if so, lookup the previous starttime
            if not os.path.exists(rec_path):
                starttime = self._read_record(self._metadata_path, f).get(
                    "starttime", None
                )
            endtime = f.mtime.local_or_remote() if f.exists else fallback_time
            self._record(
                self._metadata_path,
                {
                    "version": version,
                    "code": code,
                    "rule": job.rule.name,
                    "input": input,
                    "log": log,
                    "params": params,
                    "shellcmd": shellcmd,
                    "incomplete": False,
                    "starttime": starttime,
                    "endtime": endtime,
                    "job_hash": hash(job),
                    "conda_env": conda_env,
                    "container_img_url": job.container_img_url,
                },
                f,
            )
            self._delete_record(self._incomplete_path, f)

    def cleanup(self, job):
        """Drop all incomplete and metadata records for *job*'s outputs."""
        for f in job.expanded_output:
            self._delete_record(self._incomplete_path, f)
            self._delete_record(self._metadata_path, f)

    def incomplete(self, job):
        """Whether any existing output of *job* carries an incomplete marker."""
        if self._incomplete_cache is None:
            self._cache_incomplete_folder()

        if self._incomplete_cache is False:  # cache deactivated

            def marked_incomplete(f):
                return self._exists_record(self._incomplete_path, f)

        else:

            def marked_incomplete(f):
                rec_path = self._record_path(self._incomplete_path, f)
                return rec_path in self._incomplete_cache

        return any(map(lambda f: f.exists and marked_incomplete(f), job.output))

    def _cache_incomplete_folder(self):
        """Snapshot all incomplete-marker paths into an in-memory set."""
        self._incomplete_cache = {
            os.path.join(path, f)
            for path, dirnames, filenames in os.walk(self._incomplete_path)
            for f in filenames
        }

    def external_jobids(self, job):
        """Distinct external job ids recorded for *job*'s output files."""
        return list(
            set(
                self._read_record(self._incomplete_path, f).get("external_jobid", None)
                for f in job.output
            )
        )

    def metadata(self, path):
        """Full metadata record (dict) for output file *path*."""
        return self._read_record(self._metadata_path, path)

    def version(self, path):
        """Recorded rule version for *path*, if any."""
        return self.metadata(path).get("version")

    def rule(self, path):
        """Recorded rule name for *path*, if any."""
        return self.metadata(path).get("rule")

    def input(self, path):
        """Recorded input files for *path*, if any."""
        return self.metadata(path).get("input")

    def log(self, path):
        """Recorded log files for *path*, if any."""
        return self.metadata(path).get("log")

    def shellcmd(self, path):
        """Recorded shell command for *path*, if any."""
        return self.metadata(path).get("shellcmd")

    def params(self, path):
        """Recorded params for *path*, if any."""
        return self.metadata(path).get("params")

    def code(self, path):
        """Recorded (serialized) rule code for *path*, if any."""
        return self.metadata(path).get("code")

    def version_changed(self, job, file=None):
        """Yields output files with changed versions of bool if file given."""
        return _bool_or_gen(self._version_changed, job, file=file)

    def code_changed(self, job, file=None):
        """Yields output files with changed code of bool if file given."""
        return _bool_or_gen(self._code_changed, job, file=file)

    def input_changed(self, job, file=None):
        """Yields output files with changed input of bool if file given."""
        return _bool_or_gen(self._input_changed, job, file=file)

    def params_changed(self, job, file=None):
        """Yields output files with changed params of bool if file given."""
        return _bool_or_gen(self._params_changed, job, file=file)

    def _version_changed(self, job, file=None):
        assert file is not None
        return self.version(file) != job.rule.version

    def _code_changed(self, job, file=None):
        assert file is not None
        return self.code(file) != self._code(job.rule)

    def _input_changed(self, job, file=None):
        assert file is not None
        return self.input(file) != self._input(job)

    def _params_changed(self, job, file=None):
        assert file is not None
        return self.params(file) != self._params(job)

    def noop(self, *args):
        """Do nothing; substituted for lock/unlock when locking is disabled."""
        pass

    def _b64id(self, s):
        """URL-safe base64 encoding of str(s)."""
        return urlsafe_b64encode(str(s).encode()).decode()

    # NOTE(review): lru_cache on bound methods keeps the method's arguments
    # (and self) alive for the lifetime of the cache — appears intentional
    # for a single workflow run; confirm.
    @lru_cache()
    def _code(self, rule):
        """Base64-encoded pickled bytecode of the rule's run function."""
        code = rule.run_func.__code__
        return b64encode(pickle_code(code)).decode()

    @lru_cache()
    def _conda_env(self, job):
        """Base64-encoded conda environment content of *job*, if any."""
        if job.conda_env:
            return b64encode(job.conda_env.content).decode()

    @lru_cache()
    def _input(self, job):
        return sorted(job.input)

    @lru_cache()
    def _log(self, job):
        return sorted(job.log)

    @lru_cache()
    def _params(self, job):
        return sorted(map(repr, job.params))

    @lru_cache()
    def _output(self, job):
        return sorted(job.output)

    def _record(self, subject, json_value, id):
        """Write *json_value* as the record for *id* under *subject*."""
        recpath = self._record_path(subject, id)
        os.makedirs(os.path.dirname(recpath), exist_ok=True)
        with open(recpath, "w") as f:
            json.dump(json_value, f)

    def _delete_record(self, subject, id):
        """Remove the record for *id* under *subject*, pruning empty dirs."""
        try:
            recpath = self._record_path(subject, id)
            os.remove(recpath)
            recdirs = os.path.relpath(os.path.dirname(recpath), start=subject)
            if recdirs != ".":
                # NOTE(review): recdirs is relative to `subject` but is
                # resolved against the CWD here — looks suspicious; confirm.
                os.removedirs(recdirs)
        except OSError as e:
            if e.errno != 2:  # not missing
                raise e

    @lru_cache()
    def _read_record_cached(self, subject, id):
        return self._read_record_uncached(subject, id)

    def _read_record_uncached(self, subject, id):
        """Load the JSON record for *id* under *subject* ({} if missing)."""
        if not self._exists_record(subject, id):
            return dict()
        with open(self._record_path(subject, id), "r") as f:
            return json.load(f)

    def _exists_record(self, subject, id):
        return os.path.exists(self._record_path(subject, id))

    def _locks(self, type):
        """Iterate existing '<n>.<type>.lock' files in the lock directory."""
        return (
            f
            for f, _ in listfiles(
                os.path.join(self._lockdir, "{{n,[0-9]+}}.{}.lock".format(type))
            )
            if not os.path.isdir(f)
        )

    def _lock(self, files, type):
        """Write *files* into the first free '<n>.<type>.lock' file."""
        for i in count(0):
            lockfile = os.path.join(self._lockdir, "{}.{}.lock".format(i, type))
            if not os.path.exists(lockfile):
                self._lockfile[type] = lockfile
                with open(lockfile, "w") as lock:
                    print(*files, sep="\n", file=lock)
                return

    def _record_path(self, subject, id):
        """Filesystem path of the record for *id*: the base64 of the id,
        split into filename-length chunks ('@'-prefixed dirs for all but
        the last chunk, to avoid clashing with record files)."""
        max_len = (
            os.pathconf(subject, "PC_NAME_MAX") if os.name == "posix" else 255
        )  # maximum NTFS and FAT32 filename length
        if max_len == 0:
            max_len = 255
        b64id = self._b64id(id)
        # split into chunks of proper length
        b64id = [b64id[i : i + max_len - 1] for i in range(0, len(b64id), max_len - 1)]
        # prepend dirs with @ (does not occur in b64) to avoid conflict with b64-named files in the same dir
        b64id = ["@" + s for s in b64id[:-1]] + [b64id[-1]]
        path = os.path.join(subject, *b64id)
        return path

    def all_outputfiles(self):
        # we only look at output files that will be updated
        return jobfiles(self.dag.needrun_jobs, "output")

    def all_inputfiles(self):
        # we consider all input files, also of not running jobs
        return jobfiles(self.dag.jobs, "input")

    def deactivate_cache(self):
        """Disable record/incomplete caching (e.g. when files may change)."""
        self._read_record_cached.cache_clear()
        self._read_record = self._read_record_uncached
        self._incomplete_cache = False
def _bool_or_gen(func, job, file=None):
if file is None:
return (f for f in job.expanded_output if func(job, file=f))
else:
return func(job, file=file)
def pickle_code(code):
    """Serialize the stable parts of a code object (bytecode, variable names,
    constants, global names), recursing into nested code constants."""
    code_type = type(code)
    frozen_consts = []
    for const in code.co_consts:
        frozen_consts.append(
            pickle_code(const) if isinstance(const, code_type) else const
        )
    return pickle.dumps((code.co_code, code.co_varnames, frozen_consts, code.co_names))
| StarcoderdataPython |
109424 | """ Hello world in python
"""
def helloworld():
    """Print "Hello world!" to stdout.

    Uses the function-call form of print, which behaves identically under
    Python 2 and Python 3 (the original `print` statement is a syntax
    error on Python 3).
    """
    print("Hello world!")
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    helloworld()
| StarcoderdataPython |
1709790 | ## QUESTION
"""
Given an array Arr of size N, swap the Kth element from beginning with Kth element from end.
Example 1:
Input:
N = 8, K = 3
Arr[] = {1, 2, 3, 4, 5, 6, 7, 8}
Output: 1 2 6 4 5 3 7 8
Explanation: Kth element from beginning is
3 and from end is 6.
Example 2:
Input:
N = 5, K = 2
Arr[] = {5, 3, 6, 1, 2}
Output: 5 1 6 3 2
Explanation: Kth element from beginning is
3 and from end is 1.
Your Task:
You don't need to read input or print anything. Your task is to complete the function swapKth() which takes the array of integers arr, n and k as parameters and returns void. You have to modify the array itself.
Expected Time Complexity: O(1)
Expected Auxiliary Space: O(1)
"""
## SOLUTION:
#User function Template for python3
class Solution:
    def swapKth(self, arr, n, k):
        """Swap the k-th element from the beginning of *arr* with the k-th
        element from the end, in place.

        Args:
            arr: list to modify (mutated in place).
            n: length of arr.
            k: 1-based position counted from both ends.

        Returns:
            The same (mutated) list, preserving the original return value.
        """
        # CLEANUP: removed the unused locals (start1, end1) and the manual
        # temp-variable swap; a tuple assignment swaps the 0-based indices
        # k-1 (from the front) and n-k (from the back) in O(1).
        arr[k - 1], arr[n - k] = arr[n - k], arr[k - 1]
        return arr
#{
# Driver Code Starts
#Initial Template for Python 3

# Reads from stdin: the number of test cases, then for each case a line
# with "n k" and a line with the n array values; prints the swapped array.
if __name__ == '__main__':
    tc = int(input())
    while tc > 0:
        n, k = list(map(int, input().strip().split()))
        arr = list(map(int, input().strip().split()))
        ob = Solution()
        ob.swapKth(arr, n, k)
        # Print the modified array, space separated.
        for x in arr:
            print(x, end=" ")
        print()
        tc -= 1
# } Driver Code Ends
| StarcoderdataPython |
6555986 | import argparse
from functools import partial
import json
from pathlib import Path
from wsgiref import simple_server
from . import __version__ as version
from .handler import FeedifyHandler
# Local cache directory for fetched feeds; created on first run.
feeds_file_cache = Path('.feeds')
if not feeds_file_cache.is_dir():
    feeds_file_cache.mkdir(0o755)
# Command-line interface.
parser = argparse.ArgumentParser(description='feedify: a command line website-to-RSS application.')
parser.add_argument('--config', dest='config_file', type=str, default='config.json',
                    help='A user-supplied configuration file.')
parser.add_argument('--user_agent', dest='user_agent', type=str, default=f'feedify/{version}',
                    help='Provide a custom user-agent')
parser.add_argument('--debug', dest='debug', action='store_true',
                    help='Run feedify in debug mode.')
parser.add_argument('--version', action='version', version='feedify {}'.format(version))
args = parser.parse_args()
# Load the optional JSON config file, then overlay the CLI options on top.
config = {}
config_path = Path(args.config_file).expanduser()
if config_path.exists():
    with open(config_path, 'rb') as f:
        config = json.load(f)
config.update({
    'debug': args.debug,
    'user_agent': args.user_agent
})
# Serve forever on port 8000; each request handler is bound to the merged config.
server = simple_server.WSGIServer(('', 8000), partial(FeedifyHandler, config))
server.serve_forever()
| StarcoderdataPython |
1635882 | import arcade
from fish import ComputerFish, Fish
import random
import math
import abc
class FishGenerator(abc.ABC):
    """Abstract base for objects that spawn fish into the game over time."""
    @abc.abstractmethod
    def update(self,delta_t):
        """Advance the generator by delta_t seconds, spawning fish as needed."""
        pass
class RandomFishGenerator(FishGenerator):
    """Spawns fish with random size, speed and position at a fixed rate."""
    generation_rate: float  # seconds between consecutive spawns
    game_object: "MyGame"
    generation_timer: float  # timer until next fish generation in seconds

    def __init__(self, generation_rate, game_object,
                 min_fish_size, max_fish_size, min_fish_speed, max_fish_speed):
        self.generation_rate = generation_rate
        self.game_object = game_object
        self.generation_timer = 2  # initial delay before the first spawn
        self.min_fish_size = min_fish_size
        self.max_fish_size = max_fish_size
        self.min_fish_speed = min_fish_speed
        self.max_fish_speed = max_fish_speed

    def update(self, delta_t):
        """Advance by delta_t seconds, spawning one fish per elapsed period.

        BUGFIX: the old loop subtracted delta_t once per *spawned fish*
        (inside the loop) instead of once per call, so a single update could
        drain the timer much faster than real time whenever it went
        non-positive. Now delta_t is applied exactly once.
        """
        self.generation_timer -= delta_t
        while self.generation_timer <= 0:
            self.generation_timer += self.generation_rate
            self.generate_random_fish()

    def generate_random_fish(self):
        """Create one ComputerFish with random attributes and append it to
        the game's fish sprite list."""
        screen_height = self.game_object.height
        screen_width = self.game_object.width
        # roll size: 20% chance of a fish derived from the player's size,
        # otherwise log-uniform between the configured bounds
        if random.choices([True, False], (0.2, 0.8))[0]:
            # make player sized fish
            new_fish_size = min(self.game_object.player_fish.size * 0.8, self.max_fish_size / 2)
        else:
            new_fish_size = math.exp(random.uniform(math.log(self.min_fish_size), math.log(self.max_fish_size)))
        # roll speed
        new_fish_speed = random.uniform(self.min_fish_speed, self.max_fish_speed)
        # roll x_pos: spawn just off-screen on a random side, facing inward
        fish_width = ComputerFish.get_size_upper_limmit(scale=new_fish_size)[0]
        new_fish_is_on_left = random.choice([True, False])
        new_fish_x_pos = -fish_width / 2 if new_fish_is_on_left else screen_width + fish_width / 2
        # roll y_pos through a monotone cubic remap of uniform [0, 1]
        # (appears to bias spawns toward mid-screen — confirm intent)
        y_pos_probability_density_reshaper = lambda x: 3 * x - 6 * x ** 2 + 4 * x ** 3
        new_fish_y_pos = y_pos_probability_density_reshaper(random.uniform(0, 1)) * screen_height
        new_fish = ComputerFish(game_object=self.game_object, is_facing_right=new_fish_is_on_left,
                                x_pos=new_fish_x_pos, y_pos=new_fish_y_pos, size=new_fish_size,
                                speed=new_fish_speed)
        self.game_object.fish_sprites.append(new_fish)
class WaveFishGenerator(FishGenerator):
    """Spawns a full-screen-height wave of identical fish at a fixed rate."""
    generation_rate: float  # seconds between consecutive waves
    game_object: "MyGame"
    generation_timer: float  # timer until next fish generation in seconds

    def __init__(self, generation_rate, game_object,
                 fish_size=0.1, fish_speed=1000):
        self.generation_rate = generation_rate
        self.game_object = game_object
        self.generation_timer = 2  # initial delay before the first wave
        self.fish_size = fish_size
        self.fish_speed = fish_speed

    def update(self, delta_t):
        """Advance by delta_t seconds, spawning one wave per elapsed period.

        BUGFIX: as in RandomFishGenerator, delta_t used to be subtracted
        once per spawned wave inside the loop; it is now applied exactly
        once per call.
        """
        self.generation_timer -= delta_t
        while self.generation_timer <= 0:
            self.generation_timer += self.generation_rate
            self.generate_wave_of_fish()

    def generate_wave_of_fish(self):
        """Spawn a vertical column of fish (every 30 px) at the right edge,
        moving left across the screen."""
        screen_height = self.game_object.height
        screen_width = self.game_object.width
        for new_fish_y_pos in range(0, screen_height, 30):
            # all wave fish start at the right edge, facing left
            new_fish_is_on_left = False
            new_fish_x_pos = screen_width
            new_fish_size = self.fish_size
            new_fish_speed = self.fish_speed
            new_fish = ComputerFish(game_object=self.game_object, is_facing_right=new_fish_is_on_left,
                                    x_pos=new_fish_x_pos, y_pos=new_fish_y_pos, size=new_fish_size,
                                    speed=new_fish_speed)
            self.game_object.fish_sprites.append(new_fish)
8159793 | from typing import List, Tuple, Optional, Union, Any
import matplotlib as mpl
import matplotlib.axes
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection, LineCollection
from matplotlib.colors import Colormap
from natsort import natsorted
from milkviz.utils import set_cbar, set_ticks, set_spines, \
color_mapper_cat, rotate_points, set_category_legend, color_mapper_val, doc, create_cmap, normalize
@doc
def point_map(
        x: Union[List[float], np.ndarray],
        y: Union[List[float], np.ndarray],
        types: Union[List[str], np.ndarray, None] = None,
        values: Union[List[float], np.ndarray, None] = None,
        links: Union[List[Tuple[int, int]], np.ndarray, None] = None,
        vmin: Optional[float] = None,
        vmax: Optional[float] = None,
        colors: Optional[List[Any]] = None,
        cmap: Union[str, Colormap] = None,
        legend_title: Optional[str] = None,
        rotate: Optional[int] = None,
        markersize: Optional[int] = 5,
        linecolor: Union[str, List[str]] = "#cccccc",
        linewidth: int = 1,
        no_spines: bool = True,
        ax: Optional[mpl.axes.Axes] = None,
):
    """Point map
    Args:
        x: The x-coord array
        y: The y-coord array
        types: [types] of points
        values: [values] of points
        links: The links between points, should be a list of (point_index_1, point_index_2)
        colors: [hue]
        cmap: [cmap]
        legend_title: [legend_title]
        rotate: The degree to rotate the whole plot according to origin
        markersize: The size of marker
        linecolor: The color of lines
        linewidth: The width of lines
        no_spines: [no_spines]
        ax: [ax]
    Returns:
        [return_obj]
    """
    if ax is None:
        ax = plt.gca()
    if no_spines:
        set_spines(ax)
    set_ticks(ax)
    ax.set_aspect("equal")
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    if rotate is not None:
        x, y = rotate_points(x, y, (0, 0), rotate)
    if links is not None:
        if not isinstance(linecolor, str):
            if len(linecolor) != len(links):
                raise ValueError("Length of linecolor must match to links")
        # draw link segments below the points (negative zorder)
        lines = [[(x[i1], y[i1]), (x[i2], y[i2])] for i1, i2 in links]
        line_collections = LineCollection(lines, linewidths=linewidth, edgecolors=linecolor, zorder=-100)
        ax.add_collection(line_collections)
    if types is not None:
        # categorical branch: one color per type plus a category legend
        cmap = "tab20" if cmap is None else cmap
        legend_title = "type" if legend_title is None else legend_title
        if colors is not None:
            cmapper = color_mapper_cat(types, c_array=colors)
            color_array = colors
        else:
            cmapper = color_mapper_cat(types, cmap=cmap)
            color_array = [cmapper[t] for t in types]
        # Fix: scatter on the requested Axes; plt.scatter drew on whatever
        # Axes happened to be "current", ignoring a user-supplied `ax`.
        ax.scatter(x=x, y=y, c=color_array, s=markersize)
        set_category_legend(ax, cmapper, (1.05, 0, 1, 1), legend_title)
    else:
        # continuous branch: value-mapped colors plus a colorbar
        cmap = "OrRd" if cmap is None else cmap
        legend_title = "value" if legend_title is None else legend_title
        values = normalize(values, vmin, vmax)
        if colors is not None:
            vc_mapper = dict(zip(values, colors))
            cmap = create_cmap([vc_mapper[v] for v in sorted(values)])
        # Fix: same plt.scatter -> ax.scatter as above.
        p = ax.scatter(x=x, y=y, c=values, s=markersize, cmap=cmap)
        cmin = np.nanmin(values)
        cmax = np.nanmax(values)
        set_cbar(ax, p, (1.07, 0, 0.1, 0.3), legend_title, cmin, cmax)
    return ax
@doc
def polygon_map(
        polygons: List[List[Tuple[float, float]]],
        types: Union[List[str], np.ndarray, None] = None,
        values: Union[List[float], np.ndarray, None] = None,
        colors: Optional[List[Any]] = None,
        cmap: Union[str, Colormap] = None,
        legend_title: Optional[str] = None,
        rotate: Optional[int] = None,
        no_spines: bool = True,
        ax: Optional[mpl.axes.Axes] = None,
):
    """Polygon map
    Args:
        polygons: A list of polygons, a polygon is represented by a list of points
        types: [types] of polygons
        values: [values] of polygons
        colors: [hue]
        cmap: [cmap]
        legend_title: [legend_title]
        rotate: The degree to rotate the whole plot according to origin
        no_spines: [no_spines]
        ax: [ax]
    Returns:
        [return_obj]
    """
    polygons = [np.array(polygon) for polygon in polygons]
    # overall bounding box (computed before rotation, as in the original flow)
    vstack_poly = np.vstack(polygons)
    xmin, ymin = np.min(vstack_poly, axis=0)
    xmax, ymax = np.max(vstack_poly, axis=0)
    if rotate is not None:
        rotated_polygons = []
        for polygon in polygons:
            x, y = rotate_points(polygon[:, 0], polygon[:, 1], (0, 0), rotate)
            rotated_polygons.append([(i, j) for i, j in zip(x, y)])
        polygons = rotated_polygons
    if ax is None:
        ax = plt.gca()
    if no_spines:
        set_spines(ax)
    set_ticks(ax)
    ax.set_aspect("equal")
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    if types is not None:
        # categorical branch: one facecolor per polygon type + category legend
        cmap = "tab20" if cmap is None else cmap
        legend_title = "type" if legend_title is None else legend_title
        # (removed an unused `uni_types = natsorted(np.unique(types))` local)
        if colors is not None:
            cmapper = color_mapper_cat(types, c_array=colors)
            color_array = colors
        else:
            cmapper = color_mapper_cat(types, cmap=cmap)
            color_array = [cmapper[t] for t in types]
        patches = [mpatches.Polygon(polygon) for polygon in polygons]
        # reuse color_array instead of recomputing cmapper[t] per polygon
        patches_collections = PatchCollection(patches, facecolors=color_array)
        ax.add_collection(patches_collections)
        set_category_legend(ax, cmapper, (1.05, 0, 1, 1), legend_title)
    else:
        # continuous branch: value-mapped facecolors + colorbar
        cmap = "OrRd" if cmap is None else cmap
        legend_title = "value" if legend_title is None else legend_title
        if colors is not None:
            vc_mapper = dict(zip(values, colors))
            cmap = create_cmap([vc_mapper[v] for v in sorted(values)])
        colors = color_mapper_val(values, cmap=cmap)
        patches = [mpatches.Polygon(polygon, color=c) for polygon, c in zip(polygons, colors)]
        patches_collections = PatchCollection(patches, facecolors=colors, cmap=cmap)
        ax.add_collection(patches_collections)
        cmin = np.nanmin(values)
        cmax = np.nanmax(values)
        set_cbar(ax, patches_collections, (1.07, 0, 0.1, 0.3), legend_title, cmin, cmax)
    return ax
| StarcoderdataPython |
342889 | <filename>src/olympia/addons/tests/test_addon_utils.py<gh_stars>1-10
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
from olympia.addons.utils import reverse_name_lookup
class TestReverseNameLookup(TestCase):
    """Exercise addons.utils.reverse_name_lookup against the fixture add-on
    'Delicious Bookmarks': deletion, rename, whitespace/case normalization,
    and multi-locale names. Test methods mutate DB state in sequence, so
    statement order inside each method matters."""
    fixtures = ('base/addon_3615',)
    def setUp(self):
        super(TestReverseNameLookup, self).setUp()
        # the fixture contains exactly one add-on
        self.addon = Addon.objects.get()
    def test_delete_addon(self):
        # lookup stops matching once the add-on is deleted
        assert reverse_name_lookup('Delicious Bookmarks', amo.ADDON_EXTENSION)
        self.addon.delete('farewell my sweet amo, it was a good run')
        assert not reverse_name_lookup(
            'Delicious Bookmarks', amo.ADDON_EXTENSION)
    def test_update_addon(self):
        # renaming moves the lookup from the old name to the new one
        assert reverse_name_lookup('Delicious Bookmarks', amo.ADDON_EXTENSION)
        self.addon.name = 'boo'
        self.addon.save()
        assert not reverse_name_lookup(
            'Delicious Bookmarks', amo.ADDON_EXTENSION, self.addon)
        assert reverse_name_lookup('boo', amo.ADDON_EXTENSION)
        # Exclude the add-on from search if we have one (in case of an update)
        assert not reverse_name_lookup('boo', amo.ADDON_EXTENSION, self.addon)
    def test_get_strip(self):
        # surrounding whitespace is ignored by the lookup
        assert reverse_name_lookup(
            'Delicious Bookmarks ', amo.ADDON_EXTENSION)
    def test_get_case(self):
        # lookup is case-insensitive
        assert reverse_name_lookup('delicious bookmarks', amo.ADDON_EXTENSION)
    def test_multiple_languages(self):
        # a localized rename replaces the old name in every locale
        assert reverse_name_lookup('delicious bookmarks', amo.ADDON_EXTENSION)
        self.addon.name = {'de': 'name', 'en-US': 'name', 'fr': 'name'}
        self.addon.save()
        assert not reverse_name_lookup(
            'delicious bookmarks', amo.ADDON_EXTENSION)
        assert reverse_name_lookup('name', amo.ADDON_EXTENSION)
        assert reverse_name_lookup({'de': 'name'}, amo.ADDON_EXTENSION)
        assert reverse_name_lookup({'en-US': 'name'}, amo.ADDON_EXTENSION)
        assert not reverse_name_lookup({'es': 'name'}, amo.ADDON_EXTENSION)
        # Excludes the add-on instance if given
        assert not reverse_name_lookup('name', amo.ADDON_EXTENSION, self.addon)
        assert not reverse_name_lookup(
            {'de': 'name'}, amo.ADDON_EXTENSION, self.addon)
| StarcoderdataPython |
4906691 | <gh_stars>0
from django.apps import AppConfig
class IceCreamConfig(AppConfig):
    """Django application configuration for the ``ice_cream`` app."""
    name = 'ice_cream'
    # human-readable name shown in the Django admin
    # (Russian: "Ice-cream flavour management")
    verbose_name = 'Управление сортами мороженого'
22128 | <filename>seabird/cli.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Command line utilities for package Seabird
"""
import click
from seabird.exceptions import CNVError
from .cnv import fCNV
from .netcdf import cnv2nc
@click.group()
def cli():
    """ Utilities for seabird files
    """
    # Entry point for the command line group; subcommands below register
    # themselves via @cli.command().
    pass
@cli.command(name='cnvdump')
@click.argument('inputfilename', type=click.Path(exists=True))
def dump(inputfilename):
    """Dump the .cnv content as text
    Doesn't matter the version of the .cnv, this command will
    show it's content in a unified pattern, as an ASCII text.
    Consider the idea of a descriptor file with default values.
    """
    try:
        data = fCNV(inputfilename)
    except CNVError as e:
        # known parsing problems: report in ANSI red and exit cleanly
        print("\033[91m%s\033[0m" % e.msg)
        return
    # (removed a redundant `except: raise` clause — unknown exceptions
    # propagate unchanged by default)
    print("file: %s" % inputfilename)
    print("Global attributes")
    for a in sorted(data.attrs.keys()):
        # attribute names in yellow, values plain
        print("\t\033[93m%s\033[0m: %s" % (a, data.attrs[a]))
    # Fix: typo in user-facing output ("Variabes" -> "Variables")
    print("\nVariables")
    for k in data.keys():
        print("\033[91m%s\033[0m" % k)
        for a in data[k].attrs.keys():
            print("\t\033[93m%s\033[0m: %s" % (a, data[k].attrs[a]))
@cli.command(name='cnv2nc')
@click.option('--outputfilename', default=None,
              help='The output netCDF filename.')
@click.argument('inputfilename', type=click.Path(exists=True))
def nc(inputfilename, outputfilename):
    """ Export a CNV file as a netCDF
    """
    # default the output name to the input with a .nc extension
    if outputfilename is None:
        outputfilename = inputfilename.replace('.cnv', '.nc')
    click.echo('Saving on %s' % outputfilename)
    cnv2nc(fCNV(inputfilename), outputfilename)
@cli.command(name='ctdqc')
@click.option('--outputfilename', default=None,
              help='The output netCDF filename.')
@click.option('--config', default=None,
              help='The CoTeDe QC configuration (name or file) to apply.')
@click.argument('inputfilename', type=click.Path(exists=True))
def qc(inputfilename, outputfilename, config):
    """Apply CoTeDe quality control to a CNV profile and print its flags.
    """
    # removed unused `combined_flag` from this deferred import
    from cotede.qc import ProfileQC
    if outputfilename is None:
        outputfilename = inputfilename.replace('.cnv', '.nc')
    click.echo('Saving on %s' % outputfilename)
    data = fCNV(inputfilename)
    profile = ProfileQC(data, cfg=config, verbose=False)
    # TODO(review): `outputfilename` is announced but nothing is written;
    # the QC'd profile is only printed. Confirm whether an export step is
    # missing here.
    print(profile.flags)
1606395 | from rxbp.mixins.flowablemixin import FlowableMixin
from rxbp.observables.tolistobservable import ToListObservable
from rxbp.subscriber import Subscriber
from rxbp.subscription import Subscription
class ToListFlowable(FlowableMixin):
    """Flowable that wraps its upstream observable in a ``ToListObservable``,
    so all emitted elements are collected into a single list."""

    def __init__(self, source: FlowableMixin):
        super().__init__()
        self._source = source

    def unsafe_subscribe(self, subscriber: Subscriber) -> Subscription:
        upstream = self._source.unsafe_subscribe(subscriber=subscriber)
        wrapped = ToListObservable(source=upstream.observable)
        return upstream.copy(observable=wrapped)
3330420 | import datetime
def response(flow):
    """Pretty-print one completed HTTP exchange (request, response, timing)
    to stdout. Intended as a mitmproxy addon hook."""
    req = flow.request
    res = flow.response
    print("")
    print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
    print(">>>> Request:")
    print(f"{req.method} {req.url}")
    for name, value in req.headers.items():
        print(f"{name}: {value}")
    print("")
    print(req.raw_content)
    print("")
    print("<<<< Response:")
    print(f"{res.http_version} {res.status_code} {res.reason}")
    for name, value in res.headers.items():
        print(f"{name}: {value}")
    print("")
    print(res.raw_content)
    print("")
    print("---- Info:")
    # elapsed wall time, truncated to whole milliseconds
    elapsed_ms = int((res.timestamp_end - req.timestamp_start) * 1000)
    print(f"Time Elapsed: {elapsed_ms}ms")
    print("")
190284 | # Stack & Queue Review
# 9.3 valid paren
# 9.4 Longest Valid Paren
# "(()))(()())"
def longest_valid(s):
    """Length of the longest well-formed parenthesis substring of *s*.

    Classic one-pass stack scan: keep indices of unmatched '(' plus the
    index of the most recent unmatched ')'; every match is measured back
    to the nearest unmatched position on its left.
    """
    open_stack = []
    best = 0
    last_bad = -1  # index of the most recent unmatched ')'
    for idx, ch in enumerate(s):
        if ch == "(":
            open_stack.append(idx)
            continue
        if open_stack:
            open_stack.pop()
            anchor = open_stack[-1] if open_stack else last_bad
            best = max(best, idx - anchor)
        else:
            last_bad = idx
    return best
# 9.5 simplify path
# 9.6 print bst
def load_src(name, fpath):
    """Load a module by file path (relative paths resolved against this file).

    NOTE(review): the `imp` module is deprecated and removed in Python 3.12;
    porting to importlib.util.spec_from_file_location is advisable.
    """
    import os, imp
    p = fpath if os.path.isabs(fpath) \
        else os.path.join(os.path.dirname(__file__), fpath)
    return imp.load_source(name, p)
# shared tree helpers loaded from a sibling package directory
util = load_src("util", "../Tree/util.py")
def print_bst(node):
    # NOTE(review): unfinished stub — creates a TreeNode and discards it;
    # no printing is implemented yet.
    tn = util.TreeNode()
# 9.7 search posting list
class RandomListNode:
    """Singly linked node with an extra `random` pointer and a visit order."""

    def __init__(self, x):
        self.val = x
        self.next = None
        self.random = None
        self.order = -1  # -1 means "not yet visited"

    def __repr__(self):
        # Fix: the original unconditionally read self.random.val and raised
        # AttributeError whenever random was unset.
        if self.random is None:
            return "Node(None): %i " % self.val
        return "Node(%i): %i " % (self.random.val, self.val)
def jump_first(head):
    # Recursive pre-order walk over the list: assigns each reachable node a
    # 0-based visit order, following `random` before `next`.
    _visit(head, [0])
def _visit(node, cur_order):
if node and node.order == -1:
node.order = cur_order[0]
cur_order[0] += 1
_visit(node.random, cur_order)
_visit(node.next, cur_order)
def jump_first_i(head):
    """Iterative twin of jump_first: stamp each reachable node with a 0-based
    visit order, exploring `random` before `next`.

    Fixes two defects in the original:
    - orders started at 1 while the recursive version starts at 0;
    - children were pushed even for already-visited nodes, which loops
      forever once `random` pointers form a cycle.
    """
    order, stack = 0, [head]
    while stack:
        node = stack.pop()
        if node is not None and node.order == -1:
            node.order = order
            order += 1
            # push `next` first so `random` is popped (visited) first,
            # matching the recursive traversal order
            if node.next:
                stack.append(node.next)
            if node.random:
                stack.append(node.random)
# 9.8 sunset view stream
def out_put_sunsets():
    # Read building heights until a blank line, keeping only the buildings
    # with a clear westward (rightward) view: a new building removes every
    # earlier building that is not strictly taller (monotonic stack).
    # NOTE(review): Python 2 code — raw_input and print statements.
    bldgs = []
    nxt = raw_input("nxt building height > ")
    while nxt != "":
        newbie = int(nxt)
        # pop every earlier building whose view the newcomer blocks
        while bldgs and newbie >= bldgs[-1]:
            bldgs.pop()
        bldgs.append(newbie)
        nxt = raw_input("nxt building height > ")
    for i in bldgs:
        print i,
    print
# 9.9
# sort a stack
def sort(stack):
    """Recursively sort *stack* in place into descending order
    (list end = stack top = smallest element)."""
    if stack:
        top = stack.pop()
        sort(stack)
        insert(top, stack)
def insert(val, stack):
    """Push *val* back into an already-sorted stack, keeping the
    descending order intact."""
    if stack and stack[-1] < val:
        # val belongs deeper: temporarily lift the smaller top off
        top = stack.pop()
        insert(val, stack)
        stack.append(top)
    else:
        stack.append(val)
if __name__ == '__main__':
    # Ad-hoc driver exercising the routines above.
    # NOTE(review): Python 2 print statements throughout.
    a = [1, 2, 3, 2, 5, 2]
    sort(a)
    print a
    print "9.8"
    out_put_sunsets()
    print "9.7"
    # build a 4-node list whose random pointers form cycles
    head = RandomListNode(0)
    first = RandomListNode(1)
    sec = RandomListNode(2)
    third = RandomListNode(3)
    head.next, head.random = first, sec
    first.next, first.random = sec, head
    sec.next, sec.random = third, third
    third.next, third.random = None, sec
    # runner = head
    # while runner:
    #     print runner,
    #     runner = runner.next
    # print
    jump_first_i(head)
    #jump_first(head, 0)
    runner = head
    while runner:
        print runner, "order : ", runner.order
        runner = runner.next
    print "Longest valid paren"
    test_cases = ["(())", "()(", ")))()"]
    for i in test_cases:
        print longest_valid(i)
3377013 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Long description is README + HISTORY; requirements are read verbatim from
# the pinned requirements files (runs at import time, so these files must
# exist next to setup.py).
with open('README.md') as readme_file:
    readme = readme_file.read()
with open('HISTORY.md') as history_file:
    history = history_file.read()
with open("requirements.txt") as f:
    requirements = [req.strip() for req in f.readlines()]
with open("requirements_dev.txt") as f:
    test_requirements = [req.strip() for req in f.readlines()]
setup(
    name='textar',
    version='0.0.6',
    description="Paquete en python para análisis, clasificación y recuperación de textos, utilizado por el equipo de Datos Argentina.",
    long_description=readme + '\n\n' + history,
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/datosgobar/textar',
    packages=[
        'textar',
    ],
    package_dir={'textar':
                 'textar'},
    include_package_data=True,
    # ship the bundled .txt data files with the package
    package_data={"textar": ["*.txt"]},
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='textar',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: Spanish',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| StarcoderdataPython |
6566812 | <reponame>bogomaz1987/ps4you<filename>ps4you/client/migrations/0001_initial.py<gh_stars>0
# Generated by Django 2.0.5 on 2018-05-30 19:51
import django.contrib.auth.models
from django.db import migrations
class Migration(migrations.Migration):
    """Initial migration: declares `Client` as a *proxy* model over the
    existing `user.User` model (no new table; `fields` is intentionally
    empty), reusing Django's stock UserManager."""
    initial = True
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
            ],
            options={
                'verbose_name': 'Клиент',
                'verbose_name_plural': 'Клиенты',
                'proxy': True,
                'indexes': [],
            },
            bases=('user.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| StarcoderdataPython |
8012501 | from wtforms import Form, BooleanField, StringField, validators, SubmitField
class RegistrationForm(Form):
    """New-user sign-up form (WTForms).

    NOTE(review): `password` is a plain StringField; a PasswordField would
    mask input in the browser — confirm intent. `fname`/`lname` length
    bounds are asymmetric (no min on fname) — verify that is deliberate.
    """
    fname = StringField('First Name:',[validators.Length(max=25)])
    lname = StringField('Last Name:', [validators.Length(min=4, max=25)])
    phno = StringField('Phone Number:')
    email = StringField('Email Address:', [validators.Length(min=6, max=35), validators.Email(message='Please enter a valid email')])
    username = StringField('User Name:')
    password = StringField('Password:')
class LoginForm(Form):
    """Existing-user login form (no validators; validation presumably
    happens server-side against stored credentials — TODO confirm)."""
    username = StringField('User Name:')
    password = StringField('Password:')
9635271 | <reponame>ted80810/drb-estuary-salinity-ml
import os
import pandas as pd
import numpy as np
import yaml
import utils
# import config
# Module-level setup: load this munge step's parameters and prepare the
# output location (runs at import time).
with open("02_munge/params_config_munge_noaa_nerrs.yaml", 'r') as stream:
    config = yaml.safe_load(stream)
# check where to read data inputs from
read_location = config['read_location']
# set up write location data outputs
write_location = config['write_location']
s3_client = utils.prep_write_location(write_location, config['aws_profile'])
s3_bucket = config['s3_bucket']
def get_datafile_list(station_id, read_location, s3_client=None, s3_bucket=None):
    """List the raw fetch-step files for one station, from S3 or local disk.

    Returns an empty dict (the original sentinel) for any other
    *read_location* value.
    """
    if read_location == 'S3':
        listing = s3_client.list_objects_v2(
            Bucket=s3_bucket, Prefix=f'01_fetch/out/noaa_nos_{station_id}')
        return [entry['Key'] for entry in listing['Contents']]
    if read_location == 'local':
        prefix = os.path.join('01_fetch', 'out')
        file_prefix = f'noaa_nerrs_{station_id}'
        return [os.path.join(prefix, name)
                for name in os.listdir(prefix)
                if name.startswith(file_prefix)]
    return {}
def read_data(raw_datafile):
    """Read one raw csv (local path or S3 key) into a pandas DataFrame.

    Relies on the module-level ``read_location``/``s3_client``/``s3_bucket``
    globals. NOTE(review): for any other ``read_location`` value, ``df`` is
    unbound and the final return raises NameError — confirm that is
    acceptable.
    """
    if read_location == 'local':
        print(f'reading data from local: {raw_datafile}')
        # read in raw data as pandas df
        df = pd.read_csv(raw_datafile)
    elif read_location == 'S3':
        print(f'reading data from s3: {raw_datafile}')
        obj = s3_client.get_object(Bucket=s3_bucket, Key=raw_datafile)
        # read in raw data as pandas df
        df = pd.read_csv(obj.get("Body"))
    return df
def fill_gaps(x):
    """Fill interior NaN gaps of *x* (1-D float array) in place by linear
    interpolation; leading and trailing NaNs are left untouched.
    Returns the same (mutated) array.
    """
    missing = np.isnan(x)
    # nothing to do when the series is already complete
    if not missing.any():
        return x
    present = np.flatnonzero(~missing)
    # keep edge NaNs: only interior gaps are interpolated
    missing[:present.min()] = False
    missing[present.max() + 1:] = False
    x[missing] = np.interp(np.flatnonzero(missing), present, x[present])
    return x
def process_data_to_csv(site, site_raw_datafiles, column_mapping, flags_to_drop, agg_level, prop_obs_required, fill_anom_w_nos):
    '''
    process raw data text files into clean csvs, including:
    dropping unwanted flags
    converting datetime column to datetime format
    converting all data columns to numeric type
    removing metadata columns so that only datetime and data columns remain
    Depends on module-level config / utils / write_location globals; writes
    the result csv to 02_munge/out/<agg_level>/ and returns the DataFrame.
    '''
    # concatenate the per-year raw files into one frame
    for raw_datafile in site_raw_datafiles:
        year_df = read_data(raw_datafile)
        if raw_datafile == site_raw_datafiles[0]:
            combined_df = year_df.copy()
        else:
            combined_df = pd.concat([combined_df, year_df], ignore_index=True)
    # filter out data that we don't want
    col_values_accepted = config['col_values_accepted']
    for col in col_values_accepted.keys():
        combined_df = combined_df[combined_df[col].isin(col_values_accepted[col])]
        combined_df.drop(col, axis=1, inplace=True)
    # replace all flagged data we want to remove with NaN
    vars_to_keep = list(column_mapping.keys())
    vars_to_keep.remove('DatetimeStamp')
    for var in vars_to_keep:
        flag_col = f'F_{var}'
        combined_df[var] = np.where(combined_df[flag_col].isin(flags_to_drop), np.nan, combined_df[var])
    # drop any columns we don't want
    combined_df = combined_df[column_mapping.keys()]
    # map column names
    combined_df.rename(columns=column_mapping, inplace=True)
    # convert datetime column to datetime type
    combined_df['datetime'] = combined_df['datetime'].astype('datetime64')
    # make all other columns numeric
    cols = combined_df.columns.drop('datetime')
    combined_df[cols] = combined_df[cols].apply(pd.to_numeric, errors='coerce')
    # aggregate data to specified timestep
    combined_df = utils.process_to_timestep(combined_df, column_mapping.values(), agg_level, prop_obs_required)
    # drop any columns with no data
    combined_df.dropna(axis=1, how='all', inplace=True)
    # there are some anomalous values below 980
    combined_df.loc[combined_df['air_pressure'] < 980,'air_pressure'] = np.nan
    if fill_anom_w_nos:
        # NOTE(review): bare except — any failure (missing dir, bad csv) is
        # treated as "no NOS data"; consider narrowing to (OSError, IndexError).
        try:
            noaa_nos_file = os.listdir('02_munge/out/daily_summaries')
            noaa_nos = pd.read_csv(os.path.join('02_munge','out','daily_summaries',noaa_nos_file[0]), index_col = 'datetime')
            combined_df['air_pressure'] = combined_df['air_pressure'].fillna(noaa_nos['air_pressure'])
        except:
            print('No NOAA NOS data to fill NERR air pressure record')
    #fill gaps in temperature, air pressure, wind speed and wind direction
    # all < 0.5 %, precipitation has more like 10% missing so we won't fill that
    combined_df['temperature'] = fill_gaps(combined_df['temperature'])
    combined_df['air_pressure'] = fill_gaps(combined_df['air_pressure'])
    combined_df['wind_speed'] = fill_gaps(combined_df['wind_speed'])
    combined_df['wind_direction'] = fill_gaps(combined_df['wind_direction'])
    combined_df['wind_speed_direction'] = -1*combined_df['wind_speed']*np.cos(combined_df['wind_direction']*(np.pi/180))
    # save pre-processed data
    data_outfile_csv = os.path.join('.', '02_munge', 'out', agg_level, 'noaa_nerrs_delsjmet.csv')
    combined_df.to_csv(data_outfile_csv, index=True)
    if write_location == 'S3':
        print('uploading to s3')
        s3_client.upload_file(data_outfile_csv, s3_bucket, utils.local_to_s3_pathname(data_outfile_csv))
    return combined_df
def munge_single_site_data(site_num):
    """Munge the raw NOAA NERRS files for one site into a clean csv.

    *site_num* may arrive from snakemake wrapped in a single-element set.
    Reads processing parameters from the module-level ``config``.
    """
    # Fix idiom: use isinstance() instead of comparing type() objects.
    if isinstance(site_num, set):
        site_num = list(site_num)[0]
    # get variables we want to process
    column_mapping = config['vars']
    # determine which data flags we want to drop
    flags_to_drop = config['flags_to_drop']
    # timestep to aggregate to
    agg_level = config['agg_level']
    # number of measurements required to consider average valid
    prop_obs_required = config['prop_obs_required']
    # should air pressure anomalies in NERR be filled with NOS data
    fill_anom_w_nos = config['fill_anom_w_nos']
    # process raw data files into csv
    site_raw_datafiles = get_datafile_list(site_num, read_location=read_location)
    process_data_to_csv(site_num, site_raw_datafiles, column_mapping, flags_to_drop, agg_level, prop_obs_required, fill_anom_w_nos)
def munge_all_sites_data():
    # Site ids come from the fetch step's wildcard configuration file.
    with open("01_fetch/wildcards_fetch_config.yaml", 'r') as stream:
        site_ids = yaml.safe_load(stream)['fetch_noaa_nerrs.py']['sites']
    for site_num in site_ids:
        munge_single_site_data(site_num)
if __name__ == '__main__':
    # Script entry point: munge every configured site.
    munge_all_sites_data()
5154207 | <filename>CrySPY/utility.py
'''
Utility for CrySPY
'''
from datetime import datetime
import os
# ---------- parameters
# Unit-conversion factors; names suggest standard physical constants
# (Bohr radius in Angstrom, Hartree/Rydberg in eV, kbar to eV/Angstrom^3)
# — TODO(review): confirm provenance (CODATA values).
bohr2ang = 0.529177210903
hrt2ev = 27.211386245988
ry2ev = 13.605693122994
kbar2ev_ang3 = 0.0006241509073
# ---------- functions
def get_version():
    # single source of truth for the version string reported by CrySPY
    return 'CrySPY 0.10.1'
def get_date():
    """Current local time formatted as 'YYYY/MM/DD HH:MM:SS'."""
    timestamp = datetime.now()
    return timestamp.strftime("%Y/%m/%d %H:%M:%S")
def check_fwpath():
    """Locate the bundled ``find_wy`` executable and return its path.

    Raises:
        IOError: if the executable is missing next to this module.
    """
    fwpath = os.path.dirname(os.path.abspath(__file__)) + '/find_wy/find_wy'
    if os.path.isfile(fwpath):
        return fwpath
    raise IOError('There is no find_wy program in {}'.format(fwpath))
def check_fppath():
    """Return the path to the bundled ``cal_fingerprint`` executable,
    raising IOError if it has not been built/installed."""
    # ---------- check cal_fingerprint executable file
    fppath = os.path.dirname(
        os.path.abspath(__file__)) + '/f-fingerprint/cal_fingerprint'
    if not os.path.isfile(fppath):
        raise IOError('There is no cal_fingerprint program in {}'.format(
            fppath))
    return fppath
| StarcoderdataPython |
194015 | import argparse
import tqdm
import random
import math
import os
import pandas as pd
import sklearn
import timeit
import numpy as np
import struct
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import pickle
import torch
import pandas as pd
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from asteroid_dataset import *
import torch.optim as optim
from classifier import *
import visdom
from torchnet.meter import ConfusionMeter
from torchnet.meter import AUCMeter
from sklearn.metrics import matthews_corrcoef
# ---- experiment setup (runs at import: opens files, connects to visdom) ----
confusion_matrix = ConfusionMeter(2)
# temp_confusion_matrix = ConfusionMeter(2)
auc_meter = AUCMeter()
# confusion_matrix_validation = ConfusionMeter(2)
vis = visdom.Visdom()
draw_graph = None
draw_accuracy = None
draw_roc_curve = None
csv_file = "classifications.csv"
root_dir = "data/"
# hyperparameters
batch_size = 159
learning_rate = 0.001
epoch_num = 50
# experiment parameters
real_exp = True
experiment_num = 19
save_model = real_exp
validate_frequency = 5
# NOTE(review): draw_graph / draw_accuracy are re-initialized here,
# duplicating the assignments a few lines above.
draw_graph = None
draw_accuracy = None
draw_validation_graphs = None
# file
if real_exp:
    f = open("saved_output/experiment_%d.out" % experiment_num, 'w+')
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = AsteroidDataset(csv_file=csv_file, root_dir=root_dir, train=True, transform=transform)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
validation_dataset = AsteroidDataset(csv_file=csv_file, root_dir=root_dir, train=False, transform=transform)
validation_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
classifier = CNN()
criterion = nn.NLLLoss()
optimizer = optim.Adam(classifier.parameters(), lr=learning_rate)
def model_save(model, path):
    """Serialize *model* to *path* with pickle.

    Fix: the original passed an unclosed file object to ``pickle.dump``,
    leaking the descriptor and relying on GC to flush; a context manager
    guarantees the file is flushed and closed.
    """
    with open(path, 'wb') as fh:
        pickle.dump(model, fh)
def adjust_learning_rate(optimizer, epoch):
    """Decay the learning rate by 10x per epoch and apply it to every
    parameter group of *optimizer*.

    Fix: the original computed the decayed ``lr`` and then assigned the
    constant module-level ``learning_rate``, so the schedule never took
    effect.
    """
    # NOTE(review): `epoch // 1` is a no-op; a decay interval (e.g. // 10)
    # was presumably intended — confirm before changing the schedule.
    lr = learning_rate * (0.1 ** (epoch // 1))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
print('Starting training...')
start = timeit.default_timer()
if real_exp:
    f.write('Starting training...\n')
total_iter = 0
for epoch in range(epoch_num):
    # NOTE(review): `corrects` is never used below.
    corrects = 0.0
    for i, data in enumerate(train_dataloader, 0):
        # every `validate_frequency` iterations, evaluate one validation batch
        if total_iter % validate_frequency == 0:
            data = next(iter(validation_dataloader))
            inputs = data["image"]
            labels = data["class"]
            inputs, labels = Variable(inputs), Variable(labels)
            output = classifier(inputs)
            loss = criterion(output, labels)
            # turn log-probabilities of class 1 into hard 0/1 predictions
            temp = output[:, 1].data.numpy()
            temp = np.apply_along_axis(lambda x: np.rint(np.exp(x)), 0, temp)
            temp = torch.from_numpy(temp).long()
            num = torch.sum(temp == labels.data)
            if type(num) is not int:
                num = num.item()
            accuracy = num/ float(batch_size)
            update = None if draw_validation_graphs is None else 'append'
            draw_validation_graphs = vis.line(X = np.array([total_iter]), Y = np.array([loss.data[0]]), win = draw_validation_graphs, update = update, opts=dict(title="Validation NLL loss"))
            print("[EPOCH %d ITER %d] Validation Loss: %f (accuracy: %f)" % (epoch, i, loss.data[0], accuracy))
            if real_exp:
                f.write("[EPOCH %d ITER %d] Validation Loss: %f (accuracy: %f)\n" % (epoch, i, loss.data[0], accuracy))
            # confusion_matrix_validation.add(torch.Tensor(output.data), labels.data)
        # training step on the current batch
        inputs = data["image"]
        labels = data["class"]
        inputs, labels = Variable(inputs), Variable(labels)
        optimizer.zero_grad()
        output = classifier(inputs)
        loss = criterion(output, labels)
        update = None if draw_graph is None else 'append'
        draw_graph = vis.line(X = np.array([total_iter]), Y = np.array([loss.data[0]]), win = draw_graph, update = update, opts=dict(title="NLL loss"))
        temp = output[:, 1].data.numpy()
        temp = np.apply_along_axis(lambda x: np.rint(np.exp(x)), 0, temp)
        temp = torch.from_numpy(temp).long()
        num = torch.sum(temp == labels.data)
        if type(num) is not int:
            num = num.item()
        accuracy = num/ float(batch_size)
        update = None if draw_accuracy is None else 'append'
        draw_accuracy = vis.line(X = np.array([total_iter]), Y = np.array([accuracy]), win = draw_accuracy, update = update, opts=dict(title="Accuracy"))
        print("[EPOCH %d ITER %d] Loss: %f (accuracy: %f)" % (epoch, i, loss.data[0], accuracy))
        if real_exp:
            f.write("[EPOCH %d ITER %d] Loss: %f (accuracy: %f)\n" % (epoch, i, loss.data[0], accuracy))
        # mcoref = matthews_corrcoef(labels.data, output.data)
        # print("matthews coefficient (training): %f" % mcoref)
        # if real_exp:
        #     f.write("matthews coefficient (training): %f\n" % mcoref)
        # confusion matrix calculations
        if epoch == epoch_num -1:
            # final epoch only: accumulate confusion matrix / ROC statistics
            confusion_matrix.add(torch.Tensor(output.data), labels.data)
            print (output[:, 1].data.shape)
            auc_meter.add(output[:, 1].data, labels.data)
            area, tpr, fpr = auc_meter.value()
            mcoref = matthews_corrcoef(labels.data, temp)
            print("matthews coefficient (end of training): %f" % mcoref)
            print("area under roc curve: %f" % area)
            if real_exp:
                f.write("matthews coefficient (training): %f\n" % mcoref)
                f.write("area under roc curve: %f" % area)
            update = None if draw_roc_curve is None else 'append'
            draw_roc_curve = vis.line(X = fpr, Y = tpr, win = draw_roc_curve, update = update, opts=dict(title="ROC curve"))
            # temp_confusion_matrix.add(torch.Tensor(output.data), labels.data)
            # tpr = temp_confusion_matrix.conf[0][0]/float(temp_confusion_matrix.conf[0][0] + temp_confusion_matrix.conf[0][1])
            # fpr = temp_confusion_matrix.conf[1][0]/float(temp_confusion_matrix.conf[1][0] + temp_confusion_matrix.conf[1][1])
            # update = None if draw_roc_curve is None else 'append'
            # draw_roc_curve = vis.line(X = np.array([fpr]), Y = np.array([tpr]), win = draw_roc_curve, update = update, opts=dict(title="ROC curve"))
        loss.backward()
        optimizer.step()
        temp = timeit.default_timer()
        if epoch % 30 == 0 and epoch != 0:
            print("TRAINING AT EPOCH %d TOOK %f" % (epoch, (temp-start)))
        total_iter += 1
    adjust_learning_rate(optimizer, epoch)
stop = timeit.default_timer()
print("TRAINING DONE: TOOK %f s" % (stop-start))
if save_model:
    model_save(classifier, "saved_models/experiment_"+str(experiment_num))
# print confusion matrix to verify model
print("CONFUSION MATRIX FOR TRAINING")
if real_exp:
    f.write("CONFUSION MATRIX FOR TRAINING")
print(confusion_matrix.conf)
if real_exp:
    f.write(np.array2string(confusion_matrix.conf, separator=', '))
# print("CONFUSION MATRIX FOR VALIDATION")
# if real_exp:
#     f.write("CONFUSION MATRIX FOR VALIDATION")
# print(confusion_matrix_validation.conf)
# if real_exp:
#     f.write(confusion_matrix_validation.conf)
| StarcoderdataPython |
4988325 | <reponame>raper03/login_example
import re
def error(key):
    """Return the human-readable message for the failed password rule *key*
    (None for an unknown key)."""
    errorlist = {
        'eight': '\tPassword is not at least eight characters',
        'lower': '\tPassword does not have a lowercase letter',
        'upper': '\tPassword does not have a uppercase letter',
        'digit': '\tPassword does not have a number',
        'symbol': '\tPassword does not have a symbol'
    }
    return errorlist.get(key)


def match(text):
    """Check *text* against five password rules, print feedback and the
    strength percentage, and return True only when every rule passes.

    Fix: the original length rule was a regex requiring eight *consecutive*
    word characters, which wrongly rejected long passwords containing
    symbols (e.g. 'Abcdef1!'); the length is now checked explicitly.
    """
    lowercase = re.compile(r'[a-z]')
    uppercase = re.compile(r'[A-Z]')
    containsdigit = re.compile(r'\d')
    containssymbol = re.compile(r'[!@#$%^&*]')
    # rule name -> predicate over the candidate password
    rules = {
        'eight': lambda s: len(s) >= 8,
        'lower': lowercase.search,
        'upper': uppercase.search,
        'digit': containsdigit.search,
        'symbol': containssymbol.search,
    }
    hasError = False
    strength = 0
    for key, passes in rules.items():
        if not passes(text):
            # the first failure gets a leading blank line, as before
            print(f'\n{error(key)}') if not hasError else print(error(key))
            hasError = True
        else:
            strength += 1
    strength = (strength / len(rules)) * 100
    if strength == 100:
        print('\n\tStrong password Accepted!\n')
        return True
    else:
        print(f'\tPassword is %{strength} strong!\n')
        return False
if __name__ == "__main__":
    # Interactive demo: prompt forever, validating each entered password.
    # NOTE(review): no exit condition — terminate with Ctrl-C / EOF.
    while True:
        password = input("Enter password: ")
        match(password)
3465863 | <filename>tests/pipeline/nodes/model/movenetv1/test_predictor.py<gh_stars>1-10
# Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from pathlib import Path
import cv2
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow.keras.backend as K
import yaml
from peekingduck.pipeline.nodes.model.movenetv1.movenet_files.predictor import Predictor
from tests.conftest import PKD_DIR, TEST_IMAGES_DIR
@pytest.fixture(params=["t2.jpg"])
def single_person_image(request):
    # Parametrized over test image filenames containing a single person.
    yield request.param
@pytest.fixture
def movenet_config():
    """Load the stock movenet node config and anchor its weights root at the
    current working directory."""
    with open(PKD_DIR / "configs" / "model" / "movenet.yml") as infile:
        node_config = yaml.safe_load(infile)
    node_config["root"] = Path.cwd()
    yield node_config
    # teardown: release the TF graph/session between tests
    K.clear_session()
    gc.collect()
@pytest.fixture
def model_dir(movenet_config):
    # Directory holding the downloaded weights for the configured model format.
    return (
        movenet_config["root"].parent
        / "peekingduck_weights"
        / movenet_config["weights"][movenet_config["model_format"]]["model_subdir"]
        / movenet_config["model_format"]
    )
@pytest.mark.mlmodel
class TestPredictor:
    """Tests for the MoveNet Predictor wrapper: construction, model loading,
    config handling, and post-processing of single/multi-pose outputs."""

    @staticmethod
    def _create_predictor(movenet_config, model_dir):
        """Build a Predictor from the node config.

        Every test previously repeated this six-argument constructor call
        verbatim; centralising it keeps the config plumbing in one place.
        """
        return Predictor(
            model_dir,
            movenet_config["model_type"],
            movenet_config["weights"][movenet_config["model_format"]]["model_file"],
            movenet_config["resolution"],
            movenet_config["bbox_score_threshold"],
            movenet_config["keypoint_score_threshold"],
        )

    @staticmethod
    def _assert_all_empty(outputs):
        """Assert each (name, array) output is empty, as expected when every
        score falls below the configured detection thresholds."""
        for name, value in outputs:
            npt.assert_array_equal(
                x=value,
                y=np.zeros(0),
                err_msg=(
                    "Unexpected {} output for prediction with keypoint score "
                    "below threshold got {} instead of {}".format(
                        name, value, np.zeros(0)
                    )
                ),
            )

    def test_predictor(self, movenet_config, model_dir):
        movenet_predictor = self._create_predictor(movenet_config, model_dir)
        assert movenet_predictor is not None, "Predictor is not instantiated"

    def test_model_creation(self, movenet_config, model_dir):
        movenet_predictor = self._create_predictor(movenet_config, model_dir)
        assert movenet_predictor.model is not None, "Model is not loaded"

    def test_get_resolution_as_tuple(self, movenet_config, model_dir):
        resolution = {"height": 256, "width": 256}
        movenet_predictor = self._create_predictor(movenet_config, model_dir)
        tuple_res = movenet_predictor.get_resolution_as_tuple(resolution)
        assert isinstance(
            tuple_res, tuple
        ), f"Resolution in config must be a tuple instead of {type(tuple_res)}"
        assert tuple_res == (
            256,
            256,
        ), f"Incorrect resolution: expected (256, 256) but got {tuple_res}"
        assert (
            len(tuple_res) == 2
        ), f"Wrong resolution dimension: expected 2 but got {len(tuple_res)}"

    def test_predict(self, movenet_config, model_dir, single_person_image):
        img = cv2.imread(str(TEST_IMAGES_DIR / single_person_image))
        movenet_predictor = self._create_predictor(movenet_config, model_dir)
        (
            bboxes,
            valid_keypoints,
            keypoints_scores,
            keypoints_conns,
        ) = movenet_predictor.predict(img)
        # One detected person: 17 COCO keypoints and 19 skeleton connections.
        assert bboxes.shape == (1, 4)
        assert valid_keypoints.shape == (1, 17, 2)
        assert keypoints_scores.shape == (1, 17)
        assert keypoints_conns.shape == (1, 19, 2, 2)

    def test_get_results_single(self, movenet_config, model_dir):
        # prediction for singlepose model is in shape of [1, 1, 17, 3]
        # generates random tensor with values from 0.3 to 0.9
        prediction = tf.random.uniform(
            (1, 1, 17, 3), minval=0.3, maxval=0.9, dtype=tf.dtypes.float32, seed=24
        )
        movenet_predictor = self._create_predictor(movenet_config, model_dir)
        (
            bbox,
            valid_keypoints,
            keypoints_scores,
            keypoints_conns,
        ) = movenet_predictor._get_results_single(prediction)
        assert bbox.shape == (1, 4)
        assert valid_keypoints.shape == (1, 17, 2)
        assert keypoints_scores.shape == (1, 17)
        assert keypoints_conns.shape[0] == 1

        # generates random tensor with values from 0.0 to 0.1
        # since values are below config score threshold
        # predictions should be tuples of empty np array
        prediction_no_pose = tf.random.uniform(
            (1, 1, 17, 3), minval=0.0, maxval=0.1, dtype=tf.dtypes.float32, seed=24
        )
        (
            bbox_no_pose,
            valid_keypoints_no_pose,
            keypoints_scores_no_pose,
            keypoints_conns_no_pose,
        ) = movenet_predictor._get_results_single(prediction_no_pose)
        self._assert_all_empty(
            [
                ("bbox", bbox_no_pose),
                ("valid keypoint", valid_keypoints_no_pose),
                ("keypoint score", keypoints_scores_no_pose),
                ("keypoint connection", keypoints_conns_no_pose),
            ]
        )

    def test_get_results_multi(self, movenet_config, model_dir):
        # prediction for multi model is in shape of [1, 6, 56]
        # generates random tensor with values from 0.2 to 0.9
        # since threshold in config is at 0.2, this random tensor
        # will have at least 1 pose after filtering
        prediction = tf.random.uniform(
            (1, 6, 56), minval=0.2, maxval=0.9, dtype=tf.dtypes.float32, seed=24
        )
        movenet_predictor = self._create_predictor(movenet_config, model_dir)
        (
            bbox,
            valid_keypoints,
            keypoints_scores,
            keypoints_conns,
        ) = movenet_predictor._get_results_multi(prediction)
        # output of random tensor will produce between 1 to 6 valid output
        # The valid number of detections will be same for the all outputs,
        # which is the value of the 1st index in the shape of the outputs
        assert bbox.shape[0] >= 1
        assert bbox.shape[0] <= 6
        assert bbox.shape[0] == valid_keypoints.shape[0]
        assert bbox.shape[0] == keypoints_scores.shape[0]
        assert bbox.shape[0] == keypoints_conns.shape[0]

        # generates random tensor with values from 0.0 to 0.1
        # since values are below config score threshold
        # predictions should be tuples of empty numpy arrays
        prediction_no_pose = tf.random.uniform(
            (1, 6, 56), minval=0.0, maxval=0.1, dtype=tf.dtypes.float32, seed=24
        )
        (
            bbox_no_pose,
            valid_keypoints_no_pose,
            keypoints_scores_no_pose,
            keypoints_conns_no_pose,
        ) = movenet_predictor._get_results_multi(prediction_no_pose)
        self._assert_all_empty(
            [
                ("bbox", bbox_no_pose),
                ("valid keypoint", valid_keypoints_no_pose),
                ("keypoint score", keypoints_scores_no_pose),
                ("keypoint connection", keypoints_conns_no_pose),
            ]
        )
| StarcoderdataPython |
325963 | <filename>auxiliary/mc_more_precisely.py
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
#****************************************************************************************
#********************* COMPUTING MONTH OF CONCEPTION MORE PRECISELY *********************
#****************************************************************************************
# Create month of birth variable based on month of policy intervention in July 2007
def mc_more_precisely():
    """Build a monthly conception panel from Spanish birth microdata.

    Derives a month index `m` centred on the July 2007 policy change, backs
    out the month of conception `mc` from gestation length (`semanas`, in
    weeks) — falling back to the premature-baby indicator when weeks are
    missing — then collapses to monthly counts and attaches calendar-month
    dummies, days-in-month, a post-policy indicator, polynomial trends, and
    the log count, ready for the RDD regressions.
    """
    dfb = pd.read_stata('data/data_births_20110196.dta')
    dfb['m'] = 500 # create the variable (in the end no 500 will be left)
    # m = 0 corresponds to July 2007 (policy month); mesp is the birth month.
    for i in range(11):
        dfb.loc[dfb['year'] == 2000 + i,'m'] = dfb['mesp'] - 91 + 12*i
    ######################### THIS PART IS THE IMPORTANT ONE ################################
    # create the variable
    dfb['mc'] = np.nan
    # compute month of conception using information about #-of weeks of pregancy (semanas)
    # each bracket of weeks maps to a whole number of months of gestation
    dfb.loc[(0 < dfb['semanas']) & (dfb['semanas'] <= 21), 'mc'] = dfb['m'] - 4
    dfb.loc[(21 < dfb['semanas']) & (dfb['semanas'] <= 25), 'mc'] = dfb['m'] - 5
    dfb.loc[(25 < dfb['semanas']) & (dfb['semanas'] <= 29), 'mc'] = dfb['m'] - 6
    dfb.loc[(29 < dfb['semanas']) & (dfb['semanas'] <= 34), 'mc'] = dfb['m'] - 7
    dfb.loc[(34 < dfb['semanas']) & (dfb['semanas'] <= 38), 'mc'] = dfb['m'] - 8
    dfb.loc[(38 < dfb['semanas']) & (dfb['semanas'] <= 43), 'mc'] = dfb['m'] - 9
    dfb.loc[43 < dfb['semanas'], 'mc'] = dfb['m'] - 10
    # if semanas is missing: approximate mc using premature baby indicator (like the author)
    dfb.loc[(np.isnan(dfb['semanas']) | (0 == dfb['semanas'])) & (dfb['prem'] == 1), 'mc'] = dfb['m'] - 9
    dfb.loc[(np.isnan(dfb['semanas']) | (0 == dfb['semanas'])) & (dfb['prem'] == 2), 'mc'] = dfb['m'] - 8
    ##########################################################################################
    # GROUP DATA
    dfb['n'] = 1 # this variable will indicate the number of conception per month
    dfb = dfb.groupby('mc', as_index = False)['n'].count()
    # calendar month of conception (mc = 0 is July 2007, so mc mod 12 fixes the month)
    dfb['month'] = 0
    for i in range(4): #note that range starts at 0 but does not include the last number
        dfb.loc[dfb['mc'] == 0 + 12*i, 'month'] = 7
        dfb.loc[dfb['mc'] == 1 + 12*i, 'month'] = 8
        dfb.loc[dfb['mc'] == 2 + 12*i, 'month'] = 9
        dfb.loc[dfb['mc'] == 3 + 12*i, 'month'] = 10
        dfb.loc[dfb['mc'] == 4 + 12*i, 'month'] = 11
        dfb.loc[dfb['mc'] == 5 + 12*i, 'month'] = 12
        dfb.loc[dfb['mc'] == 6 + 12*i, 'month'] = 1
        dfb.loc[dfb['mc'] == 7 + 12*i, 'month'] = 2
        dfb.loc[dfb['mc'] == 8 + 12*i, 'month'] = 3
        dfb.loc[dfb['mc'] == 9 + 12*i, 'month'] = 4
        dfb.loc[dfb['mc'] == 10 + 12*i, 'month'] = 5
        dfb.loc[dfb['mc'] == 11 + 12*i, 'month'] = 6
    # same mapping for pre-policy months (negative mc values)
    for i in range(9):
        dfb.loc[dfb['mc'] == -1 - 12*i, 'month'] = 6
        dfb.loc[dfb['mc'] == -2 - 12*i, 'month'] = 5
        dfb.loc[dfb['mc'] == -3 - 12*i, 'month'] = 4
        dfb.loc[dfb['mc'] == -4 - 12*i, 'month'] = 3
        dfb.loc[dfb['mc'] == -5 - 12*i, 'month'] = 2
        dfb.loc[dfb['mc'] == -6 - 12*i, 'month'] = 1
        dfb.loc[dfb['mc'] == -7 - 12*i, 'month'] = 12
        dfb.loc[dfb['mc'] == -8 - 12*i, 'month'] = 11
        dfb.loc[dfb['mc'] == -9 - 12*i, 'month'] = 10
        dfb.loc[dfb['mc'] == -10 - 12*i, 'month'] = 9
        dfb.loc[dfb['mc'] == -11 - 12*i, 'month'] = 8
        dfb.loc[dfb['mc'] == -12 - 12*i, 'month'] = 7
    # one can check that no zero is left
    # generate July indicator
    dfb['july'] = np.where(dfb['month'] == 7, 1, 0)
    # generate number of days in a month
    # leap years from 2000 - 2010: 2008, 2004, 2000
    # --> mc = 7, 7-12*4 = -41, 7-12*8 = -89
    dfb['days'] = np.where((dfb['mc'] == 7) | (dfb['mc'] == -41) | (dfb['mc'] == -89), 29,
                           # for all other feburarys
                           np.where(dfb['month'] == 2, 28,
                                    # for April, June, September, November
                                    np.where((dfb['month'] == 4) | (dfb['month'] == 6) |
                                             (dfb['month'] == 9) | (dfb['month'] == 11), 30,
                                             # otherwise
                                             31)))
    # indicator for treatment group (post-policy conception), i.e. after June 2007
    dfb['post'] = np.where(dfb['mc'] >= 0, 1, 0)
    # quadratic and cubic mc
    dfb['mc2'] = dfb['mc']*dfb['mc']
    dfb['mc3'] = dfb['mc']*dfb['mc']*dfb['mc']
    # natural log of number of obs n
    dfb['ln'] = np.log(dfb['n'])
    # get month dummies
    dummies = pd.get_dummies(dfb['month'])
    dummies.columns = ['jan','feb','mar','apr','mai','jun','jul','aug','sep','oct','nov','dec']
    # bind data frames
    dfb = pd.concat([dfb, dummies], axis=1)
    return dfb
# *********************************** CONCEPTIONS - REGRESSIONS **********************************
# function creating significance stars:
def star_function(p):
    """Map a p-value to its conventional significance-star marker.

    The value is rounded to 10 decimal places before comparison, and the
    usual ***/**/* convention marks the 1% / 5% / 10% levels; anything
    larger maps to a single space.
    """
    rounded = round(p, 10)
    for threshold, marker in ((0.01, "***"), (0.05, "**"), (0.1, "*")):
        if rounded <= threshold:
            return marker
    return " "
def table_reg_output_2(reg_output1, reg_output2):
    """Print a side-by-side table of the `post` coefficient across models.

    :reg_output1: list of fitted statsmodels results using the revised
        month-of-conception variable
    :reg_output2: list of fitted statsmodels results using the author's
        original variable
    Each element must expose `.params.post`, `.bse.post` and
    `.pvalues.post`.
    # NOTE(review): the header hard-codes 8 columns (5 RDD + 3 MFE
    # windows); confirm both lists always hold 8 fitted models.
    """
    # Make a table with coefficients and se for post variable
    print('\u2014'*116)
    # header
    print('{:<12s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}'
          .format("", "RDD (1)", "", "RDD (2)", "", "RDD (3)", "", "RDD (4)", "", "RDD (5)", "", "MFE (6)", "", \
                  "MFE (7)", "", "MFE (8)", ""))
    print('{:<12s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}{:>10s}{:<3s}'
          .format("", "10 years", "", "5 years", "", "12 months", "", "9 months", "", "3 months", "", "10 years", "", \
                  "7 years", "", "5 years", ""))
    print('\u2014'*116)
    # REG OUTPUT 1
    print('{:<12s}'.format("mc revised"), end="")
    # coefficient estimate (bold, with significance stars)
    for i in range(len(reg_output1)):
        print ('\033[1m' '{:>10.4f}{:<3s}' '\033[0m'.format(reg_output1[i].params.post,\
                                                            star_function(reg_output1[i].pvalues.post)), end="")
    # standard error
    print(" "*116)
    print( '{:<12s}'.format(""), end="")
    for j in range(len(reg_output1)):
        print ('{:>10.4f}{:<3s}'.format(reg_output1[j].bse.post, ""), end="")
    '''
    # p-value
    print(" "*116)
    print('{:<12s}'.format(""), end="")
    for j in range(len(reg_output1)):
        print ('\33[31m' '{:>10.4f}{:<3s}' '\033[0m'.format(reg_output1[j].pvalues.post, ""), end="")
    '''
    # REG OUTPUT 2
    print(" "*116)
    print(" "*116)
    print('{:<12s}'.format("mc old"), end="")
    # coefficient estimate (bold, with significance stars)
    for i in range(len(reg_output2)):
        print ('\033[1m' '{:>10.4f}{:<3s}''\033[0m' .format(reg_output2[i].params.post,\
                                                            star_function(reg_output2[i].pvalues.post)), end="")
    # standard error
    print(" "*116)
    print('{:<12s}'.format(""), end="")
    for j in range(len(reg_output2)):
        print ('{:>10.4f}{:<3s}'.format(reg_output2[j].bse.post, ""), end="")
    '''
    # p-value
    print(" "*116)
    print('{:<12s}'.format(""), end="")
    for j in range(len(reg_output2)):
        print ('\33[31m' '{:>10.4f}{:<3s}' '\033[0m'.format(reg_output2[j].pvalues.post, ""), end="")
    '''
    #footer
    print(" "*116)
    print('\u2014'*116)
    print("Notes: The dependent variable is always the natural logarithm of the monthly number of conceptions.")
    print("For each of the specifications, the coefficient of the binary treatment indicator variable is printed in bold")
    print("font. The corresponding heteroscedasticity-robust standard errors are reported below.")
    print ('***Significance at the 1 percent level.')
    print (' **Significance at the 5 percent level.')
    print (' *Significance at the 10 percent level.')
| StarcoderdataPython |
1663828 | <filename>stable_nalu/functional/gumbel.py
import torch
def sample_gumbel(placeholder, eps=1e-10, reuse=False):
    """Samples Gumbel(0, 1) values into the placeholder.

    Arguments:
        placeholder: tensor that provides the storage/device for the sample;
            its contents are overwritten in-place unless ``reuse`` is True.
        eps: lower bound of the uniform draw, guarding against log(0).
        reuse: if True, treat the current placeholder contents as the
            uniform sample instead of drawing a fresh one.
    """
    # Uniform sample between [eps, 1)
    if reuse:
        uniform = placeholder
    else:
        uniform = placeholder.uniform_(eps, 1)
    # Inverse transform: U ~ Uniform(eps, 1)  ->  -log(-log(U)) ~ Gumbel(0, 1)
    g = -torch.log(-torch.log(uniform))
    return g


def sample_gumbel_softmax(placeholder, logits, tau, **kwargs):
    """Samples values from a gumbel softmax.

    Arguments:
        placeholder: A tensor used to specify the device storage
            (cpu or cuda). Note that the content of the placeholder
            will be overwritten.
        logits: log properbilities, you can use log_softmax to
            transform a tensor into log properbilities.
        tau: the temperature used, must be tau \in (0, \infty]. tau < 1
            makes the distribution more categorical. tau > 1 makes
            the distribution more uniform.
        **kwargs: forwarded to sample_gumbel (eps, reuse).
    """
    g = sample_gumbel(placeholder, **kwargs)
    return torch.nn.functional.softmax((logits + g) / tau, dim=-1)


def sample_gumbel_max(placeholder, logits, **kwargs):
    """Samples values from a gumbel max (a hard one-hot categorical draw).

    Arguments:
        placeholder: A tensor used to specify the device storage
            (cpu or cuda). Note that the content of the placeholder
            will be overwritten.
        logits: log properbilities, you can use log_softmax to
            transform a tensor into log properbilities.
        **kwargs: forwarded to sample_gumbel (eps, reuse).
    """
    g = sample_gumbel(placeholder, **kwargs)
    indices = torch.argmax(logits + g, dim=-1)
    # Convert indices to a one-hot encoding. scatter_ requires the index
    # tensor to have the same number of dimensions as the destination, and
    # argmax removed the last axis, so it must be re-inserted here (the
    # previous call with the reduced-rank `indices` raised a RuntimeError).
    one_hot = torch.zeros_like(logits)
    one_hot.scatter_(-1, indices.unsqueeze(-1), 1)
    return one_hot
| StarcoderdataPython |
11356474 |
from app.controllers.shopping_controller import ShoppingController
from app.data_access.personDAL import PersonDal
from app.helpers.mongodbConnector import Connection

# Script entry point: open the MongoDB connection first, then drive the
# shopping workflow through its controller.
connection=Connection()
connection.connect_mongodb()

shopping_controller = ShoppingController()
shopping_controller.create_shopping()
| StarcoderdataPython |
9798192 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddlers.models.ppseg.cvlibs import manager
@manager.LOSSES.add_component
class BootstrappedCrossEntropyLoss(nn.Layer):
    """
    Implements the cross entropy loss function.

    Args:
        min_K (int): the minimum number of pixels to be counted in loss computation.
        loss_th (float): the loss threshold. Only loss that is larger than the threshold
            would be calculated.
        weight (tuple|list, optional): The weight for different classes. Default: None.
        ignore_index (int, optional): Specifies a target value that is ignored
            and does not contribute to the input gradient. Default: 255.
    """

    def __init__(self, min_K, loss_th, weight=None, ignore_index=255):
        super().__init__()
        self.ignore_index = ignore_index
        self.K = min_K
        self.threshold = loss_th
        if weight is not None:
            weight = paddle.to_tensor(weight, dtype='float32')
        self.weight = weight

    def forward(self, logit, label):
        """Compute the bootstrapped CE loss, averaged over the batch.

        Per sample, pixel losses are sorted descending; when the K-th
        largest loss exceeds the threshold, all pixels above the threshold
        contribute, otherwise only the top-K pixels do.
        """
        n, c, h, w = logit.shape
        total_loss = 0.0
        # Ensure label carries an explicit channel axis matching logit.
        if len(label.shape) != len(logit.shape):
            label = paddle.unsqueeze(label, 1)

        for i in range(n):
            # Flatten this sample to (H*W, C) logits and (H*W,) labels.
            x = paddle.unsqueeze(logit[i], 0)
            y = paddle.unsqueeze(label[i], 0)
            x = paddle.transpose(x, (0, 2, 3, 1))
            y = paddle.transpose(y, (0, 2, 3, 1))
            x = paddle.reshape(x, shape=(-1, c))
            y = paddle.reshape(y, shape=(-1, ))

            loss = F.cross_entropy(
                x,
                y,
                weight=self.weight,
                ignore_index=self.ignore_index,
                reduction="none")
            sorted_loss = paddle.sort(loss, descending=True)
            # NOTE(review): indexing sorted_loss[self.K] assumes
            # K < H*W pixels — confirm for very small inputs.
            if sorted_loss[self.K] > self.threshold:
                new_indices = paddle.nonzero(sorted_loss > self.threshold)
                loss = paddle.gather(sorted_loss, new_indices)
            else:
                loss = sorted_loss[:self.K]

            total_loss += paddle.mean(loss)
        # Average the per-sample bootstrapped means over the batch.
        return total_loss / float(n)
| StarcoderdataPython |
11342259 | from __future__ import unicode_literals
from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..utils import ExtractorError
class ViceIE(InfoExtractor):
    """Extractor for vice.com pages whose video is hosted via an Ooyala embed."""

    # Matches vice.com and any subdomain (e.g. news.vice.com) content URLs.
    _VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?:[^/]+/)+(?P<id>.+)'

    _TESTS = [
        {
            'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
            'info_dict': {
                'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
                'ext': 'mp4',
                'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
                'duration': 725.983,
            },
            'params': {
                # Requires ffmpeg (m3u8 manifest)
                'skip_download': True,
            },
        }, {
            'url': 'https://news.vice.com/video/experimenting-on-animals-inside-the-monkey-lab',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        """Resolve a Vice page to its underlying Ooyala video result."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        try:
            # The Ooyala embed code is carried in an embedCode= query fragment
            # somewhere in the page markup.
            embed_code = self._search_regex(
                r'embedCode=([^&\'"]+)', webpage,
                'ooyala embed code')
            ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
        except ExtractorError:
            raise ExtractorError('The page doesn\'t contain a video', expected=True)
        # Delegate the actual extraction to the Ooyala extractor.
        return self.url_result(ooyala_url, ie='Ooyala')
| StarcoderdataPython |
11288130 | import sys
import os
class AnsiColors:
    """ANSI escape sequences used to colour terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class TestPrinter(object):
    """Console reporter for matcher results.

    Failures are printed in red and the exception is re-raised; success
    messages are only emitted when the ST_VERBOSE env var is set.
    (Python 2 module: uses print statements.)
    """
    def __init__(self):
        self.show_success = False
        self.colors = True
        # truthy when the ST_VERBOSE environment variable is present
        self.verbose = os.environ["ST_VERBOSE"] if "ST_VERBOSE" in os.environ else 0
        pass

    def failure_meesage_print(self, message, exception=None):
        # Print the failure in red, then propagate the exception to abort
        # the assertion chain.
        print AnsiColors.FAIL + exception.__class__.__name__ + "\n" +\
            message + AnsiColors.ENDC
        raise exception

    def success_message_print(self, message):
        if self.verbose:
            print AnsiColors.OKGREEN + message + AnsiColors.ENDC

# Shared module-level reporter used by MatchResult.
printer = TestPrinter()
class MatchResult(object):
    """Evaluates a matcher outcome against its (possibly negated) expectation.

    Construction has side effects: on an unexpected outcome it delegates to
    the module-level `printer`, whose failure path re-raises an
    AssertionError carrying the caller's file/line; on the expected outcome
    a success message is printed when verbose.
    """
    def __init__(
            self,
            matches,
            failure,
            negated_failure,
            is_negated,
            trace
    ):
        self.matches = matches
        self.failure = failure
        self.negated_failure = negated_failure
        assert isinstance(matches, bool)
        assert isinstance(failure, str)
        assert isinstance(negated_failure, str)
        if matches and is_negated:
            # expectation said "never", but the matcher matched -> failure
            printer.failure_meesage_print(
                str(self._failure(trace, negated_failure)),
                self._failure(trace, negated_failure)
            )
        elif not matches and not is_negated:
            # expectation said "should", but the matcher did not match -> failure
            printer.failure_meesage_print(
                str(self._failure(trace, failure)),
                self._failure(trace, failure)
            )
        elif matches and not is_negated:
            printer.success_message_print(
                negated_failure
            )
        else:
            printer.success_message_print(
                failure
            )

    def _failure(self, trace, what):
        # Build an AssertionError that points at the expectation's source line.
        return AssertionError("{} - {}: {}".format(
            trace.filename,
            trace.lineno,
            what
        ))
class Matching(object):
    """Base class for fluent matchers.

    Records the caller's stack frame at construction so assertion failures
    can report the file/line of the expectation, and tracks negation state
    for the `.never` modifier.
    """
    def __init__(self, upper_trace=None):
        from inspect import getframeinfo, stack
        self.negated = False
        self.val = None
        if upper_trace is None:
            count = 0
            # Walk up the stack past any __init__ frames so the recorded
            # trace points at user code rather than matcher construction.
            # NOTE(review): `True or count > 10` is always True, so the loop
            # condition never uses the count guard; the explicit count == 10
            # raise below is the effective depth limit — confirm intent.
            while True or count > 10:
                self.trace = getframeinfo(stack()[count][0])
                if self.trace.function != "__init__":
                    break
                if count == 10:
                    raise Exception("stack trace too deep")
                count += 1
            pass

    @property
    def never(self):
        """Negate the following expectation (e.g. `.should.never...`)."""
        self.negated = True
        return self

    def get_match_result(self, result, failure, failure_negated):
        # MatchResult's constructor performs the actual pass/fail handling.
        return MatchResult(
            result, failure, failure_negated, self.negated, self.trace
        )
class ANumberShould(Matching):
    """Numeric assertions reached via `ANumber(x).should`."""
    def __init__(self, num):
        super(ANumberShould, self).__init__()
        self.num = num

    @property
    def be_nan(self):
        """Assert that the wrapped value is NaN."""
        import math
        return self.get_match_result(
            math.isnan(self.num),
            self._failure_nan(value=self.num),
            self._failure_not_nan()
        )

    def be_equal_with_precision_to(self, value, precision):
        """Assert relative equality up to `precision` significant digits."""
        result = abs(self.num - value) / max(abs(self.num),
                                             abs(value)) < 10**(-precision)
        return self.get_match_result(
            result,
            "{} is not equal to {} up to {} digits".format(
                self.num, value, precision),
            "{} is equal to {} up to {} digits".format(
                self.num, value, precision)
        )

    def be_equal_to(self, value):
        """Assert exact equality with `value`."""
        return self.get_match_result(
            self.num == value,
            "{} is not equal to {}".format(self.num, value),
            "{} is equal to {}".format(self.num, value)
        )

    def _failure_nan(self, value):
        return "{} is not nan".format(value)

    def _failure_not_nan(self):
        return "Value is nan"
class ANumber(Matching):
    """Entry point for numeric expectations, e.g. `ANumber(10).should.be_equal_to(10)`."""
    def __init__(self, num):
        super(ANumber, self).__init__()
        self.num = num

    @property
    def should(self):
        # Hand the recorded trace over so failures report the caller's line.
        num = ANumberShould(self.num)
        num.trace = self.trace
        return num
class AString(Matching):
    """Entry point for string expectations.

    Wraps a string and exposes `after_being_*` transformation helpers that
    mutate the wrapped value before the assertion chain continues.
    """
    def __init__(self, st):
        super(AString, self).__init__()
        self.val = st

    @property
    def should(self):
        # Hand the recorded trace over so failures report the caller's line.
        num = AStringShould(self.val)
        num.trace = self.trace
        return num

    def after_being(self, func):
        """Apply an arbitrary transformation `func` to the wrapped value.

        Bug fix: this was declared as a one-argument `@property` (which can
        never be invoked with an argument) and assigned the unbound
        ``strip`` method instead of applying the transformation.
        """
        self.val = func(self.val)
        return self

    @property
    def after_being_stripped(self):
        self.val = self.val.strip()
        return self

    @property
    def after_being_uppercased(self):
        self.val = self.val.upper()
        return self

    @property
    def after_being_lowercased(self):
        # Bug fix: previously called strip() (copy-paste from
        # after_being_stripped); lowercasing is what the name promises.
        self.val = self.val.lower()
        return self
class AStringShould(Matching):
    """Assertion half of the string matcher chain.

    NOTE(review): this class looks unfinished — `be_equal_to` is exposed as
    a property that returns another AStringShould instead of performing a
    comparison (compare with ANumberShould.be_equal_to); confirm intended
    behavior before relying on it.
    """
    def __init__(self, st):
        super(AStringShould, self).__init__()
        self.num = st

    @property
    def be_equal_to(self):
        num = AStringShould(self.num)
        num.trace = self.trace
        return num
"""
ANumber(10).should.be_equal_to(9)
ANumber(200).should.be_nan
ANumber(float('nan')).should.be_nan
ANumber(100.1).should.never.be_equal_with_precision_to(100.00001, 3)
"""
| StarcoderdataPython |
4934754 | # Generated by rpcgen.py from portmap.x on Sat Nov 16 15:25:09 2019
# RPC portmapper (RFC 1833 v2) procedure numbers and well-known constants.
# NOTE: this file is generated by rpcgen.py; manual edits may be overwritten.
PMAPPROC_NULL = 0      # no-op ping
PMAPPROC_SET = 1       # register a (program, version, protocol, port) mapping
PMAPPROC_UNSET = 2     # remove a mapping
PMAPPROC_GETPORT = 3   # look up the port for a program/version
PMAPPROC_DUMP = 4      # list all registered mappings
PMAPPROC_CALLIT = 5    # indirect call through the portmapper
PMAP_PORT = 111        # well-known portmapper TCP/UDP port
IPPROTO_TCP = 6        # IP protocol numbers used in mappings
IPPROTO_UDP = 17
PMAP_PROG = 100000     # portmapper RPC program number
PMAP_VERS = 2          # portmapper protocol version
| StarcoderdataPython |
8159881 | <gh_stars>1-10
"""
Endpoints to get the schemas
"""
# Import from libraries
from cornflow_client.airflow.api import Airflow
from flask import current_app
from flask_apispec.views import MethodResource
from flask_apispec import marshal_with, use_kwargs, doc
import logging as log
# Import from internal modules
from .meta_resource import MetaResource
from ..shared.exceptions import AirflowError
from ..schemas.schemas import SchemaOneApp, SchemaRequest, SchemaListApp
class SchemaEndpoint(MetaResource, MethodResource):
    """
    Endpoint used to obtain names of available apps
    """

    @doc(description="Get list of available apps", tags=["Schemas"])
    @marshal_with(SchemaListApp(many=True))
    def get(self):
        """
        API method to get a list of dag names

        :return: A dictionary with a message and a integer with the HTTP status code
        :rtype: Tuple(dict, integer)
        """
        airflow_client = Airflow.from_config(current_app.config)
        if airflow_client.is_alive():
            log.debug("User gets list of schema")
            return airflow_client.get_all_schemas()
        # Airflow is down: report and abort with the standard error.
        log.error("Airflow not accessible when getting schemas")
        raise AirflowError(error="Airflow is not accessible")
class SchemaDetailsEndpoint(MetaResource, MethodResource):
    """
    Endpoint used to obtain schemas for one app
    """

    @doc(description="Get instance, solution and config schema", tags=["Schemas"])
    @marshal_with(SchemaOneApp)
    def get(self, dag_name):
        """
        API method to get the input, output and config schemas for a given dag

        :return: A dictionary with a message and a integer with the HTTP status code
        :rtype: Tuple(dict, integer)
        """
        airflow_client = Airflow.from_config(current_app.config)
        if not airflow_client.is_alive():
            log.error("Airflow not accessible when getting schema {}".format(dag_name))
            raise AirflowError(error="Airflow is not accessible")
        # Probe airflow first: this raises when dag_name is unknown.
        airflow_client.get_dag_info(dag_name)
        log.debug("User gets schema {}".format(dag_name))
        # The dag exists, so fetch and return its schemas.
        return airflow_client.get_schemas_for_dag_name(dag_name)
| StarcoderdataPython |
1759116 | <reponame>NicGobbi/age-of-empires-II-api
from numpy import genfromtxt
import os
from db import db
from api.models.factory import get_model
def populate_db():
    """Load every CSV file under ./data into the database.

    Each file maps to a model via its base name; rows are committed once
    per file.
    """
    data_dir = os.path.abspath('./data')
    for entry in os.listdir(data_dir):
        if not entry.endswith('.csv'):
            continue
        rows = load_data('data/{}'.format(entry))
        model_name = entry.split(".")[0]
        for row in rows:
            db.session.add(get_model(model_name, row))
        db.session.commit()
def load_data(file_name):
    """Read a comma-separated file into nested lists of unicode strings.

    The header row is skipped and surrounding whitespace is stripped from
    every field.
    """
    parsed = genfromtxt(
        file_name,
        delimiter=',',
        skip_header=1,
        dtype='unicode',
        autostrip=True,
    )
    return parsed.tolist()
| StarcoderdataPython |
246326 | <reponame>yashsavani/NeRF-Project
##
%load_ext autoreload
%autoreload 2
##
##
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
##
##
# Generate a rotated anisotropic Gaussian cloud and draw its principal axes.
theta = np.radians(30)
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
# NOTE(review): cov @ R is not symmetric, so this is not a proper
# covariance matrix — confirm whether R @ cov @ R.T was intended.
cov = np.array([[3, 0], [0, 0.4]]) @ R
data = np.random.multivariate_normal([0, 0], cov, 100)
plt.scatter(*data.T)
plt.xlim([-4, 4])
plt.ylim([-4, 4])
# Bug fix: principal axes come from the 2x2 scatter matrix X^T X. The
# original `data @ data.T` built a 100x100 matrix whose singular vectors
# are per-sample, so comp[:, 0] had length 100 and could not be drawn as
# two 2-D arrows by quiver.
u, s, _ = np.linalg.svd(data.T @ data)
comp = u @ np.diag(s)
plt.quiver(*np.zeros((2, 2)), comp[:, 0], comp[:, 1], color=['r', 'g'])
##
| StarcoderdataPython |
11393874 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' shared utility functions '''
import pkgutil
import shlex
import subprocess
import os
import platform
import tarfile
import hashlib
import urllib2
import json
import socket
import time
from threading import Timer
import pip
def get_available_port():
    """ get a random available port

    Binding a TCP socket to port 0 makes the kernel pick an unused
    ephemeral port, which is read back and returned.
    :returns: port number
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(('', 0))
    _, chosen_port = probe.getsockname()
    probe.close()
    return chosen_port
def pip_check(check, install=None, url=None):
    """ check if check_name exists, and install install_name if it does not exist
    :check: the package name to be checked (importable module name)
    :install: the package to be installed (defaults to `check` when omitted)
    :url: optional URL of a package file to download into this directory
          and install instead of a PyPI name
    """
    home = os.path.dirname(os.path.realpath(__file__))
    # Commented-out bootstrap kept for reference: fetch get-pip.py when pip
    # itself is missing.
    # if pkgutil.find_loader('pip') is None:
    #     pip_url = 'https://bootstrap.pypa.io/get-pip.py'
    #     package_name = os.path.basename(url_get_file(pip_url, home, 'getpip'))
    #     import getpip
    #     getpip.main()
    if pkgutil.find_loader(check) is None:
        if url is not None:
            # Download the package file next to this script and install that.
            package_name = os.path.basename(url_get_file(url, home))
        else:
            package_name = install if install is not None else check
        print '[CIMFUZZ]: setting up {}'.format(package_name)
        # User-level install to avoid requiring root privileges.
        install_cmd = 'install --user --ignore-installed {}'.format(package_name)
        pip.main(shlex.split(install_cmd))
def run_command_noret(command, timeout=None, caller='CIMFUZZ', debug=False, queue=None, env=None):
    ''' execute command with subprocess and capture output

    :command: shell-style command string (split with shlex; run without a shell)
    :timeout: seconds before the child is force-killed via kill_process
    :caller: tag prepended to echoed output lines in debug mode
    :debug: if True, stream the child's combined stdout/stderr to our stdout;
            otherwise discard output, and treat a trailing "< arg" in the
            command as data to feed on the child's stdin
    :queue: optional queue that receives the Popen object so another thread
            can manage/kill the child
    :env: environment mapping for the child process
    '''
    cmd_list = shlex.split(command)
    # if env is None:
    #     env=os.environ.copy()
    if debug:
        process = subprocess.Popen(cmd_list,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   shell=False,
                                   env=env)
        # if we pass in a queue to record the process information
        if queue is not None:
            queue.put(process)
        try:
            if timeout is not None:
                # Watchdog: kill the child when the timeout elapses.
                timer = Timer(timeout, kill_process, args=(process,timeout, command))
                timer.start()
            # Echo child output line-by-line until it exits, then flush the rest.
            while process.poll() is None:
                line = process.stdout.readline()
                print '[{}]: {}'.format(caller, line),
            print '[{}]: {}'.format(caller, process.stdout.read())
        finally:
            if timeout is not None:
                timer.cancel()
    else:
        with open(os.devnull, 'wb') as devnull:
            stdin = None
            # A trailing "< arg" means: strip it and pass `arg` on stdin.
            if cmd_list[-2] == '<':
                stdin = cmd_list[-1]
                cmd_list = cmd_list[:-2]
            process = subprocess.Popen(cmd_list,
                                       stdout=devnull,
                                       stderr=devnull,
                                       stdin=subprocess.PIPE,
                                       shell=False,
                                       env=env)
            # if we pass in a queue to record the process information
            if queue is not None:
                queue.put(process)
            try:
                if timeout is not None:
                    timer = Timer(timeout, kill_process, args=(process, timeout, command, env))
                    timer.start()
                process.communicate(stdin)
            except Exception:
                print 'S2E process communicate error'
                raise
            finally:
                # kill_process(process)
                if timeout is not None:
                    timer.cancel()
def run_command_ret(command, timeout=None):
    ''' execute command with subprocess and return output

    Returns the (stdout, stderr) pair captured from the child process.
    NOTE(review): `timeout` is accepted but currently unused — confirm intent.
    '''
    argv = shlex.split(command)
    proc = subprocess.Popen(argv,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=False)
    return proc.communicate()
def kill_process(process, timeout=None, command=None, env=None):
    ''' try to kill the subprocess.Popen() ed process

    :process: Popen object whose pid receives SIGKILL
    :timeout: if given, echoed in the log message (used by Timer callbacks)
    :command: optional command string echoed for diagnostics
    :env: optional environment mapping echoed for diagnostics
    '''
    pid = process.pid
    failed = False
    try:
        # SIGKILL (9): not catchable or ignorable by the child.
        os.kill(pid, 9)
    except OSError:
        failed = True
    finally:
        if failed:
            print 'failed to kill process ({}) with timeout ({})'.format(pid, timeout)
        else:
            if timeout:
                print 'process ({}) terminated with timeout ({})'.format(pid, timeout)
            if env:
                print env
            if command:
                print command
def get_terminal_width():
    ''' get the current width of the terminal

    Returns [rows, cols] as reported by `stty size`, or [0, 50] when the
    command produced nothing parseable.
    '''
    stdout, _ = run_command_ret('stty size')
    if stdout:
        try:
            return [int(field) for field in stdout.split()]
        except Exception:
            pass
    return [0, 50]
def print_sep():
    ''' print seperate line according to the terminal size '''
    # size is [rows, cols]; draw a dash rule spanning the full width
    size = get_terminal_width()
    print '-' * int(size[1])
def get_file_arch(binary):
    ''' get the architecture of the input binary

    :binary: path to an executable
    :returns: platform.architecture()'s (bits, linkage) tuple for it
    '''
    return platform.architecture(binary)
def unzip(file_name, target_dir):
    ''' unzip the gzip file downloaded from remote and return the full
    path to unzipped directory

    :file_name: path to a .tar.gz archive
    :target_dir: directory to extract into
    :returns: "<target_dir>/<top-level entry name>"
    '''
    # The previous try/except merely re-raised the exception and leaked the
    # open tar handle on failure; a context manager closes it on every path.
    # NOTE(review): extractall trusts archive member paths — only use on
    # archives from trusted sources.
    with tarfile.open(file_name, 'r:gz') as tar:
        # assume the first node in the gzip file is the directory
        dir_name = tar.getnames()[0]
        # extract to target_dir
        tar.extractall(target_dir)
    return '{}/{}'.format(target_dir, dir_name)
def check_dir(path):
    ''' create directory if not exists

    Creates all missing intermediate directories; a concurrent creation of
    the same path is tolerated silently.
    '''
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc:
        # Another process may have created the directory between the
        # existence check and makedirs; only re-raise genuine failures.
        if exc.errno != os.errno.EEXIST:
            raise
def md5sum(binary):
    ''' calculate md5 hash of an input file

    Reads the file in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory; returns the hex digest.
    '''
    digest = hashlib.md5()
    with open(binary, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def url_get_file(uri, target_dir, target_name=None):
    ''' get file from an url

    Downloads *uri* into *target_dir*, drawing a console progress bar,
    and returns the full path of the downloaded file.

    NOTE(review): Python 2 only -- relies on urllib2, print statements and
    meta.getheaders(); a Python 3 port needs urllib.request and
    meta.get("Content-Length").
    '''
    file_size_dl = 0   # bytes downloaded so far
    block_sz = 8192    # read chunk size
    # try to format uri if start with '/' or '.'
    if uri.startswith('/') or uri.startswith('.'):
        uri = 'file://{}'.format(os.path.realpath(uri))
    print uri
    request = urllib2.urlopen(uri)
    meta = request.info()
    # remote must advertise Content-Length; raises IndexError/KeyError otherwise
    file_size = int(meta.getheaders("Content-Length")[0])
    print_sep()
    print "[CIMFUZZ]: Downloading: [{}]".format(uri)
    print "[CIMFUZZ]: Size of file: [{}]".format(file_size)
    # default target file name is the last path component of the URI
    if target_name is None:
        file_name = '{}/{}'.format(target_dir, uri.split('/')[-1])
    else:
        file_name = '{}/{}'.format(target_dir, target_name)
    gz_file = open(file_name, 'wb')
    # print the progress
    status = ''
    # progress bar fills half the terminal width (integer division on Py2)
    bar_size = get_terminal_width()[1] / 2
    progress_size = 0
    blank_size = bar_size - progress_size
    os.system('setterm -cursor off')  # hide the cursor while drawing the bar
    while True:
        buf = request.read(block_sz)
        if not buf:
            print '\n[CIMFUZZ]: Download Finished!'
            break
        if len(status) > 0:
            # backspace over the previously printed status line
            print chr(8)*(len(status)+2),
        file_size_dl += len(buf)
        percentage = float(file_size_dl) / file_size
        gz_file.write(buf)
        progress_size = int(bar_size * percentage)
        blank_size = bar_size - progress_size
        status = "[{0}{1}] {2:d}/{3:d} [{4:.2%}]"\
            .format('*' * progress_size,
                    ' ' * blank_size,
                    file_size_dl,
                    file_size,
                    percentage)
        print status,
    os.system('setterm -cursor on')  # restore the cursor
    gz_file.close()
    return file_name
def is_elf(f):
    ''' return True if the file at path *f* starts with the ELF magic

    Fixes in this revision: the file descriptor is now always closed (the
    original leaked it when os.read raised, and on every False return from
    the except branch), the bare except is narrowed to OSError, and the
    magic is compared against bytes so the check also works on Python 3
    (b'ELF' == 'ELF' on Python 2, so behavior there is unchanged).
    '''
    fd = os.open(f, os.O_RDONLY)
    try:
        magic = os.read(fd, 4)
    except OSError:
        return False
    finally:
        os.close(fd)
    # ELF files begin with b'\x7fELF'; byte 0 is skipped as in the original
    return magic[1:] == b'ELF'
def serialize_sql(target, content):
    """ append *content* serialized as JSON to the *target* file """
    serialized = json.dumps(content)
    with open(target, 'a') as sink:
        sink.write(serialized)
def build_cmds(basedir):
    ''' build the execute command for every file directly inside
    <basedir>/cmd (subdirectories are not scanned) '''
    cmd_dir = '{}/cmd'.format(basedir)
    walker = os.walk(cmd_dir)
    try:
        top, _dirs, names = next(walker)
    except StopIteration:
        # cmd directory missing or unreadable: nothing to build
        return []
    return [build_cmd('{}/{}'.format(top, name), basedir=basedir)
            for name in names]
def build_cmd(cmd_file, basedir=None):
    ''' read a JSON command description file and assemble the execute
    command as a list of argv tokens '''
    if basedir is None:
        basedir = os.path.dirname(cmd_file)
    with open(cmd_file, 'r') as handle:
        spec = json.load(handle)
    command = []
    for pos in range(spec.get('cmd_len')):
        entry = spec.get('pos_{}'.format(pos))
        if entry['type'] in ('opt', 'input'):
            # plain option / input token, used verbatim
            command.append(entry['value'])
            continue
        # otherwise the type names a subdirectory holding the target file
        target = '{}/{}/{}'.format(basedir, entry['type'], entry['target'])
        command.append(entry['value'].format(target))
    return command
| StarcoderdataPython |
5058816 | # sqlite3 handler
# TODO: More on prevention of SQL injection
import sqlite3
from utils.logger import log_adapter
logger = log_adapter.getlogger(__name__)
class DbHandler:
    """Thin wrapper around a sqlite3 connection.

    Values are now bound as parameters in ``check_exist``/``delete`` to fix
    the quoting/SQL-injection bug the original TODO acknowledged. Table and
    column identifiers cannot be bound by the DB-API and are still
    interpolated: callers must not pass untrusted identifiers.
    """

    def __init__(self, db_path='data.db'):
        # db_path is parameterized; the default preserves the original
        # hard-coded 'data.db' behavior for existing callers.
        self.active = True
        self.conn = sqlite3.connect(db_path)

    def cleanup(self):
        """Close the connection; the handler must not be used afterwards."""
        logger.debug("Running cleanup for DbHandler")
        self.conn.close()
        self.active = False

    def _check_active(self):
        # shared guard used by every statement-executing method
        if not self.active:
            raise Exception("Try to operate on closed db handler")

    def create_table(self, tablename, tablestr):
        """Create *tablename* with column spec *tablestr* if it is missing."""
        self._check_active()
        cur = self.conn.cursor()
        cur.execute('CREATE TABLE IF NOT EXISTS %s (%s)' % (tablename, tablestr))
        self.conn.commit()

    def check_exist(self, table, key, value):
        """Return the number of rows in *table* where *key* equals *value*."""
        self._check_active()
        cur = self.conn.cursor()
        # value is bound as a parameter: safe for quotes and injection
        cur.execute("SELECT * FROM %s WHERE %s=?" % (table, key), (value,))
        return len(cur.fetchall())

    def insert(self, table, values):
        """Insert a literal VALUES tuple string, e.g. ``"(1, 'a')"``.

        HACK: *values* is interpolated verbatim to keep the existing call
        interface; do not pass untrusted data here.
        """
        self._check_active()
        cur = self.conn.cursor()
        cur.execute('INSERT INTO %s VALUES %s' % (table, values))
        self.conn.commit()

    def delete(self, table, key, value):
        """Delete all rows in *table* where *key* equals *value*."""
        self._check_active()
        cur = self.conn.cursor()
        cur.execute("DELETE FROM %s WHERE %s=?" % (table, key), (value,))
        self.conn.commit()


# module-level singleton shared by importers (connects to data.db on import)
dbHandler = DbHandler()
| StarcoderdataPython |
import sys
from collections import deque

input = sys.stdin.readline

# Kahn's algorithm for topological sorting.
# Input: N nodes, M lines; each line "K a b c ..." means a < b < c must hold
# in the output order (consecutive pairs become edges). Prints the order,
# or a single 0 if the constraints contain a cycle.
N, M = map(int, input().split())
adj = [[] for _ in range(N)]
indegree = [0] * N
for _ in range(M):
    line = list(map(int, input().split()))
    K = line[0]
    if K == 0:
        continue
    prev = line[1]
    for cur in line[2:]:
        # edge prev -> cur (1-based input, 0-based internal ids)
        indegree[cur - 1] += 1
        adj[prev - 1].append(cur - 1)
        prev = cur
result = [0] * N
# deque gives O(1) popleft; the original used list.pop(0), which is O(n)
queue = deque(i for i in range(N) if indegree[i] == 0)
for i in range(N):
    if not queue:
        # no zero-indegree node left before all were placed: cycle
        print(0)
        sys.exit(0)
    cur = queue.popleft()
    result[i] = cur + 1
    for nxt in adj[cur]:  # 'nxt' avoids shadowing the builtin next()
        indegree[nxt] -= 1
        if indegree[nxt] == 0:
            queue.append(nxt)
for i in result:
    print(i)
| StarcoderdataPython |
class ParsimoniousError(Exception):
    """Wrap a parsimonious exception in a user-friendly message."""

    def __init__(self, exception):
        """
        A class for wrapping parsimonious errors to make them a bit more sensible to users of this library.
        :param exception: The original parsimonious exception
        :return: self
        """
        self.exception = exception

    def __unicode__(self):
        # Python 2 text protocol; kept for backward compatibility
        return u'Encountered an error parsing your api specification. The error was: \n {}'.format(self.exception)

    def __str__(self):
        # BUG FIX: the original called the builtin unicode(), which does not
        # exist on Python 3; delegate to __unicode__ directly instead.
        return self.__unicode__()
6420598 | <filename>modules_lib/pyEMONCMS/models/node/node_model.py
# -*- coding: utf-8 -*-
#/*
# All Emoncms code is released under the GNU Affero General Public License.
# See COPYRIGHT.txt and LICENSE.txt.
#
# ---------------------------------------------------------------------
# Emoncms - open source energy visualisation
# Part of the OpenEnergyMonitor project:
# http://openenergymonitor.org
#*/
from ironworks import serverTools
from modules_lib.pyEMONCMS import cmsSettings
class Node():
    """EMONCMS node model: node creation guarded by the user's write API key.

    NOTE: this class was ported from the emoncms PHP node model. The original
    carried the un-ported PHP methods (set_decoder, get_all, process,
    setNodeData, getNodeData) inside a discarded triple-quoted string; that
    dead code has been removed -- consult the upstream emoncms PHP source if
    those methods ever need porting.
    """

    def __init__(self):
        self.logger = serverTools.getLogger()
        # one CMS settings object is shared across the server process
        self.settings = serverTools.getCMSSettings()
        if self.settings is None:
            self.settings = cmsSettings.Settings()
            serverTools.setCMSSettings(self.settings)

    def createNode(self, data):
        """Create a node for the given user if the write API key matches.

        :param data: dict with keys 'userid', 'description', 'apikey_write'
        :return: the result dict from settings.createNode on success,
                 otherwise {'success': False}
        """
        failure = {'success': False}
        userid = data['userid']
        description = data['description']
        # getAPIKeyWrite returns a sequence; the key is its first element
        # (presumably one key per user -- TODO confirm against settings impl)
        api_key_write = self.settings.getAPIKeyWrite(userid)[0]
        if api_key_write != data['apikey_write']:
            return failure
        return self.settings.createNode(userid, description)
| StarcoderdataPython |
15229 | # -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable= no-member, arguments-differ, invalid-name
#
# Utilities for using pre-trained models.
import torch
from dgl.data.utils import _get_dgl_url, download
from .moleculenet import *
from .generative_models import *
from .property_prediction import *
from .reaction import *
# names exported by ``from ... import *``
__all__ = ['load_pretrained']
# model name -> checkpoint URL postfix, merged across all model families
url = {**moleculenet_url, **generative_url, **property_url, **reaction_url}
def download_and_load_checkpoint(model_name, model, model_postfix,
                                 local_pretrained_path='pre_trained.pth', log=True):
    """Fetch the pretrained checkpoint for a model and load its weights.

    The checkpoint is always mapped onto CPU.

    Parameters
    ----------
    model_name : str
        Name of the model
    model : nn.Module
        Instantiated model instance
    model_postfix : str
        Postfix for pretrained model checkpoint
    local_pretrained_path : str
        Local name for the downloaded model checkpoint
    log : bool
        Whether to print progress for model loading

    Returns
    -------
    model : nn.Module
        Pretrained model
    """
    remote_url = _get_dgl_url(model_postfix)
    checkpoint_file = '{}_{}'.format(model_name, local_pretrained_path)
    download(remote_url, path=checkpoint_file, log=log)
    state = torch.load(checkpoint_file, map_location='cpu')
    model.load_state_dict(state['model_state_dict'])
    if log:
        print('Pretrained model loaded')
    return model
# pylint: disable=I1101
def load_pretrained(model_name, log=True):
    """Load a pretrained model by name.

    Parameters
    ----------
    model_name : str
        Name of the pretrained model. The supported names are exactly the
        keys of the module-level ``url`` dict and fall into four families:

        * Generative models: ``'DGMG_ChEMBL_canonical'``,
          ``'DGMG_ChEMBL_random'``, ``'DGMG_ZINC_canonical'``,
          ``'DGMG_ZINC_random'`` and ``'JTNN_ZINC'``
        * Reaction models: ``'wln_center_uspto'`` (reaction center
          prediction) and ``'wln_rank_uspto'`` (candidate product ranking)
        * Self-supervised GINs: ``'gin_supervised_contextpred'``,
          ``'gin_supervised_infomax'``, ``'gin_supervised_edgepred'``,
          ``'gin_supervised_masking'``
        * MoleculeNet property models, named
          ``'{architecture}_{featurization}_{dataset}'`` where architecture
          is one of ``GCN``, ``GAT``, ``Weave``, ``MPNN``, ``AttentiveFP``,
          ``NF`` or a fine-tuned ``gin_supervised_*`` variant; featurization
          is ``canonical`` or ``attentivefp``; dataset is one of ``BACE``,
          ``BBBP``, ``ClinTox``, ``ESOL``, ``FreeSolv``, ``HIV``,
          ``Lipophilicity``, ``MUV``, ``PCBA``, ``SIDER``, ``Tox21``,
          ``ToxCast``. Legacy names ``'GCN_Tox21'``, ``'GAT_Tox21'``,
          ``'Weave_Tox21'`` and ``'AttentiveFP_Aromaticity'`` are also
          supported. Not every combination exists; an unknown name raises
          ``RuntimeError``.
    log : bool
        Whether to print progress for model loading

    Returns
    -------
    model : nn.Module
        The instantiated model with pretrained weights loaded (on CPU)

    Raises
    ------
    RuntimeError
        If ``model_name`` has no registered checkpoint, or if a checkpoint
        exists but no factory recognizes the name.
    """
    if model_name not in url:
        raise RuntimeError("Cannot find a pretrained model with name {}".format(model_name))

    # Try each model family's factory until one recognizes the name.
    model = None
    for create_fn in [create_moleculenet_model, create_generative_model,
                      create_property_model, create_reaction_model]:
        model = create_fn(model_name)
        if model is not None:
            break
    if model is None:
        # A checkpoint URL is registered but no factory could build the
        # model; fail loudly here instead of crashing later with an
        # AttributeError inside download_and_load_checkpoint.
        raise RuntimeError("Cannot construct a model with name {}".format(model_name))
    return download_and_load_checkpoint(model_name, model, url[model_name], log=log)
| StarcoderdataPython |
4945351 | <filename>setup.py<gh_stars>1-10
import setuptools
# Use the repository README as the PyPI long description.
with open("README.md", "r") as f:
    long_description = f.read()
# Package metadata for the "pyaw" (Python Assembly Wrapper) distribution.
setuptools.setup(
    name='pyaw',
    version='0.0.2',
    description='Python Assembly Wrapper',
    author='ninjamar',
    url='https://github.com/ninjamar/pyaw',
    packages=['pyaw'],
    long_description=long_description,
    long_description_content_type="text/markdown",
    license_files = ("LICENSE",),
    python_requires='>=3.7',
)
| StarcoderdataPython |
6695476 | <gh_stars>10-100
from __future__ import absolute_import
import os.path
from ocrd_utils import (
getLogger,
make_file_id,
assert_file_grp_cardinality,
MIMETYPE_PAGE
)
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import (
to_xml, AlternativeImageType
)
from ocrd import Processor
from .. import get_ocrd_tool
from .common import (
# binarize,
remove_noise)
# Name under which this processor is registered in the ocrd-tool.json.
TOOL = 'ocrd-cis-ocropy-denoise'
class OcropyDenoise(Processor):
    """OCR-D processor that despeckles binarized page/region/line images."""
    def __init__(self, *args, **kwargs):
        # Inject this tool's ocrd-tool.json section and version before
        # delegating to the generic Processor initialisation.
        self.ocrd_tool = get_ocrd_tool()
        kwargs['ocrd_tool'] = self.ocrd_tool['tools'][TOOL]
        kwargs['version'] = self.ocrd_tool['version']
        super(OcropyDenoise, self).__init__(*args, **kwargs)
    def process(self):
        """Despeckle the pages / regions / lines of the workspace.
        Open and deserialise PAGE input files and their respective images,
        then iterate over the element hierarchy down to the requested
        ``level-of-operation``.
        Next, for each file, crop each segment image according to the layout
        annotation (via coordinates into the higher-level image, or from the
        alternative image). Then despeckle by removing connected components
        smaller than ``noise_maxsize``. Apply results to the image and export
        it as an image file.
        Add the new image file to the workspace along with the output fileGrp,
        and using a file ID with suffix ``.IMG-DESPECK`` along with further
        identification of the input element.
        Reference each new image in the AlternativeImage of the element.
        Produce a new output file by serialising the resulting hierarchy.
        """
        LOG = getLogger('processor.OcropyDenoise')
        level = self.parameter['level-of-operation']
        # Exactly one input and one output file group are expected.
        assert_file_grp_cardinality(self.input_file_grp, 1)
        assert_file_grp_cardinality(self.output_file_grp, 1)
        for (n, input_file) in enumerate(self.input_files):
            LOG.info("INPUT FILE %i / %s", n, input_file.pageId or input_file.ID)
            file_id = make_file_id(input_file, self.output_file_grp)
            pcgts = page_from_file(self.workspace.download_file(input_file))
            self.add_metadata(pcgts)
            page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)
            page = pcgts.get_Page()
            # At page level the image must already be binarized; below page
            # level binarization is required on the segment image instead.
            page_image, page_xywh, page_image_info = self.workspace.image_from_page(
                page, page_id,
                feature_selector='binarized' if level == 'page' else '')
            # Derive the zoom factor relative to a 300 DPI reference, from
            # the explicit parameter, the image metadata, or a 1.0 fallback.
            if self.parameter['dpi'] > 0:
                zoom = 300.0/self.parameter['dpi']
            elif page_image_info.resolution != 1:
                dpi = page_image_info.resolution
                if page_image_info.resolutionUnit == 'cm':
                    dpi *= 2.54
                LOG.info('Page "%s" uses %f DPI', page_id, dpi)
                zoom = 300.0/dpi
            else:
                zoom = 1
            if level == 'page':
                self.process_segment(page, page_image, page_xywh, zoom,
                                     input_file.pageId, file_id)
            else:
                regions = page.get_AllRegions(classes=['Text'])
                if not regions:
                    LOG.warning('Page "%s" contains no text regions', page_id)
                for region in regions:
                    region_image, region_xywh = self.workspace.image_from_segment(
                        region, page_image, page_xywh,
                        feature_selector='binarized' if level == 'region' else '')
                    if level == 'region':
                        self.process_segment(region, region_image, region_xywh, zoom,
                                             input_file.pageId, file_id + '_' + region.id)
                        continue
                    lines = region.get_TextLine()
                    if not lines:
                        LOG.warning('Page "%s" region "%s" contains no text lines', page_id, region.id)
                    for line in lines:
                        line_image, line_xywh = self.workspace.image_from_segment(
                            line, region_image, region_xywh,
                            feature_selector='binarized')
                        self.process_segment(line, line_image, line_xywh, zoom,
                                             input_file.pageId,
                                             file_id + '_' + region.id + '_' + line.id)
            # update METS (add the PAGE file):
            file_path = os.path.join(self.output_file_grp, file_id + '.xml')
            pcgts.set_pcGtsId(file_id)
            out = self.workspace.add_file(
                ID=file_id,
                file_grp=self.output_file_grp,
                pageId=input_file.pageId,
                local_filename=file_path,
                mimetype=MIMETYPE_PAGE,
                content=to_xml(pcgts))
            LOG.info('created file ID: %s, file_grp: %s, path: %s',
                     file_id, self.output_file_grp, out.local_filename)
    def process_segment(self, segment, segment_image, segment_xywh, zoom, page_id, file_id):
        """Despeckle one segment image, save it, and reference it in PAGE."""
        LOG = getLogger('processor.OcropyDenoise')
        LOG.info("About to despeckle '%s'", file_id)
        # noise_maxsize is given in points; convert to pixels at this zoom.
        bin_image = remove_noise(segment_image,
                                 maxsize=self.parameter['noise_maxsize']/zoom*300/72) # in pt
        # update METS (add the image file):
        file_path = self.workspace.save_image_file(
            bin_image,
            file_id + '.IMG-DESPECK',
            page_id=page_id,
            file_grp=self.output_file_grp)
        # update PAGE (reference the image file):
        segment.add_AlternativeImage(AlternativeImageType(
            filename=file_path,
            comments=segment_xywh['features'] + ',despeckled'))
| StarcoderdataPython |
9770840 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchsummary import summary
from typing import Tuple
import math
class Swish(nn.Module):
    """Swish activation: ``x * sigmoid(x)`` (a.k.a. SiLU)."""

    def __init__(self):
        super(Swish, self).__init__()
        # Keep the sigmoid as a submodule, matching the original layout.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Gate the input by its own sigmoid response.
        gate = self.sigmoid(x)
        return x * gate
def Round_Channels(ch, divisor=8, min_value=None):
    """Round a channel count to the nearest multiple of ``divisor``.

    The result never drops below ``min_value`` (defaults to ``divisor``)
    and never falls more than 10% below the requested count.
    """
    floor = divisor if min_value is None else min_value
    rounded = max(floor, int(ch + divisor / 2) // divisor * divisor)
    # Guard: do not shrink the width by more than 10%.
    if rounded < 0.9 * ch:
        rounded += divisor
    return rounded
def Round_Repeats(r):
    """Round a (possibly fractional) block-repeat count up to an integer."""
    # math.ceil already returns an int in Python 3.
    return math.ceil(r)
def Drop_Path(x, drop_prob, training):
    """Stochastic depth: randomly zero whole samples of a batch.

    Each sample of ``x`` (shape ``(N, C, H, W)``) is kept with probability
    ``1 - drop_prob`` and rescaled by ``1 / (1 - drop_prob)`` so the
    expected activation is unchanged.  No-op at inference time or when
    ``drop_prob`` is 0.

    NOTE: mutates ``x`` in place (div_/mul_), exactly as before; callers
    receive the same tensor object back.
    """
    if drop_prob > 0 and training:
        keep_prob = 1 - drop_prob
        # One Bernoulli keep/drop draw per sample, broadcast over C/H/W.
        # Allocating directly on x.device with x.dtype replaces the
        # deprecated torch.autograd.Variable wrapper and the manual
        # is_cuda branching of the original (and also works for non-CUDA
        # accelerators and non-float32 inputs).
        mask = torch.empty(x.size(0), 1, 1, 1,
                           dtype=x.dtype, device=x.device).bernoulli_(keep_prob)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
def BatchNorm(channels, eps=1e-3, momentum=0.01):
    # BatchNorm2d with the TF-EfficientNet defaults (eps=1e-3, momentum=0.01).
    return nn.BatchNorm2d(channels, eps=eps, momentum=momentum)
def Conv3x3Bn(in_channels, out_channels, stride):
    """3x3 convolution (padding 1, no bias) -> BatchNorm -> Swish."""
    layers = (
        nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
        BatchNorm(out_channels),
        Swish(),
    )
    return nn.Sequential(*layers)
def Conv1x1Bn(in_channels, out_channels):
    """1x1 pointwise convolution (no bias) -> BatchNorm -> Swish."""
    layers = (
        nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
        BatchNorm(out_channels),
        Swish(),
    )
    return nn.Sequential(*layers)
class SqueezeAndExcite(nn.Module):
    """Squeeze-and-Excitation gate: global pool -> bottleneck -> sigmoid scale.

    Raises ValueError when ``squeeze_channels * se_ratio`` is not an integer.
    """

    def __init__(self, channels, squeeze_channels, se_ratio):
        super(SqueezeAndExcite, self).__init__()
        reduced = squeeze_channels * se_ratio
        if not reduced.is_integer():
            raise ValueError('Channels must be divisible by 1/ratio')
        reduced = int(reduced)
        # Submodule names must stay stable for state-dict compatibility.
        self.se_reduce = nn.Conv2d(channels, reduced, 1, 1, 0, bias=True)
        self.swish = Swish()
        self.se_expand = nn.Conv2d(reduced, channels, 1, 1, 0, bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: global average pool to (N, C, 1, 1).
        squeezed = torch.mean(x, (2, 3), keepdim=True)
        # Excite: bottleneck MLP then sigmoid, broadcast back over H/W.
        gate = self.sigmoid(self.se_expand(self.swish(self.se_reduce(squeezed))))
        return x * gate
class MBConvBlock(nn.Module):
    """Mobile inverted bottleneck (MBConv) block with optional SE and
    stochastic depth on the residual path."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, expand_ratio, se_ratio, drop_path_rate):
        super(MBConvBlock, self).__init__()
        expand = (expand_ratio != 1)
        expand_channels = in_channels * expand_ratio
        se = (se_ratio != 0.0)
        # Residual add only when spatial size and channel count are preserved.
        self.residual_connection = (stride == 1 and in_channels == out_channels)
        self.drop_path_rate = drop_path_rate
        conv = []
        if expand:
            # expansion phase: 1x1 pointwise conv widens the channels
            pw_expansion = nn.Sequential(
                nn.Conv2d(in_channels, expand_channels, 1, 1, 0, bias=False),
                BatchNorm(expand_channels),
                Swish()
            )
            conv.append(pw_expansion)
        # depthwise convolution phase (groups == channels)
        dw = nn.Sequential(
            nn.Conv2d(
                expand_channels,
                expand_channels,
                kernel_size,
                stride,
                kernel_size//2,
                groups=expand_channels,
                bias=False
            ),
            BatchNorm(expand_channels),
            Swish()
        )
        conv.append(dw)
        if se:
            # squeeze and excite channel-attention gate
            squeeze_excite = SqueezeAndExcite(expand_channels, in_channels, se_ratio)
            conv.append(squeeze_excite)
        # projection phase: 1x1 conv back to out_channels, no activation
        pw_projection = nn.Sequential(
            nn.Conv2d(expand_channels, out_channels, 1, 1, 0, bias=False),
            BatchNorm(out_channels)
        )
        conv.append(pw_projection)
        self.conv = nn.Sequential(*conv)
    def forward(self, x):
        if self.residual_connection:
            # Stochastic depth drops the residual branch per sample in training.
            y = self.conv(x)
            y = Drop_Path(y, self.drop_path_rate, self.training)
            return x + y
        else:
            return self.conv(x)
class EfficientNet(nn.Module):
    """EfficientNet classifier (variants B0-B7 via compound width/depth scaling)."""
    # Per-stage B0 template:
    # (in_channels, out_channels, kernel_size, stride, expand_ratio, se_ratio, repeats)
    config = [
        [32, 16, 3, 1, 1, 0.25, 1],
        [16, 24, 3, 2, 6, 0.25, 2],
        [24, 40, 5, 2, 6, 0.25, 2],
        [40, 80, 3, 2, 6, 0.25, 3],
        [80, 112, 5, 1, 6, 0.25, 3],
        [112, 192, 5, 2, 6, 0.25, 4],
        [192, 320, 3, 1, 6, 0.25, 1]
    ]
    def __init__(self, model_type: str = 'efficientnet-b0',
                 num_classes=1000,
                 stem_channels=32,
                 feature_size=1280,
                 drop_connect_rate=0.2):
        """Build the network for ``model_type``.

        model_type: one of 'efficientnet-b0' ... 'efficientnet-b7'.
        num_classes: width of the final classification layer.
        stem_channels: stem output channels before width scaling.
        feature_size: channels of the 1x1 head convolution.
        drop_connect_rate: maximum stochastic-depth drop probability.
        """
        super(EfficientNet, self).__init__()
        param = self._model_selection(model_type)
        # BUGFIX: scale a per-instance copy of the stage template.  The
        # original mutated the class attribute ``config`` in place, so
        # constructing any scaled variant (width/depth coefficient != 1.0)
        # permanently corrupted the shared template and every EfficientNet
        # built afterwards was wrong.
        self.config = [list(stage) for stage in EfficientNet.config]
        # scaling width
        width_coefficient = param[0]
        if width_coefficient != 1.0:
            stem_channels = Round_Channels(stem_channels * width_coefficient)
            for conf in self.config:
                conf[0] = Round_Channels(conf[0] * width_coefficient)
                conf[1] = Round_Channels(conf[1] * width_coefficient)
        # scaling depth
        depth_coefficient = param[1]
        if depth_coefficient != 1.0:
            for conf in self.config:
                conf[6] = Round_Repeats(conf[6] * depth_coefficient)
        # scaling resolution (informational; forward() pools over whatever
        # spatial size it receives)
        input_size = param[2]
        # stem convolution
        self.stem_conv = Conv3x3Bn(3, stem_channels, 2)
        # total number of MBConv blocks, for the linear drop-path schedule
        total_blocks = 0
        for conf in self.config:
            total_blocks += conf[6]
        # mobile inverted bottleneck blocks
        blocks = []
        for in_channels, out_channels, kernel_size, stride, expand_ratio, se_ratio, repeats in self.config:
            # drop connect rate grows linearly with the block index
            drop_rate = drop_connect_rate * (len(blocks) / total_blocks)
            blocks.append(MBConvBlock(in_channels, out_channels, kernel_size, stride, expand_ratio, se_ratio, drop_rate))
            for _ in range(repeats - 1):
                drop_rate = drop_connect_rate * (len(blocks) / total_blocks)
                blocks.append(MBConvBlock(out_channels, out_channels, kernel_size, 1, expand_ratio, se_ratio, drop_rate))
        self.blocks = nn.Sequential(*blocks)
        # head: 1x1 conv, global average pool (done in forward), dropout, FC
        self.head_conv = Conv1x1Bn(self.config[-1][1], feature_size)
        # self.avgpool = nn.AvgPool2d(input_size // 32, stride=1)
        self.dropout = nn.Dropout(param[3])
        self.classifier = nn.Linear(feature_size, num_classes)
        self._initialize_weights()
    def forward(self, x):
        """Return class logits of shape ``(batch, num_classes)``."""
        x = self.stem_conv(x)
        x = self.blocks(x)
        x = self.head_conv(x)
        # global average pool over the spatial dimensions
        x = torch.mean(x, (2, 3))
        x = self.dropout(x)
        x = self.classifier(x)
        return x
    def _model_selection(self, model_type) -> Tuple:
        """Return ``(width_coef, depth_coef, resolution, dropout_rate)``.

        Raises KeyError for an unknown ``model_type``.
        """
        net_param = {
            # 'efficientnet type': (width_coef, depth_coef, resolution, dropout_rate)
            'efficientnet-b0': (1.0, 1.0, 224, 0.2),
            'efficientnet-b1': (1.0, 1.1, 240, 0.2),
            'efficientnet-b2': (1.1, 1.2, 260, 0.3),
            'efficientnet-b3': (1.2, 1.4, 300, 0.3),
            'efficientnet-b4': (1.4, 1.8, 380, 0.4),
            'efficientnet-b5': (1.6, 2.2, 456, 0.4),
            'efficientnet-b6': (1.8, 2.6, 528, 0.5),
            'efficientnet-b7': (2.0, 3.1, 600, 0.5)}
        return net_param[model_type]
    def _initialize_weights(self):
        """He-style init for convs, constant init for BN, small normal for FC."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def _summary(self, input_size, device='cpu'):
        """Print a torchsummary layer summary for the given input size."""
        return summary(self, input_size, device=device)
if __name__ == "__main__":
    # Smoke test: build B0, run one dummy batch, check the logit shape.
    model = EfficientNet('efficientnet-b0')
    x = Variable(torch.randn(1, 3, 224, 224))
    y = model(x)
    assert y.size() == torch.randn(1, 1000).size()
model._summary((3, 224, 224)) | StarcoderdataPython |
1623669 | '''
Bremerton Weak Lensing Round Trip Module for CosmoSIS
ATTRIBUTION: CFHTLens
because this is a copy of the CFHTLens module with some minor modifications.
'''
from cosmosis.datablock import option_section, names as section_names
import bremerton_like
from bremerton_like import n_z_bin
import numpy as np
def setup(options):
    """Read module options and build the Bremerton likelihood calculator.

    The returned calculator is handed back to ``execute`` by CosmoSIS as
    its ``config`` argument.
    """
    sec = option_section
    covmat_file = options.get_string(sec, 'covariance_file', default=bremerton_like.DEFAULT_COVMAT)
    data_file = options.get_string(sec, 'data_file', default=bremerton_like.DEFAULT_DATA)
    # The calculator loads the named files up front and is reused per sample.
    return bremerton_like.BremertonLikelihood(covmat_file, data_file)
def execute(block, config):
    """Compute the Bremerton shear-correlation likelihood for one sample.

    Reads xi_plus/xi_minus(theta) for every redshift-bin pair from the
    datablock, evaluates the likelihood, and stores it back in the block.
    Returns 0 (the CosmoSIS success code).
    """
    calculator = config
    # Get theta for the sample values.  theta is duplicated because the
    # Bremerton code wants the xi_plus and xi_minus samples concatenated
    # on a single axis.
    section = section_names.shear_xi
    theta = block[section, "theta"]
    theta = np.concatenate((theta, theta))
    # Get the xi(theta) for these samples, for each pair of bins.
    # The likelihood calculator wants a big dictionary.
    xi_data = {}
    # BUGFIX: range() instead of the Python-2-only xrange(); for loop
    # iteration the behaviour is identical on both Python versions.
    for i in range(1, n_z_bin + 1):
        for j in range(i, n_z_bin + 1):
            name = 'xiplus_%d_%d' % (j, i)
            xiplus = block[section, name]
            name = 'ximinus_%d_%d' % (j, i)
            ximinus = block[section, name]
            xi = np.concatenate((xiplus, ximinus))
            xi_data[(i, j)] = (theta, xi)
    # Calculate the likelihood and save the result.
    like = calculator(xi_data)
    section = section_names.likelihoods
    block[section, "bremerton_like"] = like
    return 0
def cleanup(config):
    """Release per-run resources.

    Nothing to free here; the function exists only to satisfy the
    CosmoSIS module interface.  Always returns 0 (success).
    """
    return 0
| StarcoderdataPython |
3285455 | <reponame>Files-com/files-sdk-python
import unittest
import inspect
import files_sdk
from tests.base import TestBase
from files_sdk.models import FormField
from files_sdk import form_field
class FormFieldTest(TestBase):
    # Placeholder suite: the FormField model currently exposes no instance
    # or static methods to exercise, so no test cases are generated yet.
    pass
# Instance Methods
# Static Methods
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1794448 | <reponame>franekp/interleave
def test_patterns(patterns):
def inner(f):
return f
return inner
| StarcoderdataPython |
11391749 | import ctypes
import errno
import os
import random
import socket
import struct
# Raw libc handle, used for the AF_UNSPEC "dissolve association" connect()
# that Python's socket module does not expose directly.
LIBC = ctypes.CDLL("libc.so.6")
# setsockopt option: delay local port allocation until connect() (Linux).
IP_BIND_ADDRESS_NO_PORT = 24
# Netlink sock_diag message/flag constants (linux/inet_diag.h, linux/netlink.h).
SOCK_DIAG_BY_FAMILY = 20
NLM_F_REQUEST = 1
TCP_ESTABLISHED = 1
NETLINK_SOCK_DIAG = 4
NLMSG_ERROR = 2
# Wildcard cookie: match any socket in an inet_diag lookup.
INET_DIAG_NOCOOKIE = b'\xff' * 8
# Module state: cached netlink socket plus ephemeral-port-range cache,
# both reset by changed_namespace().
nl = None
ephemeral_lo = ephemeral_hi = ephemeral_skip = None
def _netlink_udp_lookup(family, local_addr, remote_addr):
    """Look up an established UDP socket for the given 4-tuple via sock_diag.

    Returns ``(cookie, (src, dst))`` for the matching socket, or
    ``(None, None)`` when the kernel reports no match.  Uses (and lazily
    creates) the module-level netlink socket ``nl``.
    """
    global nl
    # Everyone does NLM_F_REQUEST | NLM_F_DUMP. This triggers socket
    # traversal, but sadly ignores ip addresses in the lookup. The IP
    # stuff must then be expressed with bytecode. To avoid that let's
    # run without F_DUMP which will cause going into udp_dump_one
    # code, which does inspect ip, port, and cookie. Without F_DUMP we
    # also get only a single response.
    # NLMsgHdr "length type flags seq pid"
    nl_msg = struct.pack(
        "=LHHLL",
        72, # length
        SOCK_DIAG_BY_FAMILY,
        NLM_F_REQUEST, # notice: no NLM_F_DUMP
        0,
        0)
    # InetDiagReqV2 "family protocol ext states >>id<<"
    req_v2 = struct.pack("=BBBxI", family, socket.IPPROTO_UDP, 0,
                         1 << TCP_ESTABLISHED)
    iface = 0
    if family == socket.AF_INET6 and len(remote_addr) > 3:
        iface = remote_addr[3]
    # InetDiagSockId "sport dport src dst iface cookie"
    # citing kernel: /* src and dst are swapped for historical reasons */
    sock_id = struct.pack("!HH16s16sI8s", remote_addr[1], local_addr[1],
                          socket.inet_pton(family, remote_addr[0]),
                          socket.inet_pton(family, local_addr[0]),
                          socket.htonl(iface), INET_DIAG_NOCOOKIE)
    if nl == None:
        # Lazily open one netlink socket and keep it for later lookups.
        nl = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW,
                           NETLINK_SOCK_DIAG)
        nl.connect((0, 0))
    nl.send(nl_msg + req_v2 + sock_id)
    cookie, addr = None, None
    b = nl.recv(4096)
    (l, t) = struct.unpack_from("=LH", b, 0)
    if t == SOCK_DIAG_BY_FAMILY:
        # `struct nlmsghdr` followed by `struct inet_diag_msg`
        (sport, dport, src, dst, iface,
         xcookie) = struct.unpack_from("!HH16s16sI8s", b, 16 + 4)
        if family == socket.AF_INET:
            addr = ((socket.inet_ntop(family, src[:4]), sport),
                    (socket.inet_ntop(family, dst[:4]), dport))
        else:
            addr = ((socket.inet_ntop(family, src), sport),
                    (socket.inet_ntop(family, dst), dport, 0, iface))
        cookie = xcookie
    if t == NLMSG_ERROR:
        # NOTE(review): this decoded error code is computed but never used
        # (it also shadows the imported errno module locally); the caller
        # just sees (None, None) -- confirm the error is meant to be ignored.
        (l, t, f, s, p, e) = struct.unpack_from("=LHHLLI", b, 0)
        errno = 0xffffffff + 1 - e
    return cookie, addr
def connectx(sd, local_addr, remote_addr, flags=0):
    """Bind ``sd`` to ``local_addr`` and connect it to ``remote_addr``.

    Dispatches on the socket type: SOCK_STREAM goes through the TCP
    path, SOCK_DGRAM through the UDP path; any other type is ignored.
    """
    family = sd.getsockopt(socket.SOL_SOCKET, socket.SO_DOMAIN)
    sotype = sd.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
    dispatch = {
        socket.SOCK_STREAM: _connectx_tcp,
        socket.SOCK_DGRAM: _connectx_udp,
    }
    handler = dispatch.get(sotype)
    if handler is not None:
        handler(family, sd, local_addr, remote_addr, flags)
# Addresses that mean "any local address" for v4/v6.
WILDCARD = ('0.0.0.0', '::')
def _connectx_tcp(family, sd, local_addr, remote_addr, flags=0):
    """Bind a TCP socket to ``local_addr`` (sharing the port) and connect."""
    # We want to be able to use the same outbound local port for many
    # users on the system. We totally want to reuse the sport. We
    # don't really "listen". REUSEADDR is needed to allow subsequent
    # bind(*, Y) with our port Y to succeed. Without REUSEADDR the
    # bind (or auto-bind) will skip the port limiting total egress
    # conns. With TCP port-hijacking for REUSEADDR is not an issue.
    # Without REUSEADDR auto-bind over auto-bind would work. But
    # specific-bind over auto-bind would fail.
    sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # NO_PORT only makes sense for auto-bind
    if local_addr[1] == 0:
        # For both AF_INET and AF_INET6
        sd.setsockopt(socket.IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, 1)
    # Full wildcard (any address, any port) needs no explicit bind at all.
    if not (local_addr[0] in WILDCARD and local_addr[1] == 0):
        sd.bind(local_addr)
    sd.connect(remote_addr)
# getsockopt option: 64-bit kernel-assigned socket cookie (Linux).
SO_COOKIE = 57
def _connectx_udp(family, sd, local_addr, remote_addr, flags=0):
    """Bind-and-connect a UDP socket so its 4-tuple is provably unique.

    SO_REUSEADDR doubles as a lock bit: REUSEADDR=0 marks the 2-tuple
    "claimed" during the bind/connect critical section, and a netlink
    sock_diag lookup confirms (by cookie) that this socket actually owns
    the 4-tuple before committing.  Raises OSError(EADDRINUSE) when
    another socket won the race.
    """
    if local_addr[0] in WILDCARD:
        # preserve iface for v6
        local_addr = list(local_addr)
        port = local_addr[1]
        local_addr = _get_src_route(family, remote_addr)
        local_addr = list(local_addr)
        local_addr[1] = port
        local_addr = tuple(local_addr)
    if local_addr[1] == 0:
        # Here's the deal. We can't do auto port assignment without
        # REUSEADDR, since we want to share ports with other sockets.
        # We cant do REUSEADDR=1 since it might give us a port number
        # already used for our 4-tuple.
        local_addr = list(local_addr)
        local_addr[1] = _get_udp_port(family, local_addr, remote_addr)
        local_addr = tuple(local_addr)
    cookie = sd.getsockopt(socket.SOL_SOCKET, SO_COOKIE, 8)
    # Before 2-tuple bind we totally must have SO_REUSEADDR
    sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sd.bind(local_addr)
    except OSError:
        # bind() might totally fail with EADDRINUSE if there is a
        # socket with SO_REUSEADDR=0, which means locked.
        raise
    # Here we create inconsistent socket state. Acquire lock
    # preventing anyone else from doing 2-tuple bind.
    sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
    c, _ = _netlink_udp_lookup(family, local_addr, remote_addr)
    if c != cookie:
        # Ideallly dissolve socket association. This is critical
        # section, so ensure the socket is actually cleaned.
        # (AF_UNSPEC connect via raw libc; not exposed by the socket module.)
        b = struct.pack("I32s", socket.AF_UNSPEC, b"")
        LIBC.connect(sd.fileno(), b, len(b))
        sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        raise OSError(errno.EADDRINUSE, 'EADDRINUSE')
    # We can continue only if our socket cookie is on top of the
    # lookup. This connect should not fail.
    sd.connect(remote_addr)
    # Exit critical section
    sd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    return True
def changed_namespace():
    """Reset cached module state after a network-namespace switch.

    Closes the cached netlink socket and forgets the ephemeral port
    range, so both are lazily re-created against the new namespace.
    """
    global nl
    global ephemeral_lo, ephemeral_hi, ephemeral_skip
    if nl is not None:
        nl.close()
    nl = None
    ephemeral_lo = ephemeral_hi = ephemeral_skip = None
def _get_src_route(family, remote_addr):
    """Return the local sockname the kernel would use to reach ``remote_addr``.

    Uses a throwaway connected UDP socket (a UDP connect() only records
    the route; no packet is sent).  This could be done faster with
    rtnetlink; this is the simple way.
    """
    s = socket.socket(family, socket.SOCK_DGRAM, 0)
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.connect(remote_addr)
        return s.getsockname()
    finally:
        # BUGFIX: the original leaked the descriptor when connect() raised
        # (e.g. ENETUNREACH); always close the probe socket.
        s.close()
def _read_ephemeral():
    """Load the kernel's ephemeral port range and reserved ports.

    Populates the module-level ephemeral_lo/ephemeral_hi/ephemeral_skip
    cache from /proc (Linux only).
    """
    global ephemeral_lo, ephemeral_hi, ephemeral_skip
    with open('/proc/sys/net/ipv4/ip_local_port_range') as f:
        lo, hi = map(int, f.read(512).strip().split())
    reserved = set()
    with open('/proc/sys/net/ipv4/ip_local_reserved_ports') as f:
        # Entries are comma-separated single ports or "lo-hi" ranges.
        for entry in f.read(512).strip().split(','):
            if not entry:
                continue
            if '-' in entry:
                first, _, last = entry.partition('-')
                reserved.update(range(int(first), int(last) + 1))
            else:
                reserved.add(int(entry))
    ephemeral_lo, ephemeral_hi, ephemeral_skip = lo, hi, reserved
def _get_udp_port(family, local_addr, remote_addr):
    """Pick an ephemeral UDP source port that is free for this 4-tuple.

    Starts at a random offset inside the kernel's ephemeral range, skips
    reserved ports, and probes each candidate with a netlink sock_diag
    lookup.  Raises OSError(EAGAIN) when the whole range is exhausted.
    """
    if ephemeral_lo == None:
        _read_ephemeral()
    lo, hi = ephemeral_lo, ephemeral_hi
    start = random.randint(lo, hi)
    off = 0
    while off < hi + 1 - lo:
        port = start + off
        off += 1
        # Wrap around the top of the range back to the bottom.
        if port > hi:
            port = port - (hi + 1) + lo
        if port in ephemeral_skip:
            continue
        assert (port >= lo)
        assert (port <= hi)
        # Free if no established UDP socket already holds this exact 4-tuple.
        c, _ = _netlink_udp_lookup(family, (local_addr[0], port), remote_addr)
        if c is None:
            return port
    raise OSError(errno.EAGAIN, 'EAGAIN')
| StarcoderdataPython |
9686480 | <filename>canvas/core/routing.py
# coding: utf-8
'''
The module manages a dictionary tree with controller leaves to allow
approximately O(log n) route resolution. The tree is generated from a list of
controllers, as supplied by `create_controllers`, by `create_routing`. To
modify the resultant route map at create time, register an `on_routing`
callback which takes the root of the route map dictionary as an argument.
The return value of the callback will be ignored.
'''
import re
from ..utils import create_callback_registrar, logger
# Create a log.
log = logger(__name__)
# Define a sentinel key for on-branch leaf controllers.
_here_sentinel = object()
# Define the root of the route map, a dictionary tree with controller leaves.
_route_map = dict()
# Define the route map modification callback.
on_routing = create_callback_registrar()
def routing_diag():
    # Expose internals (route-map root and the leaf sentinel) for tests/debugging.
    return (_route_map, _here_sentinel)
class RouteVariable:
    """A named placeholder segment (``<name>``) in a route pattern.

    ``name`` becomes an attribute on the eventual matched ``RouteString``.
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<RouteVariable "{}">'.format(self.name)
class RouteString(str):
    """The current route, with matched variable values as attributes.

    ``populated`` installs the variable names and values; ``has_variable``
    reports whether a given variable was matched.
    """

    def populated(self, variables):
        # Record the matched variable names, then expose each value as an
        # attribute of this string.  ``variables`` may be None/empty.
        self.__variables__ = list(variables.keys()) if variables else []
        if variables:
            for name, value in variables.items():
                setattr(self, name, value)
        return self

    def has_variable(self, variable):
        # Only meaningful after populated() has run.
        return variable in self.__variables__
def create_routing(controller_list):
    '''
    Populate the global route map with the `Controller`s in `controller_list`.
    '''
    # Define a route map updater.
    def update_route_map(route, controller):
        # Follow this route into the route map, generating branches and
        # placing the controller as a leaf.
        current_node, last_node, last_key = _route_map, None, None
        route_parts = route[1:].split('/')
        for i, route_part in enumerate(route_parts):
            # Check if this is a variable definition, updating the key
            # to a variable if it is.
            variable_definition = re.match(r'^<(\w+)>$', route_part)
            if variable_definition:
                route_part = RouteVariable(variable_definition.group(1))
            # Assert this isn't a leaf; if it is, make it on-branch.
            # (A controller already stored at this exact prefix is moved
            # under the _here_sentinel key so the node can hold children.)
            if not isinstance(current_node, dict):
                new_current_node = dict()
                new_current_node[_here_sentinel] = current_node
                last_node[last_key] = current_node = new_current_node
            # NOTE(review): distinct RouteVariable objects never compare
            # equal, so registering the same <name> segment twice creates a
            # duplicate variable branch -- confirm that is intended.
            if route_part not in current_node:
                # Expand the tree.
                if i == len(route_parts) - 1:
                    current_node[route_part] = controller
                else:
                    current_node[route_part] = dict()
            last_node, last_key = current_node, route_part
            current_node = current_node[route_part]
    # Update the route map for all routes for all controllers.
    for controller in controller_list:
        for route in controller.__routes__:
            update_route_map(route, controller)
    # Invoke modification callbacks.
    on_routing.invoke(_route_map)
    log_routing(_route_map)
def resolve_route(route):
    """Resolve ``route`` against the global route map.

    Returns ``(controller, variables)`` for the matched leaf, or
    ``(None, None)`` when nothing matches.  Literal path segments take
    priority over variable segments at each level.
    """
    def check_one(current_node, part_stack, variables):
        # Out of segments: we must be standing on a leaf (or a branch with
        # an on-branch leaf under the sentinel key).
        if not part_stack:
            if isinstance(current_node, dict):
                if _here_sentinel in current_node:
                    return current_node[_here_sentinel], variables
                return None
            return current_node, variables
        route_part = part_stack.pop(0)
        if not isinstance(current_node, dict):
            # Leaf reached with segments left over: no match on this path.
            return None
        if route_part in current_node:
            # Exact (literal) match takes priority over variable children.
            return check_one(current_node[route_part], part_stack, variables)
        # Try each variable child, backtracking on failure.  BUGFIX: bind
        # the candidate value into a *copy* of the variable dict.  The
        # original wrote into the shared dict before copying, which leaked
        # bindings from failed branches into later sibling attempts and
        # into the successful result.
        for key in current_node:
            if not isinstance(key, RouteVariable):
                continue
            branch_variables = dict(variables)
            branch_variables[key.name] = route_part
            checked = check_one(current_node[key], list(part_stack), branch_variables)
            if checked is not None:
                return checked
        return None
    result = check_one(_route_map, route[1:].split('/'), dict())
    if result is None:
        return None, None
    return result
def log_routing(routing):
    '''Log a formatted representation of the given route map.'''
    if not len(routing):
        log.info('Created null routing')
        return
    def name_key(key):
        # Render a route-map key: on-branch leaf sentinel, variable, or literal.
        if key is _here_sentinel:
            return '.'
        elif isinstance(key, RouteVariable):
            return '/<%s>'%key.name
        else:
            return '/%s'%key
    def name_value(value):
        # Render a controller leaf with its accepted verbs.
        return '%s (%s)'%(
            value.__class__.__name__,
            ', '.join(value.__verbs__)
        )
    def key_sort_child(child):
        # Leaves sort before branches; branches sort by child count.
        if not isinstance(child[1], dict):
            return -1
        else:
            return len(child[1])
    def format_one(level):
        # Recursively pretty-print a subtree, indenting each child's block
        # under its (padded) key name.
        if not isinstance(level, dict):
            return name_value(level)
        else:
            key_lengths = list(len(name_key(key)) for key in level.keys())
            key_lengths.append(10)
            indent = ' '*(max(key_lengths) + 1)
            parts = list()
            for key, value in sorted(level.items(), key=key_sort_child):
                child_str = format_one(value).replace('\n', '\n' + indent)
                parts.append(name_key(key) + (indent[len(name_key(key)):]) + child_str)
            return '\n'.join(parts)
    log.info('Created routing:\n%s', format_one(routing))
247077 | <reponame>VsuManiego/machinery-mst
# TITLE: Checkpoint_2
# PROGRAMMER 1: <NAME>
# PROGRAMMER 2: <NAME>
# PROGRAMMER 3: <NAME>
# Interactive CLI that collects machinery records into an in-memory dict.
mymachineinventory = {}
print("Welcome Engineer!")
print("You would like to have a better storage system for your mechanical knowledge.")
print("You have made the right choice!")
import cv2
# Show a banner image for ~3 seconds before the prompts start.
# NOTE(review): cv2.imread returns None when the path is missing, and
# imshow would then raise -- confirm the hard-coded Windows path exists.
filepath = ("C:/media/mech.jpg")
image = cv2.imread(filepath)
cv2.imshow("OpenCV Image Reading", image)
cv2.waitKey(3000)
print("Let's begin with your name")
a = str(input("Name of Engineer: "))
print("Hello Engineer ", a)
print("You want to add new set of machineries?")
print("Let's start?")
# Collect entries until the user types the sentinel word 'Orayt'.
while (True):
    myitem = input("Input new machineries. " "If you're finish, type 'Orayt': ")
    if myitem == "Orayt":
        print("Information successfully added!")
        print("Thanks for the information!")
        break
    inputs = input("What type of machinery is this: ")
    desc = input("What's the use of it: ")
    link = input ("Add links: ")
    # Key is the (name, type, description) tuple; value is the link string.
    mymachineinventory[(myitem),(inputs),(desc)] = (link)
    print(f'New set of inventories {mymachineinventory}.')
| StarcoderdataPython |
1984059 | """Use a class 'instance' as a decorator demo.
Tracing decorator
"""
class Trace:
    """Callable-instance decorator that announces each traced call.

    One Trace() instance can decorate any number of functions; flipping
    ``enabled`` on the instance switches tracing on or off for all of them.
    """

    def __init__(self):
        self.enabled = True

    def __call__(self, function):
        # Decorating: wrap the target so every call is announced first.
        def traced(*args, **kwargs):
            if self._enabled:
                print(f"** Tracer information - calling {function}")
            return function(*args, **kwargs)
        return traced

    @property
    def enabled(self):
        """Whether call tracing is currently active."""
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        self._enabled = value
if __name__ == "__main__":
    # Demo: one tracer instance decorating two functions.
    tracer = Trace()
    @tracer
    def rotate_list(l):
        return l[1:] + [l[0]]
    @tracer
    def append_reverse(i, l):
        l.append(i)
        return sorted(l, reverse=True)
    li = [1, 2, 3, 4, 5]
    li = append_reverse(19, li)
    print(li)
    li = rotate_list(li)
    print(li)
    # Disabling the tracer silences the diagnostics but not the calls.
    tracer.enabled = False
    li = rotate_list(li)
    print(li)
| StarcoderdataPython |
8160826 | <gh_stars>10-100
#
# Copyright (c) <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import logging
import requests
from psutil import net_if_addrs
from socket import AF_INET
from django.conf import settings
from django.core.cache import cache
logger = logging.getLogger(__name__)
def get_lan_address():
    """Return this host's IPv4 address on settings.NETWORK_INTERFACE.

    The result is cached for 10 minutes.  Returns None (uncached) when
    the interface has no IPv4 address.
    """
    lan_address = cache.get('lan_address')
    if lan_address is None:
        # NOTE(review): net_if_addrs().get(...) returns None for an unknown
        # interface name, which would make this loop raise TypeError.
        for address in net_if_addrs().get(settings.NETWORK_INTERFACE):
            # No break: if several IPv4 addresses exist, the last one wins.
            if address.family == AF_INET:
                lan_address = address.address
                cache.set('lan_address', lan_address, 600)
        return lan_address
    return lan_address
def get_wan_address():
    """Return this host's public (WAN) IP as reported by dazzlepod.com.

    The result is cached for 10 minutes.  Returns None (uncached) when
    the HTTP lookup fails or does not answer 200.
    """
    wan_address = cache.get('wan_address')
    if wan_address is None:
        url = 'https://dazzlepod.com/ip/me.json'
        headers = {'user-agent': settings.USER_AGENT}
        try:
            response = requests.get(url, headers=headers, timeout=settings.HTTP_TIMEOUT)
        except requests.exceptions.RequestException as err:
            # Network failure is logged at debug level and treated as "no answer".
            logger.debug(err)
        else:
            if response.status_code == 200:
                wan_address = response.json().get('ip')
                cache.set('wan_address', wan_address, 600)
        return wan_address
    return wan_address
| StarcoderdataPython |
1949199 | # Generated by Django 3.0 on 2020-06-14 09:35
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: Address, Profile, and Letting models."""
    initial = True
    dependencies = [
        # Profile references the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Postal address with US-style validated state/zip/country fields.
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.PositiveIntegerField(validators=[django.core.validators.MaxValueValidator(9999)])),
                ('street', models.CharField(max_length=64)),
                ('city', models.CharField(max_length=64)),
                ('state', models.CharField(max_length=2, validators=[django.core.validators.MinLengthValidator(2)])),
                ('zip_code', models.PositiveIntegerField(validators=[django.core.validators.MaxValueValidator(99999)])),
                ('country_iso_code', models.CharField(max_length=3, validators=[django.core.validators.MinLengthValidator(3)])),
            ],
        ),
        # One-to-one extension of the user model with an optional favorite city.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('favorite_city', models.CharField(blank=True, max_length=64)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A letting is a title tied one-to-one to an Address.
        migrations.CreateModel(
            name='Letting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('address', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='oc_lettings_site.Address')),
            ],
        ),
    ]
| StarcoderdataPython |
35428 | import asyncio
import pickle
from congregation.net.messages import *
class Handler:
def __init__(self, peer, server: [asyncio.Protocol, None] = None):
self.peer = peer
self.server = server
self.msg_handlers = self._define_msg_map()
def handle_msg(self, data):
"""
determine message type and handle accordingly
"""
if isinstance(data, Msg):
m = data
else:
m = pickle.loads(data)
if m.pid not in self.peer.peer_connections:
raise Exception(f"Msg of type {m.msg_type} received from unrecognized peer: {m.pid}")
self.msg_handlers[m.msg_type](m)
def _define_msg_map(self):
return {
"IAM": self.handle_iam_msg,
"READY": self.handle_ready_msg,
"CONFIG": self.handle_config_msg,
"ACK": self.handle_ack_msg,
"REQUEST": self.handle_request_msg
}
def _check_dispatcher(self, m: [ReadyMsg, ConfigMsg, AckMsg, RequestMsg]):
if self.peer.dispatcher is not None:
if self.peer.dispatcher.dispatch_type == m.job_type:
return True
self.peer.msg_buffer.append(m)
return False
def handle_iam_msg(self, m: IAMMsg):
"""
we need to be able to resolve which party a given connection
is for, which is why a done callback is added to the connection
future which sends an IAMMsg with the pid of the connecting party.
this function sets that connection value in peer.peer_connections
accordingly when an IAMMsg is received.
"""
print(f"IAMMsg received from {m.pid}")
conn = self.peer.peer_connections[m.pid]
if isinstance(conn, asyncio.Future):
if not conn.done():
conn.set_result((self.server.transport, self))
def handle_ready_msg(self, m: ReadyMsg):
if self._check_dispatcher(m):
print(f"ReadyMsg received from party {m.pid} for {m.job_type} job.")
rdy = self.peer.dispatcher.parties_ready[m.pid]
if isinstance(rdy, asyncio.Future):
if not rdy.done():
rdy.set_result(True)
def handle_config_msg(self, m: ConfigMsg):
if self._check_dispatcher(m):
print(f"ConfigMsg received from party {m.pid} for {m.job_type} job.")
cfg = self.peer.dispatcher.parties_config[m.pid]["CFG"]
if isinstance(cfg, asyncio.Future):
if not cfg.done():
cfg.set_result(m.config)
print(f"Sending AckMsg to party {m.pid} for receipt of ConfigMsg for {m.job_type} job.")
self.peer.send_ack(
m.pid,
"CONFIG",
m.job_type
)
def handle_ack_msg(self, m: AckMsg):
if self._check_dispatcher(m):
print(f"AckMsg of type {m.ack_type} received from party {m.pid} for {m.job_type} job.")
if m.ack_type == "CONFIG":
a = self.peer.dispatcher.parties_config[m.pid]["ACK"]
if isinstance(a, asyncio.Future):
if not a.done():
a.set_result(True)
def handle_request_msg(self, m: RequestMsg):
if self._check_dispatcher(m):
print(f"Request message for {m.request_type} received from party {m.pid} for {m.job_type} job.")
if m.request_type == "CONFIG":
self.peer.send_cfg(m.pid, self.peer.dispatcher.config_to_exchange, m.job_type)
| StarcoderdataPython |
1696305 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtWebEngineWidgets import QWebEngineView
LOGIN = '<LOGIN>'
PASSWORD = '<PASSWORD>'
# Load a local jQuery bundle and alias it under ``qt`` via noConflict so it
# cannot clash with any jQuery the page itself ships.
with open('../QWebEngine__append_custom_javascript__jQuery/js/jquery-3.1.1.min.js') as f:
    jquery_text = f.read()
jquery_text += "\nvar qt = { 'jQuery': jQuery.noConflict(true) };"
app = QApplication([])
view = QWebEngineView()
view.load(QUrl('https://github.com/login'))
def _on_load_finished(ok: bool):
    """After each page load: if we are on the login page, inject jQuery and
    auto-submit the login form with LOGIN/PASSWORD."""
    page = view.page()
    url = page.url().toString()
    print(url)
    # Only act on the login page itself; later navigations are ignored.
    if not url.endswith('login'):
        return
    page.runJavaScript(jquery_text)
    # NOTE(review): the credentials are spliced directly into JS source;
    # quotes or backslashes in LOGIN/PASSWORD would break or alter the
    # script -- confirm that is acceptable for this demo.
    page.runJavaScript(f"""
        qt.jQuery('#login_field').val('{LOGIN}');
        qt.jQuery('#password').val('{PASSWORD}');
        qt.jQuery('input[name="commit"]').click();
    """)
    print()
# Mirror the current URL and load percentage in the window title.
view.loadProgress.connect(lambda value: view.setWindowTitle('{} ({}%)'.format(view.url().toString(), value)))
view.loadFinished.connect(_on_load_finished)
mw = QMainWindow()
mw.setCentralWidget(view)
mw.resize(500, 500)
mw.show()
app.exec()
| StarcoderdataPython |
6498602 |
import sys
import platform
import os
import datetime
import traceback
import tkinter
import build_info
import locations
# keep those trailing whites!!
TEXT = """
Sorry!
This was unexpected.
We created a Bugreport under: {bugreport}.
It would be great if you could send the bugreport to <EMAIL>
Thanks for your Help!
"""
TEMPLATE = """
BUGREPORT {timestamp}
ERROR:
// This section contains information about the error.
TYPE : {error_type}
VALUE : {error_value}
BACKTRACE : {error_trace}
BUILD INFO:
// This section contains informations about the system your executable was build on.
TIMESTAMP : {build_timestamp}
SYSTEM : {build_system}
COMMIT : {build_commit}
STATUS : {build_status}
USER : {build_user}
PACKAGES : {build_packages}
SYSTEM INFO:
// This section contains informations about your system.
SYSTEM : {system_system}
COMMAND : {system_command}
"""
INDENT = '\n' + ' ' * 20
def bugreport(exc_info=None) -> str:
    """Write a bug-report file for the given (or current) exception.

    Renders TEMPLATE with error, build and system information, writes it to
    ``<user dir>/bugreport.txt``, shows a small Tk dialog pointing the user
    at that file, and returns the report file path.

    Args:
        exc_info: optional ``(type, value, traceback)`` triple; defaults to
            ``sys.exc_info()`` for the exception currently being handled.
    """
    typ, value, frames = exc_info if exc_info is not None else sys.exc_info()
    trace = []
    path_to_strip = locations.get_root()
    # Shorten absolute paths inside the project root to relative ones.
    strip_path = lambda s: s[len(path_to_strip) + 1:] if s.startswith(path_to_strip) else s
    for frame in traceback.extract_tb(frames, 32):
        fn = strip_path(frame[0])
        ln = frame[1]
        fu = frame[2]
        tx = frame[3]
        t = '{}:{} {} {}'.format(fn, ln, fu, tx)
        # insert(0, ...) reverses the order: innermost frame first.
        trace.insert(0, t)
    def prepare(value):
        # Normalize a string/iterable/scalar into one INDENT-joined block.
        # NOTE(review): this parameter shadows the outer ``value``.
        iterable = None
        if isinstance(value, str):
            iterable = value.split('\n')
        else:
            try:
                iterable = iter(value)
            except:
                iterable = [str(value)]
        return INDENT.join(filter(None, iterable))
    aliases = platform.system_alias(platform.system(), platform.release(), platform.version())
    system = '{} {} {}'.format(*aliases)
    report = TEMPLATE.format(
        timestamp=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        error_type=prepare(typ),
        error_value=prepare(value),
        error_trace=prepare(trace),
        build_timestamp=prepare(build_info.TIMESTAMP),
        build_system=prepare(build_info.SYSTEM),
        build_commit=prepare(build_info.COMMIT),
        build_status=prepare(build_info.STATUS),
        build_user=prepare(build_info.USER),
        build_packages=prepare(build_info.PACKAGES),
        system_system=prepare(system),
        system_command=prepare(' '.join(sys.argv)))
    report_file = locations.user('bugreport.txt')
    # with 'w' we override,... but i dont want to grow the file uncontrolled,
    # so maybe i rewrite it later to append,.. but then we need some kind of
    # ring buffer.
    with open(report_file, 'w') as f:
        f.write(report)
    # Best-effort GUI notification; any Tk failure (e.g. no display) is
    # deliberately ignored so the report itself is never lost.
    try:
        root = tkinter.Tk()
        root.title('Coder - Unexpected Error')
        tkinter.Label(root, text=TEXT.format(bugreport=report_file)).pack(side=tkinter.TOP)
        tkinter.Button(root, text='Close', command=lambda: root.quit()).pack(side=tkinter.BOTTOM)
        root.mainloop()
    except:
        pass
    if not getattr(sys, 'frozen', False):
        print(report)
    return report_file
if __name__ == '__main__':
    # Manual test: force a ZeroDivisionError and generate a report for it.
    try:
        x = 0
        y = 1
        z = y / x
    except:
        bugreport()
| StarcoderdataPython |
3316725 | <reponame>elliottt/rules_tree_sitter<gh_stars>1-10
load("@rules_tree_sitter//tree_sitter/internal:versions.bzl", _get_version_info = "get_version_info")
_TREE_SITTER_BUILD = """
cc_library(
name = "tree_sitter_lib",
srcs = glob([
"lib/src/*.c",
"lib/src/*.h",
"lib/src/unicode/*.c",
"lib/src/unicode/*.h"
], exclude = ["lib/src/lib.c"]),
hdrs = glob(["lib/include/tree_sitter/*.h"]),
includes = ["lib/include", "lib/src"],
strip_include_prefix = "lib/include",
linkstatic = True,
visibility = ["//visibility:public"],
)
"""
_TREE_SITTER_BIN_BUILD_HEADER = """
load("@rules_tree_sitter//tree_sitter/internal:repository.bzl", "tree_sitter_binary")
"""
_TREE_SITTER_BIN_BUILD = """
tree_sitter_binary(
name = "{key}",
archive = "{key}.gz",
visibility = ["//visibility:public"],
exec_compatible_with = [{platform}],
)
"""
def _tree_sitter_binary(ctx):
    """Rule implementation: gunzip the downloaded CLI archive and mark the
    result executable, exposing it via DefaultInfo."""
    archive = ctx.file.archive
    tree_sitter = ctx.actions.declare_file(ctx.label.name, sibling = archive)
    ctx.actions.run_shell(
        inputs = [ctx.file.archive],
        outputs = [tree_sitter],
        command ="""
            gunzip "{archive}" -c > "{output}"
            chmod +x "{output}"
        """.format(
            archive = ctx.file.archive.path,
            output = tree_sitter.path,
        ),
    )
    return [
        DefaultInfo(executable = tree_sitter)
    ]
# Unpacks a gzipped prebuilt tree-sitter CLI into an executable file.
tree_sitter_binary = rule(
    implementation = _tree_sitter_binary,
    attrs = {
        "archive": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
    },
    provides = [DefaultInfo],
)
# Execution-platform constraints for each known prebuilt CLI archive key.
TOOL_PLATFORMS = {
    "tree-sitter-linux-x64": ["@platforms//os:linux", "@platforms//cpu:x86_64"],
    "tree-sitter-macos-x64": ["@platforms//os:macos", "@platforms//cpu:x86_64"],
}
def _tree_sitter_repository(ctx):
    """Repository rule implementation: download the tree-sitter source
    archive and any prebuilt CLI binaries listed for this version, then
    write BUILD files for both."""
    info = _get_version_info(version = ctx.attr.version)
    if info == None:
        fail("No version information available for {}".format(ctx.attr.version))
    bin_build = _TREE_SITTER_BIN_BUILD_HEADER
    for key in info:
        download = info[key]
        if download["prefix"] == "":
            # An empty strip prefix marks a gzipped prebuilt CLI binary;
            # skip binaries for platforms we do not have constraints for.
            platform = TOOL_PLATFORMS.get(key, None)
            if platform == None:
                continue
            ctx.download(
                url = download["urls"],
                sha256 = download["sha256"],
                output = "bin/{}.gz".format(key)
            )
            bin_build += _TREE_SITTER_BIN_BUILD.format(
                key = key,
                platform = ", ".join(['"{}"'.format(val) for val in platform]),
            )
        else:
            # Source archive: extract it at the repository root.
            ctx.download_and_extract(
                url = download["urls"],
                sha256 = download["sha256"],
                stripPrefix = download["prefix"],
            )
    ctx.file("BUILD", _TREE_SITTER_BUILD)
    ctx.file("bin/BUILD", bin_build)
# External repository containing the tree-sitter library sources and CLIs.
tree_sitter_repository = repository_rule(
    implementation = _tree_sitter_repository,
    attrs = {
        "version": attr.string(mandatory = True),
    },
)
| StarcoderdataPython |
9684168 | <gh_stars>0
import requests
import logging
import http.client as http_client
import json
import pprint
LOGGING = True
LOGLEVEL = logging.DEBUG
if LOGGING:
    # Enable verbose HTTP wire logging for http.client and urllib3 (as used
    # by requests) so every request/response is dumped to the console.
    http_client.HTTPConnection.debuglevel = 2
    logging.basicConfig()
    logging.getLogger().setLevel(LOGLEVEL)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(LOGLEVEL)
    requests_log.propagate = True
class RESTClient:
    """Minimal wrapper around ``requests`` that raises on non-2xx responses.

    POST bodies are JSON-encoded and sent with a JSON Content-Type header.
    """

    def __init__(self):
        # Headers merged into every POST request.
        self.default_headers = {"Content-Type": "application/json"}

    def get(self, endpoint, headers=None):
        """GET ``endpoint``; return the response on status < 300, otherwise
        raise ``Exception`` carrying the response body text."""
        if headers is None:
            r = requests.get(endpoint)
        else:
            r = requests.get(endpoint, headers=headers)
        if r.status_code < 300:
            return r
        raise Exception(r.text)

    def post(self, endpoint, payload=None, headers=None):
        """POST ``payload`` (JSON-encoded) to ``endpoint``; return the
        response on status < 300, otherwise raise ``Exception`` with the
        response body text."""
        if headers is None:
            merged = self.default_headers
        else:
            # BUG FIX: the original did ``headers = headers.update(...)``;
            # dict.update() returns None, so custom-header requests were
            # actually sent with headers=None. Merge instead, letting the
            # default Content-Type win as update() would have done.
            merged = {**headers, **self.default_headers}
        r = requests.post(endpoint, headers=merged, data=json.dumps(payload))
        if r.status_code < 300:
            return r
        raise Exception(r.text)
| StarcoderdataPython |
12809624 | #!/usr/bin/env python3
import fire
from cmd_interface import CommandlineInterface
def main():
    """Expose CommandlineInterface as a CLI via Google python-fire."""
    fire.Fire(CommandlineInterface)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3422059 | # -*- coding: utf-8 -*-
'''
Vdirsyncer is a synchronization tool for vdir. See the README for more details.
'''
# Packagers: Vdirsyncer's version is automatically detected using
# setuptools-scm, but that one is not a runtime dependency.
#
# Do NOT use the GitHub's tarballs, those don't contain any version information
# detectable for setuptools-scm. Rather use the PyPI ones.
import platform
from setuptools import Command, find_packages, setup
# Runtime dependencies; the pins below are explained by the inline links.
requirements = [
    # https://github.com/mitsuhiko/click/issues/200
    'click>=5.0',
    'click-log>=0.1.3',
    'click-threading>=0.1.2',
    # !=2.9.0: https://github.com/kennethreitz/requests/issues/2930
    # >=2.4.1: https://github.com/shazow/urllib3/pull/444
    #
    # Without the above pull request, `verify=False` also disables fingerprint
    # validation. This is *not* what we want, and it's not possible to
    # replicate vdirsyncer's current behavior (verifying fingerprints without
    # verifying against CAs) with older versions of urllib3.
    'requests >=2.4.1, !=2.9.0',
    'lxml >=3.1' + (
        # See https://github.com/pimutils/vdirsyncer/issues/298
        # We pin some LXML version that is known to work with PyPy
        # I assume nobody actually uses PyPy with vdirsyncer, so this is
        # moot
        ', <=3.4.4'
        if platform.python_implementation() == 'PyPy'
        else ''
    ),
    # https://github.com/sigmavirus24/requests-toolbelt/pull/28
    # And https://github.com/sigmavirus24/requests-toolbelt/issues/54
    'requests_toolbelt >=0.4.0',
    # https://github.com/untitaker/python-atomicwrites/commit/4d12f23227b6a944ab1d99c507a69fdbc7c9ed6d # noqa
    'atomicwrites>=0.1.7'
]
class PrintRequirements(Command):
    """Custom ``setup.py`` command that prints the pinned-minimum
    requirements, one per line, with ``>=`` rewritten to ``==``."""

    description = 'Prints minimal requirements'
    user_options = []

    def initialize_options(self):
        # Required by the Command interface; nothing to initialize.
        pass

    def finalize_options(self):
        # Required by the Command interface; nothing to finalize.
        pass

    def run(self):
        # e.g. "requests >=2.4.1, !=2.9.0" -> "requests==2.4.1,!=2.9.0"
        for spec in requirements:
            print(spec.replace(">", "=").replace(" ", ""))
# Package metadata; the version is derived from git tags via setuptools-scm
# and written to vdirsyncer/version.py at build time.
setup(
    name='vdirsyncer',
    use_scm_version={
        'write_to': 'vdirsyncer/version.py',
    },
    setup_requires=['setuptools_scm'],
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/pimutils/vdirsyncer',
    description='Synchronize calendars and contacts',
    license='MIT',
    long_description=open('README.rst').read(),
    packages=find_packages(exclude=['tests.*', 'tests']),
    include_package_data=True,
    entry_points={
        'console_scripts': ['vdirsyncer = vdirsyncer.cli:main']
    },
    install_requires=requirements,
    extras_require={
        'remotestorage': ['requests-oauthlib']
    },
    cmdclass={
        'minimal_requirements': PrintRequirements
    }
)
| StarcoderdataPython |
4865384 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from frappe.utils import getdate, add_days, get_time,get_datetime
from datetime import timedelta
class RiskMitigationPlan(Document):
	"""Frappe DocType controller for Risk Mitigation Plan."""
	def validate(self):
		# Reject any mitigation row whose target date is already in the past.
		for se in self.risk_mitigation_plan_list:
			if get_datetime(se.target_date).date() < datetime.today().date():
				frappe.throw("Target Date can not be past Date");
@frappe.whitelist()
def get_total_risk_value(risk_analysis):
	"""Return the full Risk Analysis document named *risk_analysis*.

	The original bound an unused empty list first; the function simply
	returns the loaded document.
	"""
	return frappe.get_doc("Risk Analysis", risk_analysis)
@frappe.whitelist()
def get_all_risk_elements(risk_analysis):
	"""Collect every risk element from the seven child tables ra_1..ra_7
	of the given Risk Analysis document.

	Args:
		risk_analysis: docname of the Risk Analysis document.
	Returns:
		list of dicts: [{'risk_element': <value>}, ...] in table order.
	"""
	elements = frappe.get_doc("Risk Analysis", risk_analysis)
	ret_list = []
	# The seven child tables are processed identically, so walk them in a
	# loop instead of the original seven copy-pasted blocks.
	for table_no in range(1, 8):
		for row in getattr(elements, "ra_{}".format(table_no)):
			ret_list.append({'risk_element': row.risk_element})
	return ret_list
@frappe.whitelist()
def get_main_risk_elements(risk_analysis):
	"""Collect the risk elements whose net_risk_score equals 9 from the
	seven child tables ra_1..ra_7 of the given Risk Analysis document.

	NOTE(review): 9 presumably means the maximum score of a 3x3 risk
	matrix -- confirm before changing the threshold.
	"""
	elements = frappe.get_doc("Risk Analysis", risk_analysis)
	ret_list = []
	# Same loop-over-tables shape as get_all_risk_elements, plus the
	# score filter; replaces seven copy-pasted blocks.
	for table_no in range(1, 8):
		for row in getattr(elements, "ra_{}".format(table_no)):
			if row.net_risk_score == 9:
				ret_list.append({'risk_element': row.risk_element})
	return ret_list
| StarcoderdataPython |
8065384 | <gh_stars>0
# Generated by Django 4.0.2 on 2022-03-24 00:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds Funcionario and Acerto models
    and adjusts Servico field definitions. Edit with care -- Django tracks
    this file's state."""
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Funcionario',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(blank=True, default='', max_length=60, verbose_name='Nome')),
            ],
        ),
        migrations.AlterField(
            model_name='servico',
            name='data_entrada',
            field=models.DateTimeField(verbose_name='Data/Hora entrada'),
        ),
        migrations.AlterField(
            model_name='servico',
            name='data_saida',
            field=models.DateTimeField(verbose_name='Data/Hora saída'),
        ),
        migrations.AlterField(
            model_name='servico',
            name='itens',
            field=models.ManyToManyField(db_column='', to='app.TipoServico'),
        ),
        migrations.CreateModel(
            name='Acerto',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('valor', models.DecimalField(decimal_places=2, default=0, max_digits=999, verbose_name='Valor')),
                ('valor_comissao', models.DecimalField(decimal_places=2, default=0, max_digits=999, verbose_name='Valor')),
                ('funcionario', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='app.funcionario', verbose_name='Funcionario')),
                ('servico', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.servico', verbose_name='Serviço')),
            ],
        ),
    ]
| StarcoderdataPython |
5193654 | from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from accounts.models import RiddanceProfile
UserModel = get_user_model()
@receiver(post_save, sender=UserModel)
def user_created(sender, instance, created, **kwargs):
    """post_save hook: create a RiddanceProfile for every newly created
    user (fires only on creation, not on updates)."""
    if created:
        profile = RiddanceProfile(
            user=instance)
        profile.save()
| StarcoderdataPython |
5063939 | #
# list_repo.py
#
import sys
import xml.dom.minidom
import gzip
def listRepo( repo_url ):
    """Return {package_name: (ver, rel, build_time)} for an RPM repository.

    Downloads ``<repo_url>/repodata/repomd.xml``, locates the gzip-compressed
    "primary" metadata file it references, and parses every rpm <package>
    entry from it.

    Raises:
        ValueError: if repomd.xml does not reference a "primary" data file.
    """
    from urllib.request import urlopen
    #
    # Fetch repomd, the index of the repository metadata files
    #
    with urlopen( '%s/repodata/repomd.xml' % (repo_url,) ) as req:
        repomd = req.read()

    primary_href = None
    dom = xml.dom.minidom.parseString( repomd )
    for data_element in dom.getElementsByTagName( 'data' ):
        if data_element.getAttribute( 'type' ) != 'primary':
            continue
        primary_href = getOnlyElement( data_element, 'location' ).getAttribute( 'href' )
    if primary_href is None:
        # The original fell through to a NameError here; fail clearly instead.
        raise ValueError( 'repomd.xml has no "primary" data entry' )

    #
    # Fetch and decompress the primary package list
    #
    with urlopen( '%s/%s' % (repo_url, primary_href) ) as req:
        primary = req.read()

    packages = {}
    dom = xml.dom.minidom.parseString( gzip.decompress( primary ) )
    for package_element in dom.getElementsByTagName( 'package' ):
        if package_element.getAttribute( 'type' ) != 'rpm':
            continue
        name = getElementText( getOnlyElement( package_element, 'name' ) )
        version_element = getOnlyElement( package_element, 'version' )
        ver = version_element.getAttribute( 'ver' )
        rel = version_element.getAttribute( 'rel' )
        time_element = getOnlyElement( package_element, 'time' )
        build_time = time_element.getAttribute( 'build' )
        packages[ name ] = (ver, rel, float(build_time))
    return packages
def getOnlyElement( element, tag ):
    """Return the single descendant element named *tag*; assert exactly
    one exists."""
    matches = element.getElementsByTagName( tag )
    assert len(matches) == 1
    return matches[0]
def getElementText( element ):
    """Concatenate the data of *element*'s direct text-node children."""
    return ''.join(
        node.data
        for node in element.childNodes
        if node.nodeType == node.TEXT_NODE
    )
def unittest( argv ):
    """Ad-hoc driver: print every package in the repository given as the
    first command-line argument; returns 0 on success."""
    all_packages = listRepo( argv[1] )
    for name in sorted( all_packages.keys() ):
        # BUG FIX: listRepo stores 3-tuples (ver, rel, build_time); the
        # original unpacked only two values and raised ValueError.
        ver, rel, _build_time = all_packages[ name ]
        print( '%s: %s-%s' % (name, ver, rel) )
    return 0

if __name__ == '__main__':
    sys.exit( unittest( sys.argv ) )
| StarcoderdataPython |
6446639 | <reponame>shyed2001/Python_Programming<gh_stars>1-10
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: user
#
# Created: 29/04/2019
# Copyright: (c) user 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
print("""
def area_of_circle(r):
return (22/7)*(r)**2
r=float(input("Please input the desired radious,r = "))
print(area_of_circle(r))
def area_of_circle(r):
a= (22/7)*(r)**2
return a
print(area_of_circle(7))
""")
# NOTE(review): this teaching script redefines area_of_circle several times;
# at each call site only the most recent definition is in effect.
def area_of_circle(r):
    return (22/7)*(r)**2
r=float(input("Please input the desired radious,r = "))
print(area_of_circle(r))
def area_of_circle(r):
    a= (22/7)*(r)**2
    return a
print(area_of_circle(7))
#____________________________________________+++++++++++++++++++
def area_of_circle(radius):
    # NOTE(review): xc, yc, xp, yp are undefined in this scope; calling this
    # version (as done below) raises NameError.
    radius = distance(xc, yc, xp, yp)
    a= (22/7)*(radius)**2
    return a
def distance(x1, y1, x2, y2):
    # NOTE(review): ``math`` is never imported in this file, so this raises
    # NameError when called -- add ``import math`` at the top.
    return math.sqrt( (x2-x1)**2 + (y2-y1)**2 )
print(distance(1, 2, 4, 6))
print(area_of_circle(7))
#############################################
def area_of_circle(xc, yc, xp, yp):
    # Area of the circle centered at (xc, yc) passing through (xp, yp).
    radius = distance(xc, yc, xp, yp)
    a= (22/7)*(radius)**2
    return a
def distance(xc, yc, xp, yp):
    return math.sqrt( (xp-xc)**2 + (yp-yc)**2 )
print(area_of_circle(1, 2, 4, 6))
#______________++_____________+++++______________________
##########################################################
def area_of_circle(xc, yc, xp, yp):
    # Same as above with the distance computation inlined.
    radius = math.sqrt( (xp-xc)**2 + (yp-yc)**2 )
    a= (22/7)*(radius)**2
    return a
#def distance(xc, yc, xp, yp):
    #return math.sqrt( (xp-xc)**2 + (yp-yc)**2 )
print(area_of_circle(1, 2, 4, 6))
| StarcoderdataPython |
9695170 | <filename>HDPython/object_name_maker.py
from HDPython.base import g_add_global_reset_function
class objectName:
    """Registry-backed mapping from a parametrized type to a unique HDL name.

    An instance describes a type ``objTypeName<Member0, Member1, ...>``.
    A class-level registry (``objectNameList``) remembers every name handed
    out, so equivalent types reuse the same HDL identifier and a new type
    with an already-seen base name gets a numeric suffix.
    """
    # Shared registry of every instance that has been assigned a HDL name.
    objectNameList = []

    def __init__(self, objTypeName, MemberTypeNames):
        self.objTypeName = objTypeName
        self.MemberTypeNames = MemberTypeNames
        # Assigned by get_Name() once a HDL name exists.
        self.HDL_objectName = None

    def __eq__(self, rhs):
        # Order-sensitive comparison of the member type list.
        return (self.objTypeName == rhs.objTypeName
                and self.MemberTypeNames == rhs.MemberTypeNames)

    def __str__(self):
        rendered = self.objTypeName + "<" + ", ".join(self.MemberTypeNames) + ">"
        if self.HDL_objectName:
            rendered += " --> " + self.HDL_objectName
        return rendered

    def get_Name(self):
        """Return the HDL name for this type, registering a new one if needed."""
        # Exact match (same base name, same member order): reuse its name.
        for registered in self.objectNameList:
            if registered == self:
                return registered.HDL_objectName
        same_base = [entry for entry in self.objectNameList
                     if entry.objTypeName == self.objTypeName]
        # Same members in a different order still denote the same type.
        wanted_members = sorted(self.MemberTypeNames)
        for entry in same_base:
            if sorted(entry.MemberTypeNames) == wanted_members:
                return entry.HDL_objectName
        # Brand-new type: suffix with the count of earlier same-named types.
        suffix = str(len(same_base)) if same_base else ""
        self.HDL_objectName = self.objTypeName + suffix
        self.objectNameList.append(self)
        return self.HDL_objectName
def make_object_name(objTypeName, MemberTypeNames):
    """Convenience wrapper: build an objectName and return its HDL name."""
    return objectName(objTypeName, MemberTypeNames).get_Name()
def reset_obj_name_maker():
    """Clear the global registry of assigned HDL object names."""
    objectName.objectNameList = []
g_add_global_reset_function(reset_obj_name_maker) | StarcoderdataPython |
1994449 | <reponame>vivekhub/razorpay-python<filename>tests/test_client_settlement.py
import responses
import json
from .helpers import mock_file, ClientTestCase
class TestClientSettlement(ClientTestCase):
    """Tests for client.settlement; HTTP calls are stubbed with ``responses``."""
    def setUp(self):
        super(TestClientSettlement, self).setUp()
        # Every request in this suite targets the /settlements collection.
        self.base_url = '{}/settlements'.format(self.base_url)
    @responses.activate
    def test_settlement_fetch_all(self):
        # all() with no params returns the full collection unchanged.
        result = mock_file('settlement_collection')
        url = self.base_url
        responses.add(responses.GET, url, status=200,
                      body=json.dumps(result), match_querystring=True)
        self.assertEqual(self.client.settlement.all(), result)
    @responses.activate
    def test_settlement_fetch_all_with_options(self):
        # all() forwards query options such as ``count`` to the URL.
        count = 1
        result = mock_file('settlement_collection_with_one_settlement')
        url = '{}?count={}'.format(self.base_url, count)
        responses.add(responses.GET, url, status=200, body=json.dumps(result),
                      match_querystring=True)
        self.assertEqual(self.client.settlement.all({'count': count}), result)
    @responses.activate
    def test_settlement_fetch(self):
        # fetch() retrieves a single settlement by id.
        result = mock_file('fake_settlement')
        url = '{}/{}'.format(self.base_url, self.settlement_id)
        responses.add(responses.GET, url, status=200, body=json.dumps(result),
                      match_querystring=True)
        self.assertEqual(self.client.settlement.fetch(self.settlement_id), result)
| StarcoderdataPython |
11262966 | <reponame>Toni-d-e-v/rxcsentinetal1
"""
ruxcryptod JSONRPC interface
"""
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import config
import base58
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from masternode import Masternode
from decimal import Decimal
import time
from ruxcryptod import ruxcryptod
class RXCDaemon(ruxcryptod):
    """ruxcryptod JSONRPC client constructed from a ruxcrypto.conf file."""

    # IDIOM FIX: both alternate constructors named their implicit class
    # argument ``self``; renamed to the conventional ``cls`` (no caller
    # passes it, so the interface is unchanged).
    @classmethod
    def from_ruxcrypto_conf(cls, ruxcrypto_dot_conf):
        """Build a daemon client from RPC credentials in the given conf file."""
        from ruxcrypto_conf import RXCConfig
        config_text = RXCConfig.slurp_config_file(ruxcrypto_dot_conf)
        creds = RXCConfig.get_rpc_creds(config_text, config.network)
        # The RPC host comes from sentinel config, not from the conf file.
        creds[u'host'] = config.rpc_host
        return cls(**creds)

    @classmethod
    def from_rxc_conf(cls, rxc_dot_conf):
        """Inherited entry point that is intentionally unsupported for RXC."""
        raise RuntimeWarning('This method should not be used with RXC')
| StarcoderdataPython |
1864048 | import os
import hydra
import jax
import jax.numpy as jnp
from flax.serialization import to_state_dict
from omegaconf import DictConfig, OmegaConf
from data import get_dataset
from models.jax import get_model
from neural_kernels.ntk import ntk_eigendecomposition
@hydra.main(config_path="config/compute_ntk", config_name="config")
def main(cfg: DictConfig) -> None:
    """Compute the NTK eigendecomposition of a model at initialization and
    save eigenvectors, eigenvalues, init parameters and the data to disk."""
    print(OmegaConf.to_yaml(cfg))
    model_key = jax.random.PRNGKey(cfg.seed)
    train_ds, test_ds = get_dataset(**cfg.data)
    # The eigendecomposition is taken over train and test points jointly.
    data = jnp.concatenate([train_ds["data"], test_ds["data"]], axis=0)
    model = get_model(**cfg.model)
    init_variables = model.init(model_key, jnp.zeros(cfg.shape, jnp.float32))
    print("Computing NTK at init...")
    (
        eigvals_init,
        eigvecs_init,
        _,
        _,
    ) = ntk_eigendecomposition(model, init_variables, data, **cfg.ntk)
    print("Done!")
    print("Saving results...")
    init_variables_state_dict = to_state_dict(init_variables)
    # Save relative to the original working directory (hydra changes cwd).
    save_path = f"{hydra.utils.get_original_cwd()}/artifacts/eigenfunctions/{cfg.data.dataset}/{cfg.model.model_name}"
    os.makedirs(save_path, exist_ok=True)
    jnp.save(f"{save_path}/eigvecs.npy", eigvecs_init)
    jnp.save(f"{save_path}/eigvals.npy", eigvals_init)
    jnp.save(
        f"{save_path}/init_variables.npy",
        init_variables_state_dict,
    )
    jnp.save(f"{save_path}/data.npy", data)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
210129 | import base64
import logging
import re
import subprocess
import os
import boto3
from botocore.exceptions import ClientError
from bentoml.exceptions import (
BentoMLException,
MissingDependencyException,
AWSServiceError,
)
logger = logging.getLogger(__name__)
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/\
# using-cfn-describing-stacks.html
FAILED_CLOUDFORMATION_STACK_STATUS = [
"CREATE_FAILED",
# Ongoing creation of one or more stacks with an expected StackId
# but without any templates or resources.
"REVIEW_IN_PROGRESS",
"ROLLBACK_FAILED",
# This status exists only after a failed stack creation.
"ROLLBACK_COMPLETE",
# Ongoing removal of one or more stacks after a failed stack
# creation or after an explicitly cancelled stack creation.
"ROLLBACK_IN_PROGRESS",
]
SUCCESS_CLOUDFORMATION_STACK_STATUS = ["CREATE_COMPLETE", "UPDATE_COMPLETE"]
def generate_aws_compatible_string(*items, max_length=63):
"""
Generate a AWS resource name that is composed from list of string items. This
function replaces all invalid characters in the given items into '-', and allow user
to specify the max_length for each part separately by passing the item and its max
length in a tuple, e.g.:
>> generate_aws_compatible_string("abc", "def")
>> 'abc-def' # concatenate multiple parts
>> generate_aws_compatible_string("abc_def")
>> 'abc-def' # replace invalid chars to '-'
>> generate_aws_compatible_string(("ab", 1), ("bcd", 2), max_length=4)
>> 'a-bc' # trim based on max_length of each part
"""
trimmed_items = [
item[0][: item[1]] if type(item) == tuple else item for item in items
]
items = [item[0] if type(item) == tuple else item for item in items]
for i in range(len(trimmed_items)):
if len("-".join(items)) <= max_length:
break
else:
items[i] = trimmed_items[i]
name = "-".join(items)
if len(name) > max_length:
raise BentoMLException(
"AWS resource name {} exceeds maximum length of {}".format(name, max_length)
)
invalid_chars = re.compile("[^a-zA-Z0-9-]|_")
name = re.sub(invalid_chars, "-", name)
return name
def get_default_aws_region():
try:
aws_session = boto3.session.Session()
region = aws_session.region_name
if not region:
return ""
return aws_session.region_name
except ClientError as e:
# We will do nothing, if there isn't a default region
logger.error("Encounter error when getting default region for AWS: %s", str(e))
return ""
def ensure_sam_available_or_raise():
try:
import samcli
if samcli.__version__ != "0.33.1":
raise BentoMLException(
"aws-sam-cli package requires version 0.33.1 "
"Install the package with `pip install -U aws-sam-cli==0.33.1`"
)
except ImportError:
raise MissingDependencyException(
"aws-sam-cli package is required. Install "
"with `pip install --user aws-sam-cli`"
)
def call_sam_command(command, project_dir, region):
command = ["sam"] + command
# We are passing region as part of the param, due to sam cli is not currently
# using the region that passed in each command. Set the region param as
# AWS_DEFAULT_REGION for the subprocess call
logger.debug('Setting envar "AWS_DEFAULT_REGION" to %s for subprocess call', region)
copied_env = os.environ.copy()
copied_env["AWS_DEFAULT_REGION"] = region
proc = subprocess.Popen(
command,
cwd=project_dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=copied_env,
)
stdout, stderr = proc.communicate()
logger.debug("SAM cmd %s output: %s", command, stdout.decode("utf-8"))
return proc.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def validate_sam_template(template_file, aws_region, sam_project_path):
status_code, stdout, stderr = call_sam_command(
["validate", "--template-file", template_file, "--region", aws_region],
project_dir=sam_project_path,
region=aws_region,
)
if status_code != 0:
error_message = stderr
if not error_message:
error_message = stdout
raise BentoMLException(
"Failed to validate lambda template. {}".format(error_message)
)
def cleanup_s3_bucket_if_exist(bucket_name, region):
s3_client = boto3.client('s3', region)
s3 = boto3.resource('s3')
try:
logger.debug('Removing all objects inside bucket %s', bucket_name)
s3.Bucket(bucket_name).objects.all().delete()
logger.debug('Deleting bucket %s', bucket_name)
s3_client.delete_bucket(Bucket=bucket_name)
except ClientError as e:
if e.response and e.response['Error']['Code'] == 'NoSuchBucket':
# If there is no bucket, we just let it silently fail, dont have to do
# any thing
return
else:
raise e
def delete_cloudformation_stack(stack_name, region):
cf_client = boto3.client("cloudformation", region)
cf_client.delete_stack(StackName=stack_name)
def delete_ecr_repository(repository_name, region):
try:
ecr_client = boto3.client("ecr", region)
ecr_client.delete_repository(repositoryName=repository_name, force=True)
except ClientError as e:
if e.response and e.response['Error']['Code'] == 'RepositoryNotFoundException':
# Don't raise error, if the repo can't be found
return
else:
raise e
def get_instance_public_ip(instance_id, region):
ec2_client = boto3.client("ec2", region)
response = ec2_client.describe_instances(InstanceIds=[instance_id])
all_instances = response["Reservations"][0]["Instances"]
if all_instances:
if "PublicIpAddress" in all_instances[0]:
return all_instances[0]["PublicIpAddress"]
return ""
def get_instance_ip_from_scaling_group(autoscaling_group_names, region):
asg_client = boto3.client("autoscaling", region)
response = asg_client.describe_auto_scaling_groups(
AutoScalingGroupNames=autoscaling_group_names
)
all_autoscaling_group_info = response["AutoScalingGroups"]
all_instances = []
if all_autoscaling_group_info:
for group in all_autoscaling_group_info:
for instance in group["Instances"]:
endpoint = get_instance_public_ip(instance["InstanceId"], region)
all_instances.append(
{
"instance_id": instance["InstanceId"],
"endpoint": endpoint,
"state": instance["LifecycleState"],
"health_status": instance["HealthStatus"],
}
)
return all_instances
def get_aws_user_id():
return boto3.client("sts").get_caller_identity().get("Account")
def create_ecr_repository_if_not_exists(region, repository_name):
ecr_client = boto3.client("ecr", region)
try:
result = ecr_client.describe_repositories(repositoryNames=[repository_name])
repository_id = result['repositories'][0]['registryId']
except ecr_client.exceptions.RepositoryNotFoundException:
result = ecr_client.create_repository(repositoryName=repository_name)
repository_id = result['repository']['registryId']
return repository_id
def get_ecr_login_info(region, repository_id):
ecr_client = boto3.client('ecr', region)
token = ecr_client.get_authorization_token(registryIds=[repository_id])
logger.debug("Getting docker login info from AWS")
username, password = (
base64.b64decode(token["authorizationData"][0]["authorizationToken"])
.decode("utf-8")
.split(":")
)
registry_url = token["authorizationData"][0]["proxyEndpoint"]
return registry_url, username, password
def generate_bentoml_exception_from_aws_client_error(e, message_prefix=None):
    """Convert a botocore ClientError into an AWSServiceError.

    Extracts the operation name, error code and error message from the
    ClientError response, logs the combined description (optionally
    prefixed with *message_prefix*), and returns an AWSServiceError
    carrying the same text.

    Args:
        e: ClientError from botocore.exceptions
        message_prefix: optional string prepended to the logged message

    Returns:
        AWSServiceError wrapping the formatted error description
    """
    # Botocore stores the structured error payload under "Error"; fall back
    # to "Unknown" fields when the response is malformed or missing.
    error_response = e.response.get("Error", {})
    error_code = error_response.get("Code", "Unknown")
    error_message = error_response.get("Message", "Unknown")
    error_log_message = (
        f"AWS ClientError - operation: {e.operation_name}, "
        f"code: {error_code}, message: {error_message}"
    )
    if message_prefix:
        error_log_message = f"{message_prefix}; {error_log_message}"
    logger.error(error_log_message)
    return AWSServiceError(error_log_message)
def describe_cloudformation_stack(region, stack_name):
    """Return the description of exactly one CloudFormation stack.

    Args:
        region: AWS region name.
        stack_name: name of the stack to look up.

    Raises:
        BentoMLException: if the stack is missing, ambiguous, or the
            describe call itself fails.
    """
    cf_client = boto3.client("cloudformation", region)
    try:
        cloudformation_stack_result = cf_client.describe_stacks(StackName=stack_name)
    except ClientError as error:
        # Chain the botocore error so the root cause stays in the traceback.
        raise BentoMLException(
            f'Failed to describe CloudFormation {stack_name} {error}'
        ) from error
    # `get` may return None on a malformed response; treat that as "not found"
    # instead of crashing with a TypeError on len(None).
    stack_info = cloudformation_stack_result.get('Stacks') or []
    if len(stack_info) < 1:
        raise BentoMLException(f'Cloudformation {stack_name} not found')
    if len(stack_info) > 1:
        raise BentoMLException(
            f'Found more than one cloudformation stack for {stack_name}'
        )
    return stack_info[0]
| StarcoderdataPython |
4920415 | <gh_stars>0
import xml.etree.ElementTree
import os
def parsing(a, b):
    """Map each image path to the object categories in its VOC-style XML file.

    Args:
        a: directory containing one ``<stem>.xml`` annotation per image.
        b: path prefix used to build the returned image keys.

    Returns:
        dict mapping ``"<b>/<stem>.jpg"`` to the list of ``<object><name>``
        category strings found in the corresponding annotation file.
    """
    annotations = {}
    for filename in os.listdir(a):
        # splitext instead of the old fixed 4-character slice, so extensions
        # of any length (".xml", ".XML", ...) are stripped correctly.
        stem = os.path.splitext(filename)[0]
        key = b + "/" + stem + ".jpg"
        root = xml.etree.ElementTree.parse(os.path.join(a, filename)).getroot()
        annotations[key] = [obj.find('name').text for obj in root.findall('object')]
    return annotations
| StarcoderdataPython |
12839010 | # -*- coding: utf-8 -*-
import time
from pip_services3_components.log import CachedLogger, LogLevel
class LoggerFixture:
    """Reusable assertions that exercise a CachedLogger implementation."""

    _logger: CachedLogger = None

    def __init__(self, logger: CachedLogger):
        self._logger = logger

    def test_log_level(self):
        # The configured level must lie inside the valid LogLevel range.
        level = self._logger.get_level()
        assert level >= LogLevel.Nothing
        assert level <= LogLevel.Trace

    def test_simple_logging(self):
        # Emit one message at every severity, then flush the cached entries.
        log = self._logger
        log.set_level(LogLevel.Trace)
        log.fatal(None, None, "Fatal error message")
        log.error(None, None, "Error message")
        log.warn(None, "Warning message")
        log.info(None, "Information message")
        log.debug(None, "Debug message")
        log.trace(None, "Trace message")
        log.dump()
        time.sleep(1)

    def test_error_logging(self):
        try:
            # Raise an exception so a real exception object gets logged.
            raise Exception()
        except Exception as error:
            self._logger.fatal("123", error, "Fatal error")
            self._logger.error("123", error, "Recoverable error")
        self._logger.dump()
        time.sleep(1)
| StarcoderdataPython |
import yaml

# Rewrite the proxy configuration in place so the Foundations REST API
# entry points at the local development server.
CONFIG_PATH = 'proxy_config_atlas.yaml'

with open(CONFIG_PATH, 'r') as stream:
    proxy_config = yaml.safe_load(stream)

proxy_config['service_uris']['foundations_rest_api'] = 'http://localhost:37722'

with open(CONFIG_PATH, 'w') as outfile:
    yaml.dump(proxy_config, outfile, default_flow_style=False)
1722533 | <gh_stars>0
import sys
import os
import glob
import datetime
import json
# project imports
sys.path.insert(0, os.path.dirname(os.path.abspath('')))
from core.data import write_data_to_file, open_data_file
from core.generator import get_training_and_validation_generators
from core.model import isensee2017_model
from unet.unet_config import get_kfold_configuration, get_unet_configuration
from core.training import load_old_model, train_model
from core.metrics import update_global_weights
def get_model_memory_usage(batch_size, model):
    """Estimate the memory (in GB) needed to run a Keras *model* at *batch_size*.

    Based on https://stackoverflow.com/questions/43137288/how-to-determine-needed-memory-of-keras-model

    The estimate covers per-layer output activations plus trainable and
    non-trainable weights, scaled by the float width Keras is configured to
    use. Nested ``Model`` layers are accounted for recursively.
    """
    import numpy as np
    try:
        from keras import backend as K
    except ImportError:
        # Bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and genuine errors inside keras; only a missing package should fall
        # back to the Keras bundled with TensorFlow.
        from tensorflow.keras import backend as K

    shapes_mem_count = 0
    internal_model_mem_count = 0
    for layer in model.layers:
        if layer.__class__.__name__ == 'Model':
            # Recurse into nested sub-models (their own layers/weights).
            internal_model_mem_count += get_model_memory_usage(batch_size, layer)
        single_layer_mem = 1
        out_shape = layer.output_shape
        if type(out_shape) is list:
            out_shape = out_shape[0]
        for dim in out_shape:
            if dim is None:
                # Unknown (batch) dimensions contribute nothing here; the
                # batch factor is applied once below.
                continue
            single_layer_mem *= dim
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum([K.count_params(p) for p in model.trainable_weights])
    non_trainable_count = np.sum([K.count_params(p) for p in model.non_trainable_weights])

    # Bytes per scalar: 4 for float32 (default), 2 for float16, 8 for float64.
    number_size = 4.0
    if K.floatx() == 'float16':
        number_size = 2.0
    if K.floatx() == 'float64':
        number_size = 8.0

    total_memory = number_size * (batch_size * shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3) + internal_model_mem_count
    return gbytes
def main():
    """Build a throwaway U-Net model and print its estimated memory footprint."""
    kfold_config = get_kfold_configuration("")
    model_config = get_unet_configuration(kfold_config)

    # Change settings manually
    # model_config["input_shape"] = (1,16,16,16)

    # Instantiate a fresh model purely to measure it.
    update_global_weights(model_config)
    model_name = 'estimator_model'
    model = isensee2017_model(input_shape=model_config["input_shape"],
                              n_labels=model_config["n_labels"],
                              initial_learning_rate=model_config["initial_learning_rate"],
                              n_base_filters=model_config["n_base_filters"],
                              model_name=model_name)
    print(get_model_memory_usage(model_config["batch_size"], model))
    os.remove(model_name)  # The model function auto saves this dummy model.


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3563385 | <reponame>andylucny/JetBotDemos
import torch
from torchvision import transforms as T
import cv2
from PIL import Image

# Run a trained EfficientDet checkpoint on a single image and save a copy
# with the confident detections (score >= 0.3) drawn as red rectangles.
model_name = "trained_models/signatrix_efficientdet_coco.pth"
model = torch.load(model_name).module
model.cuda()

img_path = "../dataset/photo10.png"
image = cv2.imread(img_path)
image = cv2.resize(image, (512, 512))

# OpenCV loads BGR; the model expects an RGB tensor scaled to [0, 1].
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
transform = T.Compose([T.ToTensor()])
img = transform(img)

print('start')
scores, labels, boxes = model(img.cuda().float().unsqueeze(dim=0))
print('stop')

if boxes.shape[0] > 0:
    for box_id in range(boxes.shape[0]):
        pred_prob = float(scores[box_id])
        if pred_prob < 0.3:
            # Assumes predictions are sorted by descending score, so all
            # remaining boxes are weaker — TODO confirm model output order.
            break
        xmin, ymin, xmax, ymax = boxes[box_id, :]
        # Bug fix: cv2.rectangle needs plain integer pixel coordinates;
        # the raw values here are CUDA tensors, which OpenCV rejects.
        cv2.rectangle(image, (int(xmin), int(ymin)), (int(xmax), int(ymax)),
                      (0, 0, 255), 2)
cv2.imwrite("prediction.png", image)
| StarcoderdataPython |
1630193 | """Support functions for pyxmpp2 test suite."""
import os
import sys
import logging
import unittest
# Directory containing this test module, and its bundled data files.
TEST_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(TEST_DIR, "data")

# Optional test resources enabled by default; a space-separated TEST_USE
# environment variable overrides the whole list.
RESOURCES = ['network', 'lo-network', 'gsasl']
if "TEST_USE" in os.environ:
    RESOURCES = os.environ["TEST_USE"].split()
if "TEST_STACKDUMP_FILE" in os.environ:
    # Debug aid for hanging tests: when TEST_STACKDUMP_FILE names a file,
    # a background daemon thread appends a stack trace of every live thread
    # to it every five seconds.
    import traceback
    import threading
    import time
    def stack_dumper():
        # NOTE(review): the dump file is opened once and never closed; the
        # daemon thread dies with the interpreter, so this appears to be
        # intentional best-effort behavior.
        stackdump_file = open(os.environ.get("TEST_STACKDUMP_FILE"), "w")
        while True:
            time.sleep(5)
            stackdump_file.write(time.ctime() + "\n")
            frames = sys._current_frames()
            for frame in frames.values():
                traceback.print_stack(frame, file = stackdump_file)
            stackdump_file.write("\n")
            stackdump_file.flush()
    thr = threading.Thread(target = stack_dumper)
    thr.daemon = True
    thr.start()
# pylint: disable=W0602,C0103
logging_ready = False

def setup_logging():
    """Configure logging once; verbosity scales with '-v' flags in sys.argv.

    Three or more '-v' flags select DEBUG, exactly two select INFO, and
    anything less selects ERROR. Subsequent calls are no-ops.
    """
    # pylint: disable=W0603
    global logging_ready
    if logging_ready:
        return
    verbosity = sys.argv.count("-v")
    if verbosity > 2:
        level = logging.DEBUG
    elif verbosity == 2:
        level = logging.INFO
    else:
        level = logging.ERROR
    logging.basicConfig(level=level)
    logging_ready = True
def filter_tests(suite):
    """Copy *suite*, recursively dropping test classes whose names start with '_'."""
    kept = unittest.TestSuite()
    for item in suite:
        if isinstance(item, unittest.TestSuite):
            # Recurse into nested suites, keeping the suite structure.
            kept.addTest(filter_tests(item))
            continue
        if item.__class__.__name__.startswith("_"):
            continue
        kept.addTest(item)
    return kept
def load_tests(loader, tests, pattern):
    """unittest load_tests hook: default discovery minus '_'-prefixed classes."""
    # pylint: disable=W0613
    return filter_tests(tests)
| StarcoderdataPython |
class BackendError(Exception):
    """Exception for backend failures; specifics are defined by raising call sites."""
    pass
class ThreadStoppedError(Exception):
    """Exception signalling that a worker thread was stopped; see raising call sites."""
    pass
| StarcoderdataPython |
3200045 | import numpy as np, re, pickle
from random import shuffle
def init_weight(size1, size2=0, mean = 0, sigma = .1):
    """Draw a Gaussian weight matrix; a (size1, 1) column vector when size2 is 0."""
    shape = (size1, 1) if size2 == 0 else (size1, size2)
    return np.random.normal(mean, sigma, shape)
def get_n_feature(line1, line2):
    """Compare the numeric tokens of two lines and return three 0/1 flags.

    Numeric tokens are space-delimited integers or decimals. The flags are:
      nfeat[0]: 1 only when *neither* line contains numbers (see NOTE below);
      nfeat[1]: 1 when both lines carry the same multiset of numbers;
      nfeat[2]: 1 when the shorter number list is a sub-multiset of the longer.
    """
    nfeat = [0,0,0]
    # Matches " 123 " or " 12.34 " — tokens must be surrounded by spaces, so
    # numbers at the very start or end of a line are NOT captured.
    p = re.compile(' [0-9]+ | [0-9]+\.[0-9]+ ')
    m1 = p.findall(line1)
    m2 = p.findall(line2)
    if m1 and m2:
        nfeat[0] = 0
    elif not m1 and not m2:
        # Neither line has numbers: treated as a trivial match.
        nfeat[0]=1
        return nfeat
    else:
        # Exactly one line has numbers: no flags apply.
        return nfeat
    if len(m1) == len(m2):
        nfeat[0] = 1
        tm1 = [i for i in m1]
        tm2 = [i for i in m2]
        # Cancel matched tokens pairwise; if both scratch lists empty out,
        # the two lines carry identical number multisets.
        for i in m1:
            if i in tm2:
                tm2.remove(i)
                tm1.pop(0)
        if not tm2 and not tm1:
            nfeat[1]=1
        else:
            nfeat[0] = 0
    # NOTE(review): this unconditionally clears nfeat[0], overwriting the
    # equal-count flag set just above — looks like leftover/debug code, and the
    # exact nesting of this line in the original is ambiguous; confirm intent.
    nfeat[0] = 0
    # Sub-multiset test: delete the shorter list's tokens from a copy of the
    # shorter list while scanning the longer one; empty means containment.
    tm = [m1, m2] if len(m1)<len(m2) else [m2, m1]
    for i in tm[1]:
        if i in tm[0]:
            tm[0].remove(i)
    if not tm[0]:
        nfeat[2] = 1
    return nfeat
def dynamic_pooling(in_matrix, pool_size, pf='min'):
    """Pool a 2-D matrix down (or up) to a fixed (pool_size, pool_size) grid.

    Ported from MATLAB (original statements kept as reference comments).
    Matrices smaller than pool_size along either axis are first upsampled by
    repeating rows/columns; the result is then partitioned into pool_size
    nearly-equal strips per axis and each cell reduced with *pf*
    ('min' default, 'max', or 'mean').
    """
    if pf == 'max':
        pool_fun = np.max
    elif pf == 'mean':
        pool_fun = np.mean
    else:
        # Any unrecognized value silently falls back to min pooling.
        pool_fun = np.min
    output_matrix = np.zeros((pool_size,pool_size))
    dim1, dim2 = in_matrix.shape
    # Double the row count (each row duplicated) until there are enough rows.
    while dim1 < pool_size:
        in_matrix = np.concatenate([[in_matrix[int(i / 2), :]] for i in range(dim1 * 2)], axis=0)
        dim1,_ = in_matrix.shape
    # Same for columns: duplicate each column, via a transpose round-trip.
    while dim2 < pool_size:
        in_matrix = np.concatenate([[in_matrix[:,int(i/2)].tolist()] for i in range(dim2*2)],axis=0).transpose()
        _,dim2 = in_matrix.shape
    # quot1 = floor(dim1 / pool_size);
    # quot2 = floor(dim2 / pool_size);
    qout1 = int(np.floor(dim1 / pool_size))
    qout2 = int(np.floor(dim2 / pool_size))
    # rem1 = dim1 - quot1 * pool_size;
    # rem2 = dim2 - quot2 * pool_size;
    rem1 = dim1 - qout1 * pool_size
    rem2 = dim2 - qout2 * pool_size
    # vec1 = [0;cumsum(quot1 * ones(pool_size, 1) + [zeros(pool_size - rem1, 1); ones(rem1, 1)])];
    # vec2 = [0;cumsum(quot2 * ones(pool_size, 1) + [zeros(pool_size - rem2, 1);ones(rem2, 1)])];
    # Build cumulative strip boundaries: the last rem1/rem2 strips are one
    # element larger, so the strips cover the axis exactly.
    t11 = qout1 * np.ones((pool_size, 1))
    t12 = np.concatenate((np.zeros((pool_size - rem1, 1)), np.ones((rem1, 1))))
    t21 = qout2 * np.ones((pool_size, 1))
    t22 = np.concatenate((np.zeros((pool_size - rem2, 1)), np.ones((rem2, 1))))
    vec1 = np.concatenate(([[0]] , np.cumsum(t11+t12,axis=0)),axis=0,)
    vec2 = np.concatenate(([[0]] , np.cumsum(t21+t22,axis=0)),axis=0)
    # MATLAB reference for the reduction loop below:
    # for i=1:pool_size
    #     for j=1:pool_size
    #         pooled = input_matrix(vec1(i) + 1:vec1(i + 1), vec2(j) + 1:vec2(j + 1));
    #         output_matrix(i, j) = func(pooled(:));
    #     end
    # end
    for i in range(pool_size):
        for j in range(pool_size):
            # Strip boundaries are float arrays from cumsum; cast to int slices.
            l11=int(vec1[i]); l12=int(vec1[i + 1])
            l21=int(vec2[j]); l22=int(vec2[j + 1])
            pooled = in_matrix[l11:l12, l21:l22]
            output_matrix[i,j] = pool_fun(pooled)
    return output_matrix
def similarity_matrix(x1, x2):
    """Pairwise Euclidean distances between the vectors of x1 and x2.

    Despite the name, the values are distances (smaller = more similar).

    Returns:
        s_min: dict mapping each key of x1 to its smallest distance to any
            member of x2.
        s_matrix: the full (len(x1), len(x2)) distance matrix.
    """
    # NOTE(review): `for i in x1` yields dict keys (or list elements) that are
    # used both as row indices and as lookups, so x1/x2 appear to be dicts
    # keyed 0..len-1 of equal-length vectors — confirm at call sites.
    s_matrix = np.zeros((len(x1), len(x2)))
    for i in x1:
        for j in x2:
            s_matrix[i, j] = np.linalg.norm(x1[i]-x2[j])
    s_min = {}
    for i in x1:
        # s_min[(x1[i], x2[np.argmin(s_matrix[i,])])] = np.amin(s_matrix[i,])
        s_min[i] = np.amin(s_matrix[i,])
    return s_min, s_matrix
def mini_batch(data, data_size, batch_size):
    """Shuffle *data* in place and split it into consecutive mini-batches.

    Args:
        data: list of samples (shuffled in place).
        data_size: number of samples in *data*.
        batch_size: maximum size of each batch.

    Returns:
        List of batches, each *batch_size* long except a possibly smaller
        final batch when data_size is not a multiple of batch_size.
    """
    shuffle(data)
    batches = []
    # Bug fixes: the old `range(1, data_size/batch_size)` used float division
    # (a TypeError on Python 3) and, once fixed to int division, still dropped
    # the final full batch because the range stopped one iteration early.
    n_full = data_size // batch_size
    for i in range(n_full):
        batches.append(data[i * batch_size:(i + 1) * batch_size])
    if data_size % batch_size != 0:
        batches.append(data[n_full * batch_size:data_size])
    return batches
def unzip(data):
    """Split a sequence of pairs into two parallel lists (firsts, seconds)."""
    firsts = [pair[0] for pair in data]
    seconds = [pair[1] for pair in data]
    return firsts, seconds
def get_results(score, y_test):
    """Tally binary-classification counts against ground truth.

    Returns (tp, tn, fp, fn, accuracy, f1). The precision/recall/F1
    computation is disabled in this version, so f1 is always 0.0.
    """
    tp = 0.0; fp = 0.0; fn = 0.0; tn = 0.0; f1 = 0.0
    for predicted, actual in zip(score, y_test):
        if actual == 1:
            if predicted == 1:
                tp += 1
            else:
                fn += 1
        elif actual == 0:
            if predicted == 1:
                fp += 1
            else:
                tn += 1
    acc = (tp + tn) / (tp + fp + tn + fn)
    return tp, tn, fp, fn, acc, f1
11393399 | import unittest
import os
import numpy as np
from pmutt.io import gaussian
# Path to the Gaussian output fixture, resolved relative to this test module.
test_file = os.path.join(os.path.dirname(__file__), 'test_gaussian.log')
class TestGaussian(unittest.TestCase):
    """Regression tests for the pmutt.io.gaussian parsers against a fixture log.

    Every expected value below was transcribed from ``test_gaussian.log``;
    the numbers are reference data, not derived quantities, so they must be
    kept byte-for-byte in sync with that file.
    """

    def test_read_zpe(self):
        # Zero-point energy, in Hartree per molecule.
        self.assertAlmostEqual(
            gaussian.read_zpe(test_file, units='Ha/molecule'), 0.451590)

    def test_read_electronic_and_zpe(self):
        # Electronic energy plus ZPE, in Hartree per molecule.
        self.assertAlmostEqual(
            gaussian.read_electronic_and_zpe(test_file, units='Ha/molecule'),
            -31835.216711)

    def test_read_freq(self):
        # Complete vibrational frequency table from the fixture (cm^-1).
        expected_freq = np.array([
            27.8123, 47.5709, 54.1668, 63.3422, 70.3570, 83.5806, 105.7893,
            106.3834, 110.1717, 118.9769, 128.5087, 131.6157, 139.4795,
            144.8822, 148.6762, 150.7479, 158.0838, 162.6256, 167.1256,
            168.3303, 173.9985, 178.1156, 188.6688, 192.7570, 198.7537,
            203.8342, 206.0958, 206.9989, 220.1416, 223.4077, 224.4668,
            228.7312, 231.6316, 240.6201, 252.2056, 255.3178, 263.2650,
            264.7325, 265.8385, 266.4317, 269.3429, 271.3071, 277.1693,
            280.3887, 284.9428, 290.9176, 295.4862, 298.4597, 303.3893,
            309.2622, 312.9840, 315.8913, 318.1305, 319.7522, 321.2753,
            322.4733, 326.9842, 333.4541, 340.0401, 345.5120, 348.0656,
            350.8158, 353.0702, 353.6111, 356.1007, 357.6536, 359.2877,
            367.0172, 369.3855, 369.8407, 371.1798, 374.0695, 374.6035,
            376.8763, 377.6512, 380.5837, 386.9291, 388.8287, 390.1100,
            395.3614, 398.0700, 403.8377, 404.0733, 406.3628, 410.7360,
            413.7586, 419.0988, 420.4172, 422.7343, 433.6646, 440.4825,
            442.7119, 444.4128, 446.2963, 455.4076, 465.6478, 474.9199,
            475.4469, 482.6803, 482.7576, 488.8146, 491.5016, 501.5024,
            504.8515, 511.2423, 516.6283, 526.2766, 535.2972, 558.6641,
            570.2303, 577.7282, 579.9795, 581.9509, 589.6657, 593.8711,
            604.6950, 605.9620, 652.3132, 663.5726, 666.5601, 669.5307,
            680.1766, 694.5581, 718.8642, 721.0991, 721.6467, 730.9165,
            734.4614, 738.8954, 739.2874, 744.5907, 747.6314, 752.4637,
            754.0269, 754.7830, 760.8011, 765.1364, 767.2267, 767.9609,
            772.7335, 775.1048, 776.9790, 778.1853, 780.4696, 782.4896,
            784.6325, 792.9234, 796.1776, 800.2807, 809.2837, 810.2514,
            812.6860, 815.0878, 816.1694, 818.6536, 822.8071, 823.8286,
            826.6099, 829.4161, 832.3744, 836.4288, 837.7248, 850.0946,
            850.5175, 853.2736, 854.5033, 857.6854, 858.5265, 860.3740,
            865.7335, 866.7068, 887.7243, 924.0169, 927.7069, 931.9097,
            950.7322, 955.9708, 971.7114, 979.8923, 980.4738, 1007.0644,
            1017.2820, 1042.7275, 1063.6456, 1068.8569, 1093.0158, 1101.6547,
            1124.3030, 1125.2714, 1129.4260, 1135.0911, 1138.0975, 1141.8434,
            1147.9123, 1148.0797, 1152.3754, 1157.5488, 1162.5875, 1168.5421,
            1170.1698, 1175.7276, 1180.3671, 1191.7226, 1197.5352, 1201.7893,
            1202.9845, 1206.5067, 1206.8160, 1209.8491, 1212.5965, 1215.9923,
            1216.9741, 1217.3988, 1225.6334, 1231.5981, 1233.0540, 1242.0423,
            1244.3377, 1246.8269, 1248.8556, 1255.7530, 1258.2321, 1268.6540,
            1273.0555, 1279.0952, 1290.6498, 1330.7360, 1333.6738, 1348.6979,
            1350.5632, 1392.6681, 1425.2621, 1485.7062, 1497.9658, 1525.1034,
            1536.7057, 2620.6126, 2695.6448, 3054.4311, 3068.2387, 3074.8332,
            3111.5420, 3121.8378, 3131.6155, 3149.3723, 3177.2311, 3179.1266,
            3617.1548, 3666.5503
        ])
        np.testing.assert_array_almost_equal(
            np.array(gaussian.read_frequencies(test_file)), expected_freq)

    def test_rotational_temperatures(self):
        # Rotational temperatures, one per principal axis.
        expected_rot_temp = np.array([0.00005, 0.00004, 0.00004])
        np.testing.assert_array_almost_equal(
            np.array(gaussian.read_rotational_temperatures(test_file)),
            expected_rot_temp)

    def test_read_molecular_mass(self):
        # Total molecular mass in atomic mass units.
        self.assertAlmostEqual(
            gaussian.read_molecular_mass(test_file, units='amu'), 8470.11614)

    def test_rot_symmetry_number(self):
        self.assertEqual(gaussian.read_rot_symmetry_num(test_file), 1)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1943992 | <filename>other_methods/sceloss/utils/utils.py
import csv
import sys
import numpy as np
class CSVLogger():
    """Writes experiment configuration followed by per-row metrics to a CSV file."""

    def __init__(self, args, fieldnames, filename='log.csv'):
        self.filename = filename
        self.csv_file = open(filename, 'w')

        # The run configuration goes first, one "name,value" row per argument,
        # separated from the metric header by a blank row.
        config_writer = csv.writer(self.csv_file)
        for name in vars(args):
            config_writer.writerow([name, getattr(args, name)])
        config_writer.writerow([''])

        self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames)
        self.writer.writeheader()
        self.csv_file.flush()

    def writerow(self, row):
        """Append one metrics row (a dict keyed by the fieldnames) and flush."""
        self.writer.writerow(row)
        self.csv_file.flush()

    def close(self):
        """Close the underlying file handle."""
        self.csv_file.close()
class Logger(object):
    """Tee stream: mirrors everything written to stdout into a log file."""

    def __init__(self, filename):
        self.terminal = sys.stdout
        self.log = open(filename, 'w')

    def write(self, message):
        """Write *message* to the original stdout and to the log file (flushed)."""
        self.terminal.write(message)
        self.log.write(message)
        self.log.flush()

    def flush(self):
        """Intentional no-op, kept for Python 3 stream-interface compatibility."""
class AverageMeter(object):
    """Tracks the latest value, running average, sum, count, and maximum of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.max = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the derived statistics."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
        if val > self.max:
            self.max = val
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracies (fractions in [0, 1]) for each k in *topk*.

    Args:
        output: (batch, n_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 0-dim tensors, one per k: the fraction of samples whose true
        class appears among the k highest-scoring predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()  # (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Bug fix: `correct[:k]` is a slice of a transposed (non-contiguous)
        # tensor, so Tensor.view(-1) raises a RuntimeError; reshape(-1)
        # handles non-contiguous input.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1/batch_size))
    return res
def count_parameters_in_MB(model):
    """Total parameter count of *model* in millions, skipping auxiliary heads."""
    total = 0
    for name, param in model.named_parameters():
        if "auxiliary_head" in name:
            continue
        total += np.prod(param.size())
    return total / 1e6
| StarcoderdataPython |
# Registry of cell-type-marker topologies, empty at definition time.
# NOTE(review): presumably populated by other modules at import time —
# confirm before relying on its contents.
CELLTYPEMARKER_TOPOLOGIES = {}
| StarcoderdataPython |
8120330 | import click
from tabulate import tabulate
import ce_api
from ce_api.models import AuthEmail
from ce_cli.cli import cli
from ce_cli.utils import api_client, api_call
from ce_cli.utils import check_login_status, pass_info, Info
from ce_cli.utils import declare, confirmation
from ce_standards import constants
@cli.group()
@pass_info
def auth(info):
    """Authentication utilities of the Core Engine"""
    # Group container only; the subcommands below implement the behavior.
    pass
@auth.command()
@pass_info
def login(info):
    """Login with your username and password"""
    username = click.prompt('Please enter your email', type=str)
    password = click.prompt('Please enter your password', type=str,
                            hide_input=True)

    # Talk to the login endpoint directly — there is no stored token yet.
    configuration = ce_api.Configuration()
    configuration.host = constants.API_HOST
    login_api = ce_api.LoginApi(ce_api.ApiClient(configuration))

    output = api_call(
        func=login_api.login_access_token_api_v1_login_access_token_post,
        username=username,
        password=password
    )

    info[constants.ACTIVE_USER] = username
    declare('Login successful!')

    # Persist the fresh access token under this user's entry in the info store.
    if username not in info:
        info[username] = {}
    info[username][constants.TOKEN] = output.access_token
    info.save()
@auth.command()
@pass_info
def logout(info):
    """Log out of your account"""
    # Guard clause: do nothing unless the user explicitly confirms.
    if not click.confirm('Are you sure that you want to log out?'):
        return
    click.echo('Logged out!')
    info[constants.ACTIVE_USER] = None
    info.save()
@auth.command()
@click.option('--all', 'r_all', is_flag=True, help='Flag to reset all users')
@pass_info
def reset(info, r_all):
    """Reset cookies"""
    # Bug fix: the previous version cleared the active user's stored info and
    # re-saved it even after the user answered "no" to the confirmation (and
    # redundantly repeated the reset after a confirmed one). State changes now
    # happen only on an explicit confirmation.
    if r_all:
        if click.confirm('Are you sure that you want to reset for all?'):
            info = Info()
            info.save()
            click.echo('Info reset!')
        else:
            click.echo('Reset aborted!')
    else:
        active_user = info[constants.ACTIVE_USER]
        if click.confirm('Are you sure that you want to reset info for '
                         '{}?'.format(active_user)):
            info[active_user] = {}
            info.save()
            click.echo('Info reset!')
        else:
            click.echo('Reset aborted!')
@auth.command()
@pass_info
def reset_password(info):
    """Send reset password link to registered email address"""
    confirmation('Are you sure you want to reset your password? This will '
                 'trigger an email for resetting your password and '
                 'clear cookies.', abort=True)
    check_login_status(info)

    # Resolve the logged-in user's email, then ask the backend to send the
    # reset message to that address.
    users_api = ce_api.UsersApi(api_client(info))
    user = api_call(users_api.get_loggedin_user_api_v1_users_me_get)

    login_api = ce_api.LoginApi(api_client(info))
    api_call(login_api.send_reset_pass_email_api_v1_login_email_resetpassword_post,
             AuthEmail(email=user.email))

    # Invalidate the local session ("clear cookies").
    info[constants.ACTIVE_USER] = None
    info.save()
    declare("Reset password email sent to {}".format(user.email))
@auth.command()
@pass_info
def whoami(info):
    """Info about the account which is currently logged in"""
    check_login_status(info)

    users_api = ce_api.UsersApi(api_client(info))
    billing_api = ce_api.BillingApi(api_client(info))

    user = api_call(users_api.get_loggedin_user_api_v1_users_me_get)
    bill = api_call(billing_api.get_user_billing_api_v1_billing_users_user_id_get,
                    user_id=user.id)

    # Single-row summary of identity, usage, and billing figures.
    row = {
        'Email': info[constants.ACTIVE_USER],
        'Full Name': user.full_name if user.full_name else '',
        'Pipelines Run': user.n_pipelines_executed,
        'Processed Datapoints total': bill.total_processed_datapoints,
        'Cost Total': bill.cost_total,
        'Processed Datapoints this Month':
            bill.processed_datapoints_this_month,
        'Cost This Month': bill.cost_this_month,
    }
    click.echo(tabulate([row], headers='keys', tablefmt='presto'))
@auth.command()
@pass_info
def organization(info):
    """Info about the account which is currently logged in"""
    check_login_status(info)

    orgs_api = ce_api.OrganizationsApi(api_client(info))
    billing_api = ce_api.BillingApi(api_client(info))

    org = api_call(orgs_api.get_loggedin_organization_api_v1_organizations_get)
    bill = api_call(
        billing_api.get_organization_billing_api_v1_billing_organization_get)

    # Single-row summary of the organization's usage and billing figures.
    row = {
        'Organization Name': org.name,
        'Processed Datapoints total': bill.total_processed_datapoints,
        'Cost Total': bill.cost_total,
        'Processed Datapoints this Month':
            bill.processed_datapoints_this_month,
        'Cost This Month': bill.cost_this_month,
    }
    click.echo(tabulate([row], headers='keys', tablefmt='presto'))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.